/* * It is a driver bug for providing a viommu_size smaller than the core * vIOMMU structure size
*/ if (WARN_ON_ONCE(viommu_size < sizeof(*viommu))) {
rc = -EOPNOTSUPP; goto out_put_idev;
}
xa_init(&viommu->vdevs);
viommu->type = cmd->type;
viommu->ictx = ucmd->ictx;
viommu->hwpt = hwpt_paging;
refcount_inc(&viommu->hwpt->common.obj.users);
INIT_LIST_HEAD(&viommu->veventqs);
init_rwsem(&viommu->veventqs_rwsem); /* * It is the most likely case that a physical IOMMU is unpluggable. A * pluggable IOMMU instance (if exists) is responsible for refcounting * on its own.
*/
viommu->iommu_dev = __iommu_get_iommu_dev(idev->dev);
mutex_lock(&idev->igroup->lock); if (idev->destroying) {
rc = -ENOENT; goto out_unlock_igroup;
}
if (idev->vdev) {
rc = -EEXIST; goto out_unlock_igroup;
}
if (viommu->ops && viommu->ops->vdevice_size) { /* * It is a driver bug for: * - ops->vdevice_size smaller than the core structure size * - not implementing a pairing ops->vdevice_init op
*/ if (WARN_ON_ONCE(viommu->ops->vdevice_size < vdev_size ||
!viommu->ops->vdevice_init)) {
rc = -EOPNOTSUPP; goto out_put_idev;
}
vdev_size = viommu->ops->vdevice_size;
}
vdev->virt_id = virt_id;
vdev->viommu = viommu;
refcount_inc(&viommu->obj.users); /* * A wait_cnt reference is held on the idev so long as we have the * pointer. iommufd_device_pre_destroy() will revoke it before the * idev real destruction.
*/
vdev->idev = idev;
/* * iommufd_device_destroy() delays until idev->vdev is NULL before * freeing the idev, which only happens once the vdev is finished * destruction.
*/
idev->vdev = vdev;
if (hw_queue->destroy)
hw_queue->destroy(hw_queue); if (hw_queue->access)
iommufd_hw_queue_destroy_access(hw_queue->viommu->ictx,
hw_queue->access,
hw_queue->base_addr,
hw_queue->length); if (hw_queue->viommu)
refcount_dec(&hw_queue->viommu->obj.users);
}
/* * When the HW accesses the guest queue via physical addresses, the underlying * physical pages of the guest queue must be contiguous. Also, for the security * concern that IOMMUFD_CMD_IOAS_UNMAP could potentially remove the mappings of * the guest queue from the nesting parent iopt while the HW is still accessing * the guest queue memory physically, such a HW queue must require an access to * pin the underlying pages and prevent that from happening.
*/ staticstruct iommufd_access *
iommufd_hw_queue_alloc_phys(struct iommu_hw_queue_alloc *cmd, struct iommufd_viommu *viommu, phys_addr_t *base_pa)
{
u64 aligned_iova = PAGE_ALIGN_DOWN(cmd->nesting_parent_iova);
u64 offset = cmd->nesting_parent_iova - aligned_iova; struct iommufd_access *access; struct page **pages;
size_t max_npages;
size_t length;
size_t i; int rc;
/* max_npages = DIV_ROUND_UP(offset + cmd->length, PAGE_SIZE) */ if (check_add_overflow(offset, cmd->length, &length)) return ERR_PTR(-ERANGE); if (check_add_overflow(length, PAGE_SIZE - 1, &length)) return ERR_PTR(-ERANGE);
max_npages = length / PAGE_SIZE; /* length needs to be page aligned too */
length = max_npages * PAGE_SIZE;
/* * Use kvcalloc() to avoid memory fragmentation for a large page array. * Set __GFP_NOWARN to avoid syzkaller blowups
*/
pages = kvcalloc(max_npages, sizeof(*pages), GFP_KERNEL | __GFP_NOWARN); if (!pages) return ERR_PTR(-ENOMEM);
/* Validate if the underlying physical pages are contiguous */ for (i = 1; i < max_npages; i++) { if (page_to_pfn(pages[i]) == page_to_pfn(pages[i - 1]) + 1) continue;
rc = -EFAULT; goto out_unpin;
}
/* * It is a driver bug for providing a hw_queue_size smaller than the * core HW queue structure size
*/ if (WARN_ON_ONCE(hw_queue_size < sizeof(*hw_queue))) {
rc = -EOPNOTSUPP; goto out_put_viommu;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.