staticbool disable_cmdqv;
module_param(disable_cmdqv, bool, 0444);
MODULE_PARM_DESC(disable_cmdqv, "This allows to disable CMDQV HW and use default SMMU internal CMDQ.");
staticbool bypass_vcmdq;
module_param(bypass_vcmdq, bool, 0444);
MODULE_PARM_DESC(bypass_vcmdq, "This allows to bypass VCMDQ for debugging use or perf comparison.");
/** * struct tegra241_vcmdq - Virtual Command Queue * @core: Embedded iommufd_hw_queue structure * @idx: Global index in the CMDQV * @lidx: Local index in the VINTF * @enabled: Enable status * @cmdqv: Parent CMDQV pointer * @vintf: Parent VINTF pointer * @prev: Previous LVCMDQ to depend on * @cmdq: Command Queue struct * @page0: MMIO Page0 base address * @page1: MMIO Page1 base address
*/ struct tegra241_vcmdq { struct iommufd_hw_queue core;
/**
 * struct tegra241_vintf - Virtual Interface
 * @vsmmu: Embedded arm_vsmmu structure
 * @idx: Global index in the CMDQV
 * @enabled: Enable status
 * @hyp_own: Owned by hypervisor (in-kernel)
 * @cmdqv: Parent CMDQV pointer
 * @lvcmdqs: List of logical VCMDQ pointers
 * @lvcmdq_mutex: Lock to serialize user-allocated lvcmdqs
 * @base: MMIO base address
 * @mmap_offset: Offset argument for mmap() syscall
 * @sids: Stream ID mapping resources
 */
struct tegra241_vintf {
	struct arm_vsmmu vsmmu;

	u16 idx;
	bool enabled;
	bool hyp_own;

	struct tegra241_cmdqv *cmdqv;
	struct tegra241_vcmdq **lvcmdqs;
	struct mutex lvcmdq_mutex; /* user space race */

	void __iomem *base;
	unsigned long mmap_offset;

	struct ida sids;
};

#define viommu_to_vintf(v) container_of(v, struct tegra241_vintf, vsmmu.core)
/**
 * struct tegra241_vintf_sid - Virtual Interface Stream ID Mapping
 * @core: Embedded iommufd_vdevice structure, holding virtual Stream ID
 * @vintf: Parent VINTF pointer
 * @sid: Physical Stream ID
 * @idx: Mapping index in the VINTF
 */
struct tegra241_vintf_sid {
	struct iommufd_vdevice core;
	struct tegra241_vintf *vintf;
	u32 sid;
	u8 idx;
};

#define vdev_to_vsid(v) container_of(v, struct tegra241_vintf_sid, core)
/** * struct tegra241_cmdqv - CMDQ-V for SMMUv3 * @smmu: SMMUv3 device * @dev: CMDQV device * @base: MMIO base address * @base_phys: MMIO physical base address, for mmap * @irq: IRQ number * @num_vintfs: Total number of VINTFs * @num_vcmdqs: Total number of VCMDQs * @num_lvcmdqs_per_vintf: Number of logical VCMDQs per VINTF * @num_sids_per_vintf: Total number of SID mappings per VINTF * @vintf_ids: VINTF id allocator * @vintfs: List of VINTFs
*/ struct tegra241_cmdqv { struct arm_smmu_device smmu; struct device *dev;
void __iomem *base;
phys_addr_t base_phys; int irq;
/* Use readl_relaxed() as register addresses are not 64-bit aligned */
vintf_map = (u64)readl_relaxed(reg_vintf_map + 0x4) << 32 |
(u64)readl_relaxed(reg_vintf_map);
/* Use SMMU CMDQ if VINTF0 is uninitialized */ if (!READ_ONCE(vintf->enabled)) return NULL;
/* * Select a LVCMDQ to use. Here we use a temporal solution to * balance out traffic on cmdq issuing: each cmdq has its own * lock, if all cpus issue cmdlist using the same cmdq, only * one CPU at a time can enter the process, while the others * will be spinning at the same lock.
*/
lidx = raw_smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
vcmdq = vintf->lvcmdqs[lidx]; if (!vcmdq || !READ_ONCE(vcmdq->enabled)) return NULL;
/* Unsupported CMD goes for smmu->cmdq pathway */ if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent)) return NULL; return &vcmdq->cmdq;
}
/* HW Reset Functions */
/*
 * When a guest-owned VCMDQ is disabled, if the guest did not enqueue a CMD_SYNC
 * following an ATC_INV command at the end of the guest queue while this ATC_INV
 * is timed out, the TIMEOUT will not be reported until this VCMDQ gets assigned
 * to the next VM, which will be a false alarm potentially causing some unwanted
 * behavior in the new VM. Thus, a guest-owned VCMDQ must flush the TIMEOUT when
 * it gets disabled. This can be done by just issuing a CMD_SYNC to SMMU CMDQ.
 */
static void tegra241_vcmdq_hw_flush_timeout(struct tegra241_vcmdq *vcmdq)
{
	struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu;
	u64 cmd_sync[CMDQ_ENT_DWORDS] = {};

	/*
	 * It does not hurt to insert another CMD_SYNC, taking advantage of the
	 * arm_smmu_cmdq_issue_cmdlist() that waits for the CMD_SYNC completion.
	 */
	arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, cmd_sync, 1, true);
}
/* This function is for LVCMDQ, so @vcmdq must not be unmapped yet */ staticvoid tegra241_vcmdq_hw_deinit(struct tegra241_vcmdq *vcmdq)
{ char header[64], *h = lvcmdq_error_header(vcmdq, header, 64);
u32 gerrorn, gerror;
/* This function is for LVCMDQ, so @vcmdq must be mapped prior */ staticint tegra241_vcmdq_hw_init(struct tegra241_vcmdq *vcmdq)
{ char header[64], *h = lvcmdq_error_header(vcmdq, header, 64); int ret;
/* Configure and enable VINTF */ /* * Note that HYP_OWN bit is wired to zero when running in guest kernel, * whether enabling it here or not, as !HYP_OWN cmdq HWs only support a * restricted set of supported commands.
*/
regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own) |
FIELD_PREP(VINTF_VMID, vintf->vsmmu.vmid);
writel(regval, REG_VINTF(vintf, CONFIG));
ret = vintf_write_config(vintf, regval | VINTF_EN); if (ret) return ret; /* * As being mentioned above, HYP_OWN bit is wired to zero for a guest * kernel, so read it back from HW to ensure that reflects in hyp_own
*/
vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG)));
/* HW requires to map LVCMDQs in ascending order */ for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) { if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
tegra241_vcmdq_map_lvcmdq(vintf->lvcmdqs[lidx]);
ret = tegra241_vcmdq_hw_init(vintf->lvcmdqs[lidx]); if (ret) {
tegra241_vintf_hw_deinit(vintf); return ret;
}
}
}
/* Cap queue size to SMMU's IDR1.CMDQS and ensure natural alignment */
regval = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
q->llq.max_n_shift =
min_t(u32, CMDQ_MAX_SZ_SHIFT, FIELD_GET(IDR1_CMDQS, regval));
/* Use the common helper to init the VCMDQ, and then... */
ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
TEGRA241_VCMDQ_PROD, TEGRA241_VCMDQ_CONS,
CMDQ_ENT_DWORDS, name); if (ret) return ret;
/* Note that the lvcmdq queue memory space is managed by devres */
tegra241_vintf_deinit_lvcmdq(vintf, lidx);
dev_dbg(vintf->cmdqv->dev, "%sdeallocated\n", lvcmdq_error_header(vcmdq, header, 64)); /* Guest-owned VCMDQ is free-ed with hw_queue by iommufd core */ if (vcmdq->vintf->hyp_own)
kfree(vcmdq);
}
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
tegra241_cmdqv_acpi_is_memory, NULL); if (ret < 0) {
dev_err(dev, "failed to get memory resource: %d\n", ret); return NULL;
}
rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node); if (!rentry) {
dev_err(dev, "failed to get memory resource entry\n"); goto free_list;
}
/* Caller must free the res */
res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) goto free_list;
*res = *rentry->res;
acpi_dev_free_resource_list(&resource_list);
INIT_LIST_HEAD(&resource_list);
if (irq)
ret = acpi_dev_get_resources(adev, &resource_list,
tegra241_cmdqv_acpi_get_irqs, irq); if (ret < 0 || !irq || *irq <= 0)
dev_warn(dev, "no interrupt. errors will not be reported\n");
/*
 * Set up the in-kernel VINTF0 and preallocate its logical VCMDQs, then expose
 * the CMDQV impl ops to the SMMU core.
 *
 * Returns 0 on success or a negative errno.
 */
static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu)
{
	struct tegra241_cmdqv *cmdqv =
		container_of(smmu, struct tegra241_cmdqv, smmu);
	struct tegra241_vintf *vintf;
	int lidx;
	int ret;

	vintf = kzalloc(sizeof(*vintf), GFP_KERNEL);
	if (!vintf)
		return -ENOMEM;

	/* Init VINTF0 for in-kernel use */
	ret = tegra241_cmdqv_init_vintf(cmdqv, 0, vintf);
	if (ret) {
		dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret);
		/*
		 * VINTF0 failed to register with the CMDQV, so nothing else
		 * will ever free it — release it here to avoid a memory leak.
		 */
		kfree(vintf);
		return ret;
	}

	/* Preallocate logical VCMDQs to VINTF0 */
	for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) {
		struct tegra241_vcmdq *vcmdq;

		vcmdq = tegra241_vintf_alloc_lvcmdq(vintf, lidx);
		if (IS_ERR(vcmdq))
			return PTR_ERR(vcmdq);
	}

	/* Now, we are ready to run all the impl ops */
	smmu->impl_ops = &tegra241_cmdqv_impl_ops;
	return 0;
}
out_fallback:
dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n");
smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV;
put_device(smmu->impl_dev); return ERR_PTR(-ENODEV);
}
if (hw_queue->type != IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV) return -EOPNOTSUPP; if (lidx >= cmdqv->num_lvcmdqs_per_vintf) return -EINVAL;
mutex_lock(&vintf->lvcmdq_mutex);
if (vintf->lvcmdqs[lidx]) {
ret = -EEXIST; goto unlock;
}
/* * HW requires to map LVCMDQs in ascending order, so reject if the * previous lvcmdqs is not allocated yet.
*/ if (lidx) {
prev = vintf->lvcmdqs[lidx - 1]; if (!prev) {
ret = -EIO; goto unlock;
}
}
/* * hw_queue->length must be a power of 2, in range of * [ 32, 2 ^ (idr[1].CMDQS + CMDQ_ENT_SZ_SHIFT) ]
*/
max_n_shift = FIELD_GET(IDR1_CMDQS,
readl_relaxed(smmu->base + ARM_SMMU_IDR1)); if (!is_power_of_2(hw_queue->length) || hw_queue->length < 32 ||
hw_queue->length > (1 << (max_n_shift + CMDQ_ENT_SZ_SHIFT))) {
ret = -EINVAL; goto unlock;
}
log2size = ilog2(hw_queue->length) - CMDQ_ENT_SZ_SHIFT;
/* base_addr_pa must be aligned to hw_queue->length */ if (base_addr_pa & ~VCMDQ_ADDR ||
base_addr_pa & (hw_queue->length - 1)) {
ret = -EINVAL; goto unlock;
}
/* * HW requires to unmap LVCMDQs in descending order, so destroy() must * follow this rule. Set a dependency on its previous LVCMDQ so iommufd * core will help enforce it.
*/ if (prev) {
ret = iommufd_hw_queue_depend(vcmdq, prev, core); if (ret) goto unlock;
}
vcmdq->prev = prev;
ret = tegra241_vintf_init_lvcmdq(vintf, lidx, vcmdq); if (ret) goto undepend_vcmdq;
/* * Unsupported type should be rejected by tegra241_cmdqv_get_vintf_size. * Seeing one here indicates a kernel bug or some data corruption.
*/ if (WARN_ON(vsmmu->core.type != IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV)) return -EOPNOTSUPP;
if (!user_data) return -EINVAL;
ret = iommu_copy_struct_from_user(&data, user_data,
IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV,
out_vintf_mmap_length); if (ret) return ret;
ret = tegra241_cmdqv_init_vintf(cmdqv, cmdqv->num_vintfs - 1, vintf); if (ret < 0) {
dev_err(cmdqv->dev, "no more available vintf\n"); return ret;
}
/* * Initialize the user-owned VINTF without a LVCMDQ, as it cannot pre- * allocate a LVCMDQ until user space wants one, for security reasons. * It is different than the kernel-owned VINTF0, which had pre-assigned * and pre-allocated global VCMDQs that would be mapped to the LVCMDQs * by the tegra241_vintf_hw_init() call.
*/
ret = tegra241_vintf_hw_init(vintf, false); if (ret) goto deinit_vintf;
page0_base = cmdqv->base_phys + TEGRA241_VINTFi_PAGE0(vintf->idx);
ret = iommufd_viommu_alloc_mmap(&vintf->vsmmu.core, page0_base, SZ_64K,
&vintf->mmap_offset); if (ret) goto hw_deinit_vintf;
data.out_vintf_mmap_length = SZ_64K;
data.out_vintf_mmap_offset = vintf->mmap_offset;
ret = iommu_copy_struct_to_user(user_data, &data,
IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV,
out_vintf_mmap_length); if (ret) goto free_mmap;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.