/*
 * If no MM is passed then this creates a SVA entry that faults
 * everything. arm_smmu_write_cd_entry() can hitlessly go between these
 * two entries types since TTB0 is ignored by HW when EPD0 is set.
 */
if (mm) {
	/* TCR fields: input size, granule, cacheability and shareability */
	target->data[0] |= cpu_to_le64(
		FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ,
			   64ULL - vabits_actual) |
		FIELD_PREP(CTXDESC_CD_0_TCR_TG0, page_size_to_cd()) |
		FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0,
			   ARM_LPAE_TCR_RGN_WBWA) |
		FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0,
			   ARM_LPAE_TCR_RGN_WBWA) |
		FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS));

	/*
	 * Disable stall and immediately generate an abort if stall
	 * disable is permitted. This speeds up cleanup for an unclean
	 * exit if the device is still doing a lot of DMA.
	 */
	if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
		target->data[0] &=
			cpu_to_le64(~(CTXDESC_CD_0_S | CTXDESC_CD_0_R));
}

/*
 * MAIR value is pretty much constant and global, so we can just get it
 * from the current CPU register.
 */
target->data[3] = cpu_to_le64(read_sysreg(mair_el1));

/*
 * Note that we don't bother with S1PIE on the SMMU, we just rely on
 * our default encoding scheme matching direct permissions anyway.
 * SMMU has no notion of S1POE nor GCS, so make sure that is clear if
 * either is enabled for CPUs, just in case anyone imagines otherwise.
 */
if (system_supports_poe() || system_supports_gcs())
	dev_warn_once(master->smmu->dev, "SVA devices ignore permission overlays and GCS\n");
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd);
/*
 * Cloned from the MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this
 * is used as a threshold to replace per-page TLBI commands to issue in the
 * command queue with an address-space TLBI command, when SMMU w/o a range
 * invalidation feature handles too many per-page TLBI commands, which will
 * otherwise result in a soft lockup.
 */
#define CMDQ_MAX_TLBI_OPS (1 << (PAGE_SHIFT - 3))

/*
 * The mm_types defines vm_end as the first byte after the end address,
 * different from IOMMU subsystem using the last address of an address
 * range. So do a simple translation here by calculating size correctly.
 */
size = end - start;
/*
 * size == 0 requests a full-ASID invalidation instead of per-page
 * commands (cheaper than issuing CMDQ_MAX_TLBI_OPS+ page TLBIs, per the
 * comment above) — presumably interpreted by the downstream invalidation
 * helper; confirm against the full file.
 */
if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
	if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
		size = 0;
} else {
	if (size == ULONG_MAX)
		size = 0;
}
if (vabits_actual == 52) { /* We don't support LPA2 */ if (PAGE_SIZE != SZ_64K) returnfalse;
feat_mask |= ARM_SMMU_FEAT_VAX;
}
if (system_supports_bbml2_noabort())
feat_mask |= ARM_SMMU_FEAT_BBML2;
if ((smmu->features & feat_mask) != feat_mask) returnfalse;
if (!(smmu->pgsize_bitmap & PAGE_SIZE)) returnfalse;
/* * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're * not even pretending to support AArch32 here. Abort if the MMU outputs * addresses larger than what we support.
*/
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
oas = id_aa64mmfr0_parange_to_phys_shift(fld); if (smmu->oas < oas) returnfalse;
/* We can support bigger ASIDs than the CPU, but not smaller */
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
asid_bits = fld ? 16 : 8; if (smmu->asid_bits < asid_bits) returnfalse;
/* * See max_pinned_asids in arch/arm64/mm/context.c. The following is * generally the maximum number of bindable processes.
*/ if (arm64_kernel_unmapped_at_el0())
asid_bits--;
dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
num_possible_cpus() - 2);
returntrue;
}
/*
 * Block until all in-flight MMU-notifier frees for SVA domains have
 * completed, so callers can safely tear down state they share with the
 * notifier callbacks.
 */
void arm_smmu_sva_notifier_synchronize(void)
{
	/*
	 * Some MMU notifiers may still be waiting to be freed, using
	 * arm_smmu_mmu_notifier_free(). Wait for them.
	 */
	mmu_notifier_synchronize();
}
/*
 * NOTE(review): this span appears to splice together the interiors of
 * two separate functions — an SVA set-pasid/bind path (feature check
 * through arm_smmu_set_pasid) and an SVA domain-free path (ASID TLB
 * invalidation through mmu_notifier_put). Verify boundaries against the
 * full file before relying on the grouping below.
 */
if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
	return -EOPNOTSUPP;

/* Prevent arm_smmu_mm_release from being called while we are attaching */
if (!mmget_not_zero(domain->mm))
	return -EINVAL;

/*
 * This does not need the arm_smmu_asid_lock because SVA domains never
 * get reassigned.
 */
arm_smmu_make_sva_cd(&target, master, domain->mm, smmu_domain->cd.asid);
ret = arm_smmu_set_pasid(master, smmu_domain, id, &target, old);

/*
 * Ensure the ASID is empty in the iommu cache before allowing reuse.
 */
arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);

/*
 * Notice that the arm_smmu_mm_arch_invalidate_secondary_tlbs op can
 * still be called/running at this point. We allow the ASID to be
 * reused, and if there is a race then it just suffers harmless
 * unnecessary invalidation.
 */
xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid);

/*
 * Actual free is deferred to the SRCU callback
 * arm_smmu_mmu_notifier_free().
 */
mmu_notifier_put(&smmu_domain->mmu_notifier);
}
/*
 * Interior of an SVA domain-allocation path: picks the invalidation leaf
 * page size, allocates an ASID from the shared XArray, and registers the
 * MMU notifier. Errors unwind via the goto labels defined later in the
 * (not visible) enclosing function.
 */
/*
 * Choose page_size as the leaf page size for invalidation when
 * ARM_SMMU_FEAT_RANGE_INV is present.
 */
smmu_domain->domain.pgsize_bitmap = PAGE_SIZE;
smmu_domain->smmu = smmu;

/* ASID 0 is reserved: allocate from [1, 2^asid_bits - 1] */
ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
	       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
if (ret)
	goto err_free;
smmu_domain->cd.asid = asid;

smmu_domain->mmu_notifier.ops = &arm_smmu_mmu_notifier_ops;
ret = mmu_notifier_register(&smmu_domain->mmu_notifier, mm);
if (ret)
	goto err_asid;
The information on this web page has been compiled carefully and to the best
of our knowledge. However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.