/* * Ensure that the tags written prior to this function are visible * before the page flags update.
*/
smp_wmb();
set_bit(PG_mte_tagged, &page->flags);
}
/*
 * Return 'true' if the page has had its tags initialised (PG_mte_tagged set).
 */
static inline bool page_mte_tagged(struct page *page)
{
	bool ret = test_bit(PG_mte_tagged, &page->flags);

	/*
	 * If the page is tagged, ensure ordering with a likely subsequent
	 * read of the tags.
	 */
	if (ret)
		smp_rmb();
	return ret;
}
/* * Lock the page for tagging and return 'true' if the page can be tagged, * 'false' if already tagged. PG_mte_tagged is never cleared and therefore the * locking only happens once for page initialisation. * * The page MTE lock state: * * Locked: PG_mte_lock && !PG_mte_tagged * Unlocked: !PG_mte_lock || PG_mte_tagged * * Acquire semantics only if the page is tagged (returning 'false').
*/ staticinlinebool try_page_mte_tagging(struct page *page)
{
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
if (!test_and_set_bit(PG_mte_lock, &page->flags)) returntrue;
/* * The tags are either being initialised or may have been initialised * already. Check if the PG_mte_tagged flag has been set or wait * otherwise.
*/
smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));
/* * Ensure that the tags written prior to this function are visible * before the folio flags update.
*/
smp_wmb();
set_bit(PG_mte_tagged, &folio->flags);
}
/*
 * Return 'true' if the hugetlb folio has had its tags initialised
 * (PG_mte_tagged set on the folio).
 */
static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
{
	bool ret = test_bit(PG_mte_tagged, &folio->flags);

	/* only meaningful for hugetlb folios */
	VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));

	/*
	 * If the folio is tagged, ensure ordering with a likely subsequent
	 * read of the tags.
	 */
	if (ret)
		smp_rmb();
	return ret;
}
if (!test_and_set_bit(PG_mte_lock, &folio->flags)) returntrue;
/* * The tags are either being initialised or may have been initialised * already. Check if the PG_mte_tagged flag has been set or wait * otherwise.
*/
smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));
staticinlinevoid mte_disable_tco_entry(struct task_struct *task)
{ if (!system_supports_mte()) return;
/* * Re-enable tag checking (TCO set on exception entry). This is only * necessary if MTE is enabled in either the kernel or the userspace * task in synchronous or asymmetric mode (SCTLR_EL1.TCF0 bit 0 is set * for both). With MTE disabled in the kernel and disabled or * asynchronous in userspace, tag check faults (including in uaccesses) * are not reported, therefore there is no need to re-enable checking. * This is beneficial on microarchitectures where re-enabling TCO is * expensive.
*/ if (kasan_hw_tags_enabled() ||
(task->thread.sctlr_user & (1UL << SCTLR_EL1_TCF0_SHIFT))) asmvolatile(SET_PSTATE_TCO(0));
}
/*
 * Check TFSR_EL1 for accumulated asynchronous tag check faults on kernel
 * entry; only relevant when in-kernel MTE (KASAN_HW_TAGS) is enabled.
 */
static inline void mte_check_tfsr_entry(void)
{
	if (!kasan_hw_tags_enabled())
		return;

	mte_check_tfsr_el1();
}
/*
 * Check TFSR_EL1 for asynchronous tag check faults on kernel exit; only
 * relevant when in-kernel MTE (KASAN_HW_TAGS) is enabled.
 *
 * NOTE(review): this chunk was truncated after isb(); the trailing
 * mte_check_tfsr_el1() call and closing brace have been restored by symmetry
 * with mte_check_tfsr_entry() — confirm against the full file.
 */
static inline void mte_check_tfsr_exit(void)
{
	if (!kasan_hw_tags_enabled())
		return;

	/*
	 * The asynchronous faults are sync'ed automatically with
	 * TFSR_EL1 on kernel entry but for exit an explicit dsb()
	 * is required.
	 */
	dsb(nsh);
	isb();

	mte_check_tfsr_el1();
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.