/*
 * _PAGE_CHG_MASK masks of bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
/*
 * NOTE(review): the keywords below ("staticinline", "unsignedlong") were
 * fused by whatever extracted this file and will not compile as-is —
 * restore the spaces against the upstream source before building.
 */
/* * PTE updates. This function is called whenever an existing * valid PTE is updated. This does -not- include set_pte_at() * which nowadays only sets a new PTE. * * Depending on the type of MMU, we may need to use atomic updates * and the PTE may be either 32 or 64 bit wide. In the later case, * when using atomic updates, only the low part of the PTE is * accessed atomically. * * In addition, on 44x, we also maintain a global flag indicating * that an executable user mapping was modified, which is needed * to properly flush the virtually tagged instruction cache of * those implementations.
*/ #ifndef pte_update staticinline pte_basic_t pte_update(struct mm_struct *mm, unsignedlong addr, pte_t *p, unsignedlong clr, unsignedlong set, int huge)
{
pte_basic_t old = pte_val(*p);
pte_basic_t new = (old & ~(pte_basic_t)clr) | set; unsignedlong sz; unsignedlong pdsize; int i;
if (new == old) return old;
if (huge)
sz = pte_huge_size(__pte(old)); else
sz = PAGE_SIZE;
/*
 * NOTE(review): this function appears TRUNCATED by the extraction.
 * `pdsize` and `i` are declared but the code that would use them — the
 * actual PTE store loop, any 44x icache-flush bookkeeping, the final
 * `return old;`, the closing brace and the matching `#endif` for
 * `#ifndef pte_update` above — is missing here. Recover the remainder
 * from the upstream file before relying on this copy; do not attempt to
 * fill it in from memory.
 */
/* * Don't just check for any non zero bits in __PAGE_READ, since for book3e * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in * _PAGE_READ. Need to explicitly match _PAGE_BAP_UR bit in that case too.
*/ #ifndef pte_read staticinlinebool pte_read(pte_t pte)
{ return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
} #endif
/* * We only find page table entry in the last level * Hence no need for other accessors
*/ #define pte_access_permitted pte_access_permitted staticinlinebool pte_access_permitted(pte_t pte, bool write)
{ /* * A read-only access is controlled by _PAGE_READ bit. * We have _PAGE_READ set for WRITE
*/ if (!pte_present(pte) || !pte_read(pte)) returnfalse;
if (write && !pte_write(pte)) returnfalse;
returntrue;
}
/* Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. * * Even if PTEs can be unsigned long long, a PFN is always an unsigned * long for now.
*/ staticinline pte_t pfn_pte(unsignedlong pfn, pgprot_t pgprot) { return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
pgprot_val(pgprot)); }
/*
 * This low level function performs the actual PTE insertion
 * Setting the PTE depends on the MMU type and other factors. It's
 * an horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between.
	 * In the percpu case, we also fallback to the simple update
	 */
	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
		/* Store high word, barrier, then low word (offset +4). */
		__asm__ __volatile__("\
			stw%X0 %2,%0\n\
			mbar\n\
			stw%X1 %L2,%1"
		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
		: "r" (pte) : "memory");
		return;
	}

	/*
	 * Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	/* 8xx with 16K pages mirrors the value into all four sub-entries. */
	ptep->pte3 = ptep->pte2 = ptep->pte1 = ptep->pte = pte_val(pte);
#else
	*ptep = pte;
#endif

	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote. Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
		mb();
}
/* * Macro to mark a page protection value as "uncacheable".
*/
/*
 * NOTE(review): the following is web-page boilerplate accidentally captured
 * alongside this source and is not part of the code. Original German
 * disclaimer, translated: "The information on this website was carefully
 * compiled to the best of our knowledge. However, neither completeness, nor
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */