/*
 * The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
static int mmu_pte_psize;	/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
unsigned long linear_map_top;	/* Top of linear mapping */

/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;
/*
 * NOTE(review): the region below appears to be garbled by extraction.
 * It opens as tlb_flush_pgtable() but then splices in fragments that
 * look like they belong to other functions (HW-tablewalk configuration
 * and the per-core bolted linear mapping setup): the identifiers
 * 'start', 'end', 'size', 'eptcfg', 'num_cams' and 'map' are used with
 * no visible declarations, and there is a stray '} else {'.  Restore
 * this region from the original source before making any changes; the
 * code is preserved byte-for-byte here and only annotated.
 */
/* * Handling of virtual linear page tables or indirect TLB entries * flushing when PTE pages are freed
*/ void tlb_flush_pgtable(struct mmu_gather *tlb, unsignedlong address)
{ int tsize = mmu_psize_defs[mmu_pte_psize].shift - 10;
/* This isn't the most optimal, ideally we would factor out the * while preempt & CPU mask mucking around, or even the IPI but * it will do for now
*/ while (start < end) {
/* NOTE(review): 'start', 'end' and 'size' are not declared in the
 * visible text -- presumably set up in code lost to the garbling. */
__flush_tlb_page(tlb->mm, start, tsize, 1);
start += size;
}
} else { unsignedlong rmask = 0xf000000000000000ul; unsignedlong rid = (address & rmask) | 0x1000000000000000ul; unsignedlong vpte = address & ~rmask;
/* * We expect 4K subpage size and unrestricted indirect size. * The lack of a restriction on indirect size is a Freescale * extension, indicated by PSn = 0 but SPSn != 0.
*/ if (eptcfg != 2)
/* NOTE(review): falls back to software tablewalk when the expected
 * EPTCFG encoding is not found -- confirm against original source. */
book3e_htw_mode = PPC_HTW_NONE;
/* use a quarter of the TLBCAM for bolted linear map */
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
/* * Only do the mapping once per core, or else the * transient mapping would cause problems.
*/ #ifdef CONFIG_SMP if (hweight32(get_tensr()) > 1)
map = false; #endif
if (map)
linear_map_top = map_mem_in_cams(linear_map_top,
num_cams, false, true);
/* A sync won't hurt us after mucking around with * the MMU configuration
*/
mb();
}
/*
 * One-time (boot CPU only) MMU setup for 64-bit Book3E.
 *
 * Selects the page size used for the virtual memory map, probes the
 * TLB page sizes supported by the hardware, patches in the e6500
 * hardware-tablewalk TLB miss handlers when that mode was selected,
 * and records the top of the linear mapping for the TLB miss code.
 */
static void __init early_init_mmu_global(void)
{
	/*
	 * Freescale booke only supports 4K pages in TLB0, so use that.
	 */
	mmu_vmemmap_psize = MMU_PAGE_4K;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW.  It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	/* Look for supported page sizes */
	setup_page_sizes();

	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */
	extlb_level_exc = EX_TLB_SIZE;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
	default:
		/* Software tablewalk: keep the default miss handlers. */
		break;
	}

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();

	ioremap_bot = IOREMAP_BASE;
}
/*
 * Permanently cap usable memory at the top of the bolted linear
 * mapping so the kernel never takes linear-map faults on unmapped RAM.
 */
static void __init early_mmu_set_memory_limit(void)
{
	/*
	 * Limit memory so we dont have linear faults.
	 * Unlike memblock_set_current_limit, which limits
	 * memory available during early boot, this permanently
	 * reduces the memory available to Linux.  We need to
	 * do this because highmem is not supported on 64-bit.
	 */
	memblock_enforce_memory_limit(linear_map_top);
	memblock_set_current_limit(linear_map_top);
}
/* boot cpu only */
void __init early_init_mmu(void)
{
	/* Global setup first, then this CPU's MMU, then the memory cap. */
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}
/*
 * NOTE(review): this function is truncated in the visible text -- the
 * body is cut off after the num_cams computation (unrelated prose
 * follows in the file).  'linear_sz' is declared but its use, the
 * ppc64_rma_size assignment, and the closing brace are missing.
 * Restore the remainder from the original source; the visible lines
 * are preserved byte-for-byte below.
 */
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{ /* * On FSL Embedded 64-bit, usually all RAM is bolted, but with * unusual memory sizes it's possible for some RAM to not be mapped * (such RAM is not used at all by Linux, since we don't support * highmem on 64-bit). We limit ppc64_rma_size to what would be * mappable if this memblock is the only one. Additional memblocks * can only increase, not decrease, the amount that ends up getting * mapped. We still limit max to 1G even if we'll eventually map * more. This is due to what the early init code is set up to do. * * We crop it to the size of the first MEMBLOCK to * avoid going over total available memory just in case...
*/ unsignedlong linear_sz; unsignedint num_cams;
/* use a quarter of the TLBCAM for bolted linear map */
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
The information on this web page has been compiled carefully and to the
best of our knowledge. However, no guarantee is given as to the
completeness, correctness, or quality of the information provided.
Note:
The coloured syntax highlighting and the measurement are still experimental.