// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/m68k/mm/motorola.c * * Routines specific to the Motorola MMU, originally from: * linux/arch/m68k/init.c * which are Copyright (C) 1995 Hamish Macdonald * * Moved 8/20/1999 Sam Creasey
*/
#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif
/*
 * Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */

/* Mark the page at vaddr as uncacheable (040/060 only; 020/030 need no
 * per-page cache bits). */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mknocache(*ptep);
	}
}

/* Restore normal caching for the page at vaddr (counterpart of
 * nocache_page()). */
static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mkcache(*ptep);
	}
}
/* * Motorola 680x0 user's manual recommends using uncached memory for address * translation tables. * * Seeing how the MMU can be external on (some of) these chips, that seems like * a very important recommendation to follow. Provide some helpers to combat * 'variation' amongst the users of this.
*/
/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from struct ptdesc instead of separately kmalloced struct. Stolen from
arch/sparc/mm/srmmu.c ... */
/* unreserve the ptdesc so it's possible to free that ptdesc */
/*
 * NOTE(review): fragment — the enclosing function header is not visible in
 * this chunk; this looks like the tail of init_pointer_table(), where the
 * page backing a boot-time pointer table is made freeable.  'dp' is
 * presumably the ptable_desc for that page — confirm against the full file.
 */
__ClearPageReserved(ptdesc_page(PD_PTDESC(dp)));
init_page_count(ptdesc_page(PD_PTDESC(dp)));
/* * For a pointer table for a user process address space, a * table is taken from a ptdesc allocated for the purpose. Each * ptdesc can hold 8 pointer tables. The ptdesc is remapped in * virtual address space to be noncacheable.
*/ if (mask == 0) { struct ptdesc *ptdesc;
ptable_desc *new; void *pt_addr;
ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0); if (!ptdesc) return NULL;
pt_addr = ptdesc_address(ptdesc);
switch (type) { case TABLE_PTE: /* * m68k doesn't have SPLIT_PTE_PTLOCKS for not having * SMP.
*/
pagetable_pte_ctor(mm, ptdesc); break; case TABLE_PMD:
pagetable_pmd_ctor(mm, ptdesc); break; case TABLE_PGD:
pagetable_pgd_ctor(ptdesc); break;
}
mmu_page_ctor(pt_addr);
new = PD_PTABLE(pt_addr);
PD_MARKBITS(new) = ptable_mask(type) - 1;
list_add_tail(new, dp);
return (pmd_t *)pt_addr;
}
for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
;
PD_MARKBITS(dp) = mask & ~tmp; if (!PD_MARKBITS(dp)) { /* move to end of list */
list_move_tail(dp, &ptable_list[type]);
} return ptdesc_address(PD_PTDESC(dp)) + off;
}
if (PD_MARKBITS(dp) == ptable_mask(type)) { /* all tables in ptdesc are free, free ptdesc */
list_del(dp);
mmu_page_dtor((void *)pt_addr);
pagetable_dtor_free(virt_to_ptdesc((void *)pt_addr)); return 1;
} elseif (ptable_list[type].next != dp) { /* * move this descriptor to the front of the list, since * it has one or more free tables.
*/
list_move(dp, &ptable_list[type]);
} return 0;
}
/* size of memory already mapped in head.S */ extern __initdata unsignedlong m68k_init_mapped_size;
if (PAGE_ALIGNED(last_pte_table)) {
pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); if (!pte_table) {
panic("%s: Failed to allocate %lu bytes align=%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
}
clear_page(pte_table);
mmu_page_ctor(pte_table);
last_pte_table = pte_table;
}
last_pte_table += PTRS_PER_PTE;
return pte_table;
}
static pmd_t *last_pmd_table __initdata = NULL;

/*
 * Bump allocator for kernel pointer (pmd) tables, seeded from the last
 * table head.S used so the remainder of that page is not wasted.
 */
static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pmd_table) {
		unsigned long pmd, last;
		int i;

		/*
		 * Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

			if (!pud_present(*pud))
				continue;
			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}
		/* NOTE(review): the remainder of kernel_ptr_table() is
		 * missing from this chunk of the file. */
/* Fix the cache mode in the page descriptors for the 680[46]0. */ if (CPU_IS_040_OR_060) { int i; #ifndef mm_cachebits
mm_cachebits = _PAGE_CACHE040; #endif for (i = 0; i < 16; i++)
pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
}
min_addr = m68k_memory[0].addr;
max_addr = min_addr + m68k_memory[0].size - 1;
memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
MEMBLOCK_NONE); for (i = 1; i < m68k_num_memory;) { if (m68k_memory[i].addr < min_addr) {
printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
m68k_memory[i].addr, m68k_memory[i].size);
printk("Fix your bootloader or use a memfile to make use of this area!\n");
m68k_num_memory--;
memmove(m68k_memory + i, m68k_memory + i + 1,
(m68k_num_memory - i) * sizeof(struct m68k_mem_info)); continue;
}
memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
MEMBLOCK_NONE);
addr = m68k_memory[i].addr + m68k_memory[i].size - 1; if (addr > max_addr)
max_addr = addr;
i++;
}
m68k_memoffset = min_addr - PAGE_OFFSET;
m68k_virt_to_node_shift = fls(max_addr - min_addr) - 6;
/* Reserve kernel text/data/bss and the memory allocated in head.S */
memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
/* * Map the physical memory available into the kernel virtual * address space. Make sure memblock will not try to allocate * pages beyond the memory we already mapped in head.S
*/
memblock_set_bottom_up(true);
for (i = 0; i < m68k_num_memory; i++) {
m68k_setup_node(i);
map_node(i);
}
flush_tlb_all();
early_memtest(min_addr, max_addr);
/* * initialize the bad page table and bad page to point * to a couple of allocated pages
*/
empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
/* * Set up SFC/DFC registers
*/
set_fc(USER_DATA);
#ifdef DEBUG
printk ("before free_area_init\n"); #endif for (i = 0; i < m68k_num_memory; i++) if (node_present_pages(i))
node_set_state(i, N_NORMAL_MEMORY);
/*
 * NOTE(review): the German text below is a website disclaimer ("the
 * information on this web page was compiled to the best of our knowledge;
 * no guarantee of completeness, correctness or quality is given; the
 * syntax colouring and measurement are still experimental") that was
 * accidentally carried along when this file was extracted.  It is not part
 * of the source and should be removed; wrapped in a comment here so the
 * file stays compilable:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */