/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/*
 * Allocate one page for use as a kernel page table: it is cleared,
 * pushed out of the cache and mapped non-cacheable.
 */
static pte_t * __init kernel_page_table(void)
{
	pte_t *ptablep;

	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

	clear_page(ptablep);
	__flush_page_to_ram(ptablep);
	flush_tlb_kernel_page(ptablep);
	nocache_page(ptablep);

	return ptablep;
}

static pmd_t *last_pgtable __initdata = NULL;
pmd_t *zero_pgtable __initdata = NULL;

/*
 * Allocate a pointer (pmd) table.  Several pointer tables fit into
 * one page, so the most recently allocated page is filled up before
 * a new one is requested from the bootmem allocator.
 */
static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_present(kernel_pg_dir[i]))
				continue;
			pmd = __pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);
		flush_tlb_kernel_page(last_pgtable);
		nocache_page(last_pgtable);
	}

	return last_pgtable;
}

/*
 * Map a chunk of physical RAM into the kernel's virtual address
 * space.  Returns the first virtual address past the mapped chunk.
 */
static unsigned long __init map_chunk(unsigned long addr, long size)
{
#define PTRTREESIZE	(256*1024)
#define ROOTTREESIZE	(32*1024*1024)
	static unsigned long virtaddr = PAGE_OFFSET;
	unsigned long physaddr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	physaddr = (addr | m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);
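
		/*
		 * The 68020/030 MMU supports "early termination"
		 * descriptors: a single pointer-table entry can map a
		 * whole 256 KiB region (PTRTREESIZE), and a single
		 * root-table entry a whole 32 MiB region
		 * (ROOTTREESIZE), so most of the kernel mapping needs
		 * no page tables at all on those CPUs.
		 */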
		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk("[early term]");
#endif
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
				int i;
#ifdef DEBUG
				printk("[zero map]");
#endif
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;
				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				/* Leave the first (NULL) page unmapped and
				 * map the following 63 pages normally. */
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif

	return virtaddr;
}

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	int chunk;
	unsigned long mem_avail = 0;
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };

#ifdef DEBUG
	{
		extern unsigned long availmem;
		printk("start of paging_init (%p, %lx)\n",
			kernel_pg_dir, availmem);
	}
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space.  It may allocate some memory for page
	 * tables and thus modify availmem.
	 */
	for (chunk = 0; chunk < m68k_num_memory; chunk++) {
		mem_avail = map_chunk(m68k_memory[chunk].addr,
				      m68k_memory[chunk].size);
	}

	flush_tlb_all();
#ifdef DEBUG
	printk("memory available is %ldKB\n", mem_avail >> 10);
#endif

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk("before free_area_init\n");
#endif
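	/*
	 * Zone sizes are computed in pages: memory the hardware can
	 * reach by DMA goes into ZONE_DMA, any RAM above that limit
	 * into ZONE_NORMAL.
	 */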
	zones_size[ZONE_DMA] = (mach_max_dma_address < (unsigned long)high_memory ?
				(mach_max_dma_address+1) : (unsigned long)high_memory);
	zones_size[ZONE_NORMAL] = (unsigned long)high_memory - zones_size[ZONE_DMA];

	zones_size[ZONE_DMA] = (zones_size[ZONE_DMA] - PAGE_OFFSET) >> PAGE_SHIFT;
	zones_size[ZONE_NORMAL] >>= PAGE_SHIFT;

	free_area_init(zones_size);
}

extern char __init_begin, __init_end;

/*
 * Free the memory occupied by the __init sections back to the page
 * allocator once boot-time initialization is finished.
 */
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)&__init_begin;
	for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
		virt_to_page(addr)->flags &= ~(1 << PG_reserved);
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
}