/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

unsigned long highstart_pfn, highend_pfn;

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price. Since the pages are never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;

/*
 * Not static inline because used by IP27 special magic initialization code
 */
unsigned long setup_zero_pages(void)
{
	unsigned int order;
	unsigned long size;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}

/*
 * These are almost like kmap_atomic / kunmap_atomic except they take an
 * additional address argument as the hint.
 */
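/*
 * Typical use (an illustrative sketch; kaddr, uvaddr, src and len are
 * placeholder names, not from this file): the copy_{to,from}_user_page()
 * helpers below pass the user virtual address so the temporary mapping
 * lands on the same cache colour as the user mapping:
 *
 *	void *kaddr = kmap_coherent(page, uvaddr);
 *	memcpy(kaddr, src, len);
 *	kunmap_coherent(page);
 */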

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

/*
 * Map @page at a fixmap virtual address whose cache colour matches @addr,
 * installing the translation directly in the TLB: a wired entry on
 * non-SMTC, a probed/preloaded entry (plus pte update) on SMTC.
 */
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	inc_preempt_count();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id();
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
	entrylo = pte.pte_high;
#else
	entrylo = pte_val(pte) >> 6;
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *)vaddr;
}

/*
 * An EntryHi value in (unmapped) CKSEG0, which can never match a real
 * translation; used to park the wired entry being freed.
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

static inline void kunmap_coherent(struct page *page)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);
	if (cpu_has_dc_aliases) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(from);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}
	if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

EXPORT_SYMBOL(copy_user_highpage);
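
/*
 * copy_{to,from}_user_page() move data between the kernel and a page that
 * may be mapped into user space, e.g. for ptrace access to another
 * process. On CPUs with D-cache aliases the page is accessed through a
 * kmap_coherent() mapping of the matching colour rather than through the
 * kernel's linear mapping.
 */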
void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(page);
	} else
		memcpy(dst, src, len);
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

EXPORT_SYMBOL(copy_to_user_page);

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases) {
		void *vfrom =
			kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(page);
	} else
		memcpy(dst, src, len);
}

EXPORT_SYMBOL(copy_from_user_page);


#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					if (pte != pte_offset_kernel(pmd, 0))
						BUG();
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
extern void pagetable_init(void);
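
/*
 * A page counts as RAM only if it lies entirely inside a BOOT_MEM_RAM
 * region: PFN_UP() on the region start and PFN_DOWN() on its end round
 * inwards, so partial pages at either edge are skipped.
 */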
static int __init page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			/* not usable memory */
			continue;

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
#ifndef CONFIG_FLATMEM
	unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
	unsigned long i, j, pfn;
#endif

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ISA
	if (max_low_pfn >= MAX_DMA_PFN)
		if (min_low_pfn >= MAX_DMA_PFN) {
			zones_size[ZONE_DMA] = 0;
			zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
		} else {
			zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
			zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
		}
	else
#endif
	zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;

	if (cpu_has_dc_aliases && zones_size[ZONE_HIGHMEM]) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       zones_size[ZONE_HIGHMEM] << (PAGE_SHIFT - 10));
		zones_size[ZONE_HIGHMEM] = 0;
	}
#endif

#ifdef CONFIG_FLATMEM
	free_area_init(zones_size);
#else
	pfn = 0;
	for (i = 0; i < MAX_NR_ZONES; i++)
		for (j = 0; j < zones_size[i]; j++, pfn++)
			if (!page_is_ram(pfn))
				zholes_size[i]++;
	free_area_init_node(0, NODE_DATA(0), zones_size, 0, zholes_size);
#endif
}

static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages. */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;
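
	/*
	 * free_all_bootmem() above only returns low memory to the buddy
	 * allocator; any highmem pages are handed over one at a time in
	 * the loop below.
	 */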
#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = mem_map + tmp;

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		ClearPageReserved(page);
#ifdef CONFIG_LIMITED_DMA
		set_page_address(page, lowmem_page_address(page));
#endif
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
	}
	totalram_pages += totalhigh_pages;
	num_physpages += totalhigh_pages;
#endif

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow. */
		kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
#endif
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
			virt_to_phys((void *)start),
			virt_to_phys((void *)end));
}
#endif

void free_initmem(void)
{
	prom_free_prom_memory();
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}

unsigned long pgd_current[NR_CPUS];
/*
 * On 64-bit we've got three-level pagetables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
#ifdef CONFIG_64BIT
#ifdef MODULE_START
pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
#endif
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
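
/*
 * invalid_pmd_table and invalid_pte_table above serve as the all-empty
 * lower levels that unused upper-level entries point at, so a page-table
 * walk always lands on a valid (if empty) table instead of a stray
 * pointer.
 */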