1 /* 2 * arch/sparc64/mm/init.c 3 * 4 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu) 5 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 6 */ 7 8 #include <linux/module.h> 9 #include <linux/kernel.h> 10 #include <linux/sched.h> 11 #include <linux/string.h> 12 #include <linux/init.h> 13 #include <linux/bootmem.h> 14 #include <linux/mm.h> 15 #include <linux/hugetlb.h> 16 #include <linux/initrd.h> 17 #include <linux/swap.h> 18 #include <linux/pagemap.h> 19 #include <linux/poison.h> 20 #include <linux/fs.h> 21 #include <linux/seq_file.h> 22 #include <linux/kprobes.h> 23 #include <linux/cache.h> 24 #include <linux/sort.h> 25 #include <linux/percpu.h> 26 #include <linux/memblock.h> 27 #include <linux/mmzone.h> 28 #include <linux/gfp.h> 29 30 #include <asm/head.h> 31 #include <asm/system.h> 32 #include <asm/page.h> 33 #include <asm/pgalloc.h> 34 #include <asm/pgtable.h> 35 #include <asm/oplib.h> 36 #include <asm/iommu.h> 37 #include <asm/io.h> 38 #include <asm/uaccess.h> 39 #include <asm/mmu_context.h> 40 #include <asm/tlbflush.h> 41 #include <asm/dma.h> 42 #include <asm/starfire.h> 43 #include <asm/tlb.h> 44 #include <asm/spitfire.h> 45 #include <asm/sections.h> 46 #include <asm/tsb.h> 47 #include <asm/hypervisor.h> 48 #include <asm/prom.h> 49 #include <asm/mdesc.h> 50 #include <asm/cpudata.h> 51 #include <asm/irq.h> 52 53 #include "init_64.h" 54 55 unsigned long kern_linear_pte_xor[2] __read_mostly; 56 57 /* A bitmap, one bit for every 256MB of physical memory. If the bit 58 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else 59 * if set we should use a 256MB page (via kern_linear_pte_xor[1]). 60 */ 61 unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; 62 63 #ifndef CONFIG_DEBUG_PAGEALLOC 64 /* A special kernel TSB for 4MB and 256MB linear mappings. 65 * Space is allocated for this right after the trap table 66 * in arch/sparc64/kernel/head.S 67 */ 68 extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; 69 #endif 70 71 #define MAX_BANKS 32 72 73 static struct linux_prom64_registers pavail[MAX_BANKS] __devinitdata; 74 static int pavail_ents __devinitdata; 75 76 static int cmp_p64(const void *a, const void *b) 77 { 78 const struct linux_prom64_registers *x = a, *y = b; 79 80 if (x->phys_addr > y->phys_addr) 81 return 1; 82 if (x->phys_addr < y->phys_addr) 83 return -1; 84 return 0; 85 } 86 87 static void __init read_obp_memory(const char *property, 88 struct linux_prom64_registers *regs, 89 int *num_ents) 90 { 91 phandle node = prom_finddevice("/memory"); 92 int prop_size = prom_getproplen(node, property); 93 int ents, ret, i; 94 95 ents = prop_size / sizeof(struct linux_prom64_registers); 96 if (ents > MAX_BANKS) { 97 prom_printf("The machine has more %s property entries than " 98 "this kernel can support (%d).\n", 99 property, MAX_BANKS); 100 prom_halt(); 101 } 102 103 ret = prom_getproperty(node, property, (char *) regs, prop_size); 104 if (ret == -1) { 105 prom_printf("Couldn't get %s property from /memory.\n"); 106 prom_halt(); 107 } 108 109 /* Sanitize what we got from the firmware, by page aligning 110 * everything. 111 */ 112 for (i = 0; i < ents; i++) { 113 unsigned long base, size; 114 115 base = regs[i].phys_addr; 116 size = regs[i].reg_size; 117 118 size &= PAGE_MASK; 119 if (base & ~PAGE_MASK) { 120 unsigned long new_base = PAGE_ALIGN(base); 121 122 size -= new_base - base; 123 if ((long) size < 0L) 124 size = 0UL; 125 base = new_base; 126 } 127 if (size == 0UL) { 128 /* If it is empty, simply get rid of it. 129 * This simplifies the logic of the other 130 * functions that process these arrays. 131 */ 132 memmove(®s[i], ®s[i + 1], 133 (ents - i - 1) * sizeof(regs[0])); 134 i--; 135 ents--; 136 continue; 137 } 138 regs[i].phys_addr = base; 139 regs[i].reg_size = size; 140 } 141 142 *num_ents = ents; 143 144 sort(regs, ents, sizeof(struct linux_prom64_registers), 145 cmp_p64, NULL); 146 } 147 148 unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES / 149 sizeof(unsigned long)]; 150 EXPORT_SYMBOL(sparc64_valid_addr_bitmap); 151 152 /* Kernel physical address base and size in bytes. */ 153 unsigned long kern_base __read_mostly; 154 unsigned long kern_size __read_mostly; 155 156 /* Initial ramdisk setup */ 157 extern unsigned long sparc_ramdisk_image64; 158 extern unsigned int sparc_ramdisk_image; 159 extern unsigned int sparc_ramdisk_size; 160 161 struct page *mem_map_zero __read_mostly; 162 EXPORT_SYMBOL(mem_map_zero); 163 164 unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly; 165 166 unsigned long sparc64_kern_pri_context __read_mostly; 167 unsigned long sparc64_kern_pri_nuc_bits __read_mostly; 168 unsigned long sparc64_kern_sec_context __read_mostly; 169 170 int num_kernel_image_mappings; 171 172 #ifdef CONFIG_DEBUG_DCFLUSH 173 atomic_t dcpage_flushes = ATOMIC_INIT(0); 174 #ifdef CONFIG_SMP 175 atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); 176 #endif 177 #endif 178 179 inline void flush_dcache_page_impl(struct page *page) 180 { 181 BUG_ON(tlb_type == hypervisor); 182 #ifdef CONFIG_DEBUG_DCFLUSH 183 atomic_inc(&dcpage_flushes); 184 #endif 185 186 #ifdef DCACHE_ALIASING_POSSIBLE 187 __flush_dcache_page(page_address(page), 188 ((tlb_type == spitfire) && 189 page_mapping(page) != NULL)); 190 #else 191 if (page_mapping(page) != NULL && 192 tlb_type == spitfire) 193 __flush_icache_page(__pa(page_address(page))); 194 #endif 195 } 196 197 #define PG_dcache_dirty PG_arch_1 198 #define PG_dcache_cpu_shift 32UL 199 #define PG_dcache_cpu_mask \ 200 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL) 201 202 #define dcache_dirty_cpu(page) \ 203 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) 204 205 static inline void set_dcache_dirty(struct page *page, int this_cpu) 206 { 207 unsigned long mask = this_cpu; 208 unsigned long non_cpu_bits; 209 210 non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift); 211 mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty); 212 213 __asm__ __volatile__("1:\n\t" 214 "ldx [%2], %%g7\n\t" 215 "and %%g7, %1, %%g1\n\t" 216 "or %%g1, %0, %%g1\n\t" 217 "casx [%2], %%g7, %%g1\n\t" 218 "cmp %%g7, %%g1\n\t" 219 "bne,pn %%xcc, 1b\n\t" 220 " nop" 221 : /* no outputs */ 222 : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags) 223 : "g1", "g7"); 224 } 225 226 static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu) 227 { 228 unsigned long mask = (1UL << PG_dcache_dirty); 229 230 __asm__ __volatile__("! test_and_clear_dcache_dirty\n" 231 "1:\n\t" 232 "ldx [%2], %%g7\n\t" 233 "srlx %%g7, %4, %%g1\n\t" 234 "and %%g1, %3, %%g1\n\t" 235 "cmp %%g1, %0\n\t" 236 "bne,pn %%icc, 2f\n\t" 237 " andn %%g7, %1, %%g1\n\t" 238 "casx [%2], %%g7, %%g1\n\t" 239 "cmp %%g7, %%g1\n\t" 240 "bne,pn %%xcc, 1b\n\t" 241 " nop\n" 242 "2:" 243 : /* no outputs */ 244 : "r" (cpu), "r" (mask), "r" (&page->flags), 245 "i" (PG_dcache_cpu_mask), 246 "i" (PG_dcache_cpu_shift) 247 : "g1", "g7"); 248 } 249 250 static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte) 251 { 252 unsigned long tsb_addr = (unsigned long) ent; 253 254 if (tlb_type == cheetah_plus || tlb_type == hypervisor) 255 tsb_addr = __pa(tsb_addr); 256 257 __tsb_insert(tsb_addr, tag, pte); 258 } 259 260 unsigned long _PAGE_ALL_SZ_BITS __read_mostly; 261 unsigned long _PAGE_SZBITS __read_mostly; 262 263 static void flush_dcache(unsigned long pfn) 264 { 265 struct page *page; 266 267 page = pfn_to_page(pfn); 268 if (page) { 269 unsigned long pg_flags; 270 271 pg_flags = page->flags; 272 if (pg_flags & (1UL << PG_dcache_dirty)) { 273 int cpu = ((pg_flags >> PG_dcache_cpu_shift) & 274 PG_dcache_cpu_mask); 275 int this_cpu = get_cpu(); 276 277 /* This is just to optimize away some function calls 278 * in the SMP case. 279 */ 280 if (cpu == this_cpu) 281 flush_dcache_page_impl(page); 282 else 283 smp_flush_dcache_page_impl(page, cpu); 284 285 clear_dcache_dirty_cpu(page, cpu); 286 287 put_cpu(); 288 } 289 } 290 } 291 292 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) 293 { 294 struct mm_struct *mm; 295 struct tsb *tsb; 296 unsigned long tag, flags; 297 unsigned long tsb_index, tsb_hash_shift; 298 pte_t pte = *ptep; 299 300 if (tlb_type != hypervisor) { 301 unsigned long pfn = pte_pfn(pte); 302 303 if (pfn_valid(pfn)) 304 flush_dcache(pfn); 305 } 306 307 mm = vma->vm_mm; 308 309 tsb_index = MM_TSB_BASE; 310 tsb_hash_shift = PAGE_SHIFT; 311 312 spin_lock_irqsave(&mm->context.lock, flags); 313 314 #ifdef CONFIG_HUGETLB_PAGE 315 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) { 316 if ((tlb_type == hypervisor && 317 (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) || 318 (tlb_type != hypervisor && 319 (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) { 320 tsb_index = MM_TSB_HUGE; 321 tsb_hash_shift = HPAGE_SHIFT; 322 } 323 } 324 #endif 325 326 tsb = mm->context.tsb_block[tsb_index].tsb; 327 tsb += ((address >> tsb_hash_shift) & 328 (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); 329 tag = (address >> 22UL); 330 tsb_insert(tsb, tag, pte_val(pte)); 331 332 spin_unlock_irqrestore(&mm->context.lock, flags); 333 } 334 335 void flush_dcache_page(struct page *page) 336 { 337 struct address_space *mapping; 338 int this_cpu; 339 340 if (tlb_type == hypervisor) 341 return; 342 343 /* Do not bother with the expensive D-cache flush if it 344 * is merely the zero page. The 'bigcore' testcase in GDB 345 * causes this case to run millions of times. 346 */ 347 if (page == ZERO_PAGE(0)) 348 return; 349 350 this_cpu = get_cpu(); 351 352 mapping = page_mapping(page); 353 if (mapping && !mapping_mapped(mapping)) { 354 int dirty = test_bit(PG_dcache_dirty, &page->flags); 355 if (dirty) { 356 int dirty_cpu = dcache_dirty_cpu(page); 357 358 if (dirty_cpu == this_cpu) 359 goto out; 360 smp_flush_dcache_page_impl(page, dirty_cpu); 361 } 362 set_dcache_dirty(page, this_cpu); 363 } else { 364 /* We could delay the flush for the !page_mapping 365 * case too. But that case is for exec env/arg 366 * pages and those are %99 certainly going to get 367 * faulted into the tlb (and thus flushed) anyways. 368 */ 369 flush_dcache_page_impl(page); 370 } 371 372 out: 373 put_cpu(); 374 } 375 EXPORT_SYMBOL(flush_dcache_page); 376 377 void __kprobes flush_icache_range(unsigned long start, unsigned long end) 378 { 379 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ 380 if (tlb_type == spitfire) { 381 unsigned long kaddr; 382 383 /* This code only runs on Spitfire cpus so this is 384 * why we can assume _PAGE_PADDR_4U. 385 */ 386 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) { 387 unsigned long paddr, mask = _PAGE_PADDR_4U; 388 389 if (kaddr >= PAGE_OFFSET) 390 paddr = kaddr & mask; 391 else { 392 pgd_t *pgdp = pgd_offset_k(kaddr); 393 pud_t *pudp = pud_offset(pgdp, kaddr); 394 pmd_t *pmdp = pmd_offset(pudp, kaddr); 395 pte_t *ptep = pte_offset_kernel(pmdp, kaddr); 396 397 paddr = pte_val(*ptep) & mask; 398 } 399 __flush_icache_page(paddr); 400 } 401 } 402 } 403 EXPORT_SYMBOL(flush_icache_range); 404 405 void mmu_info(struct seq_file *m) 406 { 407 if (tlb_type == cheetah) 408 seq_printf(m, "MMU Type\t: Cheetah\n"); 409 else if (tlb_type == cheetah_plus) 410 seq_printf(m, "MMU Type\t: Cheetah+\n"); 411 else if (tlb_type == spitfire) 412 seq_printf(m, "MMU Type\t: Spitfire\n"); 413 else if (tlb_type == hypervisor) 414 seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n"); 415 else 416 seq_printf(m, "MMU Type\t: ???\n"); 417 418 #ifdef CONFIG_DEBUG_DCFLUSH 419 seq_printf(m, "DCPageFlushes\t: %d\n", 420 atomic_read(&dcpage_flushes)); 421 #ifdef CONFIG_SMP 422 seq_printf(m, "DCPageFlushesXC\t: %d\n", 423 atomic_read(&dcpage_flushes_xcall)); 424 #endif /* CONFIG_SMP */ 425 #endif /* CONFIG_DEBUG_DCFLUSH */ 426 } 427 428 struct linux_prom_translation prom_trans[512] __read_mostly; 429 unsigned int prom_trans_ents __read_mostly; 430 431 unsigned long kern_locked_tte_data; 432 433 /* The obp translations are saved based on 8k pagesize, since obp can 434 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> 435 * HI_OBP_ADDRESS range are handled in ktlb.S. 436 */ 437 static inline int in_obp_range(unsigned long vaddr) 438 { 439 return (vaddr >= LOW_OBP_ADDRESS && 440 vaddr < HI_OBP_ADDRESS); 441 } 442 443 static int cmp_ptrans(const void *a, const void *b) 444 { 445 const struct linux_prom_translation *x = a, *y = b; 446 447 if (x->virt > y->virt) 448 return 1; 449 if (x->virt < y->virt) 450 return -1; 451 return 0; 452 } 453 454 /* Read OBP translations property into 'prom_trans[]'. */ 455 static void __init read_obp_translations(void) 456 { 457 int n, node, ents, first, last, i; 458 459 node = prom_finddevice("/virtual-memory"); 460 n = prom_getproplen(node, "translations"); 461 if (unlikely(n == 0 || n == -1)) { 462 prom_printf("prom_mappings: Couldn't get size.\n"); 463 prom_halt(); 464 } 465 if (unlikely(n > sizeof(prom_trans))) { 466 prom_printf("prom_mappings: Size %Zd is too big.\n", n); 467 prom_halt(); 468 } 469 470 if ((n = prom_getproperty(node, "translations", 471 (char *)&prom_trans[0], 472 sizeof(prom_trans))) == -1) { 473 prom_printf("prom_mappings: Couldn't get property.\n"); 474 prom_halt(); 475 } 476 477 n = n / sizeof(struct linux_prom_translation); 478 479 ents = n; 480 481 sort(prom_trans, ents, sizeof(struct linux_prom_translation), 482 cmp_ptrans, NULL); 483 484 /* Now kick out all the non-OBP entries. */ 485 for (i = 0; i < ents; i++) { 486 if (in_obp_range(prom_trans[i].virt)) 487 break; 488 } 489 first = i; 490 for (; i < ents; i++) { 491 if (!in_obp_range(prom_trans[i].virt)) 492 break; 493 } 494 last = i; 495 496 for (i = 0; i < (last - first); i++) { 497 struct linux_prom_translation *src = &prom_trans[i + first]; 498 struct linux_prom_translation *dest = &prom_trans[i]; 499 500 *dest = *src; 501 } 502 for (; i < ents; i++) { 503 struct linux_prom_translation *dest = &prom_trans[i]; 504 dest->virt = dest->size = dest->data = 0x0UL; 505 } 506 507 prom_trans_ents = last - first; 508 509 if (tlb_type == spitfire) { 510 /* Clear diag TTE bits. */ 511 for (i = 0; i < prom_trans_ents; i++) 512 prom_trans[i].data &= ~0x0003fe0000000000UL; 513 } 514 515 /* Force execute bit on. */ 516 for (i = 0; i < prom_trans_ents; i++) 517 prom_trans[i].data |= (tlb_type == hypervisor ? 518 _PAGE_EXEC_4V : _PAGE_EXEC_4U); 519 } 520 521 static void __init hypervisor_tlb_lock(unsigned long vaddr, 522 unsigned long pte, 523 unsigned long mmu) 524 { 525 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu); 526 527 if (ret != 0) { 528 prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: " 529 "errors with %lx\n", vaddr, 0, pte, mmu, ret); 530 prom_halt(); 531 } 532 } 533 534 static unsigned long kern_large_tte(unsigned long paddr); 535 536 static void __init remap_kernel(void) 537 { 538 unsigned long phys_page, tte_vaddr, tte_data; 539 int i, tlb_ent = sparc64_highest_locked_tlbent(); 540 541 tte_vaddr = (unsigned long) KERNBASE; 542 phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; 543 tte_data = kern_large_tte(phys_page); 544 545 kern_locked_tte_data = tte_data; 546 547 /* Now lock us into the TLBs via Hypervisor or OBP. */ 548 if (tlb_type == hypervisor) { 549 for (i = 0; i < num_kernel_image_mappings; i++) { 550 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); 551 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); 552 tte_vaddr += 0x400000; 553 tte_data += 0x400000; 554 } 555 } else { 556 for (i = 0; i < num_kernel_image_mappings; i++) { 557 prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr); 558 prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr); 559 tte_vaddr += 0x400000; 560 tte_data += 0x400000; 561 } 562 sparc64_highest_unlocked_tlb_ent = tlb_ent - i; 563 } 564 if (tlb_type == cheetah_plus) { 565 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | 566 CTX_CHEETAH_PLUS_NUC); 567 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC; 568 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0; 569 } 570 } 571 572 573 static void __init inherit_prom_mappings(void) 574 { 575 /* Now fixup OBP's idea about where we really are mapped. */ 576 printk("Remapping the kernel... "); 577 remap_kernel(); 578 printk("done.\n"); 579 } 580 581 void prom_world(int enter) 582 { 583 if (!enter) 584 set_fs((mm_segment_t) { get_thread_current_ds() }); 585 586 __asm__ __volatile__("flushw"); 587 } 588 589 void __flush_dcache_range(unsigned long start, unsigned long end) 590 { 591 unsigned long va; 592 593 if (tlb_type == spitfire) { 594 int n = 0; 595 596 for (va = start; va < end; va += 32) { 597 spitfire_put_dcache_tag(va & 0x3fe0, 0x0); 598 if (++n >= 512) 599 break; 600 } 601 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { 602 start = __pa(start); 603 end = __pa(end); 604 for (va = start; va < end; va += 32) 605 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" 606 "membar #Sync" 607 : /* no outputs */ 608 : "r" (va), 609 "i" (ASI_DCACHE_INVALIDATE)); 610 } 611 } 612 EXPORT_SYMBOL(__flush_dcache_range); 613 614 /* get_new_mmu_context() uses "cache + 1". */ 615 DEFINE_SPINLOCK(ctx_alloc_lock); 616 unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; 617 #define MAX_CTX_NR (1UL << CTX_NR_BITS) 618 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) 619 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); 620 621 /* Caller does TLB context flushing on local CPU if necessary. 622 * The caller also ensures that CTX_VALID(mm->context) is false. 623 * 624 * We must be careful about boundary cases so that we never 625 * let the user have CTX 0 (nucleus) or we ever use a CTX 626 * version of zero (and thus NO_CONTEXT would not be caught 627 * by version mis-match tests in mmu_context.h). 628 * 629 * Always invoked with interrupts disabled. 630 */ 631 void get_new_mmu_context(struct mm_struct *mm) 632 { 633 unsigned long ctx, new_ctx; 634 unsigned long orig_pgsz_bits; 635 unsigned long flags; 636 int new_version; 637 638 spin_lock_irqsave(&ctx_alloc_lock, flags); 639 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); 640 ctx = (tlb_context_cache + 1) & CTX_NR_MASK; 641 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); 642 new_version = 0; 643 if (new_ctx >= (1 << CTX_NR_BITS)) { 644 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); 645 if (new_ctx >= ctx) { 646 int i; 647 new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + 648 CTX_FIRST_VERSION; 649 if (new_ctx == 1) 650 new_ctx = CTX_FIRST_VERSION; 651 652 /* Don't call memset, for 16 entries that's just 653 * plain silly... 654 */ 655 mmu_context_bmap[0] = 3; 656 mmu_context_bmap[1] = 0; 657 mmu_context_bmap[2] = 0; 658 mmu_context_bmap[3] = 0; 659 for (i = 4; i < CTX_BMAP_SLOTS; i += 4) { 660 mmu_context_bmap[i + 0] = 0; 661 mmu_context_bmap[i + 1] = 0; 662 mmu_context_bmap[i + 2] = 0; 663 mmu_context_bmap[i + 3] = 0; 664 } 665 new_version = 1; 666 goto out; 667 } 668 } 669 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); 670 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); 671 out: 672 tlb_context_cache = new_ctx; 673 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; 674 spin_unlock_irqrestore(&ctx_alloc_lock, flags); 675 676 if (unlikely(new_version)) 677 smp_new_mmu_context_version(); 678 } 679 680 static int numa_enabled = 1; 681 static int numa_debug; 682 683 static int __init early_numa(char *p) 684 { 685 if (!p) 686 return 0; 687 688 if (strstr(p, "off")) 689 numa_enabled = 0; 690 691 if (strstr(p, "debug")) 692 numa_debug = 1; 693 694 return 0; 695 } 696 early_param("numa", early_numa); 697 698 #define numadbg(f, a...) \ 699 do { if (numa_debug) \ 700 printk(KERN_INFO f, ## a); \ 701 } while (0) 702 703 static void __init find_ramdisk(unsigned long phys_base) 704 { 705 #ifdef CONFIG_BLK_DEV_INITRD 706 if (sparc_ramdisk_image || sparc_ramdisk_image64) { 707 unsigned long ramdisk_image; 708 709 /* Older versions of the bootloader only supported a 710 * 32-bit physical address for the ramdisk image 711 * location, stored at sparc_ramdisk_image. Newer 712 * SILO versions set sparc_ramdisk_image to zero and 713 * provide a full 64-bit physical address at 714 * sparc_ramdisk_image64. 715 */ 716 ramdisk_image = sparc_ramdisk_image; 717 if (!ramdisk_image) 718 ramdisk_image = sparc_ramdisk_image64; 719 720 /* Another bootloader quirk. The bootloader normalizes 721 * the physical address to KERNBASE, so we have to 722 * factor that back out and add in the lowest valid 723 * physical page address to get the true physical address. 724 */ 725 ramdisk_image -= KERNBASE; 726 ramdisk_image += phys_base; 727 728 numadbg("Found ramdisk at physical address 0x%lx, size %u\n", 729 ramdisk_image, sparc_ramdisk_size); 730 731 initrd_start = ramdisk_image; 732 initrd_end = ramdisk_image + sparc_ramdisk_size; 733 734 memblock_reserve(initrd_start, sparc_ramdisk_size); 735 736 initrd_start += PAGE_OFFSET; 737 initrd_end += PAGE_OFFSET; 738 } 739 #endif 740 } 741 742 struct node_mem_mask { 743 unsigned long mask; 744 unsigned long val; 745 unsigned long bootmem_paddr; 746 }; 747 static struct node_mem_mask node_masks[MAX_NUMNODES]; 748 static int num_node_masks; 749 750 int numa_cpu_lookup_table[NR_CPUS]; 751 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; 752 753 #ifdef CONFIG_NEED_MULTIPLE_NODES 754 755 struct mdesc_mblock { 756 u64 base; 757 u64 size; 758 u64 offset; /* RA-to-PA */ 759 }; 760 static struct mdesc_mblock *mblocks; 761 static int num_mblocks; 762 763 static unsigned long ra_to_pa(unsigned long addr) 764 { 765 int i; 766 767 for (i = 0; i < num_mblocks; i++) { 768 struct mdesc_mblock *m = &mblocks[i]; 769 770 if (addr >= m->base && 771 addr < (m->base + m->size)) { 772 addr += m->offset; 773 break; 774 } 775 } 776 return addr; 777 } 778 779 static int find_node(unsigned long addr) 780 { 781 int i; 782 783 addr = ra_to_pa(addr); 784 for (i = 0; i < num_node_masks; i++) { 785 struct node_mem_mask *p = &node_masks[i]; 786 787 if ((addr & p->mask) == p->val) 788 return i; 789 } 790 return -1; 791 } 792 793 u64 memblock_nid_range(u64 start, u64 end, int *nid) 794 { 795 *nid = find_node(start); 796 start += PAGE_SIZE; 797 while (start < end) { 798 int n = find_node(start); 799 800 if (n != *nid) 801 break; 802 start += PAGE_SIZE; 803 } 804 805 if (start > end) 806 start = end; 807 808 return start; 809 } 810 #else 811 u64 memblock_nid_range(u64 start, u64 end, int *nid) 812 { 813 *nid = 0; 814 return end; 815 } 816 #endif 817 818 /* This must be invoked after performing all of the necessary 819 * add_active_range() calls for 'nid'. We need to be able to get 820 * correct data from get_pfn_range_for_nid(). 821 */ 822 static void __init allocate_node_data(int nid) 823 { 824 unsigned long paddr, num_pages, start_pfn, end_pfn; 825 struct pglist_data *p; 826 827 #ifdef CONFIG_NEED_MULTIPLE_NODES 828 paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid); 829 if (!paddr) { 830 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); 831 prom_halt(); 832 } 833 NODE_DATA(nid) = __va(paddr); 834 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); 835 836 NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; 837 #endif 838 839 p = NODE_DATA(nid); 840 841 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 842 p->node_start_pfn = start_pfn; 843 p->node_spanned_pages = end_pfn - start_pfn; 844 845 if (p->node_spanned_pages) { 846 num_pages = bootmem_bootmap_pages(p->node_spanned_pages); 847 848 paddr = memblock_alloc_try_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid); 849 if (!paddr) { 850 prom_printf("Cannot allocate bootmap for nid[%d]\n", 851 nid); 852 prom_halt(); 853 } 854 node_masks[nid].bootmem_paddr = paddr; 855 } 856 } 857 858 static void init_node_masks_nonnuma(void) 859 { 860 int i; 861 862 numadbg("Initializing tables for non-numa.\n"); 863 864 node_masks[0].mask = node_masks[0].val = 0; 865 num_node_masks = 1; 866 867 for (i = 0; i < NR_CPUS; i++) 868 numa_cpu_lookup_table[i] = 0; 869 870 cpumask_setall(&numa_cpumask_lookup_table[0]); 871 } 872 873 #ifdef CONFIG_NEED_MULTIPLE_NODES 874 struct pglist_data *node_data[MAX_NUMNODES]; 875 876 EXPORT_SYMBOL(numa_cpu_lookup_table); 877 EXPORT_SYMBOL(numa_cpumask_lookup_table); 878 EXPORT_SYMBOL(node_data); 879 880 struct mdesc_mlgroup { 881 u64 node; 882 u64 latency; 883 u64 match; 884 u64 mask; 885 }; 886 static struct mdesc_mlgroup *mlgroups; 887 static int num_mlgroups; 888 889 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio, 890 u32 cfg_handle) 891 { 892 u64 arc; 893 894 mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) { 895 u64 target = mdesc_arc_target(md, arc); 896 const u64 *val; 897 898 val = mdesc_get_property(md, target, 899 "cfg-handle", NULL); 900 if (val && *val == cfg_handle) 901 return 0; 902 } 903 return -ENODEV; 904 } 905 906 static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp, 907 u32 cfg_handle) 908 { 909 u64 arc, candidate, best_latency = ~(u64)0; 910 911 candidate = MDESC_NODE_NULL; 912 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { 913 u64 target = mdesc_arc_target(md, arc); 914 const char *name = mdesc_node_name(md, target); 915 const u64 *val; 916 917 if (strcmp(name, "pio-latency-group")) 918 continue; 919 920 val = mdesc_get_property(md, target, "latency", NULL); 921 if (!val) 922 continue; 923 924 if (*val < best_latency) { 925 candidate = target; 926 best_latency = *val; 927 } 928 } 929 930 if (candidate == MDESC_NODE_NULL) 931 return -ENODEV; 932 933 return scan_pio_for_cfg_handle(md, candidate, cfg_handle); 934 } 935 936 int of_node_to_nid(struct device_node *dp) 937 { 938 const struct linux_prom64_registers *regs; 939 struct mdesc_handle *md; 940 u32 cfg_handle; 941 int count, nid; 942 u64 grp; 943 944 /* This is the right thing to do on currently supported 945 * SUN4U NUMA platforms as well, as the PCI controller does 946 * not sit behind any particular memory controller. 947 */ 948 if (!mlgroups) 949 return -1; 950 951 regs = of_get_property(dp, "reg", NULL); 952 if (!regs) 953 return -1; 954 955 cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff; 956 957 md = mdesc_grab(); 958 959 count = 0; 960 nid = -1; 961 mdesc_for_each_node_by_name(md, grp, "group") { 962 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) { 963 nid = count; 964 break; 965 } 966 count++; 967 } 968 969 mdesc_release(md); 970 971 return nid; 972 } 973 974 static void __init add_node_ranges(void) 975 { 976 struct memblock_region *reg; 977 978 for_each_memblock(memory, reg) { 979 unsigned long size = reg->size; 980 unsigned long start, end; 981 982 start = reg->base; 983 end = start + size; 984 while (start < end) { 985 unsigned long this_end; 986 int nid; 987 988 this_end = memblock_nid_range(start, end, &nid); 989 990 numadbg("Adding active range nid[%d] " 991 "start[%lx] end[%lx]\n", 992 nid, start, this_end); 993 994 add_active_range(nid, 995 start >> PAGE_SHIFT, 996 this_end >> PAGE_SHIFT); 997 998 start = this_end; 999 } 1000 } 1001 } 1002 1003 static int __init grab_mlgroups(struct mdesc_handle *md) 1004 { 1005 unsigned long paddr; 1006 int count = 0; 1007 u64 node; 1008 1009 mdesc_for_each_node_by_name(md, node, "memory-latency-group") 1010 count++; 1011 if (!count) 1012 return -ENOENT; 1013 1014 paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup), 1015 SMP_CACHE_BYTES); 1016 if (!paddr) 1017 return -ENOMEM; 1018 1019 mlgroups = __va(paddr); 1020 num_mlgroups = count; 1021 1022 count = 0; 1023 mdesc_for_each_node_by_name(md, node, "memory-latency-group") { 1024 struct mdesc_mlgroup *m = &mlgroups[count++]; 1025 const u64 *val; 1026 1027 m->node = node; 1028 1029 val = mdesc_get_property(md, node, "latency", NULL); 1030 m->latency = *val; 1031 val = mdesc_get_property(md, node, "address-match", NULL); 1032 m->match = *val; 1033 val = mdesc_get_property(md, node, "address-mask", NULL); 1034 m->mask = *val; 1035 1036 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] " 1037 "match[%llx] mask[%llx]\n", 1038 count - 1, m->node, m->latency, m->match, m->mask); 1039 } 1040 1041 return 0; 1042 } 1043 1044 static int __init grab_mblocks(struct mdesc_handle *md) 1045 { 1046 unsigned long paddr; 1047 int count = 0; 1048 u64 node; 1049 1050 mdesc_for_each_node_by_name(md, node, "mblock") 1051 count++; 1052 if (!count) 1053 return -ENOENT; 1054 1055 paddr = memblock_alloc(count * sizeof(struct mdesc_mblock), 1056 SMP_CACHE_BYTES); 1057 if (!paddr) 1058 return -ENOMEM; 1059 1060 mblocks = __va(paddr); 1061 num_mblocks = count; 1062 1063 count = 0; 1064 mdesc_for_each_node_by_name(md, node, "mblock") { 1065 struct mdesc_mblock *m = &mblocks[count++]; 1066 const u64 *val; 1067 1068 val = mdesc_get_property(md, node, "base", NULL); 1069 m->base = *val; 1070 val = mdesc_get_property(md, node, "size", NULL); 1071 m->size = *val; 1072 val = mdesc_get_property(md, node, 1073 "address-congruence-offset", NULL); 1074 m->offset = *val; 1075 1076 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", 1077 count - 1, m->base, m->size, m->offset); 1078 } 1079 1080 return 0; 1081 } 1082 1083 static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md, 1084 u64 grp, cpumask_t *mask) 1085 { 1086 u64 arc; 1087 1088 cpumask_clear(mask); 1089 1090 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) { 1091 u64 target = mdesc_arc_target(md, arc); 1092 const char *name = mdesc_node_name(md, target); 1093 const u64 *id; 1094 1095 if (strcmp(name, "cpu")) 1096 continue; 1097 id = mdesc_get_property(md, target, "id", NULL); 1098 if (*id < nr_cpu_ids) 1099 cpumask_set_cpu(*id, mask); 1100 } 1101 } 1102 1103 static struct mdesc_mlgroup * __init find_mlgroup(u64 node) 1104 { 1105 int i; 1106 1107 for (i = 0; i < num_mlgroups; i++) { 1108 struct mdesc_mlgroup *m = &mlgroups[i]; 1109 if (m->node == node) 1110 return m; 1111 } 1112 return NULL; 1113 } 1114 1115 static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp, 1116 int index) 1117 { 1118 struct mdesc_mlgroup *candidate = NULL; 1119 u64 arc, best_latency = ~(u64)0; 1120 struct node_mem_mask *n; 1121 1122 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { 1123 u64 target = mdesc_arc_target(md, arc); 1124 struct mdesc_mlgroup *m = find_mlgroup(target); 1125 if (!m) 1126 continue; 1127 if (m->latency < best_latency) { 1128 candidate = m; 1129 best_latency = m->latency; 1130 } 1131 } 1132 if (!candidate) 1133 return -ENOENT; 1134 1135 if (num_node_masks != index) { 1136 printk(KERN_ERR "Inconsistent NUMA state, " 1137 "index[%d] != num_node_masks[%d]\n", 1138 index, num_node_masks); 1139 return -EINVAL; 1140 } 1141 1142 n = &node_masks[num_node_masks++]; 1143 1144 n->mask = candidate->mask; 1145 n->val = candidate->match; 1146 1147 numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n", 1148 index, n->mask, n->val, candidate->latency); 1149 1150 return 0; 1151 } 1152 1153 static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp, 1154 int index) 1155 { 1156 cpumask_t mask; 1157 int cpu; 1158 1159 numa_parse_mdesc_group_cpus(md, grp, &mask); 1160 1161 for_each_cpu(cpu, &mask) 1162 numa_cpu_lookup_table[cpu] = index; 1163 cpumask_copy(&numa_cpumask_lookup_table[index], &mask); 1164 1165 if (numa_debug) { 1166 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index); 1167 for_each_cpu(cpu, &mask) 1168 printk("%d ", cpu); 1169 printk("]\n"); 1170 } 1171 1172 return numa_attach_mlgroup(md, grp, index); 1173 } 1174 1175 static int __init numa_parse_mdesc(void) 1176 { 1177 struct mdesc_handle *md = mdesc_grab(); 1178 int i, err, count; 1179 u64 node; 1180 1181 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); 1182 if (node == MDESC_NODE_NULL) { 1183 mdesc_release(md); 1184 return -ENOENT; 1185 } 1186 1187 err = grab_mblocks(md); 1188 if (err < 0) 1189 goto out; 1190 1191 err = grab_mlgroups(md); 1192 if (err < 0) 1193 goto out; 1194 1195 count = 0; 1196 mdesc_for_each_node_by_name(md, node, "group") { 1197 err = numa_parse_mdesc_group(md, node, count); 1198 if (err < 0) 1199 break; 1200 count++; 1201 } 1202 1203 add_node_ranges(); 1204 1205 for (i = 0; i < num_node_masks; i++) { 1206 allocate_node_data(i); 1207 node_set_online(i); 1208 } 1209 1210 err = 0; 1211 out: 1212 mdesc_release(md); 1213 return err; 1214 } 1215 1216 static int __init numa_parse_jbus(void) 1217 { 1218 unsigned long cpu, index; 1219 1220 /* NUMA node id is encoded in bits 36 and higher, and there is 1221 * a 1-to-1 mapping from CPU ID to NUMA node ID. 1222 */ 1223 index = 0; 1224 for_each_present_cpu(cpu) { 1225 numa_cpu_lookup_table[cpu] = index; 1226 cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu)); 1227 node_masks[index].mask = ~((1UL << 36UL) - 1UL); 1228 node_masks[index].val = cpu << 36UL; 1229 1230 index++; 1231 } 1232 num_node_masks = index; 1233 1234 add_node_ranges(); 1235 1236 for (index = 0; index < num_node_masks; index++) { 1237 allocate_node_data(index); 1238 node_set_online(index); 1239 } 1240 1241 return 0; 1242 } 1243 1244 static int __init numa_parse_sun4u(void) 1245 { 1246 if (tlb_type == cheetah || tlb_type == cheetah_plus) { 1247 unsigned long ver; 1248 1249 __asm__ ("rdpr %%ver, %0" : "=r" (ver)); 1250 if ((ver >> 32UL) == __JALAPENO_ID || 1251 (ver >> 32UL) == __SERRANO_ID) 1252 return numa_parse_jbus(); 1253 } 1254 return -1; 1255 } 1256 1257 static int __init bootmem_init_numa(void) 1258 { 1259 int err = -1; 1260 1261 numadbg("bootmem_init_numa()\n"); 1262 1263 if (numa_enabled) { 1264 if (tlb_type == hypervisor) 1265 err = numa_parse_mdesc(); 1266 else 1267 err = numa_parse_sun4u(); 1268 } 1269 return err; 1270 } 1271 1272 #else 1273 1274 static int bootmem_init_numa(void) 1275 { 1276 return -1; 1277 } 1278 1279 #endif 1280 1281 static void __init bootmem_init_nonnuma(void) 1282 { 1283 unsigned long top_of_ram = memblock_end_of_DRAM(); 1284 unsigned long total_ram = memblock_phys_mem_size(); 1285 struct memblock_region *reg; 1286 1287 numadbg("bootmem_init_nonnuma()\n"); 1288 1289 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", 1290 top_of_ram, total_ram); 1291 printk(KERN_INFO "Memory hole size: %ldMB\n", 1292 (top_of_ram - total_ram) >> 20); 1293 1294 init_node_masks_nonnuma(); 1295 1296 for_each_memblock(memory, reg) { 1297 unsigned long start_pfn, end_pfn; 1298 1299 if (!reg->size) 1300 continue; 1301 1302 start_pfn = memblock_region_memory_base_pfn(reg); 1303 end_pfn = memblock_region_memory_end_pfn(reg); 1304 add_active_range(0, start_pfn, end_pfn); 1305 } 1306 1307 allocate_node_data(0); 1308 1309 node_set_online(0); 1310 } 1311 1312 static void __init reserve_range_in_node(int nid, unsigned long start, 1313 unsigned long end) 1314 { 1315 numadbg(" reserve_range_in_node(nid[%d],start[%lx],end[%lx]\n", 1316 nid, start, end); 1317 while (start < end) { 1318 unsigned long this_end; 1319 int n; 1320 1321 this_end = memblock_nid_range(start, end, &n); 1322 if (n == nid) { 1323 numadbg(" MATCH reserving range [%lx:%lx]\n", 1324 start, this_end); 1325 reserve_bootmem_node(NODE_DATA(nid), start, 1326 (this_end - start), BOOTMEM_DEFAULT); 1327 } else 1328 numadbg(" NO MATCH, advancing start to %lx\n", 1329 this_end); 1330 1331 start = this_end; 1332 } 1333 } 1334 1335 static void __init trim_reserved_in_node(int nid) 1336 { 1337 struct memblock_region *reg; 1338 1339 numadbg(" trim_reserved_in_node(%d)\n", nid); 1340 1341 for_each_memblock(reserved, reg) 1342 reserve_range_in_node(nid, reg->base, reg->base + reg->size); 1343 } 1344 1345 static void __init bootmem_init_one_node(int nid) 1346 { 1347 struct pglist_data *p; 1348 1349 numadbg("bootmem_init_one_node(%d)\n", nid); 1350 1351 p = NODE_DATA(nid); 1352 1353 if (p->node_spanned_pages) { 1354 unsigned long paddr = node_masks[nid].bootmem_paddr; 1355 unsigned long end_pfn; 1356 1357 end_pfn = p->node_start_pfn + p->node_spanned_pages; 1358 1359 numadbg(" init_bootmem_node(%d, %lx, %lx, %lx)\n", 1360 nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn); 1361 1362 init_bootmem_node(p, paddr >> PAGE_SHIFT, 1363 p->node_start_pfn, end_pfn); 1364 1365 numadbg(" free_bootmem_with_active_regions(%d, %lx)\n", 1366 nid, end_pfn); 1367 free_bootmem_with_active_regions(nid, end_pfn); 1368 1369 trim_reserved_in_node(nid); 1370 1371 numadbg(" sparse_memory_present_with_active_regions(%d)\n", 1372 nid); 1373 sparse_memory_present_with_active_regions(nid); 1374 } 1375 } 1376 1377 static unsigned long __init bootmem_init(unsigned long phys_base) 1378 { 1379 unsigned long end_pfn; 1380 int nid; 1381 1382 end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; 1383 max_pfn = max_low_pfn = end_pfn; 1384 min_low_pfn = (phys_base >> PAGE_SHIFT); 1385 1386 if (bootmem_init_numa() < 0) 1387 bootmem_init_nonnuma(); 1388 1389 /* XXX cpu notifier XXX */ 1390 1391 for_each_online_node(nid) 1392 bootmem_init_one_node(nid); 1393 1394 sparse_init(); 1395 1396 return end_pfn; 1397 } 1398 1399 static struct linux_prom64_registers pall[MAX_BANKS] __initdata; 1400 static int pall_ents __initdata; 1401 1402 #ifdef CONFIG_DEBUG_PAGEALLOC 1403 static unsigned long __ref kernel_map_range(unsigned long pstart, 1404 unsigned long pend, pgprot_t prot) 1405 { 1406 unsigned long vstart = PAGE_OFFSET + pstart; 1407 unsigned long vend = PAGE_OFFSET + pend; 1408 unsigned long alloc_bytes = 0UL; 1409 1410 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { 1411 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n", 1412 vstart, vend); 1413 prom_halt(); 1414 } 1415 1416 while (vstart < vend) { 1417 unsigned long this_end, paddr = __pa(vstart); 1418 pgd_t *pgd = pgd_offset_k(vstart); 1419 pud_t *pud; 1420 pmd_t *pmd; 1421 pte_t *pte; 1422 1423 pud = pud_offset(pgd, vstart); 1424 if (pud_none(*pud)) { 1425 pmd_t *new; 1426 1427 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); 1428 alloc_bytes += PAGE_SIZE; 1429 pud_populate(&init_mm, pud, new); 1430 } 1431 1432 pmd = pmd_offset(pud, vstart); 1433 if (!pmd_present(*pmd)) { 1434 pte_t *new; 1435 1436 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); 1437 alloc_bytes += PAGE_SIZE; 1438 pmd_populate_kernel(&init_mm, pmd, new); 1439 } 1440 1441 pte = pte_offset_kernel(pmd, vstart); 1442 this_end = (vstart + PMD_SIZE) & PMD_MASK; 1443 if (this_end > vend) 1444 this_end = vend; 1445 1446 while (vstart < this_end) { 1447 pte_val(*pte) = (paddr | pgprot_val(prot)); 1448 1449 vstart += PAGE_SIZE; 1450 paddr += PAGE_SIZE; 1451 pte++; 1452 } 1453 } 1454 1455 return alloc_bytes; 1456 } 1457 1458 extern unsigned int kvmap_linear_patch[1]; 1459 #endif /* CONFIG_DEBUG_PAGEALLOC */ 1460 1461 static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) 1462 { 1463 const unsigned long shift_256MB = 28; 1464 const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL); 1465 const unsigned long size_256MB = (1UL << shift_256MB); 1466 1467 while (start < end) { 1468 long remains; 1469 1470 remains = end - start; 1471 if (remains < size_256MB) 1472 break; 1473 1474 if (start & mask_256MB) { 1475 start = (start + size_256MB) & ~mask_256MB; 1476 continue; 1477 } 1478 1479 while (remains >= size_256MB) { 1480 unsigned long index = start >> shift_256MB; 1481 1482 __set_bit(index, kpte_linear_bitmap); 1483 1484 start += size_256MB; 1485 remains -= size_256MB; 1486 } 1487 } 1488 } 1489 1490 static void __init init_kpte_bitmap(void) 1491 { 1492 unsigned long i; 1493 1494 for (i = 0; i < pall_ents; i++) { 1495 unsigned long phys_start, phys_end; 1496 1497 phys_start = pall[i].phys_addr; 1498 phys_end = phys_start + pall[i].reg_size; 1499 1500 mark_kpte_bitmap(phys_start, phys_end); 1501 } 1502 } 1503 1504 static void __init kernel_physical_mapping_init(void) 1505 { 1506 #ifdef CONFIG_DEBUG_PAGEALLOC 1507 unsigned long i, mem_alloced = 0UL; 1508 1509 for (i = 0; i < pall_ents; i++) { 1510 unsigned long phys_start, phys_end; 1511 1512 phys_start = pall[i].phys_addr; 1513 phys_end = phys_start + pall[i].reg_size; 1514 1515 mem_alloced += kernel_map_range(phys_start, phys_end, 1516 PAGE_KERNEL); 1517 } 1518 1519 printk("Allocated %ld bytes for kernel page tables.\n", 1520 mem_alloced); 1521 1522 kvmap_linear_patch[0] = 0x01000000; /* nop */ 1523 flushi(&kvmap_linear_patch[0]); 1524 1525 __flush_tlb_all(); 1526 #endif 1527 } 1528 1529 #ifdef CONFIG_DEBUG_PAGEALLOC 1530 void kernel_map_pages(struct page *page, int numpages, int enable) 1531 { 1532 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; 1533 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); 1534 1535 kernel_map_range(phys_start, phys_end, 1536 (enable ? PAGE_KERNEL : __pgprot(0))); 1537 1538 flush_tsb_kernel_range(PAGE_OFFSET + phys_start, 1539 PAGE_OFFSET + phys_end); 1540 1541 /* we should perform an IPI and flush all tlbs, 1542 * but that can deadlock->flush only current cpu. 1543 */ 1544 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, 1545 PAGE_OFFSET + phys_end); 1546 } 1547 #endif 1548 1549 unsigned long __init find_ecache_flush_span(unsigned long size) 1550 { 1551 int i; 1552 1553 for (i = 0; i < pavail_ents; i++) { 1554 if (pavail[i].reg_size >= size) 1555 return pavail[i].phys_addr; 1556 } 1557 1558 return ~0UL; 1559 } 1560 1561 static void __init tsb_phys_patch(void) 1562 { 1563 struct tsb_ldquad_phys_patch_entry *pquad; 1564 struct tsb_phys_patch_entry *p; 1565 1566 pquad = &__tsb_ldquad_phys_patch; 1567 while (pquad < &__tsb_ldquad_phys_patch_end) { 1568 unsigned long addr = pquad->addr; 1569 1570 if (tlb_type == hypervisor) 1571 *(unsigned int *) addr = pquad->sun4v_insn; 1572 else 1573 *(unsigned int *) addr = pquad->sun4u_insn; 1574 wmb(); 1575 __asm__ __volatile__("flush %0" 1576 : /* no outputs */ 1577 : "r" (addr)); 1578 1579 pquad++; 1580 } 1581 1582 p = &__tsb_phys_patch; 1583 while (p < &__tsb_phys_patch_end) { 1584 unsigned long addr = p->addr; 1585 1586 *(unsigned int *) addr = p->insn; 1587 wmb(); 1588 __asm__ __volatile__("flush %0" 1589 : /* no outputs */ 1590 : "r" (addr)); 1591 1592 p++; 1593 } 1594 } 1595 1596 /* Don't mark as init, we give this to the Hypervisor. */ 1597 #ifndef CONFIG_DEBUG_PAGEALLOC 1598 #define NUM_KTSB_DESCR 2 1599 #else 1600 #define NUM_KTSB_DESCR 1 1601 #endif 1602 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR]; 1603 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; 1604 1605 static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa) 1606 { 1607 pa >>= KTSB_PHYS_SHIFT; 1608 1609 while (start < end) { 1610 unsigned int *ia = (unsigned int *)(unsigned long)*start; 1611 1612 ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10); 1613 __asm__ __volatile__("flush %0" : : "r" (ia)); 1614 1615 ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff); 1616 __asm__ __volatile__("flush %0" : : "r" (ia + 1)); 1617 1618 start++; 1619 } 1620 } 1621 1622 static void ktsb_phys_patch(void) 1623 { 1624 extern unsigned int __swapper_tsb_phys_patch; 1625 extern unsigned int __swapper_tsb_phys_patch_end; 1626 unsigned long ktsb_pa; 1627 1628 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); 1629 patch_one_ktsb_phys(&__swapper_tsb_phys_patch, 1630 &__swapper_tsb_phys_patch_end, ktsb_pa); 1631 #ifndef CONFIG_DEBUG_PAGEALLOC 1632 { 1633 extern unsigned int __swapper_4m_tsb_phys_patch; 1634 extern unsigned int __swapper_4m_tsb_phys_patch_end; 1635 ktsb_pa = (kern_base + 1636 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); 1637 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch, 1638 &__swapper_4m_tsb_phys_patch_end, ktsb_pa); 1639 } 1640 #endif 1641 } 1642 1643 static void __init sun4v_ktsb_init(void) 1644 { 1645 unsigned long ktsb_pa; 1646 1647 /* First KTSB for PAGE_SIZE mappings. */ 1648 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); 1649 1650 switch (PAGE_SIZE) { 1651 case 8 * 1024: 1652 default: 1653 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K; 1654 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K; 1655 break; 1656 1657 case 64 * 1024: 1658 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K; 1659 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K; 1660 break; 1661 1662 case 512 * 1024: 1663 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K; 1664 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K; 1665 break; 1666 1667 case 4 * 1024 * 1024: 1668 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; 1669 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; 1670 break; 1671 } 1672 1673 ktsb_descr[0].assoc = 1; 1674 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; 1675 ktsb_descr[0].ctx_idx = 0; 1676 ktsb_descr[0].tsb_base = ktsb_pa; 1677 ktsb_descr[0].resv = 0; 1678 1679 #ifndef CONFIG_DEBUG_PAGEALLOC 1680 /* Second KTSB for 4MB/256MB mappings. */ 1681 ktsb_pa = (kern_base + 1682 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); 1683 1684 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; 1685 ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB | 1686 HV_PGSZ_MASK_256MB); 1687 ktsb_descr[1].assoc = 1; 1688 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; 1689 ktsb_descr[1].ctx_idx = 0; 1690 ktsb_descr[1].tsb_base = ktsb_pa; 1691 ktsb_descr[1].resv = 0; 1692 #endif 1693 } 1694 1695 void __cpuinit sun4v_ktsb_register(void) 1696 { 1697 unsigned long pa, ret; 1698 1699 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); 1700 1701 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa); 1702 if (ret != 0) { 1703 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: " 1704 "errors with %lx\n", pa, ret); 1705 prom_halt(); 1706 } 1707 } 1708 1709 /* paging_init() sets up the page tables */ 1710 1711 static unsigned long last_valid_pfn; 1712 pgd_t swapper_pg_dir[2048]; 1713 1714 static void sun4u_pgprot_init(void); 1715 static void sun4v_pgprot_init(void); 1716 1717 void __init paging_init(void) 1718 { 1719 unsigned long end_pfn, shift, phys_base; 1720 unsigned long real_end, i; 1721 1722 /* These build time checkes make sure that the dcache_dirty_cpu() 1723 * page->flags usage will work. 1724 * 1725 * When a page gets marked as dcache-dirty, we store the 1726 * cpu number starting at bit 32 in the page->flags. Also, 1727 * functions like clear_dcache_dirty_cpu use the cpu mask 1728 * in 13-bit signed-immediate instruction fields. 1729 */ 1730 1731 /* 1732 * Page flags must not reach into upper 32 bits that are used 1733 * for the cpu number 1734 */ 1735 BUILD_BUG_ON(NR_PAGEFLAGS > 32); 1736 1737 /* 1738 * The bit fields placed in the high range must not reach below 1739 * the 32 bit boundary. Otherwise we cannot place the cpu field 1740 * at the 32 bit boundary. 1741 */ 1742 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + 1743 ilog2(roundup_pow_of_two(NR_CPUS)) > 32); 1744 1745 BUILD_BUG_ON(NR_CPUS > 4096); 1746 1747 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; 1748 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; 1749 1750 /* Invalidate both kernel TSBs. */ 1751 memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); 1752 #ifndef CONFIG_DEBUG_PAGEALLOC 1753 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); 1754 #endif 1755 1756 if (tlb_type == hypervisor) 1757 sun4v_pgprot_init(); 1758 else 1759 sun4u_pgprot_init(); 1760 1761 if (tlb_type == cheetah_plus || 1762 tlb_type == hypervisor) { 1763 tsb_phys_patch(); 1764 ktsb_phys_patch(); 1765 } 1766 1767 if (tlb_type == hypervisor) { 1768 sun4v_patch_tlb_handlers(); 1769 sun4v_ktsb_init(); 1770 } 1771 1772 memblock_init(); 1773 1774 /* Find available physical memory... 1775 * 1776 * Read it twice in order to work around a bug in openfirmware. 1777 * The call to grab this table itself can cause openfirmware to 1778 * allocate memory, which in turn can take away some space from 1779 * the list of available memory. Reading it twice makes sure 1780 * we really do get the final value. 1781 */ 1782 read_obp_translations(); 1783 read_obp_memory("reg", &pall[0], &pall_ents); 1784 read_obp_memory("available", &pavail[0], &pavail_ents); 1785 read_obp_memory("available", &pavail[0], &pavail_ents); 1786 1787 phys_base = 0xffffffffffffffffUL; 1788 for (i = 0; i < pavail_ents; i++) { 1789 phys_base = min(phys_base, pavail[i].phys_addr); 1790 memblock_add(pavail[i].phys_addr, pavail[i].reg_size); 1791 } 1792 1793 memblock_reserve(kern_base, kern_size); 1794 1795 find_ramdisk(phys_base); 1796 1797 memblock_enforce_memory_limit(cmdline_memory_size); 1798 1799 memblock_analyze(); 1800 memblock_dump_all(); 1801 1802 set_bit(0, mmu_context_bmap); 1803 1804 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); 1805 1806 real_end = (unsigned long)_end; 1807 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22); 1808 printk("Kernel: Using %d locked TLB entries for main kernel image.\n", 1809 num_kernel_image_mappings); 1810 1811 /* Set kernel pgd to upper alias so physical page computations 1812 * work. 1813 */ 1814 init_mm.pgd += ((shift) / (sizeof(pgd_t))); 1815 1816 memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); 1817 1818 /* Now can init the kernel/bad page tables. */ 1819 pud_set(pud_offset(&swapper_pg_dir[0], 0), 1820 swapper_low_pmd_dir + (shift / sizeof(pgd_t))); 1821 1822 inherit_prom_mappings(); 1823 1824 init_kpte_bitmap(); 1825 1826 /* Ok, we can use our TLB miss and window trap handlers safely. */ 1827 setup_tba(); 1828 1829 __flush_tlb_all(); 1830 1831 if (tlb_type == hypervisor) 1832 sun4v_ktsb_register(); 1833 1834 prom_build_devicetree(); 1835 of_populate_present_mask(); 1836 #ifndef CONFIG_SMP 1837 of_fill_in_cpu_data(); 1838 #endif 1839 1840 if (tlb_type == hypervisor) { 1841 sun4v_mdesc_init(); 1842 mdesc_populate_present_mask(cpu_all_mask); 1843 #ifndef CONFIG_SMP 1844 mdesc_fill_in_cpu_data(cpu_all_mask); 1845 #endif 1846 } 1847 1848 /* Once the OF device tree and MDESC have been setup, we know 1849 * the list of possible cpus. Therefore we can allocate the 1850 * IRQ stacks. 1851 */ 1852 for_each_possible_cpu(i) { 1853 /* XXX Use node local allocations... XXX */ 1854 softirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); 1855 hardirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); 1856 } 1857 1858 /* Setup bootmem... */ 1859 last_valid_pfn = end_pfn = bootmem_init(phys_base); 1860 1861 #ifndef CONFIG_NEED_MULTIPLE_NODES 1862 max_mapnr = last_valid_pfn; 1863 #endif 1864 kernel_physical_mapping_init(); 1865 1866 { 1867 unsigned long max_zone_pfns[MAX_NR_ZONES]; 1868 1869 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 1870 1871 max_zone_pfns[ZONE_NORMAL] = end_pfn; 1872 1873 free_area_init_nodes(max_zone_pfns); 1874 } 1875 1876 printk("Booting Linux...\n"); 1877 } 1878 1879 int __devinit page_in_phys_avail(unsigned long paddr) 1880 { 1881 int i; 1882 1883 paddr &= PAGE_MASK; 1884 1885 for (i = 0; i < pavail_ents; i++) { 1886 unsigned long start, end; 1887 1888 start = pavail[i].phys_addr; 1889 end = start + pavail[i].reg_size; 1890 1891 if (paddr >= start && paddr < end) 1892 return 1; 1893 } 1894 if (paddr >= kern_base && paddr < (kern_base + kern_size)) 1895 return 1; 1896 #ifdef CONFIG_BLK_DEV_INITRD 1897 if (paddr >= __pa(initrd_start) && 1898 paddr < __pa(PAGE_ALIGN(initrd_end))) 1899 return 1; 1900 #endif 1901 1902 return 0; 1903 } 1904 1905 static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata; 1906 static int pavail_rescan_ents __initdata; 1907 1908 /* Certain OBP calls, such as fetching "available" properties, can 1909 * claim physical memory. So, along with initializing the valid 1910 * address bitmap, what we do here is refetch the physical available 1911 * memory list again, and make sure it provides at least as much 1912 * memory as 'pavail' does. 1913 */ 1914 static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap) 1915 { 1916 int i; 1917 1918 read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents); 1919 1920 for (i = 0; i < pavail_ents; i++) { 1921 unsigned long old_start, old_end; 1922 1923 old_start = pavail[i].phys_addr; 1924 old_end = old_start + pavail[i].reg_size; 1925 while (old_start < old_end) { 1926 int n; 1927 1928 for (n = 0; n < pavail_rescan_ents; n++) { 1929 unsigned long new_start, new_end; 1930 1931 new_start = pavail_rescan[n].phys_addr; 1932 new_end = new_start + 1933 pavail_rescan[n].reg_size; 1934 1935 if (new_start <= old_start && 1936 new_end >= (old_start + PAGE_SIZE)) { 1937 set_bit(old_start >> 22, bitmap); 1938 goto do_next_page; 1939 } 1940 } 1941 1942 prom_printf("mem_init: Lost memory in pavail\n"); 1943 prom_printf("mem_init: OLD start[%lx] size[%lx]\n", 1944 pavail[i].phys_addr, 1945 pavail[i].reg_size); 1946 prom_printf("mem_init: NEW start[%lx] size[%lx]\n", 1947 pavail_rescan[i].phys_addr, 1948 pavail_rescan[i].reg_size); 1949 prom_printf("mem_init: Cannot continue, aborting.\n"); 1950 prom_halt(); 1951 1952 do_next_page: 1953 old_start += PAGE_SIZE; 1954 } 1955 } 1956 } 1957 1958 static void __init patch_tlb_miss_handler_bitmap(void) 1959 { 1960 extern unsigned int valid_addr_bitmap_insn[]; 1961 extern unsigned int valid_addr_bitmap_patch[]; 1962 1963 valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1]; 1964 mb(); 1965 valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0]; 1966 flushi(&valid_addr_bitmap_insn[0]); 1967 } 1968 1969 void __init mem_init(void) 1970 { 1971 unsigned long codepages, datapages, initpages; 1972 unsigned long addr, last; 1973 1974 addr = PAGE_OFFSET + kern_base; 1975 last = PAGE_ALIGN(kern_size) + addr; 1976 while (addr < last) { 1977 set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap); 1978 addr += PAGE_SIZE; 1979 } 1980 1981 setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap); 1982 patch_tlb_miss_handler_bitmap(); 1983 1984 high_memory = __va(last_valid_pfn << PAGE_SHIFT); 1985 1986 #ifdef CONFIG_NEED_MULTIPLE_NODES 1987 { 1988 int i; 1989 for_each_online_node(i) { 1990 if (NODE_DATA(i)->node_spanned_pages != 0) { 1991 totalram_pages += 1992 free_all_bootmem_node(NODE_DATA(i)); 1993 } 1994 } 1995 } 1996 #else 1997 totalram_pages = free_all_bootmem(); 1998 #endif 1999 2000 /* We subtract one to account for the mem_map_zero page 2001 * allocated below. 2002 */ 2003 totalram_pages -= 1; 2004 num_physpages = totalram_pages; 2005 2006 /* 2007 * Set up the zero page, mark it reserved, so that page count 2008 * is not manipulated when freeing the page from user ptes. 2009 */ 2010 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0); 2011 if (mem_map_zero == NULL) { 2012 prom_printf("paging_init: Cannot alloc zero page.\n"); 2013 prom_halt(); 2014 } 2015 SetPageReserved(mem_map_zero); 2016 2017 codepages = (((unsigned long) _etext) - ((unsigned long) _start)); 2018 codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; 2019 datapages = (((unsigned long) _edata) - ((unsigned long) _etext)); 2020 datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; 2021 initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin)); 2022 initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; 2023 2024 printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n", 2025 nr_free_pages() << (PAGE_SHIFT-10), 2026 codepages << (PAGE_SHIFT-10), 2027 datapages << (PAGE_SHIFT-10), 2028 initpages << (PAGE_SHIFT-10), 2029 PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT)); 2030 2031 if (tlb_type == cheetah || tlb_type == cheetah_plus) 2032 cheetah_ecache_flush_init(); 2033 } 2034 2035 void free_initmem(void) 2036 { 2037 unsigned long addr, initend; 2038 int do_free = 1; 2039 2040 /* If the physical memory maps were trimmed by kernel command 2041 * line options, don't even try freeing this initmem stuff up. 2042 * The kernel image could have been in the trimmed out region 2043 * and if so the freeing below will free invalid page structs. 2044 */ 2045 if (cmdline_memory_size) 2046 do_free = 0; 2047 2048 /* 2049 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes. 2050 */ 2051 addr = PAGE_ALIGN((unsigned long)(__init_begin)); 2052 initend = (unsigned long)(__init_end) & PAGE_MASK; 2053 for (; addr < initend; addr += PAGE_SIZE) { 2054 unsigned long page; 2055 struct page *p; 2056 2057 page = (addr + 2058 ((unsigned long) __va(kern_base)) - 2059 ((unsigned long) KERNBASE)); 2060 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); 2061 2062 if (do_free) { 2063 p = virt_to_page(page); 2064 2065 ClearPageReserved(p); 2066 init_page_count(p); 2067 __free_page(p); 2068 num_physpages++; 2069 totalram_pages++; 2070 } 2071 } 2072 } 2073 2074 #ifdef CONFIG_BLK_DEV_INITRD 2075 void free_initrd_mem(unsigned long start, unsigned long end) 2076 { 2077 if (start < end) 2078 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); 2079 for (; start < end; start += PAGE_SIZE) { 2080 struct page *p = virt_to_page(start); 2081 2082 ClearPageReserved(p); 2083 init_page_count(p); 2084 __free_page(p); 2085 num_physpages++; 2086 totalram_pages++; 2087 } 2088 } 2089 #endif 2090 2091 #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) 2092 #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) 2093 #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) 2094 #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) 2095 #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) 2096 #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) 2097 2098 pgprot_t PAGE_KERNEL __read_mostly; 2099 EXPORT_SYMBOL(PAGE_KERNEL); 2100 2101 pgprot_t PAGE_KERNEL_LOCKED __read_mostly; 2102 pgprot_t PAGE_COPY __read_mostly; 2103 2104 pgprot_t PAGE_SHARED __read_mostly; 2105 EXPORT_SYMBOL(PAGE_SHARED); 2106 2107 unsigned long pg_iobits __read_mostly; 2108 2109 unsigned long _PAGE_IE __read_mostly; 2110 EXPORT_SYMBOL(_PAGE_IE); 2111 2112 unsigned long _PAGE_E __read_mostly; 2113 EXPORT_SYMBOL(_PAGE_E); 2114 2115 unsigned long _PAGE_CACHE __read_mostly; 2116 EXPORT_SYMBOL(_PAGE_CACHE); 2117 2118 #ifdef CONFIG_SPARSEMEM_VMEMMAP 2119 unsigned long vmemmap_table[VMEMMAP_SIZE]; 2120 2121 int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) 2122 { 2123 unsigned long vstart = (unsigned long) start; 2124 unsigned long vend = (unsigned long) (start + nr); 2125 unsigned long phys_start = (vstart - VMEMMAP_BASE); 2126 unsigned long phys_end = (vend - VMEMMAP_BASE); 2127 unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK; 2128 unsigned long end = VMEMMAP_ALIGN(phys_end); 2129 unsigned long pte_base; 2130 2131 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | 2132 _PAGE_CP_4U | _PAGE_CV_4U | 2133 _PAGE_P_4U | _PAGE_W_4U); 2134 if (tlb_type == hypervisor) 2135 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | 2136 _PAGE_CP_4V | _PAGE_CV_4V | 2137 _PAGE_P_4V | _PAGE_W_4V); 2138 2139 for (; addr < end; addr += VMEMMAP_CHUNK) { 2140 unsigned long *vmem_pp = 2141 vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT); 2142 void *block; 2143 2144 if (!(*vmem_pp & _PAGE_VALID)) { 2145 block = vmemmap_alloc_block(1UL << 22, node); 2146 if (!block) 2147 return -ENOMEM; 2148 2149 *vmem_pp = pte_base | __pa(block); 2150 2151 printk(KERN_INFO "[%p-%p] page_structs=%lu " 2152 "node=%d entry=%lu/%lu\n", start, block, nr, 2153 node, 2154 addr >> VMEMMAP_CHUNK_SHIFT, 2155 VMEMMAP_SIZE); 2156 } 2157 } 2158 return 0; 2159 } 2160 #endif /* CONFIG_SPARSEMEM_VMEMMAP */ 2161 2162 static void prot_init_common(unsigned long page_none, 2163 unsigned long page_shared, 2164 unsigned long page_copy, 2165 unsigned long page_readonly, 2166 unsigned long page_exec_bit) 2167 { 2168 PAGE_COPY = __pgprot(page_copy); 2169 PAGE_SHARED = __pgprot(page_shared); 2170 2171 protection_map[0x0] = __pgprot(page_none); 2172 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); 2173 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); 2174 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); 2175 protection_map[0x4] = __pgprot(page_readonly); 2176 protection_map[0x5] = __pgprot(page_readonly); 2177 protection_map[0x6] = __pgprot(page_copy); 2178 protection_map[0x7] = __pgprot(page_copy); 2179 protection_map[0x8] = __pgprot(page_none); 2180 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); 2181 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); 2182 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); 2183 protection_map[0xc] = __pgprot(page_readonly); 2184 protection_map[0xd] = __pgprot(page_readonly); 2185 protection_map[0xe] = __pgprot(page_shared); 2186 protection_map[0xf] = __pgprot(page_shared); 2187 } 2188 2189 static void __init sun4u_pgprot_init(void) 2190 { 2191 unsigned long page_none, page_shared, page_copy, page_readonly; 2192 unsigned long page_exec_bit; 2193 2194 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | 2195 _PAGE_CACHE_4U | _PAGE_P_4U | 2196 __ACCESS_BITS_4U | __DIRTY_BITS_4U | 2197 _PAGE_EXEC_4U); 2198 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | 2199 _PAGE_CACHE_4U | _PAGE_P_4U | 2200 __ACCESS_BITS_4U | __DIRTY_BITS_4U | 2201 _PAGE_EXEC_4U | _PAGE_L_4U); 2202 2203 _PAGE_IE = _PAGE_IE_4U; 2204 _PAGE_E = _PAGE_E_4U; 2205 _PAGE_CACHE = _PAGE_CACHE_4U; 2206 2207 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | 2208 __ACCESS_BITS_4U | _PAGE_E_4U); 2209 2210 #ifdef CONFIG_DEBUG_PAGEALLOC 2211 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^ 2212 0xfffff80000000000UL; 2213 #else 2214 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ 2215 0xfffff80000000000UL; 2216 #endif 2217 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | 2218 _PAGE_P_4U | _PAGE_W_4U); 2219 2220 /* XXX Should use 256MB on Panther. XXX */ 2221 kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; 2222 2223 _PAGE_SZBITS = _PAGE_SZBITS_4U; 2224 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | 2225 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | 2226 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); 2227 2228 2229 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; 2230 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | 2231 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); 2232 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | 2233 __ACCESS_BITS_4U | _PAGE_EXEC_4U); 2234 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | 2235 __ACCESS_BITS_4U | _PAGE_EXEC_4U); 2236 2237 page_exec_bit = _PAGE_EXEC_4U; 2238 2239 prot_init_common(page_none, page_shared, page_copy, page_readonly, 2240 page_exec_bit); 2241 } 2242 2243 static void __init sun4v_pgprot_init(void) 2244 { 2245 unsigned long page_none, page_shared, page_copy, page_readonly; 2246 unsigned long page_exec_bit; 2247 2248 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | 2249 _PAGE_CACHE_4V | _PAGE_P_4V | 2250 __ACCESS_BITS_4V | __DIRTY_BITS_4V | 2251 _PAGE_EXEC_4V); 2252 PAGE_KERNEL_LOCKED = PAGE_KERNEL; 2253 2254 _PAGE_IE = _PAGE_IE_4V; 2255 _PAGE_E = _PAGE_E_4V; 2256 _PAGE_CACHE = _PAGE_CACHE_4V; 2257 2258 #ifdef CONFIG_DEBUG_PAGEALLOC 2259 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^ 2260 0xfffff80000000000UL; 2261 #else 2262 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ 2263 0xfffff80000000000UL; 2264 #endif 2265 kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | 2266 _PAGE_P_4V | _PAGE_W_4V); 2267 2268 #ifdef CONFIG_DEBUG_PAGEALLOC 2269 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^ 2270 0xfffff80000000000UL; 2271 #else 2272 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ 2273 0xfffff80000000000UL; 2274 #endif 2275 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | 2276 _PAGE_P_4V | _PAGE_W_4V); 2277 2278 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | 2279 __ACCESS_BITS_4V | _PAGE_E_4V); 2280 2281 _PAGE_SZBITS = _PAGE_SZBITS_4V; 2282 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | 2283 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | 2284 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | 2285 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); 2286 2287 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; 2288 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | 2289 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); 2290 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | 2291 __ACCESS_BITS_4V | _PAGE_EXEC_4V); 2292 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | 2293 __ACCESS_BITS_4V | _PAGE_EXEC_4V); 2294 2295 page_exec_bit = _PAGE_EXEC_4V; 2296 2297 prot_init_common(page_none, page_shared, page_copy, page_readonly, 2298 page_exec_bit); 2299 } 2300 2301 unsigned long pte_sz_bits(unsigned long sz) 2302 { 2303 if (tlb_type == hypervisor) { 2304 switch (sz) { 2305 case 8 * 1024: 2306 default: 2307 return _PAGE_SZ8K_4V; 2308 case 64 * 1024: 2309 return _PAGE_SZ64K_4V; 2310 case 512 * 1024: 2311 return _PAGE_SZ512K_4V; 2312 case 4 * 1024 * 1024: 2313 return _PAGE_SZ4MB_4V; 2314 } 2315 } else { 2316 switch (sz) { 2317 case 8 * 1024: 2318 default: 2319 return _PAGE_SZ8K_4U; 2320 case 64 * 1024: 2321 return _PAGE_SZ64K_4U; 2322 case 512 * 1024: 2323 return _PAGE_SZ512K_4U; 2324 case 4 * 1024 * 1024: 2325 return _PAGE_SZ4MB_4U; 2326 } 2327 } 2328 } 2329 2330 pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) 2331 { 2332 pte_t pte; 2333 2334 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot)); 2335 pte_val(pte) |= (((unsigned long)space) << 32); 2336 pte_val(pte) |= pte_sz_bits(page_size); 2337 2338 return pte; 2339 } 2340 2341 static unsigned long kern_large_tte(unsigned long paddr) 2342 { 2343 unsigned long val; 2344 2345 val = (_PAGE_VALID | _PAGE_SZ4MB_4U | 2346 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | 2347 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); 2348 if (tlb_type == hypervisor) 2349 val = (_PAGE_VALID | _PAGE_SZ4MB_4V | 2350 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | 2351 _PAGE_EXEC_4V | _PAGE_W_4V); 2352 2353 return val | paddr; 2354 } 2355 2356 /* If not locked, zap it. */ 2357 void __flush_tlb_all(void) 2358 { 2359 unsigned long pstate; 2360 int i; 2361 2362 __asm__ __volatile__("flushw\n\t" 2363 "rdpr %%pstate, %0\n\t" 2364 "wrpr %0, %1, %%pstate" 2365 : "=r" (pstate) 2366 : "i" (PSTATE_IE)); 2367 if (tlb_type == hypervisor) { 2368 sun4v_mmu_demap_all(); 2369 } else if (tlb_type == spitfire) { 2370 for (i = 0; i < 64; i++) { 2371 /* Spitfire Errata #32 workaround */ 2372 /* NOTE: Always runs on spitfire, so no 2373 * cheetah+ page size encodings. 2374 */ 2375 __asm__ __volatile__("stxa %0, [%1] %2\n\t" 2376 "flush %%g6" 2377 : /* No outputs */ 2378 : "r" (0), 2379 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); 2380 2381 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { 2382 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" 2383 "membar #Sync" 2384 : /* no outputs */ 2385 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); 2386 spitfire_put_dtlb_data(i, 0x0UL); 2387 } 2388 2389 /* Spitfire Errata #32 workaround */ 2390 /* NOTE: Always runs on spitfire, so no 2391 * cheetah+ page size encodings. 2392 */ 2393 __asm__ __volatile__("stxa %0, [%1] %2\n\t" 2394 "flush %%g6" 2395 : /* No outputs */ 2396 : "r" (0), 2397 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); 2398 2399 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { 2400 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" 2401 "membar #Sync" 2402 : /* no outputs */ 2403 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); 2404 spitfire_put_itlb_data(i, 0x0UL); 2405 } 2406 } 2407 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { 2408 cheetah_flush_dtlb_all(); 2409 cheetah_flush_itlb_all(); 2410 } 2411 __asm__ __volatile__("wrpr %0, 0, %%pstate" 2412 : : "r" (pstate)); 2413 } 2414