1 /* 2 * arch/sparc64/mm/init.c 3 * 4 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu) 5 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 6 */ 7 8 #include <linux/module.h> 9 #include <linux/kernel.h> 10 #include <linux/sched.h> 11 #include <linux/string.h> 12 #include <linux/init.h> 13 #include <linux/bootmem.h> 14 #include <linux/mm.h> 15 #include <linux/hugetlb.h> 16 #include <linux/initrd.h> 17 #include <linux/swap.h> 18 #include <linux/pagemap.h> 19 #include <linux/poison.h> 20 #include <linux/fs.h> 21 #include <linux/seq_file.h> 22 #include <linux/kprobes.h> 23 #include <linux/cache.h> 24 #include <linux/sort.h> 25 #include <linux/percpu.h> 26 #include <linux/memblock.h> 27 #include <linux/mmzone.h> 28 #include <linux/gfp.h> 29 30 #include <asm/head.h> 31 #include <asm/page.h> 32 #include <asm/pgalloc.h> 33 #include <asm/pgtable.h> 34 #include <asm/oplib.h> 35 #include <asm/iommu.h> 36 #include <asm/io.h> 37 #include <asm/uaccess.h> 38 #include <asm/mmu_context.h> 39 #include <asm/tlbflush.h> 40 #include <asm/dma.h> 41 #include <asm/starfire.h> 42 #include <asm/tlb.h> 43 #include <asm/spitfire.h> 44 #include <asm/sections.h> 45 #include <asm/tsb.h> 46 #include <asm/hypervisor.h> 47 #include <asm/prom.h> 48 #include <asm/mdesc.h> 49 #include <asm/cpudata.h> 50 #include <asm/irq.h> 51 52 #include "init_64.h" 53 54 unsigned long kern_linear_pte_xor[2] __read_mostly; 55 56 /* A bitmap, one bit for every 256MB of physical memory. If the bit 57 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else 58 * if set we should use a 256MB page (via kern_linear_pte_xor[1]). 59 */ 60 unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; 61 62 #ifndef CONFIG_DEBUG_PAGEALLOC 63 /* A special kernel TSB for 4MB and 256MB linear mappings. 64 * Space is allocated for this right after the trap table 65 * in arch/sparc64/kernel/head.S 66 */ 67 extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; 68 #endif 69 70 #define MAX_BANKS 32 71 72 static struct linux_prom64_registers pavail[MAX_BANKS] __devinitdata; 73 static int pavail_ents __devinitdata; 74 75 static int cmp_p64(const void *a, const void *b) 76 { 77 const struct linux_prom64_registers *x = a, *y = b; 78 79 if (x->phys_addr > y->phys_addr) 80 return 1; 81 if (x->phys_addr < y->phys_addr) 82 return -1; 83 return 0; 84 } 85 86 static void __init read_obp_memory(const char *property, 87 struct linux_prom64_registers *regs, 88 int *num_ents) 89 { 90 phandle node = prom_finddevice("/memory"); 91 int prop_size = prom_getproplen(node, property); 92 int ents, ret, i; 93 94 ents = prop_size / sizeof(struct linux_prom64_registers); 95 if (ents > MAX_BANKS) { 96 prom_printf("The machine has more %s property entries than " 97 "this kernel can support (%d).\n", 98 property, MAX_BANKS); 99 prom_halt(); 100 } 101 102 ret = prom_getproperty(node, property, (char *) regs, prop_size); 103 if (ret == -1) { 104 prom_printf("Couldn't get %s property from /memory.\n"); 105 prom_halt(); 106 } 107 108 /* Sanitize what we got from the firmware, by page aligning 109 * everything. 110 */ 111 for (i = 0; i < ents; i++) { 112 unsigned long base, size; 113 114 base = regs[i].phys_addr; 115 size = regs[i].reg_size; 116 117 size &= PAGE_MASK; 118 if (base & ~PAGE_MASK) { 119 unsigned long new_base = PAGE_ALIGN(base); 120 121 size -= new_base - base; 122 if ((long) size < 0L) 123 size = 0UL; 124 base = new_base; 125 } 126 if (size == 0UL) { 127 /* If it is empty, simply get rid of it. 
128 * This simplifies the logic of the other 129 * functions that process these arrays. 130 */ 131 memmove(®s[i], ®s[i + 1], 132 (ents - i - 1) * sizeof(regs[0])); 133 i--; 134 ents--; 135 continue; 136 } 137 regs[i].phys_addr = base; 138 regs[i].reg_size = size; 139 } 140 141 *num_ents = ents; 142 143 sort(regs, ents, sizeof(struct linux_prom64_registers), 144 cmp_p64, NULL); 145 } 146 147 unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES / 148 sizeof(unsigned long)]; 149 EXPORT_SYMBOL(sparc64_valid_addr_bitmap); 150 151 /* Kernel physical address base and size in bytes. */ 152 unsigned long kern_base __read_mostly; 153 unsigned long kern_size __read_mostly; 154 155 /* Initial ramdisk setup */ 156 extern unsigned long sparc_ramdisk_image64; 157 extern unsigned int sparc_ramdisk_image; 158 extern unsigned int sparc_ramdisk_size; 159 160 struct page *mem_map_zero __read_mostly; 161 EXPORT_SYMBOL(mem_map_zero); 162 163 unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly; 164 165 unsigned long sparc64_kern_pri_context __read_mostly; 166 unsigned long sparc64_kern_pri_nuc_bits __read_mostly; 167 unsigned long sparc64_kern_sec_context __read_mostly; 168 169 int num_kernel_image_mappings; 170 171 #ifdef CONFIG_DEBUG_DCFLUSH 172 atomic_t dcpage_flushes = ATOMIC_INIT(0); 173 #ifdef CONFIG_SMP 174 atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); 175 #endif 176 #endif 177 178 inline void flush_dcache_page_impl(struct page *page) 179 { 180 BUG_ON(tlb_type == hypervisor); 181 #ifdef CONFIG_DEBUG_DCFLUSH 182 atomic_inc(&dcpage_flushes); 183 #endif 184 185 #ifdef DCACHE_ALIASING_POSSIBLE 186 __flush_dcache_page(page_address(page), 187 ((tlb_type == spitfire) && 188 page_mapping(page) != NULL)); 189 #else 190 if (page_mapping(page) != NULL && 191 tlb_type == spitfire) 192 __flush_icache_page(__pa(page_address(page))); 193 #endif 194 } 195 196 #define PG_dcache_dirty PG_arch_1 197 #define PG_dcache_cpu_shift 32UL 198 #define PG_dcache_cpu_mask \ 199 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL) 200 201 #define dcache_dirty_cpu(page) \ 202 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) 203 204 static inline void set_dcache_dirty(struct page *page, int this_cpu) 205 { 206 unsigned long mask = this_cpu; 207 unsigned long non_cpu_bits; 208 209 non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift); 210 mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty); 211 212 __asm__ __volatile__("1:\n\t" 213 "ldx [%2], %%g7\n\t" 214 "and %%g7, %1, %%g1\n\t" 215 "or %%g1, %0, %%g1\n\t" 216 "casx [%2], %%g7, %%g1\n\t" 217 "cmp %%g7, %%g1\n\t" 218 "bne,pn %%xcc, 1b\n\t" 219 " nop" 220 : /* no outputs */ 221 : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags) 222 : "g1", "g7"); 223 } 224 225 static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu) 226 { 227 unsigned long mask = (1UL << PG_dcache_dirty); 228 229 __asm__ __volatile__("! 
test_and_clear_dcache_dirty\n" 230 "1:\n\t" 231 "ldx [%2], %%g7\n\t" 232 "srlx %%g7, %4, %%g1\n\t" 233 "and %%g1, %3, %%g1\n\t" 234 "cmp %%g1, %0\n\t" 235 "bne,pn %%icc, 2f\n\t" 236 " andn %%g7, %1, %%g1\n\t" 237 "casx [%2], %%g7, %%g1\n\t" 238 "cmp %%g7, %%g1\n\t" 239 "bne,pn %%xcc, 1b\n\t" 240 " nop\n" 241 "2:" 242 : /* no outputs */ 243 : "r" (cpu), "r" (mask), "r" (&page->flags), 244 "i" (PG_dcache_cpu_mask), 245 "i" (PG_dcache_cpu_shift) 246 : "g1", "g7"); 247 } 248 249 static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte) 250 { 251 unsigned long tsb_addr = (unsigned long) ent; 252 253 if (tlb_type == cheetah_plus || tlb_type == hypervisor) 254 tsb_addr = __pa(tsb_addr); 255 256 __tsb_insert(tsb_addr, tag, pte); 257 } 258 259 unsigned long _PAGE_ALL_SZ_BITS __read_mostly; 260 unsigned long _PAGE_SZBITS __read_mostly; 261 262 static void flush_dcache(unsigned long pfn) 263 { 264 struct page *page; 265 266 page = pfn_to_page(pfn); 267 if (page) { 268 unsigned long pg_flags; 269 270 pg_flags = page->flags; 271 if (pg_flags & (1UL << PG_dcache_dirty)) { 272 int cpu = ((pg_flags >> PG_dcache_cpu_shift) & 273 PG_dcache_cpu_mask); 274 int this_cpu = get_cpu(); 275 276 /* This is just to optimize away some function calls 277 * in the SMP case. 278 */ 279 if (cpu == this_cpu) 280 flush_dcache_page_impl(page); 281 else 282 smp_flush_dcache_page_impl(page, cpu); 283 284 clear_dcache_dirty_cpu(page, cpu); 285 286 put_cpu(); 287 } 288 } 289 } 290 291 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) 292 { 293 struct mm_struct *mm; 294 struct tsb *tsb; 295 unsigned long tag, flags; 296 unsigned long tsb_index, tsb_hash_shift; 297 pte_t pte = *ptep; 298 299 if (tlb_type != hypervisor) { 300 unsigned long pfn = pte_pfn(pte); 301 302 if (pfn_valid(pfn)) 303 flush_dcache(pfn); 304 } 305 306 mm = vma->vm_mm; 307 308 tsb_index = MM_TSB_BASE; 309 tsb_hash_shift = PAGE_SHIFT; 310 311 spin_lock_irqsave(&mm->context.lock, flags); 312 313 #ifdef CONFIG_HUGETLB_PAGE 314 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) { 315 if ((tlb_type == hypervisor && 316 (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) || 317 (tlb_type != hypervisor && 318 (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) { 319 tsb_index = MM_TSB_HUGE; 320 tsb_hash_shift = HPAGE_SHIFT; 321 } 322 } 323 #endif 324 325 tsb = mm->context.tsb_block[tsb_index].tsb; 326 tsb += ((address >> tsb_hash_shift) & 327 (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); 328 tag = (address >> 22UL); 329 tsb_insert(tsb, tag, pte_val(pte)); 330 331 spin_unlock_irqrestore(&mm->context.lock, flags); 332 } 333 334 void flush_dcache_page(struct page *page) 335 { 336 struct address_space *mapping; 337 int this_cpu; 338 339 if (tlb_type == hypervisor) 340 return; 341 342 /* Do not bother with the expensive D-cache flush if it 343 * is merely the zero page. The 'bigcore' testcase in GDB 344 * causes this case to run millions of times. 345 */ 346 if (page == ZERO_PAGE(0)) 347 return; 348 349 this_cpu = get_cpu(); 350 351 mapping = page_mapping(page); 352 if (mapping && !mapping_mapped(mapping)) { 353 int dirty = test_bit(PG_dcache_dirty, &page->flags); 354 if (dirty) { 355 int dirty_cpu = dcache_dirty_cpu(page); 356 357 if (dirty_cpu == this_cpu) 358 goto out; 359 smp_flush_dcache_page_impl(page, dirty_cpu); 360 } 361 set_dcache_dirty(page, this_cpu); 362 } else { 363 /* We could delay the flush for the !page_mapping 364 * case too. 
But that case is for exec env/arg 365 * pages and those are %99 certainly going to get 366 * faulted into the tlb (and thus flushed) anyways. 367 */ 368 flush_dcache_page_impl(page); 369 } 370 371 out: 372 put_cpu(); 373 } 374 EXPORT_SYMBOL(flush_dcache_page); 375 376 void __kprobes flush_icache_range(unsigned long start, unsigned long end) 377 { 378 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ 379 if (tlb_type == spitfire) { 380 unsigned long kaddr; 381 382 /* This code only runs on Spitfire cpus so this is 383 * why we can assume _PAGE_PADDR_4U. 384 */ 385 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) { 386 unsigned long paddr, mask = _PAGE_PADDR_4U; 387 388 if (kaddr >= PAGE_OFFSET) 389 paddr = kaddr & mask; 390 else { 391 pgd_t *pgdp = pgd_offset_k(kaddr); 392 pud_t *pudp = pud_offset(pgdp, kaddr); 393 pmd_t *pmdp = pmd_offset(pudp, kaddr); 394 pte_t *ptep = pte_offset_kernel(pmdp, kaddr); 395 396 paddr = pte_val(*ptep) & mask; 397 } 398 __flush_icache_page(paddr); 399 } 400 } 401 } 402 EXPORT_SYMBOL(flush_icache_range); 403 404 void mmu_info(struct seq_file *m) 405 { 406 if (tlb_type == cheetah) 407 seq_printf(m, "MMU Type\t: Cheetah\n"); 408 else if (tlb_type == cheetah_plus) 409 seq_printf(m, "MMU Type\t: Cheetah+\n"); 410 else if (tlb_type == spitfire) 411 seq_printf(m, "MMU Type\t: Spitfire\n"); 412 else if (tlb_type == hypervisor) 413 seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n"); 414 else 415 seq_printf(m, "MMU Type\t: ???\n"); 416 417 #ifdef CONFIG_DEBUG_DCFLUSH 418 seq_printf(m, "DCPageFlushes\t: %d\n", 419 atomic_read(&dcpage_flushes)); 420 #ifdef CONFIG_SMP 421 seq_printf(m, "DCPageFlushesXC\t: %d\n", 422 atomic_read(&dcpage_flushes_xcall)); 423 #endif /* CONFIG_SMP */ 424 #endif /* CONFIG_DEBUG_DCFLUSH */ 425 } 426 427 struct linux_prom_translation prom_trans[512] __read_mostly; 428 unsigned int prom_trans_ents __read_mostly; 429 430 unsigned long kern_locked_tte_data; 431 432 /* The obp translations are saved based on 8k pagesize, since obp can 433 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> 434 * HI_OBP_ADDRESS range are handled in ktlb.S. 435 */ 436 static inline int in_obp_range(unsigned long vaddr) 437 { 438 return (vaddr >= LOW_OBP_ADDRESS && 439 vaddr < HI_OBP_ADDRESS); 440 } 441 442 static int cmp_ptrans(const void *a, const void *b) 443 { 444 const struct linux_prom_translation *x = a, *y = b; 445 446 if (x->virt > y->virt) 447 return 1; 448 if (x->virt < y->virt) 449 return -1; 450 return 0; 451 } 452 453 /* Read OBP translations property into 'prom_trans[]'. */ 454 static void __init read_obp_translations(void) 455 { 456 int n, node, ents, first, last, i; 457 458 node = prom_finddevice("/virtual-memory"); 459 n = prom_getproplen(node, "translations"); 460 if (unlikely(n == 0 || n == -1)) { 461 prom_printf("prom_mappings: Couldn't get size.\n"); 462 prom_halt(); 463 } 464 if (unlikely(n > sizeof(prom_trans))) { 465 prom_printf("prom_mappings: Size %Zd is too big.\n", n); 466 prom_halt(); 467 } 468 469 if ((n = prom_getproperty(node, "translations", 470 (char *)&prom_trans[0], 471 sizeof(prom_trans))) == -1) { 472 prom_printf("prom_mappings: Couldn't get property.\n"); 473 prom_halt(); 474 } 475 476 n = n / sizeof(struct linux_prom_translation); 477 478 ents = n; 479 480 sort(prom_trans, ents, sizeof(struct linux_prom_translation), 481 cmp_ptrans, NULL); 482 483 /* Now kick out all the non-OBP entries. 
*/ 484 for (i = 0; i < ents; i++) { 485 if (in_obp_range(prom_trans[i].virt)) 486 break; 487 } 488 first = i; 489 for (; i < ents; i++) { 490 if (!in_obp_range(prom_trans[i].virt)) 491 break; 492 } 493 last = i; 494 495 for (i = 0; i < (last - first); i++) { 496 struct linux_prom_translation *src = &prom_trans[i + first]; 497 struct linux_prom_translation *dest = &prom_trans[i]; 498 499 *dest = *src; 500 } 501 for (; i < ents; i++) { 502 struct linux_prom_translation *dest = &prom_trans[i]; 503 dest->virt = dest->size = dest->data = 0x0UL; 504 } 505 506 prom_trans_ents = last - first; 507 508 if (tlb_type == spitfire) { 509 /* Clear diag TTE bits. */ 510 for (i = 0; i < prom_trans_ents; i++) 511 prom_trans[i].data &= ~0x0003fe0000000000UL; 512 } 513 514 /* Force execute bit on. */ 515 for (i = 0; i < prom_trans_ents; i++) 516 prom_trans[i].data |= (tlb_type == hypervisor ? 517 _PAGE_EXEC_4V : _PAGE_EXEC_4U); 518 } 519 520 static void __init hypervisor_tlb_lock(unsigned long vaddr, 521 unsigned long pte, 522 unsigned long mmu) 523 { 524 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu); 525 526 if (ret != 0) { 527 prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: " 528 "errors with %lx\n", vaddr, 0, pte, mmu, ret); 529 prom_halt(); 530 } 531 } 532 533 static unsigned long kern_large_tte(unsigned long paddr); 534 535 static void __init remap_kernel(void) 536 { 537 unsigned long phys_page, tte_vaddr, tte_data; 538 int i, tlb_ent = sparc64_highest_locked_tlbent(); 539 540 tte_vaddr = (unsigned long) KERNBASE; 541 phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; 542 tte_data = kern_large_tte(phys_page); 543 544 kern_locked_tte_data = tte_data; 545 546 /* Now lock us into the TLBs via Hypervisor or OBP. */ 547 if (tlb_type == hypervisor) { 548 for (i = 0; i < num_kernel_image_mappings; i++) { 549 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); 550 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); 551 tte_vaddr += 0x400000; 552 tte_data += 0x400000; 553 } 554 } else { 555 for (i = 0; i < num_kernel_image_mappings; i++) { 556 prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr); 557 prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr); 558 tte_vaddr += 0x400000; 559 tte_data += 0x400000; 560 } 561 sparc64_highest_unlocked_tlb_ent = tlb_ent - i; 562 } 563 if (tlb_type == cheetah_plus) { 564 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | 565 CTX_CHEETAH_PLUS_NUC); 566 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC; 567 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0; 568 } 569 } 570 571 572 static void __init inherit_prom_mappings(void) 573 { 574 /* Now fixup OBP's idea about where we really are mapped. */ 575 printk("Remapping the kernel... 
"); 576 remap_kernel(); 577 printk("done.\n"); 578 } 579 580 void prom_world(int enter) 581 { 582 if (!enter) 583 set_fs((mm_segment_t) { get_thread_current_ds() }); 584 585 __asm__ __volatile__("flushw"); 586 } 587 588 void __flush_dcache_range(unsigned long start, unsigned long end) 589 { 590 unsigned long va; 591 592 if (tlb_type == spitfire) { 593 int n = 0; 594 595 for (va = start; va < end; va += 32) { 596 spitfire_put_dcache_tag(va & 0x3fe0, 0x0); 597 if (++n >= 512) 598 break; 599 } 600 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { 601 start = __pa(start); 602 end = __pa(end); 603 for (va = start; va < end; va += 32) 604 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" 605 "membar #Sync" 606 : /* no outputs */ 607 : "r" (va), 608 "i" (ASI_DCACHE_INVALIDATE)); 609 } 610 } 611 EXPORT_SYMBOL(__flush_dcache_range); 612 613 /* get_new_mmu_context() uses "cache + 1". */ 614 DEFINE_SPINLOCK(ctx_alloc_lock); 615 unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; 616 #define MAX_CTX_NR (1UL << CTX_NR_BITS) 617 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) 618 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); 619 620 /* Caller does TLB context flushing on local CPU if necessary. 621 * The caller also ensures that CTX_VALID(mm->context) is false. 622 * 623 * We must be careful about boundary cases so that we never 624 * let the user have CTX 0 (nucleus) or we ever use a CTX 625 * version of zero (and thus NO_CONTEXT would not be caught 626 * by version mis-match tests in mmu_context.h). 627 * 628 * Always invoked with interrupts disabled. 629 */ 630 void get_new_mmu_context(struct mm_struct *mm) 631 { 632 unsigned long ctx, new_ctx; 633 unsigned long orig_pgsz_bits; 634 unsigned long flags; 635 int new_version; 636 637 spin_lock_irqsave(&ctx_alloc_lock, flags); 638 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); 639 ctx = (tlb_context_cache + 1) & CTX_NR_MASK; 640 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); 641 new_version = 0; 642 if (new_ctx >= (1 << CTX_NR_BITS)) { 643 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); 644 if (new_ctx >= ctx) { 645 int i; 646 new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + 647 CTX_FIRST_VERSION; 648 if (new_ctx == 1) 649 new_ctx = CTX_FIRST_VERSION; 650 651 /* Don't call memset, for 16 entries that's just 652 * plain silly... 653 */ 654 mmu_context_bmap[0] = 3; 655 mmu_context_bmap[1] = 0; 656 mmu_context_bmap[2] = 0; 657 mmu_context_bmap[3] = 0; 658 for (i = 4; i < CTX_BMAP_SLOTS; i += 4) { 659 mmu_context_bmap[i + 0] = 0; 660 mmu_context_bmap[i + 1] = 0; 661 mmu_context_bmap[i + 2] = 0; 662 mmu_context_bmap[i + 3] = 0; 663 } 664 new_version = 1; 665 goto out; 666 } 667 } 668 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); 669 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); 670 out: 671 tlb_context_cache = new_ctx; 672 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; 673 spin_unlock_irqrestore(&ctx_alloc_lock, flags); 674 675 if (unlikely(new_version)) 676 smp_new_mmu_context_version(); 677 } 678 679 static int numa_enabled = 1; 680 static int numa_debug; 681 682 static int __init early_numa(char *p) 683 { 684 if (!p) 685 return 0; 686 687 if (strstr(p, "off")) 688 numa_enabled = 0; 689 690 if (strstr(p, "debug")) 691 numa_debug = 1; 692 693 return 0; 694 } 695 early_param("numa", early_numa); 696 697 #define numadbg(f, a...) 
\ 698 do { if (numa_debug) \ 699 printk(KERN_INFO f, ## a); \ 700 } while (0) 701 702 static void __init find_ramdisk(unsigned long phys_base) 703 { 704 #ifdef CONFIG_BLK_DEV_INITRD 705 if (sparc_ramdisk_image || sparc_ramdisk_image64) { 706 unsigned long ramdisk_image; 707 708 /* Older versions of the bootloader only supported a 709 * 32-bit physical address for the ramdisk image 710 * location, stored at sparc_ramdisk_image. Newer 711 * SILO versions set sparc_ramdisk_image to zero and 712 * provide a full 64-bit physical address at 713 * sparc_ramdisk_image64. 714 */ 715 ramdisk_image = sparc_ramdisk_image; 716 if (!ramdisk_image) 717 ramdisk_image = sparc_ramdisk_image64; 718 719 /* Another bootloader quirk. The bootloader normalizes 720 * the physical address to KERNBASE, so we have to 721 * factor that back out and add in the lowest valid 722 * physical page address to get the true physical address. 723 */ 724 ramdisk_image -= KERNBASE; 725 ramdisk_image += phys_base; 726 727 numadbg("Found ramdisk at physical address 0x%lx, size %u\n", 728 ramdisk_image, sparc_ramdisk_size); 729 730 initrd_start = ramdisk_image; 731 initrd_end = ramdisk_image + sparc_ramdisk_size; 732 733 memblock_reserve(initrd_start, sparc_ramdisk_size); 734 735 initrd_start += PAGE_OFFSET; 736 initrd_end += PAGE_OFFSET; 737 } 738 #endif 739 } 740 741 struct node_mem_mask { 742 unsigned long mask; 743 unsigned long val; 744 }; 745 static struct node_mem_mask node_masks[MAX_NUMNODES]; 746 static int num_node_masks; 747 748 int numa_cpu_lookup_table[NR_CPUS]; 749 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; 750 751 #ifdef CONFIG_NEED_MULTIPLE_NODES 752 753 struct mdesc_mblock { 754 u64 base; 755 u64 size; 756 u64 offset; /* RA-to-PA */ 757 }; 758 static struct mdesc_mblock *mblocks; 759 static int num_mblocks; 760 761 static unsigned long ra_to_pa(unsigned long addr) 762 { 763 int i; 764 765 for (i = 0; i < num_mblocks; i++) { 766 struct mdesc_mblock *m = &mblocks[i]; 767 768 if (addr >= m->base && 769 addr < (m->base + m->size)) { 770 addr += m->offset; 771 break; 772 } 773 } 774 return addr; 775 } 776 777 static int find_node(unsigned long addr) 778 { 779 int i; 780 781 addr = ra_to_pa(addr); 782 for (i = 0; i < num_node_masks; i++) { 783 struct node_mem_mask *p = &node_masks[i]; 784 785 if ((addr & p->mask) == p->val) 786 return i; 787 } 788 return -1; 789 } 790 791 static u64 memblock_nid_range(u64 start, u64 end, int *nid) 792 { 793 *nid = find_node(start); 794 start += PAGE_SIZE; 795 while (start < end) { 796 int n = find_node(start); 797 798 if (n != *nid) 799 break; 800 start += PAGE_SIZE; 801 } 802 803 if (start > end) 804 start = end; 805 806 return start; 807 } 808 #endif 809 810 /* This must be invoked after performing all of the necessary 811 * memblock_set_node() calls for 'nid'. We need to be able to get 812 * correct data from get_pfn_range_for_nid(). 
813 */ 814 static void __init allocate_node_data(int nid) 815 { 816 struct pglist_data *p; 817 unsigned long start_pfn, end_pfn; 818 #ifdef CONFIG_NEED_MULTIPLE_NODES 819 unsigned long paddr; 820 821 paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid); 822 if (!paddr) { 823 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); 824 prom_halt(); 825 } 826 NODE_DATA(nid) = __va(paddr); 827 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); 828 829 NODE_DATA(nid)->node_id = nid; 830 #endif 831 832 p = NODE_DATA(nid); 833 834 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 835 p->node_start_pfn = start_pfn; 836 p->node_spanned_pages = end_pfn - start_pfn; 837 } 838 839 static void init_node_masks_nonnuma(void) 840 { 841 int i; 842 843 numadbg("Initializing tables for non-numa.\n"); 844 845 node_masks[0].mask = node_masks[0].val = 0; 846 num_node_masks = 1; 847 848 for (i = 0; i < NR_CPUS; i++) 849 numa_cpu_lookup_table[i] = 0; 850 851 cpumask_setall(&numa_cpumask_lookup_table[0]); 852 } 853 854 #ifdef CONFIG_NEED_MULTIPLE_NODES 855 struct pglist_data *node_data[MAX_NUMNODES]; 856 857 EXPORT_SYMBOL(numa_cpu_lookup_table); 858 EXPORT_SYMBOL(numa_cpumask_lookup_table); 859 EXPORT_SYMBOL(node_data); 860 861 struct mdesc_mlgroup { 862 u64 node; 863 u64 latency; 864 u64 match; 865 u64 mask; 866 }; 867 static struct mdesc_mlgroup *mlgroups; 868 static int num_mlgroups; 869 870 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio, 871 u32 cfg_handle) 872 { 873 u64 arc; 874 875 mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) { 876 u64 target = mdesc_arc_target(md, arc); 877 const u64 *val; 878 879 val = mdesc_get_property(md, target, 880 "cfg-handle", NULL); 881 if (val && *val == cfg_handle) 882 return 0; 883 } 884 return -ENODEV; 885 } 886 887 static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp, 888 u32 cfg_handle) 889 { 890 u64 arc, candidate, best_latency = ~(u64)0; 891 892 candidate = MDESC_NODE_NULL; 893 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { 894 u64 target = mdesc_arc_target(md, arc); 895 const char *name = mdesc_node_name(md, target); 896 const u64 *val; 897 898 if (strcmp(name, "pio-latency-group")) 899 continue; 900 901 val = mdesc_get_property(md, target, "latency", NULL); 902 if (!val) 903 continue; 904 905 if (*val < best_latency) { 906 candidate = target; 907 best_latency = *val; 908 } 909 } 910 911 if (candidate == MDESC_NODE_NULL) 912 return -ENODEV; 913 914 return scan_pio_for_cfg_handle(md, candidate, cfg_handle); 915 } 916 917 int of_node_to_nid(struct device_node *dp) 918 { 919 const struct linux_prom64_registers *regs; 920 struct mdesc_handle *md; 921 u32 cfg_handle; 922 int count, nid; 923 u64 grp; 924 925 /* This is the right thing to do on currently supported 926 * SUN4U NUMA platforms as well, as the PCI controller does 927 * not sit behind any particular memory controller. 
928 */ 929 if (!mlgroups) 930 return -1; 931 932 regs = of_get_property(dp, "reg", NULL); 933 if (!regs) 934 return -1; 935 936 cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff; 937 938 md = mdesc_grab(); 939 940 count = 0; 941 nid = -1; 942 mdesc_for_each_node_by_name(md, grp, "group") { 943 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) { 944 nid = count; 945 break; 946 } 947 count++; 948 } 949 950 mdesc_release(md); 951 952 return nid; 953 } 954 955 static void __init add_node_ranges(void) 956 { 957 struct memblock_region *reg; 958 959 for_each_memblock(memory, reg) { 960 unsigned long size = reg->size; 961 unsigned long start, end; 962 963 start = reg->base; 964 end = start + size; 965 while (start < end) { 966 unsigned long this_end; 967 int nid; 968 969 this_end = memblock_nid_range(start, end, &nid); 970 971 numadbg("Setting memblock NUMA node nid[%d] " 972 "start[%lx] end[%lx]\n", 973 nid, start, this_end); 974 975 memblock_set_node(start, this_end - start, nid); 976 start = this_end; 977 } 978 } 979 } 980 981 static int __init grab_mlgroups(struct mdesc_handle *md) 982 { 983 unsigned long paddr; 984 int count = 0; 985 u64 node; 986 987 mdesc_for_each_node_by_name(md, node, "memory-latency-group") 988 count++; 989 if (!count) 990 return -ENOENT; 991 992 paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup), 993 SMP_CACHE_BYTES); 994 if (!paddr) 995 return -ENOMEM; 996 997 mlgroups = __va(paddr); 998 num_mlgroups = count; 999 1000 count = 0; 1001 mdesc_for_each_node_by_name(md, node, "memory-latency-group") { 1002 struct mdesc_mlgroup *m = &mlgroups[count++]; 1003 const u64 *val; 1004 1005 m->node = node; 1006 1007 val = mdesc_get_property(md, node, "latency", NULL); 1008 m->latency = *val; 1009 val = mdesc_get_property(md, node, "address-match", NULL); 1010 m->match = *val; 1011 val = mdesc_get_property(md, node, "address-mask", NULL); 1012 m->mask = *val; 1013 1014 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] " 1015 "match[%llx] mask[%llx]\n", 1016 count - 1, m->node, m->latency, m->match, m->mask); 1017 } 1018 1019 return 0; 1020 } 1021 1022 static int __init grab_mblocks(struct mdesc_handle *md) 1023 { 1024 unsigned long paddr; 1025 int count = 0; 1026 u64 node; 1027 1028 mdesc_for_each_node_by_name(md, node, "mblock") 1029 count++; 1030 if (!count) 1031 return -ENOENT; 1032 1033 paddr = memblock_alloc(count * sizeof(struct mdesc_mblock), 1034 SMP_CACHE_BYTES); 1035 if (!paddr) 1036 return -ENOMEM; 1037 1038 mblocks = __va(paddr); 1039 num_mblocks = count; 1040 1041 count = 0; 1042 mdesc_for_each_node_by_name(md, node, "mblock") { 1043 struct mdesc_mblock *m = &mblocks[count++]; 1044 const u64 *val; 1045 1046 val = mdesc_get_property(md, node, "base", NULL); 1047 m->base = *val; 1048 val = mdesc_get_property(md, node, "size", NULL); 1049 m->size = *val; 1050 val = mdesc_get_property(md, node, 1051 "address-congruence-offset", NULL); 1052 m->offset = *val; 1053 1054 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", 1055 count - 1, m->base, m->size, m->offset); 1056 } 1057 1058 return 0; 1059 } 1060 1061 static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md, 1062 u64 grp, cpumask_t *mask) 1063 { 1064 u64 arc; 1065 1066 cpumask_clear(mask); 1067 1068 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) { 1069 u64 target = mdesc_arc_target(md, arc); 1070 const char *name = mdesc_node_name(md, target); 1071 const u64 *id; 1072 1073 if (strcmp(name, "cpu")) 1074 continue; 1075 id = mdesc_get_property(md, target, "id", NULL); 1076 if (*id < 
nr_cpu_ids) 1077 cpumask_set_cpu(*id, mask); 1078 } 1079 } 1080 1081 static struct mdesc_mlgroup * __init find_mlgroup(u64 node) 1082 { 1083 int i; 1084 1085 for (i = 0; i < num_mlgroups; i++) { 1086 struct mdesc_mlgroup *m = &mlgroups[i]; 1087 if (m->node == node) 1088 return m; 1089 } 1090 return NULL; 1091 } 1092 1093 static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp, 1094 int index) 1095 { 1096 struct mdesc_mlgroup *candidate = NULL; 1097 u64 arc, best_latency = ~(u64)0; 1098 struct node_mem_mask *n; 1099 1100 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { 1101 u64 target = mdesc_arc_target(md, arc); 1102 struct mdesc_mlgroup *m = find_mlgroup(target); 1103 if (!m) 1104 continue; 1105 if (m->latency < best_latency) { 1106 candidate = m; 1107 best_latency = m->latency; 1108 } 1109 } 1110 if (!candidate) 1111 return -ENOENT; 1112 1113 if (num_node_masks != index) { 1114 printk(KERN_ERR "Inconsistent NUMA state, " 1115 "index[%d] != num_node_masks[%d]\n", 1116 index, num_node_masks); 1117 return -EINVAL; 1118 } 1119 1120 n = &node_masks[num_node_masks++]; 1121 1122 n->mask = candidate->mask; 1123 n->val = candidate->match; 1124 1125 numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n", 1126 index, n->mask, n->val, candidate->latency); 1127 1128 return 0; 1129 } 1130 1131 static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp, 1132 int index) 1133 { 1134 cpumask_t mask; 1135 int cpu; 1136 1137 numa_parse_mdesc_group_cpus(md, grp, &mask); 1138 1139 for_each_cpu(cpu, &mask) 1140 numa_cpu_lookup_table[cpu] = index; 1141 cpumask_copy(&numa_cpumask_lookup_table[index], &mask); 1142 1143 if (numa_debug) { 1144 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index); 1145 for_each_cpu(cpu, &mask) 1146 printk("%d ", cpu); 1147 printk("]\n"); 1148 } 1149 1150 return numa_attach_mlgroup(md, grp, index); 1151 } 1152 1153 static int __init numa_parse_mdesc(void) 1154 { 1155 struct mdesc_handle *md = mdesc_grab(); 1156 int i, err, count; 1157 u64 node; 1158 1159 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); 1160 if (node == MDESC_NODE_NULL) { 1161 mdesc_release(md); 1162 return -ENOENT; 1163 } 1164 1165 err = grab_mblocks(md); 1166 if (err < 0) 1167 goto out; 1168 1169 err = grab_mlgroups(md); 1170 if (err < 0) 1171 goto out; 1172 1173 count = 0; 1174 mdesc_for_each_node_by_name(md, node, "group") { 1175 err = numa_parse_mdesc_group(md, node, count); 1176 if (err < 0) 1177 break; 1178 count++; 1179 } 1180 1181 add_node_ranges(); 1182 1183 for (i = 0; i < num_node_masks; i++) { 1184 allocate_node_data(i); 1185 node_set_online(i); 1186 } 1187 1188 err = 0; 1189 out: 1190 mdesc_release(md); 1191 return err; 1192 } 1193 1194 static int __init numa_parse_jbus(void) 1195 { 1196 unsigned long cpu, index; 1197 1198 /* NUMA node id is encoded in bits 36 and higher, and there is 1199 * a 1-to-1 mapping from CPU ID to NUMA node ID. 
1200 */ 1201 index = 0; 1202 for_each_present_cpu(cpu) { 1203 numa_cpu_lookup_table[cpu] = index; 1204 cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu)); 1205 node_masks[index].mask = ~((1UL << 36UL) - 1UL); 1206 node_masks[index].val = cpu << 36UL; 1207 1208 index++; 1209 } 1210 num_node_masks = index; 1211 1212 add_node_ranges(); 1213 1214 for (index = 0; index < num_node_masks; index++) { 1215 allocate_node_data(index); 1216 node_set_online(index); 1217 } 1218 1219 return 0; 1220 } 1221 1222 static int __init numa_parse_sun4u(void) 1223 { 1224 if (tlb_type == cheetah || tlb_type == cheetah_plus) { 1225 unsigned long ver; 1226 1227 __asm__ ("rdpr %%ver, %0" : "=r" (ver)); 1228 if ((ver >> 32UL) == __JALAPENO_ID || 1229 (ver >> 32UL) == __SERRANO_ID) 1230 return numa_parse_jbus(); 1231 } 1232 return -1; 1233 } 1234 1235 static int __init bootmem_init_numa(void) 1236 { 1237 int err = -1; 1238 1239 numadbg("bootmem_init_numa()\n"); 1240 1241 if (numa_enabled) { 1242 if (tlb_type == hypervisor) 1243 err = numa_parse_mdesc(); 1244 else 1245 err = numa_parse_sun4u(); 1246 } 1247 return err; 1248 } 1249 1250 #else 1251 1252 static int bootmem_init_numa(void) 1253 { 1254 return -1; 1255 } 1256 1257 #endif 1258 1259 static void __init bootmem_init_nonnuma(void) 1260 { 1261 unsigned long top_of_ram = memblock_end_of_DRAM(); 1262 unsigned long total_ram = memblock_phys_mem_size(); 1263 1264 numadbg("bootmem_init_nonnuma()\n"); 1265 1266 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", 1267 top_of_ram, total_ram); 1268 printk(KERN_INFO "Memory hole size: %ldMB\n", 1269 (top_of_ram - total_ram) >> 20); 1270 1271 init_node_masks_nonnuma(); 1272 memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); 1273 allocate_node_data(0); 1274 node_set_online(0); 1275 } 1276 1277 static unsigned long __init bootmem_init(unsigned long phys_base) 1278 { 1279 unsigned long end_pfn; 1280 1281 end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; 1282 max_pfn = max_low_pfn = end_pfn; 1283 min_low_pfn = (phys_base >> PAGE_SHIFT); 1284 1285 if (bootmem_init_numa() < 0) 1286 bootmem_init_nonnuma(); 1287 1288 /* Dump memblock with node info. 
*/ 1289 memblock_dump_all(); 1290 1291 /* XXX cpu notifier XXX */ 1292 1293 sparse_memory_present_with_active_regions(MAX_NUMNODES); 1294 sparse_init(); 1295 1296 return end_pfn; 1297 } 1298 1299 static struct linux_prom64_registers pall[MAX_BANKS] __initdata; 1300 static int pall_ents __initdata; 1301 1302 #ifdef CONFIG_DEBUG_PAGEALLOC 1303 static unsigned long __ref kernel_map_range(unsigned long pstart, 1304 unsigned long pend, pgprot_t prot) 1305 { 1306 unsigned long vstart = PAGE_OFFSET + pstart; 1307 unsigned long vend = PAGE_OFFSET + pend; 1308 unsigned long alloc_bytes = 0UL; 1309 1310 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { 1311 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n", 1312 vstart, vend); 1313 prom_halt(); 1314 } 1315 1316 while (vstart < vend) { 1317 unsigned long this_end, paddr = __pa(vstart); 1318 pgd_t *pgd = pgd_offset_k(vstart); 1319 pud_t *pud; 1320 pmd_t *pmd; 1321 pte_t *pte; 1322 1323 pud = pud_offset(pgd, vstart); 1324 if (pud_none(*pud)) { 1325 pmd_t *new; 1326 1327 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); 1328 alloc_bytes += PAGE_SIZE; 1329 pud_populate(&init_mm, pud, new); 1330 } 1331 1332 pmd = pmd_offset(pud, vstart); 1333 if (!pmd_present(*pmd)) { 1334 pte_t *new; 1335 1336 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); 1337 alloc_bytes += PAGE_SIZE; 1338 pmd_populate_kernel(&init_mm, pmd, new); 1339 } 1340 1341 pte = pte_offset_kernel(pmd, vstart); 1342 this_end = (vstart + PMD_SIZE) & PMD_MASK; 1343 if (this_end > vend) 1344 this_end = vend; 1345 1346 while (vstart < this_end) { 1347 pte_val(*pte) = (paddr | pgprot_val(prot)); 1348 1349 vstart += PAGE_SIZE; 1350 paddr += PAGE_SIZE; 1351 pte++; 1352 } 1353 } 1354 1355 return alloc_bytes; 1356 } 1357 1358 extern unsigned int kvmap_linear_patch[1]; 1359 #endif /* CONFIG_DEBUG_PAGEALLOC */ 1360 1361 static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) 1362 { 1363 const unsigned long shift_256MB = 28; 1364 const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL); 1365 const unsigned long size_256MB = (1UL << shift_256MB); 1366 1367 while (start < end) { 1368 long remains; 1369 1370 remains = end - start; 1371 if (remains < size_256MB) 1372 break; 1373 1374 if (start & mask_256MB) { 1375 start = (start + size_256MB) & ~mask_256MB; 1376 continue; 1377 } 1378 1379 while (remains >= size_256MB) { 1380 unsigned long index = start >> shift_256MB; 1381 1382 __set_bit(index, kpte_linear_bitmap); 1383 1384 start += size_256MB; 1385 remains -= size_256MB; 1386 } 1387 } 1388 } 1389 1390 static void __init init_kpte_bitmap(void) 1391 { 1392 unsigned long i; 1393 1394 for (i = 0; i < pall_ents; i++) { 1395 unsigned long phys_start, phys_end; 1396 1397 phys_start = pall[i].phys_addr; 1398 phys_end = phys_start + pall[i].reg_size; 1399 1400 mark_kpte_bitmap(phys_start, phys_end); 1401 } 1402 } 1403 1404 static void __init kernel_physical_mapping_init(void) 1405 { 1406 #ifdef CONFIG_DEBUG_PAGEALLOC 1407 unsigned long i, mem_alloced = 0UL; 1408 1409 for (i = 0; i < pall_ents; i++) { 1410 unsigned long phys_start, phys_end; 1411 1412 phys_start = pall[i].phys_addr; 1413 phys_end = phys_start + pall[i].reg_size; 1414 1415 mem_alloced += kernel_map_range(phys_start, phys_end, 1416 PAGE_KERNEL); 1417 } 1418 1419 printk("Allocated %ld bytes for kernel page tables.\n", 1420 mem_alloced); 1421 1422 kvmap_linear_patch[0] = 0x01000000; /* nop */ 1423 flushi(&kvmap_linear_patch[0]); 1424 1425 __flush_tlb_all(); 1426 #endif 1427 } 1428 1429 #ifdef 
CONFIG_DEBUG_PAGEALLOC 1430 void kernel_map_pages(struct page *page, int numpages, int enable) 1431 { 1432 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; 1433 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); 1434 1435 kernel_map_range(phys_start, phys_end, 1436 (enable ? PAGE_KERNEL : __pgprot(0))); 1437 1438 flush_tsb_kernel_range(PAGE_OFFSET + phys_start, 1439 PAGE_OFFSET + phys_end); 1440 1441 /* we should perform an IPI and flush all tlbs, 1442 * but that can deadlock->flush only current cpu. 1443 */ 1444 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, 1445 PAGE_OFFSET + phys_end); 1446 } 1447 #endif 1448 1449 unsigned long __init find_ecache_flush_span(unsigned long size) 1450 { 1451 int i; 1452 1453 for (i = 0; i < pavail_ents; i++) { 1454 if (pavail[i].reg_size >= size) 1455 return pavail[i].phys_addr; 1456 } 1457 1458 return ~0UL; 1459 } 1460 1461 static void __init tsb_phys_patch(void) 1462 { 1463 struct tsb_ldquad_phys_patch_entry *pquad; 1464 struct tsb_phys_patch_entry *p; 1465 1466 pquad = &__tsb_ldquad_phys_patch; 1467 while (pquad < &__tsb_ldquad_phys_patch_end) { 1468 unsigned long addr = pquad->addr; 1469 1470 if (tlb_type == hypervisor) 1471 *(unsigned int *) addr = pquad->sun4v_insn; 1472 else 1473 *(unsigned int *) addr = pquad->sun4u_insn; 1474 wmb(); 1475 __asm__ __volatile__("flush %0" 1476 : /* no outputs */ 1477 : "r" (addr)); 1478 1479 pquad++; 1480 } 1481 1482 p = &__tsb_phys_patch; 1483 while (p < &__tsb_phys_patch_end) { 1484 unsigned long addr = p->addr; 1485 1486 *(unsigned int *) addr = p->insn; 1487 wmb(); 1488 __asm__ __volatile__("flush %0" 1489 : /* no outputs */ 1490 : "r" (addr)); 1491 1492 p++; 1493 } 1494 } 1495 1496 /* Don't mark as init, we give this to the Hypervisor. */ 1497 #ifndef CONFIG_DEBUG_PAGEALLOC 1498 #define NUM_KTSB_DESCR 2 1499 #else 1500 #define NUM_KTSB_DESCR 1 1501 #endif 1502 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR]; 1503 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; 1504 1505 static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa) 1506 { 1507 pa >>= KTSB_PHYS_SHIFT; 1508 1509 while (start < end) { 1510 unsigned int *ia = (unsigned int *)(unsigned long)*start; 1511 1512 ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10); 1513 __asm__ __volatile__("flush %0" : : "r" (ia)); 1514 1515 ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff); 1516 __asm__ __volatile__("flush %0" : : "r" (ia + 1)); 1517 1518 start++; 1519 } 1520 } 1521 1522 static void ktsb_phys_patch(void) 1523 { 1524 extern unsigned int __swapper_tsb_phys_patch; 1525 extern unsigned int __swapper_tsb_phys_patch_end; 1526 unsigned long ktsb_pa; 1527 1528 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); 1529 patch_one_ktsb_phys(&__swapper_tsb_phys_patch, 1530 &__swapper_tsb_phys_patch_end, ktsb_pa); 1531 #ifndef CONFIG_DEBUG_PAGEALLOC 1532 { 1533 extern unsigned int __swapper_4m_tsb_phys_patch; 1534 extern unsigned int __swapper_4m_tsb_phys_patch_end; 1535 ktsb_pa = (kern_base + 1536 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); 1537 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch, 1538 &__swapper_4m_tsb_phys_patch_end, ktsb_pa); 1539 } 1540 #endif 1541 } 1542 1543 static void __init sun4v_ktsb_init(void) 1544 { 1545 unsigned long ktsb_pa; 1546 1547 /* First KTSB for PAGE_SIZE mappings. 
*/ 1548 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); 1549 1550 switch (PAGE_SIZE) { 1551 case 8 * 1024: 1552 default: 1553 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K; 1554 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K; 1555 break; 1556 1557 case 64 * 1024: 1558 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K; 1559 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K; 1560 break; 1561 1562 case 512 * 1024: 1563 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K; 1564 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K; 1565 break; 1566 1567 case 4 * 1024 * 1024: 1568 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; 1569 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; 1570 break; 1571 } 1572 1573 ktsb_descr[0].assoc = 1; 1574 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; 1575 ktsb_descr[0].ctx_idx = 0; 1576 ktsb_descr[0].tsb_base = ktsb_pa; 1577 ktsb_descr[0].resv = 0; 1578 1579 #ifndef CONFIG_DEBUG_PAGEALLOC 1580 /* Second KTSB for 4MB/256MB mappings. */ 1581 ktsb_pa = (kern_base + 1582 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); 1583 1584 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; 1585 ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB | 1586 HV_PGSZ_MASK_256MB); 1587 ktsb_descr[1].assoc = 1; 1588 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; 1589 ktsb_descr[1].ctx_idx = 0; 1590 ktsb_descr[1].tsb_base = ktsb_pa; 1591 ktsb_descr[1].resv = 0; 1592 #endif 1593 } 1594 1595 void __cpuinit sun4v_ktsb_register(void) 1596 { 1597 unsigned long pa, ret; 1598 1599 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); 1600 1601 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa); 1602 if (ret != 0) { 1603 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: " 1604 "errors with %lx\n", pa, ret); 1605 prom_halt(); 1606 } 1607 } 1608 1609 /* paging_init() sets up the page tables */ 1610 1611 static unsigned long last_valid_pfn; 1612 pgd_t swapper_pg_dir[2048]; 1613 1614 static void sun4u_pgprot_init(void); 1615 static void sun4v_pgprot_init(void); 1616 1617 void __init paging_init(void) 1618 { 1619 unsigned long end_pfn, shift, phys_base; 1620 unsigned long real_end, i; 1621 int node; 1622 1623 /* These build time checkes make sure that the dcache_dirty_cpu() 1624 * page->flags usage will work. 1625 * 1626 * When a page gets marked as dcache-dirty, we store the 1627 * cpu number starting at bit 32 in the page->flags. Also, 1628 * functions like clear_dcache_dirty_cpu use the cpu mask 1629 * in 13-bit signed-immediate instruction fields. 1630 */ 1631 1632 /* 1633 * Page flags must not reach into upper 32 bits that are used 1634 * for the cpu number 1635 */ 1636 BUILD_BUG_ON(NR_PAGEFLAGS > 32); 1637 1638 /* 1639 * The bit fields placed in the high range must not reach below 1640 * the 32 bit boundary. Otherwise we cannot place the cpu field 1641 * at the 32 bit boundary. 1642 */ 1643 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + 1644 ilog2(roundup_pow_of_two(NR_CPUS)) > 32); 1645 1646 BUILD_BUG_ON(NR_CPUS > 4096); 1647 1648 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; 1649 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; 1650 1651 /* Invalidate both kernel TSBs. 
*/ 1652 memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); 1653 #ifndef CONFIG_DEBUG_PAGEALLOC 1654 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); 1655 #endif 1656 1657 if (tlb_type == hypervisor) 1658 sun4v_pgprot_init(); 1659 else 1660 sun4u_pgprot_init(); 1661 1662 if (tlb_type == cheetah_plus || 1663 tlb_type == hypervisor) { 1664 tsb_phys_patch(); 1665 ktsb_phys_patch(); 1666 } 1667 1668 if (tlb_type == hypervisor) { 1669 sun4v_patch_tlb_handlers(); 1670 sun4v_ktsb_init(); 1671 } 1672 1673 /* Find available physical memory... 1674 * 1675 * Read it twice in order to work around a bug in openfirmware. 1676 * The call to grab this table itself can cause openfirmware to 1677 * allocate memory, which in turn can take away some space from 1678 * the list of available memory. Reading it twice makes sure 1679 * we really do get the final value. 1680 */ 1681 read_obp_translations(); 1682 read_obp_memory("reg", &pall[0], &pall_ents); 1683 read_obp_memory("available", &pavail[0], &pavail_ents); 1684 read_obp_memory("available", &pavail[0], &pavail_ents); 1685 1686 phys_base = 0xffffffffffffffffUL; 1687 for (i = 0; i < pavail_ents; i++) { 1688 phys_base = min(phys_base, pavail[i].phys_addr); 1689 memblock_add(pavail[i].phys_addr, pavail[i].reg_size); 1690 } 1691 1692 memblock_reserve(kern_base, kern_size); 1693 1694 find_ramdisk(phys_base); 1695 1696 memblock_enforce_memory_limit(cmdline_memory_size); 1697 1698 memblock_allow_resize(); 1699 memblock_dump_all(); 1700 1701 set_bit(0, mmu_context_bmap); 1702 1703 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); 1704 1705 real_end = (unsigned long)_end; 1706 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22); 1707 printk("Kernel: Using %d locked TLB entries for main kernel image.\n", 1708 num_kernel_image_mappings); 1709 1710 /* Set kernel pgd to upper alias so physical page computations 1711 * work. 1712 */ 1713 init_mm.pgd += ((shift) / (sizeof(pgd_t))); 1714 1715 memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); 1716 1717 /* Now can init the kernel/bad page tables. */ 1718 pud_set(pud_offset(&swapper_pg_dir[0], 0), 1719 swapper_low_pmd_dir + (shift / sizeof(pgd_t))); 1720 1721 inherit_prom_mappings(); 1722 1723 init_kpte_bitmap(); 1724 1725 /* Ok, we can use our TLB miss and window trap handlers safely. */ 1726 setup_tba(); 1727 1728 __flush_tlb_all(); 1729 1730 if (tlb_type == hypervisor) 1731 sun4v_ktsb_register(); 1732 1733 prom_build_devicetree(); 1734 of_populate_present_mask(); 1735 #ifndef CONFIG_SMP 1736 of_fill_in_cpu_data(); 1737 #endif 1738 1739 if (tlb_type == hypervisor) { 1740 sun4v_mdesc_init(); 1741 mdesc_populate_present_mask(cpu_all_mask); 1742 #ifndef CONFIG_SMP 1743 mdesc_fill_in_cpu_data(cpu_all_mask); 1744 #endif 1745 } 1746 1747 /* Setup bootmem... */ 1748 last_valid_pfn = end_pfn = bootmem_init(phys_base); 1749 1750 /* Once the OF device tree and MDESC have been setup, we know 1751 * the list of possible cpus. Therefore we can allocate the 1752 * IRQ stacks. 
1753 */ 1754 for_each_possible_cpu(i) { 1755 node = cpu_to_node(i); 1756 1757 softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node), 1758 THREAD_SIZE, 1759 THREAD_SIZE, 0); 1760 hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node), 1761 THREAD_SIZE, 1762 THREAD_SIZE, 0); 1763 } 1764 1765 kernel_physical_mapping_init(); 1766 1767 { 1768 unsigned long max_zone_pfns[MAX_NR_ZONES]; 1769 1770 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 1771 1772 max_zone_pfns[ZONE_NORMAL] = end_pfn; 1773 1774 free_area_init_nodes(max_zone_pfns); 1775 } 1776 1777 printk("Booting Linux...\n"); 1778 } 1779 1780 int __devinit page_in_phys_avail(unsigned long paddr) 1781 { 1782 int i; 1783 1784 paddr &= PAGE_MASK; 1785 1786 for (i = 0; i < pavail_ents; i++) { 1787 unsigned long start, end; 1788 1789 start = pavail[i].phys_addr; 1790 end = start + pavail[i].reg_size; 1791 1792 if (paddr >= start && paddr < end) 1793 return 1; 1794 } 1795 if (paddr >= kern_base && paddr < (kern_base + kern_size)) 1796 return 1; 1797 #ifdef CONFIG_BLK_DEV_INITRD 1798 if (paddr >= __pa(initrd_start) && 1799 paddr < __pa(PAGE_ALIGN(initrd_end))) 1800 return 1; 1801 #endif 1802 1803 return 0; 1804 } 1805 1806 static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata; 1807 static int pavail_rescan_ents __initdata; 1808 1809 /* Certain OBP calls, such as fetching "available" properties, can 1810 * claim physical memory. So, along with initializing the valid 1811 * address bitmap, what we do here is refetch the physical available 1812 * memory list again, and make sure it provides at least as much 1813 * memory as 'pavail' does. 1814 */ 1815 static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap) 1816 { 1817 int i; 1818 1819 read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents); 1820 1821 for (i = 0; i < pavail_ents; i++) { 1822 unsigned long old_start, old_end; 1823 1824 old_start = pavail[i].phys_addr; 1825 old_end = old_start + pavail[i].reg_size; 1826 while (old_start < old_end) { 1827 int n; 1828 1829 for (n = 0; n < pavail_rescan_ents; n++) { 1830 unsigned long new_start, new_end; 1831 1832 new_start = pavail_rescan[n].phys_addr; 1833 new_end = new_start + 1834 pavail_rescan[n].reg_size; 1835 1836 if (new_start <= old_start && 1837 new_end >= (old_start + PAGE_SIZE)) { 1838 set_bit(old_start >> 22, bitmap); 1839 goto do_next_page; 1840 } 1841 } 1842 1843 prom_printf("mem_init: Lost memory in pavail\n"); 1844 prom_printf("mem_init: OLD start[%lx] size[%lx]\n", 1845 pavail[i].phys_addr, 1846 pavail[i].reg_size); 1847 prom_printf("mem_init: NEW start[%lx] size[%lx]\n", 1848 pavail_rescan[i].phys_addr, 1849 pavail_rescan[i].reg_size); 1850 prom_printf("mem_init: Cannot continue, aborting.\n"); 1851 prom_halt(); 1852 1853 do_next_page: 1854 old_start += PAGE_SIZE; 1855 } 1856 } 1857 } 1858 1859 static void __init patch_tlb_miss_handler_bitmap(void) 1860 { 1861 extern unsigned int valid_addr_bitmap_insn[]; 1862 extern unsigned int valid_addr_bitmap_patch[]; 1863 1864 valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1]; 1865 mb(); 1866 valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0]; 1867 flushi(&valid_addr_bitmap_insn[0]); 1868 } 1869 1870 void __init mem_init(void) 1871 { 1872 unsigned long codepages, datapages, initpages; 1873 unsigned long addr, last; 1874 1875 addr = PAGE_OFFSET + kern_base; 1876 last = PAGE_ALIGN(kern_size) + addr; 1877 while (addr < last) { 1878 set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap); 1879 addr += PAGE_SIZE; 1880 } 1881 1882 
setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap); 1883 patch_tlb_miss_handler_bitmap(); 1884 1885 high_memory = __va(last_valid_pfn << PAGE_SHIFT); 1886 1887 #ifdef CONFIG_NEED_MULTIPLE_NODES 1888 { 1889 int i; 1890 for_each_online_node(i) { 1891 if (NODE_DATA(i)->node_spanned_pages != 0) { 1892 totalram_pages += 1893 free_all_bootmem_node(NODE_DATA(i)); 1894 } 1895 } 1896 totalram_pages += free_low_memory_core_early(MAX_NUMNODES); 1897 } 1898 #else 1899 totalram_pages = free_all_bootmem(); 1900 #endif 1901 1902 /* We subtract one to account for the mem_map_zero page 1903 * allocated below. 1904 */ 1905 totalram_pages -= 1; 1906 num_physpages = totalram_pages; 1907 1908 /* 1909 * Set up the zero page, mark it reserved, so that page count 1910 * is not manipulated when freeing the page from user ptes. 1911 */ 1912 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0); 1913 if (mem_map_zero == NULL) { 1914 prom_printf("paging_init: Cannot alloc zero page.\n"); 1915 prom_halt(); 1916 } 1917 SetPageReserved(mem_map_zero); 1918 1919 codepages = (((unsigned long) _etext) - ((unsigned long) _start)); 1920 codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; 1921 datapages = (((unsigned long) _edata) - ((unsigned long) _etext)); 1922 datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; 1923 initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin)); 1924 initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; 1925 1926 printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n", 1927 nr_free_pages() << (PAGE_SHIFT-10), 1928 codepages << (PAGE_SHIFT-10), 1929 datapages << (PAGE_SHIFT-10), 1930 initpages << (PAGE_SHIFT-10), 1931 PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT)); 1932 1933 if (tlb_type == cheetah || tlb_type == cheetah_plus) 1934 cheetah_ecache_flush_init(); 1935 } 1936 1937 void free_initmem(void) 1938 { 1939 unsigned long addr, initend; 1940 int do_free = 1; 1941 1942 /* If the physical memory maps were trimmed by kernel command 1943 * line options, don't even try freeing this initmem stuff up. 1944 * The kernel image could have been in the trimmed out region 1945 * and if so the freeing below will free invalid page structs. 1946 */ 1947 if (cmdline_memory_size) 1948 do_free = 0; 1949 1950 /* 1951 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes. 
1952 */ 1953 addr = PAGE_ALIGN((unsigned long)(__init_begin)); 1954 initend = (unsigned long)(__init_end) & PAGE_MASK; 1955 for (; addr < initend; addr += PAGE_SIZE) { 1956 unsigned long page; 1957 struct page *p; 1958 1959 page = (addr + 1960 ((unsigned long) __va(kern_base)) - 1961 ((unsigned long) KERNBASE)); 1962 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); 1963 1964 if (do_free) { 1965 p = virt_to_page(page); 1966 1967 ClearPageReserved(p); 1968 init_page_count(p); 1969 __free_page(p); 1970 num_physpages++; 1971 totalram_pages++; 1972 } 1973 } 1974 } 1975 1976 #ifdef CONFIG_BLK_DEV_INITRD 1977 void free_initrd_mem(unsigned long start, unsigned long end) 1978 { 1979 if (start < end) 1980 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); 1981 for (; start < end; start += PAGE_SIZE) { 1982 struct page *p = virt_to_page(start); 1983 1984 ClearPageReserved(p); 1985 init_page_count(p); 1986 __free_page(p); 1987 num_physpages++; 1988 totalram_pages++; 1989 } 1990 } 1991 #endif 1992 1993 #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) 1994 #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) 1995 #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) 1996 #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) 1997 #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) 1998 #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) 1999 2000 pgprot_t PAGE_KERNEL __read_mostly; 2001 EXPORT_SYMBOL(PAGE_KERNEL); 2002 2003 pgprot_t PAGE_KERNEL_LOCKED __read_mostly; 2004 pgprot_t PAGE_COPY __read_mostly; 2005 2006 pgprot_t PAGE_SHARED __read_mostly; 2007 EXPORT_SYMBOL(PAGE_SHARED); 2008 2009 unsigned long pg_iobits __read_mostly; 2010 2011 unsigned long _PAGE_IE __read_mostly; 2012 EXPORT_SYMBOL(_PAGE_IE); 2013 2014 unsigned long _PAGE_E __read_mostly; 2015 EXPORT_SYMBOL(_PAGE_E); 2016 2017 unsigned long _PAGE_CACHE __read_mostly; 2018 EXPORT_SYMBOL(_PAGE_CACHE); 2019 2020 #ifdef CONFIG_SPARSEMEM_VMEMMAP 2021 unsigned long vmemmap_table[VMEMMAP_SIZE]; 2022 2023 static long __meminitdata addr_start, addr_end; 2024 static int __meminitdata node_start; 2025 2026 int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) 2027 { 2028 unsigned long vstart = (unsigned long) start; 2029 unsigned long vend = (unsigned long) (start + nr); 2030 unsigned long phys_start = (vstart - VMEMMAP_BASE); 2031 unsigned long phys_end = (vend - VMEMMAP_BASE); 2032 unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK; 2033 unsigned long end = VMEMMAP_ALIGN(phys_end); 2034 unsigned long pte_base; 2035 2036 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | 2037 _PAGE_CP_4U | _PAGE_CV_4U | 2038 _PAGE_P_4U | _PAGE_W_4U); 2039 if (tlb_type == hypervisor) 2040 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | 2041 _PAGE_CP_4V | _PAGE_CV_4V | 2042 _PAGE_P_4V | _PAGE_W_4V); 2043 2044 for (; addr < end; addr += VMEMMAP_CHUNK) { 2045 unsigned long *vmem_pp = 2046 vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT); 2047 void *block; 2048 2049 if (!(*vmem_pp & _PAGE_VALID)) { 2050 block = vmemmap_alloc_block(1UL << 22, node); 2051 if (!block) 2052 return -ENOMEM; 2053 2054 *vmem_pp = pte_base | __pa(block); 2055 2056 /* check to see if we have contiguous blocks */ 2057 if (addr_end != addr || node_start != node) { 2058 if (addr_start) 2059 printk(KERN_DEBUG " [%lx-%lx] on node %d\n", 2060 addr_start, addr_end-1, node_start); 2061 addr_start = addr; 2062 node_start = node; 2063 } 2064 addr_end = addr + VMEMMAP_CHUNK; 2065 } 2066 } 2067 
return 0; 2068 } 2069 2070 void __meminit vmemmap_populate_print_last(void) 2071 { 2072 if (addr_start) { 2073 printk(KERN_DEBUG " [%lx-%lx] on node %d\n", 2074 addr_start, addr_end-1, node_start); 2075 addr_start = 0; 2076 addr_end = 0; 2077 node_start = 0; 2078 } 2079 } 2080 #endif /* CONFIG_SPARSEMEM_VMEMMAP */ 2081 2082 static void prot_init_common(unsigned long page_none, 2083 unsigned long page_shared, 2084 unsigned long page_copy, 2085 unsigned long page_readonly, 2086 unsigned long page_exec_bit) 2087 { 2088 PAGE_COPY = __pgprot(page_copy); 2089 PAGE_SHARED = __pgprot(page_shared); 2090 2091 protection_map[0x0] = __pgprot(page_none); 2092 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); 2093 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); 2094 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); 2095 protection_map[0x4] = __pgprot(page_readonly); 2096 protection_map[0x5] = __pgprot(page_readonly); 2097 protection_map[0x6] = __pgprot(page_copy); 2098 protection_map[0x7] = __pgprot(page_copy); 2099 protection_map[0x8] = __pgprot(page_none); 2100 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); 2101 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); 2102 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); 2103 protection_map[0xc] = __pgprot(page_readonly); 2104 protection_map[0xd] = __pgprot(page_readonly); 2105 protection_map[0xe] = __pgprot(page_shared); 2106 protection_map[0xf] = __pgprot(page_shared); 2107 } 2108 2109 static void __init sun4u_pgprot_init(void) 2110 { 2111 unsigned long page_none, page_shared, page_copy, page_readonly; 2112 unsigned long page_exec_bit; 2113 2114 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | 2115 _PAGE_CACHE_4U | _PAGE_P_4U | 2116 __ACCESS_BITS_4U | __DIRTY_BITS_4U | 2117 _PAGE_EXEC_4U); 2118 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | 2119 _PAGE_CACHE_4U | _PAGE_P_4U | 2120 __ACCESS_BITS_4U | __DIRTY_BITS_4U | 2121 _PAGE_EXEC_4U | _PAGE_L_4U); 2122 2123 _PAGE_IE = _PAGE_IE_4U; 2124 _PAGE_E = _PAGE_E_4U; 2125 _PAGE_CACHE = _PAGE_CACHE_4U; 2126 2127 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | 2128 __ACCESS_BITS_4U | _PAGE_E_4U); 2129 2130 #ifdef CONFIG_DEBUG_PAGEALLOC 2131 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^ 2132 0xfffff80000000000UL; 2133 #else 2134 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ 2135 0xfffff80000000000UL; 2136 #endif 2137 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | 2138 _PAGE_P_4U | _PAGE_W_4U); 2139 2140 /* XXX Should use 256MB on Panther. 
#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot(_PAGE_PRESENT_4V | _PAGE_VALID |
			       _PAGE_CACHE_4V | _PAGE_P_4V |
			       __ACCESS_BITS_4V | __DIRTY_BITS_4V |
			       _PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

/* Translate a page size in bytes into the PTE size-field encoding;
 * anything unrecognized falls back to the base 8K encoding.
 */
unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}

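/* Build a PTE for a device (I/O) mapping: the physical address with
 * caching disabled via pgprot_noncached(), the I/O space identifier
 * placed at bit 32 and up, and the hardware page-size field supplied
 * by pte_sz_bits().
 */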
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space,
		unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}

/* Construct the 4MB kernel TTE covering @paddr: privileged, writable,
 * executable, cacheable, and (on sun4u) marked locked via _PAGE_L_4U.
 */
static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	/* Flush the register windows, then disable interrupts: wrpr
	 * writes the XOR of its two source operands into %pstate,
	 * flipping PSTATE_IE.
	 */
	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	/* Restore the original %pstate, re-enabling interrupts if they
	 * were enabled on entry.
	 */
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}