/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages and page_cache_release */

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern int data_start;

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

#ifdef CONFIG_64BIT
#define MAX_MEM         (~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {

				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
				pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
				i, start, start + (size - 1), size >> 20);
		}
	}

	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();       /* check for "mem=" argument */

	mem_max = 0;
	num_physpages = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			num_physpages += pmem_ranges[i].pages;
			break;
		}
		num_physpages += pmem_ranges[i].pages;
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bootmem_node_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++) {
		node_set_state(i, N_NORMAL_MEMORY);
		node_set_online(i);
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do is to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						bootmap_pfn,
						start_pfn,
						(start_pfn + npages) );
		free_bootmem_node(NODE_DATA(i),
				  (start_pfn << PAGE_SHIFT),
				  (npages << PAGE_SHIFT) );
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* bootmap sizing messed up? */
	BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
			(unsigned long)(_end - _text), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
			BOOTMEM_DEFAULT);

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT),
				BOOTMEM_DEFAULT);
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
					initrd_reserve, BOOTMEM_DEFAULT);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start)-1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}

static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
			     pgprot_t pgprot, int force)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
				if (force)
					pte = __mk_pte(address, pgprot);
				else if (core_kernel_text(vaddr) &&
					 address != fv_addr)
					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
				else
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
				if (address >= ro_start && address < ro_end
							&& address != fv_addr
							&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
#endif
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr) {
					if (force)
						break;
					else
						pte_val(pte) = 0;
				}

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

void free_initmem(void)
{
	unsigned long addr;
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;

	/* The init text pages are marked R-X.  We have to
	 * flush the icache and mark them RW-
	 *
	 * This is tricky, because map_pages is in the init section.
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_kernel */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);
	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, init_end);
	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 */
	memset((void *)init_begin, 0x00, init_end - init_begin);
	/* finally dump all the instructions which were cached, since the
	 * pages are no longer executable */
	flush_icache_range(init_begin, init_end);

	for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		num_physpages++;
		totalram_pages++;
	}

	/* set up a new led state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

	printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
		(init_end - init_begin) >> 10);
}


#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */
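/*
 * For example, with the 32K VM_MAP_OFFSET defined below, SET_MAP_OFFSET()
 * rounds an address up past the next 32K boundary: 0x40001000 becomes
 * 0x40008000, and an already aligned 0x40000000 still advances to
 * 0x40008000, so the hole between neighbouring mapping areas is never
 * empty.
 */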

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))

void *parisc_vmalloc_start __read_mostly;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;

	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);

	high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
	totalram_pages += free_all_bootmem();
#else
	{
		int i;

		for (i = 0; i < npmem_ranges; i++)
			totalram_pages += free_all_bootmem_node(NODE_DATA(i));
	}
#endif

	codesize = (unsigned long)_etext - (unsigned long)_text;
	datasize = (unsigned long)_edata - (unsigned long)_etext;
	initsize = (unsigned long)__init_end - (unsigned long)__init_begin;

	reservedpages = 0;
	{
		unsigned long pfn;
#ifdef CONFIG_DISCONTIGMEM
		int i;

		for (i = 0; i < npmem_ranges; i++) {
			for (pfn = node_start_pfn(i); pfn < node_end_pfn(i); pfn++) {
				if (PageReserved(pfn_to_page(pfn)))
					reservedpages++;
			}
		}
#else /* !CONFIG_DISCONTIGMEM */
		for (pfn = 0; pfn < max_pfn; pfn++) {
			/*
			 * Only count reserved RAM pages
			 */
			if (PageReserved(pfn_to_page(pfn)))
				reservedpages++;
		}
#endif
	}

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10
	);

#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
	       "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
	       "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
	       "      .init : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .data : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .text : 0x%p - 0x%p   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);

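/*
 * Print a summary of page usage: total pages of RAM, reserved pages,
 * pages shared between mappings and pages in the swap cache.  On
 * CONFIG_DISCONTIGMEM kernels each node's pfn range is walked under the
 * pgdat resize lock and the per-node zonelists are dumped as well.
 */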
void show_mem(unsigned int filter)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
#ifndef CONFIG_DISCONTIGMEM
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(&mem_map[i]))
			free++;
		else
			shared += page_count(&mem_map[i]) - 1;
	}
#else
	for (i = 0; i < npmem_ranges; i++) {
		int j;

		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
			struct page *p;
			unsigned long flags;

			pgdat_resize_lock(NODE_DATA(i), &flags);
			p = nid_page_nr(i, j) - node_start_pfn(i);

			total++;
			if (PageReserved(p))
				reserved++;
			else if (PageSwapCache(p))
				cached++;
			else if (!page_count(p))
				free++;
			else
				shared += page_count(p) - 1;
			pgdat_resize_unlock(NODE_DATA(i), &flags);
		}
	}
#endif
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);


#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j;

		for (i = 0; i < npmem_ranges; i++) {
			zl = node_zonelist(i, 0);
			for (j = 0; j < MAX_NR_ZONES; j++) {
				struct zoneref *z;
				struct zone *zone;

				printk("Zone list for zone %d on node %d: ", j, i);
				for_each_zone_zonelist(zone, z, zl, j)
					printk("[%d/%s] ", zone_to_nid(zone),
						zone->name);
				printk("\n");
			}
		}
	}
#endif
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */

	pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) pgd_address(*pg_dir);

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	__pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) pmd_address(*pmd);
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	__pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

		zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
			int j;
			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, zones_size,
				pmem_ranges[i].start_pfn, NULL);
	}
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */
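/*
 * Reading aid for the SMP path below: once enough dirty ids have
 * accumulated (RECYCLE_THRESHOLD), flush_tlb_all() copies the dirty bitmap
 * into a private scratch buffer with get_dirty_sids() under sid_lock,
 * broadcasts a local TLB flush to every CPU, and then XORs the harvested
 * bits back into space_id with recycle_sids() so those space ids become
 * allocatable again.  recycle_inuse guards the single shared scratch buffer.
 */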

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;
	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		num_physpages++;
		totalram_pages++;
	}
}
#endif