/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006 Helge Deller (deller@gmx.de)
 *
 */

#include <linux/config.h>

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages and page_cache_release */

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern char _text;		/* start of kernel code, defined by linker */
extern int  data_start;
extern char _end;		/* end of BSS, defined by linker */
extern char __init_begin, __init_end;

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t bmem_data[MAX_NUMNODES] __read_mostly;
unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

#ifdef __LP64__
#define MAX_MEM		(~0UL)
#else /* !__LP64__ */
#define MAX_MEM		(3584U*1024U*1024U)
#endif /* !__LP64__ */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;
	extern char saved_command_line[];

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = saved_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */
	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
				pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
				i, start, start + (size - 1), size >> 20);
		}
	}

	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */
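
	/*
	 * Illustrative example: booting with "mem=512M" makes memparse() in
	 * mem_limit_func() return 0x20000000, so mem_limit is clamped to
	 * 512 MB and the loop below truncates or drops any memory range
	 * beyond that point.
	 */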
	mem_limit_func();	/* check for "mem=" argument */

	mem_max = 0;
	num_physpages = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			num_physpages += pmem_ranges[i].pages;
			break;
		}
		num_physpages += pmem_ranges[i].pages;
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bmem_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++)
		node_set_online(i);
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do is to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						bootmap_pfn,
						start_pfn,
						(start_pfn + npages));
		free_bootmem_node(NODE_DATA(i),
				  (start_pfn << PAGE_SHIFT),
				  (npages << PAGE_SHIFT));
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
		printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
		BUG();
	}
	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)&_text),
			(unsigned long)(&_end - &_text));
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n",
				__pa(initrd_start),
				__pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(&_end) - 1;
	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}

void free_initmem(void)
{
	unsigned long addr, init_begin, init_end;

	printk(KERN_INFO "Freeing unused kernel memory: ");

#ifdef CONFIG_DEBUG_KERNEL
	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 *
	 * If we disable interrupts for all CPUs, then IPI stops working.
	 * Kinda breaks the global cache flushing.
	 */
	local_irq_disable();

	memset(&__init_begin, 0x00,
		(unsigned long)&__init_end - (unsigned long)&__init_begin);

	flush_data_cache();
	asm volatile("sync" : : );
	flush_icache_range((unsigned long)&__init_begin, (unsigned long)&__init_end);
	asm volatile("sync" : : );

	local_irq_enable();
#endif

	/* align __init_begin and __init_end to page size,
	   ignoring linker script where we might have tried to save RAM */
	init_begin = PAGE_ALIGN((unsigned long)(&__init_begin));
	init_end   = PAGE_ALIGN((unsigned long)(&__init_end));
	for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		num_physpages++;
		totalram_pages++;
	}

	/* set up a new LED state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

	printk("%luk freed\n", (init_end - init_begin) >> 10);
}


#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	extern char __start_rodata, __end_rodata;
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(&__end_rodata - &__start_rodata) >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */
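
/*
 * Worked example (illustrative only, assuming the 32K VM_MAP_OFFSET defined
 * below stays unchanged): SET_MAP_OFFSET(x) returns the next 32K boundary
 * strictly above x, e.g. SET_MAP_OFFSET(0x1000) == 0x8000 and
 * SET_MAP_OFFSET(0x8000) == 0x10000, which is what creates the guard hole
 * described above.
 */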
/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))

void *vmalloc_start __read_mostly;
EXPORT_SYMBOL(vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
	high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
	totalram_pages += free_all_bootmem();
#else
	{
		int i;

		for (i = 0; i < npmem_ranges; i++)
			totalram_pages += free_all_bootmem_node(NODE_DATA(i));
	}
#endif

	printk(KERN_INFO "Memory: %luk available\n", num_physpages << (PAGE_SHIFT-10));

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif
}

unsigned long *empty_zero_page __read_mostly;

void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n",
		nr_swap_pages << (PAGE_SHIFT-10));
#ifndef CONFIG_DISCONTIGMEM
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(&mem_map[i]))
			free++;
		else
			shared += page_count(&mem_map[i]) - 1;
	}
#else
	for (i = 0; i < npmem_ranges; i++) {
		int j;

		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
			struct page *p;
			unsigned long flags;

			pgdat_resize_lock(NODE_DATA(i), &flags);
			p = nid_page_nr(i, j) - node_start_pfn(i);

			total++;
			if (PageReserved(p))
				reserved++;
			else if (PageSwapCache(p))
				cached++;
			else if (!page_count(p))
				free++;
			else
				shared += page_count(p) - 1;
			pgdat_resize_unlock(NODE_DATA(i), &flags);
		}
	}
#endif
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);


#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j, k;

		for (i = 0; i < npmem_ranges; i++) {
			for (j = 0; j < MAX_NR_ZONES; j++) {
				zl = NODE_DATA(i)->node_zonelists + j;

				printk("Zone list for zone %d on node %d: ", j, i);
				for (k = 0; zl->zones[k] != NULL; k++)
					printk("[%d/%s] ", zl->zones[k]->zone_pgdat->node_id,
						zl->zones[k]->name);
				printk("\n");
			}
		}
	}
#endif
}
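
/*
 * Hypothetical usage sketch (illustrative values, not a call made by this
 * file as-is): a range starting at physical address 0 with 16 MB of RAM
 * would be mapped at its kernel virtual alias with
 *
 *	map_pages((unsigned long)__va(0), 0, 16 << 20, PAGE_KERNEL);
 *
 * which is the pattern pagetable_init() below uses for each entry in
 * pmem_ranges[].
 */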
static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr,
			     unsigned long size, pgprot_t pgprot)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)&_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),
							PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
				if (address >= ro_start && address < ro_end
						&& address != fv_addr
						&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
#endif
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr)
					pte_val(pte) = 0;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			size, PAGE_KERNEL);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			initrd_end - initrd_start, PAGE_KERNEL);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		PAGE_SIZE, PAGE_GATEWAY);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */
	pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) pgd_address(*pg_dir);

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	__pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) pmd_address(*pmd);
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	__pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };

		/* We have an IOMMU, so all memory can go into a single
		   ZONE_DMA zone. */
		zones_size[ZONE_DMA] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
			int j;
			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, NODE_DATA(i), zones_size,
				pmem_ranges[i].start_pfn, NULL);
	}
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection id's, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space id's and
 * protection id's. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection id's, so that is the limiting factor.
 * PCXT' has 18 bit protection id's, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
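
/*
 * Illustrative arithmetic: on a 64-bit PA2.0 kernel NR_SPACE_IDS is 262144,
 * so SID_ARRAY_SIZE works out to 262144 / 64 = 4096 longs and
 * RECYCLE_THRESHOLD to 131072.  alloc_sid() below hands out bit indices
 * from the space_id[] bitmap and returns them shifted left by SPACEID_SHIFT.
 */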
static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */
#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;
	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		num_physpages++;
		totalram_pages++;
	}
}
#endif