1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/arch/parisc/mm/init.c 4 * 5 * Copyright (C) 1995 Linus Torvalds 6 * Copyright 1999 SuSE GmbH 7 * changed by Philipp Rumpf 8 * Copyright 1999 Philipp Rumpf (prumpf@tux.org) 9 * Copyright 2004 Randolph Chung (tausq@debian.org) 10 * Copyright 2006-2007 Helge Deller (deller@gmx.de) 11 * 12 */ 13 14 15 #include <linux/module.h> 16 #include <linux/mm.h> 17 #include <linux/memblock.h> 18 #include <linux/gfp.h> 19 #include <linux/delay.h> 20 #include <linux/init.h> 21 #include <linux/initrd.h> 22 #include <linux/swap.h> 23 #include <linux/unistd.h> 24 #include <linux/nodemask.h> /* for node_online_map */ 25 #include <linux/pagemap.h> /* for release_pages */ 26 #include <linux/compat.h> 27 #include <linux/execmem.h> 28 29 #include <asm/pgalloc.h> 30 #include <asm/tlb.h> 31 #include <asm/pdc_chassis.h> 32 #include <asm/mmzone.h> 33 #include <asm/sections.h> 34 #include <asm/msgbuf.h> 35 #include <asm/sparsemem.h> 36 #include <asm/asm-offsets.h> 37 #include <asm/shmbuf.h> 38 39 extern int data_start; 40 extern void parisc_kernel_start(void); /* Kernel entry point in head.S */ 41 42 #if CONFIG_PGTABLE_LEVELS == 3 43 pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE))); 44 #endif 45 46 pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".data..vm0.pgd") __attribute__ ((aligned(PAGE_SIZE))); 47 pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE))); 48 49 static struct resource data_resource = { 50 .name = "Kernel data", 51 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM, 52 }; 53 54 static struct resource code_resource = { 55 .name = "Kernel code", 56 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM, 57 }; 58 59 static struct resource pdcdata_resource = { 60 .name = "PDC data (Page Zero)", 61 .start = 0, 62 .end = 0x9ff, 63 .flags = IORESOURCE_BUSY | IORESOURCE_MEM, 64 }; 65 66 static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init; 67 68 /* The following array is initialized from the firmware specific 69 * information retrieved in kernel/inventory.c. 70 */ 71 72 physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata; 73 int npmem_ranges __initdata; 74 75 #ifdef CONFIG_64BIT 76 #define MAX_MEM (1UL << MAX_PHYSMEM_BITS) 77 #else /* !CONFIG_64BIT */ 78 #define MAX_MEM (3584U*1024U*1024U) 79 #endif /* !CONFIG_64BIT */ 80 81 static unsigned long mem_limit __read_mostly = MAX_MEM; 82 83 static void __init mem_limit_func(void) 84 { 85 char *cp, *end; 86 unsigned long limit; 87 88 /* We need this before __setup() functions are called */ 89 90 limit = MAX_MEM; 91 for (cp = boot_command_line; *cp; ) { 92 if (memcmp(cp, "mem=", 4) == 0) { 93 cp += 4; 94 limit = memparse(cp, &end); 95 if (end != cp) 96 break; 97 cp = end; 98 } else { 99 while (*cp != ' ' && *cp) 100 ++cp; 101 while (*cp == ' ') 102 ++cp; 103 } 104 } 105 106 if (limit < mem_limit) 107 mem_limit = limit; 108 } 109 110 #define MAX_GAP (0x40000000UL >> PAGE_SHIFT) 111 112 static void __init setup_bootmem(void) 113 { 114 unsigned long mem_max; 115 #ifndef CONFIG_SPARSEMEM 116 physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1]; 117 int npmem_holes; 118 #endif 119 int i, sysram_resource_count; 120 121 disable_sr_hashing(); /* Turn off space register hashing */ 122 123 /* 124 * Sort the ranges. Since the number of ranges is typically 125 * small, and performance is not an issue here, just do 126 * a simple insertion sort. 127 */ 128 129 for (i = 1; i < npmem_ranges; i++) { 130 int j; 131 132 for (j = i; j > 0; j--) { 133 if (pmem_ranges[j-1].start_pfn < 134 pmem_ranges[j].start_pfn) { 135 136 break; 137 } 138 swap(pmem_ranges[j-1], pmem_ranges[j]); 139 } 140 } 141 142 #ifndef CONFIG_SPARSEMEM 143 /* 144 * Throw out ranges that are too far apart (controlled by 145 * MAX_GAP). 146 */ 147 148 for (i = 1; i < npmem_ranges; i++) { 149 if (pmem_ranges[i].start_pfn - 150 (pmem_ranges[i-1].start_pfn + 151 pmem_ranges[i-1].pages) > MAX_GAP) { 152 npmem_ranges = i; 153 printk("Large gap in memory detected (%ld pages). " 154 "Consider turning on CONFIG_SPARSEMEM\n", 155 pmem_ranges[i].start_pfn - 156 (pmem_ranges[i-1].start_pfn + 157 pmem_ranges[i-1].pages)); 158 break; 159 } 160 } 161 #endif 162 163 /* Print the memory ranges */ 164 pr_info("Memory Ranges:\n"); 165 166 for (i = 0; i < npmem_ranges; i++) { 167 struct resource *res = &sysram_resources[i]; 168 unsigned long start; 169 unsigned long size; 170 171 size = (pmem_ranges[i].pages << PAGE_SHIFT); 172 start = (pmem_ranges[i].start_pfn << PAGE_SHIFT); 173 pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n", 174 i, start, start + (size - 1), size >> 20); 175 176 /* request memory resource */ 177 res->name = "System RAM"; 178 res->start = start; 179 res->end = start + size - 1; 180 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 181 request_resource(&iomem_resource, res); 182 } 183 184 sysram_resource_count = npmem_ranges; 185 186 /* 187 * For 32 bit kernels we limit the amount of memory we can 188 * support, in order to preserve enough kernel address space 189 * for other purposes. For 64 bit kernels we don't normally 190 * limit the memory, but this mechanism can be used to 191 * artificially limit the amount of memory (and it is written 192 * to work with multiple memory ranges). 193 */ 194 195 mem_limit_func(); /* check for "mem=" argument */ 196 197 mem_max = 0; 198 for (i = 0; i < npmem_ranges; i++) { 199 unsigned long rsize; 200 201 rsize = pmem_ranges[i].pages << PAGE_SHIFT; 202 if ((mem_max + rsize) > mem_limit) { 203 printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20); 204 if (mem_max == mem_limit) 205 npmem_ranges = i; 206 else { 207 pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT) 208 - (mem_max >> PAGE_SHIFT); 209 npmem_ranges = i + 1; 210 mem_max = mem_limit; 211 } 212 break; 213 } 214 mem_max += rsize; 215 } 216 217 printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20); 218 219 #ifndef CONFIG_SPARSEMEM 220 /* Merge the ranges, keeping track of the holes */ 221 { 222 unsigned long end_pfn; 223 unsigned long hole_pages; 224 225 npmem_holes = 0; 226 end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages; 227 for (i = 1; i < npmem_ranges; i++) { 228 229 hole_pages = pmem_ranges[i].start_pfn - end_pfn; 230 if (hole_pages) { 231 pmem_holes[npmem_holes].start_pfn = end_pfn; 232 pmem_holes[npmem_holes++].pages = hole_pages; 233 end_pfn += hole_pages; 234 } 235 end_pfn += pmem_ranges[i].pages; 236 } 237 238 pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn; 239 npmem_ranges = 1; 240 } 241 #endif 242 243 /* 244 * Initialize and free the full range of memory in each range. 245 */ 246 247 max_pfn = 0; 248 for (i = 0; i < npmem_ranges; i++) { 249 unsigned long start_pfn; 250 unsigned long npages; 251 unsigned long start; 252 unsigned long size; 253 254 start_pfn = pmem_ranges[i].start_pfn; 255 npages = pmem_ranges[i].pages; 256 257 start = start_pfn << PAGE_SHIFT; 258 size = npages << PAGE_SHIFT; 259 260 /* add system RAM memblock */ 261 memblock_add(start, size); 262 263 if ((start_pfn + npages) > max_pfn) 264 max_pfn = start_pfn + npages; 265 } 266 267 /* 268 * We can't use memblock top-down allocations because we only 269 * created the initial mapping up to KERNEL_INITIAL_SIZE in 270 * the assembly bootup code. 271 */ 272 memblock_set_bottom_up(true); 273 274 /* IOMMU is always used to access "high mem" on those boxes 275 * that can support enough mem that a PCI device couldn't 276 * directly DMA to any physical addresses. 277 * ISA DMA support will need to revisit this. 278 */ 279 max_low_pfn = max_pfn; 280 281 /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */ 282 283 #define PDC_CONSOLE_IO_IODC_SIZE 32768 284 285 memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free + 286 PDC_CONSOLE_IO_IODC_SIZE)); 287 memblock_reserve(__pa(KERNEL_BINARY_TEXT_START), 288 (unsigned long)(_end - KERNEL_BINARY_TEXT_START)); 289 290 #ifndef CONFIG_SPARSEMEM 291 292 /* reserve the holes */ 293 294 for (i = 0; i < npmem_holes; i++) { 295 memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT), 296 (pmem_holes[i].pages << PAGE_SHIFT)); 297 } 298 #endif 299 300 #ifdef CONFIG_BLK_DEV_INITRD 301 if (initrd_start) { 302 printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end); 303 if (__pa(initrd_start) < mem_max) { 304 unsigned long initrd_reserve; 305 306 if (__pa(initrd_end) > mem_max) { 307 initrd_reserve = mem_max - __pa(initrd_start); 308 } else { 309 initrd_reserve = initrd_end - initrd_start; 310 } 311 initrd_below_start_ok = 1; 312 printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max); 313 314 memblock_reserve(__pa(initrd_start), initrd_reserve); 315 } 316 } 317 #endif 318 319 data_resource.start = virt_to_phys(&data_start); 320 data_resource.end = virt_to_phys(_end) - 1; 321 code_resource.start = virt_to_phys(_text); 322 code_resource.end = virt_to_phys(&data_start)-1; 323 324 /* We don't know which region the kernel will be in, so try 325 * all of them. 326 */ 327 for (i = 0; i < sysram_resource_count; i++) { 328 struct resource *res = &sysram_resources[i]; 329 request_resource(res, &code_resource); 330 request_resource(res, &data_resource); 331 } 332 request_resource(&sysram_resources[0], &pdcdata_resource); 333 334 /* Initialize Page Deallocation Table (PDT) and check for bad memory. */ 335 pdc_pdt_init(); 336 337 memblock_allow_resize(); 338 memblock_dump_all(); 339 } 340 341 static bool kernel_set_to_readonly; 342 343 static void __ref map_pages(unsigned long start_vaddr, 344 unsigned long start_paddr, unsigned long size, 345 pgprot_t pgprot, int force) 346 { 347 pmd_t *pmd; 348 pte_t *pg_table; 349 unsigned long end_paddr; 350 unsigned long start_pmd; 351 unsigned long start_pte; 352 unsigned long tmp1; 353 unsigned long tmp2; 354 unsigned long address; 355 unsigned long vaddr; 356 unsigned long ro_start; 357 unsigned long ro_end; 358 unsigned long kernel_start, kernel_end; 359 360 ro_start = __pa((unsigned long)_text); 361 ro_end = __pa((unsigned long)&data_start); 362 kernel_start = __pa((unsigned long)&__init_begin); 363 kernel_end = __pa((unsigned long)&_end); 364 365 end_paddr = start_paddr + size; 366 367 /* for 2-level configuration PTRS_PER_PMD is 0 so start_pmd will be 0 */ 368 start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)); 369 start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); 370 371 address = start_paddr; 372 vaddr = start_vaddr; 373 while (address < end_paddr) { 374 pgd_t *pgd = pgd_offset_k(vaddr); 375 p4d_t *p4d = p4d_offset(pgd, vaddr); 376 pud_t *pud = pud_offset(p4d, vaddr); 377 378 #if CONFIG_PGTABLE_LEVELS == 3 379 if (pud_none(*pud)) { 380 pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER, 381 PAGE_SIZE << PMD_TABLE_ORDER); 382 pud_populate(NULL, pud, pmd); 383 } 384 #endif 385 386 pmd = pmd_offset(pud, vaddr); 387 for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) { 388 if (pmd_none(*pmd)) { 389 pg_table = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); 390 pmd_populate_kernel(NULL, pmd, pg_table); 391 } 392 393 pg_table = pte_offset_kernel(pmd, vaddr); 394 for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) { 395 pte_t pte; 396 pgprot_t prot; 397 bool huge = false; 398 399 if (force) { 400 prot = pgprot; 401 } else if (address < kernel_start || address >= kernel_end) { 402 /* outside kernel memory */ 403 prot = PAGE_KERNEL; 404 } else if (!kernel_set_to_readonly) { 405 /* still initializing, allow writing to RO memory */ 406 prot = PAGE_KERNEL_RWX; 407 huge = true; 408 } else if (address >= ro_start) { 409 /* Code (ro) and Data areas */ 410 prot = (address < ro_end) ? 411 PAGE_KERNEL_EXEC : PAGE_KERNEL; 412 huge = true; 413 } else { 414 prot = PAGE_KERNEL; 415 } 416 417 pte = __mk_pte(address, prot); 418 if (huge) 419 pte = pte_mkhuge(pte); 420 421 if (address >= end_paddr) 422 break; 423 424 set_pte(pg_table, pte); 425 426 address += PAGE_SIZE; 427 vaddr += PAGE_SIZE; 428 } 429 start_pte = 0; 430 431 if (address >= end_paddr) 432 break; 433 } 434 start_pmd = 0; 435 } 436 } 437 438 void __init set_kernel_text_rw(int enable_read_write) 439 { 440 unsigned long start = (unsigned long) __init_begin; 441 unsigned long end = (unsigned long) &data_start; 442 443 map_pages(start, __pa(start), end-start, 444 PAGE_KERNEL_RWX, enable_read_write ? 1:0); 445 446 /* force the kernel to see the new page table entries */ 447 flush_cache_all(); 448 flush_tlb_all(); 449 } 450 451 void free_initmem(void) 452 { 453 unsigned long init_begin = (unsigned long)__init_begin; 454 unsigned long init_end = (unsigned long)__init_end; 455 unsigned long kernel_end = (unsigned long)&_end; 456 457 /* Remap kernel text and data, but do not touch init section yet. */ 458 map_pages(init_end, __pa(init_end), kernel_end - init_end, 459 PAGE_KERNEL, 0); 460 461 /* The init text pages are marked R-X. We have to 462 * flush the icache and mark them RW- 463 * 464 * Do a dummy remap of the data section first (the data 465 * section is already PAGE_KERNEL) to pull in the TLB entries 466 * for map_kernel */ 467 map_pages(init_begin, __pa(init_begin), init_end - init_begin, 468 PAGE_KERNEL_RWX, 1); 469 /* now remap at PAGE_KERNEL since the TLB is pre-primed to execute 470 * map_pages */ 471 map_pages(init_begin, __pa(init_begin), init_end - init_begin, 472 PAGE_KERNEL, 1); 473 474 /* force the kernel to see the new TLB entries */ 475 __flush_tlb_range(0, init_begin, kernel_end); 476 477 /* finally dump all the instructions which were cached, since the 478 * pages are no-longer executable */ 479 flush_icache_range(init_begin, init_end); 480 481 free_initmem_default(POISON_FREE_INITMEM); 482 483 /* set up a new led state on systems shipped LED State panel */ 484 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); 485 } 486 487 488 #ifdef CONFIG_STRICT_KERNEL_RWX 489 void mark_rodata_ro(void) 490 { 491 unsigned long start = (unsigned long) &__start_rodata; 492 unsigned long end = (unsigned long) &__end_rodata; 493 494 pr_info("Write protecting the kernel read-only data: %luk\n", 495 (end - start) >> 10); 496 497 kernel_set_to_readonly = true; 498 map_pages(start, __pa(start), end - start, PAGE_KERNEL, 0); 499 500 /* force the kernel to see the new page table entries */ 501 flush_cache_all(); 502 flush_tlb_all(); 503 } 504 #endif 505 506 507 /* 508 * Just an arbitrary offset to serve as a "hole" between mapping areas 509 * (between top of physical memory and a potential pcxl dma mapping 510 * area, and below the vmalloc mapping area). 511 * 512 * The current 32K value just means that there will be a 32K "hole" 513 * between mapping areas. That means that any out-of-bounds memory 514 * accesses will hopefully be caught. The vmalloc() routines leaves 515 * a hole of 4kB between each vmalloced area for the same reason. 516 */ 517 518 /* Leave room for gateway page expansion */ 519 #if KERNEL_MAP_START < GATEWAY_PAGE_SIZE 520 #error KERNEL_MAP_START is in gateway reserved region 521 #endif 522 #define MAP_START (KERNEL_MAP_START) 523 524 #define VM_MAP_OFFSET (32*1024) 525 #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ 526 & ~(VM_MAP_OFFSET-1))) 527 528 void *parisc_vmalloc_start __ro_after_init; 529 EXPORT_SYMBOL(parisc_vmalloc_start); 530 531 void __init mem_init(void) 532 { 533 /* Do sanity checks on IPC (compat) structures */ 534 BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48); 535 #ifndef CONFIG_64BIT 536 BUILD_BUG_ON(sizeof(struct semid64_ds) != 80); 537 BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104); 538 BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104); 539 #endif 540 #ifdef CONFIG_COMPAT 541 BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm)); 542 BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80); 543 BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104); 544 BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104); 545 #endif 546 547 /* Do sanity checks on page table constants */ 548 BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t)); 549 BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t)); 550 BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t)); 551 BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD 552 > BITS_PER_LONG); 553 #if CONFIG_PGTABLE_LEVELS == 3 554 BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD); 555 #else 556 BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD); 557 #endif 558 559 #ifdef CONFIG_64BIT 560 /* avoid ldil_%L() asm statements to sign-extend into upper 32-bits */ 561 BUILD_BUG_ON(__PAGE_OFFSET >= 0x80000000); 562 BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000); 563 #endif 564 565 #ifdef CONFIG_PA11 566 if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) { 567 pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); 568 parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start 569 + PCXL_DMA_MAP_SIZE); 570 } else 571 #endif 572 parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); 573 574 #if 0 575 /* 576 * Do not expose the virtual kernel memory layout to userspace. 577 * But keep code for debugging purposes. 578 */ 579 printk("virtual kernel memory layout:\n" 580 " vmalloc : 0x%px - 0x%px (%4ld MB)\n" 581 " fixmap : 0x%px - 0x%px (%4ld kB)\n" 582 " memory : 0x%px - 0x%px (%4ld MB)\n" 583 " .init : 0x%px - 0x%px (%4ld kB)\n" 584 " .data : 0x%px - 0x%px (%4ld kB)\n" 585 " .text : 0x%px - 0x%px (%4ld kB)\n", 586 587 (void*)VMALLOC_START, (void*)VMALLOC_END, 588 (VMALLOC_END - VMALLOC_START) >> 20, 589 590 (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE), 591 (unsigned long)(FIXMAP_SIZE / 1024), 592 593 __va(0), high_memory, 594 ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20, 595 596 __init_begin, __init_end, 597 ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10, 598 599 _etext, _edata, 600 ((unsigned long)_edata - (unsigned long)_etext) >> 10, 601 602 _text, _etext, 603 ((unsigned long)_etext - (unsigned long)_text) >> 10); 604 #endif 605 } 606 607 unsigned long *empty_zero_page __ro_after_init; 608 EXPORT_SYMBOL(empty_zero_page); 609 610 /* 611 * pagetable_init() sets up the page tables 612 * 613 * Note that gateway_init() places the Linux gateway page at page 0. 614 * Since gateway pages cannot be dereferenced this has the desirable 615 * side effect of trapping those pesky NULL-reference errors in the 616 * kernel. 617 */ 618 static void __init pagetable_init(void) 619 { 620 int range; 621 622 /* Map each physical memory range to its kernel vaddr */ 623 624 for (range = 0; range < npmem_ranges; range++) { 625 unsigned long start_paddr; 626 unsigned long size; 627 628 start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT; 629 size = pmem_ranges[range].pages << PAGE_SHIFT; 630 631 map_pages((unsigned long)__va(start_paddr), start_paddr, 632 size, PAGE_KERNEL, 0); 633 } 634 635 #ifdef CONFIG_BLK_DEV_INITRD 636 if (initrd_end && initrd_end > mem_limit) { 637 printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end); 638 map_pages(initrd_start, __pa(initrd_start), 639 initrd_end - initrd_start, PAGE_KERNEL, 0); 640 } 641 #endif 642 643 empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); 644 645 } 646 647 static void __init gateway_init(void) 648 { 649 unsigned long linux_gateway_page_addr; 650 /* FIXME: This is 'const' in order to trick the compiler 651 into not treating it as DP-relative data. */ 652 extern void * const linux_gateway_page; 653 654 linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK; 655 656 /* 657 * Setup Linux Gateway page. 658 * 659 * The Linux gateway page will reside in kernel space (on virtual 660 * page 0), so it doesn't need to be aliased into user space. 661 */ 662 663 map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page), 664 PAGE_SIZE, PAGE_GATEWAY, 1); 665 } 666 667 static void __init fixmap_init(void) 668 { 669 unsigned long addr = FIXMAP_START; 670 unsigned long end = FIXMAP_START + FIXMAP_SIZE; 671 pgd_t *pgd = pgd_offset_k(addr); 672 p4d_t *p4d = p4d_offset(pgd, addr); 673 pud_t *pud = pud_offset(p4d, addr); 674 pmd_t *pmd; 675 676 BUILD_BUG_ON(FIXMAP_SIZE > PMD_SIZE); 677 678 #if CONFIG_PGTABLE_LEVELS == 3 679 if (pud_none(*pud)) { 680 pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER, 681 PAGE_SIZE << PMD_TABLE_ORDER); 682 pud_populate(NULL, pud, pmd); 683 } 684 #endif 685 686 pmd = pmd_offset(pud, addr); 687 do { 688 pte_t *pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); 689 690 pmd_populate_kernel(&init_mm, pmd, pte); 691 692 addr += PAGE_SIZE; 693 } while (addr < end); 694 } 695 696 void __init arch_zone_limits_init(unsigned long *max_zone_pfns) 697 { 698 max_zone_pfns[ZONE_NORMAL] = PFN_DOWN(memblock_end_of_DRAM()); 699 } 700 701 void __init paging_init(void) 702 { 703 setup_bootmem(); 704 pagetable_init(); 705 gateway_init(); 706 fixmap_init(); 707 flush_cache_all_local(); /* start with known state */ 708 flush_tlb_all_local(NULL); 709 } 710 711 static void alloc_btlb(unsigned long start, unsigned long end, int *slot, 712 unsigned long entry_info) 713 { 714 const int slot_max = btlb_info.fixed_range_info.num_comb; 715 int min_num_pages = btlb_info.min_size; 716 unsigned long size; 717 718 /* map at minimum 4 pages */ 719 if (min_num_pages < 4) 720 min_num_pages = 4; 721 722 size = HUGEPAGE_SIZE; 723 while (start < end && *slot < slot_max && size >= PAGE_SIZE) { 724 /* starting address must have same alignment as size! */ 725 /* if correctly aligned and fits in double size, increase */ 726 if (((start & (2 * size - 1)) == 0) && 727 (end - start) >= (2 * size)) { 728 size <<= 1; 729 continue; 730 } 731 /* if current size alignment is too big, try smaller size */ 732 if ((start & (size - 1)) != 0) { 733 size >>= 1; 734 continue; 735 } 736 if ((end - start) >= size) { 737 if ((size >> PAGE_SHIFT) >= min_num_pages) 738 pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT, 739 size >> PAGE_SHIFT, entry_info, *slot); 740 (*slot)++; 741 start += size; 742 continue; 743 } 744 size /= 2; 745 continue; 746 } 747 } 748 749 void btlb_init_per_cpu(void) 750 { 751 unsigned long s, t, e; 752 int slot; 753 754 /* BTLBs are not available on 64-bit CPUs */ 755 if (IS_ENABLED(CONFIG_PA20)) 756 return; 757 else if (pdc_btlb_info(&btlb_info) < 0) { 758 memset(&btlb_info, 0, sizeof btlb_info); 759 } 760 761 /* insert BLTLBs for code and data segments */ 762 s = (uintptr_t) dereference_function_descriptor(&_stext); 763 e = (uintptr_t) dereference_function_descriptor(&_etext); 764 t = (uintptr_t) dereference_function_descriptor(&_sdata); 765 BUG_ON(t != e); 766 767 /* code segments */ 768 slot = 0; 769 alloc_btlb(s, e, &slot, 0x13800000); 770 771 /* sanity check */ 772 t = (uintptr_t) dereference_function_descriptor(&_edata); 773 e = (uintptr_t) dereference_function_descriptor(&__bss_start); 774 BUG_ON(t != e); 775 776 /* data segments */ 777 s = (uintptr_t) dereference_function_descriptor(&_sdata); 778 e = (uintptr_t) dereference_function_descriptor(&__bss_stop); 779 alloc_btlb(s, e, &slot, 0x11800000); 780 } 781 782 #ifdef CONFIG_PA20 783 784 /* 785 * Currently, all PA20 chips have 18 bit protection IDs, which is the 786 * limiting factor (space ids are 32 bits). 787 */ 788 789 #define NR_SPACE_IDS 262144 790 791 #else 792 793 /* 794 * Currently we have a one-to-one relationship between space IDs and 795 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only 796 * support 15 bit protection IDs, so that is the limiting factor. 797 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's 798 * probably not worth the effort for a special case here. 799 */ 800 801 #define NR_SPACE_IDS 32768 802 803 #endif /* !CONFIG_PA20 */ 804 805 #define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2) 806 #define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long))) 807 808 static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */ 809 static unsigned long dirty_space_id[SID_ARRAY_SIZE]; 810 static unsigned long space_id_index; 811 static unsigned long free_space_ids = NR_SPACE_IDS - 1; 812 static unsigned long dirty_space_ids; 813 814 static DEFINE_SPINLOCK(sid_lock); 815 816 unsigned long alloc_sid(void) 817 { 818 unsigned long index; 819 820 spin_lock(&sid_lock); 821 822 if (free_space_ids == 0) { 823 if (dirty_space_ids != 0) { 824 spin_unlock(&sid_lock); 825 flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */ 826 spin_lock(&sid_lock); 827 } 828 BUG_ON(free_space_ids == 0); 829 } 830 831 free_space_ids--; 832 833 index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index); 834 space_id[BIT_WORD(index)] |= BIT_MASK(index); 835 space_id_index = index; 836 837 spin_unlock(&sid_lock); 838 839 return index << SPACEID_SHIFT; 840 } 841 842 void free_sid(unsigned long spaceid) 843 { 844 unsigned long index = spaceid >> SPACEID_SHIFT; 845 unsigned long *dirty_space_offset, mask; 846 847 dirty_space_offset = &dirty_space_id[BIT_WORD(index)]; 848 mask = BIT_MASK(index); 849 850 spin_lock(&sid_lock); 851 852 BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */ 853 854 *dirty_space_offset |= mask; 855 dirty_space_ids++; 856 857 spin_unlock(&sid_lock); 858 } 859 860 861 #ifdef CONFIG_SMP 862 static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array) 863 { 864 int i; 865 866 /* NOTE: sid_lock must be held upon entry */ 867 868 *ndirtyptr = dirty_space_ids; 869 if (dirty_space_ids != 0) { 870 for (i = 0; i < SID_ARRAY_SIZE; i++) { 871 dirty_array[i] = dirty_space_id[i]; 872 dirty_space_id[i] = 0; 873 } 874 dirty_space_ids = 0; 875 } 876 877 return; 878 } 879 880 static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array) 881 { 882 int i; 883 884 /* NOTE: sid_lock must be held upon entry */ 885 886 if (ndirty != 0) { 887 for (i = 0; i < SID_ARRAY_SIZE; i++) { 888 space_id[i] ^= dirty_array[i]; 889 } 890 891 free_space_ids += ndirty; 892 space_id_index = 0; 893 } 894 } 895 896 #else /* CONFIG_SMP */ 897 898 static void recycle_sids(void) 899 { 900 int i; 901 902 /* NOTE: sid_lock must be held upon entry */ 903 904 if (dirty_space_ids != 0) { 905 for (i = 0; i < SID_ARRAY_SIZE; i++) { 906 space_id[i] ^= dirty_space_id[i]; 907 dirty_space_id[i] = 0; 908 } 909 910 free_space_ids += dirty_space_ids; 911 dirty_space_ids = 0; 912 space_id_index = 0; 913 } 914 } 915 #endif 916 917 /* 918 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is 919 * purged, we can safely reuse the space ids that were released but 920 * not flushed from the tlb. 921 */ 922 923 #ifdef CONFIG_SMP 924 925 static unsigned long recycle_ndirty; 926 static unsigned long recycle_dirty_array[SID_ARRAY_SIZE]; 927 static unsigned int recycle_inuse; 928 929 void flush_tlb_all(void) 930 { 931 int do_recycle; 932 933 do_recycle = 0; 934 spin_lock(&sid_lock); 935 __inc_irq_stat(irq_tlb_count); 936 if (dirty_space_ids > RECYCLE_THRESHOLD) { 937 BUG_ON(recycle_inuse); /* FIXME: Use a semaphore/wait queue here */ 938 get_dirty_sids(&recycle_ndirty,recycle_dirty_array); 939 recycle_inuse++; 940 do_recycle++; 941 } 942 spin_unlock(&sid_lock); 943 on_each_cpu(flush_tlb_all_local, NULL, 1); 944 if (do_recycle) { 945 spin_lock(&sid_lock); 946 recycle_sids(recycle_ndirty,recycle_dirty_array); 947 recycle_inuse = 0; 948 spin_unlock(&sid_lock); 949 } 950 } 951 #else 952 void flush_tlb_all(void) 953 { 954 spin_lock(&sid_lock); 955 __inc_irq_stat(irq_tlb_count); 956 flush_tlb_all_local(NULL); 957 recycle_sids(); 958 spin_unlock(&sid_lock); 959 } 960 #endif 961 962 static const pgprot_t protection_map[16] = { 963 [VM_NONE] = PAGE_NONE, 964 [VM_READ] = PAGE_READONLY, 965 [VM_WRITE] = PAGE_NONE, 966 [VM_WRITE | VM_READ] = PAGE_READONLY, 967 [VM_EXEC] = PAGE_EXECREAD, 968 [VM_EXEC | VM_READ] = PAGE_EXECREAD, 969 [VM_EXEC | VM_WRITE] = PAGE_EXECREAD, 970 [VM_EXEC | VM_WRITE | VM_READ] = PAGE_EXECREAD, 971 [VM_SHARED] = PAGE_NONE, 972 [VM_SHARED | VM_READ] = PAGE_READONLY, 973 [VM_SHARED | VM_WRITE] = PAGE_WRITEONLY, 974 [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, 975 [VM_SHARED | VM_EXEC] = PAGE_EXECREAD, 976 [VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD, 977 [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX, 978 [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX 979 }; 980 DECLARE_VM_GET_PAGE_PROT 981 982 #ifdef CONFIG_EXECMEM 983 static struct execmem_info execmem_info __ro_after_init; 984 985 struct execmem_info __init *execmem_arch_setup(void) 986 { 987 execmem_info = (struct execmem_info){ 988 .ranges = { 989 [EXECMEM_DEFAULT] = { 990 .start = VMALLOC_START, 991 .end = VMALLOC_END, 992 .pgprot = PAGE_KERNEL_RWX, 993 .alignment = 1, 994 }, 995 }, 996 }; 997 998 return &execmem_info; 999 } 1000 #endif /* CONFIG_EXECMEM */ 1001