// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 */

#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/kasan.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return memblock_alloc(size, size);
}

static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
		return;
	free_pages(addr, order);
}

void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}

#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_sub_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_sub_pmd_start;

static void vmemmap_flush_unused_sub_pmd(void)
{
	if (!unused_sub_pmd_start)
		return;
	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
	unused_sub_pmd_start = 0;
}

static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed (just in case the memmap never gets initialized,
	 * e.g., because the memory block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}
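/*
 * When the vmemmap is backed by large (1M) frames, a single frame may carry
 * the memmap of more than one memory section. The helpers below track usage
 * at sub-PMD granularity by filling unused ranges with PAGE_UNUSED; a frame
 * is only freed once every byte of it reads PAGE_UNUSED again, so partially
 * used frames survive the removal of a neighboring section.
 */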
static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_sub_pmd_start == start) {
		unused_sub_pmd_start = end;
		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
			unused_sub_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_sub_pmd();
	vmemmap_mark_sub_pmd_used(start, end);
}

static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();

	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
	vmemmap_mark_sub_pmd_used(start, end);

	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD the last
	 * unused range in the populated PMD.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_sub_pmd_start = end;
}

/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	memset((void *)start, PAGE_UNUSED, end - start);
	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}
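/*
 * The modify_*_table() helpers below each walk one level of the kernel page
 * tables: "add" selects between populating and unpopulating entries,
 * "direct" between the 1:1 direct mapping (which maps the physical address
 * itself) and the vmemmap (which is backed by freshly allocated pages).
 */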
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

				if (!new_page)
					goto out;
				set_pte(pte, __pte(__pa(new_page) | prot));
			} else {
				set_pte(pte, __pte(__pa(addr) | prot));
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}

static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
}
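/*
 * At PMD level, fully aligned 1M ranges can be mapped by large frames: for
 * the direct mapping only if EDAT1 is available and page_alloc debugging is
 * off, for the vmemmap whenever EDAT1 is available, even if a frame ends up
 * only partially used (see the PAGE_UNUSED handling above). Analogously,
 * modify_pud_table() below uses 2G frames for the direct mapping if EDAT2
 * is available.
 */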
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_large(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pmd(pmd, __pmd(__pa(addr) | prot));
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would also need
				 * to allocate page tables, since vmemmap_populate
				 * gets called for each section separately.
				 */
				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
				if (new_page) {
					set_pmd(pmd, __pmd(__pa(new_page) | prot));
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}

static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	const unsigned long end = start + PUD_SIZE;
	pmd_t *pmd;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;

	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
	pud_clear(pud);
}

static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;
	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_large(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pud(pud, __pud(__pa(addr) | prot));
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	const unsigned long end = start + P4D_SIZE;
	pud_t *pud;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
	p4d_clear(p4d);
}

static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	const unsigned long end = start + PGDIR_SIZE;
	p4d_t *p4d;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
	pgd_clear(pgd);
}
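/*
 * Entry point of the table walk. start and end must be page aligned; on
 * removal the TLB is flushed for the whole range afterwards. The
 * try_free_*() helpers prune page tables that became completely empty, but
 * never touch tables that also cover the vmalloc area.
 */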
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}

static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, true, direct);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, false, direct);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	return add_pagetable(start, start + size, true);
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	remove_pagetable(start, start + size, true);
}
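/*
 * vmemmap_populate() and vmemmap_free() are the arch hooks invoked by the
 * generic sparse-vmemmap code when memory sections are added or removed.
 */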
/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false);
	if (ret)
		remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
}

void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}

struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = VMEM_MAX_PHYS - 1;
	return mhp_range;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct range range = arch_get_mappable_range();
	int ret;

	if (start < range.start ||
	    start + size > range.end + 1 ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}
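/*
 * Usage sketch (hypothetical caller, not part of this file): a memory
 * hotplug path would establish the 1:1 mapping before handing the range
 * over to the core mm, and tear it down again once the range goes away:
 *
 *	rc = vmem_add_mapping(start, size);
 *	if (rc)
 *		return rc;
 *	...
 *	vmem_remove_mapping(start, size);
 */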
/*
 * Allocate a new page-table entry, or return an existing one, but do not
 * map it to any physical address. If missing, allocate the required
 * segment- and region-table entries along the way. Meeting a large
 * segment- or region-table entry while traversing is an error, since the
 * function is expected to be called against virtual regions reserved for
 * 4KB mappings only.
 */
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
	pte_t *ptep = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		if (!alloc)
			goto out;
		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!p4d)
			goto out;
		pgd_populate(&init_mm, pgd, p4d);
	}
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		if (!alloc)
			goto out;
		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!pud)
			goto out;
		p4d_populate(&init_mm, p4d, pud);
	}
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		if (!alloc)
			goto out;
		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!pmd)
			goto out;
		pud_populate(&init_mm, pud, pmd);
	} else if (WARN_ON_ONCE(pud_large(*pud))) {
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		if (!alloc)
			goto out;
		pte = vmem_pte_alloc();
		if (!pte)
			goto out;
		pmd_populate(&init_mm, pmd, pte);
	} else if (WARN_ON_ONCE(pmd_large(*pmd))) {
		goto out;
	}
	ptep = pte_offset_kernel(pmd, addr);
out:
	return ptep;
}

int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
{
	pte_t *ptep, pte;

	if (!IS_ALIGNED(addr, PAGE_SIZE))
		return -EINVAL;
	ptep = vmem_get_alloc_pte(addr, alloc);
	if (!ptep)
		return -ENOMEM;
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte = mk_pte_phys(phys, prot);
	set_pte(ptep, pte);
	return 0;
}

int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	int rc;

	mutex_lock(&vmem_mutex);
	rc = __vmem_map_4k_page(addr, phys, prot, true);
	mutex_unlock(&vmem_mutex);
	return rc;
}

void vmem_unmap_4k_page(unsigned long addr)
{
	pte_t *ptep;

	mutex_lock(&vmem_mutex);
	ptep = virt_to_kpte(addr);
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte_clear(&init_mm, addr, ptep);
	mutex_unlock(&vmem_mutex);
}
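/*
 * sort() helpers: order the exclude regions of vmem_map_init() by ascending
 * base address, as required by __for_each_mem_range().
 */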
static int __init memblock_region_cmp(const void *a, const void *b)
{
	const struct memblock_region *r1 = a;
	const struct memblock_region *r2 = b;

	if (r1->base < r2->base)
		return -1;
	if (r1->base > r2->base)
		return 1;
	return 0;
}

static void __init memblock_region_swap(void *a, void *b, int size)
{
	swap(*(struct memblock_region *)a, *(struct memblock_region *)b);
}

#ifdef CONFIG_KASAN
#define __sha(x)	((unsigned long)kasan_mem_to_shadow((void *)x))

static inline int set_memory_kasan(unsigned long start, unsigned long end)
{
	start = PAGE_ALIGN_DOWN(__sha(start));
	end = PAGE_ALIGN(__sha(end));
	return set_memory_rwnx(start, (end - start) >> PAGE_SHIFT);
}
#endif

/*
 * Map the whole physical memory into virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap to allow
 * hotplugging additional memory segments.
 */
void __init vmem_map_init(void)
{
	struct memblock_region memory_rwx_regions[] = {
		{
			.base = 0,
			.size = sizeof(struct lowcore),
			.flags = MEMBLOCK_NONE,
#ifdef CONFIG_NUMA
			.nid = NUMA_NO_NODE,
#endif
		},
		{
			.base = __pa(_stext),
			.size = _etext - _stext,
			.flags = MEMBLOCK_NONE,
#ifdef CONFIG_NUMA
			.nid = NUMA_NO_NODE,
#endif
		},
		{
			.base = __pa(_sinittext),
			.size = _einittext - _sinittext,
			.flags = MEMBLOCK_NONE,
#ifdef CONFIG_NUMA
			.nid = NUMA_NO_NODE,
#endif
		},
		{
			.base = __stext_amode31,
			.size = __etext_amode31 - __stext_amode31,
			.flags = MEMBLOCK_NONE,
#ifdef CONFIG_NUMA
			.nid = NUMA_NO_NODE,
#endif
		},
	};
	struct memblock_type memory_rwx = {
		.regions = memory_rwx_regions,
		.cnt = ARRAY_SIZE(memory_rwx_regions),
		.max = ARRAY_SIZE(memory_rwx_regions),
	};
	phys_addr_t base, end;
	u64 i;

	/*
	 * Set the RW+NX attribute on all memory, except for the regions
	 * enumerated in the memory_rwx exclude type. Those regions need
	 * different attributes, which are enforced afterwards.
	 *
	 * Both the iterated and the excluded memblock types passed to
	 * __for_each_mem_range() must be sorted. The relative location of
	 * _stext and _sinittext is hardcoded in the linker script. However,
	 * the location of __stext_amode31 and of the kernel image itself is
	 * chosen dynamically. Thus, sort the exclude type.
	 */
	sort(&memory_rwx_regions,
	     ARRAY_SIZE(memory_rwx_regions), sizeof(memory_rwx_regions[0]),
	     memblock_region_cmp, memblock_region_swap);
	__for_each_mem_range(i, &memblock.memory, &memory_rwx,
			     NUMA_NO_NODE, MEMBLOCK_NONE, &base, &end, NULL) {
		set_memory_rwnx((unsigned long)__va(base),
				(end - base) >> PAGE_SHIFT);
	}

#ifdef CONFIG_KASAN
	for_each_mem_range(i, &base, &end)
		set_memory_kasan(base, end);
#endif
	set_memory_rox((unsigned long)_stext,
		       (unsigned long)(_etext - _stext) >> PAGE_SHIFT);
	set_memory_ro((unsigned long)_etext,
		      (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT);
	set_memory_rox((unsigned long)_sinittext,
		       (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
	set_memory_rox(__stext_amode31,
		       (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT);

	/* without BEAR, the lowcore must stay executable for LPSWE */
	if (static_key_enabled(&cpu_has_bear))
		set_memory_nx(0, 1);
	set_memory_nx(PAGE_SIZE, 1);

	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}