// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2006
 */

#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <asm/page-states.h>
#include <asm/abs_lowcore.h>
#include <asm/cacheflush.h>
#include <asm/maccess.h>
#include <asm/nospec-branch.h>
#include <asm/ctlreg.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
#include <asm/physmem_info.h>

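/*
 * Serializes all modifications of the kernel page tables handled here:
 * the identity (1:1) mapping, the vmemmap, and the 4k page helpers.
 */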
static DEFINE_MUTEX(vmem_mutex);

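/*
 * Allocate naturally aligned pages: from the buddy allocator once the
 * slab allocator is up, and from memblock during early boot (hence the
 * __ref annotation).
 */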
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return memblock_alloc(size, size);
}

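/*
 * Release pages to the altmap reserve they were taken from, or to the
 * buddy allocator; boot-time memblock allocations are never freed.
 */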
static void vmem_free_pages(unsigned long addr, int order, struct vmem_altmap *altmap)
{
	if (altmap) {
		vmem_altmap_free(altmap, 1 << order);
		return;
	}
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
		return;
	free_pages(addr, order);
}

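/*
 * Allocate a CRST (region or segment) table and initialize all entries
 * with the given value, e.g. _SEGMENT_ENTRY_EMPTY.
 */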
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (!table)
		return NULL;
	crst_table_init(table, val);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}

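/* Allocate a page table with all entries initialized to invalid. */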
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	__arch_set_page_dat(pte, 1);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}

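/*
 * vmemmap pages are allocated with PMD_SIZE granularity, while memory
 * sections may only use part of such a PMD. Unused sub-PMD ranges are
 * filled with the PAGE_UNUSED pattern, so a PMD whose range is entirely
 * unused can be detected with memchr_inv() and freed again.
 */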
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which has not yet been memset(PAGE_UNUSED),
 * extends from unused_sub_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_sub_pmd_start;

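/*
 * memset() of the unused tail of the last populated PMD is deferred, in
 * case the next populated range directly follows it; apply it now.
 */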
static void vmemmap_flush_unused_sub_pmd(void)
{
	if (!unused_sub_pmd_start)
		return;
	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
	unused_sub_pmd_start = 0;
}

static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece of the page as used, to block
	 * the memmap page from getting removed (just in case the memmap
	 * never gets initialized, e.g., because the memory block never gets
	 * onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}

static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (especially when populating consecutive
	 * sections).
	 */
	if (unused_sub_pmd_start == start) {
		unused_sub_pmd_start = end;
		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
			unused_sub_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_sub_pmd();
	vmemmap_mark_sub_pmd_used(start, end);
}

static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();

	/* Our memmap page might already be filled with PAGE_UNUSED ... */
	vmemmap_mark_sub_pmd_used(start, end);

	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. For the last added PMD, remember the unused
	 * range that remains in it.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_sub_pmd_start = end;
}

/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	memset((void *)start, PAGE_UNUSED, end - start);
	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}

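/*
 * The modify_*_table() helpers below walk one page-table level each and
 * add (map) or remove (unmap) the range [addr, end). "direct" selects
 * the identity mapping; for the vmemmap (!direct), backing pages are
 * additionally allocated or freed.
 */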
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct,
				  struct vmem_altmap *altmap)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages((unsigned long)pfn_to_virt(pte_pfn(*pte)), get_order(PAGE_SIZE), altmap);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block_buf(PAGE_SIZE, NUMA_NO_NODE, altmap);

				if (!new_page)
					goto out;
				set_pte(pte, __pte(__pa(new_page) | prot));
			} else {
				set_pte(pte, __pte(__pa(addr) | prot));
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}

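/* Free the PTE table if no entry is in use anymore. */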
static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
}

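/*
 * Same walk one level up: uses 1 MB segment mappings where EDAT1 is
 * available and the range is suitably aligned, and falls back to a PTE
 * table otherwise.
 */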
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct,
				  struct vmem_altmap *altmap)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_leaf(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE), altmap);
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE), altmap);
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pmd(pmd, __pmd(__pa(addr) | prot));
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would also end up
				 * with page tables, since vmemmap_populate gets
				 * called for each section separately.
				 */
				new_page = vmemmap_alloc_block_buf(PMD_SIZE, NUMA_NO_NODE, altmap);
				if (new_page) {
					set_pmd(pmd, __pmd(__pa(new_page) | prot));
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_leaf(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct, altmap);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}

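/* Free the PMD table if no entry is in use anymore. */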
static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	pmd_t *pmd;
	int i;

	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER, NULL);
	pud_clear(pud);
}

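/*
 * One level up again: maps 2 GB regions where EDAT2 is available; note
 * that huge PUDs are only ever used for the identity mapping.
 */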
static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct, struct vmem_altmap *altmap)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;
	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_leaf(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pud(pud, __pud(__pa(addr) | prot));
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_leaf(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct, altmap);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER, NULL);
	p4d_clear(p4d);
}

static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct, struct vmem_altmap *altmap)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct, altmap);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER, NULL);
	pgd_clear(pgd);
}

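/*
 * Top-level walker: creates missing upper-level tables when adding,
 * frees emptied ones when removing, and flushes the TLB after removal.
 */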
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct, struct vmem_altmap *altmap)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (WARN_ON_ONCE(end > __abs_lowcore))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct, altmap);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}

static int add_pagetable(unsigned long start, unsigned long end, bool direct,
			 struct vmem_altmap *altmap)
{
	return modify_pagetable(start, end, true, direct, altmap);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct,
			    struct vmem_altmap *altmap)
{
	return modify_pagetable(start, end, false, direct, altmap);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	return add_pagetable(start, start + size, true, NULL);
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	remove_pagetable(start, start + size, true, NULL);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false, altmap);
	if (ret)
		remove_pagetable(start, end, false, altmap);
	mutex_unlock(&vmem_mutex);
	return ret;
}

#ifdef CONFIG_MEMORY_HOTPLUG

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false, altmap);
	mutex_unlock(&vmem_mutex);
}

#endif

void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}

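/*
 * Report the physical address range that may be added by memory
 * hotplug: everything up to max_mappable.
 */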
struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = max_mappable - 1;
	return mhp_range;
}

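/*
 * Add a physical memory range to the 1:1 mapping, e.g. (illustrative
 * values only):
 *
 *	rc = vmem_add_mapping(0x100000000UL, 1UL << 20);
 *
 * Ranges exceeding the mappable limit or wrapping around yield -ERANGE;
 * a partially established mapping is torn down again on failure.
 */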
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct range range = arch_get_mappable_range();
	int ret;

	if (start < range.start ||
	    start + size > range.end + 1 ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Allocate new or return existing page-table entry, but do not map it
 * to any physical address. If missing, allocate segment- and region-
 * table entries along the way. Meeting a large segment- or region-table
 * entry while traversing is an error, since the function is expected to
 * be called against virtual regions reserved for 4KB mappings only.
 */
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
	pte_t *ptep = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		if (!alloc)
			goto out;
		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!p4d)
			goto out;
		pgd_populate(&init_mm, pgd, p4d);
	}
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		if (!alloc)
			goto out;
		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!pud)
			goto out;
		p4d_populate(&init_mm, p4d, pud);
	}
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		if (!alloc)
			goto out;
		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!pmd)
			goto out;
		pud_populate(&init_mm, pud, pmd);
	} else if (WARN_ON_ONCE(pud_leaf(*pud))) {
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		if (!alloc)
			goto out;
		pte = vmem_pte_alloc();
		if (!pte)
			goto out;
		pmd_populate(&init_mm, pmd, pte);
	} else if (WARN_ON_ONCE(pmd_leaf(*pmd))) {
		goto out;
	}
	ptep = pte_offset_kernel(pmd, addr);
out:
	return ptep;
}

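/*
 * Map a single 4k page: invalidate a previous translation with IPTE,
 * then install the new PTE. vmem_map_4k_page() below is the variant
 * that takes vmem_mutex itself.
 */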
int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
{
	pte_t *ptep, pte;

	if (!IS_ALIGNED(addr, PAGE_SIZE))
		return -EINVAL;
	ptep = vmem_get_alloc_pte(addr, alloc);
	if (!ptep)
		return -ENOMEM;
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte = mk_pte_phys(phys, prot);
	set_pte(ptep, pte);
	return 0;
}

int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	int rc;

	mutex_lock(&vmem_mutex);
	rc = __vmem_map_4k_page(addr, phys, prot, true);
	mutex_unlock(&vmem_mutex);
	return rc;
}

void vmem_unmap_4k_page(unsigned long addr)
{
	pte_t *ptep;

	mutex_lock(&vmem_mutex);
	ptep = virt_to_kpte(addr);
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte_clear(&init_mm, addr, ptep);
	mutex_unlock(&vmem_mutex);
}

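/*
 * Apply boot-time protection attributes to the kernel image: text
 * read-only and executable, rodata read-only; enable the instruction-
 * execution-protection facility when available.
 */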
void __init vmem_map_init(void)
{
	__set_memory_rox(_stext, _etext);
	__set_memory_ro(_etext, __end_rodata);
	__set_memory_rox(_sinittext, _einittext);
	__set_memory_rox(__stext_amode31, __etext_amode31);
	/*
	 * If the BEAR-enhancement facility is not installed, the first
	 * prefix page is used to return to the previous context with
	 * an LPSWE instruction and therefore must be executable.
	 */
	if (!static_key_enabled(&cpu_has_bear))
		set_memory_x(0, 1);
	if (debug_pagealloc_enabled()) {
		/*
		 * Use RELOC_HIDE() as long as __va(0) translates to NULL,
		 * since performing pointer arithmetic on a NULL pointer
		 * has undefined behavior and generates compiler warnings.
		 */
		__set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
	}
	if (MACHINE_HAS_NX)
		system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}