// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2006
 */

#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <asm/page-states.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
#include <asm/ctlreg.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return memblock_alloc(size, size);
}
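
/*
 * A minimal usage sketch of the boot-vs-runtime duality above (the order
 * value is purely illustrative):
 *
 *	void *table = vmem_alloc_pages(0);
 *
 * Before slab_is_available() this comes from memblock and is expected to
 * stay around forever; afterwards it comes from the buddy allocator and
 * can be given back with vmem_free_pages().
 */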

static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
		return;
	free_pages(addr, order);
}

void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (!table)
		return NULL;
	crst_table_init(table, val);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}
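
/*
 * Callers hook a freshly initialized crst table into the next-higher
 * level. A sketch of the pattern, as used by the modify_*_table()
 * helpers below (error handling shortened):
 *
 *	pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
 *	if (!pmd)
 *		return -ENOMEM;
 *	pud_populate(&init_mm, pud, pmd);
 */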

pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	__arch_set_page_dat(pte, 1);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}

#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED),
 * ranges from unused_sub_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_sub_pmd_start;

static void vmemmap_flush_unused_sub_pmd(void)
{
	if (!unused_sub_pmd_start)
		return;
	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
	unused_sub_pmd_start = 0;
}

static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
	/*
	 * Since we expect to add memory at the same granularity at which we
	 * remove it, it is sufficient to mark only some piece as used to
	 * keep the memmap page from getting removed (just in case the memmap
	 * never gets initialized, e.g., because the memory block never gets
	 * onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}

static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_sub_pmd_start == start) {
		unused_sub_pmd_start = end;
		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
			unused_sub_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_sub_pmd();
	vmemmap_mark_sub_pmd_used(start, end);
}

static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();

	/* Our memmap page might already be filled with PAGE_UNUSED ... */
	vmemmap_mark_sub_pmd_used(start, end);

	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. For the last added PMD, remember the last
	 * unused range in that PMD instead.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_sub_pmd_start = end;
}

/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	memset((void *)start, PAGE_UNUSED, end - start);
	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}
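
/*
 * Lifecycle sketch of the PAGE_UNUSED tracking above, assuming (purely for
 * illustration) vmemmap ranges [a, b) and [b, c) that only partially cover
 * a single PMD-sized memmap page:
 *
 *	vmemmap_use_new_sub_pmd(a, b)	PMD freshly populated; the head before
 *					a is memset(PAGE_UNUSED), the tail past
 *					b is only remembered in
 *					unused_sub_pmd_start
 *	vmemmap_use_sub_pmd(b, c)	directly follows the remembered range,
 *					so unused_sub_pmd_start just moves on
 *	vmemmap_unuse_sub_pmd(a, c)	memsets [a, c) to PAGE_UNUSED and
 *					reports whether the whole PMD page can
 *					be freed
 */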

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

				if (!new_page)
					goto out;
				set_pte(pte, __pte(__pa(new_page) | prot));
			} else {
				set_pte(pte, __pte(__pa(addr) | prot));
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}
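
/*
 * Note the direct/!direct split above: for the 1:1 ("direct") mapping each
 * PTE simply points at the identity address, __pa(addr), whereas for the
 * vmemmap a backing page has to be allocated on add and freed again on
 * removal.
 */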

static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
}

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_large(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pmd(pmd, __pmd(__pa(addr) | prot));
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames, even if they are
				 * only partially used: otherwise we would also
				 * end up with page tables, since
				 * vmemmap_populate() gets called for each
				 * section separately.
				 */
				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
				if (new_page) {
					set_pmd(pmd, __pmd(__pa(new_page) | prot));
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}
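
/*
 * Summary of the large-segment policy above: 1MB mappings are used for the
 * direct map only with EDAT1, on properly aligned ranges, and with
 * debug_pagealloc off (the latter wants 4K mappings throughout); for the
 * vmemmap they are used whenever EDAT1 is available, accepting partially
 * used frames in exchange for fewer page tables.
 */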

static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	pmd_t *pmd;
	int i;

	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
	pud_clear(pud);
}

static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;
	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_large(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pud(pud, __pud(__pa(addr) | prot));
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}
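
/*
 * Same pattern one level up: with EDAT2, suitably aligned 2GB pieces of the
 * direct map are covered by a single region-third entry; the vmemmap never
 * uses 2GB mappings here.
 */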

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
	p4d_clear(p4d);
}

static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
	pgd_clear(pgd);
}

static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (WARN_ON_ONCE(end > VMALLOC_START))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}
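
/*
 * Note that the kernel TLB is only flushed when removing translations:
 * adding entries only ever turns invalid translations into valid ones, so
 * no stale TLB contents can exist for them.
 */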

static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, true, direct);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, false, direct);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	return add_pagetable(start, start + size, true);
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	remove_pagetable(start, start + size, true);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false);
	if (ret)
		remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
	return ret;
}
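
/*
 * If populating fails half-way, the whole range is walked again for removal
 * before returning, so callers never see a partially populated vmemmap.
 */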

#ifdef CONFIG_MEMORY_HOTPLUG

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
}

#endif

void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}

struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = max_mappable - 1;
	return mhp_range;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct range range = arch_get_mappable_range();
	int ret;

	if (start < range.start ||
	    start + size > range.end + 1 ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}
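
/*
 * The three range checks above reject, in order: ranges starting below the
 * mappable window, ranges ending beyond it, and ranges whose start + size
 * wraps around. A sketch with made-up numbers, assuming max_mappable were
 * 1TB: start = 1TB - 4K with size = 8K fails the second check, while
 * start = -4K (as an unsigned long) with size = 8K wraps and is caught by
 * the third.
 */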

/*
 * Allocate new or return existing page-table entry, but do not map it
 * to any physical address. If missing, allocate segment- and region-table
 * entries along the way. Encountering a large segment- or region-table
 * entry while traversing is an error, since the function is expected to be
 * called on virtual regions reserved for 4KB mappings only.
 */
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
	pte_t *ptep = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		if (!alloc)
			goto out;
		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!p4d)
			goto out;
		pgd_populate(&init_mm, pgd, p4d);
	}
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		if (!alloc)
			goto out;
		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!pud)
			goto out;
		p4d_populate(&init_mm, p4d, pud);
	}
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		if (!alloc)
			goto out;
		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!pmd)
			goto out;
		pud_populate(&init_mm, pud, pmd);
	} else if (WARN_ON_ONCE(pud_large(*pud))) {
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		if (!alloc)
			goto out;
		pte = vmem_pte_alloc();
		if (!pte)
			goto out;
		pmd_populate(&init_mm, pmd, pte);
	} else if (WARN_ON_ONCE(pmd_large(*pmd))) {
		goto out;
	}
	ptep = pte_offset_kernel(pmd, addr);
out:
	return ptep;
}

int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
{
	pte_t *ptep, pte;

	if (!IS_ALIGNED(addr, PAGE_SIZE))
		return -EINVAL;
	ptep = vmem_get_alloc_pte(addr, alloc);
	if (!ptep)
		return -ENOMEM;
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte = mk_pte_phys(phys, prot);
	set_pte(ptep, pte);
	return 0;
}
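
/*
 * The __ptep_ipte() above invalidates a possibly pre-existing translation,
 * machine-wide via IPTE_GLOBAL, before the new PTE is stored, so remapping
 * an already mapped address is safe.
 */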

int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	int rc;

	mutex_lock(&vmem_mutex);
	rc = __vmem_map_4k_page(addr, phys, prot, true);
	mutex_unlock(&vmem_mutex);
	return rc;
}

void vmem_unmap_4k_page(unsigned long addr)
{
	pte_t *ptep;

	mutex_lock(&vmem_mutex);
	ptep = virt_to_kpte(addr);
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte_clear(&init_mm, addr, ptep);
	mutex_unlock(&vmem_mutex);
}

void __init vmem_map_init(void)
{
	__set_memory_rox(_stext, _etext);
	__set_memory_ro(_etext, __end_rodata);
	__set_memory_rox(_sinittext, _einittext);
	__set_memory_rox(__stext_amode31, __etext_amode31);
	/*
	 * If the BEAR-enhancement facility is not installed, the first
	 * prefix page is used to return to the previous context with
	 * an LPSWE instruction and therefore must be executable.
	 */
	if (!static_key_enabled(&cpu_has_bear))
		set_memory_x(0, 1);
	if (debug_pagealloc_enabled()) {
		/*
		 * Use RELOC_HIDE() as long as __va(0) translates to NULL,
		 * since performing pointer arithmetic on a NULL pointer
		 * has undefined behavior and generates compiler warnings.
		 */
		__set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
	}
	if (MACHINE_HAS_NX)
		system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}