xref: /linux/arch/arm64/mm/mmu.c (revision bfb921b2a9d5d1123d1d10b196a39db629ddef87)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Based on arch/arm/mm/mmu.c
4  *
5  * Copyright (C) 1995-2005 Russell King
6  * Copyright (C) 2012 ARM Ltd.
7  */
8 
9 #include <linux/cache.h>
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/init.h>
14 #include <linux/ioport.h>
15 #include <linux/kexec.h>
16 #include <linux/libfdt.h>
17 #include <linux/mman.h>
18 #include <linux/nodemask.h>
19 #include <linux/memblock.h>
20 #include <linux/memremap.h>
21 #include <linux/memory.h>
22 #include <linux/fs.h>
23 #include <linux/io.h>
24 #include <linux/mm.h>
25 #include <linux/vmalloc.h>
26 #include <linux/set_memory.h>
27 #include <linux/kfence.h>
28 
29 #include <asm/barrier.h>
30 #include <asm/cputype.h>
31 #include <asm/fixmap.h>
32 #include <asm/kasan.h>
33 #include <asm/kernel-pgtable.h>
34 #include <asm/sections.h>
35 #include <asm/setup.h>
36 #include <linux/sizes.h>
37 #include <asm/tlb.h>
38 #include <asm/mmu_context.h>
39 #include <asm/ptdump.h>
40 #include <asm/tlbflush.h>
41 #include <asm/pgalloc.h>
42 #include <asm/kfence.h>
43 
44 #define NO_BLOCK_MAPPINGS	BIT(0)
45 #define NO_CONT_MAPPINGS	BIT(1)
46 #define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */
47 
48 u64 kimage_voffset __ro_after_init;
49 EXPORT_SYMBOL(kimage_voffset);
50 
51 u32 __boot_cpu_mode[] = { BOOT_CPU_MODE_EL2, BOOT_CPU_MODE_EL1 };
52 
53 static bool rodata_is_rw __ro_after_init = true;
54 
55 /*
56  * The booting CPU updates the failure status in @__early_cpu_boot_status
57  * while the MMU is turned off.
58  */
59 long __section(".mmuoff.data.write") __early_cpu_boot_status;
60 
61 /*
62  * Empty_zero_page is a special page that is used for zero-initialized data
63  * and COW.
64  */
65 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
66 EXPORT_SYMBOL(empty_zero_page);
67 
68 static DEFINE_SPINLOCK(swapper_pgdir_lock);
69 static DEFINE_MUTEX(fixmap_lock);
70 
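/*
 * Update an entry in swapper_pg_dir. Before mark_rodata_ro() clears
 * rodata_is_rw, the PGD is still writable through the kernel mapping and
 * can be updated directly; afterwards the write has to go through a
 * temporary fixmap alias of the PGD page.
 */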
71 void noinstr set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
72 {
73 	pgd_t *fixmap_pgdp;
74 
75 	/*
76 	 * Don't bother with the fixmap if swapper_pg_dir is still mapped
77 	 * writable in the kernel mapping.
78 	 */
79 	if (rodata_is_rw) {
80 		WRITE_ONCE(*pgdp, pgd);
81 		dsb(ishst);
82 		isb();
83 		return;
84 	}
85 
86 	spin_lock(&swapper_pgdir_lock);
87 	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
88 	WRITE_ONCE(*fixmap_pgdp, pgd);
89 	/*
90 	 * We need dsb(ishst) here to ensure the page-table-walker sees
91 	 * our new entry before set_p?d() returns. The fixmap's
92 	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
93 	 */
94 	pgd_clear_fixmap();
95 	spin_unlock(&swapper_pgdir_lock);
96 }
97 
98 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
99 			      unsigned long size, pgprot_t vma_prot)
100 {
101 	if (!pfn_is_map_memory(pfn))
102 		return pgprot_noncached(vma_prot);
103 	else if (file->f_flags & O_SYNC)
104 		return pgprot_writecombine(vma_prot);
105 	return vma_prot;
106 }
107 EXPORT_SYMBOL(phys_mem_access_prot);
108 
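/*
 * Allocate one page from memblock for use as an early page table. The page
 * is cleared later by init_clear_pgtable() once it has been mapped through
 * the fixmap (see the alloc_init_*() callers below).
 */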
109 static phys_addr_t __init early_pgtable_alloc(int shift)
110 {
111 	phys_addr_t phys;
112 
113 	phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
114 					 MEMBLOCK_ALLOC_NOLEAKTRACE);
115 	if (!phys)
116 		panic("Failed to allocate page table page\n");
117 
118 	return phys;
119 }
120 
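/*
 * Check whether a live kernel page-table entry can be updated in place,
 * i.e. without break-before-make. Creating or tearing down an entry is
 * always safe; for a valid-to-valid change the pfn must not move, the
 * entry must not be part of a contiguous range, a non-global entry may
 * not become global, and otherwise only the attributes in @mask below
 * (or a switch between Normal and Normal-Tagged) may differ.
 */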
121 bool pgattr_change_is_safe(u64 old, u64 new)
122 {
123 	/*
124 	 * The following mapping attributes may be updated in live
125 	 * kernel mappings without the need for break-before-make.
126 	 */
127 	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
128 
129 	/* creating or taking down mappings is always safe */
130 	if (!pte_valid(__pte(old)) || !pte_valid(__pte(new)))
131 		return true;
132 
133 	/* A live entry's pfn should not change */
134 	if (pte_pfn(__pte(old)) != pte_pfn(__pte(new)))
135 		return false;
136 
137 	/* live contiguous mappings may not be manipulated at all */
138 	if ((old | new) & PTE_CONT)
139 		return false;
140 
141 	/* Transitioning from Non-Global to Global is unsafe */
142 	if (old & ~new & PTE_NG)
143 		return false;
144 
145 	/*
146 	 * Changing the memory type between Normal and Normal-Tagged is safe
147 	 * since Tagged is considered a permission attribute from the
148 	 * perspective of mismatched attribute aliases.
149 	 */
150 	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
151 	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
152 	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
153 	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
154 		mask |= PTE_ATTRINDX_MASK;
155 
156 	return ((old ^ new) & ~mask) == 0;
157 }
158 
159 static void init_clear_pgtable(void *table)
160 {
161 	clear_page(table);
162 
163 	/* Ensure the zeroing is observed by page table walks. */
164 	dsb(ishst);
165 }
166 
167 static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
168 		     phys_addr_t phys, pgprot_t prot)
169 {
170 	do {
171 		pte_t old_pte = __ptep_get(ptep);
172 
173 		/*
174 		 * Required barriers to make this visible to the table walker
175 		 * are deferred to the end of alloc_init_cont_pte().
176 		 */
177 		__set_pte_nosync(ptep, pfn_pte(__phys_to_pfn(phys), prot));
178 
179 		/*
180 		 * After the PTE entry has been populated once, we
181 		 * only allow updates to the permission attributes.
182 		 */
183 		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
184 					      pte_val(__ptep_get(ptep))));
185 
186 		phys += PAGE_SIZE;
187 	} while (ptep++, addr += PAGE_SIZE, addr != end);
188 }
189 
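/*
 * Create PTE-level mappings for [addr, end). If *pmdp is empty, a new PTE
 * table is allocated, zeroed and hooked up first. Sub-ranges whose virtual
 * and physical addresses are CONT_PTE_SIZE aligned get the PTE_CONT hint,
 * unless NO_CONT_MAPPINGS was passed in @flags.
 */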
190 static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
191 				unsigned long end, phys_addr_t phys,
192 				pgprot_t prot,
193 				phys_addr_t (*pgtable_alloc)(int),
194 				int flags)
195 {
196 	unsigned long next;
197 	pmd_t pmd = READ_ONCE(*pmdp);
198 	pte_t *ptep;
199 
200 	BUG_ON(pmd_sect(pmd));
201 	if (pmd_none(pmd)) {
202 		pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN;
203 		phys_addr_t pte_phys;
204 
205 		if (flags & NO_EXEC_MAPPINGS)
206 			pmdval |= PMD_TABLE_PXN;
207 		BUG_ON(!pgtable_alloc);
208 		pte_phys = pgtable_alloc(PAGE_SHIFT);
209 		ptep = pte_set_fixmap(pte_phys);
210 		init_clear_pgtable(ptep);
211 		ptep += pte_index(addr);
212 		__pmd_populate(pmdp, pte_phys, pmdval);
213 	} else {
214 		BUG_ON(pmd_bad(pmd));
215 		ptep = pte_set_fixmap_offset(pmdp, addr);
216 	}
217 
218 	do {
219 		pgprot_t __prot = prot;
220 
221 		next = pte_cont_addr_end(addr, end);
222 
223 		/* use a contiguous mapping if the range is suitably aligned */
224 		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
225 		    (flags & NO_CONT_MAPPINGS) == 0)
226 			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
227 
228 		init_pte(ptep, addr, next, phys, __prot);
229 
230 		ptep += pte_index(next) - pte_index(addr);
231 		phys += next - addr;
232 	} while (addr = next, addr != end);
233 
234 	/*
235 	 * Note: barriers and maintenance necessary to clear the fixmap slot
236 	 * ensure that all previous pgtable writes are visible to the table
237 	 * walker.
238 	 */
239 	pte_clear_fixmap();
240 }
241 
242 static void init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
243 		     phys_addr_t phys, pgprot_t prot,
244 		     phys_addr_t (*pgtable_alloc)(int), int flags)
245 {
246 	unsigned long next;
247 
248 	do {
249 		pmd_t old_pmd = READ_ONCE(*pmdp);
250 
251 		next = pmd_addr_end(addr, end);
252 
253 		/* try section mapping first */
254 		if (((addr | next | phys) & ~PMD_MASK) == 0 &&
255 		    (flags & NO_BLOCK_MAPPINGS) == 0) {
256 			pmd_set_huge(pmdp, phys, prot);
257 
258 			/*
259 			 * After the PMD entry has been populated once, we
260 			 * only allow updates to the permission attributes.
261 			 */
262 			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
263 						      READ_ONCE(pmd_val(*pmdp))));
264 		} else {
265 			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
266 					    pgtable_alloc, flags);
267 
268 			BUG_ON(pmd_val(old_pmd) != 0 &&
269 			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
270 		}
271 		phys += next - addr;
272 	} while (pmdp++, addr = next, addr != end);
273 }
274 
275 static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
276 				unsigned long end, phys_addr_t phys,
277 				pgprot_t prot,
278 				phys_addr_t (*pgtable_alloc)(int), int flags)
279 {
280 	unsigned long next;
281 	pud_t pud = READ_ONCE(*pudp);
282 	pmd_t *pmdp;
283 
284 	/*
285 	 * Check for initial section mappings in the pgd/pud.
286 	 */
287 	BUG_ON(pud_sect(pud));
288 	if (pud_none(pud)) {
289 		pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN;
290 		phys_addr_t pmd_phys;
291 
292 		if (flags & NO_EXEC_MAPPINGS)
293 			pudval |= PUD_TABLE_PXN;
294 		BUG_ON(!pgtable_alloc);
295 		pmd_phys = pgtable_alloc(PMD_SHIFT);
296 		pmdp = pmd_set_fixmap(pmd_phys);
297 		init_clear_pgtable(pmdp);
298 		pmdp += pmd_index(addr);
299 		__pud_populate(pudp, pmd_phys, pudval);
300 	} else {
301 		BUG_ON(pud_bad(pud));
302 		pmdp = pmd_set_fixmap_offset(pudp, addr);
303 	}
304 
305 	do {
306 		pgprot_t __prot = prot;
307 
308 		next = pmd_cont_addr_end(addr, end);
309 
310 		/* use a contiguous mapping if the range is suitably aligned */
311 		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
312 		    (flags & NO_CONT_MAPPINGS) == 0)
313 			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
314 
315 		init_pmd(pmdp, addr, next, phys, __prot, pgtable_alloc, flags);
316 
317 		pmdp += pmd_index(next) - pmd_index(addr);
318 		phys += next - addr;
319 	} while (addr = next, addr != end);
320 
321 	pmd_clear_fixmap();
322 }
323 
324 static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
325 			   phys_addr_t phys, pgprot_t prot,
326 			   phys_addr_t (*pgtable_alloc)(int),
327 			   int flags)
328 {
329 	unsigned long next;
330 	p4d_t p4d = READ_ONCE(*p4dp);
331 	pud_t *pudp;
332 
333 	if (p4d_none(p4d)) {
334 		p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN;
335 		phys_addr_t pud_phys;
336 
337 		if (flags & NO_EXEC_MAPPINGS)
338 			p4dval |= P4D_TABLE_PXN;
339 		BUG_ON(!pgtable_alloc);
340 		pud_phys = pgtable_alloc(PUD_SHIFT);
341 		pudp = pud_set_fixmap(pud_phys);
342 		init_clear_pgtable(pudp);
343 		pudp += pud_index(addr);
344 		__p4d_populate(p4dp, pud_phys, p4dval);
345 	} else {
346 		BUG_ON(p4d_bad(p4d));
347 		pudp = pud_set_fixmap_offset(p4dp, addr);
348 	}
349 
350 	do {
351 		pud_t old_pud = READ_ONCE(*pudp);
352 
353 		next = pud_addr_end(addr, end);
354 
355 		/*
356 		 * For 4K granule only, attempt to put down a 1GB block
357 		 */
358 		if (pud_sect_supported() &&
359 		   ((addr | next | phys) & ~PUD_MASK) == 0 &&
360 		    (flags & NO_BLOCK_MAPPINGS) == 0) {
361 			pud_set_huge(pudp, phys, prot);
362 
363 			/*
364 			 * After the PUD entry has been populated once, we
365 			 * only allow updates to the permission attributes.
366 			 */
367 			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
368 						      READ_ONCE(pud_val(*pudp))));
369 		} else {
370 			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
371 					    pgtable_alloc, flags);
372 
373 			BUG_ON(pud_val(old_pud) != 0 &&
374 			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
375 		}
376 		phys += next - addr;
377 	} while (pudp++, addr = next, addr != end);
378 
379 	pud_clear_fixmap();
380 }
381 
382 static void alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
383 			   phys_addr_t phys, pgprot_t prot,
384 			   phys_addr_t (*pgtable_alloc)(int),
385 			   int flags)
386 {
387 	unsigned long next;
388 	pgd_t pgd = READ_ONCE(*pgdp);
389 	p4d_t *p4dp;
390 
391 	if (pgd_none(pgd)) {
392 		pgdval_t pgdval = PGD_TYPE_TABLE | PGD_TABLE_UXN;
393 		phys_addr_t p4d_phys;
394 
395 		if (flags & NO_EXEC_MAPPINGS)
396 			pgdval |= PGD_TABLE_PXN;
397 		BUG_ON(!pgtable_alloc);
398 		p4d_phys = pgtable_alloc(P4D_SHIFT);
399 		p4dp = p4d_set_fixmap(p4d_phys);
400 		init_clear_pgtable(p4dp);
401 		p4dp += p4d_index(addr);
402 		__pgd_populate(pgdp, p4d_phys, pgdval);
403 	} else {
404 		BUG_ON(pgd_bad(pgd));
405 		p4dp = p4d_set_fixmap_offset(pgdp, addr);
406 	}
407 
408 	do {
409 		p4d_t old_p4d = READ_ONCE(*p4dp);
410 
411 		next = p4d_addr_end(addr, end);
412 
413 		alloc_init_pud(p4dp, addr, next, phys, prot,
414 			       pgtable_alloc, flags);
415 
416 		BUG_ON(p4d_val(old_p4d) != 0 &&
417 		       p4d_val(old_p4d) != READ_ONCE(p4d_val(*p4dp)));
418 
419 		phys += next - addr;
420 	} while (p4dp++, addr = next, addr != end);
421 
422 	p4d_clear_fixmap();
423 }
424 
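/*
 * Create or update mappings for [virt, virt + size) -> phys in @pgdir with
 * the given @prot. Missing intermediate tables are allocated through
 * @pgtable_alloc, and @flags (NO_BLOCK_MAPPINGS, NO_CONT_MAPPINGS,
 * NO_EXEC_MAPPINGS) restrict which entry types may be used. Tables are
 * edited through the fixmap, so callers normally go through
 * __create_pgd_mapping(), which serializes on fixmap_lock.
 */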
425 static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
426 					unsigned long virt, phys_addr_t size,
427 					pgprot_t prot,
428 					phys_addr_t (*pgtable_alloc)(int),
429 					int flags)
430 {
431 	unsigned long addr, end, next;
432 	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);
433 
434 	/*
435 	 * If the virtual and physical address don't have the same offset
436 	 * within a page, we cannot map the region as the caller expects.
437 	 */
438 	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
439 		return;
440 
441 	phys &= PAGE_MASK;
442 	addr = virt & PAGE_MASK;
443 	end = PAGE_ALIGN(virt + size);
444 
445 	do {
446 		next = pgd_addr_end(addr, end);
447 		alloc_init_p4d(pgdp, addr, next, phys, prot, pgtable_alloc,
448 			       flags);
449 		phys += next - addr;
450 	} while (pgdp++, addr = next, addr != end);
451 }
452 
453 static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
454 				 unsigned long virt, phys_addr_t size,
455 				 pgprot_t prot,
456 				 phys_addr_t (*pgtable_alloc)(int),
457 				 int flags)
458 {
459 	mutex_lock(&fixmap_lock);
460 	__create_pgd_mapping_locked(pgdir, phys, virt, size, prot,
461 				    pgtable_alloc, flags);
462 	mutex_unlock(&fixmap_lock);
463 }
464 
465 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
466 extern __alias(__create_pgd_mapping_locked)
467 void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
468 			     phys_addr_t size, pgprot_t prot,
469 			     phys_addr_t (*pgtable_alloc)(int), int flags);
470 #endif
471 
472 static phys_addr_t __pgd_pgtable_alloc(int shift)
473 {
474 	/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
475 	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL & ~__GFP_ZERO);
476 
477 	BUG_ON(!ptr);
478 	return __pa(ptr);
479 }
480 
481 static phys_addr_t pgd_pgtable_alloc(int shift)
482 {
483 	phys_addr_t pa = __pgd_pgtable_alloc(shift);
484 	struct ptdesc *ptdesc = page_ptdesc(phys_to_page(pa));
485 
486 	/*
487 	 * Call proper page table ctor in case later we need to
488 	 * call core mm functions like apply_to_page_range() on
489 	 * this pre-allocated page table.
490 	 *
491 	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
492 	 * folded, and if so pagetable_pmd_ctor() becomes a nop.
493 	 */
494 	if (shift == PAGE_SHIFT)
495 		BUG_ON(!pagetable_pte_ctor(ptdesc));
496 	else if (shift == PMD_SHIFT)
497 		BUG_ON(!pagetable_pmd_ctor(ptdesc));
498 
499 	return pa;
500 }
501 
502 /*
503  * This function can only be used to modify existing table entries,
504  * without allocating new levels of table. Note that this permits the
505  * creation of new section or page entries.
506  */
507 void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
508 				   phys_addr_t size, pgprot_t prot)
509 {
510 	if (virt < PAGE_OFFSET) {
511 		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
512 			&phys, virt);
513 		return;
514 	}
515 	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
516 			     NO_CONT_MAPPINGS);
517 }
518 
519 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
520 			       unsigned long virt, phys_addr_t size,
521 			       pgprot_t prot, bool page_mappings_only)
522 {
523 	int flags = 0;
524 
525 	BUG_ON(mm == &init_mm);
526 
527 	if (page_mappings_only)
528 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
529 
530 	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
531 			     pgd_pgtable_alloc, flags);
532 }
533 
534 static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
535 				phys_addr_t size, pgprot_t prot)
536 {
537 	if (virt < PAGE_OFFSET) {
538 		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
539 			&phys, virt);
540 		return;
541 	}
542 
543 	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
544 			     NO_CONT_MAPPINGS);
545 
546 	/* flush the TLBs after updating live kernel mappings */
547 	flush_tlb_kernel_range(virt, virt + size);
548 }
549 
550 static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
551 				  phys_addr_t end, pgprot_t prot, int flags)
552 {
553 	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
554 			     prot, early_pgtable_alloc, flags);
555 }
556 
557 void __init mark_linear_text_alias_ro(void)
558 {
559 	/*
560 	 * Remove the write permissions from the linear alias of .text/.rodata
561 	 */
562 	update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext),
563 			    (unsigned long)__init_begin - (unsigned long)_stext,
564 			    PAGE_KERNEL_RO);
565 }
566 
567 #ifdef CONFIG_KFENCE
568 
569 bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
570 
571 /* early_param() will be parsed before map_mem() below. */
572 static int __init parse_kfence_early_init(char *arg)
573 {
574 	int val;
575 
576 	if (get_option(&arg, &val))
577 		kfence_early_init = !!val;
578 	return 0;
579 }
580 early_param("kfence.sample_interval", parse_kfence_early_init);
581 
582 static phys_addr_t __init arm64_kfence_alloc_pool(void)
583 {
584 	phys_addr_t kfence_pool;
585 
586 	if (!kfence_early_init)
587 		return 0;
588 
589 	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
590 	if (!kfence_pool) {
591 		pr_err("failed to allocate kfence pool\n");
592 		kfence_early_init = false;
593 		return 0;
594 	}
595 
596 	/* Temporarily mark as NOMAP. */
597 	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
598 
599 	return kfence_pool;
600 }
601 
602 static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
603 {
604 	if (!kfence_pool)
605 		return;
606 
607 	/* KFENCE pool needs page-level mapping. */
608 	__map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
609 			pgprot_tagged(PAGE_KERNEL),
610 			NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
611 	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
612 	__kfence_pool = phys_to_virt(kfence_pool);
613 }
614 #else /* CONFIG_KFENCE */
615 
616 static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
617 static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }
618 
619 #endif /* CONFIG_KFENCE */
620 
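/*
 * Create the linear mapping of all memblock memory in @pgdp. The whole
 * linear map is non-executable; the alias of [_stext, __init_begin) is
 * additionally mapped without contiguous entries so that its write
 * permission can be removed later by mark_linear_text_alias_ro().
 */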
621 static void __init map_mem(pgd_t *pgdp)
622 {
623 	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
624 	phys_addr_t kernel_start = __pa_symbol(_stext);
625 	phys_addr_t kernel_end = __pa_symbol(__init_begin);
626 	phys_addr_t start, end;
627 	phys_addr_t early_kfence_pool;
628 	int flags = NO_EXEC_MAPPINGS;
629 	u64 i;
630 
631 	/*
632 	 * Setting hierarchical PXNTable attributes on table entries covering
633 	 * the linear region is only possible if it is guaranteed that no table
634 	 * entries at any level are being shared between the linear region and
635 	 * the vmalloc region. Check whether this is true for the PGD level, in
636 	 * which case it is guaranteed to be true for all other levels as well.
637 	 * (Unless we are running with support for LPA2, in which case the
638 	 * entire reduced VA space is covered by a single pgd_t which will have
639 	 * been populated without the PXNTable attribute by the time we get here.)
640 	 */
641 	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end) &&
642 		     pgd_index(_PAGE_OFFSET(VA_BITS_MIN)) != PTRS_PER_PGD - 1);
643 
644 	early_kfence_pool = arm64_kfence_alloc_pool();
645 
646 	if (can_set_direct_map())
647 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
648 
649 	/*
650 	 * Take care not to create a writable alias for the
651 	 * read-only text and rodata sections of the kernel image.
652 	 * So temporarily mark them as NOMAP to skip mappings in
653 	 * the following for-loop.
654 	 */
655 	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
656 
657 	/* map all the memory banks */
658 	for_each_mem_range(i, &start, &end) {
659 		if (start >= end)
660 			break;
661 		/*
662 		 * The linear map must allow reading/writing of allocation tags
663 		 * if MTE is present. Otherwise, it has the same attributes as
664 		 * PAGE_KERNEL.
665 		 */
666 		__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
667 			       flags);
668 	}
669 
670 	/*
671 	 * Map the linear alias of the [_stext, __init_begin) interval
672 	 * as non-executable now, and remove the write permission in
673 	 * mark_linear_text_alias_ro() (which will be called after
674 	 * alternative patching has completed). This makes the contents
675 	 * of the region accessible to subsystems such as hibernate,
676 	 * but protects it from inadvertent modification or execution.
677 	 * Note that contiguous mappings cannot be remapped in this way,
678 	 * so we should avoid them here.
679 	 */
680 	__map_memblock(pgdp, kernel_start, kernel_end,
681 		       PAGE_KERNEL, NO_CONT_MAPPINGS);
682 	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
683 	arm64_kfence_map_pool(early_kfence_pool, pgdp);
684 }
685 
686 void mark_rodata_ro(void)
687 {
688 	unsigned long section_size;
689 
690 	/*
691 	 * Mark .rodata as read-only. Use __init_begin rather than __end_rodata
692 	 * to cover NOTES and EXCEPTION_TABLE.
693 	 */
694 	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
695 	WRITE_ONCE(rodata_is_rw, false);
696 	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
697 			    section_size, PAGE_KERNEL_RO);
698 }
699 
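/*
 * Register a vm_struct covering a segment of the kernel image so that the
 * corresponding virtual range is known to the vmalloc code. Unless
 * VM_NO_GUARD is passed, a guard page is accounted for at the end.
 */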
700 static void __init declare_vma(struct vm_struct *vma,
701 			       void *va_start, void *va_end,
702 			       unsigned long vm_flags)
703 {
704 	phys_addr_t pa_start = __pa_symbol(va_start);
705 	unsigned long size = va_end - va_start;
706 
707 	BUG_ON(!PAGE_ALIGNED(pa_start));
708 	BUG_ON(!PAGE_ALIGNED(size));
709 
710 	if (!(vm_flags & VM_NO_GUARD))
711 		size += PAGE_SIZE;
712 
713 	vma->addr	= va_start;
714 	vma->phys_addr	= pa_start;
715 	vma->size	= size;
716 	vma->flags	= VM_MAP | vm_flags;
717 	vma->caller	= __builtin_return_address(0);
718 
719 	vm_area_add_early(vma);
720 }
721 
722 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
723 static pgprot_t kernel_exec_prot(void)
724 {
725 	return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
726 }
727 
728 static int __init map_entry_trampoline(void)
729 {
730 	int i;
731 
732 	if (!arm64_kernel_unmapped_at_el0())
733 		return 0;
734 
735 	pgprot_t prot = kernel_exec_prot();
736 	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
737 
738 	/* The trampoline is always mapped and can therefore be global */
739 	pgprot_val(prot) &= ~PTE_NG;
740 
741 	/* Map only the text into the trampoline page table */
742 	memset(tramp_pg_dir, 0, PGD_SIZE);
743 	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
744 			     entry_tramp_text_size(), prot,
745 			     __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);
746 
747 	/* Map both the text and data into the kernel page table */
748 	for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
749 		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
750 			     pa_start + i * PAGE_SIZE, prot);
751 
752 	if (IS_ENABLED(CONFIG_RELOCATABLE))
753 		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
754 			     pa_start + i * PAGE_SIZE, PAGE_KERNEL_RO);
755 
756 	return 0;
757 }
758 core_initcall(map_entry_trampoline);
759 #endif
760 
761 /*
762  * Declare the VMAs for the kernel image segments.
763  */
764 static void __init declare_kernel_vmas(void)
765 {
766 	static struct vm_struct vmlinux_seg[KERNEL_SEGMENT_COUNT];
767 
768 	declare_vma(&vmlinux_seg[0], _stext, _etext, VM_NO_GUARD);
769 	declare_vma(&vmlinux_seg[1], __start_rodata, __inittext_begin, VM_NO_GUARD);
770 	declare_vma(&vmlinux_seg[2], __inittext_begin, __inittext_end, VM_NO_GUARD);
771 	declare_vma(&vmlinux_seg[3], __initdata_begin, __initdata_end, VM_NO_GUARD);
772 	declare_vma(&vmlinux_seg[4], _data, _end, 0);
773 }
774 
775 void __pi_map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot,
776 		    int level, pte_t *tbl, bool may_use_cont, u64 va_offset);
777 
778 static u8 idmap_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init,
779 	  kpti_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init;
780 
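/*
 * Build the identity mapping of the [__idmap_text_start, __idmap_text_end)
 * range in idmap_pg_dir, using the statically allocated idmap_ptes pages.
 * When KPTI may later be enabled, also add a writable mapping of
 * __idmap_kpti_flag so the G-to-nG conversion code can synchronize on it.
 */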
781 static void __init create_idmap(void)
782 {
783 	u64 start = __pa_symbol(__idmap_text_start);
784 	u64 end   = __pa_symbol(__idmap_text_end);
785 	u64 ptep  = __pa_symbol(idmap_ptes);
786 
787 	__pi_map_range(&ptep, start, end, start, PAGE_KERNEL_ROX,
788 		       IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
789 		       __phys_to_virt(ptep) - ptep);
790 
791 	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && !arm64_use_ng_mappings) {
792 		extern u32 __idmap_kpti_flag;
793 		u64 pa = __pa_symbol(&__idmap_kpti_flag);
794 
795 		/*
796 		 * The KPTI G-to-nG conversion code needs a read-write mapping
797 		 * of its synchronization flag in the ID map.
798 		 */
799 		ptep = __pa_symbol(kpti_ptes);
800 		__pi_map_range(&ptep, pa, pa + sizeof(u32), pa, PAGE_KERNEL,
801 			       IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
802 			       __phys_to_virt(ptep) - ptep);
803 	}
804 }
805 
806 void __init paging_init(void)
807 {
808 	map_mem(swapper_pg_dir);
809 
810 	memblock_allow_resize();
811 
812 	create_idmap();
813 	declare_kernel_vmas();
814 }
815 
816 #ifdef CONFIG_MEMORY_HOTPLUG
817 static void free_hotplug_page_range(struct page *page, size_t size,
818 				    struct vmem_altmap *altmap)
819 {
820 	if (altmap) {
821 		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
822 	} else {
823 		WARN_ON(PageReserved(page));
824 		free_pages((unsigned long)page_address(page), get_order(size));
825 	}
826 }
827 
828 static void free_hotplug_pgtable_page(struct page *page)
829 {
830 	free_hotplug_page_range(page, PAGE_SIZE, NULL);
831 }
832 
833 static bool pgtable_range_aligned(unsigned long start, unsigned long end,
834 				  unsigned long floor, unsigned long ceiling,
835 				  unsigned long mask)
836 {
837 	start &= mask;
838 	if (start < floor)
839 		return false;
840 
841 	if (ceiling) {
842 		ceiling &= mask;
843 		if (!ceiling)
844 			return false;
845 	}
846 
847 	if (end - 1 > ceiling - 1)
848 		return false;
849 	return true;
850 }
851 
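/*
 * The unmap_hotplug_*_range() helpers below tear down the mappings for
 * [addr, end) level by level. When @free_mapped is true the backing pages
 * are released as well, either through the altmap or to the page allocator
 * via free_hotplug_page_range().
 */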
852 static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
853 				    unsigned long end, bool free_mapped,
854 				    struct vmem_altmap *altmap)
855 {
856 	pte_t *ptep, pte;
857 
858 	do {
859 		ptep = pte_offset_kernel(pmdp, addr);
860 		pte = __ptep_get(ptep);
861 		if (pte_none(pte))
862 			continue;
863 
864 		WARN_ON(!pte_present(pte));
865 		__pte_clear(&init_mm, addr, ptep);
866 		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
867 		if (free_mapped)
868 			free_hotplug_page_range(pte_page(pte),
869 						PAGE_SIZE, altmap);
870 	} while (addr += PAGE_SIZE, addr < end);
871 }
872 
873 static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
874 				    unsigned long end, bool free_mapped,
875 				    struct vmem_altmap *altmap)
876 {
877 	unsigned long next;
878 	pmd_t *pmdp, pmd;
879 
880 	do {
881 		next = pmd_addr_end(addr, end);
882 		pmdp = pmd_offset(pudp, addr);
883 		pmd = READ_ONCE(*pmdp);
884 		if (pmd_none(pmd))
885 			continue;
886 
887 		WARN_ON(!pmd_present(pmd));
888 		if (pmd_sect(pmd)) {
889 			pmd_clear(pmdp);
890 
891 			/*
892 			 * One TLBI should be sufficient here as the PMD_SIZE
893 			 * range is mapped with a single block entry.
894 			 */
895 			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
896 			if (free_mapped)
897 				free_hotplug_page_range(pmd_page(pmd),
898 							PMD_SIZE, altmap);
899 			continue;
900 		}
901 		WARN_ON(!pmd_table(pmd));
902 		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
903 	} while (addr = next, addr < end);
904 }
905 
906 static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
907 				    unsigned long end, bool free_mapped,
908 				    struct vmem_altmap *altmap)
909 {
910 	unsigned long next;
911 	pud_t *pudp, pud;
912 
913 	do {
914 		next = pud_addr_end(addr, end);
915 		pudp = pud_offset(p4dp, addr);
916 		pud = READ_ONCE(*pudp);
917 		if (pud_none(pud))
918 			continue;
919 
920 		WARN_ON(!pud_present(pud));
921 		if (pud_sect(pud)) {
922 			pud_clear(pudp);
923 
924 			/*
925 			 * One TLBI should be sufficient here as the PUD_SIZE
926 			 * range is mapped with a single block entry.
927 			 */
928 			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
929 			if (free_mapped)
930 				free_hotplug_page_range(pud_page(pud),
931 							PUD_SIZE, altmap);
932 			continue;
933 		}
934 		WARN_ON(!pud_table(pud));
935 		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
936 	} while (addr = next, addr < end);
937 }
938 
939 static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
940 				    unsigned long end, bool free_mapped,
941 				    struct vmem_altmap *altmap)
942 {
943 	unsigned long next;
944 	p4d_t *p4dp, p4d;
945 
946 	do {
947 		next = p4d_addr_end(addr, end);
948 		p4dp = p4d_offset(pgdp, addr);
949 		p4d = READ_ONCE(*p4dp);
950 		if (p4d_none(p4d))
951 			continue;
952 
953 		WARN_ON(!p4d_present(p4d));
954 		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
955 	} while (addr = next, addr < end);
956 }
957 
958 static void unmap_hotplug_range(unsigned long addr, unsigned long end,
959 				bool free_mapped, struct vmem_altmap *altmap)
960 {
961 	unsigned long next;
962 	pgd_t *pgdp, pgd;
963 
964 	/*
965 	 * An altmap can only be used as backing memory for the vmemmap
966 	 * mapping. If the backing memory itself is not being freed, the
967 	 * altmap is irrelevant. Warn about this inconsistency when it is
968 	 * encountered.
969 	 */
970 	WARN_ON(!free_mapped && altmap);
971 
972 	do {
973 		next = pgd_addr_end(addr, end);
974 		pgdp = pgd_offset_k(addr);
975 		pgd = READ_ONCE(*pgdp);
976 		if (pgd_none(pgd))
977 			continue;
978 
979 		WARN_ON(!pgd_present(pgd));
980 		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
981 	} while (addr = next, addr < end);
982 }
983 
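/*
 * The free_empty_*_table() helpers below free page-table pages that have
 * become completely empty after unmap_hotplug_range(). A table is only
 * freed when every entry in it is clear and the [floor, ceiling) bounds
 * show that no neighbouring region can still be mapped through it.
 */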
984 static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
985 				 unsigned long end, unsigned long floor,
986 				 unsigned long ceiling)
987 {
988 	pte_t *ptep, pte;
989 	unsigned long i, start = addr;
990 
991 	do {
992 		ptep = pte_offset_kernel(pmdp, addr);
993 		pte = __ptep_get(ptep);
994 
995 		/*
996 		 * This is just a sanity check here which verifies that
997 		 * pte clearing has been done by earlier unmap loops.
998 		 */
999 		WARN_ON(!pte_none(pte));
1000 	} while (addr += PAGE_SIZE, addr < end);
1001 
1002 	if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
1003 		return;
1004 
1005 	/*
1006 	 * Check whether we can free the pte page if the rest of the
1007 	 * entries are empty. Overlaps with other regions have been
1008 	 * handled by the floor/ceiling check.
1009 	 */
1010 	ptep = pte_offset_kernel(pmdp, 0UL);
1011 	for (i = 0; i < PTRS_PER_PTE; i++) {
1012 		if (!pte_none(__ptep_get(&ptep[i])))
1013 			return;
1014 	}
1015 
1016 	pmd_clear(pmdp);
1017 	__flush_tlb_kernel_pgtable(start);
1018 	free_hotplug_pgtable_page(virt_to_page(ptep));
1019 }
1020 
1021 static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
1022 				 unsigned long end, unsigned long floor,
1023 				 unsigned long ceiling)
1024 {
1025 	pmd_t *pmdp, pmd;
1026 	unsigned long i, next, start = addr;
1027 
1028 	do {
1029 		next = pmd_addr_end(addr, end);
1030 		pmdp = pmd_offset(pudp, addr);
1031 		pmd = READ_ONCE(*pmdp);
1032 		if (pmd_none(pmd))
1033 			continue;
1034 
1035 		WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
1036 		free_empty_pte_table(pmdp, addr, next, floor, ceiling);
1037 	} while (addr = next, addr < end);
1038 
1039 	if (CONFIG_PGTABLE_LEVELS <= 2)
1040 		return;
1041 
1042 	if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
1043 		return;
1044 
1045 	/*
1046 	 * Check whether we can free the pmd page if the rest of the
1047 	 * entries are empty. Overlaps with other regions have been
1048 	 * handled by the floor/ceiling check.
1049 	 */
1050 	pmdp = pmd_offset(pudp, 0UL);
1051 	for (i = 0; i < PTRS_PER_PMD; i++) {
1052 		if (!pmd_none(READ_ONCE(pmdp[i])))
1053 			return;
1054 	}
1055 
1056 	pud_clear(pudp);
1057 	__flush_tlb_kernel_pgtable(start);
1058 	free_hotplug_pgtable_page(virt_to_page(pmdp));
1059 }
1060 
1061 static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
1062 				 unsigned long end, unsigned long floor,
1063 				 unsigned long ceiling)
1064 {
1065 	pud_t *pudp, pud;
1066 	unsigned long i, next, start = addr;
1067 
1068 	do {
1069 		next = pud_addr_end(addr, end);
1070 		pudp = pud_offset(p4dp, addr);
1071 		pud = READ_ONCE(*pudp);
1072 		if (pud_none(pud))
1073 			continue;
1074 
1075 		WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
1076 		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
1077 	} while (addr = next, addr < end);
1078 
1079 	if (!pgtable_l4_enabled())
1080 		return;
1081 
1082 	if (!pgtable_range_aligned(start, end, floor, ceiling, P4D_MASK))
1083 		return;
1084 
1085 	/*
1086 	 * Check whether we can free the pud page if the rest of the
1087 	 * entries are empty. Overlaps with other regions have been
1088 	 * handled by the floor/ceiling check.
1089 	 */
1090 	pudp = pud_offset(p4dp, 0UL);
1091 	for (i = 0; i < PTRS_PER_PUD; i++) {
1092 		if (!pud_none(READ_ONCE(pudp[i])))
1093 			return;
1094 	}
1095 
1096 	p4d_clear(p4dp);
1097 	__flush_tlb_kernel_pgtable(start);
1098 	free_hotplug_pgtable_page(virt_to_page(pudp));
1099 }
1100 
1101 static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
1102 				 unsigned long end, unsigned long floor,
1103 				 unsigned long ceiling)
1104 {
1105 	p4d_t *p4dp, p4d;
1106 	unsigned long i, next, start = addr;
1107 
1108 	do {
1109 		next = p4d_addr_end(addr, end);
1110 		p4dp = p4d_offset(pgdp, addr);
1111 		p4d = READ_ONCE(*p4dp);
1112 		if (p4d_none(p4d))
1113 			continue;
1114 
1115 		WARN_ON(!p4d_present(p4d));
1116 		free_empty_pud_table(p4dp, addr, next, floor, ceiling);
1117 	} while (addr = next, addr < end);
1118 
1119 	if (!pgtable_l5_enabled())
1120 		return;
1121 
1122 	if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
1123 		return;
1124 
1125 	/*
1126 	 * Check whether we can free the p4d page if the rest of the
1127 	 * entries are empty. Overlaps with other regions have been
1128 	 * handled by the floor/ceiling check.
1129 	 */
1130 	p4dp = p4d_offset(pgdp, 0UL);
1131 	for (i = 0; i < PTRS_PER_P4D; i++) {
1132 		if (!p4d_none(READ_ONCE(p4dp[i])))
1133 			return;
1134 	}
1135 
1136 	pgd_clear(pgdp);
1137 	__flush_tlb_kernel_pgtable(start);
1138 	free_hotplug_pgtable_page(virt_to_page(p4dp));
1139 }
1140 
1141 static void free_empty_tables(unsigned long addr, unsigned long end,
1142 			      unsigned long floor, unsigned long ceiling)
1143 {
1144 	unsigned long next;
1145 	pgd_t *pgdp, pgd;
1146 
1147 	do {
1148 		next = pgd_addr_end(addr, end);
1149 		pgdp = pgd_offset_k(addr);
1150 		pgd = READ_ONCE(*pgdp);
1151 		if (pgd_none(pgd))
1152 			continue;
1153 
1154 		WARN_ON(!pgd_present(pgd));
1155 		free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
1156 	} while (addr = next, addr < end);
1157 }
1158 #endif
1159 
1160 void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
1161 			       unsigned long addr, unsigned long next)
1162 {
1163 	pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
1164 }
1165 
1166 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
1167 				unsigned long addr, unsigned long next)
1168 {
1169 	vmemmap_verify((pte_t *)pmdp, node, addr, next);
1170 	return 1;
1171 }
1172 
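/*
 * With 4K pages the vmemmap is populated with PMD-sized section mappings
 * where possible (see vmemmap_set_pmd() above); other page sizes fall back
 * to base-page mappings.
 */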
1173 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1174 		struct vmem_altmap *altmap)
1175 {
1176 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
1177 
1178 	if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
1179 		return vmemmap_populate_basepages(start, end, node, altmap);
1180 	else
1181 		return vmemmap_populate_hugepages(start, end, node, altmap);
1182 }
1183 
1184 #ifdef CONFIG_MEMORY_HOTPLUG
1185 void vmemmap_free(unsigned long start, unsigned long end,
1186 		struct vmem_altmap *altmap)
1187 {
1188 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
1189 
1190 	unmap_hotplug_range(start, end, true, altmap);
1191 	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
1192 }
1193 #endif /* CONFIG_MEMORY_HOTPLUG */
1194 
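/*
 * Install a PUD-sized block mapping at *pudp. The update is refused
 * (returning 0) unless pgattr_change_is_safe() confirms it can be done
 * in place, i.e. without break-before-make.
 */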
1195 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
1196 {
1197 	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
1198 
1199 	/* Only allow permission changes for now */
1200 	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
1201 				   pud_val(new_pud)))
1202 		return 0;
1203 
1204 	VM_BUG_ON(phys & ~PUD_MASK);
1205 	set_pud(pudp, new_pud);
1206 	return 1;
1207 }
1208 
1209 int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
1210 {
1211 	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));
1212 
1213 	/* Only allow permission changes for now */
1214 	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
1215 				   pmd_val(new_pmd)))
1216 		return 0;
1217 
1218 	VM_BUG_ON(phys & ~PMD_MASK);
1219 	set_pmd(pmdp, new_pmd);
1220 	return 1;
1221 }
1222 
1223 #ifndef __PAGETABLE_P4D_FOLDED
1224 void p4d_clear_huge(p4d_t *p4dp)
1225 {
1226 }
1227 #endif
1228 
1229 int pud_clear_huge(pud_t *pudp)
1230 {
1231 	if (!pud_sect(READ_ONCE(*pudp)))
1232 		return 0;
1233 	pud_clear(pudp);
1234 	return 1;
1235 }
1236 
1237 int pmd_clear_huge(pmd_t *pmdp)
1238 {
1239 	if (!pmd_sect(READ_ONCE(*pmdp)))
1240 		return 0;
1241 	pmd_clear(pmdp);
1242 	return 1;
1243 }
1244 
1245 int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
1246 {
1247 	pte_t *table;
1248 	pmd_t pmd;
1249 
1250 	pmd = READ_ONCE(*pmdp);
1251 
1252 	if (!pmd_table(pmd)) {
1253 		VM_WARN_ON(1);
1254 		return 1;
1255 	}
1256 
1257 	table = pte_offset_kernel(pmdp, addr);
1258 	pmd_clear(pmdp);
1259 	__flush_tlb_kernel_pgtable(addr);
1260 	pte_free_kernel(NULL, table);
1261 	return 1;
1262 }
1263 
1264 int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
1265 {
1266 	pmd_t *table;
1267 	pmd_t *pmdp;
1268 	pud_t pud;
1269 	unsigned long next, end;
1270 
1271 	pud = READ_ONCE(*pudp);
1272 
1273 	if (!pud_table(pud)) {
1274 		VM_WARN_ON(1);
1275 		return 1;
1276 	}
1277 
1278 	table = pmd_offset(pudp, addr);
1279 	pmdp = table;
1280 	next = addr;
1281 	end = addr + PUD_SIZE;
1282 	do {
1283 		pmd_free_pte_page(pmdp, next);
1284 	} while (pmdp++, next += PMD_SIZE, next != end);
1285 
1286 	pud_clear(pudp);
1287 	__flush_tlb_kernel_pgtable(addr);
1288 	pmd_free(NULL, table);
1289 	return 1;
1290 }
1291 
1292 #ifdef CONFIG_MEMORY_HOTPLUG
1293 static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
1294 {
1295 	unsigned long end = start + size;
1296 
1297 	WARN_ON(pgdir != init_mm.pgd);
1298 	WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));
1299 
1300 	unmap_hotplug_range(start, end, false, NULL);
1301 	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
1302 }
1303 
1304 struct range arch_get_mappable_range(void)
1305 {
1306 	struct range mhp_range;
1307 	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
1308 	u64 end_linear_pa = __pa(PAGE_END - 1);
1309 
1310 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
1311 		 * Check for a wrap: because of the randomized linear mapping,
1312 		 * the start physical address can actually be bigger than the
1313 		 * end physical address. In this case set start to zero so that
1314 		 * the [0, end_linear_pa] range can still cover all addressable
1315 		 * physical addresses.
1316 		 * all addressable physical addresses.
1317 		 */
1318 		if (start_linear_pa > end_linear_pa)
1319 			start_linear_pa = 0;
1320 	}
1321 
1322 	WARN_ON(start_linear_pa > end_linear_pa);
1323 
1324 	/*
1325 	 * The linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)],
1326 	 * including both of its ends but excluding PAGE_END itself. The maximum
1327 	 * physical range which can be mapped inside this linear mapping range
1328 	 * must also be derived from its end points.
1329 	 */
1330 	mhp_range.start = start_linear_pa;
1331 	mhp_range.end =  end_linear_pa;
1332 
1333 	return mhp_range;
1334 }
1335 
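/*
 * Hot-add [start, start + size): extend the linear map under the same
 * restrictions as at boot (no executable mappings, and no block/contiguous
 * mappings when can_set_direct_map() requires page granularity), then hand
 * the range to the core MM via __add_pages(), rolling the mapping back on
 * failure.
 */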
1336 int arch_add_memory(int nid, u64 start, u64 size,
1337 		    struct mhp_params *params)
1338 {
1339 	int ret, flags = NO_EXEC_MAPPINGS;
1340 
1341 	VM_BUG_ON(!mhp_range_allowed(start, size, true));
1342 
1343 	if (can_set_direct_map())
1344 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
1345 
1346 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
1347 			     size, params->pgprot, __pgd_pgtable_alloc,
1348 			     flags);
1349 
1350 	memblock_clear_nomap(start, size);
1351 
1352 	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
1353 			   params);
1354 	if (ret)
1355 		__remove_pgd_mapping(swapper_pg_dir,
1356 				     __phys_to_virt(start), size);
1357 	else {
1358 		max_pfn = PFN_UP(start + size);
1359 		max_low_pfn = max_pfn;
1360 	}
1361 
1362 	return ret;
1363 }
1364 
1365 void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
1366 {
1367 	unsigned long start_pfn = start >> PAGE_SHIFT;
1368 	unsigned long nr_pages = size >> PAGE_SHIFT;
1369 
1370 	__remove_pages(start_pfn, nr_pages, altmap);
1371 	__remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
1372 }
1373 
1374 /*
1375  * This memory hotplug notifier helps prevent boot memory from being
1376  * inadvertently removed by blocking the pfn range offlining process in
1377  * __offline_pages(). Hence it prevents both offlining and removal of
1378  * boot memory, which is initially always online. If and when boot
1379  * memory can be removed in the future, this notifier should be dropped
1380  * and free_hotplug_page_range() should handle any reserved pages
1381  * allocated during boot.
1382  */
1383 static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
1384 					   unsigned long action, void *data)
1385 {
1386 	struct mem_section *ms;
1387 	struct memory_notify *arg = data;
1388 	unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
1389 	unsigned long pfn = arg->start_pfn;
1390 
1391 	if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE))
1392 		return NOTIFY_OK;
1393 
1394 	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
1395 		unsigned long start = PFN_PHYS(pfn);
1396 		unsigned long end = start + (1UL << PA_SECTION_SHIFT);
1397 
1398 		ms = __pfn_to_section(pfn);
1399 		if (!early_section(ms))
1400 			continue;
1401 
1402 		if (action == MEM_GOING_OFFLINE) {
1403 			/*
1404 			 * Boot memory removal is not supported. Prevent
1405 			 * it by blocking any attempted offline request
1406 			 * for the boot memory and just report it.
1407 			 */
1408 			pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end);
1409 			return NOTIFY_BAD;
1410 		} else if (action == MEM_OFFLINE) {
1411 			/*
1412 			 * This should never have happened. Boot memory
1413 			 * offlining should have been prevented by this
1414 			 * very notifier. Some memory removal procedure has
1415 			 * probably changed, which would then require
1416 			 * further debugging.
1417 			 */
1418 			pr_err("Boot memory [%lx %lx] offlined\n", start, end);
1419 
1420 			/*
1421 			 * Core memory hotplug does not process a return
1422 			 * code from the notifier for MEM_OFFLINE events.
1423 			 * The error condition has been reported. Return
1424 			 * from here as if ignored.
1425 			 */
1426 			return NOTIFY_DONE;
1427 		}
1428 	}
1429 	return NOTIFY_OK;
1430 }
1431 
1432 static struct notifier_block prevent_bootmem_remove_nb = {
1433 	.notifier_call = prevent_bootmem_remove_notifier,
1434 };
1435 
1436 /*
1437  * This ensures that boot memory sections on the platform are online
1438  * from early boot. Memory sections cannot be prevented from being
1439  * offlined if, for some reason, they are not online to begin with.
1440  * This helps validate the basic assumption on which the above memory
1441  * event notifier works to prevent boot memory section offlining and
1442  * its possible removal.
1443  */
1444 static void validate_bootmem_online(void)
1445 {
1446 	phys_addr_t start, end, addr;
1447 	struct mem_section *ms;
1448 	u64 i;
1449 
1450 	/*
1451 	 * Scanning across all memblock regions might be expensive
1452 	 * on some big memory systems. Hence enable this
1453 	 * validation only with DEBUG_VM.
1454 	 */
1455 	if (!IS_ENABLED(CONFIG_DEBUG_VM))
1456 		return;
1457 
1458 	for_each_mem_range(i, &start, &end) {
1459 		for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) {
1460 			ms = __pfn_to_section(PHYS_PFN(addr));
1461 
1462 			/*
1463 			 * All memory ranges in the system at this point
1464 			 * should have been marked as early sections.
1465 			 */
1466 			WARN_ON(!early_section(ms));
1467 
1468 			/*
1469 			 * The memory notifier mechanism used here to prevent
1470 			 * boot memory offlining depends on the fact that each
1471 			 * early memory section on the system is initially
1472 			 * online. Otherwise a given memory section which
1473 			 * is already offline will be overlooked and can
1474 			 * be removed completely. Call out such sections.
1475 			 */
1476 			if (!online_section(ms))
1477 				pr_err("Boot memory [%llx %llx] is offline, can be removed\n",
1478 					addr, addr + (1UL << PA_SECTION_SHIFT));
1479 		}
1480 	}
1481 }
1482 
1483 static int __init prevent_bootmem_remove_init(void)
1484 {
1485 	int ret = 0;
1486 
1487 	if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
1488 		return ret;
1489 
1490 	validate_bootmem_online();
1491 	ret = register_memory_notifier(&prevent_bootmem_remove_nb);
1492 	if (ret)
1493 		pr_err("%s: Notifier registration failed %d\n", __func__, ret);
1494 
1495 	return ret;
1496 }
1497 early_initcall(prevent_bootmem_remove_init);
1498 #endif
1499 
1500 pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
1501 {
1502 	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
1503 		/*
1504 		 * Break-before-make (BBM) is required for all user space mappings
1505 		 * when the permission changes from executable to non-executable
1506 		 * in cases where the CPU is affected by erratum #2645198.
1507 		 */
1508 		if (pte_user_exec(ptep_get(ptep)))
1509 			return ptep_clear_flush(vma, addr, ptep);
1510 	}
1511 	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
1512 }
1513 
1514 void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
1515 			     pte_t old_pte, pte_t pte)
1516 {
1517 	set_pte_at(vma->vm_mm, addr, ptep, pte);
1518 }
1519 
1520 /*
1521  * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
1522  * avoiding the possibility of conflicting TLB entries being allocated.
1523  */
1524 void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp)
1525 {
1526 	typedef void (ttbr_replace_func)(phys_addr_t);
1527 	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
1528 	ttbr_replace_func *replace_phys;
1529 	unsigned long daif;
1530 
1531 	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
1532 	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
1533 
1534 	if (cnp)
1535 		ttbr1 |= TTBR_CNP_BIT;
1536 
1537 	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
1538 
1539 	cpu_install_idmap();
1540 
1541 	/*
1542 	 * We really don't want to take *any* exceptions while TTBR1 is
1543 	 * in the process of being replaced so mask everything.
1544 	 */
1545 	daif = local_daif_save();
1546 	replace_phys(ttbr1);
1547 	local_daif_restore(daif);
1548 
1549 	cpu_uninstall_idmap();
1550 }
1551