// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/memory.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/kfence.h>
#include <linux/pkeys.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>

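/*
 * Flags controlling how __create_pgd_mapping() may map a range:
 * NO_BLOCK_MAPPINGS forces page-granular leaf entries, NO_CONT_MAPPINGS
 * suppresses the contiguous hint, and NO_EXEC_MAPPINGS additionally sets
 * the hierarchical PXNTable attribute in newly created table entries.
 */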
#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)
#define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

u32 __boot_cpu_mode[] = { BOOT_CPU_MODE_EL2, BOOT_CPU_MODE_EL1 };

static bool rodata_is_rw __ro_after_init = true;

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status
 * with the MMU turned off.
 */
long __section(".mmuoff.data.write") __early_cpu_boot_status;

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static DEFINE_SPINLOCK(swapper_pgdir_lock);
static DEFINE_MUTEX(fixmap_lock);

void noinstr set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	/*
	 * Don't bother with the fixmap if swapper_pg_dir is still mapped
	 * writable in the kernel mapping.
	 */
	if (rodata_is_rw) {
		WRITE_ONCE(*pgdp, pgd);
		dsb(ishst);
		isb();
		return;
	}

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_is_map_memory(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(int shift)
{
	phys_addr_t phys;

	phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
					 MEMBLOCK_ALLOC_NOLEAKTRACE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	return phys;
}

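/*
 * For example, toggling PTE_RDONLY or PTE_PXN on a live kernel mapping is
 * considered safe here since only bits in the mask below change, whereas
 * changing the output address, the memory type (other than between Normal
 * and Normal-Tagged) or going from non-Global to Global requires
 * break-before-make and is rejected.
 */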
bool pgattr_change_is_safe(pteval_t old, pteval_t new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG |
			PTE_SWBITS_MASK;

	/* creating or taking down mappings is always safe */
	if (!pte_valid(__pte(old)) || !pte_valid(__pte(new)))
		return true;

	/* A live entry's pfn should not change */
	if (pte_pfn(__pte(old)) != pte_pfn(__pte(new)))
		return false;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	/*
	 * Changing the memory type between Normal and Normal-Tagged is safe
	 * since Tagged is considered a permission attribute from the
	 * mismatched attribute aliases perspective.
	 */
	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
		mask |= PTE_ATTRINDX_MASK;

	return ((old ^ new) & ~mask) == 0;
}

static void init_clear_pgtable(void *table)
{
	clear_page(table);

	/* Ensure the zeroing is observed by page table walks. */
	dsb(ishst);
}

static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	do {
		pte_t old_pte = __ptep_get(ptep);

		/*
		 * Required barriers to make this visible to the table walker
		 * are deferred to the end of alloc_init_cont_pte().
		 */
		__set_pte_nosync(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      pte_val(__ptep_get(ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);
}

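/*
 * Both the VA range and the PA must be aligned to the span of a contiguous
 * range (CONT_PTE_SIZE, i.e. 16 PTEs or 64KiB with a 4KiB granule) before
 * PTE_CONT may be set on the entries below.
 */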
static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);
	pte_t *ptep;

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF;
		phys_addr_t pte_phys;

		if (flags & NO_EXEC_MAPPINGS)
			pmdval |= PMD_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc(PAGE_SHIFT);
		ptep = pte_set_fixmap(pte_phys);
		init_clear_pgtable(ptep);
		ptep += pte_index(addr);
		__pmd_populate(pmdp, pte_phys, pmdval);
	} else {
		BUG_ON(pmd_bad(pmd));
		ptep = pte_set_fixmap_offset(pmdp, addr);
	}

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(ptep, addr, next, phys, __prot);

		ptep += pte_index(next) - pte_index(addr);
		phys += next - addr;
	} while (addr = next, addr != end);

	/*
	 * Note: barriers and maintenance necessary to clear the fixmap slot
	 * ensure that all previous pgtable writes are visible to the table
	 * walker.
	 */
	pte_clear_fixmap();
}

static void init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;

	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~PMD_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);
	pmd_t *pmdp;

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF;
		phys_addr_t pmd_phys;

		if (flags & NO_EXEC_MAPPINGS)
			pudval |= PUD_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc(PMD_SHIFT);
		pmdp = pmd_set_fixmap(pmd_phys);
		init_clear_pgtable(pmdp);
		pmdp += pmd_index(addr);
		__pud_populate(pudp, pmd_phys, pudval);
	} else {
		BUG_ON(pud_bad(pud));
		pmdp = pmd_set_fixmap_offset(pudp, addr);
	}

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pmdp, addr, next, phys, __prot, pgtable_alloc, flags);

		pmdp += pmd_index(next) - pmd_index(addr);
		phys += next - addr;
	} while (addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	p4d_t p4d = READ_ONCE(*p4dp);
	pud_t *pudp;

	if (p4d_none(p4d)) {
		p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN | P4D_TABLE_AF;
		phys_addr_t pud_phys;

		if (flags & NO_EXEC_MAPPINGS)
			p4dval |= P4D_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc(PUD_SHIFT);
		pudp = pud_set_fixmap(pud_phys);
		init_clear_pgtable(pudp);
		pudp += pud_index(addr);
		__p4d_populate(p4dp, pud_phys, p4dval);
	} else {
		BUG_ON(p4d_bad(p4d));
		pudp = pud_set_fixmap_offset(p4dp, addr);
	}

	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (pud_sect_supported() &&
		    ((addr | next | phys) & ~PUD_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
}

static void alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	pgd_t pgd = READ_ONCE(*pgdp);
	p4d_t *p4dp;

	if (pgd_none(pgd)) {
		pgdval_t pgdval = PGD_TYPE_TABLE | PGD_TABLE_UXN | PGD_TABLE_AF;
		phys_addr_t p4d_phys;

		if (flags & NO_EXEC_MAPPINGS)
			pgdval |= PGD_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		p4d_phys = pgtable_alloc(P4D_SHIFT);
		p4dp = p4d_set_fixmap(p4d_phys);
		init_clear_pgtable(p4dp);
		p4dp += p4d_index(addr);
		__pgd_populate(pgdp, p4d_phys, pgdval);
	} else {
		BUG_ON(pgd_bad(pgd));
		p4dp = p4d_set_fixmap_offset(pgdp, addr);
	}

	do {
		p4d_t old_p4d = READ_ONCE(*p4dp);

		next = p4d_addr_end(addr, end);

		alloc_init_pud(p4dp, addr, next, phys, prot,
			       pgtable_alloc, flags);

		BUG_ON(p4d_val(old_p4d) != 0 &&
		       p4d_val(old_p4d) != READ_ONCE(p4d_val(*p4dp)));

		phys += next - addr;
	} while (p4dp++, addr = next, addr != end);

	p4d_clear_fixmap();
}

static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
					unsigned long virt, phys_addr_t size,
					pgprot_t prot,
					phys_addr_t (*pgtable_alloc)(int),
					int flags)
{
	unsigned long addr, end, next;
	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	end = PAGE_ALIGN(virt + size);

	do {
		next = pgd_addr_end(addr, end);
		alloc_init_p4d(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(int),
				 int flags)
{
	mutex_lock(&fixmap_lock);
	__create_pgd_mapping_locked(pgdir, phys, virt, size, prot,
				    pgtable_alloc, flags);
	mutex_unlock(&fixmap_lock);
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
extern __alias(__create_pgd_mapping_locked)
void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
			     phys_addr_t size, pgprot_t prot,
			     phys_addr_t (*pgtable_alloc)(int), int flags);
#endif

static phys_addr_t __pgd_pgtable_alloc(int shift)
{
	/* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */
	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL & ~__GFP_ZERO);

	BUG_ON(!ptr);
	return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
	phys_addr_t pa = __pgd_pgtable_alloc(shift);
	struct ptdesc *ptdesc = page_ptdesc(phys_to_page(pa));

	/*
	 * Call proper page table ctor in case later we need to
	 * call core mm functions like apply_to_page_range() on
	 * this pre-allocated page table.
	 *
	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
	 * folded, and if so pagetable_pte_ctor() becomes nop.
	 */
	if (shift == PAGE_SHIFT)
		BUG_ON(!pagetable_pte_ctor(ptdesc));
	else if (shift == PMD_SHIFT)
		BUG_ON(!pagetable_pmd_ctor(ptdesc));

	return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
				   phys_addr_t size, pgprot_t prot)
{
	if (virt < PAGE_OFFSET) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < PAGE_OFFSET) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext),
			    (unsigned long)__init_begin - (unsigned long)_stext,
			    PAGE_KERNEL_RO);
}

#ifdef CONFIG_KFENCE

bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;

/* early_param() will be parsed before map_mem() below. */
static int __init parse_kfence_early_init(char *arg)
{
	int val;

	if (get_option(&arg, &val))
		kfence_early_init = !!val;
	return 0;
}
early_param("kfence.sample_interval", parse_kfence_early_init);

static phys_addr_t __init arm64_kfence_alloc_pool(void)
{
	phys_addr_t kfence_pool;

	if (!kfence_early_init)
		return 0;

	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
	if (!kfence_pool) {
		pr_err("failed to allocate kfence pool\n");
		kfence_early_init = false;
		return 0;
	}

	/* Temporarily mark as NOMAP. */
	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);

	return kfence_pool;
}

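/*
 * Map the KFENCE pool at page granularity so that protection changes on
 * individual guard pages never require splitting a block mapping.
 */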
static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
{
	if (!kfence_pool)
		return;

	/* KFENCE pool needs page-level mapping. */
	__map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
		       pgprot_tagged(PAGE_KERNEL),
		       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
	__kfence_pool = phys_to_virt(kfence_pool);
}
#else /* CONFIG_KFENCE */

static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }

#endif /* CONFIG_KFENCE */

static void __init map_mem(pgd_t *pgdp)
{
	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
	phys_addr_t kernel_start = __pa_symbol(_stext);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	phys_addr_t start, end;
	phys_addr_t early_kfence_pool;
	int flags = NO_EXEC_MAPPINGS;
	u64 i;

	/*
	 * Setting hierarchical PXNTable attributes on table entries covering
	 * the linear region is only possible if it is guaranteed that no table
	 * entries at any level are being shared between the linear region and
	 * the vmalloc region. Check whether this is true for the PGD level, in
	 * which case it is guaranteed to be true for all other levels as well.
	 * (Unless we are running with support for LPA2, in which case the
	 * entire reduced VA space is covered by a single pgd_t which will have
	 * been populated without the PXNTable attribute by the time we get here.)
	 */
	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end) &&
		     pgd_index(_PAGE_OFFSET(VA_BITS_MIN)) != PTRS_PER_PGD - 1);

	early_kfence_pool = arm64_kfence_alloc_pool();

	if (can_set_direct_map())
		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop.
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);

	/* map all the memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		/*
		 * The linear map must allow allocation tags reading/writing
		 * if MTE is present. Otherwise, it has the same attributes as
		 * PAGE_KERNEL.
		 */
		__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
			       flags);
	}

	/*
	 * Map the linear alias of the [_stext, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
	arm64_kfence_map_pool(early_kfence_pool, pgdp);
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * Mark .rodata as read-only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	WRITE_ONCE(rodata_is_rw, false);
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);
}

static void __init declare_vma(struct vm_struct *vma,
			       void *va_start, void *va_end,
			       unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr = va_start;
	vma->phys_addr = pa_start;
	vma->size = size;
	vma->flags = VM_MAP | vm_flags;
	vma->caller = __builtin_return_address(0);

	vm_area_add_early(vma);
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static pgprot_t kernel_exec_prot(void)
{
	return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
}

static int __init map_entry_trampoline(void)
{
	int i;

	if (!arm64_kernel_unmapped_at_el0())
		return 0;

	pgprot_t prot = kernel_exec_prot();
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
			     entry_tramp_text_size(), prot,
			     __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);

	/* Map both the text and data into the kernel page table */
	for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
			     pa_start + i * PAGE_SIZE, prot);

	if (IS_ENABLED(CONFIG_RELOCATABLE))
		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
			     pa_start + i * PAGE_SIZE, PAGE_KERNEL_RO);

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Declare the VMA areas for the kernel
 */
static void __init declare_kernel_vmas(void)
{
	static struct vm_struct vmlinux_seg[KERNEL_SEGMENT_COUNT];

	declare_vma(&vmlinux_seg[0], _stext, _etext, VM_NO_GUARD);
	declare_vma(&vmlinux_seg[1], __start_rodata, __inittext_begin, VM_NO_GUARD);
	declare_vma(&vmlinux_seg[2], __inittext_begin, __inittext_end, VM_NO_GUARD);
	declare_vma(&vmlinux_seg[3], __initdata_begin, __initdata_end, VM_NO_GUARD);
	declare_vma(&vmlinux_seg[4], _data, _end, 0);
}

void __pi_map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot,
		    int level, pte_t *tbl, bool may_use_cont, u64 va_offset);

static u8 idmap_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init,
	  kpti_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init;

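/*
 * The ID map provides a VA == PA mapping of the .idmap.text section so that
 * the MMU can be reconfigured (e.g. by idmap_cpu_replace_ttbr1) from code
 * whose translation is unaffected by the switch.
 */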
static void __init create_idmap(void)
{
	u64 start = __pa_symbol(__idmap_text_start);
	u64 end = __pa_symbol(__idmap_text_end);
	u64 ptep = __pa_symbol(idmap_ptes);

	__pi_map_range(&ptep, start, end, start, PAGE_KERNEL_ROX,
		       IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
		       __phys_to_virt(ptep) - ptep);

	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && !arm64_use_ng_mappings) {
		extern u32 __idmap_kpti_flag;
		u64 pa = __pa_symbol(&__idmap_kpti_flag);

		/*
		 * The KPTI G-to-nG conversion code needs a read-write mapping
		 * of its synchronization flag in the ID map.
		 */
		ptep = __pa_symbol(kpti_ptes);
		__pi_map_range(&ptep, pa, pa + sizeof(u32), pa, PAGE_KERNEL,
			       IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
			       __phys_to_virt(ptep) - ptep);
	}
}

void __init paging_init(void)
{
	map_mem(swapper_pg_dir);

	memblock_allow_resize();

	create_idmap();
	declare_kernel_vmas();
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_hotplug_page_range(struct page *page, size_t size,
				    struct vmem_altmap *altmap)
{
	if (altmap) {
		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
	} else {
		WARN_ON(PageReserved(page));
		free_pages((unsigned long)page_address(page), get_order(size));
	}
}

static void free_hotplug_pgtable_page(struct page *page)
{
	free_hotplug_page_range(page, PAGE_SIZE, NULL);
}

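/*
 * Return true only if [start, end) covers the whole naturally aligned region
 * described by @mask and stays within [floor, ceiling); e.g. a PTE table may
 * only be freed if no neighbouring region outside [floor, ceiling) could
 * still be mapped through it.
 */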
static bool pgtable_range_aligned(unsigned long start, unsigned long end,
				  unsigned long floor, unsigned long ceiling,
				  unsigned long mask)
{
	start &= mask;
	if (start < floor)
		return false;

	if (ceiling) {
		ceiling &= mask;
		if (!ceiling)
			return false;
	}

	if (end - 1 > ceiling - 1)
		return false;
	return true;
}

static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	pte_t *ptep, pte;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = __ptep_get(ptep);
		if (pte_none(pte))
			continue;

		WARN_ON(!pte_present(pte));
		__pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		if (free_mapped)
			free_hotplug_page_range(pte_page(pte),
						PAGE_SIZE, altmap);
	} while (addr += PAGE_SIZE, addr < end);
}

static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd));
		if (pmd_sect(pmd)) {
			pmd_clear(pmdp);

			/*
			 * One TLBI should be sufficient here as the PMD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pmd_page(pmd),
							PMD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pmd_table(pmd));
		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pud_t *pudp, pud;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud));
		if (pud_sect(pud)) {
			pud_clear(pudp);

			/*
			 * One TLBI should be sufficient here as the PUD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pud_page(pud),
							PUD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pud_table(pud));
		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_range(unsigned long addr, unsigned long end,
				bool free_mapped, struct vmem_altmap *altmap)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	/*
	 * altmap can only be used as vmemmap mapping backing memory.
	 * In case the backing memory itself is not being freed, then
	 * altmap is irrelevant. Warn about this inconsistency when
	 * encountered.
	 */
	WARN_ON(!free_mapped && altmap);

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pte_t *ptep, pte;
	unsigned long i, start = addr;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = __ptep_get(ptep);

		/*
		 * This is just a sanity check here which verifies that
		 * pte clearing has been done by earlier unmap loops.
		 */
		WARN_ON(!pte_none(pte));
	} while (addr += PAGE_SIZE, addr < end);

	if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
		return;

	/*
	 * Check whether we can free the pte page if the rest of the
	 * entries are empty. Overlaps with other regions have been
	 * handled by the floor/ceiling check.
	 */
	ptep = pte_offset_kernel(pmdp, 0UL);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		if (!pte_none(__ptep_get(&ptep[i])))
			return;
	}

	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(ptep));
}

static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pmd_t *pmdp, pmd;
	unsigned long i, next, start = addr;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
		free_empty_pte_table(pmdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 2)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
		return;

	/*
	 * Check whether we can free the pmd page if the rest of the
	 * entries are empty. Overlaps with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pmdp = pmd_offset(pudp, 0UL);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(READ_ONCE(pmdp[i])))
			return;
	}

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pmdp));
}

static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pud_t *pudp, pud;
	unsigned long i, next, start = addr;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (!pgtable_l4_enabled())
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, P4D_MASK))
		return;

	/*
	 * Check whether we can free the pud page if the rest of the
	 * entries are empty. Overlaps with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pudp = pud_offset(p4dp, 0UL);
	for (i = 0; i < PTRS_PER_PUD; i++) {
		if (!pud_none(READ_ONCE(pudp[i])))
			return;
	}

	p4d_clear(p4dp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pudp));
}

static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	p4d_t *p4dp, p4d;
	unsigned long i, next, start = addr;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		free_empty_pud_table(p4dp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (!pgtable_l5_enabled())
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
		return;

	/*
	 * Check whether we can free the p4d page if the rest of the
	 * entries are empty. Overlaps with other regions have been
	 * handled by the floor/ceiling check.
	 */
	p4dp = p4d_offset(pgdp, 0UL);
	for (i = 0; i < PTRS_PER_P4D; i++) {
		if (!p4d_none(READ_ONCE(p4dp[i])))
			return;
	}

	pgd_clear(pgdp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(p4dp));
}

static void free_empty_tables(unsigned long addr, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}
#endif

void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
}

int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
				unsigned long addr, unsigned long next)
{
	vmemmap_verify((pte_t *)pmdp, node, addr, next);

	return pmd_sect(READ_ONCE(*pmdp));
}

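/*
 * With 4KiB base pages, a whole section's worth of struct pages can be
 * covered by PMD-level vmemmap blocks; smaller (sub-section) ranges and all
 * 16KiB/64KiB granule configurations fall back to base pages.
 */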
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
	/* [start, end] should be within one section */
	WARN_ON_ONCE(end - start > PAGES_PER_SECTION * sizeof(struct page));

	if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES) ||
	    (end - start < PAGES_PER_SECTION * sizeof(struct page)))
		return vmemmap_populate_basepages(start, end, node, altmap);
	else
		return vmemmap_populate_hugepages(start, end, node, altmap);
}

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

	unmap_hotplug_range(start, end, true, altmap);
	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
				   pud_val(new_pud)))
		return 0;

	VM_BUG_ON(phys & ~PUD_MASK);
	set_pud(pudp, new_pud);
	return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
				   pmd_val(new_pmd)))
		return 0;

	VM_BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmdp, new_pmd);
	return 1;
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_huge(p4d_t *p4dp)
{
}
#endif

int pud_clear_huge(pud_t *pudp)
{
	if (!pud_sect(READ_ONCE(*pudp)))
		return 0;
	pud_clear(pudp);
	return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
	if (!pmd_sect(READ_ONCE(*pmdp)))
		return 0;
	pmd_clear(pmdp);
	return 1;
}

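/*
 * Called by the generic vmalloc/ioremap huge-mapping code before it installs
 * a huge entry over an existing table: clear the table entry, flush the walk
 * cache, then free the no longer referenced lower-level table.
 */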
int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
	pte_t *table;
	pmd_t pmd;

	pmd = READ_ONCE(*pmdp);

	if (!pmd_table(pmd)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pte_offset_kernel(pmdp, addr);
	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(addr);
	pte_free_kernel(NULL, table);
	return 1;
}

int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
	pmd_t *table;
	pmd_t *pmdp;
	pud_t pud;
	unsigned long next, end;

	pud = READ_ONCE(*pudp);

	if (!pud_table(pud)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pmd_offset(pudp, addr);
	pmdp = table;
	next = addr;
	end = addr + PUD_SIZE;
	do {
		pmd_free_pte_page(pmdp, next);
	} while (pmdp++, next += PMD_SIZE, next != end);

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(addr);
	pmd_free(NULL, table);
	return 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
{
	unsigned long end = start + size;

	WARN_ON(pgdir != init_mm.pgd);
	WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));

	unmap_hotplug_range(start, end, false, NULL);
	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
}

struct range arch_get_mappable_range(void)
{
	struct range mhp_range;
	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
	u64 end_linear_pa = __pa(PAGE_END - 1);

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		/*
		 * Check for a wrap: with a randomized linear mapping,
		 * the start physical address can end up bigger than the
		 * end physical address. In this case set start to zero
		 * because the [0, end_linear_pa] range must still be
		 * able to cover all addressable physical addresses.
		 */
		if (start_linear_pa > end_linear_pa)
			start_linear_pa = 0;
	}

	WARN_ON(start_linear_pa > end_linear_pa);

	/*
	 * The linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)],
	 * accommodating both of its ends but excluding PAGE_END. The maximum
	 * physical range that can be mapped inside this linear mapping range
	 * must also be derived from its end points.
	 */
	mhp_range.start = start_linear_pa;
	mhp_range.end = end_linear_pa;

	return mhp_range;
}

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	int ret, flags = NO_EXEC_MAPPINGS;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));

	if (can_set_direct_map())
		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
			     size, params->pgprot, __pgd_pgtable_alloc,
			     flags);

	memblock_clear_nomap(start, size);

	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
			  params);
	if (ret)
		__remove_pgd_mapping(swapper_pg_dir,
				     __phys_to_virt(start), size);
	else {
		/* Address of hotplugged memory can be smaller */
		max_pfn = max(max_pfn, PFN_UP(start + size));
		max_low_pfn = max_pfn;
	}

	return ret;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	__remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
}

/*
 * This memory hotplug notifier helps prevent boot memory from being
 * inadvertently removed as it blocks the pfn range offlining process in
 * __offline_pages(). Hence this prevents both offlining and removal of
 * boot memory, which is initially always online. In the future, if and
 * when boot memory can be removed, this notifier should be dropped and
 * free_hotplug_page_range() should handle any reserved pages allocated
 * during boot.
 */
static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
					   unsigned long action, void *data)
{
	struct mem_section *ms;
	struct memory_notify *arg = data;
	unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
	unsigned long pfn = arg->start_pfn;

	if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE))
		return NOTIFY_OK;

	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long start = PFN_PHYS(pfn);
		unsigned long end = start + (1UL << PA_SECTION_SHIFT);

		ms = __pfn_to_section(pfn);
		if (!early_section(ms))
			continue;

		if (action == MEM_GOING_OFFLINE) {
			/*
			 * Boot memory removal is not supported. Prevent
			 * it via blocking any attempted offline request
			 * for the boot memory and just report it.
			 */
			pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end);
			return NOTIFY_BAD;
		} else if (action == MEM_OFFLINE) {
			/*
			 * This should have never happened. Boot memory
			 * offlining should have been prevented by this
			 * very notifier. Probably some memory removal
			 * procedure might have changed which would then
			 * require further debug.
			 */
			pr_err("Boot memory [%lx %lx] offlined\n", start, end);

			/*
			 * Core memory hotplug does not process a return
			 * code from the notifier for MEM_OFFLINE events.
			 * The error condition has been reported. Return
			 * from here as if ignored.
			 */
			return NOTIFY_DONE;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block prevent_bootmem_remove_nb = {
	.notifier_call = prevent_bootmem_remove_notifier,
};

/*
 * This ensures that boot memory sections on the platform are online
 * from early boot. A memory section cannot be prevented from being
 * offlined if, for some reason, it is not online to begin with. This
 * helps validate the basic assumption on which the above memory event
 * notifier works to prevent boot memory section offlining and its
 * possible removal.
 */
static void validate_bootmem_online(void)
{
	phys_addr_t start, end, addr;
	struct mem_section *ms;
	u64 i;

	/*
	 * Scanning across all of memblock might be expensive on some
	 * big memory systems. Hence enable this validation only with
	 * DEBUG_VM.
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	for_each_mem_range(i, &start, &end) {
		for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) {
			ms = __pfn_to_section(PHYS_PFN(addr));

			/*
			 * All memory ranges in the system at this point
			 * should have been marked as early sections.
			 */
			WARN_ON(!early_section(ms));

			/*
			 * The memory notifier mechanism here to prevent boot
			 * memory offlining depends on the fact that each
			 * early section memory on the system is initially
			 * online. Otherwise a given memory section which
			 * is already offline will be overlooked and can
			 * be removed completely. Call out such sections.
			 */
			if (!online_section(ms))
				pr_err("Boot memory [%llx %llx] is offline, can be removed\n",
				       addr, addr + (1UL << PA_SECTION_SHIFT));
		}
	}
}

static int __init prevent_bootmem_remove_init(void)
{
	int ret = 0;

	if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
		return ret;

	validate_bootmem_online();
	ret = register_memory_notifier(&prevent_bootmem_remove_nb);
	if (ret)
		pr_err("%s: Notifier registration failed %d\n", __func__, ret);

	return ret;
}
early_initcall(prevent_bootmem_remove_init);
#endif

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
		/*
		 * Break-before-make (BBM) is required for all user space
		 * mappings when the permission changes from executable to
		 * non-executable on CPUs affected by erratum #2645198.
		 */
		if (pte_user_exec(ptep_get(ptep)))
			return ptep_clear_flush(vma, addr, ptep);
	}
	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
			     pte_t old_pte, pte_t pte)
{
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;
	unsigned long daif;

	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

	if (cnp)
		ttbr1 |= TTBR_CNP_BIT;

	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

	cpu_install_idmap();

	/*
	 * We really don't want to take *any* exceptions while TTBR1 is
	 * in the process of being replaced so mask everything.
	 */
	daif = local_daif_save();
	replace_phys(ttbr1);
	local_daif_restore(daif);

	cpu_uninstall_idmap();
}

#ifdef CONFIG_ARCH_HAS_PKEYS
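/*
 * POR_EL0 holds one 4-bit permission field per protection key;
 * arch_set_user_pkey_access() computes the field for @pkey and splices it
 * into the register, leaving the other keys' fields untouched.
 */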
int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val)
{
	u64 new_por;
	u64 old_por;

	if (!system_supports_poe())
		return -ENOSPC;

	/*
	 * This code should only be called with valid 'pkey'
	 * values originating from in-kernel users. Complain
	 * if a bad value is observed.
	 */
	if (WARN_ON_ONCE(pkey >= arch_max_pkey()))
		return -EINVAL;

	/* Set the bits we need in POR: */
	new_por = POE_RWX;
	if (init_val & PKEY_DISABLE_WRITE)
		new_por &= ~POE_W;
	if (init_val & PKEY_DISABLE_ACCESS)
		new_por &= ~POE_RW;
	if (init_val & PKEY_DISABLE_READ)
		new_por &= ~POE_R;
	if (init_val & PKEY_DISABLE_EXECUTE)
		new_por &= ~POE_X;

	/* Shift the bits in to the correct place in POR for pkey: */
	new_por = POR_ELx_PERM_PREP(pkey, new_por);

	/* Get old POR and mask off any old bits in place: */
	old_por = read_sysreg_s(SYS_POR_EL0);
	old_por &= ~(POE_MASK << POR_ELx_PERM_SHIFT(pkey));

	/* Write old part along with new part: */
	write_sysreg_s(old_por | new_por, SYS_POR_EL0);

	return 0;
}
#endif
