xref: /linux/arch/arm64/kernel/pi/map_range.c (revision feafee284579d29537a5a56ba8f23894f0463f3d)
// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2023 Google LLC
// Author: Ard Biesheuvel <ardb@google.com>

#include <linux/types.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include "pi.h"

/**
 * map_range - Map a contiguous range of physical pages into virtual memory
 *
 * @pte:		Address of physical pointer to array of pages to
 *			allocate page tables from
 * @start:		Virtual address of the start of the range
 * @end:		Virtual address of the end of the range (exclusive)
 * @pa:			Physical address of the start of the range
 * @prot:		Access permissions of the range
 * @level:		Translation level for the mapping
 * @tbl:		The level @level page table to create the mappings in
 * @may_use_cont:	Whether the use of the contiguous attribute is allowed
 * @va_offset:		Offset between a physical page and its current mapping
 *			in the VA space
 */
void __init map_range(phys_addr_t *pte, u64 start, u64 end, phys_addr_t pa,
		      pgprot_t prot, int level, pte_t *tbl, bool may_use_cont,
		      u64 va_offset)
{
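	/*
	 * Each entry at this level covers PAGE_SIZE << lshift bytes of VA
	 * space; lmask is the offset mask within a single entry. Leaf
	 * entries are only written at level 2 (blocks) and level 3 (pages);
	 * levels 0 and 1 always recurse via table entries. cmask gives the
	 * alignment required for the contiguous attribute, which is only
	 * used for level 3 mappings here (U64_MAX makes the alignment
	 * check below fail at all other levels).
	 */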
	u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX;
	ptdesc_t protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
	int lshift = (3 - level) * PTDESC_TABLE_SHIFT;
	u64 lmask = (PAGE_SIZE << lshift) - 1;

	start	&= PAGE_MASK;
	pa	&= PAGE_MASK;

	/* Advance tbl to the entry that covers start */
	tbl += (start >> (lshift + PAGE_SHIFT)) % PTRS_PER_PTE;

	/*
	 * Set the right block/page bits for this level unless we are
	 * clearing the mapping
	 */
	if (protval)
		protval |= (level == 2) ? PMD_TYPE_SECT : PTE_TYPE_PAGE;

	while (start < end) {
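		/*
		 * Stop at the next entry boundary at this level or at the
		 * page aligned end of the range, whichever comes first.
		 */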
		u64 next = min((start | lmask) + 1, PAGE_ALIGN(end));

		if (level < 2 || (level == 2 && (start | next | pa) & lmask)) {
			/*
			 * This chunk needs a finer grained mapping. Create a
			 * table mapping if necessary and recurse.
			 */
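			/*
			 * *pte is the PA of the next free table page, and
			 * each table consumes PTRS_PER_PTE * sizeof(pte_t),
			 * i.e. exactly PAGE_SIZE bytes.
			 */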
			if (pte_none(*tbl)) {
				*tbl = __pte(__phys_to_pte_val(*pte) |
					     PMD_TYPE_TABLE | PMD_TABLE_UXN);
				*pte += PTRS_PER_PTE * sizeof(pte_t);
			}
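			/*
			 * Recurse into the next level table, which must be
			 * accessed via its current mapping in the VA space:
			 * its PA plus va_offset.
			 */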
			map_range(pte, start, next, pa, prot, level + 1,
				  (pte_t *)(__pte_to_phys(*tbl) + va_offset),
				  may_use_cont, va_offset);
		} else {
			/*
			 * Start a contiguous range if start and pa are
			 * suitably aligned
			 */
			if (((start | pa) & cmask) == 0 && may_use_cont)
				protval |= PTE_CONT;

			/*
			 * Clear the contiguous attribute if the remaining
			 * range does not cover a contiguous block
			 */
			if ((end & ~cmask) <= start)
				protval &= ~PTE_CONT;

			/* Put down a block or page mapping */
			*tbl = __pte(__phys_to_pte_val(pa) | protval);
		}
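		/* Advance the PA, the VA, and the table slot to the next chunk */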
		pa += next - start;
		start = next;
		tbl++;
	}
}

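/*
 * Usage sketch (hypothetical, not taken from the kernel sources): with 4k
 * pages, identity map 2 MiB of normal memory at PA 0x80000000 through an
 * existing level 2 table lvl2_table, drawing any table pages from a
 * scratch area at scratch_pa (both names made up for illustration):
 *
 *	phys_addr_t tbl_pa = scratch_pa;
 *
 *	map_range(&tbl_pa, 0x80000000, 0x80200000, 0x80000000, PAGE_KERNEL,
 *		  2, lvl2_table, false, 0);
 *
 * Since start, end and pa are all 2 MiB aligned, this comes out as a
 * single level 2 block entry and consumes no table pages.
 */
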
asmlinkage phys_addr_t __init create_init_idmap(pgd_t *pg_dir, ptdesc_t clrmask)
{
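	/*
	 * Page table pages are handed out from a bump allocator that starts
	 * one page past the root table; map_range() advances ptep as it
	 * consumes them.
	 */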
	phys_addr_t ptep = (phys_addr_t)pg_dir + PAGE_SIZE; /* MMU is off */
	pgprot_t text_prot = PAGE_KERNEL_ROX;
	pgprot_t data_prot = PAGE_KERNEL;

	pgprot_val(text_prot) &= ~clrmask;
	pgprot_val(data_prot) &= ~clrmask;

	/* MMU is off; pointer casts to phys_addr_t are safe */
	map_range(&ptep, (u64)_stext, (u64)__initdata_begin,
		  (phys_addr_t)_stext, text_prot, IDMAP_ROOT_LEVEL,
		  (pte_t *)pg_dir, false, 0);
	map_range(&ptep, (u64)__initdata_begin, (u64)_end,
		  (phys_addr_t)__initdata_begin, data_prot, IDMAP_ROOT_LEVEL,
		  (pte_t *)pg_dir, false, 0);

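	/* Return the end of the allocation so the caller can tell how much was used */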
	return ptep;
}