xref: /linux/arch/arm64/mm/pageattr.c (revision fc6dfd5547794b0bf10790576a9d97443d975439)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

bool can_set_direct_map(void)
{
	/*
	 * rodata_full, DEBUG_PAGEALLOC and KFENCE require the linear map to be
	 * mapped at page granularity, so that it is possible to
	 * protect/unprotect single pages.
	 */
	return rodata_full || debug_pagealloc_enabled() ||
		IS_ENABLED(CONFIG_KFENCE);
}
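
/*
 * For illustration, the linear-map setup code in arch/arm64/mm/mmu.c consults
 * this helper to decide whether block and contiguous mappings must be avoided
 * so that individual pages can later be protected; roughly (a sketch, flag
 * names may differ between kernel versions):
 *
 *	if (can_set_direct_map())
 *		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 */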

static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)kasan_reset_tag(area->addr) + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}
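
/*
 * For illustration, a typical caller operates on memory obtained from
 * vmalloc() (or vmap()), which satisfies the single-VM_ALLOC-area check in
 * change_memory_common() above; passing a linear-map or otherwise unmapped
 * address returns -EINVAL. A minimal sketch:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *
 *	if (buf) {
 *		set_memory_ro((unsigned long)buf, 1);	// write-protect one page
 *		...
 *		set_memory_rw((unsigned long)buf, 1);	// make it writable again
 *		vfree(buf);
 *	}
 */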

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(PTE_MAYBE_GP));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_MAYBE_GP),
					__pgprot(PTE_PXN));
}

int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}
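
/*
 * For illustration, KFENCE on arm64 toggles the validity of its guard pages
 * through this helper; roughly (a sketch based on
 * arch/arm64/include/asm/kfence.h, details may vary by kernel version):
 *
 *	static inline bool kfence_protect_page(unsigned long addr, bool protect)
 *	{
 *		set_memory_valid(addr, 1, !protect);
 *		return true;
 *	}
 */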

int set_direct_map_invalid_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(PTE_VALID),
	};

	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

int set_direct_map_default_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
		.clear_mask = __pgprot(PTE_RDONLY),
	};

	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}
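
/*
 * The "_noflush" suffix means the caller is responsible for TLB maintenance.
 * For illustration, a generic caller (e.g. mm/secretmem.c or the
 * VM_FLUSH_RESET_PERMS teardown path) removes a page from the linear map
 * roughly like this (a sketch, exact call sites vary):
 *
 *	unsigned long addr = (unsigned long)page_address(page);
 *
 *	set_direct_map_invalid_noflush(page);
 *	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 */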

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!can_set_direct_map())
		return;

	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
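
/*
 * For illustration, the page allocator maps and unmaps pages through this
 * hook when debug_pagealloc is enabled; roughly (a sketch of the generic
 * wrappers in include/linux/mm.h, names may differ by kernel version):
 *
 *	static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
 *	{
 *		if (debug_pagealloc_enabled_static())
 *			__kernel_map_pages(page, numpages, 1);
 *	}
 */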

/*
 * This function is used to determine if a linear map page has been marked as
 * not-valid. Walk the page table and check the PTE_VALID bit. This is based
 * on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	if (!can_set_direct_map())
		return true;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return false;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}
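
/*
 * For illustration, hibernation uses this helper to avoid reading through a
 * linear-map page that debug_pagealloc or the direct-map API has unmapped;
 * roughly (a sketch of kernel/power/snapshot.c, details may vary):
 *
 *	if (kernel_page_present(s_page))
 *		do_copy_page(dst, page_address(s_page));
 *	else {
 *		hibernate_map_page(s_page);
 *		do_copy_page(dst, page_address(s_page));
 *		hibernate_unmap_page(s_page);
 *	}
 */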