xref: /linux/arch/arm64/mm/pageattr.c (revision 63307d015b91e626c97bb82e88054af3d0b74643)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

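/*
 * The set/clear masks handed to the apply_to_page_range() callback below:
 * bits in clear_mask are removed from each PTE, then bits in set_mask are
 * added.
 */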
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

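/*
 * When true, permission changes made to a vmalloc'd range are also applied
 * to that range's alias in the linear map. The default comes from
 * CONFIG_RODATA_FULL_DEFAULT_ENABLED and can be overridden at boot via the
 * rodata= parameter.
 */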
bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

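/*
 * apply_to_page_range() callback: update the protection bits of a single
 * PTE. The clear mask is applied first, then the set mask.
 */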
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

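/*
 * Common entry point for the set_memory_*() helpers below: check that the
 * whole range lies within a single VM_ALLOC vm area, optionally apply the
 * same change to the linear-map alias of the backing pages, then update the
 * vmalloc mapping itself.
 */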
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE*numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}

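/*
 * Example (illustrative only, not part of this file): a typical caller
 * write-protects a vmalloc'd buffer once it has been initialised, and makes
 * it writable again before modifying or freeing it. The buffer and page
 * count below are hypothetical.
 *
 *	void *buf = vmalloc(nr_pages * PAGE_SIZE);
 *
 *	... fill in buf ...
 *	set_memory_ro((unsigned long)buf, nr_pages);
 *
 *	... later ...
 *	set_memory_rw((unsigned long)buf, nr_pages);
 *	vfree(buf);
 */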
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

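/*
 * PTE_PXN is the privileged execute-never bit, so the two helpers below
 * control whether the kernel itself may execute from the range.
 */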
int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);

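/*
 * Toggle PTE_VALID over a range. Unlike the helpers above, this bypasses
 * change_memory_common(), so there is no check that the range belongs to a
 * vmalloc area; the caller is responsible for ensuring it is mapped with
 * PAGE_SIZE pages.
 */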
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

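/*
 * With CONFIG_DEBUG_PAGEALLOC, the page allocator calls __kernel_map_pages()
 * to invalidate the linear-map entries of freed pages (and to make them
 * valid again on allocation), so stray accesses to freed memory fault
 * immediately.
 */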
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#ifdef CONFIG_HIBERNATION
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */