// mm/page_table_check.c (revision f14aa5ea415b8add245e976bfab96a12986c6843)
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/page_table_check.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};
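
/*
 * Illustrative example of the invariants these counters encode: a
 * file-backed page mapped read-only by two processes is fine
 * (file_map_count == 2, anon_map_count == 0), but a page with both
 * counters non-zero, or an anonymous page mapped writable more than
 * once, indicates page table corruption and triggers a BUG().
 */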

static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

static int __init early_page_table_check_param(char *buf)
{
	return kstrtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);
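
/*
 * Example: with CONFIG_PAGE_TABLE_CHECK=y, the checker is enabled by
 * booting with
 *
 *	page_table_check=on
 *
 * on the kernel command line (see Documentation/mm/page_table_check.rst);
 * the parameter overrides the CONFIG_PAGE_TABLE_CHECK_ENFORCED default in
 * either direction.
 */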

static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
	.need_shared_flags = false,
};
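
/*
 * page_ext reserves .size extra bytes alongside every struct page; the
 * slice belonging to this checker is returned by page_ext_data(). A rough
 * sketch of that generic helper (see include/linux/page_ext.h):
 *
 *	return (void *)(page_ext) + page_table_check_ops.offset;
 */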

static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return page_ext_data(page_ext, &page_table_check_ops);
}

/*
 * An entry is removed from the page table; decrement the counters for that
 * page and verify that it is of the correct type and that the counters do
 * not become negative.
 */
static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}
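
/*
 * Note that pgcnt is the number of base pages covered by the cleared
 * entry (1 for a PTE, PMD_SIZE >> PAGE_SHIFT for a huge PMD), and
 * page_ext_next() steps to the extension of the next struct page, so each
 * base page keeps its own pair of counters.
 */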

/*
 * A new entry is added to the page table; increment the counters for that
 * page and verify that it is of the correct type and is not already mapped
 * with a different type by another process.
 */
static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}
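
/*
 * Hypothetical sketch of the bug the rw check above catches: if a buggy
 * caller installs a second writable mapping of an already-mapped anonymous
 * page, e.g.
 *
 *	set_pte_at(mm2, addr2, ptep2, pte_mkwrite(pte, vma2));
 *
 * (names hypothetical), anon_map_count reaches 2 with rw == true and the
 * BUG() fires.
 */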

/*
 * The page is on the free list or is being allocated; verify that the
 * counters are zero and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext;
	unsigned long i;

	BUG_ON(PageSlab(page));

	page_ext = page_ext_get(page);
	BUG_ON(!page_ext);
	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}
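
/*
 * This runs via the page_table_check_alloc()/page_table_check_free()
 * wrappers when pages enter or leave the buddy allocator, so freeing a
 * page that is still mapped into a user page table crashes here rather
 * than turning into silent memory corruption.
 */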

void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(pte_pfn(pte), PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

/* Whether the swap entry caches writable information */
static inline bool swap_cached_writable(swp_entry_t entry)
{
	return is_writable_device_exclusive_entry(entry) ||
	    is_writable_device_private_entry(entry) ||
	    is_writable_migration_entry(entry);
}

static inline void page_table_check_pte_flags(pte_t pte)
{
	if (pte_present(pte) && pte_uffd_wp(pte))
		WARN_ON_ONCE(pte_write(pte));
	else if (is_swap_pte(pte) && pte_swp_uffd_wp(pte))
		WARN_ON_ONCE(swap_cached_writable(pte_to_swp_entry(pte)));
}
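
/*
 * Illustrative example of the invariant checked above: a userfaultfd
 * write-protected PTE must never also be writable, so a buggy combination
 * such as
 *
 *	pte = pte_mkwrite(pte_mkuffd_wp(pte), vma);
 *
 * would trip the WARN_ON_ONCE() the next time the PTE is set.
 */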

void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
		unsigned int nr)
{
	unsigned int i;

	if (&init_mm == mm)
		return;

	page_table_check_pte_flags(pte);

	for (i = 0; i < nr; i++)
		__page_table_check_pte_clear(mm, ptep_get(ptep + i));
	if (pte_user_accessible_page(pte))
		page_table_check_set(pte_pfn(pte), nr, pte_write(pte));
}
EXPORT_SYMBOL(__page_table_check_ptes_set);
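
/*
 * This is not called directly; set_ptes() goes through the wrapper in
 * include/linux/page_table_check.h, which bails out early through the
 * static key. Roughly (a sketch of that wrapper):
 *
 *	static inline void page_table_check_ptes_set(struct mm_struct *mm,
 *				pte_t *ptep, pte_t pte, unsigned int nr)
 *	{
 *		if (static_branch_likely(&page_table_check_disabled))
 *			return;
 *		__page_table_check_ptes_set(mm, ptep, pte, nr);
 *	}
 */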

static inline void page_table_check_pmd_flags(pmd_t pmd)
{
	if (pmd_present(pmd) && pmd_uffd_wp(pmd))
		WARN_ON_ONCE(pmd_write(pmd));
	else if (is_swap_pmd(pmd) && pmd_swp_uffd_wp(pmd))
		WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd)));
}

void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	page_table_check_pmd_flags(pmd);

	__page_table_check_pmd_clear(mm, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);
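
/*
 * Worked example of the pgcnt arithmetic: on x86-64 with 4KiB base pages,
 * PMD_SIZE is 2MiB, so PMD_SIZE >> PAGE_SHIFT == 512 and one huge-PMD set
 * updates the counters of all 512 constituent base pages.
 */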

void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		if (WARN_ON(!ptep))
			return;
		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, ptep_get(ptep));
			addr += PAGE_SIZE;
			ptep++;
		}
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}
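
/*
 * An architecture opts in by selecting ARCH_SUPPORTS_PAGE_TABLE_CHECK and
 * implementing pte_user_accessible_page(), pmd_user_accessible_page() and
 * pud_user_accessible_page(); as of this revision that includes at least
 * x86-64, arm64 and riscv (see Documentation/mm/page_table_check.rst).
 */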
277