xref: /linux/tools/testing/selftests/mm/uffd-wp-mremap.c (revision 2ccd9fecd9163f168761d4398564c81554f636ef)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 #define _GNU_SOURCE
4 #include <stdbool.h>
5 #include <stdint.h>
6 #include <fcntl.h>
7 #include <assert.h>
8 #include <linux/mman.h>
9 #include <sys/mman.h>
10 #include "../kselftest.h"
11 #include "thp_settings.h"
12 #include "uffd-common.h"
13 
14 static int pagemap_fd;
15 static size_t pagesize;
16 static int nr_pagesizes = 1;
17 static int nr_thpsizes;
18 static size_t thpsizes[20];
19 static int nr_hugetlbsizes;
20 static size_t hugetlbsizes[10];
21 
22 static int detect_thp_sizes(size_t sizes[], int max)
23 {
24 	int count = 0;
25 	unsigned long orders;
26 	size_t kb;
27 	int i;
28 
29 	/* thp not supported at all. */
30 	if (!read_pmd_pagesize())
31 		return 0;
32 
33 	orders = thp_supported_orders();
34 
35 	for (i = 0; orders && count < max; i++) {
36 		if (!(orders & (1UL << i)))
37 			continue;
38 		orders &= ~(1UL << i);
39 		kb = (pagesize >> 10) << i;
40 		sizes[count++] = kb * 1024;
41 		ksft_print_msg("[INFO] detected THP size: %zu KiB\n", kb);
42 	}
43 
44 	return count;
45 }
46 
/*
 * mmap() an anonymous region of @size bytes, aligned to @size.
 *
 * @size must be a power of two (callers assert this). Works by over-mapping
 * twice the size, then trimming the misaligned head and the unused tail.
 * Returns the aligned address, or MAP_FAILED on mmap() failure.
 */
static void *mmap_aligned(size_t size, int prot, int flags)
{
	size_t mmap_size = size * 2;
	size_t head, tail;
	char *mmap_mem, *mem;

	mmap_mem = mmap(NULL, mmap_size, prot, flags, -1, 0);
	if (mmap_mem == MAP_FAILED)
		return mmap_mem;

	/* Round up to the next @size boundary (requires power-of-two size). */
	mem = (char *)(((uintptr_t)mmap_mem + size - 1) & ~(size - 1));

	/*
	 * Trim the excess. Skip empty ranges: munmap() with a zero length
	 * fails with EINVAL on Linux, which happens whenever mmap() already
	 * returned an aligned address.
	 */
	head = mem - mmap_mem;
	tail = mmap_size - head - size;
	if (head)
		munmap(mmap_mem, head);
	if (tail)
		munmap(mem + size, tail);

	return mem;
}
62 
/*
 * Allocate and fully populate a single folio of @size bytes.
 *
 * @size:    folio size in bytes; must be a power of two (asserted below).
 * @private: map MAP_PRIVATE when true, MAP_SHARED otherwise.
 * @hugetlb: back the mapping with hugetlb pages of exactly @size.
 *
 * Returns the @size-aligned mapping on success, or NULL on failure —
 * including the case where not every subpage got populated, meaning we
 * did not get a single large folio.
 */
static void *alloc_one_folio(size_t size, bool private, bool hugetlb)
{
	/* Anything larger than a base page that isn't hugetlb must be THP. */
	bool thp = !hugetlb && size > pagesize;
	int flags = MAP_ANONYMOUS;
	int prot = PROT_READ | PROT_WRITE;
	char *mem, *addr;

	/* The alignment tricks below rely on size being a power of two. */
	assert((size & (size - 1)) == 0);

	if (private)
		flags |= MAP_PRIVATE;
	else
		flags |= MAP_SHARED;

	/*
	 * For THP, we must explicitly enable the THP size, allocate twice the
	 * required space then manually align.
	 */
	if (thp) {
		struct thp_settings settings = *thp_current_settings();

		/* Anon and shmem THP are controlled by separate sysfs knobs. */
		if (private)
			settings.hugepages[sz2ord(size, pagesize)].enabled = THP_ALWAYS;
		else
			settings.shmem_hugepages[sz2ord(size, pagesize)].enabled = SHMEM_ALWAYS;

		thp_push_settings(&settings);

		mem = mmap_aligned(size, prot, flags);
	} else {
		if (hugetlb) {
			/* Encode the desired hugetlb page size in the flags. */
			flags |= MAP_HUGETLB;
			flags |= __builtin_ctzll(size) << MAP_HUGE_SHIFT;
		}

		mem = mmap(NULL, size, prot, flags, -1, 0);
	}

	if (mem == MAP_FAILED) {
		mem = NULL;
		goto out;
	}

	assert(((uintptr_t)mem & (size - 1)) == 0);

	/*
	 * Populate the folio by writing the first byte and check that all pages
	 * are populated. Finally set the whole thing to non-zero data to avoid
	 * kernel from mapping it back to the zero page.
	 */
	mem[0] = 1;
	for (addr = mem; addr < mem + size; addr += pagesize) {
		if (!pagemap_is_populated(pagemap_fd, addr)) {
			munmap(mem, size);
			mem = NULL;
			goto out;
		}
	}
	memset(mem, 1, size);
out:
	/* Undo the temporary THP settings change pushed above. */
	if (thp)
		thp_pop_settings();

	return mem;
}
128 
129 static bool check_uffd_wp_state(void *mem, size_t size, bool expect)
130 {
131 	uint64_t pte;
132 	void *addr;
133 
134 	for (addr = mem; addr < mem + size; addr += pagesize) {
135 		pte = pagemap_get_entry(pagemap_fd, addr);
136 		if (!!(pte & PM_UFFD_WP) != expect) {
137 			ksft_test_result_fail("uffd-wp not %s for pte %lu!\n",
138 					      expect ? "set" : "clear",
139 					      (addr - mem) / pagesize);
140 			return false;
141 		}
142 	}
143 
144 	return true;
145 }
146 
147 static bool range_is_swapped(void *addr, size_t size)
148 {
149 	for (; size; addr += pagesize, size -= pagesize)
150 		if (!pagemap_is_swapped(pagemap_fd, addr))
151 			return false;
152 	return true;
153 }
154 
/*
 * Run one scenario: allocate a folio, write-protect it with uffd-wp,
 * optionally swap it out, mremap() it to a new location, and verify the
 * uffd-wp bit is set before the move and cleared after it (since
 * UFFD_FEATURE_EVENT_REMAP is not requested). Reports exactly one ksft
 * pass/fail/skip result.
 */
static void test_one_folio(size_t size, bool private, bool swapout, bool hugetlb)
{
	struct uffdio_writeprotect wp_prms;
	uint64_t features = 0;
	void *addr = NULL;
	void *mem = NULL;

	/* hugetlb pages cannot be swapped out; the combination is invalid. */
	assert(!(hugetlb && swapout));

	ksft_print_msg("[RUN] %s(size=%zu, private=%s, swapout=%s, hugetlb=%s)\n",
				__func__,
				size,
				private ? "true" : "false",
				swapout ? "true" : "false",
				hugetlb ? "true" : "false");

	/* Allocate a folio of required size and type. */
	mem = alloc_one_folio(size, private, hugetlb);
	if (!mem) {
		ksft_test_result_fail("alloc_one_folio() failed\n");
		goto out;
	}

	/* Register range for uffd-wp. */
	if (userfaultfd_open(&features)) {
		/* ENOENT: kernel lacks userfaultfd — skip rather than fail. */
		if (errno == ENOENT)
			ksft_test_result_skip("userfaultfd not available\n");
		else
			ksft_test_result_fail("userfaultfd_open() failed\n");
		goto out;
	}
	if (uffd_register(uffd, mem, size, false, true, false)) {
		ksft_test_result_fail("uffd_register() failed\n");
		goto out;
	}
	/* Write-protect the whole folio via UFFDIO_WRITEPROTECT. */
	wp_prms.mode = UFFDIO_WRITEPROTECT_MODE_WP;
	wp_prms.range.start = (uintptr_t)mem;
	wp_prms.range.len = size;
	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp_prms)) {
		ksft_test_result_fail("ioctl(UFFDIO_WRITEPROTECT) failed\n");
		goto out;
	}

	if (swapout) {
		/* Best-effort; verified via pagemap below, so skip on no-op. */
		madvise(mem, size, MADV_PAGEOUT);
		if (!range_is_swapped(mem, size)) {
			ksft_test_result_skip("MADV_PAGEOUT did not work, is swap enabled?\n");
			goto out;
		}
	}

	/* Check that uffd-wp is set for all PTEs in range. */
	if (!check_uffd_wp_state(mem, size, true))
		goto out;

	/*
	 * Move the mapping to a new, aligned location. Since
	 * UFFD_FEATURE_EVENT_REMAP is not set, we expect the uffd-wp bit for
	 * each PTE to be cleared in the new mapping.
	 */
	addr = mmap_aligned(size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS);
	if (addr == MAP_FAILED) {
		ksft_test_result_fail("mmap_aligned() failed\n");
		goto out;
	}
	if (mremap(mem, size, size, MREMAP_FIXED | MREMAP_MAYMOVE, addr) == MAP_FAILED) {
		ksft_test_result_fail("mremap() failed\n");
		munmap(addr, size);
		goto out;
	}
	/* The folio now lives at @addr; track it for cleanup. */
	mem = addr;

	/* Check that uffd-wp is cleared for all PTEs in range. */
	if (!check_uffd_wp_state(mem, size, false))
		goto out;

	ksft_test_result_pass("%s(size=%zu, private=%s, swapout=%s, hugetlb=%s)\n",
				__func__,
				size,
				private ? "true" : "false",
				swapout ? "true" : "false",
				hugetlb ? "true" : "false");
out:
	if (mem)
		munmap(mem, size);
	/* uffd is a global owned by uffd-common; close and reset it here. */
	if (uffd >= 0) {
		close(uffd);
		uffd = -1;
	}
}
245 
/* One row of the test matrix: a set of folio sizes plus mapping flavour. */
struct testcase {
	size_t *sizes;		/* array of candidate folio sizes */
	int *nr_sizes;		/* number of valid entries in @sizes */
	bool private;		/* MAP_PRIVATE vs MAP_SHARED */
	bool swapout;		/* swap the folio out before mremap() */
	bool hugetlb;		/* back the mapping with hugetlb pages */
};
253 
/*
 * Full test matrix. Size arrays are filled at runtime in main();
 * nr_thpsizes/nr_hugetlbsizes may be zero, which effectively skips
 * those rows. hugetlb rows omit swapout (not supported).
 */
static const struct testcase testcases[] = {
	/* base pages. */
	{
		.sizes = &pagesize,
		.nr_sizes = &nr_pagesizes,
		.private = false,
		.swapout = false,
		.hugetlb = false,
	},
	{
		.sizes = &pagesize,
		.nr_sizes = &nr_pagesizes,
		.private = true,
		.swapout = false,
		.hugetlb = false,
	},
	{
		.sizes = &pagesize,
		.nr_sizes = &nr_pagesizes,
		.private = false,
		.swapout = true,
		.hugetlb = false,
	},
	{
		.sizes = &pagesize,
		.nr_sizes = &nr_pagesizes,
		.private = true,
		.swapout = true,
		.hugetlb = false,
	},

	/* thp. */
	{
		.sizes = thpsizes,
		.nr_sizes = &nr_thpsizes,
		.private = false,
		.swapout = false,
		.hugetlb = false,
	},
	{
		.sizes = thpsizes,
		.nr_sizes = &nr_thpsizes,
		.private = true,
		.swapout = false,
		.hugetlb = false,
	},
	{
		.sizes = thpsizes,
		.nr_sizes = &nr_thpsizes,
		.private = false,
		.swapout = true,
		.hugetlb = false,
	},
	{
		.sizes = thpsizes,
		.nr_sizes = &nr_thpsizes,
		.private = true,
		.swapout = true,
		.hugetlb = false,
	},

	/* hugetlb. */
	{
		.sizes = hugetlbsizes,
		.nr_sizes = &nr_hugetlbsizes,
		.private = false,
		.swapout = false,
		.hugetlb = true,
	},
	{
		.sizes = hugetlbsizes,
		.nr_sizes = &nr_hugetlbsizes,
		.private = true,
		.swapout = false,
		.hugetlb = true,
	},
};
331 
332 int main(int argc, char **argv)
333 {
334 	struct thp_settings settings;
335 	int i, j, plan = 0;
336 
337 	pagesize = getpagesize();
338 	nr_thpsizes = detect_thp_sizes(thpsizes, ARRAY_SIZE(thpsizes));
339 	nr_hugetlbsizes = detect_hugetlb_page_sizes(hugetlbsizes,
340 						    ARRAY_SIZE(hugetlbsizes));
341 
342 	/* If THP is supported, save THP settings and initially disable THP. */
343 	if (nr_thpsizes) {
344 		thp_save_settings();
345 		thp_read_settings(&settings);
346 		for (i = 0; i < NR_ORDERS; i++) {
347 			settings.hugepages[i].enabled = THP_NEVER;
348 			settings.shmem_hugepages[i].enabled = SHMEM_NEVER;
349 		}
350 		thp_push_settings(&settings);
351 	}
352 
353 	for (i = 0; i < ARRAY_SIZE(testcases); i++)
354 		plan += *testcases[i].nr_sizes;
355 	ksft_set_plan(plan);
356 
357 	pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
358 	if (pagemap_fd < 0)
359 		ksft_exit_fail_msg("opening pagemap failed\n");
360 
361 	for (i = 0; i < ARRAY_SIZE(testcases); i++) {
362 		const struct testcase *tc = &testcases[i];
363 
364 		for (j = 0; j < *tc->nr_sizes; j++)
365 			test_one_folio(tc->sizes[j], tc->private, tc->swapout,
366 				       tc->hugetlb);
367 	}
368 
369 	/* If THP is supported, restore original THP settings. */
370 	if (nr_thpsizes)
371 		thp_restore_settings();
372 
373 	i = ksft_get_fail_cnt();
374 	if (i)
375 		ksft_exit_fail_msg("%d out of %d tests failed\n",
376 				   i, ksft_test_num());
377 	ksft_exit_pass();
378 }
379