1 // SPDX-License-Identifier: GPL-2.0-only
2
3 #define _GNU_SOURCE
4 #include <stdbool.h>
5 #include <stdint.h>
6 #include <fcntl.h>
7 #include <assert.h>
8 #include <linux/mman.h>
9 #include <sys/mman.h>
10 #include "../kselftest.h"
11 #include "thp_settings.h"
12 #include "uffd-common.h"
13
/* Fd for /proc/self/pagemap, opened in main(). */
static int pagemap_fd;
/* System base page size, set in main() via getpagesize(). */
static size_t pagesize;
/* There is exactly one base page size; referenced by testcases[]. */
static int nr_pagesizes = 1;
/* Detected THP sizes in bytes, and how many entries are valid. */
static int nr_thpsizes;
static size_t thpsizes[20];
/* Detected hugetlb page sizes in bytes, and how many entries are valid. */
static int nr_hugetlbsizes;
static size_t hugetlbsizes[10];
21
detect_thp_sizes(size_t sizes[],int max)22 static int detect_thp_sizes(size_t sizes[], int max)
23 {
24 int count = 0;
25 unsigned long orders;
26 size_t kb;
27 int i;
28
29 /* thp not supported at all. */
30 if (!read_pmd_pagesize())
31 return 0;
32
33 orders = thp_supported_orders();
34
35 for (i = 0; orders && count < max; i++) {
36 if (!(orders & (1UL << i)))
37 continue;
38 orders &= ~(1UL << i);
39 kb = (pagesize >> 10) << i;
40 sizes[count++] = kb * 1024;
41 ksft_print_msg("[INFO] detected THP size: %zu KiB\n", kb);
42 }
43
44 return count;
45 }
46
/*
 * Map @size bytes of anonymous memory whose start address is aligned to
 * @size (which must be a power of two). Works by over-allocating twice the
 * requested length and trimming the unaligned head and tail. Returns
 * MAP_FAILED on error, matching plain mmap().
 */
static void *mmap_aligned(size_t size, int prot, int flags)
{
	size_t len = size * 2;
	char *raw, *aligned;

	raw = mmap(NULL, len, prot, flags, -1, 0);
	if (raw == MAP_FAILED)
		return raw;

	/* Round up to the next @size boundary inside the raw mapping. */
	aligned = (char *)(((uintptr_t)raw + size - 1) & ~(size - 1));

	/* Release the slack before and after the aligned window. */
	munmap(raw, aligned - raw);
	munmap(aligned + size, raw + len - (aligned + size));

	return aligned;
}
62
alloc_one_folio(size_t size,bool private,bool hugetlb)63 static void *alloc_one_folio(size_t size, bool private, bool hugetlb)
64 {
65 bool thp = !hugetlb && size > pagesize;
66 int flags = MAP_ANONYMOUS;
67 int prot = PROT_READ | PROT_WRITE;
68 char *mem, *addr;
69
70 assert((size & (size - 1)) == 0);
71
72 if (private)
73 flags |= MAP_PRIVATE;
74 else
75 flags |= MAP_SHARED;
76
77 /*
78 * For THP, we must explicitly enable the THP size, allocate twice the
79 * required space then manually align.
80 */
81 if (thp) {
82 struct thp_settings settings = *thp_current_settings();
83
84 if (private)
85 settings.hugepages[sz2ord(size, pagesize)].enabled = THP_ALWAYS;
86 else
87 settings.shmem_hugepages[sz2ord(size, pagesize)].enabled = SHMEM_ALWAYS;
88
89 thp_push_settings(&settings);
90
91 mem = mmap_aligned(size, prot, flags);
92 } else {
93 if (hugetlb) {
94 flags |= MAP_HUGETLB;
95 flags |= __builtin_ctzll(size) << MAP_HUGE_SHIFT;
96 }
97
98 mem = mmap(NULL, size, prot, flags, -1, 0);
99 }
100
101 if (mem == MAP_FAILED) {
102 mem = NULL;
103 goto out;
104 }
105
106 assert(((uintptr_t)mem & (size - 1)) == 0);
107
108 /*
109 * Populate the folio by writing the first byte and check that all pages
110 * are populated. Finally set the whole thing to non-zero data to avoid
111 * kernel from mapping it back to the zero page.
112 */
113 mem[0] = 1;
114 for (addr = mem; addr < mem + size; addr += pagesize) {
115 if (!pagemap_is_populated(pagemap_fd, addr)) {
116 munmap(mem, size);
117 mem = NULL;
118 goto out;
119 }
120 }
121 memset(mem, 1, size);
122 out:
123 if (thp)
124 thp_pop_settings();
125
126 return mem;
127 }
128
check_uffd_wp_state(void * mem,size_t size,bool expect)129 static bool check_uffd_wp_state(void *mem, size_t size, bool expect)
130 {
131 uint64_t pte;
132 void *addr;
133
134 for (addr = mem; addr < mem + size; addr += pagesize) {
135 pte = pagemap_get_entry(pagemap_fd, addr);
136 if (!!(pte & PM_UFFD_WP) != expect) {
137 ksft_test_result_fail("uffd-wp not %s for pte %lu!\n",
138 expect ? "set" : "clear",
139 (addr - mem) / pagesize);
140 return false;
141 }
142 }
143
144 return true;
145 }
146
range_is_swapped(void * addr,size_t size)147 static bool range_is_swapped(void *addr, size_t size)
148 {
149 for (; size; addr += pagesize, size -= pagesize)
150 if (!pagemap_is_swapped(pagemap_fd, addr))
151 return false;
152 return true;
153 }
154
/*
 * Run one uffd-wp + mremap test: allocate a single folio of @size bytes,
 * write-protect it with userfaultfd, optionally swap it out, then mremap()
 * it to a fresh aligned address and verify that the uffd-wp bit was cleared
 * for every PTE of the moved mapping (UFFD_FEATURE_EVENT_REMAP is not
 * requested). Results are reported through the kselftest framework.
 */
static void test_one_folio(uffd_global_test_opts_t *gopts, size_t size, bool private,
			   bool swapout, bool hugetlb)
{
	struct uffdio_writeprotect wp_prms;
	uint64_t features = 0;
	void *addr = NULL;
	void *mem = NULL;

	/* hugetlb pages cannot be swapped out via MADV_PAGEOUT. */
	assert(!(hugetlb && swapout));

	ksft_print_msg("[RUN] %s(size=%zu, private=%s, swapout=%s, hugetlb=%s)\n",
		       __func__,
		       size,
		       private ? "true" : "false",
		       swapout ? "true" : "false",
		       hugetlb ? "true" : "false");

	/* Allocate a folio of required size and type. */
	mem = alloc_one_folio(size, private, hugetlb);
	if (!mem) {
		ksft_test_result_fail("alloc_one_folio() failed\n");
		goto out;
	}

	/* Register range for uffd-wp. */
	if (userfaultfd_open(gopts, &features)) {
		/* Missing userfaultfd support is a skip, not a failure. */
		if (errno == ENOENT)
			ksft_test_result_skip("userfaultfd not available\n");
		else
			ksft_test_result_fail("userfaultfd_open() failed\n");
		goto out;
	}
	if (uffd_register(gopts->uffd, mem, size, false, true, false)) {
		ksft_test_result_fail("uffd_register() failed\n");
		goto out;
	}
	/* Write-protect the whole folio through userfaultfd. */
	wp_prms.mode = UFFDIO_WRITEPROTECT_MODE_WP;
	wp_prms.range.start = (uintptr_t)mem;
	wp_prms.range.len = size;
	if (ioctl(gopts->uffd, UFFDIO_WRITEPROTECT, &wp_prms)) {
		ksft_test_result_fail("ioctl(UFFDIO_WRITEPROTECT) failed\n");
		goto out;
	}

	if (swapout) {
		/* Best effort; skip (rather than fail) if swap is unusable. */
		madvise(mem, size, MADV_PAGEOUT);
		if (!range_is_swapped(mem, size)) {
			ksft_test_result_skip("MADV_PAGEOUT did not work, is swap enabled?\n");
			goto out;
		}
	}

	/* Check that uffd-wp is set for all PTEs in range. */
	if (!check_uffd_wp_state(mem, size, true))
		goto out;

	/*
	 * Move the mapping to a new, aligned location. Since
	 * UFFD_FEATURE_EVENT_REMAP is not set, we expect the uffd-wp bit for
	 * each PTE to be cleared in the new mapping.
	 */
	addr = mmap_aligned(size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS);
	if (addr == MAP_FAILED) {
		ksft_test_result_fail("mmap_aligned() failed\n");
		goto out;
	}
	if (mremap(mem, size, size, MREMAP_FIXED | MREMAP_MAYMOVE, addr) == MAP_FAILED) {
		ksft_test_result_fail("mremap() failed\n");
		munmap(addr, size);
		goto out;
	}
	mem = addr;

	/* Check that uffd-wp is cleared for all PTEs in range. */
	if (!check_uffd_wp_state(mem, size, false))
		goto out;

	ksft_test_result_pass("%s(size=%zu, private=%s, swapout=%s, hugetlb=%s)\n",
			      __func__,
			      size,
			      private ? "true" : "false",
			      swapout ? "true" : "false",
			      hugetlb ? "true" : "false");
out:
	/* Unmap whichever mapping (original or remapped) we still own. */
	if (mem)
		munmap(mem, size);
	if (gopts->uffd >= 0) {
		close(gopts->uffd);
		gopts->uffd = -1;
	}
}
246
/*
 * One table-driven test configuration. Sizes are detected at runtime, so
 * the table points at the size arrays and their counts indirectly.
 */
struct testcase {
	size_t *sizes;		/* Array of folio sizes (bytes) to test. */
	int *nr_sizes;		/* Number of valid entries in sizes[]. */
	bool private;		/* MAP_PRIVATE if true, else MAP_SHARED. */
	bool swapout;		/* MADV_PAGEOUT the range before checking? */
	bool hugetlb;		/* Allocate a hugetlb folio? */
};
254
/*
 * All test configurations: {private, shared} x {swapout, no swapout} for
 * base pages and THP. hugetlb is only run without swapout, since
 * MADV_PAGEOUT cannot swap out hugetlb pages (asserted in test_one_folio()).
 */
static const struct testcase testcases[] = {
	/* base pages. */
	{
		.sizes = &pagesize,
		.nr_sizes = &nr_pagesizes,
		.private = false,
		.swapout = false,
		.hugetlb = false,
	},
	{
		.sizes = &pagesize,
		.nr_sizes = &nr_pagesizes,
		.private = true,
		.swapout = false,
		.hugetlb = false,
	},
	{
		.sizes = &pagesize,
		.nr_sizes = &nr_pagesizes,
		.private = false,
		.swapout = true,
		.hugetlb = false,
	},
	{
		.sizes = &pagesize,
		.nr_sizes = &nr_pagesizes,
		.private = true,
		.swapout = true,
		.hugetlb = false,
	},

	/* thp. */
	{
		.sizes = thpsizes,
		.nr_sizes = &nr_thpsizes,
		.private = false,
		.swapout = false,
		.hugetlb = false,
	},
	{
		.sizes = thpsizes,
		.nr_sizes = &nr_thpsizes,
		.private = true,
		.swapout = false,
		.hugetlb = false,
	},
	{
		.sizes = thpsizes,
		.nr_sizes = &nr_thpsizes,
		.private = false,
		.swapout = true,
		.hugetlb = false,
	},
	{
		.sizes = thpsizes,
		.nr_sizes = &nr_thpsizes,
		.private = true,
		.swapout = true,
		.hugetlb = false,
	},

	/* hugetlb. */
	{
		.sizes = hugetlbsizes,
		.nr_sizes = &nr_hugetlbsizes,
		.private = false,
		.swapout = false,
		.hugetlb = true,
	},
	{
		.sizes = hugetlbsizes,
		.nr_sizes = &nr_hugetlbsizes,
		.private = true,
		.swapout = false,
		.hugetlb = true,
	},
};
332
main(int argc,char ** argv)333 int main(int argc, char **argv)
334 {
335 uffd_global_test_opts_t gopts = { 0 };
336 struct thp_settings settings;
337 int i, j, plan = 0;
338
339 pagesize = getpagesize();
340 nr_thpsizes = detect_thp_sizes(thpsizes, ARRAY_SIZE(thpsizes));
341 nr_hugetlbsizes = detect_hugetlb_page_sizes(hugetlbsizes,
342 ARRAY_SIZE(hugetlbsizes));
343
344 /* If THP is supported, save THP settings and initially disable THP. */
345 if (nr_thpsizes) {
346 thp_save_settings();
347 thp_read_settings(&settings);
348 for (i = 0; i < NR_ORDERS; i++) {
349 settings.hugepages[i].enabled = THP_NEVER;
350 settings.shmem_hugepages[i].enabled = SHMEM_NEVER;
351 }
352 thp_push_settings(&settings);
353 }
354
355 for (i = 0; i < ARRAY_SIZE(testcases); i++)
356 plan += *testcases[i].nr_sizes;
357 ksft_set_plan(plan);
358
359 pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
360 if (pagemap_fd < 0)
361 ksft_exit_fail_msg("opening pagemap failed\n");
362
363 for (i = 0; i < ARRAY_SIZE(testcases); i++) {
364 const struct testcase *tc = &testcases[i];
365
366 for (j = 0; j < *tc->nr_sizes; j++)
367 test_one_folio(&gopts, tc->sizes[j], tc->private,
368 tc->swapout, tc->hugetlb);
369 }
370
371 /* If THP is supported, restore original THP settings. */
372 if (nr_thpsizes)
373 thp_restore_settings();
374
375 i = ksft_get_fail_cnt();
376 if (i)
377 ksft_exit_fail_msg("%d out of %d tests failed\n",
378 i, ksft_test_num());
379 ksft_exit_pass();
380 }
381