1 // SPDX-License-Identifier: GPL-2.0
2
3 #define _GNU_SOURCE
4 #include <stdio.h>
5 #include <fcntl.h>
6 #include <string.h>
7 #include <sys/mman.h>
8 #include <errno.h>
9 #include <malloc.h>
10 #include "vm_util.h"
11 #include "../kselftest.h"
12 #include <linux/types.h>
13 #include <linux/memfd.h>
14 #include <linux/userfaultfd.h>
15 #include <linux/fs.h>
16 #include <sys/ioctl.h>
17 #include <sys/stat.h>
18 #include <math.h>
19 #include <asm/unistd.h>
20 #include <pthread.h>
21 #include <sys/resource.h>
22 #include <assert.h>
23 #include <sys/ipc.h>
24 #include <sys/shm.h>
25
26 #define PAGEMAP_BITS_ALL (PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN | \
27 PAGE_IS_FILE | PAGE_IS_PRESENT | \
28 PAGE_IS_SWAPPED | PAGE_IS_PFNZERO | \
29 PAGE_IS_HUGE)
30 #define PAGEMAP_NON_WRITTEN_BITS (PAGE_IS_WPALLOWED | PAGE_IS_FILE | \
31 PAGE_IS_PRESENT | PAGE_IS_SWAPPED | \
32 PAGE_IS_PFNZERO | PAGE_IS_HUGE)
33
34 #define TEST_ITERATIONS 100
35 #define PAGEMAP "/proc/self/pagemap"
36 int pagemap_fd;
37 int uffd;
38 size_t page_size;
39 size_t hpage_size;
40 const char *progname;
41
42 #define LEN(region) ((region.end - region.start)/page_size)
43
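/*
 * Thin wrapper around the PAGEMAP_SCAN ioctl: packs the arguments into a
 * struct pm_scan_arg and scans [start, start + len). On success the ioctl
 * returns the number of page_region entries written to @vec.
 */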
44 static long pagemap_ioctl(void *start, int len, void *vec, int vec_len, int flag,
45 int max_pages, long required_mask, long anyof_mask, long excluded_mask,
46 long return_mask)
47 {
48 struct pm_scan_arg arg;
49
50 arg.start = (uintptr_t)start;
51 arg.end = (uintptr_t)(start + len);
52 arg.vec = (uintptr_t)vec;
53 arg.vec_len = vec_len;
54 arg.flags = flag;
55 arg.size = sizeof(struct pm_scan_arg);
56 arg.max_pages = max_pages;
57 arg.category_mask = required_mask;
58 arg.category_anyof_mask = anyof_mask;
59 arg.category_inverted = excluded_mask;
60 arg.return_mask = return_mask;
61
62 return ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
63 }
64
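/*
 * Same as pagemap_ioctl() but also reports arg.walk_end, the address at
 * which the scan stopped, so callers can resume a partial walk.
 */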
65 static long pagemap_ioc(void *start, int len, void *vec, int vec_len, int flag,
66 int max_pages, long required_mask, long anyof_mask, long excluded_mask,
67 long return_mask, long *walk_end)
68 {
69 struct pm_scan_arg arg;
70 int ret;
71
72 arg.start = (uintptr_t)start;
73 arg.end = (uintptr_t)(start + len);
74 arg.vec = (uintptr_t)vec;
75 arg.vec_len = vec_len;
76 arg.flags = flag;
77 arg.size = sizeof(struct pm_scan_arg);
78 arg.max_pages = max_pages;
79 arg.category_mask = required_mask;
80 arg.category_anyof_mask = anyof_mask;
81 arg.category_inverted = excluded_mask;
82 arg.return_mask = return_mask;
83
84 ret = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
85
86 if (walk_end)
87 *walk_end = arg.walk_end;
88
89 return ret;
90 }
91
92
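/*
 * Open a userfaultfd and require the async write-protect features these
 * tests depend on. A non-zero return makes main() skip the whole run.
 */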
93 int init_uffd(void)
94 {
95 struct uffdio_api uffdio_api;
96
97 uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
98 if (uffd == -1)
99 return uffd;
100
101 uffdio_api.api = UFFD_API;
102 uffdio_api.features = UFFD_FEATURE_WP_UNPOPULATED | UFFD_FEATURE_WP_ASYNC |
103 UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
104 if (ioctl(uffd, UFFDIO_API, &uffdio_api))
105 return -1;
106
107 if (!(uffdio_api.api & UFFDIO_REGISTER_MODE_WP) ||
108 !(uffdio_api.features & UFFD_FEATURE_WP_UNPOPULATED) ||
109 !(uffdio_api.features & UFFD_FEATURE_WP_ASYNC) ||
110 !(uffdio_api.features & UFFD_FEATURE_WP_HUGETLBFS_SHMEM))
111 return -1;
112
113 return 0;
114 }
115
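/*
 * Register the range with userfaultfd for write-protection and write-protect
 * it; with async WP enabled, later writes simply clear the protection and are
 * then reported as PAGE_IS_WRITTEN by PAGEMAP_SCAN.
 */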
116 int wp_init(void *lpBaseAddress, long dwRegionSize)
117 {
118 struct uffdio_register uffdio_register;
119 struct uffdio_writeprotect wp;
120
121 uffdio_register.range.start = (unsigned long)lpBaseAddress;
122 uffdio_register.range.len = dwRegionSize;
123 uffdio_register.mode = UFFDIO_REGISTER_MODE_WP;
124 if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
125 ksft_exit_fail_msg("ioctl(UFFDIO_REGISTER) %d %s\n", errno, strerror(errno));
126
127 if (!(uffdio_register.ioctls & UFFDIO_WRITEPROTECT))
128 ksft_exit_fail_msg("ioctl set is incorrect\n");
129
130 wp.range.start = (unsigned long)lpBaseAddress;
131 wp.range.len = dwRegionSize;
132 wp.mode = UFFDIO_WRITEPROTECT_MODE_WP;
133
134 if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp))
135 ksft_exit_fail_msg("ioctl(UFFDIO_WRITEPROTECT)\n");
136
137 return 0;
138 }
139
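/* Undo wp_init(): unregister the range from userfaultfd. */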
140 int wp_free(void *lpBaseAddress, long dwRegionSize)
141 {
142 struct uffdio_register uffdio_register;
143
144 uffdio_register.range.start = (unsigned long)lpBaseAddress;
145 uffdio_register.range.len = dwRegionSize;
146 uffdio_register.mode = UFFDIO_REGISTER_MODE_WP;
147 if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range))
148 ksft_exit_fail_msg("ioctl unregister failure\n");
149 return 0;
150 }
151
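/*
 * Re-write-protect all written pages in the range through PAGEMAP_SCAN with
 * PM_SCAN_WP_MATCHING, i.e. reset the "written" state of the range.
 */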
152 int wp_addr_range(void *lpBaseAddress, int dwRegionSize)
153 {
154 if (pagemap_ioctl(lpBaseAddress, dwRegionSize, NULL, 0,
155 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
156 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) < 0)
157 ksft_exit_fail_msg("error %d %d %s\n", 1, errno, strerror(errno));
158
159 return 0;
160 }
161
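/*
 * Get hugetlb-backed memory: SysV shared memory when @shmid is non-NULL (the
 * id is returned through it), otherwise an anonymous MAP_HUGETLB mapping.
 * Returns NULL when the allocation fails, e.g. no hugetlb pages are reserved.
 */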
162 void *gethugetlb_mem(int size, int *shmid)
163 {
164 char *mem;
165
166 if (shmid) {
167 *shmid = shmget(2, size, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
168 if (*shmid < 0)
169 return NULL;
170
171 mem = shmat(*shmid, 0, 0);
172 if (mem == (char *)-1) {
173 shmctl(*shmid, IPC_RMID, NULL);
174 ksft_exit_fail_msg("Shared memory attach failure\n");
175 }
176 } else {
177 mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
178 MAP_ANONYMOUS | MAP_HUGETLB | MAP_PRIVATE, -1, 0);
179 if (mem == MAP_FAILED)
180 return NULL;
181 }
182
183 return mem;
184 }
185
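/*
 * Exercise userfaultfd WP on a mapping whose protections keep changing: after
 * write-protecting the range, a scan must still report no written pages.
 */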
186 int userfaultfd_tests(void)
187 {
188 long mem_size, vec_size, written, num_pages = 16;
189 char *mem, *vec;
190
191 mem_size = num_pages * page_size;
192 mem = mmap(NULL, mem_size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
193 if (mem == MAP_FAILED)
194 ksft_exit_fail_msg("error nomem\n");
195
196 wp_init(mem, mem_size);
197
198 /* Change protection of pages differently */
199 mprotect(mem, mem_size/8, PROT_READ|PROT_WRITE);
200 mprotect(mem + 1 * mem_size/8, mem_size/8, PROT_READ);
201 mprotect(mem + 2 * mem_size/8, mem_size/8, PROT_READ|PROT_WRITE);
202 mprotect(mem + 3 * mem_size/8, mem_size/8, PROT_READ);
203 mprotect(mem + 4 * mem_size/8, mem_size/8, PROT_READ|PROT_WRITE);
204 mprotect(mem + 5 * mem_size/8, mem_size/8, PROT_NONE);
205 mprotect(mem + 6 * mem_size/8, mem_size/8, PROT_READ|PROT_WRITE);
206 mprotect(mem + 7 * mem_size/8, mem_size/8, PROT_READ);
207
208 wp_addr_range(mem + (mem_size/16), mem_size - 2 * (mem_size/8));
209 wp_addr_range(mem, mem_size);
210
211 vec_size = mem_size/page_size;
212 vec = calloc(vec_size, sizeof(struct page_region));
213
214 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
215 vec_size - 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
216 if (written < 0)
217 ksft_exit_fail_msg("error %ld %d %s\n", written, errno, strerror(errno));
218
219 ksft_test_result(written == 0, "%s all new pages must not be written (dirty)\n", __func__);
220
221 wp_free(mem, mem_size);
222 munmap(mem, mem_size);
223 free(vec);
224 return 0;
225 }
226
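/* Sum the number of pages covered by the first @vec_size regions. */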
227 int get_reads(struct page_region *vec, int vec_size)
228 {
229 int i, sum = 0;
230
231 for (i = 0; i < vec_size; i++)
232 sum += LEN(vec[i]);
233
234 return sum;
235 }
236
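/*
 * Argument validation plus written-page tracking patterns: invalid flags and
 * masks, repeated written/non-written patterns, max_pages limits, multiple
 * regions, small vectors and walk_end behaviour.
 */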
237 int sanity_tests_sd(void)
238 {
239 unsigned long long mem_size, vec_size, i, total_pages = 0;
240 long ret, ret2, ret3;
241 int num_pages = 1000;
242 int total_writes, total_reads, reads, count;
243 struct page_region *vec, *vec2;
244 char *mem, *m[2];
245 long walk_end;
246
247 vec_size = num_pages/2;
248 mem_size = num_pages * page_size;
249
250 vec = calloc(vec_size, sizeof(struct page_region));
251 if (!vec)
252 ksft_exit_fail_msg("error nomem\n");
253
254 vec2 = calloc(vec_size, sizeof(struct page_region));
255 if (!vec2)
256 ksft_exit_fail_msg("error nomem\n");
257
258 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
259 if (mem == MAP_FAILED)
260 ksft_exit_fail_msg("error nomem\n");
261
262 wp_init(mem, mem_size);
263 wp_addr_range(mem, mem_size);
264
265 /* 1. wrong operation */
266 ksft_test_result(pagemap_ioctl(mem, 0, vec, vec_size, 0,
267 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) == 0,
268 "%s Zero range size is valid\n", __func__);
269
270 ksft_test_result(pagemap_ioctl(mem, mem_size, NULL, vec_size, 0,
271 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) < 0,
272 "%s output buffer must be specified with size\n", __func__);
273
274 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, 0, 0,
275 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) == 0,
276 "%s output buffer can be 0\n", __func__);
277
278 ksft_test_result(pagemap_ioctl(mem, mem_size, 0, 0, 0,
279 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) == 0,
280 "%s output buffer can be 0\n", __func__);
281
282 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, -1,
283 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) < 0,
284 "%s wrong flag specified\n", __func__);
285
286 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size,
287 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC | 0xFF,
288 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) < 0,
289 "%s flag has extra bits specified\n", __func__);
290
291 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0,
292 0, 0, 0, 0, PAGE_IS_WRITTEN) >= 0,
293 "%s no selection mask is specified\n", __func__);
294
295 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0,
296 0, PAGE_IS_WRITTEN, PAGE_IS_WRITTEN, 0, 0) == 0,
297 "%s no return mask is specified\n", __func__);
298
299 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0,
300 0, PAGE_IS_WRITTEN, 0, 0, 0x1000) < 0,
301 "%s wrong return mask specified\n", __func__);
302
303 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size,
304 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
305 0, 0xFFF, PAGE_IS_WRITTEN, 0, PAGE_IS_WRITTEN) < 0,
306 "%s mixture of correct and wrong flag\n", __func__);
307
308 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size,
309 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
310 0, 0, 0, PAGEMAP_BITS_ALL, PAGE_IS_WRITTEN) >= 0,
311 "%s PAGEMAP_BITS_ALL can be specified with PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC\n",
312 __func__);
313
314 /* 2. Clear area with larger vec size */
315 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
316 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0,
317 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
318 ksft_test_result(ret >= 0, "%s Clear area with larger vec size\n", __func__);
319
320 /* 3. Repeated pattern of written and non-written pages */
321 for (i = 0; i < mem_size; i += 2 * page_size)
322 mem[i]++;
323
324 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, PAGE_IS_WRITTEN, 0,
325 0, PAGE_IS_WRITTEN);
326 if (ret < 0)
327 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
328
329 ksft_test_result((unsigned long long)ret == mem_size/(page_size * 2),
330 "%s Repeated pattern of written and non-written pages\n", __func__);
331
332 /* 4. Repeated pattern of written and non-written pages in parts */
333 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
334 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
335 num_pages/2 - 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
336 if (ret < 0)
337 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
338
339 ret2 = pagemap_ioctl(mem, mem_size, vec, 2, 0, 0, PAGE_IS_WRITTEN, 0, 0,
340 PAGE_IS_WRITTEN);
341 if (ret2 < 0)
342 ksft_exit_fail_msg("error %ld %d %s\n", ret2, errno, strerror(errno));
343
344 ret3 = pagemap_ioctl(mem, mem_size, vec, vec_size,
345 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
346 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
347 if (ret3 < 0)
348 ksft_exit_fail_msg("error %ld %d %s\n", ret3, errno, strerror(errno));
349
350 ksft_test_result((ret + ret3) == num_pages/2 && ret2 == 2,
351 "%s Repeated pattern of written and non-written pages in parts %ld %ld %ld\n",
352 __func__, ret, ret3, ret2);
353
354 /* 5. Repeated pattern of written and non-written pages max_pages */
355 for (i = 0; i < mem_size; i += 2 * page_size)
356 mem[i]++;
357 mem[(mem_size/page_size - 1) * page_size]++;
358
359 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
360 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
361 num_pages/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
362 if (ret < 0)
363 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
364
365 ret2 = pagemap_ioctl(mem, mem_size, vec, vec_size,
366 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
367 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
368 if (ret2 < 0)
369 ksft_exit_fail_msg("error %ld %d %s\n", ret2, errno, strerror(errno));
370
371 ksft_test_result(ret == num_pages/2 && ret2 == 1,
372 "%s Repeated pattern of written and non-written pages max_pages\n",
373 __func__);
374
375 /* 6. only get 2 dirty pages and clear them as well */
376 vec_size = mem_size/page_size;
377 memset(mem, -1, mem_size);
378
379 /* get and clear second and third pages */
380 ret = pagemap_ioctl(mem + page_size, 2 * page_size, vec, 1,
381 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
382 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
383 if (ret < 0)
384 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
385
386 ret2 = pagemap_ioctl(mem, mem_size, vec2, vec_size, 0, 0,
387 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
388 if (ret2 < 0)
389 ksft_exit_fail_msg("error %ld %d %s\n", ret2, errno, strerror(errno));
390
391 ksft_test_result(ret == 1 && LEN(vec[0]) == 2 &&
392 vec[0].start == (uintptr_t)(mem + page_size) &&
393 ret2 == 2 && LEN(vec2[0]) == 1 && vec2[0].start == (uintptr_t)mem &&
394 LEN(vec2[1]) == vec_size - 3 &&
395 vec2[1].start == (uintptr_t)(mem + 3 * page_size),
396 "%s only get 2 written pages and clear them as well\n", __func__);
397
398 wp_free(mem, mem_size);
399 munmap(mem, mem_size);
400
401 /* 7. Two regions */
402 m[0] = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
403 if (m[0] == MAP_FAILED)
404 ksft_exit_fail_msg("error nomem\n");
405 m[1] = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
406 if (m[1] == MAP_FAILED)
407 ksft_exit_fail_msg("error nomem\n");
408
409 wp_init(m[0], mem_size);
410 wp_init(m[1], mem_size);
411 wp_addr_range(m[0], mem_size);
412 wp_addr_range(m[1], mem_size);
413
414 memset(m[0], 'a', mem_size);
415 memset(m[1], 'b', mem_size);
416
417 wp_addr_range(m[0], mem_size);
418
419 ret = pagemap_ioctl(m[1], mem_size, vec, 1, 0, 0, PAGE_IS_WRITTEN, 0, 0,
420 PAGE_IS_WRITTEN);
421 if (ret < 0)
422 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
423
424 ksft_test_result(ret == 1 && LEN(vec[0]) == mem_size/page_size,
425 "%s Two regions\n", __func__);
426
427 wp_free(m[0], mem_size);
428 wp_free(m[1], mem_size);
429 munmap(m[0], mem_size);
430 munmap(m[1], mem_size);
431
432 free(vec);
433 free(vec2);
434
435 	/* 8. Smaller max_pages */
436 mem_size = 1050 * page_size;
437 vec_size = mem_size/(page_size*2);
438
439 vec = calloc(vec_size, sizeof(struct page_region));
440 if (!vec)
441 ksft_exit_fail_msg("error nomem\n");
442
443 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
444 if (mem == MAP_FAILED)
445 ksft_exit_fail_msg("error nomem\n");
446
447 wp_init(mem, mem_size);
448 wp_addr_range(mem, mem_size);
449
450 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
451 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0,
452 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
453 if (ret < 0)
454 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
455
456 for (i = 0; i < mem_size/page_size; i += 2)
457 mem[i * page_size]++;
458
459 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
460 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
461 mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
462 if (ret < 0)
463 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
464
465 total_pages += ret;
466
467 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
468 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
469 mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
470 if (ret < 0)
471 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
472
473 total_pages += ret;
474
475 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
476 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
477 mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
478 if (ret < 0)
479 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
480
481 total_pages += ret;
482
483 ksft_test_result(total_pages == mem_size/(page_size*2), "%s Smaller max_pages\n", __func__);
484
485 free(vec);
486 wp_free(mem, mem_size);
487 munmap(mem, mem_size);
488 total_pages = 0;
489
490 /* 9. Smaller vec */
491 mem_size = 10000 * page_size;
492 vec_size = 50;
493
494 vec = calloc(vec_size, sizeof(struct page_region));
495 if (!vec)
496 ksft_exit_fail_msg("error nomem\n");
497
498 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
499 if (mem == MAP_FAILED)
500 ksft_exit_fail_msg("error nomem\n");
501
502 wp_init(mem, mem_size);
503 wp_addr_range(mem, mem_size);
504
505 for (count = 0; count < TEST_ITERATIONS; count++) {
506 total_writes = total_reads = 0;
507 walk_end = (long)mem;
508
509 for (i = 0; i < mem_size; i += page_size) {
510 if (rand() % 2) {
511 mem[i]++;
512 total_writes++;
513 }
514 }
515
516 while (total_reads < total_writes) {
517 ret = pagemap_ioc((void *)walk_end, mem_size-(walk_end - (long)mem), vec,
518 vec_size, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
519 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
520 if (ret < 0)
521 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
522
523 if ((unsigned long)ret > vec_size)
524 break;
525
526 reads = get_reads(vec, ret);
527 total_reads += reads;
528 }
529
530 if (total_reads != total_writes)
531 break;
532 }
533
534 ksft_test_result(count == TEST_ITERATIONS, "Smaller vec\n");
535
536 free(vec);
537 wp_free(mem, mem_size);
538 munmap(mem, mem_size);
539
540 /* 10. Walk_end tester */
541 vec_size = 1000;
542 mem_size = vec_size * page_size;
543
544 vec = calloc(vec_size, sizeof(struct page_region));
545 if (!vec)
546 ksft_exit_fail_msg("error nomem\n");
547
548 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
549 if (mem == MAP_FAILED)
550 ksft_exit_fail_msg("error nomem\n");
551
552 wp_init(mem, mem_size);
553 wp_addr_range(mem, mem_size);
554
555 memset(mem, 0, mem_size);
556
557 ret = pagemap_ioc(mem, 0, vec, vec_size, 0,
558 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
559 if (ret < 0)
560 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
561 ksft_test_result(ret == 0 && walk_end == (long)mem,
562 "Walk_end: Same start and end address\n");
563
564 ret = pagemap_ioc(mem, 0, vec, vec_size, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
565 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
566 if (ret < 0)
567 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
568 ksft_test_result(ret == 0 && walk_end == (long)mem,
569 "Walk_end: Same start and end with WP\n");
570
571 ret = pagemap_ioc(mem, 0, vec, 0, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
572 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
573 if (ret < 0)
574 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
575 ksft_test_result(ret == 0 && walk_end == (long)mem,
576 "Walk_end: Same start and end with 0 output buffer\n");
577
578 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
579 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
580 if (ret < 0)
581 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
582 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size),
583 "Walk_end: Big vec\n");
584
585 ret = pagemap_ioc(mem, mem_size, vec, 1, 0,
586 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
587 if (ret < 0)
588 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
589 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size),
590 "Walk_end: vec of minimum length\n");
591
592 ret = pagemap_ioc(mem, mem_size, vec, 1, 0,
593 vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
594 if (ret < 0)
595 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
596 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size),
597 "Walk_end: Max pages specified\n");
598
599 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
600 vec_size/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
601 if (ret < 0)
602 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
603 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size/2),
604 "Walk_end: Half max pages\n");
605
606 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
607 1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
608 if (ret < 0)
609 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
610 ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size),
611 "Walk_end: 1 max page\n");
612
613 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
614 -1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
615 if (ret < 0)
616 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
617 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size),
618 "Walk_end: max pages\n");
619
620 wp_addr_range(mem, mem_size);
621 for (i = 0; i < mem_size; i += 2 * page_size)
622 mem[i]++;
623
624 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
625 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
626 if (ret < 0)
627 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
628 ksft_test_result((unsigned long)ret == vec_size/2 && walk_end == (long)(mem + mem_size),
629 "Walk_end sparse: Big vec\n");
630
631 ret = pagemap_ioc(mem, mem_size, vec, 1, 0,
632 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
633 if (ret < 0)
634 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
635 ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2),
636 "Walk_end sparse: vec of minimum length\n");
637
638 ret = pagemap_ioc(mem, mem_size, vec, 1, 0,
639 vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
640 if (ret < 0)
641 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
642 ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2),
643 "Walk_end sparse: Max pages specified\n");
644
645 ret = pagemap_ioc(mem, mem_size, vec, vec_size/2, 0,
646 vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
647 if (ret < 0)
648 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
649 ksft_test_result((unsigned long)ret == vec_size/2 && walk_end == (long)(mem + mem_size),
650 "Walk_end sparse: Max pages specified\n");
651
652 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
653 vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
654 if (ret < 0)
655 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
656 ksft_test_result((unsigned long)ret == vec_size/2 && walk_end == (long)(mem + mem_size),
657 "Walk_end sparse: Max pages specified\n");
658
659 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
660 vec_size/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
661 if (ret < 0)
662 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
663 ksft_test_result((unsigned long)ret == vec_size/2 && walk_end == (long)(mem + mem_size),
664 			 "Walk_end sparse: Half max pages\n");
665
666 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0,
667 1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end);
668 if (ret < 0)
669 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
670 ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2),
671 			 "Walk_end sparse: 1 max page\n");
672
673 free(vec);
674 wp_free(mem, mem_size);
675 munmap(mem, mem_size);
676
677 return 0;
678 }
679
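/*
 * Core written (dirty) tracking checks, run against whatever mapping the
 * caller supplies (anon, THP, hugetlb, file). With @skip set, only matching
 * skip results are emitted so the test plan stays balanced.
 */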
680 int base_tests(char *prefix, char *mem, unsigned long long mem_size, int skip)
681 {
682 unsigned long long vec_size;
683 int written;
684 struct page_region *vec, *vec2;
685
686 if (skip) {
687 ksft_test_result_skip("%s all new pages must not be written (dirty)\n", prefix);
688 ksft_test_result_skip("%s all pages must be written (dirty)\n", prefix);
689 ksft_test_result_skip("%s all pages dirty other than first and the last one\n",
690 prefix);
691 ksft_test_result_skip("%s PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC\n", prefix);
692 ksft_test_result_skip("%s only middle page dirty\n", prefix);
693 ksft_test_result_skip("%s only two middle pages dirty\n", prefix);
694 return 0;
695 }
696
697 vec_size = mem_size/page_size;
698 vec = calloc(vec_size, sizeof(struct page_region));
699 vec2 = calloc(vec_size, sizeof(struct page_region));
700
701 	/* 1. all new pages must not be written (dirty) */
702 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
703 vec_size - 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
704 if (written < 0)
705 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
706
707 ksft_test_result(written == 0, "%s all new pages must not be written (dirty)\n", prefix);
708
709 /* 2. all pages must be written */
710 memset(mem, -1, mem_size);
711
712 written = pagemap_ioctl(mem, mem_size, vec, 1, 0, 0, PAGE_IS_WRITTEN, 0, 0,
713 PAGE_IS_WRITTEN);
714 if (written < 0)
715 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
716
717 ksft_test_result(written == 1 && LEN(vec[0]) == mem_size/page_size,
718 "%s all pages must be written (dirty)\n", prefix);
719
720 /* 3. all pages dirty other than first and the last one */
721 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
722 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
723 if (written < 0)
724 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
725
726 memset(mem + page_size, 0, mem_size - (2 * page_size));
727
728 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
729 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
730 if (written < 0)
731 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
732
733 ksft_test_result(written == 1 && LEN(vec[0]) >= vec_size - 2 && LEN(vec[0]) <= vec_size,
734 "%s all pages dirty other than first and the last one\n", prefix);
735
736 written = pagemap_ioctl(mem, mem_size, vec, 1, 0, 0,
737 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
738 if (written < 0)
739 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
740
741 ksft_test_result(written == 0,
742 "%s PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC\n", prefix);
743
744 /* 4. only middle page dirty */
745 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
746 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
747 if (written < 0)
748 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
749
750 mem[vec_size/2 * page_size]++;
751
752 written = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, PAGE_IS_WRITTEN,
753 0, 0, PAGE_IS_WRITTEN);
754 if (written < 0)
755 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
756
757 ksft_test_result(written == 1 && LEN(vec[0]) >= 1,
758 "%s only middle page dirty\n", prefix);
759
760 /* 5. only two middle pages dirty and walk over only middle pages */
761 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
762 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN | PAGE_IS_HUGE);
763 if (written < 0)
764 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
765
766 mem[vec_size/2 * page_size]++;
767 mem[(vec_size/2 + 1) * page_size]++;
768
769 written = pagemap_ioctl(&mem[vec_size/2 * page_size], 2 * page_size, vec, 1, 0,
770 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN | PAGE_IS_HUGE);
771 if (written < 0)
772 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
773
774 ksft_test_result(written == 1 && vec[0].start == (uintptr_t)(&mem[vec_size/2 * page_size])
775 && LEN(vec[0]) == 2,
776 "%s only two middle pages dirty\n", prefix);
777
778 free(vec);
779 free(vec2);
780 return 0;
781 }
782
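/*
 * Allocate a huge-page-aligned buffer and advise THP for it. Returns NULL
 * when MADV_HUGEPAGE is not supported so callers can skip their tests.
 */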
783 void *gethugepage(int map_size)
784 {
785 int ret;
786 char *map;
787
788 map = memalign(hpage_size, map_size);
789 if (!map)
790 ksft_exit_fail_msg("memalign failed %d %s\n", errno, strerror(errno));
791
792 ret = madvise(map, map_size, MADV_HUGEPAGE);
793 if (ret)
794 return NULL;
795
796 memset(map, 0, map_size);
797
798 return map;
799 }
800
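/* THP-specific checks: whole, half and partially cleared huge pages. */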
801 int hpage_unit_tests(void)
802 {
803 char *map;
804 int ret, ret2;
805 size_t num_pages = 10;
806 unsigned long long map_size = hpage_size * num_pages;
807 unsigned long long vec_size = map_size/page_size;
808 struct page_region *vec, *vec2;
809
810 vec = calloc(vec_size, sizeof(struct page_region));
811 vec2 = calloc(vec_size, sizeof(struct page_region));
812 if (!vec || !vec2)
813 ksft_exit_fail_msg("malloc failed\n");
814
815 map = gethugepage(map_size);
816 if (map) {
817 wp_init(map, map_size);
818 wp_addr_range(map, map_size);
819
820 /* 1. all new huge page must not be written (dirty) */
821 ret = pagemap_ioctl(map, map_size, vec, vec_size,
822 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0,
823 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
824 if (ret < 0)
825 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
826
827 ksft_test_result(ret == 0, "%s all new huge page must not be written (dirty)\n",
828 __func__);
829
830 /* 2. all the huge page must not be written */
831 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
832 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
833 if (ret < 0)
834 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
835
836 ksft_test_result(ret == 0, "%s all the huge page must not be written\n", __func__);
837
838 /* 3. all the huge page must be written and clear dirty as well */
839 memset(map, -1, map_size);
840 ret = pagemap_ioctl(map, map_size, vec, vec_size,
841 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
842 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
843 if (ret < 0)
844 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
845
846 ksft_test_result(ret == 1 && vec[0].start == (uintptr_t)map &&
847 LEN(vec[0]) == vec_size && vec[0].categories == PAGE_IS_WRITTEN,
848 "%s all the huge page must be written and clear\n", __func__);
849
850 /* 4. only middle page written */
851 wp_free(map, map_size);
852 free(map);
853 map = gethugepage(map_size);
854 wp_init(map, map_size);
855 wp_addr_range(map, map_size);
856 map[vec_size/2 * page_size]++;
857
858 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
859 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
860 if (ret < 0)
861 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
862
863 ksft_test_result(ret == 1 && LEN(vec[0]) > 0,
864 "%s only middle page written\n", __func__);
865
866 wp_free(map, map_size);
867 free(map);
868 } else {
869 		ksft_test_result_skip("%s all new huge page must not be written (dirty)\n", __func__);
870 ksft_test_result_skip("%s all the huge page must not be written\n", __func__);
871 ksft_test_result_skip("%s all the huge page must be written and clear\n", __func__);
872 ksft_test_result_skip("%s only middle page written\n", __func__);
873 }
874
875 /* 5. clear first half of huge page */
876 map = gethugepage(map_size);
877 if (map) {
878 wp_init(map, map_size);
879 wp_addr_range(map, map_size);
880
881 memset(map, 0, map_size);
882
883 wp_addr_range(map, map_size/2);
884
885 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
886 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
887 if (ret < 0)
888 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
889
890 ksft_test_result(ret == 1 && LEN(vec[0]) == vec_size/2 &&
891 vec[0].start == (uintptr_t)(map + map_size/2),
892 "%s clear first half of huge page\n", __func__);
893 wp_free(map, map_size);
894 free(map);
895 } else {
896 ksft_test_result_skip("%s clear first half of huge page\n", __func__);
897 }
898
899 /* 6. clear first half of huge page with limited buffer */
900 map = gethugepage(map_size);
901 if (map) {
902 wp_init(map, map_size);
903 wp_addr_range(map, map_size);
904
905 memset(map, 0, map_size);
906
907 ret = pagemap_ioctl(map, map_size, vec, vec_size,
908 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
909 vec_size/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
910 if (ret < 0)
911 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
912
913 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
914 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
915 if (ret < 0)
916 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
917
918 ksft_test_result(ret == 1 && LEN(vec[0]) == vec_size/2 &&
919 vec[0].start == (uintptr_t)(map + map_size/2),
920 "%s clear first half of huge page with limited buffer\n",
921 __func__);
922 wp_free(map, map_size);
923 free(map);
924 } else {
925 ksft_test_result_skip("%s clear first half of huge page with limited buffer\n",
926 __func__);
927 }
928
929 /* 7. clear second half of huge page */
930 map = gethugepage(map_size);
931 if (map) {
932 wp_init(map, map_size);
933 wp_addr_range(map, map_size);
934
935 memset(map, -1, map_size);
936
937 ret = pagemap_ioctl(map + map_size/2, map_size/2, vec, vec_size,
938 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, vec_size/2,
939 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
940 if (ret < 0)
941 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
942
943 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
944 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
945 if (ret < 0)
946 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
947
948 ksft_test_result(ret == 1 && LEN(vec[0]) == vec_size/2,
949 "%s clear second half huge page\n", __func__);
950 wp_free(map, map_size);
951 free(map);
952 } else {
953 ksft_test_result_skip("%s clear second half huge page\n", __func__);
954 }
955
956 /* 8. get half huge page */
957 map = gethugepage(map_size);
958 if (map) {
959 wp_init(map, map_size);
960 wp_addr_range(map, map_size);
961
962 memset(map, -1, map_size);
963 usleep(100);
964
965 ret = pagemap_ioctl(map, map_size, vec, 1,
966 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
967 hpage_size/(2*page_size), PAGE_IS_WRITTEN, 0, 0,
968 PAGE_IS_WRITTEN);
969 if (ret < 0)
970 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
971
972 ksft_test_result(ret == 1 && LEN(vec[0]) == hpage_size/(2*page_size),
973 "%s get half huge page\n", __func__);
974
975 ret2 = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0,
976 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN);
977 if (ret2 < 0)
978 ksft_exit_fail_msg("error %d %d %s\n", ret2, errno, strerror(errno));
979
980 ksft_test_result(ret2 == 1 && LEN(vec[0]) == (map_size - hpage_size/2)/page_size,
981 "%s get half huge page\n", __func__);
982
983 wp_free(map, map_size);
984 free(map);
985 } else {
986 ksft_test_result_skip("%s get half huge page\n", __func__);
987 ksft_test_result_skip("%s get half huge page\n", __func__);
988 }
989
990 free(vec);
991 free(vec2);
992 return 0;
993 }
994
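/* Scanning an address range that is (most likely) not mapped must not fail. */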
995 int unmapped_region_tests(void)
996 {
997 void *start = (void *)0x10000000;
998 int written, len = 0x00040000;
999 long vec_size = len / page_size;
1000 struct page_region *vec = calloc(vec_size, sizeof(struct page_region));
1001
1002 /* 1. Get written pages */
1003 written = pagemap_ioctl(start, len, vec, vec_size, 0, 0,
1004 PAGEMAP_NON_WRITTEN_BITS, 0, 0, PAGEMAP_NON_WRITTEN_BITS);
1005 if (written < 0)
1006 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno));
1007
1008 ksft_test_result(written >= 0, "%s Get status of pages\n", __func__);
1009
1010 free(vec);
1011 return 0;
1012 }
1013
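/*
 * Toggle a single page between clean and written TEST_ITERATIONS times and
 * check the reported state after every transition.
 */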
1014 static void test_simple(void)
1015 {
1016 int i;
1017 char *map;
1018 struct page_region vec;
1019
1020 map = aligned_alloc(page_size, page_size);
1021 if (!map)
1022 ksft_exit_fail_msg("aligned_alloc failed\n");
1023
1024 wp_init(map, page_size);
1025 wp_addr_range(map, page_size);
1026
1027 for (i = 0 ; i < TEST_ITERATIONS; i++) {
1028 if (pagemap_ioctl(map, page_size, &vec, 1, 0, 0,
1029 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) == 1) {
1030 ksft_print_msg("written bit was 1, but should be 0 (i=%d)\n", i);
1031 break;
1032 }
1033
1034 wp_addr_range(map, page_size);
1035 /* Write something to the page to get the written bit enabled on the page */
1036 map[0]++;
1037
1038 if (pagemap_ioctl(map, page_size, &vec, 1, 0, 0,
1039 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) == 0) {
1040 ksft_print_msg("written bit was 0, but should be 1 (i=%d)\n", i);
1041 break;
1042 }
1043
1044 wp_addr_range(map, page_size);
1045 }
1046 wp_free(map, page_size);
1047 free(map);
1048
1049 ksft_test_result(i == TEST_ITERATIONS, "Test %s\n", __func__);
1050 }
1051
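/*
 * Mask combinations (required/anyof/excluded/return masks) plus file-backed
 * mappings: the test binary mapped read-only and a freshly written temp file.
 */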
1052 int sanity_tests(void)
1053 {
1054 unsigned long long mem_size, vec_size;
1055 long ret, fd, i, buf_size;
1056 struct page_region *vec;
1057 char *mem, *fmem;
1058 struct stat sbuf;
1059 char *tmp_buf;
1060
1061 /* 1. wrong operation */
1062 mem_size = 10 * page_size;
1063 vec_size = mem_size / page_size;
1064
1065 vec = calloc(vec_size, sizeof(struct page_region));
1066 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
1067 	if (mem == MAP_FAILED || !vec)
1068 ksft_exit_fail_msg("error nomem\n");
1069
1070 wp_init(mem, mem_size);
1071 wp_addr_range(mem, mem_size);
1072
1073 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size,
1074 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC,
1075 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) >= 0,
1076 "%s WP op can be specified with !PAGE_IS_WRITTEN\n", __func__);
1077 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1078 PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) >= 0,
1079 "%s required_mask specified\n", __func__);
1080 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1081 0, PAGEMAP_BITS_ALL, 0, PAGEMAP_BITS_ALL) >= 0,
1082 "%s anyof_mask specified\n", __func__);
1083 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1084 0, 0, PAGEMAP_BITS_ALL, PAGEMAP_BITS_ALL) >= 0,
1085 "%s excluded_mask specified\n", __func__);
1086 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1087 PAGEMAP_BITS_ALL, PAGEMAP_BITS_ALL, 0,
1088 PAGEMAP_BITS_ALL) >= 0,
1089 "%s required_mask and anyof_mask specified\n", __func__);
1090 wp_free(mem, mem_size);
1091 munmap(mem, mem_size);
1092
1093 /* 2. Get sd and present pages with anyof_mask */
1094 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
1095 if (mem == MAP_FAILED)
1096 ksft_exit_fail_msg("error nomem\n");
1097 wp_init(mem, mem_size);
1098 wp_addr_range(mem, mem_size);
1099
1100 memset(mem, 0, mem_size);
1101
1102 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1103 0, PAGEMAP_BITS_ALL, 0, PAGEMAP_BITS_ALL);
1104 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size &&
1105 (vec[0].categories & (PAGE_IS_WRITTEN | PAGE_IS_PRESENT)) ==
1106 (PAGE_IS_WRITTEN | PAGE_IS_PRESENT),
1107 "%s Get sd and present pages with anyof_mask\n", __func__);
1108
1109 /* 3. Get sd and present pages with required_mask */
1110 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1111 PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL);
1112 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size &&
1113 (vec[0].categories & (PAGE_IS_WRITTEN | PAGE_IS_PRESENT)) ==
1114 (PAGE_IS_WRITTEN | PAGE_IS_PRESENT),
1115 "%s Get all the pages with required_mask\n", __func__);
1116
1117 /* 4. Get sd and present pages with required_mask and anyof_mask */
1118 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1119 PAGE_IS_WRITTEN, PAGE_IS_PRESENT, 0, PAGEMAP_BITS_ALL);
1120 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size &&
1121 (vec[0].categories & (PAGE_IS_WRITTEN | PAGE_IS_PRESENT)) ==
1122 (PAGE_IS_WRITTEN | PAGE_IS_PRESENT),
1123 "%s Get sd and present pages with required_mask and anyof_mask\n",
1124 __func__);
1125
1126 /* 5. Don't get sd pages */
1127 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1128 PAGE_IS_WRITTEN, 0, PAGE_IS_WRITTEN, PAGEMAP_BITS_ALL);
1129 ksft_test_result(ret == 0, "%s Don't get sd pages\n", __func__);
1130
1131 /* 6. Don't get present pages */
1132 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0,
1133 PAGE_IS_PRESENT, 0, PAGE_IS_PRESENT, PAGEMAP_BITS_ALL);
1134 ksft_test_result(ret == 0, "%s Don't get present pages\n", __func__);
1135
1136 wp_free(mem, mem_size);
1137 munmap(mem, mem_size);
1138
1139 /* 8. Find written present pages with return mask */
1140 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
1141 if (mem == MAP_FAILED)
1142 ksft_exit_fail_msg("error nomem\n");
1143 wp_init(mem, mem_size);
1144 wp_addr_range(mem, mem_size);
1145
1146 memset(mem, 0, mem_size);
1147
1148 ret = pagemap_ioctl(mem, mem_size, vec, vec_size,
1149 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0,
1150 0, PAGEMAP_BITS_ALL, 0, PAGE_IS_WRITTEN);
1151 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size &&
1152 vec[0].categories == PAGE_IS_WRITTEN,
1153 "%s Find written present pages with return mask\n", __func__);
1154 wp_free(mem, mem_size);
1155 munmap(mem, mem_size);
1156
1157 /* 9. Memory mapped file */
1158 fd = open(progname, O_RDONLY);
1159 if (fd < 0)
1160 ksft_exit_fail_msg("%s Memory mapped file\n", __func__);
1161
1162 ret = stat(progname, &sbuf);
1163 if (ret < 0)
1164 ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno));
1165
1166 fmem = mmap(NULL, sbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
1167 if (fmem == MAP_FAILED)
1168 ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno));
1169
1170 tmp_buf = malloc(sbuf.st_size);
1171 memcpy(tmp_buf, fmem, sbuf.st_size);
1172
1173 ret = pagemap_ioctl(fmem, sbuf.st_size, vec, vec_size, 0, 0,
1174 0, PAGEMAP_NON_WRITTEN_BITS, 0, PAGEMAP_NON_WRITTEN_BITS);
1175
1176 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)fmem &&
1177 LEN(vec[0]) == ceilf((float)sbuf.st_size/page_size) &&
1178 (vec[0].categories & PAGE_IS_FILE),
1179 "%s Memory mapped file\n", __func__);
1180
1181 munmap(fmem, sbuf.st_size);
1182 close(fd);
1183
1184 /* 10. Create and read/write to a memory mapped file */
1185 buf_size = page_size * 10;
1186
1187 fd = open(__FILE__".tmp2", O_RDWR | O_CREAT, 0666);
1188 if (fd < 0)
1189 ksft_exit_fail_msg("Read/write to memory: %s\n",
1190 strerror(errno));
1191
1192 for (i = 0; i < buf_size; i++)
1193 if (write(fd, "c", 1) < 0)
1194 ksft_exit_fail_msg("Create and read/write to a memory mapped file\n");
1195
1196 fmem = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1197 if (fmem == MAP_FAILED)
1198 ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno));
1199
1200 wp_init(fmem, buf_size);
1201 wp_addr_range(fmem, buf_size);
1202
1203 for (i = 0; i < buf_size; i++)
1204 fmem[i] = 'z';
1205
1206 msync(fmem, buf_size, MS_SYNC);
1207
1208 ret = pagemap_ioctl(fmem, buf_size, vec, vec_size, 0, 0,
1209 PAGE_IS_WRITTEN, PAGE_IS_PRESENT | PAGE_IS_SWAPPED | PAGE_IS_FILE, 0,
1210 PAGEMAP_BITS_ALL);
1211
1212 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)fmem &&
1213 LEN(vec[0]) == (buf_size/page_size) &&
1214 (vec[0].categories & PAGE_IS_WRITTEN),
1215 "%s Read/write to memory\n", __func__);
1216
1217 wp_free(fmem, buf_size);
1218 munmap(fmem, buf_size);
1219 close(fd);
1220
1221 free(vec);
1222 return 0;
1223 }
1224
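/*
 * Ensure mprotect() and MAP_FIXED remapping do not corrupt the written
 * state: writes must still be reported after protection changes.
 */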
1225 int mprotect_tests(void)
1226 {
1227 int ret;
1228 char *mem, *mem2;
1229 struct page_region vec;
1230 int pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
1231
1232 if (pagemap_fd < 0) {
1233 fprintf(stderr, "open() failed\n");
1234 exit(1);
1235 }
1236
1237 /* 1. Map two pages */
1238 mem = mmap(0, 2 * page_size, PROT_READ|PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
1239 if (mem == MAP_FAILED)
1240 ksft_exit_fail_msg("error nomem\n");
1241 wp_init(mem, 2 * page_size);
1242 wp_addr_range(mem, 2 * page_size);
1243
1244 /* Populate both pages. */
1245 memset(mem, 1, 2 * page_size);
1246
1247 ret = pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0, PAGE_IS_WRITTEN,
1248 0, 0, PAGE_IS_WRITTEN);
1249 if (ret < 0)
1250 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
1251
1252 ksft_test_result(ret == 1 && LEN(vec) == 2, "%s Both pages written\n", __func__);
1253
1254 /* 2. Start tracking */
1255 wp_addr_range(mem, 2 * page_size);
1256
1257 ksft_test_result(pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0,
1258 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) == 0,
1259 "%s Both pages are not written (dirty)\n", __func__);
1260
1261 /* 3. Remap the second page */
1262 mem2 = mmap(mem + page_size, page_size, PROT_READ|PROT_WRITE,
1263 MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
1264 if (mem2 == MAP_FAILED)
1265 ksft_exit_fail_msg("error nomem\n");
1266 wp_init(mem2, page_size);
1267 wp_addr_range(mem2, page_size);
1268
1269 /* Protect + unprotect. */
1270 mprotect(mem, page_size, PROT_NONE);
1271 mprotect(mem, 2 * page_size, PROT_READ);
1272 mprotect(mem, 2 * page_size, PROT_READ|PROT_WRITE);
1273
1274 /* Modify both pages. */
1275 memset(mem, 2, 2 * page_size);
1276
1277 /* Protect + unprotect. */
1278 mprotect(mem, page_size, PROT_NONE);
1279 mprotect(mem, page_size, PROT_READ);
1280 mprotect(mem, page_size, PROT_READ|PROT_WRITE);
1281
1282 ret = pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0, PAGE_IS_WRITTEN,
1283 0, 0, PAGE_IS_WRITTEN);
1284 if (ret < 0)
1285 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
1286
1287 ksft_test_result(ret == 1 && LEN(vec) == 2,
1288 "%s Both pages written after remap and mprotect\n", __func__);
1289
1290 /* 4. Clear and make the pages written */
1291 wp_addr_range(mem, 2 * page_size);
1292
1293 memset(mem, 'A', 2 * page_size);
1294
1295 ret = pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0, PAGE_IS_WRITTEN,
1296 0, 0, PAGE_IS_WRITTEN);
1297 if (ret < 0)
1298 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
1299
1300 ksft_test_result(ret == 1 && LEN(vec) == 2,
1301 "%s Clear and make the pages written\n", __func__);
1302
1303 wp_free(mem, 2 * page_size);
1304 munmap(mem, 2 * page_size);
1305 return 0;
1306 }
1307
1308 /* transact test */
1309 static const unsigned int nthreads = 6, pages_per_thread = 32, access_per_thread = 8;
1310 static pthread_barrier_t start_barrier, end_barrier;
1311 static unsigned int extra_thread_faults;
1312 static unsigned int iter_count = 1000;
1313 static volatile int finish;
1314
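/*
 * Count the written pages in the first @count pages of @mem; when @reset is
 * set, re-write-protect the matched pages in the same PAGEMAP_SCAN call so
 * that reporting and re-protecting happen in a single ioctl.
 */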
1315 static ssize_t get_dirty_pages_reset(char *mem, unsigned int count,
1316 int reset, int page_size)
1317 {
1318 struct pm_scan_arg arg = {0};
1319 struct page_region rgns[256];
1320 unsigned long long i, j;
1321 long ret;
1322 int cnt;
1323
1324 arg.size = sizeof(struct pm_scan_arg);
1325 arg.start = (uintptr_t)mem;
1326 arg.max_pages = count;
1327 arg.end = (uintptr_t)(mem + count * page_size);
1328 arg.vec = (uintptr_t)rgns;
1329 arg.vec_len = sizeof(rgns) / sizeof(*rgns);
1330 if (reset)
1331 arg.flags |= PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC;
1332 arg.category_mask = PAGE_IS_WRITTEN;
1333 arg.return_mask = PAGE_IS_WRITTEN;
1334
1335 ret = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
1336 if (ret < 0)
1337 ksft_exit_fail_msg("ioctl failed\n");
1338
1339 cnt = 0;
1340 for (i = 0; i < (unsigned long)ret; ++i) {
1341 if (rgns[i].categories != PAGE_IS_WRITTEN)
1342 ksft_exit_fail_msg("wrong flags\n");
1343
1344 for (j = 0; j < LEN(rgns[i]); ++j)
1345 cnt++;
1346 }
1347
1348 return cnt;
1349 }
1350
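/*
 * Writer thread: between the two barriers, touch one word in each of its
 * access_per_thread pages and account any minor faults beyond that number
 * in extra_thread_faults.
 */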
1351 void *thread_proc(void *mem)
1352 {
1353 int *m = mem;
1354 long curr_faults, faults;
1355 struct rusage r;
1356 unsigned int i;
1357 int ret;
1358
1359 if (getrusage(RUSAGE_THREAD, &r))
1360 ksft_exit_fail_msg("getrusage\n");
1361
1362 curr_faults = r.ru_minflt;
1363
1364 while (!finish) {
1365 ret = pthread_barrier_wait(&start_barrier);
1366 if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD)
1367 ksft_exit_fail_msg("pthread_barrier_wait\n");
1368
1369 for (i = 0; i < access_per_thread; ++i)
1370 __atomic_add_fetch(m + i * (0x1000 / sizeof(*m)), 1, __ATOMIC_SEQ_CST);
1371
1372 ret = pthread_barrier_wait(&end_barrier);
1373 if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD)
1374 ksft_exit_fail_msg("pthread_barrier_wait\n");
1375
1376 if (getrusage(RUSAGE_THREAD, &r))
1377 ksft_exit_fail_msg("getrusage\n");
1378
1379 faults = r.ru_minflt - curr_faults;
1380 if (faults < access_per_thread)
1381 			ksft_exit_fail_msg("faults < access_per_thread\n");
1382
1383 __atomic_add_fetch(&extra_thread_faults, faults - access_per_thread,
1384 __ATOMIC_SEQ_CST);
1385 curr_faults = r.ru_minflt;
1386 }
1387
1388 return NULL;
1389 }
1390
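/*
 * Race the count-and-reset of written pages against concurrent writer
 * threads and check that no write is ever lost; duplicate reports are
 * tolerated and only accounted as extra pages.
 */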
1391 static void transact_test(int page_size)
1392 {
1393 unsigned int i, count, extra_pages;
1394 unsigned int c;
1395 pthread_t th;
1396 char *mem;
1397 int ret;
1398
1399 if (pthread_barrier_init(&start_barrier, NULL, nthreads + 1))
1400 ksft_exit_fail_msg("pthread_barrier_init\n");
1401
1402 if (pthread_barrier_init(&end_barrier, NULL, nthreads + 1))
1403 ksft_exit_fail_msg("pthread_barrier_init\n");
1404
1405 mem = mmap(NULL, 0x1000 * nthreads * pages_per_thread, PROT_READ | PROT_WRITE,
1406 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
1407 if (mem == MAP_FAILED)
1408 ksft_exit_fail_msg("Error mmap %s.\n", strerror(errno));
1409
1410 wp_init(mem, 0x1000 * nthreads * pages_per_thread);
1411 wp_addr_range(mem, 0x1000 * nthreads * pages_per_thread);
1412
1413 memset(mem, 0, 0x1000 * nthreads * pages_per_thread);
1414
1415 count = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size);
1416 ksft_test_result(count > 0, "%s count %u\n", __func__, count);
1417 count = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size);
1418 ksft_test_result(count == 0, "%s count %u\n", __func__, count);
1419
1420 finish = 0;
1421 for (i = 0; i < nthreads; ++i)
1422 pthread_create(&th, NULL, thread_proc, mem + 0x1000 * i * pages_per_thread);
1423
1424 extra_pages = 0;
1425 for (i = 0; i < iter_count; ++i) {
1426 count = 0;
1427
1428 ret = pthread_barrier_wait(&start_barrier);
1429 if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD)
1430 ksft_exit_fail_msg("pthread_barrier_wait\n");
1431
1432 count = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1,
1433 page_size);
1434
1435 ret = pthread_barrier_wait(&end_barrier);
1436 if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD)
1437 ksft_exit_fail_msg("pthread_barrier_wait\n");
1438
1439 if (count > nthreads * access_per_thread)
1440 ksft_exit_fail_msg("Too big count %u expected %u, iter %u\n",
1441 count, nthreads * access_per_thread, i);
1442
1443 c = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size);
1444 count += c;
1445
1446 if (c > nthreads * access_per_thread) {
1447 ksft_test_result_fail(" %s count > nthreads\n", __func__);
1448 return;
1449 }
1450
1451 if (count != nthreads * access_per_thread) {
1452 			/*
1453 			 * The purpose of the test is to make sure that no page updates are lost
1454 			 * when page updates and read-resets of the soft dirty flags run in
1455 			 * parallel. However, the application may get the soft dirty flags twice
1456 			 * across two consecutive read-resets. This seems unavoidable as the soft
1457 			 * dirty flag is handled in software through page faults in the kernel.
1458 			 * While updating the flags is supposed to be synchronized between page
1459 			 * fault handling and read-reset, the read-reset can happen after the page
1460 			 * fault PTE update but before the application re-executes the write
1461 			 * instruction. The read-reset then gets the flag, clears write access, and
1462 			 * the application faults again for the same write.
1463 			 */
1464 if (count < nthreads * access_per_thread) {
1465 ksft_test_result_fail("Lost update, iter %u, %u vs %u.\n", i, count,
1466 nthreads * access_per_thread);
1467 return;
1468 }
1469
1470 extra_pages += count - nthreads * access_per_thread;
1471 }
1472 }
1473
1474 pthread_barrier_wait(&start_barrier);
1475 finish = 1;
1476 pthread_barrier_wait(&end_barrier);
1477
1478 ksft_test_result_pass("%s Extra pages %u (%.1lf%%), extra thread faults %u.\n", __func__,
1479 extra_pages,
1480 100.0 * extra_pages / (iter_count * nthreads * access_per_thread),
1481 extra_thread_faults);
1482 }
1483
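/*
 * Check PAGE_IS_PFNZERO reporting: read-only touched anonymous pages map the
 * shared zero page, and a read-faulted THP area maps the huge zero page.
 */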
1484 void zeropfn_tests(void)
1485 {
1486 unsigned long long mem_size;
1487 struct page_region vec;
1488 int i, ret;
1489 char *mmap_mem, *mem;
1490
1491 /* Test with normal memory */
1492 mem_size = 10 * page_size;
1493 mem = mmap(NULL, mem_size, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
1494 if (mem == MAP_FAILED)
1495 ksft_exit_fail_msg("error nomem\n");
1496
1497 /* Touch each page to ensure it's mapped */
1498 for (i = 0; i < mem_size; i += page_size)
1499 (void)((volatile char *)mem)[i];
1500
1501 ret = pagemap_ioctl(mem, mem_size, &vec, 1, 0,
1502 (mem_size / page_size), PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);
1503 if (ret < 0)
1504 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
1505
1506 ksft_test_result(ret == 1 && LEN(vec) == (mem_size / page_size),
1507 "%s all pages must have PFNZERO set\n", __func__);
1508
1509 munmap(mem, mem_size);
1510
1511 	/* Test with the huge zero page if use_zero_page is set to 1 */
1512 if (!detect_huge_zeropage()) {
1513 ksft_test_result_skip("%s use_zero_page not supported or set to 1\n", __func__);
1514 return;
1515 }
1516
1517 mem_size = 2 * hpage_size;
1518 mmap_mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
1519 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1520 if (mmap_mem == MAP_FAILED)
1521 ksft_exit_fail_msg("error nomem\n");
1522
1523 /* We need a THP-aligned memory area. */
1524 mem = (char *)(((uintptr_t)mmap_mem + hpage_size) & ~(hpage_size - 1));
1525
1526 ret = madvise(mem, hpage_size, MADV_HUGEPAGE);
1527 if (!ret) {
1528 FORCE_READ(*mem);
1529
1530 ret = pagemap_ioctl(mem, hpage_size, &vec, 1, 0,
1531 0, PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);
1532 if (ret < 0)
1533 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
1534
1535 ksft_test_result(ret == 1 && LEN(vec) == (hpage_size / page_size),
1536 "%s all huge pages must have PFNZERO set\n", __func__);
1537 } else {
1538 ksft_test_result_skip("%s huge page not supported\n", __func__);
1539 }
1540
1541 munmap(mmap_mem, mem_size);
1542 }
1543
1544 int main(int __attribute__((unused)) argc, char *argv[])
1545 {
1546 int shmid, buf_size, fd, i, ret;
1547 unsigned long long mem_size;
1548 char *mem, *map, *fmem;
1549 struct stat sbuf;
1550
1551 progname = argv[0];
1552
1553 ksft_print_header();
1554
1555 if (init_uffd())
1556 ksft_exit_pass();
1557
1558 ksft_set_plan(117);
1559
1560 page_size = getpagesize();
1561 hpage_size = read_pmd_pagesize();
1562
1563 pagemap_fd = open(PAGEMAP, O_RDONLY);
1564 if (pagemap_fd < 0)
1565 return -EINVAL;
1566
1567 /* 1. Sanity testing */
1568 sanity_tests_sd();
1569
1570 /* 2. Normal page testing */
1571 mem_size = 10 * page_size;
1572 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
1573 if (mem == MAP_FAILED)
1574 ksft_exit_fail_msg("error nomem\n");
1575 wp_init(mem, mem_size);
1576 wp_addr_range(mem, mem_size);
1577
1578 base_tests("Page testing:", mem, mem_size, 0);
1579
1580 wp_free(mem, mem_size);
1581 munmap(mem, mem_size);
1582
1583 /* 3. Large page testing */
1584 mem_size = 512 * 10 * page_size;
1585 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
1586 if (mem == MAP_FAILED)
1587 ksft_exit_fail_msg("error nomem\n");
1588 wp_init(mem, mem_size);
1589 wp_addr_range(mem, mem_size);
1590
1591 base_tests("Large Page testing:", mem, mem_size, 0);
1592
1593 wp_free(mem, mem_size);
1594 munmap(mem, mem_size);
1595
1596 /* 4. Huge page testing */
1597 map = gethugepage(hpage_size);
1598 if (map) {
1599 wp_init(map, hpage_size);
1600 wp_addr_range(map, hpage_size);
1601 base_tests("Huge page testing:", map, hpage_size, 0);
1602 wp_free(map, hpage_size);
1603 free(map);
1604 } else {
1605 base_tests("Huge page testing:", NULL, 0, 1);
1606 }
1607
1608 /* 5. SHM Hugetlb page testing */
1609 mem_size = 2*1024*1024;
1610 mem = gethugetlb_mem(mem_size, &shmid);
1611 if (mem) {
1612 wp_init(mem, mem_size);
1613 wp_addr_range(mem, mem_size);
1614
1615 base_tests("Hugetlb shmem testing:", mem, mem_size, 0);
1616
1617 wp_free(mem, mem_size);
1618 shmctl(shmid, IPC_RMID, NULL);
1619 } else {
1620 base_tests("Hugetlb shmem testing:", NULL, 0, 1);
1621 }
1622
1623 /* 6. Hugetlb page testing */
1624 mem = gethugetlb_mem(mem_size, NULL);
1625 if (mem) {
1626 wp_init(mem, mem_size);
1627 wp_addr_range(mem, mem_size);
1628
1629 base_tests("Hugetlb mem testing:", mem, mem_size, 0);
1630
1631 wp_free(mem, mem_size);
1632 } else {
1633 base_tests("Hugetlb mem testing:", NULL, 0, 1);
1634 }
1635
1636 /* 7. File Hugetlb testing */
1637 mem_size = 2*1024*1024;
1638 fd = memfd_create("uffd-test", MFD_HUGETLB | MFD_NOEXEC_SEAL);
1639 if (fd < 0)
1640 ksft_exit_fail_msg("uffd-test creation failed %d %s\n", errno, strerror(errno));
1641 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1642 if (mem != MAP_FAILED) {
1643 wp_init(mem, mem_size);
1644 wp_addr_range(mem, mem_size);
1645
1646 base_tests("Hugetlb shmem testing:", mem, mem_size, 0);
1647
1648 wp_free(mem, mem_size);
1649 		munmap(mem, mem_size);
1650 } else {
1651 base_tests("Hugetlb shmem testing:", NULL, 0, 1);
1652 }
1653 close(fd);
1654
1655 /* 8. File memory testing */
1656 buf_size = page_size * 10;
1657
1658 fd = open(__FILE__".tmp0", O_RDWR | O_CREAT, 0777);
1659 if (fd < 0)
1660 ksft_exit_fail_msg("Create and read/write to a memory mapped file: %s\n",
1661 strerror(errno));
1662
1663 for (i = 0; i < buf_size; i++)
1664 if (write(fd, "c", 1) < 0)
1665 ksft_exit_fail_msg("Create and read/write to a memory mapped file\n");
1666
1667 ret = stat(__FILE__".tmp0", &sbuf);
1668 if (ret < 0)
1669 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
1670
1671 fmem = mmap(NULL, sbuf.st_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1672 if (fmem == MAP_FAILED)
1673 ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno));
1674
1675 wp_init(fmem, sbuf.st_size);
1676 wp_addr_range(fmem, sbuf.st_size);
1677
1678 base_tests("File memory testing:", fmem, sbuf.st_size, 0);
1679
1680 wp_free(fmem, sbuf.st_size);
1681 munmap(fmem, sbuf.st_size);
1682 close(fd);
1683
1684 /* 9. File memory testing */
1685 buf_size = page_size * 10;
1686
1687 fd = memfd_create(__FILE__".tmp00", MFD_NOEXEC_SEAL);
1688 if (fd < 0)
1689 ksft_exit_fail_msg("Create and read/write to a memory mapped file: %s\n",
1690 strerror(errno));
1691
1692 if (ftruncate(fd, buf_size))
1693 ksft_exit_fail_msg("Error ftruncate\n");
1694
1695 for (i = 0; i < buf_size; i++)
1696 if (write(fd, "c", 1) < 0)
1697 ksft_exit_fail_msg("Create and read/write to a memory mapped file\n");
1698
1699 fmem = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1700 if (fmem == MAP_FAILED)
1701 ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno));
1702
1703 wp_init(fmem, buf_size);
1704 wp_addr_range(fmem, buf_size);
1705
1706 base_tests("File anonymous memory testing:", fmem, buf_size, 0);
1707
1708 wp_free(fmem, buf_size);
1709 munmap(fmem, buf_size);
1710 close(fd);
1711
1712 /* 10. Huge page tests */
1713 hpage_unit_tests();
1714
1715 /* 11. Iterative test */
1716 test_simple();
1717
1718 /* 12. Mprotect test */
1719 mprotect_tests();
1720
1721 /* 13. Transact test */
1722 transact_test(page_size);
1723
1724 /* 14. Sanity testing */
1725 sanity_tests();
1726
1727 	/* 15. Unmapped address test */
1728 unmapped_region_tests();
1729
1730 /* 16. Userfaultfd tests */
1731 userfaultfd_tests();
1732
1733 /* 17. ZEROPFN tests */
1734 zeropfn_tests();
1735
1736 close(pagemap_fd);
1737 ksft_exit_pass();
1738 }
1739