1 // SPDX-License-Identifier: GPL-2.0 2 #define _GNU_SOURCE 3 #include <stdio.h> 4 #include <fcntl.h> 5 #include <string.h> 6 #include <sys/mman.h> 7 #include <errno.h> 8 #include <malloc.h> 9 #include "vm_util.h" 10 #include "../kselftest.h" 11 #include <linux/types.h> 12 #include <linux/memfd.h> 13 #include <linux/userfaultfd.h> 14 #include <linux/fs.h> 15 #include <sys/ioctl.h> 16 #include <sys/stat.h> 17 #include <math.h> 18 #include <asm/unistd.h> 19 #include <pthread.h> 20 #include <sys/resource.h> 21 #include <assert.h> 22 #include <sys/ipc.h> 23 #include <sys/shm.h> 24 25 #define PAGEMAP_BITS_ALL (PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN | \ 26 PAGE_IS_FILE | PAGE_IS_PRESENT | \ 27 PAGE_IS_SWAPPED | PAGE_IS_PFNZERO | \ 28 PAGE_IS_HUGE) 29 #define PAGEMAP_NON_WRITTEN_BITS (PAGE_IS_WPALLOWED | PAGE_IS_FILE | \ 30 PAGE_IS_PRESENT | PAGE_IS_SWAPPED | \ 31 PAGE_IS_PFNZERO | PAGE_IS_HUGE) 32 33 #define TEST_ITERATIONS 100 34 #define PAGEMAP "/proc/self/pagemap" 35 int pagemap_fd; 36 int uffd; 37 int page_size; 38 int hpage_size; 39 const char *progname; 40 41 #define LEN(region) ((region.end - region.start)/page_size) 42 43 static long pagemap_ioctl(void *start, int len, void *vec, int vec_len, int flag, 44 int max_pages, long required_mask, long anyof_mask, long excluded_mask, 45 long return_mask) 46 { 47 struct pm_scan_arg arg; 48 49 arg.start = (uintptr_t)start; 50 arg.end = (uintptr_t)(start + len); 51 arg.vec = (uintptr_t)vec; 52 arg.vec_len = vec_len; 53 arg.flags = flag; 54 arg.size = sizeof(struct pm_scan_arg); 55 arg.max_pages = max_pages; 56 arg.category_mask = required_mask; 57 arg.category_anyof_mask = anyof_mask; 58 arg.category_inverted = excluded_mask; 59 arg.return_mask = return_mask; 60 61 return ioctl(pagemap_fd, PAGEMAP_SCAN, &arg); 62 } 63 64 static long pagemap_ioc(void *start, int len, void *vec, int vec_len, int flag, 65 int max_pages, long required_mask, long anyof_mask, long excluded_mask, 66 long return_mask, long *walk_end) 67 { 68 struct pm_scan_arg arg; 69 int ret; 70 71 arg.start = (uintptr_t)start; 72 arg.end = (uintptr_t)(start + len); 73 arg.vec = (uintptr_t)vec; 74 arg.vec_len = vec_len; 75 arg.flags = flag; 76 arg.size = sizeof(struct pm_scan_arg); 77 arg.max_pages = max_pages; 78 arg.category_mask = required_mask; 79 arg.category_anyof_mask = anyof_mask; 80 arg.category_inverted = excluded_mask; 81 arg.return_mask = return_mask; 82 83 ret = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg); 84 85 if (walk_end) 86 *walk_end = arg.walk_end; 87 88 return ret; 89 } 90 91 92 int init_uffd(void) 93 { 94 struct uffdio_api uffdio_api; 95 96 uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY); 97 if (uffd == -1) 98 return uffd; 99 100 uffdio_api.api = UFFD_API; 101 uffdio_api.features = UFFD_FEATURE_WP_UNPOPULATED | UFFD_FEATURE_WP_ASYNC | 102 UFFD_FEATURE_WP_HUGETLBFS_SHMEM; 103 if (ioctl(uffd, UFFDIO_API, &uffdio_api)) 104 return -1; 105 106 if (!(uffdio_api.api & UFFDIO_REGISTER_MODE_WP) || 107 !(uffdio_api.features & UFFD_FEATURE_WP_UNPOPULATED) || 108 !(uffdio_api.features & UFFD_FEATURE_WP_ASYNC) || 109 !(uffdio_api.features & UFFD_FEATURE_WP_HUGETLBFS_SHMEM)) 110 return -1; 111 112 return 0; 113 } 114 115 int wp_init(void *lpBaseAddress, int dwRegionSize) 116 { 117 struct uffdio_register uffdio_register; 118 struct uffdio_writeprotect wp; 119 120 uffdio_register.range.start = (unsigned long)lpBaseAddress; 121 uffdio_register.range.len = dwRegionSize; 122 uffdio_register.mode = UFFDIO_REGISTER_MODE_WP; 123 if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) 124 ksft_exit_fail_msg("ioctl(UFFDIO_REGISTER) %d %s\n", errno, strerror(errno)); 125 126 if (!(uffdio_register.ioctls & UFFDIO_WRITEPROTECT)) 127 ksft_exit_fail_msg("ioctl set is incorrect\n"); 128 129 wp.range.start = (unsigned long)lpBaseAddress; 130 wp.range.len = dwRegionSize; 131 wp.mode = UFFDIO_WRITEPROTECT_MODE_WP; 132 133 if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp)) 134 ksft_exit_fail_msg("ioctl(UFFDIO_WRITEPROTECT)\n"); 135 136 return 0; 137 } 138 139 int wp_free(void *lpBaseAddress, int dwRegionSize) 140 { 141 struct uffdio_register uffdio_register; 142 143 uffdio_register.range.start = (unsigned long)lpBaseAddress; 144 uffdio_register.range.len = dwRegionSize; 145 uffdio_register.mode = UFFDIO_REGISTER_MODE_WP; 146 if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range)) 147 ksft_exit_fail_msg("ioctl unregister failure\n"); 148 return 0; 149 } 150 151 int wp_addr_range(void *lpBaseAddress, int dwRegionSize) 152 { 153 if (pagemap_ioctl(lpBaseAddress, dwRegionSize, NULL, 0, 154 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 155 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) < 0) 156 ksft_exit_fail_msg("error %d %d %s\n", 1, errno, strerror(errno)); 157 158 return 0; 159 } 160 161 void *gethugetlb_mem(int size, int *shmid) 162 { 163 char *mem; 164 165 if (shmid) { 166 *shmid = shmget(2, size, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W); 167 if (*shmid < 0) 168 return NULL; 169 170 mem = shmat(*shmid, 0, 0); 171 if (mem == (char *)-1) { 172 shmctl(*shmid, IPC_RMID, NULL); 173 ksft_exit_fail_msg("Shared memory attach failure\n"); 174 } 175 } else { 176 mem = mmap(NULL, size, PROT_READ | PROT_WRITE, 177 MAP_ANONYMOUS | MAP_HUGETLB | MAP_PRIVATE, -1, 0); 178 if (mem == MAP_FAILED) 179 return NULL; 180 } 181 182 return mem; 183 } 184 185 int userfaultfd_tests(void) 186 { 187 int mem_size, vec_size, written, num_pages = 16; 188 char *mem, *vec; 189 190 mem_size = num_pages * page_size; 191 mem = mmap(NULL, mem_size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0); 192 if (mem == MAP_FAILED) 193 ksft_exit_fail_msg("error nomem\n"); 194 195 wp_init(mem, mem_size); 196 197 /* Change protection of pages differently */ 198 mprotect(mem, mem_size/8, PROT_READ|PROT_WRITE); 199 mprotect(mem + 1 * mem_size/8, mem_size/8, PROT_READ); 200 mprotect(mem + 2 * mem_size/8, mem_size/8, PROT_READ|PROT_WRITE); 201 mprotect(mem + 3 * mem_size/8, mem_size/8, PROT_READ); 202 mprotect(mem + 4 * mem_size/8, mem_size/8, PROT_READ|PROT_WRITE); 203 mprotect(mem + 5 * mem_size/8, mem_size/8, PROT_NONE); 204 mprotect(mem + 6 * mem_size/8, mem_size/8, PROT_READ|PROT_WRITE); 205 mprotect(mem + 7 * mem_size/8, mem_size/8, PROT_READ); 206 207 wp_addr_range(mem + (mem_size/16), mem_size - 2 * (mem_size/8)); 208 wp_addr_range(mem, mem_size); 209 210 vec_size = mem_size/page_size; 211 vec = malloc(sizeof(struct page_region) * vec_size); 212 213 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 214 vec_size - 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 215 if (written < 0) 216 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); 217 218 ksft_test_result(written == 0, "%s all new pages must not be written (dirty)\n", __func__); 219 220 wp_free(mem, mem_size); 221 munmap(mem, mem_size); 222 free(vec); 223 return 0; 224 } 225 226 int get_reads(struct page_region *vec, int vec_size) 227 { 228 int i, sum = 0; 229 230 for (i = 0; i < vec_size; i++) 231 sum += LEN(vec[i]); 232 233 return sum; 234 } 235 236 int sanity_tests_sd(void) 237 { 238 int mem_size, vec_size, ret, ret2, ret3, i, num_pages = 1000, total_pages = 0; 239 int total_writes, total_reads, reads, count; 240 struct page_region *vec, *vec2; 241 char *mem, *m[2]; 242 long walk_end; 243 244 vec_size = num_pages/2; 245 mem_size = num_pages * page_size; 246 247 vec = malloc(sizeof(struct page_region) * vec_size); 248 if (!vec) 249 ksft_exit_fail_msg("error nomem\n"); 250 251 vec2 = malloc(sizeof(struct page_region) * vec_size); 252 if (!vec2) 253 ksft_exit_fail_msg("error nomem\n"); 254 255 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); 256 if (mem == MAP_FAILED) 257 ksft_exit_fail_msg("error nomem\n"); 258 259 wp_init(mem, mem_size); 260 wp_addr_range(mem, mem_size); 261 262 /* 1. wrong operation */ 263 ksft_test_result(pagemap_ioctl(mem, 0, vec, vec_size, 0, 264 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) == 0, 265 "%s Zero range size is valid\n", __func__); 266 267 ksft_test_result(pagemap_ioctl(mem, mem_size, NULL, vec_size, 0, 268 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) < 0, 269 "%s output buffer must be specified with size\n", __func__); 270 271 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, 0, 0, 272 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) == 0, 273 "%s output buffer can be 0\n", __func__); 274 275 ksft_test_result(pagemap_ioctl(mem, mem_size, 0, 0, 0, 276 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) == 0, 277 "%s output buffer can be 0\n", __func__); 278 279 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, -1, 280 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) < 0, 281 "%s wrong flag specified\n", __func__); 282 283 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 284 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC | 0xFF, 285 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) < 0, 286 "%s flag has extra bits specified\n", __func__); 287 288 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 289 0, 0, 0, 0, PAGE_IS_WRITTEN) >= 0, 290 "%s no selection mask is specified\n", __func__); 291 292 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 293 0, PAGE_IS_WRITTEN, PAGE_IS_WRITTEN, 0, 0) == 0, 294 "%s no return mask is specified\n", __func__); 295 296 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 297 0, PAGE_IS_WRITTEN, 0, 0, 0x1000) < 0, 298 "%s wrong return mask specified\n", __func__); 299 300 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 301 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 302 0, 0xFFF, PAGE_IS_WRITTEN, 0, PAGE_IS_WRITTEN) < 0, 303 "%s mixture of correct and wrong flag\n", __func__); 304 305 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 306 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 307 0, 0, 0, PAGEMAP_BITS_ALL, PAGE_IS_WRITTEN) >= 0, 308 "%s PAGEMAP_BITS_ALL can be specified with PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC\n", 309 __func__); 310 311 /* 2. Clear area with larger vec size */ 312 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 313 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0, 314 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 315 ksft_test_result(ret >= 0, "%s Clear area with larger vec size\n", __func__); 316 317 /* 3. Repeated pattern of written and non-written pages */ 318 for (i = 0; i < mem_size; i += 2 * page_size) 319 mem[i]++; 320 321 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, PAGE_IS_WRITTEN, 0, 322 0, PAGE_IS_WRITTEN); 323 if (ret < 0) 324 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 325 326 ksft_test_result(ret == mem_size/(page_size * 2), 327 "%s Repeated pattern of written and non-written pages\n", __func__); 328 329 /* 4. Repeated pattern of written and non-written pages in parts */ 330 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 331 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 332 num_pages/2 - 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 333 if (ret < 0) 334 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 335 336 ret2 = pagemap_ioctl(mem, mem_size, vec, 2, 0, 0, PAGE_IS_WRITTEN, 0, 0, 337 PAGE_IS_WRITTEN); 338 if (ret2 < 0) 339 ksft_exit_fail_msg("error %d %d %s\n", ret2, errno, strerror(errno)); 340 341 ret3 = pagemap_ioctl(mem, mem_size, vec, vec_size, 342 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 343 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 344 if (ret3 < 0) 345 ksft_exit_fail_msg("error %d %d %s\n", ret3, errno, strerror(errno)); 346 347 ksft_test_result((ret + ret3) == num_pages/2 && ret2 == 2, 348 "%s Repeated pattern of written and non-written pages in parts %d %d %d\n", 349 __func__, ret, ret3, ret2); 350 351 /* 5. Repeated pattern of written and non-written pages max_pages */ 352 for (i = 0; i < mem_size; i += 2 * page_size) 353 mem[i]++; 354 mem[(mem_size/page_size - 1) * page_size]++; 355 356 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 357 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 358 num_pages/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 359 if (ret < 0) 360 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 361 362 ret2 = pagemap_ioctl(mem, mem_size, vec, vec_size, 363 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 364 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 365 if (ret2 < 0) 366 ksft_exit_fail_msg("error %d %d %s\n", ret2, errno, strerror(errno)); 367 368 ksft_test_result(ret == num_pages/2 && ret2 == 1, 369 "%s Repeated pattern of written and non-written pages max_pages\n", 370 __func__); 371 372 /* 6. only get 2 dirty pages and clear them as well */ 373 vec_size = mem_size/page_size; 374 memset(mem, -1, mem_size); 375 376 /* get and clear second and third pages */ 377 ret = pagemap_ioctl(mem + page_size, 2 * page_size, vec, 1, 378 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 379 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 380 if (ret < 0) 381 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 382 383 ret2 = pagemap_ioctl(mem, mem_size, vec2, vec_size, 0, 0, 384 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 385 if (ret2 < 0) 386 ksft_exit_fail_msg("error %d %d %s\n", ret2, errno, strerror(errno)); 387 388 ksft_test_result(ret == 1 && LEN(vec[0]) == 2 && 389 vec[0].start == (uintptr_t)(mem + page_size) && 390 ret2 == 2 && LEN(vec2[0]) == 1 && vec2[0].start == (uintptr_t)mem && 391 LEN(vec2[1]) == vec_size - 3 && 392 vec2[1].start == (uintptr_t)(mem + 3 * page_size), 393 "%s only get 2 written pages and clear them as well\n", __func__); 394 395 wp_free(mem, mem_size); 396 munmap(mem, mem_size); 397 398 /* 7. Two regions */ 399 m[0] = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); 400 if (m[0] == MAP_FAILED) 401 ksft_exit_fail_msg("error nomem\n"); 402 m[1] = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); 403 if (m[1] == MAP_FAILED) 404 ksft_exit_fail_msg("error nomem\n"); 405 406 wp_init(m[0], mem_size); 407 wp_init(m[1], mem_size); 408 wp_addr_range(m[0], mem_size); 409 wp_addr_range(m[1], mem_size); 410 411 memset(m[0], 'a', mem_size); 412 memset(m[1], 'b', mem_size); 413 414 wp_addr_range(m[0], mem_size); 415 416 ret = pagemap_ioctl(m[1], mem_size, vec, 1, 0, 0, PAGE_IS_WRITTEN, 0, 0, 417 PAGE_IS_WRITTEN); 418 if (ret < 0) 419 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 420 421 ksft_test_result(ret == 1 && LEN(vec[0]) == mem_size/page_size, 422 "%s Two regions\n", __func__); 423 424 wp_free(m[0], mem_size); 425 wp_free(m[1], mem_size); 426 munmap(m[0], mem_size); 427 munmap(m[1], mem_size); 428 429 free(vec); 430 free(vec2); 431 432 /* 8. Smaller vec */ 433 mem_size = 1050 * page_size; 434 vec_size = mem_size/(page_size*2); 435 436 vec = malloc(sizeof(struct page_region) * vec_size); 437 if (!vec) 438 ksft_exit_fail_msg("error nomem\n"); 439 440 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); 441 if (mem == MAP_FAILED) 442 ksft_exit_fail_msg("error nomem\n"); 443 444 wp_init(mem, mem_size); 445 wp_addr_range(mem, mem_size); 446 447 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 448 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0, 449 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 450 if (ret < 0) 451 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 452 453 for (i = 0; i < mem_size/page_size; i += 2) 454 mem[i * page_size]++; 455 456 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 457 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 458 mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 459 if (ret < 0) 460 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 461 462 total_pages += ret; 463 464 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 465 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 466 mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 467 if (ret < 0) 468 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 469 470 total_pages += ret; 471 472 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 473 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 474 mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 475 if (ret < 0) 476 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 477 478 total_pages += ret; 479 480 ksft_test_result(total_pages == mem_size/(page_size*2), "%s Smaller max_pages\n", __func__); 481 482 free(vec); 483 wp_free(mem, mem_size); 484 munmap(mem, mem_size); 485 total_pages = 0; 486 487 /* 9. Smaller vec */ 488 mem_size = 10000 * page_size; 489 vec_size = 50; 490 491 vec = malloc(sizeof(struct page_region) * vec_size); 492 if (!vec) 493 ksft_exit_fail_msg("error nomem\n"); 494 495 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); 496 if (mem == MAP_FAILED) 497 ksft_exit_fail_msg("error nomem\n"); 498 499 wp_init(mem, mem_size); 500 wp_addr_range(mem, mem_size); 501 502 for (count = 0; count < TEST_ITERATIONS; count++) { 503 total_writes = total_reads = 0; 504 walk_end = (long)mem; 505 506 for (i = 0; i < mem_size; i += page_size) { 507 if (rand() % 2) { 508 mem[i]++; 509 total_writes++; 510 } 511 } 512 513 while (total_reads < total_writes) { 514 ret = pagemap_ioc((void *)walk_end, mem_size-(walk_end - (long)mem), vec, 515 vec_size, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 516 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 517 if (ret < 0) 518 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 519 520 if (ret > vec_size) 521 break; 522 523 reads = get_reads(vec, ret); 524 total_reads += reads; 525 } 526 527 if (total_reads != total_writes) 528 break; 529 } 530 531 ksft_test_result(count == TEST_ITERATIONS, "Smaller vec\n"); 532 533 free(vec); 534 wp_free(mem, mem_size); 535 munmap(mem, mem_size); 536 537 /* 10. Walk_end tester */ 538 vec_size = 1000; 539 mem_size = vec_size * page_size; 540 541 vec = malloc(sizeof(struct page_region) * vec_size); 542 if (!vec) 543 ksft_exit_fail_msg("error nomem\n"); 544 545 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); 546 if (mem == MAP_FAILED) 547 ksft_exit_fail_msg("error nomem\n"); 548 549 wp_init(mem, mem_size); 550 wp_addr_range(mem, mem_size); 551 552 memset(mem, 0, mem_size); 553 554 ret = pagemap_ioc(mem, 0, vec, vec_size, 0, 555 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 556 if (ret < 0) 557 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 558 ksft_test_result(ret == 0 && walk_end == (long)mem, 559 "Walk_end: Same start and end address\n"); 560 561 ret = pagemap_ioc(mem, 0, vec, vec_size, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 562 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 563 if (ret < 0) 564 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 565 ksft_test_result(ret == 0 && walk_end == (long)mem, 566 "Walk_end: Same start and end with WP\n"); 567 568 ret = pagemap_ioc(mem, 0, vec, 0, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 569 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 570 if (ret < 0) 571 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 572 ksft_test_result(ret == 0 && walk_end == (long)mem, 573 "Walk_end: Same start and end with 0 output buffer\n"); 574 575 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, 576 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 577 if (ret < 0) 578 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 579 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size), 580 "Walk_end: Big vec\n"); 581 582 ret = pagemap_ioc(mem, mem_size, vec, 1, 0, 583 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 584 if (ret < 0) 585 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 586 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size), 587 "Walk_end: vec of minimum length\n"); 588 589 ret = pagemap_ioc(mem, mem_size, vec, 1, 0, 590 vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 591 if (ret < 0) 592 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 593 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size), 594 "Walk_end: Max pages specified\n"); 595 596 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, 597 vec_size/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 598 if (ret < 0) 599 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 600 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size/2), 601 "Walk_end: Half max pages\n"); 602 603 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, 604 1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 605 if (ret < 0) 606 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 607 ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size), 608 "Walk_end: 1 max page\n"); 609 610 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, 611 -1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 612 if (ret < 0) 613 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 614 ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size), 615 "Walk_end: max pages\n"); 616 617 wp_addr_range(mem, mem_size); 618 for (i = 0; i < mem_size; i += 2 * page_size) 619 mem[i]++; 620 621 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, 622 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 623 if (ret < 0) 624 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 625 ksft_test_result(ret == vec_size/2 && walk_end == (long)(mem + mem_size), 626 "Walk_end sparse: Big vec\n"); 627 628 ret = pagemap_ioc(mem, mem_size, vec, 1, 0, 629 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 630 if (ret < 0) 631 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 632 ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2), 633 "Walk_end sparse: vec of minimum length\n"); 634 635 ret = pagemap_ioc(mem, mem_size, vec, 1, 0, 636 vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 637 if (ret < 0) 638 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 639 ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2), 640 "Walk_end sparse: Max pages specified\n"); 641 642 ret = pagemap_ioc(mem, mem_size, vec, vec_size/2, 0, 643 vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 644 if (ret < 0) 645 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 646 ksft_test_result(ret == vec_size/2 && walk_end == (long)(mem + mem_size), 647 "Walk_end sparse: Max pages specified\n"); 648 649 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, 650 vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 651 if (ret < 0) 652 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 653 ksft_test_result(ret == vec_size/2 && walk_end == (long)(mem + mem_size), 654 "Walk_end sparse: Max pages specified\n"); 655 656 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, 657 vec_size/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 658 if (ret < 0) 659 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 660 ksft_test_result(ret == vec_size/2 && walk_end == (long)(mem + mem_size), 661 "Walk_endsparse : Half max pages\n"); 662 663 ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, 664 1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); 665 if (ret < 0) 666 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 667 ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2), 668 "Walk_end: 1 max page\n"); 669 670 free(vec); 671 wp_free(mem, mem_size); 672 munmap(mem, mem_size); 673 674 return 0; 675 } 676 677 int base_tests(char *prefix, char *mem, int mem_size, int skip) 678 { 679 int vec_size, written; 680 struct page_region *vec, *vec2; 681 682 if (skip) { 683 ksft_test_result_skip("%s all new pages must not be written (dirty)\n", prefix); 684 ksft_test_result_skip("%s all pages must be written (dirty)\n", prefix); 685 ksft_test_result_skip("%s all pages dirty other than first and the last one\n", 686 prefix); 687 ksft_test_result_skip("%s PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC\n", prefix); 688 ksft_test_result_skip("%s only middle page dirty\n", prefix); 689 ksft_test_result_skip("%s only two middle pages dirty\n", prefix); 690 return 0; 691 } 692 693 vec_size = mem_size/page_size; 694 vec = malloc(sizeof(struct page_region) * vec_size); 695 vec2 = malloc(sizeof(struct page_region) * vec_size); 696 697 /* 1. all new pages must be not be written (dirty) */ 698 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 699 vec_size - 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 700 if (written < 0) 701 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); 702 703 ksft_test_result(written == 0, "%s all new pages must not be written (dirty)\n", prefix); 704 705 /* 2. all pages must be written */ 706 memset(mem, -1, mem_size); 707 708 written = pagemap_ioctl(mem, mem_size, vec, 1, 0, 0, PAGE_IS_WRITTEN, 0, 0, 709 PAGE_IS_WRITTEN); 710 if (written < 0) 711 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); 712 713 ksft_test_result(written == 1 && LEN(vec[0]) == mem_size/page_size, 714 "%s all pages must be written (dirty)\n", prefix); 715 716 /* 3. all pages dirty other than first and the last one */ 717 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 718 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 719 if (written < 0) 720 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); 721 722 memset(mem + page_size, 0, mem_size - (2 * page_size)); 723 724 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 725 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 726 if (written < 0) 727 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); 728 729 ksft_test_result(written == 1 && LEN(vec[0]) >= vec_size - 2 && LEN(vec[0]) <= vec_size, 730 "%s all pages dirty other than first and the last one\n", prefix); 731 732 written = pagemap_ioctl(mem, mem_size, vec, 1, 0, 0, 733 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 734 if (written < 0) 735 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); 736 737 ksft_test_result(written == 0, 738 "%s PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC\n", prefix); 739 740 /* 4. only middle page dirty */ 741 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 742 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 743 if (written < 0) 744 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); 745 746 mem[vec_size/2 * page_size]++; 747 748 written = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, PAGE_IS_WRITTEN, 749 0, 0, PAGE_IS_WRITTEN); 750 if (written < 0) 751 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); 752 753 ksft_test_result(written == 1 && LEN(vec[0]) >= 1, 754 "%s only middle page dirty\n", prefix); 755 756 /* 5. only two middle pages dirty and walk over only middle pages */ 757 written = pagemap_ioctl(mem, mem_size, vec, 1, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 758 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN | PAGE_IS_HUGE); 759 if (written < 0) 760 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); 761 762 mem[vec_size/2 * page_size]++; 763 mem[(vec_size/2 + 1) * page_size]++; 764 765 written = pagemap_ioctl(&mem[vec_size/2 * page_size], 2 * page_size, vec, 1, 0, 766 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN | PAGE_IS_HUGE); 767 if (written < 0) 768 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); 769 770 ksft_test_result(written == 1 && vec[0].start == (uintptr_t)(&mem[vec_size/2 * page_size]) 771 && LEN(vec[0]) == 2, 772 "%s only two middle pages dirty\n", prefix); 773 774 free(vec); 775 free(vec2); 776 return 0; 777 } 778 779 void *gethugepage(int map_size) 780 { 781 int ret; 782 char *map; 783 784 map = memalign(hpage_size, map_size); 785 if (!map) 786 ksft_exit_fail_msg("memalign failed %d %s\n", errno, strerror(errno)); 787 788 ret = madvise(map, map_size, MADV_HUGEPAGE); 789 if (ret) 790 return NULL; 791 792 memset(map, 0, map_size); 793 794 return map; 795 } 796 797 int hpage_unit_tests(void) 798 { 799 char *map; 800 int ret, ret2; 801 size_t num_pages = 10; 802 int map_size = hpage_size * num_pages; 803 int vec_size = map_size/page_size; 804 struct page_region *vec, *vec2; 805 806 vec = malloc(sizeof(struct page_region) * vec_size); 807 vec2 = malloc(sizeof(struct page_region) * vec_size); 808 if (!vec || !vec2) 809 ksft_exit_fail_msg("malloc failed\n"); 810 811 map = gethugepage(map_size); 812 if (map) { 813 wp_init(map, map_size); 814 wp_addr_range(map, map_size); 815 816 /* 1. all new huge page must not be written (dirty) */ 817 ret = pagemap_ioctl(map, map_size, vec, vec_size, 818 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0, 819 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 820 if (ret < 0) 821 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 822 823 ksft_test_result(ret == 0, "%s all new huge page must not be written (dirty)\n", 824 __func__); 825 826 /* 2. all the huge page must not be written */ 827 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0, 828 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 829 if (ret < 0) 830 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 831 832 ksft_test_result(ret == 0, "%s all the huge page must not be written\n", __func__); 833 834 /* 3. all the huge page must be written and clear dirty as well */ 835 memset(map, -1, map_size); 836 ret = pagemap_ioctl(map, map_size, vec, vec_size, 837 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 838 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 839 if (ret < 0) 840 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 841 842 ksft_test_result(ret == 1 && vec[0].start == (uintptr_t)map && 843 LEN(vec[0]) == vec_size && vec[0].categories == PAGE_IS_WRITTEN, 844 "%s all the huge page must be written and clear\n", __func__); 845 846 /* 4. only middle page written */ 847 wp_free(map, map_size); 848 free(map); 849 map = gethugepage(map_size); 850 wp_init(map, map_size); 851 wp_addr_range(map, map_size); 852 map[vec_size/2 * page_size]++; 853 854 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0, 855 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 856 if (ret < 0) 857 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 858 859 ksft_test_result(ret == 1 && LEN(vec[0]) > 0, 860 "%s only middle page written\n", __func__); 861 862 wp_free(map, map_size); 863 free(map); 864 } else { 865 ksft_test_result_skip("%s all new huge page must be written\n", __func__); 866 ksft_test_result_skip("%s all the huge page must not be written\n", __func__); 867 ksft_test_result_skip("%s all the huge page must be written and clear\n", __func__); 868 ksft_test_result_skip("%s only middle page written\n", __func__); 869 } 870 871 /* 5. clear first half of huge page */ 872 map = gethugepage(map_size); 873 if (map) { 874 wp_init(map, map_size); 875 wp_addr_range(map, map_size); 876 877 memset(map, 0, map_size); 878 879 wp_addr_range(map, map_size/2); 880 881 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0, 882 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 883 if (ret < 0) 884 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 885 886 ksft_test_result(ret == 1 && LEN(vec[0]) == vec_size/2 && 887 vec[0].start == (uintptr_t)(map + map_size/2), 888 "%s clear first half of huge page\n", __func__); 889 wp_free(map, map_size); 890 free(map); 891 } else { 892 ksft_test_result_skip("%s clear first half of huge page\n", __func__); 893 } 894 895 /* 6. clear first half of huge page with limited buffer */ 896 map = gethugepage(map_size); 897 if (map) { 898 wp_init(map, map_size); 899 wp_addr_range(map, map_size); 900 901 memset(map, 0, map_size); 902 903 ret = pagemap_ioctl(map, map_size, vec, vec_size, 904 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 905 vec_size/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 906 if (ret < 0) 907 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 908 909 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0, 910 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 911 if (ret < 0) 912 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 913 914 ksft_test_result(ret == 1 && LEN(vec[0]) == vec_size/2 && 915 vec[0].start == (uintptr_t)(map + map_size/2), 916 "%s clear first half of huge page with limited buffer\n", 917 __func__); 918 wp_free(map, map_size); 919 free(map); 920 } else { 921 ksft_test_result_skip("%s clear first half of huge page with limited buffer\n", 922 __func__); 923 } 924 925 /* 7. clear second half of huge page */ 926 map = gethugepage(map_size); 927 if (map) { 928 wp_init(map, map_size); 929 wp_addr_range(map, map_size); 930 931 memset(map, -1, map_size); 932 933 ret = pagemap_ioctl(map + map_size/2, map_size/2, vec, vec_size, 934 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, vec_size/2, 935 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 936 if (ret < 0) 937 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 938 939 ret = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0, 940 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 941 if (ret < 0) 942 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 943 944 ksft_test_result(ret == 1 && LEN(vec[0]) == vec_size/2, 945 "%s clear second half huge page\n", __func__); 946 wp_free(map, map_size); 947 free(map); 948 } else { 949 ksft_test_result_skip("%s clear second half huge page\n", __func__); 950 } 951 952 /* 8. get half huge page */ 953 map = gethugepage(map_size); 954 if (map) { 955 wp_init(map, map_size); 956 wp_addr_range(map, map_size); 957 958 memset(map, -1, map_size); 959 usleep(100); 960 961 ret = pagemap_ioctl(map, map_size, vec, 1, 962 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 963 hpage_size/(2*page_size), PAGE_IS_WRITTEN, 0, 0, 964 PAGE_IS_WRITTEN); 965 if (ret < 0) 966 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 967 968 ksft_test_result(ret == 1 && LEN(vec[0]) == hpage_size/(2*page_size), 969 "%s get half huge page\n", __func__); 970 971 ret2 = pagemap_ioctl(map, map_size, vec, vec_size, 0, 0, 972 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); 973 if (ret2 < 0) 974 ksft_exit_fail_msg("error %d %d %s\n", ret2, errno, strerror(errno)); 975 976 ksft_test_result(ret2 == 1 && LEN(vec[0]) == (map_size - hpage_size/2)/page_size, 977 "%s get half huge page\n", __func__); 978 979 wp_free(map, map_size); 980 free(map); 981 } else { 982 ksft_test_result_skip("%s get half huge page\n", __func__); 983 ksft_test_result_skip("%s get half huge page\n", __func__); 984 } 985 986 free(vec); 987 free(vec2); 988 return 0; 989 } 990 991 int unmapped_region_tests(void) 992 { 993 void *start = (void *)0x10000000; 994 int written, len = 0x00040000; 995 int vec_size = len / page_size; 996 struct page_region *vec = malloc(sizeof(struct page_region) * vec_size); 997 998 /* 1. Get written pages */ 999 written = pagemap_ioctl(start, len, vec, vec_size, 0, 0, 1000 PAGEMAP_NON_WRITTEN_BITS, 0, 0, PAGEMAP_NON_WRITTEN_BITS); 1001 if (written < 0) 1002 ksft_exit_fail_msg("error %d %d %s\n", written, errno, strerror(errno)); 1003 1004 ksft_test_result(written >= 0, "%s Get status of pages\n", __func__); 1005 1006 free(vec); 1007 return 0; 1008 } 1009 1010 static void test_simple(void) 1011 { 1012 int i; 1013 char *map; 1014 struct page_region vec; 1015 1016 map = aligned_alloc(page_size, page_size); 1017 if (!map) 1018 ksft_exit_fail_msg("aligned_alloc failed\n"); 1019 1020 wp_init(map, page_size); 1021 wp_addr_range(map, page_size); 1022 1023 for (i = 0 ; i < TEST_ITERATIONS; i++) { 1024 if (pagemap_ioctl(map, page_size, &vec, 1, 0, 0, 1025 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) == 1) { 1026 ksft_print_msg("written bit was 1, but should be 0 (i=%d)\n", i); 1027 break; 1028 } 1029 1030 wp_addr_range(map, page_size); 1031 /* Write something to the page to get the written bit enabled on the page */ 1032 map[0]++; 1033 1034 if (pagemap_ioctl(map, page_size, &vec, 1, 0, 0, 1035 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) == 0) { 1036 ksft_print_msg("written bit was 0, but should be 1 (i=%d)\n", i); 1037 break; 1038 } 1039 1040 wp_addr_range(map, page_size); 1041 } 1042 wp_free(map, page_size); 1043 free(map); 1044 1045 ksft_test_result(i == TEST_ITERATIONS, "Test %s\n", __func__); 1046 } 1047 1048 int sanity_tests(void) 1049 { 1050 int mem_size, vec_size, ret, fd, i, buf_size; 1051 struct page_region *vec; 1052 char *mem, *fmem; 1053 struct stat sbuf; 1054 char *tmp_buf; 1055 1056 /* 1. wrong operation */ 1057 mem_size = 10 * page_size; 1058 vec_size = mem_size / page_size; 1059 1060 vec = malloc(sizeof(struct page_region) * vec_size); 1061 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); 1062 if (mem == MAP_FAILED || vec == MAP_FAILED) 1063 ksft_exit_fail_msg("error nomem\n"); 1064 1065 wp_init(mem, mem_size); 1066 wp_addr_range(mem, mem_size); 1067 1068 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 1069 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 1070 0, PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) >= 0, 1071 "%s WP op can be specified with !PAGE_IS_WRITTEN\n", __func__); 1072 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, 1073 PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL) >= 0, 1074 "%s required_mask specified\n", __func__); 1075 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, 1076 0, PAGEMAP_BITS_ALL, 0, PAGEMAP_BITS_ALL) >= 0, 1077 "%s anyof_mask specified\n", __func__); 1078 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, 1079 0, 0, PAGEMAP_BITS_ALL, PAGEMAP_BITS_ALL) >= 0, 1080 "%s excluded_mask specified\n", __func__); 1081 ksft_test_result(pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, 1082 PAGEMAP_BITS_ALL, PAGEMAP_BITS_ALL, 0, 1083 PAGEMAP_BITS_ALL) >= 0, 1084 "%s required_mask and anyof_mask specified\n", __func__); 1085 wp_free(mem, mem_size); 1086 munmap(mem, mem_size); 1087 1088 /* 2. Get sd and present pages with anyof_mask */ 1089 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); 1090 if (mem == MAP_FAILED) 1091 ksft_exit_fail_msg("error nomem\n"); 1092 wp_init(mem, mem_size); 1093 wp_addr_range(mem, mem_size); 1094 1095 memset(mem, 0, mem_size); 1096 1097 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, 1098 0, PAGEMAP_BITS_ALL, 0, PAGEMAP_BITS_ALL); 1099 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size && 1100 (vec[0].categories & (PAGE_IS_WRITTEN | PAGE_IS_PRESENT)) == 1101 (PAGE_IS_WRITTEN | PAGE_IS_PRESENT), 1102 "%s Get sd and present pages with anyof_mask\n", __func__); 1103 1104 /* 3. Get sd and present pages with required_mask */ 1105 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, 1106 PAGEMAP_BITS_ALL, 0, 0, PAGEMAP_BITS_ALL); 1107 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size && 1108 (vec[0].categories & (PAGE_IS_WRITTEN | PAGE_IS_PRESENT)) == 1109 (PAGE_IS_WRITTEN | PAGE_IS_PRESENT), 1110 "%s Get all the pages with required_mask\n", __func__); 1111 1112 /* 4. Get sd and present pages with required_mask and anyof_mask */ 1113 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, 1114 PAGE_IS_WRITTEN, PAGE_IS_PRESENT, 0, PAGEMAP_BITS_ALL); 1115 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size && 1116 (vec[0].categories & (PAGE_IS_WRITTEN | PAGE_IS_PRESENT)) == 1117 (PAGE_IS_WRITTEN | PAGE_IS_PRESENT), 1118 "%s Get sd and present pages with required_mask and anyof_mask\n", 1119 __func__); 1120 1121 /* 5. Don't get sd pages */ 1122 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, 1123 PAGE_IS_WRITTEN, 0, PAGE_IS_WRITTEN, PAGEMAP_BITS_ALL); 1124 ksft_test_result(ret == 0, "%s Don't get sd pages\n", __func__); 1125 1126 /* 6. Don't get present pages */ 1127 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, 1128 PAGE_IS_PRESENT, 0, PAGE_IS_PRESENT, PAGEMAP_BITS_ALL); 1129 ksft_test_result(ret == 0, "%s Don't get present pages\n", __func__); 1130 1131 wp_free(mem, mem_size); 1132 munmap(mem, mem_size); 1133 1134 /* 8. Find written present pages with return mask */ 1135 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); 1136 if (mem == MAP_FAILED) 1137 ksft_exit_fail_msg("error nomem\n"); 1138 wp_init(mem, mem_size); 1139 wp_addr_range(mem, mem_size); 1140 1141 memset(mem, 0, mem_size); 1142 1143 ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 1144 PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0, 1145 0, PAGEMAP_BITS_ALL, 0, PAGE_IS_WRITTEN); 1146 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)mem && LEN(vec[0]) == vec_size && 1147 vec[0].categories == PAGE_IS_WRITTEN, 1148 "%s Find written present pages with return mask\n", __func__); 1149 wp_free(mem, mem_size); 1150 munmap(mem, mem_size); 1151 1152 /* 9. Memory mapped file */ 1153 fd = open(progname, O_RDONLY); 1154 if (fd < 0) 1155 ksft_exit_fail_msg("%s Memory mapped file\n", __func__); 1156 1157 ret = stat(progname, &sbuf); 1158 if (ret < 0) 1159 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 1160 1161 fmem = mmap(NULL, sbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0); 1162 if (fmem == MAP_FAILED) 1163 ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno)); 1164 1165 tmp_buf = malloc(sbuf.st_size); 1166 memcpy(tmp_buf, fmem, sbuf.st_size); 1167 1168 ret = pagemap_ioctl(fmem, sbuf.st_size, vec, vec_size, 0, 0, 1169 0, PAGEMAP_NON_WRITTEN_BITS, 0, PAGEMAP_NON_WRITTEN_BITS); 1170 1171 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)fmem && 1172 LEN(vec[0]) == ceilf((float)sbuf.st_size/page_size) && 1173 (vec[0].categories & PAGE_IS_FILE), 1174 "%s Memory mapped file\n", __func__); 1175 1176 munmap(fmem, sbuf.st_size); 1177 close(fd); 1178 1179 /* 10. Create and read/write to a memory mapped file */ 1180 buf_size = page_size * 10; 1181 1182 fd = open(__FILE__".tmp2", O_RDWR | O_CREAT, 0666); 1183 if (fd < 0) 1184 ksft_exit_fail_msg("Read/write to memory: %s\n", 1185 strerror(errno)); 1186 1187 for (i = 0; i < buf_size; i++) 1188 if (write(fd, "c", 1) < 0) 1189 ksft_exit_fail_msg("Create and read/write to a memory mapped file\n"); 1190 1191 fmem = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); 1192 if (fmem == MAP_FAILED) 1193 ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno)); 1194 1195 wp_init(fmem, buf_size); 1196 wp_addr_range(fmem, buf_size); 1197 1198 for (i = 0; i < buf_size; i++) 1199 fmem[i] = 'z'; 1200 1201 msync(fmem, buf_size, MS_SYNC); 1202 1203 ret = pagemap_ioctl(fmem, buf_size, vec, vec_size, 0, 0, 1204 PAGE_IS_WRITTEN, PAGE_IS_PRESENT | PAGE_IS_SWAPPED | PAGE_IS_FILE, 0, 1205 PAGEMAP_BITS_ALL); 1206 1207 ksft_test_result(ret >= 0 && vec[0].start == (uintptr_t)fmem && 1208 LEN(vec[0]) == (buf_size/page_size) && 1209 (vec[0].categories & PAGE_IS_WRITTEN), 1210 "%s Read/write to memory\n", __func__); 1211 1212 wp_free(fmem, buf_size); 1213 munmap(fmem, buf_size); 1214 close(fd); 1215 1216 free(vec); 1217 return 0; 1218 } 1219 1220 int mprotect_tests(void) 1221 { 1222 int ret; 1223 char *mem, *mem2; 1224 struct page_region vec; 1225 int pagemap_fd = open("/proc/self/pagemap", O_RDONLY); 1226 1227 if (pagemap_fd < 0) { 1228 fprintf(stderr, "open() failed\n"); 1229 exit(1); 1230 } 1231 1232 /* 1. Map two pages */ 1233 mem = mmap(0, 2 * page_size, PROT_READ|PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); 1234 if (mem == MAP_FAILED) 1235 ksft_exit_fail_msg("error nomem\n"); 1236 wp_init(mem, 2 * page_size); 1237 wp_addr_range(mem, 2 * page_size); 1238 1239 /* Populate both pages. */ 1240 memset(mem, 1, 2 * page_size); 1241 1242 ret = pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0, PAGE_IS_WRITTEN, 1243 0, 0, PAGE_IS_WRITTEN); 1244 if (ret < 0) 1245 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 1246 1247 ksft_test_result(ret == 1 && LEN(vec) == 2, "%s Both pages written\n", __func__); 1248 1249 /* 2. Start tracking */ 1250 wp_addr_range(mem, 2 * page_size); 1251 1252 ksft_test_result(pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0, 1253 PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN) == 0, 1254 "%s Both pages are not written (dirty)\n", __func__); 1255 1256 /* 3. Remap the second page */ 1257 mem2 = mmap(mem + page_size, page_size, PROT_READ|PROT_WRITE, 1258 MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0); 1259 if (mem2 == MAP_FAILED) 1260 ksft_exit_fail_msg("error nomem\n"); 1261 wp_init(mem2, page_size); 1262 wp_addr_range(mem2, page_size); 1263 1264 /* Protect + unprotect. */ 1265 mprotect(mem, page_size, PROT_NONE); 1266 mprotect(mem, 2 * page_size, PROT_READ); 1267 mprotect(mem, 2 * page_size, PROT_READ|PROT_WRITE); 1268 1269 /* Modify both pages. */ 1270 memset(mem, 2, 2 * page_size); 1271 1272 /* Protect + unprotect. */ 1273 mprotect(mem, page_size, PROT_NONE); 1274 mprotect(mem, page_size, PROT_READ); 1275 mprotect(mem, page_size, PROT_READ|PROT_WRITE); 1276 1277 ret = pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0, PAGE_IS_WRITTEN, 1278 0, 0, PAGE_IS_WRITTEN); 1279 if (ret < 0) 1280 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 1281 1282 ksft_test_result(ret == 1 && LEN(vec) == 2, 1283 "%s Both pages written after remap and mprotect\n", __func__); 1284 1285 /* 4. Clear and make the pages written */ 1286 wp_addr_range(mem, 2 * page_size); 1287 1288 memset(mem, 'A', 2 * page_size); 1289 1290 ret = pagemap_ioctl(mem, 2 * page_size, &vec, 1, 0, 0, PAGE_IS_WRITTEN, 1291 0, 0, PAGE_IS_WRITTEN); 1292 if (ret < 0) 1293 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 1294 1295 ksft_test_result(ret == 1 && LEN(vec) == 2, 1296 "%s Clear and make the pages written\n", __func__); 1297 1298 wp_free(mem, 2 * page_size); 1299 munmap(mem, 2 * page_size); 1300 return 0; 1301 } 1302 1303 /* transact test */ 1304 static const unsigned int nthreads = 6, pages_per_thread = 32, access_per_thread = 8; 1305 static pthread_barrier_t start_barrier, end_barrier; 1306 static unsigned int extra_thread_faults; 1307 static unsigned int iter_count = 1000; 1308 static volatile int finish; 1309 1310 static ssize_t get_dirty_pages_reset(char *mem, unsigned int count, 1311 int reset, int page_size) 1312 { 1313 struct pm_scan_arg arg = {0}; 1314 struct page_region rgns[256]; 1315 int i, j, cnt, ret; 1316 1317 arg.size = sizeof(struct pm_scan_arg); 1318 arg.start = (uintptr_t)mem; 1319 arg.max_pages = count; 1320 arg.end = (uintptr_t)(mem + count * page_size); 1321 arg.vec = (uintptr_t)rgns; 1322 arg.vec_len = sizeof(rgns) / sizeof(*rgns); 1323 if (reset) 1324 arg.flags |= PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC; 1325 arg.category_mask = PAGE_IS_WRITTEN; 1326 arg.return_mask = PAGE_IS_WRITTEN; 1327 1328 ret = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg); 1329 if (ret < 0) 1330 ksft_exit_fail_msg("ioctl failed\n"); 1331 1332 cnt = 0; 1333 for (i = 0; i < ret; ++i) { 1334 if (rgns[i].categories != PAGE_IS_WRITTEN) 1335 ksft_exit_fail_msg("wrong flags\n"); 1336 1337 for (j = 0; j < LEN(rgns[i]); ++j) 1338 cnt++; 1339 } 1340 1341 return cnt; 1342 } 1343 1344 void *thread_proc(void *mem) 1345 { 1346 int *m = mem; 1347 long curr_faults, faults; 1348 struct rusage r; 1349 unsigned int i; 1350 int ret; 1351 1352 if (getrusage(RUSAGE_THREAD, &r)) 1353 ksft_exit_fail_msg("getrusage\n"); 1354 1355 curr_faults = r.ru_minflt; 1356 1357 while (!finish) { 1358 ret = pthread_barrier_wait(&start_barrier); 1359 if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD) 1360 ksft_exit_fail_msg("pthread_barrier_wait\n"); 1361 1362 for (i = 0; i < access_per_thread; ++i) 1363 __atomic_add_fetch(m + i * (0x1000 / sizeof(*m)), 1, __ATOMIC_SEQ_CST); 1364 1365 ret = pthread_barrier_wait(&end_barrier); 1366 if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD) 1367 ksft_exit_fail_msg("pthread_barrier_wait\n"); 1368 1369 if (getrusage(RUSAGE_THREAD, &r)) 1370 ksft_exit_fail_msg("getrusage\n"); 1371 1372 faults = r.ru_minflt - curr_faults; 1373 if (faults < access_per_thread) 1374 ksft_exit_fail_msg("faults < access_per_thread"); 1375 1376 __atomic_add_fetch(&extra_thread_faults, faults - access_per_thread, 1377 __ATOMIC_SEQ_CST); 1378 curr_faults = r.ru_minflt; 1379 } 1380 1381 return NULL; 1382 } 1383 1384 static void transact_test(int page_size) 1385 { 1386 unsigned int i, count, extra_pages; 1387 pthread_t th; 1388 char *mem; 1389 int ret, c; 1390 1391 if (pthread_barrier_init(&start_barrier, NULL, nthreads + 1)) 1392 ksft_exit_fail_msg("pthread_barrier_init\n"); 1393 1394 if (pthread_barrier_init(&end_barrier, NULL, nthreads + 1)) 1395 ksft_exit_fail_msg("pthread_barrier_init\n"); 1396 1397 mem = mmap(NULL, 0x1000 * nthreads * pages_per_thread, PROT_READ | PROT_WRITE, 1398 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 1399 if (mem == MAP_FAILED) 1400 ksft_exit_fail_msg("Error mmap %s.\n", strerror(errno)); 1401 1402 wp_init(mem, 0x1000 * nthreads * pages_per_thread); 1403 wp_addr_range(mem, 0x1000 * nthreads * pages_per_thread); 1404 1405 memset(mem, 0, 0x1000 * nthreads * pages_per_thread); 1406 1407 count = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size); 1408 ksft_test_result(count > 0, "%s count %d\n", __func__, count); 1409 count = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size); 1410 ksft_test_result(count == 0, "%s count %d\n", __func__, count); 1411 1412 finish = 0; 1413 for (i = 0; i < nthreads; ++i) 1414 pthread_create(&th, NULL, thread_proc, mem + 0x1000 * i * pages_per_thread); 1415 1416 extra_pages = 0; 1417 for (i = 0; i < iter_count; ++i) { 1418 count = 0; 1419 1420 ret = pthread_barrier_wait(&start_barrier); 1421 if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD) 1422 ksft_exit_fail_msg("pthread_barrier_wait\n"); 1423 1424 count = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, 1425 page_size); 1426 1427 ret = pthread_barrier_wait(&end_barrier); 1428 if (ret && ret != PTHREAD_BARRIER_SERIAL_THREAD) 1429 ksft_exit_fail_msg("pthread_barrier_wait\n"); 1430 1431 if (count > nthreads * access_per_thread) 1432 ksft_exit_fail_msg("Too big count %d expected %d, iter %d\n", 1433 count, nthreads * access_per_thread, i); 1434 1435 c = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size); 1436 count += c; 1437 1438 if (c > nthreads * access_per_thread) { 1439 ksft_test_result_fail(" %s count > nthreads\n", __func__); 1440 return; 1441 } 1442 1443 if (count != nthreads * access_per_thread) { 1444 /* 1445 * The purpose of the test is to make sure that no page updates are lost 1446 * when the page updates and read-resetting soft dirty flags are performed 1447 * in parallel. However, it is possible that the application will get the 1448 * soft dirty flags twice on the two consecutive read-resets. This seems 1449 * unavoidable as soft dirty flag is handled in software through page faults 1450 * in kernel. While the updating the flags is supposed to be synchronized 1451 * between page fault handling and read-reset, it is possible that 1452 * read-reset happens after page fault PTE update but before the application 1453 * re-executes write instruction. So read-reset gets the flag, clears write 1454 * access and application gets page fault again for the same write. 1455 */ 1456 if (count < nthreads * access_per_thread) { 1457 ksft_test_result_fail("Lost update, iter %d, %d vs %d.\n", i, count, 1458 nthreads * access_per_thread); 1459 return; 1460 } 1461 1462 extra_pages += count - nthreads * access_per_thread; 1463 } 1464 } 1465 1466 pthread_barrier_wait(&start_barrier); 1467 finish = 1; 1468 pthread_barrier_wait(&end_barrier); 1469 1470 ksft_test_result_pass("%s Extra pages %u (%.1lf%%), extra thread faults %d.\n", __func__, 1471 extra_pages, 1472 100.0 * extra_pages / (iter_count * nthreads * access_per_thread), 1473 extra_thread_faults); 1474 } 1475 1476 int main(int argc, char *argv[]) 1477 { 1478 int mem_size, shmid, buf_size, fd, i, ret; 1479 char *mem, *map, *fmem; 1480 struct stat sbuf; 1481 1482 progname = argv[0]; 1483 1484 ksft_print_header(); 1485 1486 if (init_uffd()) 1487 ksft_exit_pass(); 1488 1489 ksft_set_plan(115); 1490 1491 page_size = getpagesize(); 1492 hpage_size = read_pmd_pagesize(); 1493 1494 pagemap_fd = open(PAGEMAP, O_RDONLY); 1495 if (pagemap_fd < 0) 1496 return -EINVAL; 1497 1498 /* 1. Sanity testing */ 1499 sanity_tests_sd(); 1500 1501 /* 2. Normal page testing */ 1502 mem_size = 10 * page_size; 1503 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); 1504 if (mem == MAP_FAILED) 1505 ksft_exit_fail_msg("error nomem\n"); 1506 wp_init(mem, mem_size); 1507 wp_addr_range(mem, mem_size); 1508 1509 base_tests("Page testing:", mem, mem_size, 0); 1510 1511 wp_free(mem, mem_size); 1512 munmap(mem, mem_size); 1513 1514 /* 3. Large page testing */ 1515 mem_size = 512 * 10 * page_size; 1516 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); 1517 if (mem == MAP_FAILED) 1518 ksft_exit_fail_msg("error nomem\n"); 1519 wp_init(mem, mem_size); 1520 wp_addr_range(mem, mem_size); 1521 1522 base_tests("Large Page testing:", mem, mem_size, 0); 1523 1524 wp_free(mem, mem_size); 1525 munmap(mem, mem_size); 1526 1527 /* 4. Huge page testing */ 1528 map = gethugepage(hpage_size); 1529 if (map) { 1530 wp_init(map, hpage_size); 1531 wp_addr_range(map, hpage_size); 1532 base_tests("Huge page testing:", map, hpage_size, 0); 1533 wp_free(map, hpage_size); 1534 free(map); 1535 } else { 1536 base_tests("Huge page testing:", NULL, 0, 1); 1537 } 1538 1539 /* 5. SHM Hugetlb page testing */ 1540 mem_size = 2*1024*1024; 1541 mem = gethugetlb_mem(mem_size, &shmid); 1542 if (mem) { 1543 wp_init(mem, mem_size); 1544 wp_addr_range(mem, mem_size); 1545 1546 base_tests("Hugetlb shmem testing:", mem, mem_size, 0); 1547 1548 wp_free(mem, mem_size); 1549 shmctl(shmid, IPC_RMID, NULL); 1550 } else { 1551 base_tests("Hugetlb shmem testing:", NULL, 0, 1); 1552 } 1553 1554 /* 6. Hugetlb page testing */ 1555 mem = gethugetlb_mem(mem_size, NULL); 1556 if (mem) { 1557 wp_init(mem, mem_size); 1558 wp_addr_range(mem, mem_size); 1559 1560 base_tests("Hugetlb mem testing:", mem, mem_size, 0); 1561 1562 wp_free(mem, mem_size); 1563 } else { 1564 base_tests("Hugetlb mem testing:", NULL, 0, 1); 1565 } 1566 1567 /* 7. File Hugetlb testing */ 1568 mem_size = 2*1024*1024; 1569 fd = memfd_create("uffd-test", MFD_HUGETLB | MFD_NOEXEC_SEAL); 1570 if (fd < 0) 1571 ksft_exit_fail_msg("uffd-test creation failed %d %s\n", errno, strerror(errno)); 1572 mem = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); 1573 if (mem != MAP_FAILED) { 1574 wp_init(mem, mem_size); 1575 wp_addr_range(mem, mem_size); 1576 1577 base_tests("Hugetlb shmem testing:", mem, mem_size, 0); 1578 1579 wp_free(mem, mem_size); 1580 shmctl(shmid, IPC_RMID, NULL); 1581 } else { 1582 base_tests("Hugetlb shmem testing:", NULL, 0, 1); 1583 } 1584 close(fd); 1585 1586 /* 8. File memory testing */ 1587 buf_size = page_size * 10; 1588 1589 fd = open(__FILE__".tmp0", O_RDWR | O_CREAT, 0777); 1590 if (fd < 0) 1591 ksft_exit_fail_msg("Create and read/write to a memory mapped file: %s\n", 1592 strerror(errno)); 1593 1594 for (i = 0; i < buf_size; i++) 1595 if (write(fd, "c", 1) < 0) 1596 ksft_exit_fail_msg("Create and read/write to a memory mapped file\n"); 1597 1598 ret = stat(__FILE__".tmp0", &sbuf); 1599 if (ret < 0) 1600 ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); 1601 1602 fmem = mmap(NULL, sbuf.st_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); 1603 if (fmem == MAP_FAILED) 1604 ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno)); 1605 1606 wp_init(fmem, sbuf.st_size); 1607 wp_addr_range(fmem, sbuf.st_size); 1608 1609 base_tests("File memory testing:", fmem, sbuf.st_size, 0); 1610 1611 wp_free(fmem, sbuf.st_size); 1612 munmap(fmem, sbuf.st_size); 1613 close(fd); 1614 1615 /* 9. File memory testing */ 1616 buf_size = page_size * 10; 1617 1618 fd = memfd_create(__FILE__".tmp00", MFD_NOEXEC_SEAL); 1619 if (fd < 0) 1620 ksft_exit_fail_msg("Create and read/write to a memory mapped file: %s\n", 1621 strerror(errno)); 1622 1623 if (ftruncate(fd, buf_size)) 1624 ksft_exit_fail_msg("Error ftruncate\n"); 1625 1626 for (i = 0; i < buf_size; i++) 1627 if (write(fd, "c", 1) < 0) 1628 ksft_exit_fail_msg("Create and read/write to a memory mapped file\n"); 1629 1630 fmem = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); 1631 if (fmem == MAP_FAILED) 1632 ksft_exit_fail_msg("error nomem %d %s\n", errno, strerror(errno)); 1633 1634 wp_init(fmem, buf_size); 1635 wp_addr_range(fmem, buf_size); 1636 1637 base_tests("File anonymous memory testing:", fmem, buf_size, 0); 1638 1639 wp_free(fmem, buf_size); 1640 munmap(fmem, buf_size); 1641 close(fd); 1642 1643 /* 10. Huge page tests */ 1644 hpage_unit_tests(); 1645 1646 /* 11. Iterative test */ 1647 test_simple(); 1648 1649 /* 12. Mprotect test */ 1650 mprotect_tests(); 1651 1652 /* 13. Transact test */ 1653 transact_test(page_size); 1654 1655 /* 14. Sanity testing */ 1656 sanity_tests(); 1657 1658 /*15. Unmapped address test */ 1659 unmapped_region_tests(); 1660 1661 /* 16. Userfaultfd tests */ 1662 userfaultfd_tests(); 1663 1664 close(pagemap_fd); 1665 ksft_exit_pass(); 1666 } 1667