1 // SPDX-License-Identifier: GPL-2.0-only 2 3 #define _GNU_SOURCE 4 #include <stdbool.h> 5 #include <stdint.h> 6 #include <fcntl.h> 7 #include <assert.h> 8 #include <linux/mman.h> 9 #include <sys/mman.h> 10 #include "../kselftest.h" 11 #include "thp_settings.h" 12 #include "uffd-common.h" 13 14 static int pagemap_fd; 15 static size_t pagesize; 16 static int nr_pagesizes = 1; 17 static int nr_thpsizes; 18 static size_t thpsizes[20]; 19 static int nr_hugetlbsizes; 20 static size_t hugetlbsizes[10]; 21 22 static int sz2ord(size_t size) 23 { 24 return __builtin_ctzll(size / pagesize); 25 } 26 27 static int detect_thp_sizes(size_t sizes[], int max) 28 { 29 int count = 0; 30 unsigned long orders; 31 size_t kb; 32 int i; 33 34 /* thp not supported at all. */ 35 if (!read_pmd_pagesize()) 36 return 0; 37 38 orders = thp_supported_orders(); 39 40 for (i = 0; orders && count < max; i++) { 41 if (!(orders & (1UL << i))) 42 continue; 43 orders &= ~(1UL << i); 44 kb = (pagesize >> 10) << i; 45 sizes[count++] = kb * 1024; 46 ksft_print_msg("[INFO] detected THP size: %zu KiB\n", kb); 47 } 48 49 return count; 50 } 51 52 static void *mmap_aligned(size_t size, int prot, int flags) 53 { 54 size_t mmap_size = size * 2; 55 char *mmap_mem, *mem; 56 57 mmap_mem = mmap(NULL, mmap_size, prot, flags, -1, 0); 58 if (mmap_mem == MAP_FAILED) 59 return mmap_mem; 60 61 mem = (char *)(((uintptr_t)mmap_mem + size - 1) & ~(size - 1)); 62 munmap(mmap_mem, mem - mmap_mem); 63 munmap(mem + size, mmap_mem + mmap_size - mem - size); 64 65 return mem; 66 } 67 68 static void *alloc_one_folio(size_t size, bool private, bool hugetlb) 69 { 70 bool thp = !hugetlb && size > pagesize; 71 int flags = MAP_ANONYMOUS; 72 int prot = PROT_READ | PROT_WRITE; 73 char *mem, *addr; 74 75 assert((size & (size - 1)) == 0); 76 77 if (private) 78 flags |= MAP_PRIVATE; 79 else 80 flags |= MAP_SHARED; 81 82 /* 83 * For THP, we must explicitly enable the THP size, allocate twice the 84 * required space then manually align. 85 */ 86 if (thp) { 87 struct thp_settings settings = *thp_current_settings(); 88 89 if (private) 90 settings.hugepages[sz2ord(size)].enabled = THP_ALWAYS; 91 else 92 settings.shmem_hugepages[sz2ord(size)].enabled = SHMEM_ALWAYS; 93 94 thp_push_settings(&settings); 95 96 mem = mmap_aligned(size, prot, flags); 97 } else { 98 if (hugetlb) { 99 flags |= MAP_HUGETLB; 100 flags |= __builtin_ctzll(size) << MAP_HUGE_SHIFT; 101 } 102 103 mem = mmap(NULL, size, prot, flags, -1, 0); 104 } 105 106 if (mem == MAP_FAILED) { 107 mem = NULL; 108 goto out; 109 } 110 111 assert(((uintptr_t)mem & (size - 1)) == 0); 112 113 /* 114 * Populate the folio by writing the first byte and check that all pages 115 * are populated. Finally set the whole thing to non-zero data to avoid 116 * kernel from mapping it back to the zero page. 117 */ 118 mem[0] = 1; 119 for (addr = mem; addr < mem + size; addr += pagesize) { 120 if (!pagemap_is_populated(pagemap_fd, addr)) { 121 munmap(mem, size); 122 mem = NULL; 123 goto out; 124 } 125 } 126 memset(mem, 1, size); 127 out: 128 if (thp) 129 thp_pop_settings(); 130 131 return mem; 132 } 133 134 static bool check_uffd_wp_state(void *mem, size_t size, bool expect) 135 { 136 uint64_t pte; 137 void *addr; 138 139 for (addr = mem; addr < mem + size; addr += pagesize) { 140 pte = pagemap_get_entry(pagemap_fd, addr); 141 if (!!(pte & PM_UFFD_WP) != expect) { 142 ksft_test_result_fail("uffd-wp not %s for pte %lu!\n", 143 expect ? "set" : "clear", 144 (addr - mem) / pagesize); 145 return false; 146 } 147 } 148 149 return true; 150 } 151 152 static bool range_is_swapped(void *addr, size_t size) 153 { 154 for (; size; addr += pagesize, size -= pagesize) 155 if (!pagemap_is_swapped(pagemap_fd, addr)) 156 return false; 157 return true; 158 } 159 160 static void test_one_folio(size_t size, bool private, bool swapout, bool hugetlb) 161 { 162 struct uffdio_writeprotect wp_prms; 163 uint64_t features = 0; 164 void *addr = NULL; 165 void *mem = NULL; 166 167 assert(!(hugetlb && swapout)); 168 169 ksft_print_msg("[RUN] %s(size=%zu, private=%s, swapout=%s, hugetlb=%s)\n", 170 __func__, 171 size, 172 private ? "true" : "false", 173 swapout ? "true" : "false", 174 hugetlb ? "true" : "false"); 175 176 /* Allocate a folio of required size and type. */ 177 mem = alloc_one_folio(size, private, hugetlb); 178 if (!mem) { 179 ksft_test_result_fail("alloc_one_folio() failed\n"); 180 goto out; 181 } 182 183 /* Register range for uffd-wp. */ 184 if (userfaultfd_open(&features)) { 185 ksft_test_result_fail("userfaultfd_open() failed\n"); 186 goto out; 187 } 188 if (uffd_register(uffd, mem, size, false, true, false)) { 189 ksft_test_result_fail("uffd_register() failed\n"); 190 goto out; 191 } 192 wp_prms.mode = UFFDIO_WRITEPROTECT_MODE_WP; 193 wp_prms.range.start = (uintptr_t)mem; 194 wp_prms.range.len = size; 195 if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp_prms)) { 196 ksft_test_result_fail("ioctl(UFFDIO_WRITEPROTECT) failed\n"); 197 goto out; 198 } 199 200 if (swapout) { 201 madvise(mem, size, MADV_PAGEOUT); 202 if (!range_is_swapped(mem, size)) { 203 ksft_test_result_skip("MADV_PAGEOUT did not work, is swap enabled?\n"); 204 goto out; 205 } 206 } 207 208 /* Check that uffd-wp is set for all PTEs in range. */ 209 if (!check_uffd_wp_state(mem, size, true)) 210 goto out; 211 212 /* 213 * Move the mapping to a new, aligned location. Since 214 * UFFD_FEATURE_EVENT_REMAP is not set, we expect the uffd-wp bit for 215 * each PTE to be cleared in the new mapping. 216 */ 217 addr = mmap_aligned(size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS); 218 if (addr == MAP_FAILED) { 219 ksft_test_result_fail("mmap_aligned() failed\n"); 220 goto out; 221 } 222 if (mremap(mem, size, size, MREMAP_FIXED | MREMAP_MAYMOVE, addr) == MAP_FAILED) { 223 ksft_test_result_fail("mremap() failed\n"); 224 munmap(addr, size); 225 goto out; 226 } 227 mem = addr; 228 229 /* Check that uffd-wp is cleared for all PTEs in range. */ 230 if (!check_uffd_wp_state(mem, size, false)) 231 goto out; 232 233 ksft_test_result_pass("%s(size=%zu, private=%s, swapout=%s, hugetlb=%s)\n", 234 __func__, 235 size, 236 private ? "true" : "false", 237 swapout ? "true" : "false", 238 hugetlb ? "true" : "false"); 239 out: 240 if (mem) 241 munmap(mem, size); 242 if (uffd >= 0) { 243 close(uffd); 244 uffd = -1; 245 } 246 } 247 248 struct testcase { 249 size_t *sizes; 250 int *nr_sizes; 251 bool private; 252 bool swapout; 253 bool hugetlb; 254 }; 255 256 static const struct testcase testcases[] = { 257 /* base pages. */ 258 { 259 .sizes = &pagesize, 260 .nr_sizes = &nr_pagesizes, 261 .private = false, 262 .swapout = false, 263 .hugetlb = false, 264 }, 265 { 266 .sizes = &pagesize, 267 .nr_sizes = &nr_pagesizes, 268 .private = true, 269 .swapout = false, 270 .hugetlb = false, 271 }, 272 { 273 .sizes = &pagesize, 274 .nr_sizes = &nr_pagesizes, 275 .private = false, 276 .swapout = true, 277 .hugetlb = false, 278 }, 279 { 280 .sizes = &pagesize, 281 .nr_sizes = &nr_pagesizes, 282 .private = true, 283 .swapout = true, 284 .hugetlb = false, 285 }, 286 287 /* thp. */ 288 { 289 .sizes = thpsizes, 290 .nr_sizes = &nr_thpsizes, 291 .private = false, 292 .swapout = false, 293 .hugetlb = false, 294 }, 295 { 296 .sizes = thpsizes, 297 .nr_sizes = &nr_thpsizes, 298 .private = true, 299 .swapout = false, 300 .hugetlb = false, 301 }, 302 { 303 .sizes = thpsizes, 304 .nr_sizes = &nr_thpsizes, 305 .private = false, 306 .swapout = true, 307 .hugetlb = false, 308 }, 309 { 310 .sizes = thpsizes, 311 .nr_sizes = &nr_thpsizes, 312 .private = true, 313 .swapout = true, 314 .hugetlb = false, 315 }, 316 317 /* hugetlb. */ 318 { 319 .sizes = hugetlbsizes, 320 .nr_sizes = &nr_hugetlbsizes, 321 .private = false, 322 .swapout = false, 323 .hugetlb = true, 324 }, 325 { 326 .sizes = hugetlbsizes, 327 .nr_sizes = &nr_hugetlbsizes, 328 .private = true, 329 .swapout = false, 330 .hugetlb = true, 331 }, 332 }; 333 334 int main(int argc, char **argv) 335 { 336 struct thp_settings settings; 337 int i, j, plan = 0; 338 339 pagesize = getpagesize(); 340 nr_thpsizes = detect_thp_sizes(thpsizes, ARRAY_SIZE(thpsizes)); 341 nr_hugetlbsizes = detect_hugetlb_page_sizes(hugetlbsizes, 342 ARRAY_SIZE(hugetlbsizes)); 343 344 /* If THP is supported, save THP settings and initially disable THP. */ 345 if (nr_thpsizes) { 346 thp_save_settings(); 347 thp_read_settings(&settings); 348 for (i = 0; i < NR_ORDERS; i++) { 349 settings.hugepages[i].enabled = THP_NEVER; 350 settings.shmem_hugepages[i].enabled = SHMEM_NEVER; 351 } 352 thp_push_settings(&settings); 353 } 354 355 for (i = 0; i < ARRAY_SIZE(testcases); i++) 356 plan += *testcases[i].nr_sizes; 357 ksft_set_plan(plan); 358 359 pagemap_fd = open("/proc/self/pagemap", O_RDONLY); 360 if (pagemap_fd < 0) 361 ksft_exit_fail_msg("opening pagemap failed\n"); 362 363 for (i = 0; i < ARRAY_SIZE(testcases); i++) { 364 const struct testcase *tc = &testcases[i]; 365 366 for (j = 0; j < *tc->nr_sizes; j++) 367 test_one_folio(tc->sizes[j], tc->private, tc->swapout, 368 tc->hugetlb); 369 } 370 371 /* If THP is supported, restore original THP settings. */ 372 if (nr_thpsizes) 373 thp_restore_settings(); 374 375 i = ksft_get_fail_cnt(); 376 if (i) 377 ksft_exit_fail_msg("%d out of %d tests failed\n", 378 i, ksft_test_num()); 379 ksft_exit_pass(); 380 } 381