#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Please run as root

# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4

# Global result counters and the script's eventual exit status.
count_total=0
count_pass=0
count_fail=0
count_skip=0
exitcode=0

# Print usage information and exit successfully.
usage() {
	cat <<EOF
usage: ${BASH_SOURCE[0]:-$0} [ options ]

  -a: run all tests, including extra ones (other than destructive ones)
  -t: specify specific categories to tests to run
  -h: display this message
  -n: disable TAP output
  -d: run destructive tests

The default behavior is to run required tests only. If -a is specified,
will run all tests.

Alternatively, specific groups tests can be run by passing a string
to the -t argument containing one or more of the following categories
separated by spaces:
- mmap
	tests for mmap(2)
- gup_test
	tests for gup
- userfaultfd
	tests for userfaultfd(2)
- compaction
	a test for the patch "Allow compaction of unevictable pages"
- mlock
	tests for mlock(2)
- mremap
	tests for mremap(2)
- hugevm
	tests for very large virtual address space
- vmalloc
	vmalloc smoke tests
- hmm
	hmm smoke tests
- madv_guard
	test madvise(2) MADV_GUARD_INSTALL and MADV_GUARD_REMOVE options
- madv_populate
	test memadvise(2) MADV_POPULATE_{READ,WRITE} options
- memfd_secret
	test memfd_secret(2)
- process_mrelease
	test process_mrelease(2)
- ksm
	ksm tests that do not require >=2 NUMA nodes
- ksm_numa
	ksm tests that require >=2 NUMA nodes
- pkey
	memory protection key tests
- soft_dirty
	test soft dirty page bit semantics
- pagemap
	test pagemap_scan IOCTL
- pfnmap
	tests for VM_PFNMAP handling
- process_madv
	test for process_madv
- cow
	test copy-on-write semantics
- thp
	test transparent huge pages
- hugetlb
	test hugetlbfs huge pages
- migration
	invoke move_pages(2) to exercise the migration entry code
	paths in the kernel
- mkdirty
	test handling of code that might set PTE/PMD dirty in
	read-only VMAs
- mdwe
	test prctl(PR_SET_MDWE, ...)
- page_frag
	test handling of page fragment allocation and freeing
- vma_merge
	test VMA merge cases behave as expected
- rmap
	test rmap behaves as expected

example: ./run_vmtests.sh -t "hmm mmap ksm"
EOF
	exit 0
}

RUN_ALL=false
RUN_DESTRUCTIVE=false
TAP_PREFIX="# "

# FIX: the optstring previously lacked 'd', so the documented -d flag
# (run destructive tests) was rejected by getopts and the "d" case arm
# below was unreachable; RUN_DESTRUCTIVE could never become true.
while getopts "aht:nd" OPT; do
	case ${OPT} in
		"a") RUN_ALL=true ;;
		"h") usage ;;
		"t") VM_SELFTEST_ITEMS=${OPTARG} ;;
		"n") TAP_PREFIX= ;;
		"d") RUN_DESTRUCTIVE=true ;;
	esac
done
shift $((OPTIND - 1))

# default behavior: run all tests
VM_SELFTEST_ITEMS=${VM_SELFTEST_ITEMS:-default}

# Return 0 (selected) when the category named in $1 should be run,
# i.e. no -t list was given or $1 appears in the -t list.
test_selected() {
	if [ "$VM_SELFTEST_ITEMS" == "default" ]; then
		# If no VM_SELFTEST_ITEMS are specified, run all tests
		return 0
	fi
	# If test selected argument is one of the test items
	if [[ " ${VM_SELFTEST_ITEMS[*]} " =~ " ${1} " ]]; then
		return 0
	else
		return 1
	fi
}

# Run ./gup_test across the full matrix of huge-page mode, gup/pin
# variant, write mode, sharing and batch size.  Relies on needmem_KB
# being computed before the first call (it is set below, and this
# function only runs later from the main test sequence).
run_gup_matrix() {
	# -t: thp=on, -T: thp=off, -H: hugetlb=on
	local hugetlb_mb=$(( needmem_KB / 1024 ))

	# NOTE: $huge, $share and $num are deliberately unquoted below so
	# that multi-word values ("-H -m N", "-n 512") split into separate
	# arguments for gup_test.
	for huge in -t -T "-H -m $hugetlb_mb"; do
		# -u: gup-fast, -U: gup-basic, -a: pin-fast, -b: pin-basic, -L: pin-longterm
		for test_cmd in -u -U -a -b -L; do
			# -w: write=1, -W: write=0
			for write in -w -W; do
				# -S: shared
				for share in -S " "; do
					# -n: How many pages to fetch together?  512 is special
					# because it's default thp size (or 2M on x86), 123 to
					# just test partial gup when hit a huge in whatever form
					for num in "-n 1" "-n 512" "-n 123"; do
						CATEGORY="gup_test" run_test ./gup_test \
							$huge $test_cmd $write $share $num
					done
				done
			done
		done
	done
}

# get huge pagesize and freepages from /proc/meminfo
while read -r name size unit; do
	if [ "$name" = "HugePages_Free:" ]; then
		freepgs="$size"
	fi
	if [ "$name" = "Hugepagesize:" ]; then
		hpgsize_KB="$size"
	fi
done < /proc/meminfo

# Simple hugetlbfs tests have a hardcoded minimum requirement of
# huge pages totaling 256MB (262144KB) in size. The userfaultfd
# hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take
# both of these requirements into account and attempt to increase
# number of huge pages available.
nr_cpus=$(nproc)
uffd_min_KB=$((hpgsize_KB * nr_cpus * 2))
hugetlb_min_KB=$((256 * 1024))
if [[ $uffd_min_KB -gt $hugetlb_min_KB ]]; then
	needmem_KB=$uffd_min_KB
else
	needmem_KB=$hugetlb_min_KB
fi

# set proper nr_hugepages
if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
	nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
	needpgs=$((needmem_KB / hpgsize_KB))
	tries=2
	while [ "$tries" -gt 0 ] && [ "$freepgs" -lt "$needpgs" ]; do
		lackpgs=$((needpgs - freepgs))
		echo 3 > /proc/sys/vm/drop_caches
		if ! echo $((lackpgs + nr_hugepgs)) > /proc/sys/vm/nr_hugepages; then
			echo "Please run this test as root"
			exit $ksft_skip
		fi
		# Re-read the free-page count after growing the pool.
		while read -r name size unit; do
			if [ "$name" = "HugePages_Free:" ]; then
				freepgs=$size
			fi
		done < /proc/meminfo
		tries=$((tries - 1))
	done
	if [ "$freepgs" -lt "$needpgs" ]; then
		printf "Not enough huge pages available (%d < %d)\n" \
			"$freepgs" "$needpgs"
	fi
	HAVE_HUGEPAGES=1
else
	echo "no hugetlbfs support in kernel?"
	HAVE_HUGEPAGES=0
fi

# filter 64bit architectures
ARCH64STR="arm64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sparc64 x86_64"
if [ -z "$ARCH" ]; then
	ARCH=$(uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/')
fi
VADDR64=0
echo "$ARCH64STR" | grep "$ARCH" &>/dev/null && VADDR64=1

# Prefix every line of stdin with TAP_PREFIX so it reads as a TAP
# comment ("# ..."); a no-op when -n cleared TAP_PREFIX.
tap_prefix() {
	sed -e "s/^/${TAP_PREFIX}/"
}

# Pass a single TAP result line from stdin to stdout, or swallow it
# entirely when TAP output is disabled (-n).
tap_output() {
	if [[ ! -z "$TAP_PREFIX" ]]; then
		# FIX: use read -r and a quoted expansion so backslashes and
		# internal whitespace in the TAP line are emitted verbatim.
		read -r str
		echo "$str"
	fi
}

# Strip a leading "./" (optionally preceded by "bash ") for display.
pretty_name() {
	echo "$*" | sed -e 's/^\(bash \)\?\.\///'
}

# Usage: run_test [test binary] [arbitrary test arguments...]
# Runs the command if $CATEGORY is selected, prefixes its output for
# TAP, and updates the pass/skip/fail counters and exitcode.
run_test() {
	if test_selected ${CATEGORY}; then
		local skip=0

		# On memory constrainted systems some tests can fail to allocate hugepages.
		# perform some cleanup before the test for a higher success rate.
		# FIX: quoted comparisons joined with ||, instead of the
		# deprecated/ambiguous "[ ... -o ... ]" with unquoted operands.
		if [ "${CATEGORY}" == "thp" ] || [ "${CATEGORY}" == "hugetlb" ]; then
			if [ "${HAVE_HUGEPAGES}" = "1" ]; then
				echo 3 > /proc/sys/vm/drop_caches
				sleep 2
				echo 1 > /proc/sys/vm/compact_memory
				sleep 2
			else
				echo "hugepages not supported" | tap_prefix
				skip=1
			fi
		fi

		local test=$(pretty_name "$*")
		local title="running $*"
		local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -)
		printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" | tap_prefix

		if [ "${skip}" != "1" ]; then
			# Run in a subshell; PIPESTATUS[0] is the test's own status,
			# not tap_prefix's.
			("$@" 2>&1) | tap_prefix
			local ret=${PIPESTATUS[0]}
		else
			local ret=$ksft_skip
		fi
		count_total=$(( count_total + 1 ))
		if [ $ret -eq 0 ]; then
			count_pass=$(( count_pass + 1 ))
			echo "[PASS]" | tap_prefix
			echo "ok ${count_total} ${test}" | tap_output
		elif [ $ret -eq $ksft_skip ]; then
			count_skip=$(( count_skip + 1 ))
			echo "[SKIP]" | tap_prefix
			echo "ok ${count_total} ${test} # SKIP" | tap_output
			exitcode=$ksft_skip
		else
			count_fail=$(( count_fail + 1 ))
			echo "[FAIL]" | tap_prefix
			echo "not ok ${count_total} ${test} # exit=$ret" | tap_output
			exitcode=1
		fi
	fi # test_selected
}

echo "TAP version 13" | tap_output

CATEGORY="hugetlb" run_test ./hugepage-mmap

# hugepage-shm needs enlarged SysV shm limits; restore them afterwards.
shmmax=$(cat /proc/sys/kernel/shmmax)
shmall=$(cat /proc/sys/kernel/shmall)
echo 268435456 > /proc/sys/kernel/shmmax
echo 4194304 > /proc/sys/kernel/shmall
CATEGORY="hugetlb" run_test ./hugepage-shm
echo "$shmmax" > /proc/sys/kernel/shmmax
echo "$shmall" > /proc/sys/kernel/shmall

CATEGORY="hugetlb" run_test ./map_hugetlb
CATEGORY="hugetlb" run_test ./hugepage-mremap
CATEGORY="hugetlb" run_test ./hugepage-vmemmap
CATEGORY="hugetlb" run_test ./hugetlb-madvise
CATEGORY="hugetlb" run_test ./hugetlb_dio

if [ "${HAVE_HUGEPAGES}" = "1" ]; then
	nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
	# For this test, we need one and just one huge page
	echo 1 > /proc/sys/vm/nr_hugepages
	CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv
	CATEGORY="hugetlb" run_test ./hugetlb_madv_vs_map
	# Restore the previous number of huge pages, since further tests rely on it
	echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
fi

if test_selected "hugetlb"; then
	echo "NOTE: These hugetlb tests provide minimal coverage. Use" | tap_prefix
	echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for" | tap_prefix
	echo "      hugetlb regression testing." | tap_prefix
fi

CATEGORY="mmap" run_test ./map_fixed_noreplace

if $RUN_ALL; then
	run_gup_matrix
else
	# get_user_pages_fast() benchmark
	CATEGORY="gup_test" run_test ./gup_test -u
	# pin_user_pages_fast() benchmark
	CATEGORY="gup_test" run_test ./gup_test -a
fi
# Dump pages 0, 19, and 4096, using pin_user_pages:
CATEGORY="gup_test" run_test ./gup_test -ct -F 0x1 0 19 0x1000
CATEGORY="gup_test" run_test ./gup_longterm

CATEGORY="userfaultfd" run_test ./uffd-unit-tests
uffd_stress_bin=./uffd-stress
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} anon 20 16
# Hugetlb tests require source and destination huge pages. Pass in half
# the size of the free pages we have, which is used for *each*.
# uffd-stress expects a region expressed in MiB, so we adjust
# half_ufd_size_MB accordingly.
half_ufd_size_MB=$(((freepgs * hpgsize_KB) / 1024 / 2))
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem-private 20 16
# uffd-wp-mremap requires at least one page of each size.
# Make sure every hugepage size has at least one page available for
# uffd-wp-mremap, remembering each pool's previous size for cleanup.
have_all_size_hugepgs=true
declare -A nr_size_hugepgs
for f in /sys/kernel/mm/hugepages/**/nr_hugepages; do
	# FIX: quote "$f" (sysfs path) in the command substitutions.
	old=$(cat "$f")
	nr_size_hugepgs["$f"]="$old"
	if [ "$old" == 0 ]; then
		echo 1 > "$f"
	fi
	# FIX: quote the command substitution so an empty read cannot
	# collapse into "[ == 0 ]" and break the test expression.
	if [ "$(cat "$f")" == 0 ]; then
		have_all_size_hugepgs=false
		break
	fi
done
if $have_all_size_hugepgs; then
	CATEGORY="userfaultfd" run_test ./uffd-wp-mremap
else
	echo "# SKIP ./uffd-wp-mremap"
fi

#cleanup
for f in "${!nr_size_hugepgs[@]}"; do
	echo "${nr_size_hugepgs["$f"]}" > "$f"
done
# FIX: only restore the global pool size if it was saved earlier.
# Without hugetlbfs support $nr_hugepgs is unset, and echoing an empty
# string into the sysctl just produces a spurious write error.
if [ -n "$nr_hugepgs" ]; then
	echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages
fi

CATEGORY="compaction" run_test ./compaction_test

# on-fault-limit must run unprivileged; probe that "nobody" can see it.
if command -v sudo &> /dev/null && sudo -u nobody ls ./on-fault-limit >/dev/null;
then
	CATEGORY="mlock" run_test sudo -u nobody ./on-fault-limit
else
	echo "# SKIP ./on-fault-limit"
fi

CATEGORY="mmap" run_test ./map_populate

CATEGORY="mlock" run_test ./mlock-random-test

CATEGORY="mlock" run_test ./mlock2-tests

CATEGORY="process_mrelease" run_test ./mrelease_test

CATEGORY="mremap" run_test ./mremap_test

CATEGORY="hugetlb" run_test ./thuge-gen
CATEGORY="hugetlb" run_test ./charge_reserved_hugetlb.sh -cgroup-v2
CATEGORY="hugetlb" run_test ./hugetlb_reparenting_test.sh -cgroup-v2
# Destructive hugetlb tests; save and restore the sysctls they touch.
if $RUN_DESTRUCTIVE; then
nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
enable_soft_offline=$(cat /proc/sys/vm/enable_soft_offline)
echo 8 > /proc/sys/vm/nr_hugepages
CATEGORY="hugetlb" run_test ./hugetlb-soft-offline
echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
echo "$enable_soft_offline" > /proc/sys/vm/enable_soft_offline
CATEGORY="hugetlb" run_test ./hugetlb-read-hwpoison
fi

if [ $VADDR64 -ne 0 ]; then

	# set overcommit_policy as OVERCOMMIT_ALWAYS so that kernel
	# allows high virtual address allocation requests independent
	# of platform's physical memory.

	if [ -x ./virtual_address_range ]; then
		prev_policy=$(cat /proc/sys/vm/overcommit_memory)
		echo 1 > /proc/sys/vm/overcommit_memory
		CATEGORY="hugevm" run_test ./virtual_address_range
		# FIX: quote the saved value when restoring the sysctl.
		echo "$prev_policy" > /proc/sys/vm/overcommit_memory
	fi

	# va high address boundary switch test
	ARCH_ARM64="arm64"
	prev_nr_hugepages=$(cat /proc/sys/vm/nr_hugepages)
	if [ "$ARCH" == "$ARCH_ARM64" ]; then
		echo 6 > /proc/sys/vm/nr_hugepages
	fi
	CATEGORY="hugevm" run_test bash ./va_high_addr_switch.sh
	if [ "$ARCH" == "$ARCH_ARM64" ]; then
		echo "$prev_nr_hugepages" > /proc/sys/vm/nr_hugepages
	fi
fi # VADDR64

# vmalloc stability smoke test
CATEGORY="vmalloc" run_test bash ./test_vmalloc.sh smoke

CATEGORY="mremap" run_test ./mremap_dontunmap

CATEGORY="hmm" run_test bash ./test_hmm.sh smoke

# MADV_GUARD_INSTALL and MADV_GUARD_REMOVE tests
CATEGORY="madv_guard" run_test ./guard-regions

# MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
CATEGORY="madv_populate" run_test ./madv_populate

# PROCESS_MADV test
CATEGORY="process_madv" run_test ./process_madv

CATEGORY="vma_merge" run_test ./merge

if [ -x ./memfd_secret ]
then
# memfd_secret ptrace-attaches to itself; relax Yama if present.
if [ -f /proc/sys/kernel/yama/ptrace_scope ]; then
	(echo 0 > /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix
fi
CATEGORY="memfd_secret" run_test ./memfd_secret
fi

# KSM KSM_MERGE_TIME_HUGE_PAGES test with size of 100
if [ "${HAVE_HUGEPAGES}" = "1" ]; then
	CATEGORY="ksm" run_test ./ksm_tests -H -s 100
fi
# KSM KSM_MERGE_TIME test with size of 100
CATEGORY="ksm" run_test ./ksm_tests -P -s 100
# KSM MADV_MERGEABLE test with 10 identical pages
CATEGORY="ksm" run_test ./ksm_tests -M -p 10
# KSM unmerge test
CATEGORY="ksm" run_test ./ksm_tests -U
# KSM test with 10 zero pages and use_zero_pages = 0
CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 0
# KSM test with 10 zero pages and use_zero_pages = 1
CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 1
# KSM test with 2 NUMA nodes and merge_across_nodes = 1
CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 1
# KSM test with 2 NUMA nodes and merge_across_nodes = 0
CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 0

CATEGORY="ksm" run_test ./ksm_functional_tests

# protection_keys tests
nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
if [ -x ./protection_keys_32 ]
then
	CATEGORY="pkey" run_test ./protection_keys_32
fi

if [ -x ./protection_keys_64 ]
then
	CATEGORY="pkey" run_test ./protection_keys_64
fi
echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages

if [ -x ./soft-dirty ]
then
	CATEGORY="soft_dirty" run_test ./soft-dirty
fi

CATEGORY="pagemap" run_test ./pagemap_ioctl

CATEGORY="pfnmap" run_test ./pfnmap

# COW tests
CATEGORY="cow" run_test ./cow

CATEGORY="thp" run_test ./khugepaged

CATEGORY="thp" run_test ./khugepaged -s 2

CATEGORY="thp" run_test ./khugepaged all:shmem

CATEGORY="thp" run_test ./khugepaged -s 4 all:shmem

CATEGORY="thp" run_test ./transhuge-stress -d 20

# Try to create XFS if not provided
if [ -z "${SPLIT_HUGE_PAGE_TEST_XFS_PATH}" ]; then
	if [ "${HAVE_HUGEPAGES}" = "1" ]; then
		if test_selected "thp"; then
			if grep xfs /proc/filesystems &>/dev/null; then
				XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX)
				SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX)
				truncate -s 314572800 "${XFS_IMG}"
				mkfs.xfs -q "${XFS_IMG}"
				mount -o loop "${XFS_IMG}" "${SPLIT_HUGE_PAGE_TEST_XFS_PATH}"
				MOUNTED_XFS=1
			fi
		fi
	fi
fi

CATEGORY="thp" run_test ./split_huge_page_test ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}

# Tear down the scratch XFS only if we mounted it ourselves above.
if [ -n "${MOUNTED_XFS}" ]; then
	umount "${SPLIT_HUGE_PAGE_TEST_XFS_PATH}"
	rmdir "${SPLIT_HUGE_PAGE_TEST_XFS_PATH}"
	rm -f "${XFS_IMG}"
fi

CATEGORY="migration" run_test ./migration

CATEGORY="mkdirty" run_test ./mkdirty

CATEGORY="mdwe" run_test ./mdwe_test

CATEGORY="page_frag" run_test ./test_page_frag.sh smoke

CATEGORY="page_frag" run_test ./test_page_frag.sh aligned

CATEGORY="page_frag" run_test ./test_page_frag.sh nonaligned

CATEGORY="rmap" run_test ./rmap

echo "SUMMARY: PASS=${count_pass} SKIP=${count_skip} FAIL=${count_fail}" | tap_prefix
echo "1..${count_total}" | tap_output

exit $exitcode