xref: /linux/tools/testing/selftests/mm/run_vmtests.sh (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Please run as root

# IMPORTANT: If you add a new test CATEGORY please add a simple wrapper
# script so kunit knows to run it, and add it to the list below.
# If you do not, YOUR TESTS WILL NOT RUN IN THE CI.
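#
# A category wrapper is usually nothing more than a script that invokes this
# runner with -t for its category.  A minimal sketch (the file name and exact
# contents here are assumptions, not an existing wrapper):
#
#	#!/bin/bash
#	# SPDX-License-Identifier: GPL-2.0
#	exec ./run_vmtests.sh -t "mmap"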

# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4

count_total=0
count_pass=0
count_fail=0
count_skip=0
exitcode=0

usage() {
	cat <<EOF
usage: ${BASH_SOURCE[0]:-$0} [ options ]

  -a: run all tests, including extra ones (other than destructive ones)
  -t: specify specific categories of tests to run
  -h: display this message
  -n: disable TAP output
  -d: run destructive tests

The default behavior is to run required tests only.  If -a is specified,
all tests will be run.

Alternatively, specific groups of tests can be run by passing a string
to the -t argument containing one or more of the following categories
separated by spaces:
- mmap
	tests for mmap(2)
- gup_test
	tests for gup
- userfaultfd
	tests for userfaultfd(2)
- compaction
	a test for the patch "Allow compaction of unevictable pages"
- mlock
	tests for mlock(2)
- mremap
	tests for mremap(2)
- hugevm
	tests for very large virtual address space
- vmalloc
	vmalloc smoke tests
- hmm
	hmm smoke tests
- madv_guard
	test madvise(2) MADV_GUARD_INSTALL and MADV_GUARD_REMOVE options
- madv_populate
	test madvise(2) MADV_POPULATE_{READ,WRITE} options
- memfd_secret
	test memfd_secret(2)
- process_mrelease
	test process_mrelease(2)
- ksm
	ksm tests that do not require >=2 NUMA nodes
- ksm_numa
	ksm tests that require >=2 NUMA nodes
- pkey
	memory protection key tests
- soft_dirty
	test soft dirty page bit semantics
- pagemap
	test pagemap_scan IOCTL
- pfnmap
	tests for VM_PFNMAP handling
- process_madv
	test for process_madvise(2)
- cow
	test copy-on-write semantics
- thp
	test transparent huge pages
- hugetlb
	test hugetlbfs huge pages
- migration
	invoke move_pages(2) to exercise the migration entry code
	paths in the kernel
- mkdirty
	test handling of code that might set PTE/PMD dirty in
	read-only VMAs
- mdwe
	test prctl(PR_SET_MDWE, ...)
- page_frag
	test handling of page fragment allocation and freeing
- vma_merge
	test VMA merge cases behave as expected
- rmap
	test rmap behaves as expected
- memory-failure
	test memory-failure behaves as expected

example: ./run_vmtests.sh -t "hmm mmap ksm"
EOF
	exit 0
}

RUN_ALL=false
RUN_DESTRUCTIVE=false
TAP_PREFIX="# "

while getopts "adht:n" OPT; do
	case ${OPT} in
		"a") RUN_ALL=true ;;
		"h") usage ;;
		"t") VM_SELFTEST_ITEMS=${OPTARG} ;;
		"n") TAP_PREFIX= ;;
		"d") RUN_DESTRUCTIVE=true ;;
	esac
done
shift $((OPTIND -1))

# default behavior: run all tests
VM_SELFTEST_ITEMS=${VM_SELFTEST_ITEMS:-default}

test_selected() {
	if [ "$VM_SELFTEST_ITEMS" == "default" ]; then
		# If no VM_SELFTEST_ITEMS are specified, run all tests
		return 0
	fi
	# If test selected argument is one of the test items
	if [[ " ${VM_SELFTEST_ITEMS[*]} " =~ " ${1} " ]]; then
		return 0
	else
		return 1
	fi
}

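# When -a is given, exercise ./gup_test across the full matrix of huge page
# mode, GUP/pin variant, write flag, sharing mode and number of pages.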
run_gup_matrix() {
    # -t: thp=on, -T: thp=off, -H: hugetlb=on
    local hugetlb_mb=$(( needmem_KB / 1024 ))

    for huge in -t -T "-H -m $hugetlb_mb"; do
        # -u: gup-fast, -U: gup-basic, -a: pin-fast, -b: pin-basic, -L: pin-longterm
        for test_cmd in -u -U -a -b -L; do
            # -w: write=1, -W: write=0
            for write in -w -W; do
                # -S: shared
                for share in -S " "; do
                    # -n: how many pages to fetch together.  512 is special
                    # because it matches the default THP size in pages (2M
                    # with 4K pages on x86); 123 exercises partial gup when a
                    # huge page is hit in whatever form
                    for num in "-n 1" "-n 512" "-n 123" "-n -1"; do
                        CATEGORY="gup_test" run_test ./gup_test \
                                $huge $test_cmd $write $share $num
                    done
                done
            done
        done
    done
}

# get huge pagesize and freepages from /proc/meminfo
while read -r name size unit; do
	if [ "$name" = "HugePages_Free:" ]; then
		freepgs="$size"
	fi
	if [ "$name" = "Hugepagesize:" ]; then
		hpgsize_KB="$size"
	fi
done < /proc/meminfo

# Simple hugetlbfs tests have a hardcoded minimum requirement of
# huge pages totaling 256MB (262144KB) in size.  The userfaultfd
# hugetlb test requires a minimum of 2 * nr_cpus huge pages.  Take
# both of these requirements into account and attempt to increase
# the number of huge pages available.
nr_cpus=$(nproc)
uffd_min_KB=$((hpgsize_KB * nr_cpus * 2))
hugetlb_min_KB=$((256 * 1024))
if [[ $uffd_min_KB -gt $hugetlb_min_KB ]]; then
	needmem_KB=$uffd_min_KB
else
	needmem_KB=$hugetlb_min_KB
fi
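# As a worked example (values assumed): with 2048 kB huge pages and 8 CPUs,
# uffd_min_KB = 2048 * 8 * 2 = 32768 kB, which is below the 262144 kB
# hugetlbfs minimum, so needmem_KB ends up as 262144 kB (128 huge pages).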

# set proper nr_hugepages
if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
	orig_nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
	needpgs=$((needmem_KB / hpgsize_KB))
	tries=2
	while [ "$tries" -gt 0 ] && [ "$freepgs" -lt "$needpgs" ]; do
		lackpgs=$((needpgs - freepgs))
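		# Dropping the page cache first makes it more likely that the
		# extra huge pages can actually be allocated.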
		echo 3 > /proc/sys/vm/drop_caches
		if ! echo $((lackpgs + orig_nr_hugepgs)) > /proc/sys/vm/nr_hugepages; then
			echo "Please run this test as root"
			exit $ksft_skip
		fi
		while read -r name size unit; do
			if [ "$name" = "HugePages_Free:" ]; then
				freepgs=$size
			fi
		done < /proc/meminfo
		tries=$((tries - 1))
	done
	nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
	if [ "$freepgs" -lt "$needpgs" ]; then
		printf "Not enough huge pages available (%d < %d)\n" \
		       "$freepgs" "$needpgs"
	fi
	HAVE_HUGEPAGES=1
else
	echo "no hugetlbfs support in kernel?"
	HAVE_HUGEPAGES=0
fi

# filter 64bit architectures
ARCH64STR="arm64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sparc64 x86_64"
if [ -z "$ARCH" ]; then
	ARCH=$(uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/')
fi
VADDR64=0
echo "$ARCH64STR" | grep "$ARCH" &>/dev/null && VADDR64=1

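# Turn arbitrary test output into TAP comments by prefixing every line.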
tap_prefix() {
	sed -e "s/^/${TAP_PREFIX}/"
}

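# Emit a single TAP result line, but only when TAP output is enabled
# (-n empties TAP_PREFIX and silences these lines).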
tap_output() {
	if [[ ! -z "$TAP_PREFIX" ]]; then
		read str
		echo $str
	fi
}

pretty_name() {
	echo "$*" | sed -e 's/^\(bash \)\?\.\///'
}

# Usage: run_test [test binary] [arbitrary test arguments...]
run_test() {
	if test_selected ${CATEGORY}; then
		local skip=0

		# On memory constrained systems some tests can fail to allocate hugepages.
		# Perform some cleanup before the test for a higher success rate.
		if [ "${CATEGORY}" == "thp" ] || [ "${CATEGORY}" == "hugetlb" ]; then
			if [ "${HAVE_HUGEPAGES}" = "1" ]; then
				echo 3 > /proc/sys/vm/drop_caches
				sleep 2
				echo 1 > /proc/sys/vm/compact_memory
				sleep 2
			else
				echo "hugepages not supported" | tap_prefix
				skip=1
			fi
		fi

		local test=$(pretty_name "$*")
		local title="running $*"
		local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -)
		printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" | tap_prefix

		if [ "${skip}" != "1" ]; then
			("$@" 2>&1) | tap_prefix
			local ret=${PIPESTATUS[0]}
		else
			local ret=$ksft_skip
		fi
		count_total=$(( count_total + 1 ))
		if [ $ret -eq 0 ]; then
			count_pass=$(( count_pass + 1 ))
			echo "[PASS]" | tap_prefix
			echo "ok ${count_total} ${test}" | tap_output
		elif [ $ret -eq $ksft_skip ]; then
			count_skip=$(( count_skip + 1 ))
			echo "[SKIP]" | tap_prefix
			echo "ok ${count_total} ${test} # SKIP" | tap_output
			exitcode=$ksft_skip
		else
			count_fail=$(( count_fail + 1 ))
			echo "[FAIL]" | tap_prefix
			echo "not ok ${count_total} ${test} # exit=$ret" | tap_output
			exitcode=1
		fi
	fi # test_selected
}

echo "TAP version 13" | tap_output

CATEGORY="hugetlb" run_test ./hugepage-mmap

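# hugepage-shm attaches a large SHM_HUGETLB segment, so temporarily raise the
# SysV shared memory limits and restore the original values afterwards.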
shmmax=$(cat /proc/sys/kernel/shmmax)
shmall=$(cat /proc/sys/kernel/shmall)
echo 268435456 > /proc/sys/kernel/shmmax
echo 4194304 > /proc/sys/kernel/shmall
CATEGORY="hugetlb" run_test ./hugepage-shm
echo "$shmmax" > /proc/sys/kernel/shmmax
echo "$shmall" > /proc/sys/kernel/shmall

CATEGORY="hugetlb" run_test ./map_hugetlb
CATEGORY="hugetlb" run_test ./hugepage-mremap
CATEGORY="hugetlb" run_test ./hugepage-vmemmap
CATEGORY="hugetlb" run_test ./hugetlb-madvise
CATEGORY="hugetlb" run_test ./hugetlb_dio

if [ "${HAVE_HUGEPAGES}" = "1" ]; then
	nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
	# For these tests, we need exactly one huge page
	echo 1 > /proc/sys/vm/nr_hugepages
	CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv
	CATEGORY="hugetlb" run_test ./hugetlb_madv_vs_map
	# Restore the previous number of huge pages, since further tests rely on it
	echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
fi

if test_selected "hugetlb"; then
	echo "NOTE: These hugetlb tests provide minimal coverage.  Use"	  | tap_prefix
	echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for" | tap_prefix
	echo "      hugetlb regression testing."			  | tap_prefix
fi

CATEGORY="mmap" run_test ./map_fixed_noreplace

if $RUN_ALL; then
    run_gup_matrix
else
    # get_user_pages_fast() benchmark
    CATEGORY="gup_test" run_test ./gup_test -u -n 1
    CATEGORY="gup_test" run_test ./gup_test -u -n -1
    # pin_user_pages_fast() benchmark
    CATEGORY="gup_test" run_test ./gup_test -a -n 1
    CATEGORY="gup_test" run_test ./gup_test -a -n -1
fi
# Dump pages 0, 19, and 4096, using pin_user_pages:
CATEGORY="gup_test" run_test ./gup_test -ct -F 0x1 0 19 0x1000
CATEGORY="gup_test" run_test ./gup_longterm

CATEGORY="userfaultfd" run_test ./uffd-unit-tests
uffd_stress_bin=./uffd-stress
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} anon 20 16
# Hugetlb tests require source and destination huge pages. Pass in almost half
# the size of the free pages we have, which is used for *each*. An adjustment
# of (nr_parallel - 1) is done (see nr_parallel in uffd-stress.c) to have some
# extra hugepages - this is done to prevent the test from failing by racily
# reserving more hugepages than strictly required.
# uffd-stress expects a region expressed in MiB, so we adjust
# half_ufd_size_MB accordingly.
adjustment=$(( (31 < (nr_cpus - 1)) ? 31 : (nr_cpus - 1) ))
half_ufd_size_MB=$((((freepgs - adjustment) * hpgsize_KB) / 1024 / 2))
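# As a worked example (values assumed): with 128 free 2048 kB huge pages and
# 8 CPUs, adjustment = 7 and
# half_ufd_size_MB = ((128 - 7) * 2048) / 1024 / 2 = 121.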
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem-private 20 16
# uffd-wp-mremap requires at least one page of each size.
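# Bump any size whose pool is empty to a single page, remembering the
# original counts so they can be restored afterwards.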
have_all_size_hugepgs=true
declare -A nr_size_hugepgs
for f in /sys/kernel/mm/hugepages/**/nr_hugepages; do
	old=$(cat $f)
	nr_size_hugepgs["$f"]="$old"
	if [ "$old" == 0 ]; then
		echo 1 > "$f"
	fi
	if [ $(cat "$f") == 0 ]; then
		have_all_size_hugepgs=false
		break
	fi
done
if $have_all_size_hugepgs; then
	CATEGORY="userfaultfd" run_test ./uffd-wp-mremap
else
	echo "# SKIP ./uffd-wp-mremap"
fi

# cleanup: restore the original hugepage pool sizes
for f in "${!nr_size_hugepgs[@]}"; do
	echo "${nr_size_hugepgs["$f"]}" > "$f"
done
echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages

CATEGORY="compaction" run_test ./compaction_test

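# on-fault-limit needs to run as an unprivileged user so that RLIMIT_MEMLOCK
# is actually enforced; skip it when sudo is missing or the "nobody" user
# cannot reach the test binary.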
if command -v sudo &> /dev/null && sudo -u nobody ls ./on-fault-limit >/dev/null;
then
	CATEGORY="mlock" run_test sudo -u nobody ./on-fault-limit
else
	echo "# SKIP ./on-fault-limit"
fi

CATEGORY="mmap" run_test ./map_populate

CATEGORY="mlock" run_test ./mlock-random-test

CATEGORY="mlock" run_test ./mlock2-tests

CATEGORY="process_mrelease" run_test ./mrelease_test

CATEGORY="mremap" run_test ./mremap_test

CATEGORY="hugetlb" run_test ./thuge-gen
CATEGORY="hugetlb" run_test ./charge_reserved_hugetlb.sh -cgroup-v2
CATEGORY="hugetlb" run_test ./hugetlb_reparenting_test.sh -cgroup-v2
if $RUN_DESTRUCTIVE; then
	nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
	enable_soft_offline=$(cat /proc/sys/vm/enable_soft_offline)
	echo 8 > /proc/sys/vm/nr_hugepages
	CATEGORY="hugetlb" run_test ./hugetlb-soft-offline
	echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
	echo "$enable_soft_offline" > /proc/sys/vm/enable_soft_offline
	CATEGORY="hugetlb" run_test ./hugetlb-read-hwpoison
fi

if [ $VADDR64 -ne 0 ]; then
	# va high address boundary switch test
	CATEGORY="hugevm" run_test bash ./va_high_addr_switch.sh
fi # VADDR64

# vmalloc stability smoke test
CATEGORY="vmalloc" run_test bash ./test_vmalloc.sh smoke

CATEGORY="mremap" run_test ./mremap_dontunmap

CATEGORY="hmm" run_test bash ./test_hmm.sh smoke

# MADV_GUARD_INSTALL and MADV_GUARD_REMOVE tests
CATEGORY="madv_guard" run_test ./guard-regions

# MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
CATEGORY="madv_populate" run_test ./madv_populate

# PROCESS_MADV test
CATEGORY="process_madv" run_test ./process_madv

CATEGORY="vma_merge" run_test ./merge

if [ -x ./memfd_secret ]
then
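	# The test ptraces a child to verify that secret memory stays
	# inaccessible, so make sure Yama does not forbid the attach itself.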
	if [ -f /proc/sys/kernel/yama/ptrace_scope ]; then
		(echo 0 > /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix
	fi
	CATEGORY="memfd_secret" run_test ./memfd_secret
fi

# KSM KSM_MERGE_TIME_HUGE_PAGES test with size of 100
if [ "${HAVE_HUGEPAGES}" = "1" ]; then
	CATEGORY="ksm" run_test ./ksm_tests -H -s 100
fi
# KSM KSM_MERGE_TIME test with size of 100
CATEGORY="ksm" run_test ./ksm_tests -P -s 100
# KSM MADV_MERGEABLE test with 10 identical pages
CATEGORY="ksm" run_test ./ksm_tests -M -p 10
# KSM unmerge test
CATEGORY="ksm" run_test ./ksm_tests -U
# KSM test with 10 zero pages and use_zero_pages = 0
CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 0
# KSM test with 10 zero pages and use_zero_pages = 1
CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 1
# KSM test with 2 NUMA nodes and merge_across_nodes = 1
CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 1
# KSM test with 2 NUMA nodes and merge_across_nodes = 0
CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 0

CATEGORY="ksm" run_test ./ksm_functional_tests

# protection_keys tests
nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
if [ -x ./protection_keys_32 ]
then
	CATEGORY="pkey" run_test ./protection_keys_32
fi

if [ -x ./protection_keys_64 ]
then
	CATEGORY="pkey" run_test ./protection_keys_64
fi
echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages

if [ -x ./soft-dirty ]
then
	CATEGORY="soft_dirty" run_test ./soft-dirty
fi

CATEGORY="pagemap" run_test ./pagemap_ioctl

CATEGORY="pfnmap" run_test ./pfnmap

# COW tests
CATEGORY="cow" run_test ./cow

CATEGORY="thp" run_test ./khugepaged

CATEGORY="thp" run_test ./khugepaged -s 2

CATEGORY="thp" run_test ./khugepaged all:shmem

CATEGORY="thp" run_test ./khugepaged -s 4 all:shmem

CATEGORY="thp" run_test ./transhuge-stress -d 20

# Try to create XFS if not provided
if [ -z "${SPLIT_HUGE_PAGE_TEST_XFS_PATH}" ]; then
    if [ "${HAVE_HUGEPAGES}" = "1" ]; then
	if test_selected "thp"; then
	    if grep xfs /proc/filesystems &>/dev/null; then
		XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX)
		SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX)
		truncate -s 314572800 ${XFS_IMG}
		mkfs.xfs -q ${XFS_IMG}
		mount -o loop ${XFS_IMG} ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
		MOUNTED_XFS=1
	    fi
	fi
    fi
fi

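# split_huge_page_test uses the XFS mount, when available, to also cover
# splitting of file-backed huge pages.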
CATEGORY="thp" run_test ./split_huge_page_test ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}

if [ -n "${MOUNTED_XFS}" ]; then
    umount ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
    rmdir ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
    rm -f ${XFS_IMG}
fi

CATEGORY="migration" run_test ./migration

CATEGORY="mkdirty" run_test ./mkdirty

CATEGORY="mdwe" run_test ./mdwe_test

CATEGORY="page_frag" run_test ./test_page_frag.sh smoke

CATEGORY="page_frag" run_test ./test_page_frag.sh aligned

CATEGORY="page_frag" run_test ./test_page_frag.sh nonaligned

CATEGORY="rmap" run_test ./rmap

# Try to load hwpoison_inject if not present.
HWPOISON_DIR=/sys/kernel/debug/hwpoison/
if [ ! -d "$HWPOISON_DIR" ]; then
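	# "modprobe -R" only resolves the alias, so this checks whether the
	# module exists without actually loading it.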
	if ! modprobe -q -R hwpoison_inject; then
		echo "Module hwpoison_inject not found, skipping..."
	else
		modprobe hwpoison_inject > /dev/null 2>&1
		LOADED_MOD=1
	fi
fi

if [ -d "$HWPOISON_DIR" ]; then
	CATEGORY="memory-failure" run_test ./memory-failure
fi

if [ -n "${LOADED_MOD}" ]; then
	modprobe -r hwpoison_inject > /dev/null 2>&1
fi

if [ "${HAVE_HUGEPAGES}" = 1 ]; then
	echo "$orig_nr_hugepgs" > /proc/sys/vm/nr_hugepages
fi

echo "SUMMARY: PASS=${count_pass} SKIP=${count_skip} FAIL=${count_fail}" | tap_prefix
echo "1..${count_total}" | tap_output

exit $exitcode
