xref: /linux/tools/testing/selftests/mm/run_vmtests.sh (revision 1b30456150e57a79e300b82eb2efac40c25a162e)
1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0
3# Please run as root
4
5# Kselftest framework requirement - SKIP code is 4.
6ksft_skip=4
7
8count_total=0
9count_pass=0
10count_fail=0
11count_skip=0
12exitcode=0
13
14usage() {
15	cat <<EOF
16usage: ${BASH_SOURCE[0]:-$0} [ options ]
17
18  -a: run all tests, including extra ones (other than destructive ones)
19  -t: specify specific categories to tests to run
20  -h: display this message
21  -n: disable TAP output
22  -d: run destructive tests
23
24The default behavior is to run required tests only.  If -a is specified,
25will run all tests.
26
27Alternatively, specific groups tests can be run by passing a string
28to the -t argument containing one or more of the following categories
29separated by spaces:
30- mmap
31	tests for mmap(2)
32- gup_test
33	tests for gup
34- userfaultfd
35	tests for  userfaultfd(2)
36- compaction
37	a test for the patch "Allow compaction of unevictable pages"
38- mlock
39	tests for mlock(2)
40- mremap
41	tests for mremap(2)
42- hugevm
43	tests for very large virtual address space
44- vmalloc
45	vmalloc smoke tests
46- hmm
47	hmm smoke tests
48- madv_guard
49	test madvise(2) MADV_GUARD_INSTALL and MADV_GUARD_REMOVE options
50- madv_populate
51	test memadvise(2) MADV_POPULATE_{READ,WRITE} options
52- memfd_secret
53	test memfd_secret(2)
54- process_mrelease
55	test process_mrelease(2)
56- ksm
57	ksm tests that do not require >=2 NUMA nodes
58- ksm_numa
59	ksm tests that require >=2 NUMA nodes
60- pkey
61	memory protection key tests
62- soft_dirty
63	test soft dirty page bit semantics
64- pagemap
65	test pagemap_scan IOCTL
66- cow
67	test copy-on-write semantics
68- thp
69	test transparent huge pages
70- hugetlb
71	test hugetlbfs huge pages
72- migration
73	invoke move_pages(2) to exercise the migration entry code
74	paths in the kernel
75- mkdirty
76	test handling of code that might set PTE/PMD dirty in
77	read-only VMAs
78- mdwe
79	test prctl(PR_SET_MDWE, ...)
80- page_frag
81	test handling of page fragment allocation and freeing
82
83example: ./run_vmtests.sh -t "hmm mmap ksm"
84EOF
85	exit 0
86}
87
88RUN_ALL=false
89RUN_DESTRUCTIVE=false
90TAP_PREFIX="# "
91
92while getopts "aht:n" OPT; do
93	case ${OPT} in
94		"a") RUN_ALL=true ;;
95		"h") usage ;;
96		"t") VM_SELFTEST_ITEMS=${OPTARG} ;;
97		"n") TAP_PREFIX= ;;
98		"d") RUN_DESTRUCTIVE=true ;;
99	esac
100done
101shift $((OPTIND -1))
102
103# default behavior: run all tests
104VM_SELFTEST_ITEMS=${VM_SELFTEST_ITEMS:-default}
105
106test_selected() {
107	if [ "$VM_SELFTEST_ITEMS" == "default" ]; then
108		# If no VM_SELFTEST_ITEMS are specified, run all tests
109		return 0
110	fi
111	# If test selected argument is one of the test items
112	if [[ " ${VM_SELFTEST_ITEMS[*]} " =~ " ${1} " ]]; then
113	        return 0
114	else
115	        return 1
116	fi
117}
118
119run_gup_matrix() {
120    # -t: thp=on, -T: thp=off, -H: hugetlb=on
121    local hugetlb_mb=$(( needmem_KB / 1024 ))
122
123    for huge in -t -T "-H -m $hugetlb_mb"; do
124        # -u: gup-fast, -U: gup-basic, -a: pin-fast, -b: pin-basic, -L: pin-longterm
125        for test_cmd in -u -U -a -b -L; do
126            # -w: write=1, -W: write=0
127            for write in -w -W; do
128                # -S: shared
129                for share in -S " "; do
130                    # -n: How many pages to fetch together?  512 is special
131                    # because it's default thp size (or 2M on x86), 123 to
132                    # just test partial gup when hit a huge in whatever form
133                    for num in "-n 1" "-n 512" "-n 123"; do
134                        CATEGORY="gup_test" run_test ./gup_test \
135                                $huge $test_cmd $write $share $num
136                    done
137                done
138            done
139        done
140    done
141}
142
143# get huge pagesize and freepages from /proc/meminfo
144while read -r name size unit; do
145	if [ "$name" = "HugePages_Free:" ]; then
146		freepgs="$size"
147	fi
148	if [ "$name" = "Hugepagesize:" ]; then
149		hpgsize_KB="$size"
150	fi
151done < /proc/meminfo
152
153# Simple hugetlbfs tests have a hardcoded minimum requirement of
154# huge pages totaling 256MB (262144KB) in size.  The userfaultfd
155# hugetlb test requires a minimum of 2 * nr_cpus huge pages.  Take
156# both of these requirements into account and attempt to increase
157# number of huge pages available.
158nr_cpus=$(nproc)
159uffd_min_KB=$((hpgsize_KB * nr_cpus * 2))
160hugetlb_min_KB=$((256 * 1024))
161if [[ $uffd_min_KB -gt $hugetlb_min_KB ]]; then
162	needmem_KB=$uffd_min_KB
163else
164	needmem_KB=$hugetlb_min_KB
165fi
166
167# set proper nr_hugepages
168if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
169	nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
170	needpgs=$((needmem_KB / hpgsize_KB))
171	tries=2
172	while [ "$tries" -gt 0 ] && [ "$freepgs" -lt "$needpgs" ]; do
173		lackpgs=$((needpgs - freepgs))
174		echo 3 > /proc/sys/vm/drop_caches
175		if ! echo $((lackpgs + nr_hugepgs)) > /proc/sys/vm/nr_hugepages; then
176			echo "Please run this test as root"
177			exit $ksft_skip
178		fi
179		while read -r name size unit; do
180			if [ "$name" = "HugePages_Free:" ]; then
181				freepgs=$size
182			fi
183		done < /proc/meminfo
184		tries=$((tries - 1))
185	done
186	if [ "$freepgs" -lt "$needpgs" ]; then
187		printf "Not enough huge pages available (%d < %d)\n" \
188		       "$freepgs" "$needpgs"
189	fi
190else
191	echo "no hugetlbfs support in kernel?"
192	exit 1
193fi
194
195# filter 64bit architectures
196ARCH64STR="arm64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sparc64 x86_64"
197if [ -z "$ARCH" ]; then
198	ARCH=$(uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/')
199fi
200VADDR64=0
201echo "$ARCH64STR" | grep "$ARCH" &>/dev/null && VADDR64=1
202
203tap_prefix() {
204	sed -e "s/^/${TAP_PREFIX}/"
205}
206
207tap_output() {
208	if [[ ! -z "$TAP_PREFIX" ]]; then
209		read str
210		echo $str
211	fi
212}
213
214pretty_name() {
215	echo "$*" | sed -e 's/^\(bash \)\?\.\///'
216}
217
218# Usage: run_test [test binary] [arbitrary test arguments...]
219run_test() {
220	if test_selected ${CATEGORY}; then
221		# On memory constrainted systems some tests can fail to allocate hugepages.
222		# perform some cleanup before the test for a higher success rate.
223		if [ ${CATEGORY} == "thp" ] | [ ${CATEGORY} == "hugetlb" ]; then
224			echo 3 > /proc/sys/vm/drop_caches
225			sleep 2
226			echo 1 > /proc/sys/vm/compact_memory
227			sleep 2
228		fi
229
230		local test=$(pretty_name "$*")
231		local title="running $*"
232		local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -)
233		printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" | tap_prefix
234
235		("$@" 2>&1) | tap_prefix
236		local ret=${PIPESTATUS[0]}
237		count_total=$(( count_total + 1 ))
238		if [ $ret -eq 0 ]; then
239			count_pass=$(( count_pass + 1 ))
240			echo "[PASS]" | tap_prefix
241			echo "ok ${count_total} ${test}" | tap_output
242		elif [ $ret -eq $ksft_skip ]; then
243			count_skip=$(( count_skip + 1 ))
244			echo "[SKIP]" | tap_prefix
245			echo "ok ${count_total} ${test} # SKIP" | tap_output
246			exitcode=$ksft_skip
247		else
248			count_fail=$(( count_fail + 1 ))
249			echo "[FAIL]" | tap_prefix
250			echo "not ok ${count_total} ${test} # exit=$ret" | tap_output
251			exitcode=1
252		fi
253	fi # test_selected
254}
255
256echo "TAP version 13" | tap_output
257
258CATEGORY="hugetlb" run_test ./hugepage-mmap
259
260shmmax=$(cat /proc/sys/kernel/shmmax)
261shmall=$(cat /proc/sys/kernel/shmall)
262echo 268435456 > /proc/sys/kernel/shmmax
263echo 4194304 > /proc/sys/kernel/shmall
264CATEGORY="hugetlb" run_test ./hugepage-shm
265echo "$shmmax" > /proc/sys/kernel/shmmax
266echo "$shmall" > /proc/sys/kernel/shmall
267
268CATEGORY="hugetlb" run_test ./map_hugetlb
269CATEGORY="hugetlb" run_test ./hugepage-mremap
270CATEGORY="hugetlb" run_test ./hugepage-vmemmap
271CATEGORY="hugetlb" run_test ./hugetlb-madvise
272CATEGORY="hugetlb" run_test ./hugetlb_dio
273
274nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
275# For this test, we need one and just one huge page
276echo 1 > /proc/sys/vm/nr_hugepages
277CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv
278CATEGORY="hugetlb" run_test ./hugetlb_madv_vs_map
279# Restore the previous number of huge pages, since further tests rely on it
280echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
281
282if test_selected "hugetlb"; then
283	echo "NOTE: These hugetlb tests provide minimal coverage.  Use"	  | tap_prefix
284	echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for" | tap_prefix
285	echo "      hugetlb regression testing."			  | tap_prefix
286fi
287
288CATEGORY="mmap" run_test ./map_fixed_noreplace
289
290if $RUN_ALL; then
291    run_gup_matrix
292else
293    # get_user_pages_fast() benchmark
294    CATEGORY="gup_test" run_test ./gup_test -u
295    # pin_user_pages_fast() benchmark
296    CATEGORY="gup_test" run_test ./gup_test -a
297fi
298# Dump pages 0, 19, and 4096, using pin_user_pages:
299CATEGORY="gup_test" run_test ./gup_test -ct -F 0x1 0 19 0x1000
300CATEGORY="gup_test" run_test ./gup_longterm
301
302CATEGORY="userfaultfd" run_test ./uffd-unit-tests
303uffd_stress_bin=./uffd-stress
304CATEGORY="userfaultfd" run_test ${uffd_stress_bin} anon 20 16
305# Hugetlb tests require source and destination huge pages. Pass in half
306# the size of the free pages we have, which is used for *each*.
307half_ufd_size_MB=$((freepgs / 2))
308CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 32
309CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32
310CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16
311CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem-private 20 16
312CATEGORY="userfaultfd" run_test ./uffd-wp-mremap
313
314#cleanup
315echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages
316
317CATEGORY="compaction" run_test ./compaction_test
318
319if command -v sudo &> /dev/null;
320then
321	CATEGORY="mlock" run_test sudo -u nobody ./on-fault-limit
322else
323	echo "# SKIP ./on-fault-limit"
324fi
325
326CATEGORY="mmap" run_test ./map_populate
327
328CATEGORY="mlock" run_test ./mlock-random-test
329
330CATEGORY="mlock" run_test ./mlock2-tests
331
332CATEGORY="process_mrelease" run_test ./mrelease_test
333
334CATEGORY="mremap" run_test ./mremap_test
335
336CATEGORY="hugetlb" run_test ./thuge-gen
337CATEGORY="hugetlb" run_test ./charge_reserved_hugetlb.sh -cgroup-v2
338CATEGORY="hugetlb" run_test ./hugetlb_reparenting_test.sh -cgroup-v2
339if $RUN_DESTRUCTIVE; then
340nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
341enable_soft_offline=$(cat /proc/sys/vm/enable_soft_offline)
342echo 8 > /proc/sys/vm/nr_hugepages
343CATEGORY="hugetlb" run_test ./hugetlb-soft-offline
344echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
345echo "$enable_soft_offline" > /proc/sys/vm/enable_soft_offline
346CATEGORY="hugetlb" run_test ./hugetlb-read-hwpoison
347fi
348
349if [ $VADDR64 -ne 0 ]; then
350
351	# set overcommit_policy as OVERCOMMIT_ALWAYS so that kernel
352	# allows high virtual address allocation requests independent
353	# of platform's physical memory.
354
355	if [ -x ./virtual_address_range ]; then
356		prev_policy=$(cat /proc/sys/vm/overcommit_memory)
357		echo 1 > /proc/sys/vm/overcommit_memory
358		CATEGORY="hugevm" run_test ./virtual_address_range
359		echo $prev_policy > /proc/sys/vm/overcommit_memory
360	fi
361
362	# va high address boundary switch test
363	ARCH_ARM64="arm64"
364	prev_nr_hugepages=$(cat /proc/sys/vm/nr_hugepages)
365	if [ "$ARCH" == "$ARCH_ARM64" ]; then
366		echo 6 > /proc/sys/vm/nr_hugepages
367	fi
368	CATEGORY="hugevm" run_test bash ./va_high_addr_switch.sh
369	if [ "$ARCH" == "$ARCH_ARM64" ]; then
370		echo $prev_nr_hugepages > /proc/sys/vm/nr_hugepages
371	fi
372fi # VADDR64
373
374# vmalloc stability smoke test
375CATEGORY="vmalloc" run_test bash ./test_vmalloc.sh smoke
376
377CATEGORY="mremap" run_test ./mremap_dontunmap
378
379CATEGORY="hmm" run_test bash ./test_hmm.sh smoke
380
381# MADV_GUARD_INSTALL and MADV_GUARD_REMOVE tests
382CATEGORY="madv_guard" run_test ./guard-pages
383
384# MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
385CATEGORY="madv_populate" run_test ./madv_populate
386
387if [ -x ./memfd_secret ]
388then
389(echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix
390CATEGORY="memfd_secret" run_test ./memfd_secret
391fi
392
393# KSM KSM_MERGE_TIME_HUGE_PAGES test with size of 100
394CATEGORY="ksm" run_test ./ksm_tests -H -s 100
395# KSM KSM_MERGE_TIME test with size of 100
396CATEGORY="ksm" run_test ./ksm_tests -P -s 100
397# KSM MADV_MERGEABLE test with 10 identical pages
398CATEGORY="ksm" run_test ./ksm_tests -M -p 10
399# KSM unmerge test
400CATEGORY="ksm" run_test ./ksm_tests -U
401# KSM test with 10 zero pages and use_zero_pages = 0
402CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 0
403# KSM test with 10 zero pages and use_zero_pages = 1
404CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 1
405# KSM test with 2 NUMA nodes and merge_across_nodes = 1
406CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 1
407# KSM test with 2 NUMA nodes and merge_across_nodes = 0
408CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 0
409
410CATEGORY="ksm" run_test ./ksm_functional_tests
411
412# protection_keys tests
413nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
414if [ -x ./protection_keys_32 ]
415then
416	CATEGORY="pkey" run_test ./protection_keys_32
417fi
418
419if [ -x ./protection_keys_64 ]
420then
421	CATEGORY="pkey" run_test ./protection_keys_64
422fi
423echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages
424
425if [ -x ./soft-dirty ]
426then
427	CATEGORY="soft_dirty" run_test ./soft-dirty
428fi
429
430CATEGORY="pagemap" run_test ./pagemap_ioctl
431
432# COW tests
433CATEGORY="cow" run_test ./cow
434
435CATEGORY="thp" run_test ./khugepaged
436
437CATEGORY="thp" run_test ./khugepaged -s 2
438
439CATEGORY="thp" run_test ./transhuge-stress -d 20
440
441# Try to create XFS if not provided
442if [ -z "${SPLIT_HUGE_PAGE_TEST_XFS_PATH}" ]; then
443    if test_selected "thp"; then
444        if grep xfs /proc/filesystems &>/dev/null; then
445            XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX)
446            SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX)
447            truncate -s 314572800 ${XFS_IMG}
448            mkfs.xfs -q ${XFS_IMG}
449            mount -o loop ${XFS_IMG} ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
450            MOUNTED_XFS=1
451        fi
452    fi
453fi
454
455CATEGORY="thp" run_test ./split_huge_page_test ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
456
457if [ -n "${MOUNTED_XFS}" ]; then
458    umount ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
459    rmdir ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
460    rm -f ${XFS_IMG}
461fi
462
463CATEGORY="migration" run_test ./migration
464
465CATEGORY="mkdirty" run_test ./mkdirty
466
467CATEGORY="mdwe" run_test ./mdwe_test
468
469CATEGORY="page_frag" run_test ./test_page_frag.sh smoke
470
471CATEGORY="page_frag" run_test ./test_page_frag.sh aligned
472
473CATEGORY="page_frag" run_test ./test_page_frag.sh nonaligned
474
475echo "SUMMARY: PASS=${count_pass} SKIP=${count_skip} FAIL=${count_fail}" | tap_prefix
476echo "1..${count_total}" | tap_output
477
478exit $exitcode
479