xref: /linux/tools/testing/selftests/mm/run_vmtests.sh (revision 2ccd9fecd9163f168761d4398564c81554f636ef)
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Please run as root

# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4

count_total=0
count_pass=0
count_fail=0
count_skip=0
exitcode=0

usage() {
	cat <<EOF
usage: ${BASH_SOURCE[0]:-$0} [ options ]

  -a: run all tests, including extra ones (other than destructive ones)
  -t: specify specific categories of tests to run
  -h: display this message
  -n: disable TAP output
  -d: run destructive tests

The default behavior is to run required tests only.  If -a is specified,
all tests will be run.

Alternatively, specific groups of tests can be run by passing a string
to the -t argument containing one or more of the following categories
separated by spaces:
- mmap
	tests for mmap(2)
- gup_test
	tests for gup
- userfaultfd
	tests for userfaultfd(2)
- compaction
	a test for the patch "Allow compaction of unevictable pages"
- mlock
	tests for mlock(2)
- mremap
	tests for mremap(2)
- hugevm
	tests for very large virtual address space
- vmalloc
	vmalloc smoke tests
- hmm
	hmm smoke tests
- madv_guard
	test madvise(2) MADV_GUARD_INSTALL and MADV_GUARD_REMOVE options
- madv_populate
	test madvise(2) MADV_POPULATE_{READ,WRITE} options
- memfd_secret
	test memfd_secret(2)
- process_mrelease
	test process_mrelease(2)
- ksm
	ksm tests that do not require >=2 NUMA nodes
- ksm_numa
	ksm tests that require >=2 NUMA nodes
- pkey
	memory protection key tests
- soft_dirty
	test soft dirty page bit semantics
- pagemap
	test pagemap_scan IOCTL
- pfnmap
	tests for VM_PFNMAP handling
- process_madv
	tests for process_madvise(2)
- cow
	test copy-on-write semantics
- thp
	test transparent huge pages
- hugetlb
	test hugetlbfs huge pages
- migration
	invoke move_pages(2) to exercise the migration entry code
	paths in the kernel
- mkdirty
	test handling of code that might set PTE/PMD dirty in
	read-only VMAs
- mdwe
	test prctl(PR_SET_MDWE, ...)
- page_frag
	test handling of page fragment allocation and freeing
- vma_merge
	test VMA merge cases behave as expected
- rmap
	test rmap behaves as expected

example: ./run_vmtests.sh -t "hmm mmap ksm"
EOF
	exit 0
}

RUN_ALL=false
RUN_DESTRUCTIVE=false
TAP_PREFIX="# "

while getopts "adht:n" OPT; do
	case ${OPT} in
		"a") RUN_ALL=true ;;
		"h") usage ;;
		"t") VM_SELFTEST_ITEMS=${OPTARG} ;;
		"n") TAP_PREFIX= ;;
		"d") RUN_DESTRUCTIVE=true ;;
	esac
done
shift $((OPTIND -1))

# default behavior: run all tests
VM_SELFTEST_ITEMS=${VM_SELFTEST_ITEMS:-default}

test_selected() {
	if [ "$VM_SELFTEST_ITEMS" == "default" ]; then
		# If no VM_SELFTEST_ITEMS are specified, run all tests
		return 0
	fi
	# Check whether the requested category is among the selected items
	# (space-padded so only whole words match)
	if [[ " ${VM_SELFTEST_ITEMS[*]} " =~ " ${1} " ]]; then
		return 0
	else
		return 1
	fi
}

run_gup_matrix() {
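    # Note: the nested loops below exercise every combination (3 huge-page
    # modes x 5 gup/pin variants x 2 write modes x 2 sharing modes x 3 page
    # counts), i.e. 180 gup_test invocations in total.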
    # -t: thp=on, -T: thp=off, -H: hugetlb=on
    local hugetlb_mb=$(( needmem_KB / 1024 ))

    for huge in -t -T "-H -m $hugetlb_mb"; do
        # -u: gup-fast, -U: gup-basic, -a: pin-fast, -b: pin-basic, -L: pin-longterm
        for test_cmd in -u -U -a -b -L; do
            # -w: write=1, -W: write=0
            for write in -w -W; do
                # -S: shared
                for share in -S " "; do
                    # -n: how many pages to fetch together?  512 is special
                    # because it's the default THP size (i.e. 2M on x86); 123
                    # just tests partial gup when hitting a huge page in any form
                    for num in "-n 1" "-n 512" "-n 123"; do
                        CATEGORY="gup_test" run_test ./gup_test \
                                $huge $test_cmd $write $share $num
                    done
                done
            done
        done
    done
}

# get huge page size and free huge pages from /proc/meminfo
while read -r name size unit; do
	if [ "$name" = "HugePages_Free:" ]; then
		freepgs="$size"
	fi
	if [ "$name" = "Hugepagesize:" ]; then
		hpgsize_KB="$size"
	fi
done < /proc/meminfo

# Simple hugetlbfs tests have a hardcoded minimum requirement of
# huge pages totaling 256MB (262144KB) in size.  The userfaultfd
# hugetlb test requires a minimum of 2 * nr_cpus huge pages.  Take
# both of these requirements into account and attempt to increase
# the number of huge pages available.
nr_cpus=$(nproc)
uffd_min_KB=$((hpgsize_KB * nr_cpus * 2))
hugetlb_min_KB=$((256 * 1024))
if [[ $uffd_min_KB -gt $hugetlb_min_KB ]]; then
	needmem_KB=$uffd_min_KB
else
	needmem_KB=$hugetlb_min_KB
fi
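
# Worked example (assuming a hypothetical 8-CPU machine with 2048 KB huge
# pages): uffd_min_KB = 2048 * 8 * 2 = 32768 and hugetlb_min_KB = 262144,
# so needmem_KB = 262144, i.e. 128 huge pages are needed below.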

# set proper nr_hugepages
if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
	nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
	needpgs=$((needmem_KB / hpgsize_KB))
	tries=2
	while [ "$tries" -gt 0 ] && [ "$freepgs" -lt "$needpgs" ]; do
		lackpgs=$((needpgs - freepgs))
		echo 3 > /proc/sys/vm/drop_caches
		if ! echo $((lackpgs + nr_hugepgs)) > /proc/sys/vm/nr_hugepages; then
			echo "Please run this test as root"
			exit $ksft_skip
		fi
		while read -r name size unit; do
			if [ "$name" = "HugePages_Free:" ]; then
				freepgs=$size
			fi
		done < /proc/meminfo
		tries=$((tries - 1))
	done
	if [ "$freepgs" -lt "$needpgs" ]; then
		printf "Not enough huge pages available (%d < %d)\n" \
		       "$freepgs" "$needpgs"
	fi
	HAVE_HUGEPAGES=1
else
	echo "no hugetlbfs support in kernel?"
	HAVE_HUGEPAGES=0
fi

# filter 64-bit architectures
ARCH64STR="arm64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sparc64 x86_64"
if [ -z "$ARCH" ]; then
	ARCH=$(uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/')
fi
VADDR64=0
echo "$ARCH64STR" | grep "$ARCH" &>/dev/null && VADDR64=1

tap_prefix() {
	sed -e "s/^/${TAP_PREFIX}/"
}

tap_output() {
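	# Pass the single TAP line read from stdin through only when TAP output
	# is enabled; with -n, TAP_PREFIX is empty and the line is discarded.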
	if [[ ! -z "$TAP_PREFIX" ]]; then
		read str
		echo $str
	fi
}

pretty_name() {
	echo "$*" | sed -e 's/^\(bash \)\?\.\///'
}

# Usage: run_test [test binary] [arbitrary test arguments...]
run_test() {
	if test_selected ${CATEGORY}; then
		local skip=0

		# On memory-constrained systems some tests can fail to allocate hugepages.
		# Perform some cleanup before the test for a higher success rate.
		if [ ${CATEGORY} == "thp" -o ${CATEGORY} == "hugetlb" ]; then
			if [ "${HAVE_HUGEPAGES}" = "1" ]; then
				echo 3 > /proc/sys/vm/drop_caches
				sleep 2
				echo 1 > /proc/sys/vm/compact_memory
				sleep 2
			else
				echo "hugepages not supported" | tap_prefix
				skip=1
			fi
		fi

		local test=$(pretty_name "$*")
		local title="running $*"
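		# Build a separator row of '-' characters as long as the title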
		local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -)
		printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" | tap_prefix

		if [ "${skip}" != "1" ]; then
			("$@" 2>&1) | tap_prefix
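			# PIPESTATUS[0] is the exit status of "$@", not of tap_prefix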
			local ret=${PIPESTATUS[0]}
		else
			local ret=$ksft_skip
		fi
		count_total=$(( count_total + 1 ))
		if [ $ret -eq 0 ]; then
			count_pass=$(( count_pass + 1 ))
			echo "[PASS]" | tap_prefix
			echo "ok ${count_total} ${test}" | tap_output
		elif [ $ret -eq $ksft_skip ]; then
			count_skip=$(( count_skip + 1 ))
			echo "[SKIP]" | tap_prefix
			echo "ok ${count_total} ${test} # SKIP" | tap_output
			exitcode=$ksft_skip
		else
			count_fail=$(( count_fail + 1 ))
			echo "[FAIL]" | tap_prefix
			echo "not ok ${count_total} ${test} # exit=$ret" | tap_output
			exitcode=1
		fi
	fi # test_selected
}

echo "TAP version 13" | tap_output

CATEGORY="hugetlb" run_test ./hugepage-mmap

shmmax=$(cat /proc/sys/kernel/shmmax)
shmall=$(cat /proc/sys/kernel/shmall)
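# Raise the SysV shared memory limits for hugepage-shm: 268435456 bytes is
# 256 MiB of shmmax and 4194304 pages is 16 GiB of shmall (assuming 4 KiB
# pages); both are restored afterwards.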
echo 268435456 > /proc/sys/kernel/shmmax
echo 4194304 > /proc/sys/kernel/shmall
CATEGORY="hugetlb" run_test ./hugepage-shm
echo "$shmmax" > /proc/sys/kernel/shmmax
echo "$shmall" > /proc/sys/kernel/shmall

CATEGORY="hugetlb" run_test ./map_hugetlb
CATEGORY="hugetlb" run_test ./hugepage-mremap
CATEGORY="hugetlb" run_test ./hugepage-vmemmap
CATEGORY="hugetlb" run_test ./hugetlb-madvise
CATEGORY="hugetlb" run_test ./hugetlb_dio

if [ "${HAVE_HUGEPAGES}" = "1" ]; then
	nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
	# For this test, we need exactly one huge page
	echo 1 > /proc/sys/vm/nr_hugepages
	CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv
	CATEGORY="hugetlb" run_test ./hugetlb_madv_vs_map
	# Restore the previous number of huge pages, since further tests rely on it
	echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
fi

if test_selected "hugetlb"; then
	echo "NOTE: These hugetlb tests provide minimal coverage.  Use"	  | tap_prefix
	echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for" | tap_prefix
	echo "      hugetlb regression testing."			  | tap_prefix
fi

CATEGORY="mmap" run_test ./map_fixed_noreplace

if $RUN_ALL; then
    run_gup_matrix
else
    # get_user_pages_fast() benchmark
    CATEGORY="gup_test" run_test ./gup_test -u
    # pin_user_pages_fast() benchmark
    CATEGORY="gup_test" run_test ./gup_test -a
fi
# Dump pages 0, 19, and 4096, using pin_user_pages:
CATEGORY="gup_test" run_test ./gup_test -ct -F 0x1 0 19 0x1000
CATEGORY="gup_test" run_test ./gup_longterm

CATEGORY="userfaultfd" run_test ./uffd-unit-tests
uffd_stress_bin=./uffd-stress
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} anon 20 16
# Hugetlb tests require source and destination huge pages. Pass in almost half
# the size of the free pages we have, which is used for *each* of them. An
# adjustment of (nr_parallel - 1) is made (see nr_parallel in uffd-stress.c) to
# leave some extra hugepages, which prevents the test from failing by racily
# reserving more hugepages than strictly required.
# uffd-stress expects a region size expressed in MiB, so we compute
# half_ufd_size_MB accordingly.
adjustment=$(( (31 < (nr_cpus - 1)) ? 31 : (nr_cpus - 1) ))
half_ufd_size_MB=$((((freepgs - adjustment) * hpgsize_KB) / 1024 / 2))
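# Worked example with hypothetical values: 128 free 2048 KB huge pages and
# 8 CPUs give adjustment = min(31, 7) = 7, so
# half_ufd_size_MB = ((128 - 7) * 2048) / 1024 / 2 = 121.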
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem-private 20 16
# uffd-wp-mremap requires at least one page of each size.
have_all_size_hugepgs=true
declare -A nr_size_hugepgs
for f in /sys/kernel/mm/hugepages/**/nr_hugepages; do
	old=$(cat $f)
	nr_size_hugepgs["$f"]="$old"
	if [ "$old" == 0 ]; then
		echo 1 > "$f"
	fi
	if [ $(cat "$f") == 0 ]; then
		have_all_size_hugepgs=false
		break
	fi
done
if $have_all_size_hugepgs; then
	CATEGORY="userfaultfd" run_test ./uffd-wp-mremap
else
	echo "# SKIP ./uffd-wp-mremap"
fi

# cleanup
for f in "${!nr_size_hugepgs[@]}"; do
	echo "${nr_size_hugepgs["$f"]}" > "$f"
done
echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages

CATEGORY="compaction" run_test ./compaction_test

if command -v sudo &> /dev/null && sudo -u nobody ls ./on-fault-limit >/dev/null;
then
	CATEGORY="mlock" run_test sudo -u nobody ./on-fault-limit
else
	echo "# SKIP ./on-fault-limit"
fi

CATEGORY="mmap" run_test ./map_populate

CATEGORY="mlock" run_test ./mlock-random-test

CATEGORY="mlock" run_test ./mlock2-tests

CATEGORY="process_mrelease" run_test ./mrelease_test

CATEGORY="mremap" run_test ./mremap_test

CATEGORY="hugetlb" run_test ./thuge-gen
CATEGORY="hugetlb" run_test ./charge_reserved_hugetlb.sh -cgroup-v2
CATEGORY="hugetlb" run_test ./hugetlb_reparenting_test.sh -cgroup-v2
if $RUN_DESTRUCTIVE; then
nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
enable_soft_offline=$(cat /proc/sys/vm/enable_soft_offline)
echo 8 > /proc/sys/vm/nr_hugepages
CATEGORY="hugetlb" run_test ./hugetlb-soft-offline
echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
echo "$enable_soft_offline" > /proc/sys/vm/enable_soft_offline
CATEGORY="hugetlb" run_test ./hugetlb-read-hwpoison
fi

if [ $VADDR64 -ne 0 ]; then

	# set overcommit_policy to OVERCOMMIT_ALWAYS so that the kernel
	# allows high virtual address allocation requests independent
	# of the platform's physical memory.

	if [ -x ./virtual_address_range ]; then
		prev_policy=$(cat /proc/sys/vm/overcommit_memory)
		echo 1 > /proc/sys/vm/overcommit_memory
		CATEGORY="hugevm" run_test ./virtual_address_range
		echo $prev_policy > /proc/sys/vm/overcommit_memory
	fi

	# va high address boundary switch test
	ARCH_ARM64="arm64"
	prev_nr_hugepages=$(cat /proc/sys/vm/nr_hugepages)
	if [ "$ARCH" == "$ARCH_ARM64" ]; then
		echo 6 > /proc/sys/vm/nr_hugepages
	fi
	CATEGORY="hugevm" run_test bash ./va_high_addr_switch.sh
	if [ "$ARCH" == "$ARCH_ARM64" ]; then
		echo $prev_nr_hugepages > /proc/sys/vm/nr_hugepages
	fi
fi # VADDR64

# vmalloc stability smoke test
CATEGORY="vmalloc" run_test bash ./test_vmalloc.sh smoke

CATEGORY="mremap" run_test ./mremap_dontunmap

CATEGORY="hmm" run_test bash ./test_hmm.sh smoke

# MADV_GUARD_INSTALL and MADV_GUARD_REMOVE tests
CATEGORY="madv_guard" run_test ./guard-regions

# MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
CATEGORY="madv_populate" run_test ./madv_populate

# PROCESS_MADV test
CATEGORY="process_madv" run_test ./process_madv

CATEGORY="vma_merge" run_test ./merge

if [ -x ./memfd_secret ]
then
if [ -f /proc/sys/kernel/yama/ptrace_scope ]; then
	(echo 0 > /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix
fi
CATEGORY="memfd_secret" run_test ./memfd_secret
fi

# KSM KSM_MERGE_TIME_HUGE_PAGES test with size of 100 MiB
if [ "${HAVE_HUGEPAGES}" = "1" ]; then
	CATEGORY="ksm" run_test ./ksm_tests -H -s 100
fi
# KSM KSM_MERGE_TIME test with size of 100 MiB
CATEGORY="ksm" run_test ./ksm_tests -P -s 100
# KSM MADV_MERGEABLE test with 10 identical pages
CATEGORY="ksm" run_test ./ksm_tests -M -p 10
# KSM unmerge test
CATEGORY="ksm" run_test ./ksm_tests -U
# KSM test with 10 zero pages and use_zero_pages = 0
CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 0
# KSM test with 10 zero pages and use_zero_pages = 1
CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 1
# KSM test with 2 NUMA nodes and merge_across_nodes = 1
CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 1
# KSM test with 2 NUMA nodes and merge_across_nodes = 0
CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 0

CATEGORY="ksm" run_test ./ksm_functional_tests

# protection_keys tests
nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
if [ -x ./protection_keys_32 ]
then
	CATEGORY="pkey" run_test ./protection_keys_32
fi

if [ -x ./protection_keys_64 ]
then
	CATEGORY="pkey" run_test ./protection_keys_64
fi
echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages

if [ -x ./soft-dirty ]
then
	CATEGORY="soft_dirty" run_test ./soft-dirty
fi

CATEGORY="pagemap" run_test ./pagemap_ioctl

CATEGORY="pfnmap" run_test ./pfnmap

# COW tests
CATEGORY="cow" run_test ./cow

CATEGORY="thp" run_test ./khugepaged

CATEGORY="thp" run_test ./khugepaged -s 2

CATEGORY="thp" run_test ./khugepaged all:shmem

CATEGORY="thp" run_test ./khugepaged -s 4 all:shmem

CATEGORY="thp" run_test ./transhuge-stress -d 20

# Try to create XFS if not provided
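# (The loop-mounted image created below is truncated to 314572800 bytes,
# i.e. 300 MiB.)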
if [ -z "${SPLIT_HUGE_PAGE_TEST_XFS_PATH}" ]; then
    if [ "${HAVE_HUGEPAGES}" = "1" ]; then
	if test_selected "thp"; then
	    if grep xfs /proc/filesystems &>/dev/null; then
		XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX)
		SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX)
		truncate -s 314572800 ${XFS_IMG}
		mkfs.xfs -q ${XFS_IMG}
		mount -o loop ${XFS_IMG} ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
		MOUNTED_XFS=1
	    fi
	fi
    fi
fi

CATEGORY="thp" run_test ./split_huge_page_test ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}

if [ -n "${MOUNTED_XFS}" ]; then
    umount ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
    rmdir ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
    rm -f ${XFS_IMG}
fi

CATEGORY="migration" run_test ./migration

CATEGORY="mkdirty" run_test ./mkdirty

CATEGORY="mdwe" run_test ./mdwe_test

CATEGORY="page_frag" run_test ./test_page_frag.sh smoke

CATEGORY="page_frag" run_test ./test_page_frag.sh aligned

CATEGORY="page_frag" run_test ./test_page_frag.sh nonaligned

CATEGORY="rmap" run_test ./rmap

echo "SUMMARY: PASS=${count_pass} SKIP=${count_skip} FAIL=${count_fail}" | tap_prefix
echo "1..${count_total}" | tap_output

exit $exitcode