#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Please run as root

# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4

count_total=0
count_pass=0
count_fail=0
count_skip=0
exitcode=0

usage() {
	cat <<EOF
usage: ${BASH_SOURCE[0]:-$0} [ options ]

  -a: run all tests, including extra ones (other than destructive ones)
  -t: specify specific categories of tests to run
  -h: display this message
  -n: disable TAP output
  -d: run destructive tests

The default behavior is to run required tests only.  If -a is specified,
all tests will be run.

Alternatively, specific groups of tests can be run by passing a string
to the -t argument containing one or more of the following categories
separated by spaces:
- mmap
	tests for mmap(2)
- gup_test
	tests for gup
- userfaultfd
	tests for userfaultfd(2)
- compaction
	a test for the patch "Allow compaction of unevictable pages"
- mlock
	tests for mlock(2)
- mremap
	tests for mremap(2)
- hugevm
	tests for very large virtual address space
- vmalloc
	vmalloc smoke tests
- hmm
	hmm smoke tests
- madv_populate
	test madvise(2) MADV_POPULATE_{READ,WRITE} options
- memfd_secret
	test memfd_secret(2)
- process_mrelease
	test process_mrelease(2)
- ksm
	ksm tests that do not require >=2 NUMA nodes
- ksm_numa
	ksm tests that require >=2 NUMA nodes
- pkey
	memory protection key tests
- soft_dirty
	test soft dirty page bit semantics
- pagemap
	test pagemap_scan IOCTL
- cow
	test copy-on-write semantics
- thp
	test transparent huge pages
- hugetlb
	test hugetlbfs huge pages
- migration
	invoke move_pages(2) to exercise the migration entry code
	paths in the kernel
- mkdirty
	test handling of code that might set PTE/PMD dirty in
	read-only VMAs
- mdwe
	test prctl(PR_SET_MDWE, ...)

example: ./run_vmtests.sh -t "hmm mmap ksm"
EOF
	exit 0
}

RUN_ALL=false
RUN_DESTRUCTIVE=false
TAP_PREFIX="# "

while getopts "adht:n" OPT; do
	case ${OPT} in
		"a") RUN_ALL=true ;;
		"h") usage ;;
		"t") VM_SELFTEST_ITEMS=${OPTARG} ;;
		"n") TAP_PREFIX= ;;
		"d") RUN_DESTRUCTIVE=true ;;
	esac
done
shift $((OPTIND -1))
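# Example: "./run_vmtests.sh -t 'hmm mmap ksm'" runs only those three
# categories, while "./run_vmtests.sh -a -d" additionally enables the extra
# (gup matrix) and destructive (hugetlb soft-offline/hwpoison) tests.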

# default behavior: run tests from all categories
VM_SELFTEST_ITEMS=${VM_SELFTEST_ITEMS:-default}

test_selected() {
	if [ "$VM_SELFTEST_ITEMS" == "default" ]; then
		# If no VM_SELFTEST_ITEMS are specified, run all tests
		return 0
	fi
	# If the test category argument is one of the selected items
	if [[ " ${VM_SELFTEST_ITEMS[*]} " =~ " ${1} " ]]; then
		return 0
	else
		return 1
	fi
}
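# Example: with VM_SELFTEST_ITEMS="hmm mmap", test_selected "hmm" returns 0
# (run the test) and test_selected "thp" returns 1 (skip it).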

run_gup_matrix() {
    # -t: thp=on, -T: thp=off, -H: hugetlb=on
    local hugetlb_mb=$(( needmem_KB / 1024 ))

    for huge in -t -T "-H -m $hugetlb_mb"; do
        # -u: gup-fast, -U: gup-basic, -a: pin-fast, -b: pin-basic, -L: pin-longterm
        for test_cmd in -u -U -a -b -L; do
            # -w: write=1, -W: write=0
            for write in -w -W; do
                # -S: shared
                for share in -S " "; do
                    # -n: how many pages to fetch together?  512 is special
                    # because it's the default thp size (2M on x86); 123 just
                    # tests partial gup when hitting a huge page in whatever form
                    for num in "-n 1" "-n 512" "-n 123"; do
                        CATEGORY="gup_test" run_test ./gup_test \
                                $huge $test_cmd $write $share $num
                    done
                done
            done
        done
    done
}
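# The matrix above covers 3 (huge) x 5 (gup/pin mode) x 2 (write) x 2 (share)
# x 3 (num) = 180 gup_test invocations when the extra tests are enabled.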

# get huge pagesize and freepages from /proc/meminfo
while read -r name size unit; do
	if [ "$name" = "HugePages_Free:" ]; then
		freepgs="$size"
	fi
	if [ "$name" = "Hugepagesize:" ]; then
		hpgsize_KB="$size"
	fi
done < /proc/meminfo

# Simple hugetlbfs tests have a hardcoded minimum requirement of
# huge pages totaling 256MB (262144KB) in size.  The userfaultfd
# hugetlb test requires a minimum of 2 * nr_cpus huge pages.  Take
# both of these requirements into account and attempt to increase
# the number of huge pages available.
nr_cpus=$(nproc)
uffd_min_KB=$((hpgsize_KB * nr_cpus * 2))
hugetlb_min_KB=$((256 * 1024))
if [[ $uffd_min_KB -gt $hugetlb_min_KB ]]; then
	needmem_KB=$uffd_min_KB
else
	needmem_KB=$hugetlb_min_KB
fi
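# Illustrative example: with 2048 KB huge pages and 8 CPUs, uffd_min_KB is
# 2048 * 8 * 2 = 32768 KB, so needmem_KB stays at the 262144 KB hugetlb
# minimum and needpgs below works out to 262144 / 2048 = 128 huge pages.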

# set proper nr_hugepages
if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
	nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
	needpgs=$((needmem_KB / hpgsize_KB))
	tries=2
	while [ "$tries" -gt 0 ] && [ "$freepgs" -lt "$needpgs" ]; do
		lackpgs=$((needpgs - freepgs))
		echo 3 > /proc/sys/vm/drop_caches
		if ! echo $((lackpgs + nr_hugepgs)) > /proc/sys/vm/nr_hugepages; then
			echo "Please run this test as root"
			exit $ksft_skip
		fi
		while read -r name size unit; do
			if [ "$name" = "HugePages_Free:" ]; then
				freepgs=$size
			fi
		done < /proc/meminfo
		tries=$((tries - 1))
	done
	if [ "$freepgs" -lt "$needpgs" ]; then
		printf "Not enough huge pages available (%d < %d)\n" \
		       "$freepgs" "$needpgs"
	fi
else
	echo "no hugetlbfs support in kernel?"
	exit 1
fi

# filter 64bit architectures
ARCH64STR="arm64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sparc64 x86_64"
if [ -z "$ARCH" ]; then
	ARCH=$(uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/')
fi
VADDR64=0
echo "$ARCH64STR" | grep "$ARCH" &>/dev/null && VADDR64=1
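# Example: on arm64, "uname -m" reports aarch64, which the sed above rewrites
# to arm64; arm64 is in ARCH64STR, so VADDR64 ends up set to 1.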

tap_prefix() {
	sed -e "s/^/${TAP_PREFIX}/"
}

tap_output() {
	if [[ -n "$TAP_PREFIX" ]]; then
		read -r str
		echo "$str"
	fi
}
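# With the default "# " prefix, test output and banners become TAP diagnostic
# lines via tap_prefix, while "ok"/"not ok" result lines pass through
# tap_output unchanged; with -n, tap_output drops the result lines entirely.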

pretty_name() {
	echo "$*" | sed -e 's/^\(bash \)\?\.\///'
}

# Usage: run_test [test binary] [arbitrary test arguments...]
run_test() {
	if test_selected ${CATEGORY}; then
		# On memory-constrained systems some tests can fail to allocate hugepages.
		# Perform some cleanup before the test for a higher success rate.
		if [ "${CATEGORY}" == "thp" ] || [ "${CATEGORY}" == "hugetlb" ]; then
			echo 3 > /proc/sys/vm/drop_caches
			sleep 2
			echo 1 > /proc/sys/vm/compact_memory
			sleep 2
		fi

		local test=$(pretty_name "$*")
		local title="running $*"
		local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -)
		printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" | tap_prefix

		("$@" 2>&1) | tap_prefix
		local ret=${PIPESTATUS[0]}
		count_total=$(( count_total + 1 ))
		if [ $ret -eq 0 ]; then
			count_pass=$(( count_pass + 1 ))
			echo "[PASS]" | tap_prefix
			echo "ok ${count_total} ${test}" | tap_output
		elif [ $ret -eq $ksft_skip ]; then
			count_skip=$(( count_skip + 1 ))
			echo "[SKIP]" | tap_prefix
			echo "ok ${count_total} ${test} # SKIP" | tap_output
			exitcode=$ksft_skip
		else
			count_fail=$(( count_fail + 1 ))
			echo "[FAIL]" | tap_prefix
			echo "not ok ${count_total} ${test} # exit=$ret" | tap_output
			exitcode=1
		fi
	fi # test_selected
}
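# Example: CATEGORY="mmap" run_test ./map_fixed_noreplace emits
# "ok <N> map_fixed_noreplace" on success, "ok <N> map_fixed_noreplace # SKIP"
# when the test exits with $ksft_skip (4), and "not ok <N> ... # exit=<code>"
# on any other failure.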

echo "TAP version 13" | tap_output

CATEGORY="hugetlb" run_test ./hugepage-mmap

shmmax=$(cat /proc/sys/kernel/shmmax)
shmall=$(cat /proc/sys/kernel/shmall)
echo 268435456 > /proc/sys/kernel/shmmax
echo 4194304 > /proc/sys/kernel/shmall
CATEGORY="hugetlb" run_test ./hugepage-shm
echo "$shmmax" > /proc/sys/kernel/shmmax
echo "$shmall" > /proc/sys/kernel/shmall

CATEGORY="hugetlb" run_test ./map_hugetlb
CATEGORY="hugetlb" run_test ./hugepage-mremap
CATEGORY="hugetlb" run_test ./hugepage-vmemmap
CATEGORY="hugetlb" run_test ./hugetlb-madvise
CATEGORY="hugetlb" run_test ./hugetlb_dio

nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
# For this test, we need exactly one huge page
echo 1 > /proc/sys/vm/nr_hugepages
CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv
CATEGORY="hugetlb" run_test ./hugetlb_madv_vs_map
# Restore the previous number of huge pages, since further tests rely on it
echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages

if test_selected "hugetlb"; then
	echo "NOTE: These hugetlb tests provide minimal coverage.  Use"	  | tap_prefix
	echo "      https://github.com/libhugetlbfs/libhugetlbfs.git for" | tap_prefix
	echo "      hugetlb regression testing."			  | tap_prefix
fi

CATEGORY="mmap" run_test ./map_fixed_noreplace

if $RUN_ALL; then
    run_gup_matrix
else
    # get_user_pages_fast() benchmark
    CATEGORY="gup_test" run_test ./gup_test -u
    # pin_user_pages_fast() benchmark
    CATEGORY="gup_test" run_test ./gup_test -a
fi
# Dump pages 0, 19, and 4096 (0x1000), using pin_user_pages:
CATEGORY="gup_test" run_test ./gup_test -ct -F 0x1 0 19 0x1000
CATEGORY="gup_test" run_test ./gup_longterm

CATEGORY="userfaultfd" run_test ./uffd-unit-tests
uffd_stress_bin=./uffd-stress
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} anon 20 16
# Hugetlb tests require source and destination huge pages. Pass in half
# the size of the free pages we have, which is used for *each*.
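# Example: with 128 free huge pages reported in /proc/meminfo, the value
# passed below works out to 128 / 2 = 64 for each of the two areas.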
half_ufd_size_MB=$((freepgs / 2))
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem-private 20 16

# cleanup: restore the original nr_hugepages value
echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages

CATEGORY="compaction" run_test ./compaction_test

if command -v sudo &> /dev/null; then
	CATEGORY="mlock" run_test sudo -u nobody ./on-fault-limit
else
	echo "# SKIP ./on-fault-limit"
fi

CATEGORY="mmap" run_test ./map_populate

CATEGORY="mlock" run_test ./mlock-random-test

CATEGORY="mlock" run_test ./mlock2-tests

CATEGORY="process_mrelease" run_test ./mrelease_test

CATEGORY="mremap" run_test ./mremap_test

CATEGORY="hugetlb" run_test ./thuge-gen
CATEGORY="hugetlb" run_test ./charge_reserved_hugetlb.sh -cgroup-v2
CATEGORY="hugetlb" run_test ./hugetlb_reparenting_test.sh -cgroup-v2
if $RUN_DESTRUCTIVE; then
	nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
	enable_soft_offline=$(cat /proc/sys/vm/enable_soft_offline)
	echo 8 > /proc/sys/vm/nr_hugepages
	CATEGORY="hugetlb" run_test ./hugetlb-soft-offline
	echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
	echo "$enable_soft_offline" > /proc/sys/vm/enable_soft_offline
	CATEGORY="hugetlb" run_test ./hugetlb-read-hwpoison
fi

if [ $VADDR64 -ne 0 ]; then

	# set overcommit_policy as OVERCOMMIT_ALWAYS so that the kernel
	# allows high virtual address allocation requests independent
	# of the platform's physical memory.

	prev_policy=$(cat /proc/sys/vm/overcommit_memory)
	echo 1 > /proc/sys/vm/overcommit_memory
	CATEGORY="hugevm" run_test ./virtual_address_range
	echo $prev_policy > /proc/sys/vm/overcommit_memory

	# va high address boundary switch test
	ARCH_ARM64="arm64"
	prev_nr_hugepages=$(cat /proc/sys/vm/nr_hugepages)
	if [ "$ARCH" == "$ARCH_ARM64" ]; then
		echo 6 > /proc/sys/vm/nr_hugepages
	fi
	CATEGORY="hugevm" run_test bash ./va_high_addr_switch.sh
	if [ "$ARCH" == "$ARCH_ARM64" ]; then
		echo $prev_nr_hugepages > /proc/sys/vm/nr_hugepages
	fi
fi # VADDR64

# vmalloc stability smoke test
CATEGORY="vmalloc" run_test bash ./test_vmalloc.sh smoke

CATEGORY="mremap" run_test ./mremap_dontunmap

CATEGORY="hmm" run_test bash ./test_hmm.sh smoke

# MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
CATEGORY="madv_populate" run_test ./madv_populate

if [ -x ./memfd_secret ]
then
	(echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix
	CATEGORY="memfd_secret" run_test ./memfd_secret
fi

# KSM KSM_MERGE_TIME_HUGE_PAGES test with size of 100
CATEGORY="ksm" run_test ./ksm_tests -H -s 100
# KSM KSM_MERGE_TIME test with size of 100
CATEGORY="ksm" run_test ./ksm_tests -P -s 100
# KSM MADV_MERGEABLE test with 10 identical pages
CATEGORY="ksm" run_test ./ksm_tests -M -p 10
# KSM unmerge test
CATEGORY="ksm" run_test ./ksm_tests -U
# KSM test with 10 zero pages and use_zero_pages = 0
CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 0
# KSM test with 10 zero pages and use_zero_pages = 1
CATEGORY="ksm" run_test ./ksm_tests -Z -p 10 -z 1
# KSM test with 2 NUMA nodes and merge_across_nodes = 1
CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 1
# KSM test with 2 NUMA nodes and merge_across_nodes = 0
CATEGORY="ksm_numa" run_test ./ksm_tests -N -m 0

CATEGORY="ksm" run_test ./ksm_functional_tests

# protection_keys tests
nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
if [ -x ./protection_keys_32 ]
then
	CATEGORY="pkey" run_test ./protection_keys_32
fi

if [ -x ./protection_keys_64 ]
then
	CATEGORY="pkey" run_test ./protection_keys_64
fi
echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages

if [ -x ./soft-dirty ]
then
	CATEGORY="soft_dirty" run_test ./soft-dirty
fi

CATEGORY="pagemap" run_test ./pagemap_ioctl

# COW tests
CATEGORY="cow" run_test ./cow

CATEGORY="thp" run_test ./khugepaged

CATEGORY="thp" run_test ./khugepaged -s 2

CATEGORY="thp" run_test ./transhuge-stress -d 20

# Try to create XFS if not provided
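# (the backing image created below is 314572800 bytes, i.e. 300 MiB, loop-
# mounted and handed to split_huge_page_test, then cleaned up afterwards)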
if [ -z "${SPLIT_HUGE_PAGE_TEST_XFS_PATH}" ]; then
    if test_selected "thp"; then
        if grep xfs /proc/filesystems &>/dev/null; then
            XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX)
            SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX)
            truncate -s 314572800 ${XFS_IMG}
            mkfs.xfs -q ${XFS_IMG}
            mount -o loop ${XFS_IMG} ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
            MOUNTED_XFS=1
        fi
    fi
fi

CATEGORY="thp" run_test ./split_huge_page_test ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}

if [ -n "${MOUNTED_XFS}" ]; then
    umount ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
    rmdir ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
    rm -f ${XFS_IMG}
fi

CATEGORY="migration" run_test ./migration

CATEGORY="mkdirty" run_test ./mkdirty

CATEGORY="mdwe" run_test ./mdwe_test

echo "SUMMARY: PASS=${count_pass} SKIP=${count_skip} FAIL=${count_fail}" | tap_prefix
echo "1..${count_total}" | tap_output

exit $exitcode