xref: /illumos-gate/usr/src/test/zfs-tests/tests/perf/perf.shlib (revision c160bf3613805cfb4a89a0433ae896d3594f551f)
1#
2# This file and its contents are supplied under the terms of the
3# Common Development and Distribution License ("CDDL"), version 1.0.
4# You may only use this file in accordance with the terms of version
5# 1.0 of the CDDL.
6#
7# A full copy of the text of the CDDL should have accompanied this
8# source.  A copy of the CDDL is also available via the Internet at
9# http://www.illumos.org/license/CDDL.
10#
11
12#
13# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
14#
15
16. $STF_SUITE/include/libtest.shlib
17
18# If neither is specified, do a nightly run.
19[[ -z $PERF_REGRESSION_WEEKLY ]] && export PERF_REGRESSION_NIGHTLY=1
20
21# Default runtime for each type of test run.
22export PERF_RUNTIME_WEEKLY=$((30 * 60))
23export PERF_RUNTIME_NIGHTLY=$((10 * 60))
24
25# Default fs creation options
26export PERF_FS_OPTS=${PERF_FS_OPTS:-'-o recsize=8k -o compress=lz4' \
27    ' -o checksum=sha256 -o redundant_metadata=most'}
28
#
# Translate a numeric sync flag into its human-readable label:
# 0 yields "async", 1 yields "sync", anything else an empty string.
#
function get_sync_str
{
	typeset flag=$1
	typeset label=''

	if [[ $flag -eq 0 ]]; then
		label='async'
	elif [[ $flag -eq 1 ]]; then
		label='sync'
	fi

	echo $label
}
38
39#
40# This function will run fio in a loop, according to the .fio file passed
41# in and a number of environment variables. The following variables can be
42# set before launching zfstest to override the defaults.
43#
44# PERF_RUNTIME: The time in seconds each fio invocation should run.
45# PERF_RUNTYPE: A human readable tag that appears in logs. The defaults are
46#    nightly and weekly.
47# PERF_NTHREADS: A list of how many threads each fio invocation will use.
48# PERF_SYNC_TYPES: Whether to use (O_SYNC) or not. 1 is sync IO, 0 is async IO.
49# PERF_IOSIZES: A list of blocksizes in which each fio invocation will do IO.
50# PERF_COLLECT_SCRIPTS: A comma delimited list of 'command args, logfile_tag'
51#    pairs that will be added to the scripts specified in each test.
52#
function do_fio_run
{
	# $1: fio job file name under $FIO_SCRIPTS.
	# $2/$3: executed as commands below, so they must be the literal
	# strings 'true' or 'false'.
	typeset script=$1
	typeset do_recreate=$2
	typeset clear_cache=$3
	typeset threads sync iosize

	# Run fio once for every (threads, sync, iosize) combination.
	for threads in $PERF_NTHREADS; do
		for sync in $PERF_SYNC_TYPES; do
			for iosize in $PERF_IOSIZES; do
				typeset sync_str=$(get_sync_str $sync)
				log_note "Running with $threads" \
				    "$sync_str threads, $iosize ios"

				# Start each run from a pristine pool/fs.
				# NOTE(review): assumes $TESTFS names a
				# dataset inside $PERFPOOL — confirm with
				# caller setup.
				if $do_recreate; then
					recreate_perfpool
					log_must zfs create $PERF_FS_OPTS \
					    $TESTFS
				fi

				if $clear_cache; then
					# Clear the ARC
					zpool export $PERFPOOL
					zpool import $PERFPOOL
				fi

				# Variables consumed by the .fio job file.
				# TOTAL_SIZE is presumably set by the calling
				# test — TODO confirm.
				export RUNTIME=$PERF_RUNTIME
				export FILESIZE=$((TOTAL_SIZE / threads))
				export NUMJOBS=$threads
				export SYNC_TYPE=$sync
				export BLOCKSIZE=$iosize
				sync

				# Start the data collection
				do_collect_scripts $threads $sync $iosize

				# This will be part of the output filename.
				typeset suffix="$sync_str.$iosize-ios.$threads-threads"

				# Define output file
				typeset logbase="$(get_perf_output_dir)/$($BASENAME \
				    $SUDO_COMMAND)"
				typeset outfile="$logbase.fio.$suffix"

				# Start the load
				log_must fio --output $outfile $FIO_SCRIPTS/$script
			done
		done
	done
}
103
104#
105# This function iterates through the value pairs in $PERF_COLLECT_SCRIPTS.
106# The script at index N is launched in the background, with its output
107# redirected to a logfile containing the tag specified at index N + 1.
108#
function do_collect_scripts
{
	# $1/$2/$3: current thread count, sync flag, and IO size; used only
	# to build the per-run logfile suffix.
	typeset threads=$1
	typeset sync=$2
	typeset iosize=$3

	# collect_scripts is an array set up by the calling test as
	# alternating '<command>' / '<logfile tag>' entries; checking the
	# variable name alone tests its first element.
	[[ -n $collect_scripts ]] || log_fail "No data collection scripts."
	[[ -n $PERF_RUNTIME ]] || log_fail "No runtime specified."

	# This will be part of the output filename.
	typeset sync_str=$(get_sync_str $sync)
	typeset suffix="$sync_str.$iosize-ios.$threads-threads"

	# Add in user supplied scripts and logfiles, if any.
	# Split $PERF_COLLECT_SCRIPTS on commas; the sed strips leading
	# spaces from each piece before it is appended to the array.
	typeset oIFS=$IFS
	IFS=','
	for item in $PERF_COLLECT_SCRIPTS; do
		collect_scripts+=($(echo $item | sed 's/^ *//g'))
	done
	IFS=$oIFS

	# Launch each collector in the background, bounded by timeout(1)
	# so it exits when the fio run is expected to finish. Element N is
	# the command, element N + 1 its logfile tag.
	typeset idx=0
	while [[ $idx -lt "${#collect_scripts[@]}" ]]; do
		typeset logbase="$(get_perf_output_dir)/$(basename \
		    $SUDO_COMMAND)"
		typeset outfile="$logbase.${collect_scripts[$idx + 1]}.$suffix"

		timeout $PERF_RUNTIME ${collect_scripts[$idx]} >$outfile 2>&1 &
		((idx += 2))
	done

	# Need to explicitly return 0 because timeout(1) will kill
	# a child process and cause us to return non-zero.
	return 0
}
144
145# Find a place to deposit performance data collected while under load.
# Find a place to deposit performance data collected while under load.
# Emits the directory path on stdout, creating it on first use.
function get_perf_output_dir
{
	typeset outdir="$(pwd)/perf_data"

	if [[ ! -d $outdir ]]; then
		mkdir -p $outdir
	fi

	echo $outdir
}
153
154#
155# Destroy and create the pool used for performance tests. The
156# PERFPOOL_CREATE_CMD variable allows users to test with a custom pool
157# configuration by specifying the pool creation command in their environment.
158# If PERFPOOL_CREATE_CMD is empty, a pool using all available disks is created.
159#
function recreate_perfpool
{
	[[ -n $PERFPOOL ]] || log_fail "The \$PERFPOOL variable isn't set."

	# Tear down any existing instance of the pool before rebuilding it.
	poolexists $PERFPOOL && destroy_pool $PERFPOOL

	# Honor a user-supplied creation command when present; otherwise
	# build a pool from every disk listed in $DISKS.
	if [[ -n $PERFPOOL_CREATE_CMD ]]; then
		log_must $PERFPOOL_CREATE_CMD
	else
		log_must eval "zpool create -f $PERFPOOL $DISKS"
	fi
}
172
# Print the maximum ARC size (arcstat_c_max) in bytes, read from the
# kernel via dtrace. Fails the test if dtrace cannot be run.
function get_max_arc_size
{
	typeset -l max_arc_size

	# Keep the assignment separate from the declaration: with
	# 'typeset var=$(cmd)', $? reflects typeset's (always successful)
	# status, so a dtrace failure was previously never detected.
	max_arc_size=$(dtrace -qn 'BEGIN {
	    printf("%u\n", `arc_stats.arcstat_c_max.value.ui64);
	    exit(0);
	}')

	[[ $? -eq 0 ]] || log_fail "get_max_arc_size failed"

	echo $max_arc_size
}
184
185# Create a file with some information about how this system is configured.
186function get_system_config
187{
188	typeset config=$PERF_DATA_DIR/$1
189
190	echo "{" >>$config
191	dtrace -qn 'BEGIN{
192	    printf("  \"ncpus\": %d,\n", `ncpus);
193	    printf("  \"physmem\": %u,\n", `physmem * `_pagesize);
194	    printf("  \"c_max\": %u,\n", `arc_stats.arcstat_c_max.value.ui64);
195	    printf("  \"kmem_flags\": \"0x%x\",", `kmem_flags);
196	    exit(0)}' >>$config
197	echo "  \"hostname\": \"$(uname -n)\"," >>$config
198	echo "  \"kernel version\": \"$(uname -v)\"," >>$config
199	iostat -En | awk 'BEGIN {
200	    printf("  \"disks\": {\n"); first = 1}
201	    /^c/ {disk = $1}
202	    /^Size: [^0]/ {size = $2;
203	    if (first != 1) {printf(",\n")} else {first = 0}
204	    printf("    \"%s\": \"%s\"", disk, size)}
205	    END {printf("\n  },\n")}' >>$config
206	sed -n 's/^set \(.*\)[ ]=[ ]\(.*\)/\1=\2/p' /etc/system | \
207	    awk -F= 'BEGIN {printf("  \"system\": {\n"); first = 1}
208	    {if (first != 1) {printf(",\n")} else {first = 0};
209	    printf("    \"%s\": %s", $1, $2)}
210	    END {printf("\n  }\n")}' >>$config
211	echo "}" >>$config
212}
213
# Print a suggested fio job count based on CPU count: all CPUs on small
# machines, 3/4 of them (integer math) when more than 8 are present.
function num_jobs_by_cpu
{
	# psrinfo prints one line per processor.
	typeset ncpu=$(psrinfo | wc -l)
	typeset num_jobs=$ncpu

	# Shell arithmetic replaces the previous fork of bc(1); the result
	# is identical integer division.
	[[ $ncpu -gt 8 ]] && num_jobs=$((ncpu * 3 / 4))

	echo $num_jobs
}
223
function pool_to_lun_list
{
	# $1: pool name. Emits a colon-delimited list (with leading and
	# trailing ':') of "<driver><instance>" strings, one per c#t#d#
	# disk in the pool, for comparison with dev_statname.
	typeset pool=$1
	typeset ctd ctds devname lun
	typeset lun_list=':'

	# All ctd-style vdev names appearing in the pool's config.
	ctds=$(zpool list -v $pool | awk '/c[0-9]*t[0-9a-fA-F]*d[0-9]*/ \
	    {print $1}')

	for ctd in $ctds; do
		# Get the device name as it appears in /etc/path_to_inst
		devname=$(readlink -f /dev/dsk/${ctd}s0 | sed -n \
		    's/\/devices\([^:]*\):.*/\1/p')
		# Add a string composed of the driver name and instance
		# number to the list for comparison with dev_statname.
		lun=$(sed 's/"//g' /etc/path_to_inst | grep $devname | awk \
		    '{print $3$2}')
		lun_list="$lun_list$lun:"
	done
	echo $lun_list
}
245
246# Create a perf_data directory to hold performance statistics and
247# configuration information.
248export PERF_DATA_DIR=$(get_perf_output_dir)
249[[ -f $PERF_DATA_DIR/config.json ]] || get_system_config config.json
250