xref: /illumos-gate/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads.ksh (revision 8dfe5547fbf0979fc1065a8b6fddc1e940a7cf4f)
#!/usr/bin/ksh

#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source.  A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
#

#
# Description:
# Trigger fio runs using the sequential_reads job file. The number of runs and
# the data collected are determined by the PERF_* variables. See do_fio_run
# for details about these variables.
#
# The files to read from are created prior to the first fio run and are then
# used for all fio runs. The ARC is cleared with `zinject -a` prior to each
# run so that reads are serviced from disk.
#
# Thread/Concurrency settings:
#    PERF_NTHREADS defines the number of files created in the test filesystem,
#    as well as the number of threads that will simultaneously drive IO to
#    those files.  The settings chosen come from measurements in the
#    PerfAutoESX/ZFSPerfESX environments, taken at concurrency levels that
#    deliver peak throughput at the lowest latency.  Higher concurrency adds
#    queue-time latency and would dilute the impact of code-induced
#    performance regressions.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib

function cleanup
{
	log_must zfs destroy $TESTFS
}

log_assert "Measure IO stats during sequential read load"
log_onexit cleanup

export TESTFS=$PERFPOOL/testfs
recreate_perfpool
log_must zfs create $PERF_FS_OPTS $TESTFS

# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
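# (The arithmetic above, with hypothetical numbers: if 100G is available and
# the data compresses 3x, then TOTAL_SIZE = 100G * 3 / 2 = 150G of logical
# data, which should land at roughly 50G on disk, i.e. about half the pool.)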

# Variables for use by fio.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
	export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
	export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
	export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
	export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
	export PERF_NTHREADS=${PERF_NTHREADS:-'8 16'}
	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
	export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
fi
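# Note that each ${VAR:-default} expansion above only supplies a default, so
# any of these PERF_* values may be overridden from the environment before the
# test is invoked (e.g., hypothetically, PERF_IOSIZES='8k' to exercise a
# single I/O size).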

# Lay out the files to be used by the read tests. Create as many files as the
# largest number of threads. An fio run with fewer threads will use a subset
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
log_must fio $FIO_SCRIPTS/mkfiles.fio
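# (Worked example with hypothetical numbers: if TOTAL_SIZE came out to 150G
# and PERF_NTHREADS peaks at 64 threads, then 64 files of 150G / 64, or
# roughly 2.3G each, are laid out here.)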

# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
export collect_scripts=("dtrace -s $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1"
    "io" "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
    "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat"
    "dtrace -s $PERF_SCRIPTS/profile.d" "profile" "kstat zfs:0 1" "kstat")
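# The collect_scripts list appears to be consumed by the perf framework as
# pairs: each collection command is followed by the short name under which
# its output is logged, covering backend io, prefetch, vmstat, mpstat,
# iostat, profile, and kstat data for the duration of each run.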

log_note "Sequential reads with $PERF_RUNTYPE settings"
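# The boolean arguments to do_fio_run below appear to select "do not recreate
# the pool/files between runs" (they were laid out above) and "do clear the
# cache before each run" (so reads hit disk); see do_fio_run in perf.shlib
# for the authoritative argument meanings.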
do_fio_run sequential_reads.fio false true
log_pass "Measure IO stats during sequential read load"