xref: /illumos-gate/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh (revision b7daf79982d77b491ef9662483cd4549e0e5da9a)
1#!/usr/bin/ksh
2
3#
4# This file and its contents are supplied under the terms of the
5# Common Development and Distribution License ("CDDL"), version 1.0.
6# You may only use this file in accordance with the terms of version
7# 1.0 of the CDDL.
8#
9# A full copy of the text of the CDDL should have accompanied this
10# source.  A copy of the CDDL is also available via the Internet at
11# http://www.illumos.org/license/CDDL.
12#
13
14#
15# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
16#
17
18#
19# Description:
20# Trigger fio runs using the sequential_reads job file. The number of runs and
21# data collected is determined by the PERF_* variables. See do_fio_run for
22# details about these variables.
23#
24# The files to read from are created prior to the first fio run, and used
25# for all fio runs. This test will exercise cached read performance from
26# a clone filesystem. The data is initially cached in the ARC and then
27# a snapshot and clone are created. All the performance runs are then
28# initiated against the clone filesystem to exercise the performance of
29# reads when the ARC has to create another buffer from a different dataset.
30# It will also exercise the need to evict the duplicate buffer once the last
31# reference on that buffer is released.
32#
33
34. $STF_SUITE/include/libtest.shlib
35. $STF_SUITE/tests/perf/perf.shlib
36
#
# Invoked via log_onexit. TESTFS is repointed at the clone partway
# through the test, so destroying only $TESTFS would leak the origin
# filesystem and its snapshot after a successful run. Destroy the
# origin recursively (-R) so the snapshot and clone are removed as
# well; this also works if the test failed before the clone existed.
#
function cleanup
{
	log_must zfs destroy -R $PERFPOOL/testfs
}
41
log_assert "Measure IO stats during sequential read load"
log_onexit cleanup

# Start from a freshly created pool and a new filesystem within it.
TESTFS=$PERFPOOL/testfs
export TESTFS
recreate_perfpool
log_must zfs create $PERF_FS_OPTS $TESTFS

# Size the working set at half the ARC so it can be fully cached.
TOTAL_SIZE=$(($(get_max_arc_size) / 2))
export TOTAL_SIZE
51
# Variables for use by fio. Each PERF_* setting may be supplied by the
# environment; otherwise a per-runtype default is used.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
	export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
	export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'}
	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
	export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
	export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
	export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
	export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
else
	# Fail fast with a clear message rather than letting the later
	# arithmetic on the unset PERF_* variables die cryptically.
	log_fail "Neither PERF_REGRESSION_WEEKLY nor" \
	    "PERF_REGRESSION_NIGHTLY is set"
fi
66
#
# Lay out the files used by the read tests. One file is created per
# thread of the largest configured run; a run with fewer threads simply
# reads a subset of these files.
#
NUMJOBS=$(get_max $PERF_NTHREADS)
FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
export NUMJOBS FILE_SIZE
log_must fio $FIO_SCRIPTS/mkfiles.fio
73
# Snapshot the freshly written filesystem and clone it. All subsequent
# reads are issued against the clone, forcing the ARC to materialize a
# second buffer for each block under a different dataset.
log_note "Creating snapshot, $TESTSNAP, of $TESTFS"
create_snapshot $TESTFS $TESTSNAP
log_note "Creating clone, $PERFPOOL/$TESTCLONE, from $TESTFS@$TESTSNAP"
create_clone $TESTFS@$TESTSNAP $PERFPOOL/$TESTCLONE

# From here on, TESTFS names the clone rather than the origin.
TESTFS=$PERFPOOL/$TESTCLONE
export TESTFS
83
# Assemble the (command, output-name) pairs that gather performance
# data while fio runs, then kick off the measured workload.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
collect_scripts=("dtrace -s $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io")
collect_scripts+=("dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch")
collect_scripts+=("vmstat 1" "vmstat")
collect_scripts+=("mpstat 1" "mpstat")
collect_scripts+=("iostat -xcnz 1" "iostat")
collect_scripts+=("dtrace -s $PERF_SCRIPTS/profile.d" "profile")
collect_scripts+=("kstat zfs:0 1" "kstat")
export collect_scripts

log_note "Sequential cached reads from $TESTFS with $PERF_RUNTYPE settings"
do_fio_run sequential_reads.fio false false
log_pass "Measure IO stats during sequential cached read load"
95