#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source.  A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2017, 2018 by Delphix. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/removal/removal.kshlib

#
# In general, all the tests related to the pool checkpoint can
# be divided into two categories: tests that verify features
# provided by the checkpoint (e.g. checkpoint_rewind) and tests
# that stress-test the checkpoint (e.g. checkpoint_big_rewind).
#
# For the first group we don't really care about the size of
# the pool or the individual file sizes within the filesystems.
# This is why these tests run directly on pools that use a
# "real disk vdev" (meaning not a file-based one). These tests
# use the $TESTPOOL pool that is created on top of $TESTDISK.
# This pool is referred to as the "test pool" and thus all
# the tests of this group use the testpool-related functions of
# this file (not the nested_pools ones).
#
# For the second group we generally try to bring the pool to its
# limits by increasing fragmentation, filling all allocatable
# space, attempting to use vdevs that the checkpoint spacemap
# cannot represent, etc. For these tests we need to control
# almost all parameters of the pool and the vdevs that back it,
# so we create them based on file-based vdevs that we carefully
# create within the $TESTPOOL pool. So most of these tests, in
# order to create this nested pool structure, generally start
# like this:
# 1] We create the test pool ($TESTPOOL).
# 2] We create a filesystem and we populate it with files of
#    some predetermined size.
# 3] We use those files as vdevs for the pool that the test
#    will use ($NESTEDPOOL).
# 4] Go on and let the test run and operate on $NESTEDPOOL.
#    (A hypothetical sketch of these steps follows right below.)
#
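
#
# Hypothetical sketch (not an actual test of the suite) of how a
# second-category test is typically laid out with the helpers defined
# further down in this file; the steps between taking the checkpoint
# and the final verification vary per test:
#
function example_nested_pool_test
{
	setup_nested_pools			# steps 1-3 above
	fragment_before_checkpoint		# populate/fragment $NESTEDPOOL
	log_must zpool checkpoint $NESTEDPOOL	# take the checkpoint
	fragment_after_checkpoint_and_verify	# step 4: operate on $NESTEDPOOL
	cleanup_nested_pools
}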

#
# These disks are used to back $TESTPOOL
#
TESTDISK="$(echo $DISKS | cut -d' ' -f1)"
EXTRATESTDISK="$(echo $DISKS | cut -d' ' -f2)"

FS0=$TESTPOOL/$TESTFS
FS1=$TESTPOOL/$TESTFS1
FS2=$TESTPOOL/$TESTFS2

FS0FILE=/$FS0/$TESTFILE0
FS1FILE=/$FS1/$TESTFILE1
FS2FILE=/$FS2/$TESTFILE2

#
# The following are created within $TESTPOOL and
# will be used to back $NESTEDPOOL
#
DISKFS=$TESTPOOL/disks
FILEDISKDIR=/$DISKFS
FILEDISK1=/$DISKFS/dsk1
FILEDISK2=/$DISKFS/dsk2
FILEDISKS="$FILEDISK1 $FILEDISK2"

#
# $NESTEDPOOL related variables
#
NESTEDPOOL=nestedpool
NESTEDFS0=$NESTEDPOOL/$TESTFS
NESTEDFS1=$NESTEDPOOL/$TESTFS1
NESTEDFS2=$NESTEDPOOL/$TESTFS2
NESTEDFS0FILE=/$NESTEDFS0/$TESTFILE0
NESTEDFS1FILE=/$NESTEDFS1/$TESTFILE1
NESTEDFS2FILE=/$NESTEDFS2/$TESTFILE2

#
# In the tests that stress-test the pool (second category
# mentioned above), some need to bring fragmentation to high
# percentages in a relatively short period of time. In order
# to do that we set the following parameters:
#
# * We use two disks of 1G each, to create a pool of size 2G.
#   The point is that 2G is neither small nor large, and we also
#   want to have 2 disks to introduce indirect vdevs in our
#   setup.
# * We enable compression and set the record size of all
#   filesystems to 8K. The point of compression is to
#   ensure that we are not filling up the whole pool (that's
#   what checkpoint_capacity is for), and the specific
#   record size is set to match the block size of randwritecomp
#   which is used to increase fragmentation by writing on
#   files.
# * We always have 2 big files present of 512M each, which
#   should account for 40%~50% of capacity by the end of each
#   test with fragmentation around 50~60%.
# * At each file we attempt to do enough random writes to
#   touch every offset twice on average.
#
# Note that the number of random writes per file is based
# on the following calculation:
#
# ((512M / 8K) * 3) * 2 = ~400000
#
# Given that the file is 512M and one write is 8K, we would
# need (512M / 8K) writes to go through the whole file.
# Assuming though that each write has a compression ratio of
# 3, then we want 3 times that to cover the same amount of
# space. Finally, we multiply that by 2 since our goal is to
# touch each offset twice on average. (A hypothetical helper
# re-deriving this figure is sketched below.)
#
# Examples of those tests are checkpoint_big_rewind and
# checkpoint_discard_busy.
#
FILEDISKSIZE=1g
DISKSIZE=1g
BIGFILESIZE=512M
RANDOMWRITES=400000
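
#
# Hypothetical helper (not used by any test) that re-derives the
# RANDOMWRITES figure from the sizes documented above, mirroring the
# ((512M / 8K) * 3) * 2 calculation:
#
function expected_random_writes
{
	typeset -i fsize=$((512 * 1024 * 1024))	# $BIGFILESIZE in bytes
	typeset -i bsize=$((8 * 1024))		# recordsize in bytes
	typeset -i ratio=3			# assumed compression ratio
	echo $(((fsize / bsize) * ratio * 2))	# 393216, i.e. ~400000
}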


#
# Assumes setup_test_pool has been called beforehand.
#
function setup_nested_pool
{
	log_must zfs create $DISKFS

	log_must mkfile $DISKSIZE $FILEDISK1
	log_must mkfile $DISKSIZE $FILEDISK2

	log_must zpool create $NESTEDPOOL $FILEDISKS
}

function setup_test_pool
{
	log_must zpool create $TESTPOOL "$TESTDISK"
}

function setup_nested_pools
{
	setup_test_pool
	setup_nested_pool
}

function cleanup_nested_pool
{
	if poolexists $NESTEDPOOL; then
		log_must zpool destroy $NESTEDPOOL
	fi
	log_must rm -f $FILEDISKS
}

function cleanup_test_pool
{
	if poolexists $TESTPOOL; then
		log_must zpool destroy $TESTPOOL
	fi

	#
	# We always clear the labels of all disks
	# between tests so imports from zpool or
	# zdb do not get confused with leftover
	# data from old pools.
	#
	for disk in $DISKS; do
		zpool labelclear -f $disk
	done
}

function cleanup_nested_pools
{
	cleanup_nested_pool
	cleanup_test_pool
}

#
# Remove and re-add each vdev to ensure that data is
# moved between disks and indirect mappings are created
#
function introduce_indirection
{
	for disk in ${FILEDISKS[@]}; do
		log_must zpool remove $NESTEDPOOL $disk
		log_must wait_for_removal $NESTEDPOOL
		log_mustnot vdevs_in_pool $NESTEDPOOL $disk
		log_must zpool add $NESTEDPOOL $disk
	done
}

FILECONTENTS0="Can't wait to be checkpointed!"
FILECONTENTS1="Can't wait to be checkpointed too!"
NEWFILECONTENTS0="I survived after the checkpoint!"
NEWFILECONTENTS2="I was born after the checkpoint!"

function populate_test_pool
{
	log_must zfs create -o compression=lz4 -o recordsize=8k $FS0
	log_must zfs create -o compression=lz4 -o recordsize=8k $FS1

	echo $FILECONTENTS0 > $FS0FILE
	echo $FILECONTENTS1 > $FS1FILE
}

function populate_nested_pool
{
	log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS0
	log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS1

	echo $FILECONTENTS0 > $NESTEDFS0FILE
	echo $FILECONTENTS1 > $NESTEDFS1FILE
}

function test_verify_pre_checkpoint_state
{
	log_must zfs list $FS0
	log_must zfs list $FS1
	log_must [ "$(cat $FS0FILE)" = "$FILECONTENTS0" ]
	log_must [ "$(cat $FS1FILE)" = "$FILECONTENTS1" ]

	#
	# If we've opened the checkpointed state of the
	# pool as read-only without rewinding on-disk we
	# can't really use zdb on it.
	#
	if [[ "$1" != "ro-check" ]] ; then
		log_must zdb $TESTPOOL
	fi

	#
	# Ensure post-checkpoint state is not present
	#
	log_mustnot zfs list $FS2
	log_mustnot [ "$(cat $FS0FILE)" = "$NEWFILECONTENTS0" ]
}

function nested_verify_pre_checkpoint_state
{
	log_must zfs list $NESTEDFS0
	log_must zfs list $NESTEDFS1
	log_must [ "$(cat $NESTEDFS0FILE)" = "$FILECONTENTS0" ]
	log_must [ "$(cat $NESTEDFS1FILE)" = "$FILECONTENTS1" ]

	#
	# If we've opened the checkpointed state of the
	# pool as read-only without rewinding on-disk we
	# can't really use zdb on it.
	#
	if [[ "$1" != "ro-check" ]] ; then
		log_must zdb $NESTEDPOOL
	fi

	#
	# Ensure post-checkpoint state is not present
	#
	log_mustnot zfs list $NESTEDFS2
	log_mustnot [ "$(cat $NESTEDFS0FILE)" = "$NEWFILECONTENTS0" ]
}

function test_change_state_after_checkpoint
{
	log_must zfs destroy $FS1
	log_must zfs create -o compression=lz4 -o recordsize=8k $FS2

	echo $NEWFILECONTENTS0 > $FS0FILE
	echo $NEWFILECONTENTS2 > $FS2FILE
}

function nested_change_state_after_checkpoint
{
	log_must zfs destroy $NESTEDFS1
	log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS2

	echo $NEWFILECONTENTS0 > $NESTEDFS0FILE
	echo $NEWFILECONTENTS2 > $NESTEDFS2FILE
}

function test_verify_post_checkpoint_state
{
	log_must zfs list $FS0
	log_must zfs list $FS2
	log_must [ "$(cat $FS0FILE)" = "$NEWFILECONTENTS0" ]
	log_must [ "$(cat $FS2FILE)" = "$NEWFILECONTENTS2" ]

	log_must zdb $TESTPOOL

	#
	# Ensure pre-checkpointed state that was removed post-checkpoint
	# is not present
	#
	log_mustnot zfs list $FS1
	log_mustnot [ "$(cat $FS0FILE)" = "$FILECONTENTS0" ]
}
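
#
# Hypothetical sketch (not an actual test) of the canonical rewind
# flow that the pre/post helpers above are meant to bracket;
# --rewind-to-checkpoint is the zpool import option that rewinds a
# pool to its checkpointed state:
#
function example_rewind_test
{
	setup_test_pool
	populate_test_pool
	log_must zpool checkpoint $TESTPOOL
	test_change_state_after_checkpoint
	log_must zpool export $TESTPOOL
	log_must zpool import --rewind-to-checkpoint $TESTPOOL
	test_verify_pre_checkpoint_state
	cleanup_test_pool
}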

function fragment_before_checkpoint
{
	populate_nested_pool
	log_must mkfile -n $BIGFILESIZE $NESTEDFS0FILE
	log_must mkfile -n $BIGFILESIZE $NESTEDFS1FILE
	log_must randwritecomp $NESTEDFS0FILE $RANDOMWRITES
	log_must randwritecomp $NESTEDFS1FILE $RANDOMWRITES

	#
	# Display fragmentation on test log
	#
	log_must zpool list -v
}

function fragment_after_checkpoint_and_verify
{
	log_must zfs destroy $NESTEDFS1
	log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS2
	log_must mkfile -n $BIGFILESIZE $NESTEDFS2FILE
	log_must randwritecomp $NESTEDFS0FILE $RANDOMWRITES
	log_must randwritecomp $NESTEDFS2FILE $RANDOMWRITES

	#
	# Display fragmentation on test log
	#
	log_must zpool list -v

	#
	# Typically we would just run zdb at this point and things
	# would be fine. Unfortunately, if there is still any
	# background I/O in the pool the zdb command can fail with
	# checksum errors temporarily.
	#
	# Export the pool when running zdb so the pool is idle and
	# the verification results are consistent.
	#
	log_must zpool export $NESTEDPOOL
	log_must zdb -e -p $FILEDISKDIR $NESTEDPOOL
	log_must zdb -e -p $FILEDISKDIR -kc $NESTEDPOOL
	log_must zpool import -d $FILEDISKDIR $NESTEDPOOL
}

function wait_discard_finish
{
	typeset pool="$1"

	typeset status
	status=$(zpool status $pool | grep "checkpoint:")
	while [ "" != "$status" ]; do
		sleep 5
		status=$(zpool status $pool | grep "checkpoint:")
	done
}

function test_wait_discard_finish
{
	wait_discard_finish $TESTPOOL
}

function nested_wait_discard_finish
{
	wait_discard_finish $NESTEDPOOL
}
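
#
# A discard initiated with "zpool checkpoint -d" completes in the
# background, so tests use the helpers above to block until the
# "checkpoint:" line disappears from zpool status, e.g.:
#
#	log_must zpool checkpoint -d $TESTPOOL
#	test_wait_discard_finish
#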

#
# Creating the setup for the second group of tests mentioned in
# the block comment of this file can take some time, as we are doing
# random writes to raise capacity and fragmentation before taking
# the checkpoint. Thus we create this setup once and save the
# disks of the nested pool in a temporary directory, where we can
# reuse them for each test that requires that setup.
#
SAVEDPOOLDIR="/var/tmp/ckpoint_saved_pool"

function test_group_premake_nested_pools
{
	setup_nested_pools

	#
	# Populate and fragment the pool.
	#
	fragment_before_checkpoint

	#
	# Export and save the pool for other tests.
	#
	log_must zpool export $NESTEDPOOL
	log_must mkdir $SAVEDPOOLDIR
	log_must cp $FILEDISKS $SAVEDPOOLDIR

	#
	# Re-import the pool so it can be destroyed by the
	# cleanup_nested_pools function.
	#
	log_must zpool import -d $FILEDISKDIR $NESTEDPOOL
}

function test_group_destroy_saved_pool
{
	log_must rm -rf $SAVEDPOOLDIR
}

#
# Recreate nested pool setup from saved pool.
#
function setup_nested_pool_state
{
	setup_test_pool

	log_must zfs create $DISKFS
	log_must cp $SAVEDPOOLDIR/* $FILEDISKDIR

	log_must zpool import -d $FILEDISKDIR $NESTEDPOOL
}
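
#
# A hypothetical sketch (not an actual test group) of the saved-pool
# lifecycle: premake once in the group's setup script, restore per
# test, and remove the saved copy in the group's cleanup script:
#
#	test_group_premake_nested_pools		# group setup
#	...
#	setup_nested_pool_state			# each test's setup
#	...					# test body on $NESTEDPOOL
#	cleanup_nested_pools			# each test's cleanup
#	...
#	test_group_destroy_saved_pool		# group cleanup
#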
418