#!/usr/bin/ksh -p

#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source.  A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2017 by Delphix. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib

#
# DESCRIPTION:
#	Ensure that all levels of reserved slop space are
#	honored by ZFS.
#
# STRATEGY:
#	1. Create a nested pool with two filesystems
#	2. On the first filesystem create a big file that holds
#	   a large portion of the pool's space, then overwrite it
#	   in such a way that, if we free it after taking a
#	   checkpoint, the free appends a lot of small entries to
#	   the checkpoint's space map
#	3. Checkpoint the pool
#	4. On the second filesystem, create a file and keep writing
#	   to it until we hit the first level of reserved space
#	   (128M; see the note below on where these sizes come from)
#	5. Then start adding properties to the second filesystem
#	   until we hit the second level of reserved space (64M)
#	6. Destroy the first filesystem and wait until the async
#	   destroys triggered by this operation hit the last level
#	   of reserved space (32M)
#	7. Attempt to destroy the second filesystem (should fail)
#	8. Discard the checkpoint
#

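#
# Note on the limits above: for the 4G file-backed pool created below,
# the default slop space (1/32 of pool capacity, i.e. the default
# spa_slop_shift of 5) works out to roughly 128M. The 64M level
# presumably corresponds to operations that are allowed to dip into
# half of the slop space, and the 32M level to the extra reserved
# portion kept for operations such as discarding the checkpoint.
# These figures interpret the limits listed above; they are not
# queried from the pool at run time.
#
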
DISK="$(echo $DISKS | cut -d' ' -f1)"
DISKFS=$TESTPOOL/disks

NESTEDPOOL=nestedpool

FILEDISKSIZE=4g
FILEDISKLOCATION=/$DISKFS
FILEDISK=$FILEDISKLOCATION/dsk1

FS0=$NESTEDPOOL/fs0
FS1=$NESTEDPOOL/fs1

FS0FILE=/$FS0/file
FS1FILE=/$FS1/file

CKPOINTEDFILEBLOCKS=3200
NUMOVERWRITTENBLOCKS=$(($CKPOINTEDFILEBLOCKS * 1024 * 1024 / 512 / 2))
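
#
# The values above: CKPOINTEDFILEBLOCKS is the size of the
# checkpointed file in MiB (3200M, i.e. ~3.2G). Converted to
# 512-byte blocks that is 3200 * 1024 * 1024 / 512 = 6,553,600
# blocks, and the final division by 2 gives the number of blocks
# that get overwritten below, since only every second block of
# the file is rewritten.
#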

verify_runnable "global"

function test_cleanup
{
	log_must set_tunable64 zfs_async_block_max_blocks \
	    $(printf "%llu" 0xffffffffffffffff)
	poolexists $NESTEDPOOL && destroy_pool $NESTEDPOOL
	log_must zpool destroy $TESTPOOL
}

function wait_until_extra_reserved
{
	#
	# Loop until we get from gigabytes to megabytes
	#
	size_range=$(zpool list | awk '{print $1,$4}' | \
	    grep $NESTEDPOOL | awk '{print $2}' | grep G)
	while [ "" != "$size_range" ]; do
		sleep 5
		size_range=$(zpool list | awk '{print $1,$4}' | \
		    grep $NESTEDPOOL | awk '{print $2}' | grep G)
	done

	#
	# Loop until we hit the 32M limit
	#
	free=$(zpool list | awk '{print $1,$4}' | \
	    grep $NESTEDPOOL | awk '{print $2}' | cut -d"M" -f1 | \
	    cut -d"." -f1)
	while (( free > 32 )); do
		sleep 5
		free=$(zpool list | awk '{print $1,$4}' | \
		    grep $NESTEDPOOL | awk '{print $2}' | cut -d"M" -f1 | \
		    cut -d"." -f1)
	done

	#
	# Even though we may have hit the 32M limit we
	# still need to wait to ensure that we are at
	# the stable state where async destroys are suspended.
	#
	sleep 300
}

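#
# The helper above simply polls the FREE column of "zpool list" until
# the nested pool is down to the 32M extra reserved limit, then sleeps
# a while longer so the async destroy has settled into its suspended
# state. The backlog of the async destroy could presumably also be
# observed through the pool's "freeing" property, e.g.:
#
#	zpool get freeing $NESTEDPOOL
#
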
log_must zpool create $TESTPOOL $DISK
log_onexit test_cleanup

log_must zfs create $DISKFS

log_must mkfile -n $FILEDISKSIZE $FILEDISK
log_must zpool create $NESTEDPOOL $FILEDISK
log_must zfs create -o recordsize=512 $FS0
log_must zfs create -o recordsize=512 $FS1

#
# Create a ~3.2G file and ensure it is
# synced to disk
#
log_must dd if=/dev/zero of=$FS0FILE bs=1M count=$CKPOINTEDFILEBLOCKS
log_must sync

# for debugging purposes
log_must zpool list $NESTEDPOOL

#
# Overwrite every second block of the file.
# The idea is to create long space map regions
# where consecutive entries alternate between
# ALLOCATED and FREE, keeping the space maps
# long and fragmented.
#
# So later, when there is a checkpoint and we
# destroy the filesystem, all of these entries
# should be copied over to the checkpoint's
# space map, pushing the pool's used capacity
# beyond the extra reserved slop space.
#
log_must dd if=/dev/zero of=$FS0FILE bs=512 ostride=2 \
    count=$NUMOVERWRITTENBLOCKS conv=notrunc
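
#
# Note: ostride is a dd option available in illumos dd(1M) that spaces
# out the output writes. With bs=512 and ostride=2, each 512-byte
# write lands two blocks apart, so roughly half of the ~3.2G file
# (about 1.6G in 512-byte chunks) is rewritten and every other block
# is left intact.
#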

# for debugging purposes
log_must zpool list $NESTEDPOOL

log_must zpool checkpoint $NESTEDPOOL

#
# Keep writing to the pool until we get to
# the first slop space limit.
#
log_mustnot dd if=/dev/zero of=$FS1FILE bs=512
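#
# The dd above is expected to fail (presumably with ENOSPC) once only
# the first level of slop space is left in the pool, which is why it
# is wrapped in log_mustnot.
#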

# for debugging purposes
log_must zpool list $NESTEDPOOL

#
# Keep adding properties to our second
# filesystem until we hit the second
# slop space limit.
#
for i in {1..100}
do
	#
	# We use this nested loop logic to fit more
	# properties in one zfs command, reducing
	# the overhead caused by the number of times
	# we wait for a txg to sync (i.e. the number
	# of times we execute zfs(8))
	#
	PROPERTIES=""
	for j in {1..100}
	do
		PROPVAL=$(dd if=/dev/urandom bs=6000 count=1 | base64 -w 0)
		PROP="user:prop-$i-$j=$PROPVAL"
		PROPERTIES="$PROPERTIES $PROP"
	done
	zfs set $PROPERTIES $FS1 || break
	log_note "- setting properties: iteration $i out of 100 -"
done
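
#
# Rough sizing of the loop above: base64 turns 6000 random bytes into
# roughly 8000 characters, so each property value is about 8K and each
# "zfs set" call adds roughly 800K of property data. A full run of 100
# iterations would add ~80M, which (allowing for metadata overhead)
# should be more than enough to take the pool from the 128M limit down
# to the 64M one; that is why the loop simply breaks on the first
# failed "zfs set".
#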

for k in {1..100}
do
	#
	# In case we broke out of the loop above because we
	# couldn't fit 100 props in the space left, make sure
	# to fill up the space that's left by setting one property
	# at a time
	#
	PROPVAL=$(dd if=/dev/urandom bs=6000 count=1 | base64 -w 0)
	PROP="user:prop-extra-$k=$PROPVAL"
	zfs set $PROP $FS1 || break
done

# for debugging purposes
log_must zpool list $NESTEDPOOL

#
# By the time we are done with the loop above
# we should be getting ENOSPC for trying to add
# new properties. As a sanity check though, try
# again (this time with log_mustnot).
#
log_mustnot zfs set user:proptest="should fail!" $FS0
log_mustnot zfs set user:proptest="should fail!" $FS1

# for debugging purposes
log_must zpool list $NESTEDPOOL

#
# We are about to destroy the first filesystem,
# but we want to do so in a way that generates
# as many entries as possible in the vdev's
# checkpoint space map. Thus, we reduce the
# number of checkpointed blocks that we "free"
# every txg.
#
log_must set_tunable64 zfs_async_block_max_blocks 10000
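
#
# With roughly 3.3 million overwritten 512-byte blocks to free
# (see NUMOVERWRITTENBLOCKS above) and at most 10000 blocks freed
# per txg, the destroy below should be spread over several hundred
# txgs, each of which appends more entries to the checkpoint's
# space map.
#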

log_must zfs destroy $FS0

#
# Wait until we are down to the last slop space
# limit (32M) and async destroys are suspended.
#
wait_until_extra_reserved

# for debugging purposes
log_must zpool list $NESTEDPOOL

#
# At this point we shouldn't be allowed to
# destroy anything.
#
log_mustnot zfs destroy $FS1

#
# The only operation that should still be
# allowed is discarding the checkpoint.
#
log_must zpool checkpoint -d $NESTEDPOOL

wait_discard_finish $NESTEDPOOL
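
#
# Discarding the checkpoint releases the blocks that were being held
# for it (including the long space map built up above), so free space
# should climb back above the slop limits.
#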

#
# Now that we have space again, we should be
# able to destroy that filesystem.
#
log_must zfs destroy $FS1

log_pass "All levels of slop space work as expected."