#!/usr/bin/ksh -p

#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source.  A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2017 by Delphix. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib

#
# DESCRIPTION:
#	Ensure that all levels of reserved slop space are
#	respected by ZFS.
#
# STRATEGY:
#	1. Create a nested test pool with two filesystems
#	2. On the first filesystem create a big file that holds
#	   a large portion of the pool's space. Then overwrite it
#	   in such a way that if we free it after taking a
#	   checkpoint it will append a lot of small entries to
#	   the checkpoint's space map
#	3. Checkpoint the pool
#	4. On the second filesystem, create a file and keep writing
#	   to it until we hit the first level of reserved space
#	   (128M)
#	5. Then start adding properties to the second filesystem
#	   until we hit the second level of reserved space (64M)
#	6. Destroy the first filesystem and wait until the async
#	   destroys of this operation hit the last level of
#	   reserved space (32M)
#	7. Attempt to destroy the second filesystem (should fail)
#	8. Discard the checkpoint
#
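#
# Note on the limits above: with the default spa_slop_shift of 5 and
# a 128M minimum slop, the ~4G nested pool used below should get a
# slop reservation of MAX(4G / 32, 128M) = 128M. Normal writes are
# refused once free space drops to that limit, operations such as
# property sets and destroys may dig into half of it (64M), and only
# a few operations, such as discarding a checkpoint, may use the
# remaining quarter (32M).
#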

DISK="$(echo $DISKS | cut -d' ' -f1)"
DISKFS=$TESTPOOL/disks

NESTEDPOOL=nestedpool

FILEDISKSIZE=4g
FILEDISKLOCATION=/$DISKFS
FILEDISK=$FILEDISKLOCATION/dsk1

FS0=$NESTEDPOOL/fs0
FS1=$NESTEDPOOL/fs1

FS0FILE=/$FS0/file
FS1FILE=/$FS1/file

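#
# The checkpointed file is written as 3200 1M blocks (~3.2G). At a
# 512-byte recordsize that is 3200 * 1024 * 1024 / 512 = 6,553,600
# blocks, and overwriting every second one of them takes 3,276,800
# single-block writes.
#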
CKPOINTEDFILEBLOCKS=3200
NUMOVERWRITTENBLOCKS=$(($CKPOINTEDFILEBLOCKS * 1024 * 1024 / 512 / 2))

verify_runnable "global"

function test_cleanup
{
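	#
	# Reset zfs_async_block_max_blocks to an effectively unlimited
	# value; it is lowered near the end of this test.
	#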
	log_must mdb_ctf_set_int zfs_async_block_max_blocks 0xffffffffffffffff
	poolexists $NESTEDPOOL && destroy_pool $NESTEDPOOL
	log_must zpool destroy $TESTPOOL
}

function wait_until_extra_reserved
{
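	#
	# Both loops below extract the FREE column (fourth field) of
	# the 'zpool list' output for $NESTEDPOOL and key off its unit
	# suffix and numeric value.
	#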
	#
	# Loop until the pool's free space drops from gigabytes
	# to megabytes
	#
	size_range=$(zpool list | awk '{print $1,$4}' | \
	    grep $NESTEDPOOL | awk '{print $2}' | grep G)
	while [ "" != "$size_range" ]; do
		sleep 5
		size_range=$(zpool list | awk '{print $1,$4}' | \
		    grep $NESTEDPOOL | awk '{print $2}' | grep G)
	done


	#
	# Loop until we hit the 32M limit
	#
	free=$(zpool list | awk '{print $1,$4}' | \
	    grep $NESTEDPOOL | awk '{print $2}' | cut -d"M" -f1 | \
	    cut -d"." -f1)
	while (( $free > 32 )); do
		sleep 5
		free=$(zpool list | awk '{print $1,$4}' | \
		    grep $NESTEDPOOL | awk '{print $2}' | cut -d"M" -f1 | \
		    cut -d"." -f1)
	done

	#
	# Even though we may have hit the 32M limit, we
	# still need to wait to ensure that we are at
	# the stable state where async destroys are suspended.
	#
	sleep 300
}

log_must zpool create $TESTPOOL $DISK
log_onexit test_cleanup

log_must zfs create $DISKFS

log_must mkfile -n $FILEDISKSIZE $FILEDISK
log_must zpool create $NESTEDPOOL $FILEDISK
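#
# The 512-byte recordsize maximizes the number of distinct blocks
# (and therefore potential space map entries) generated by the files
# written below.
#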
log_must zfs create -o recordsize=512 $FS0
log_must zfs create -o recordsize=512 $FS1


#
# Create a ~3.2G file and ensure it is
# synced to disk
#
log_must dd if=/dev/zero of=$FS0FILE bs=1M count=$CKPOINTEDFILEBLOCKS
log_must sync

# for debugging purposes
log_must zpool list $NESTEDPOOL

#
# Overwrite every second block of the file.
# The idea is to create long space map regions
# with consecutive entries that alternate
# between ALLOCATED and FREE. This way we
# attempt to keep the space maps long and
# fragmented.
#
# So later, when there is a checkpoint and we
# destroy the filesystem, all of these entries
# should be copied over to the checkpoint's
# space map, consuming space until even the
# extra reserved slop limit is exceeded.
#
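#
# Note on the dd flags: ostride=2 advances the output offset by two
# blocks per block written, so only every second 512-byte block is
# overwritten, and conv=notrunc leaves the rest of the file intact.
#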
log_must dd if=/dev/zero of=$FS0FILE bs=512 ostride=2 \
    count=$NUMOVERWRITTENBLOCKS conv=notrunc

# for debugging purposes
log_must zpool list $NESTEDPOOL

log_must zpool checkpoint $NESTEDPOOL

#
# Keep writing to the pool until we get to
# the first slop space limit.
#
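#
# dd is expected to eventually fail with ENOSPC once free space
# drops to that 128M limit, hence log_mustnot.
#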
log_mustnot dd if=/dev/zero of=$FS1FILE bs=512

# for debugging purposes
log_must zpool list $NESTEDPOOL

#
# Keep adding properties to our second
# filesystem until we hit the second
# slop space limit.
#
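#
# Each property value is 6000 bytes of random data, roughly 8000
# characters once base64-encoded (4/3 expansion), so every zfs set
# below attaches about 800K of property data (100 properties), and
# the 100 outer iterations can add on the order of 80M in total,
# more than enough to push free space from 128M down to 64M.
#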
for i in {1..100}
do
	#
	# We use this nested loop logic to fit more
	# properties into one zfs command, reducing
	# the overhead caused by the number of times
	# we wait for a txg to sync (i.e. the number
	# of times we execute zfs(1M))
	#
	PROPERTIES=""
	for j in {1..100}
	do
		PROPVAL=$(dd if=/dev/urandom bs=6000 count=1 | base64 -w 0)
		PROP="user:prop-$i-$j=$PROPVAL"
		PROPERTIES="$PROPERTIES $PROP"
	done
	zfs set $PROPERTIES $FS1 || break
	log_note "- setting properties: iteration $i out of 100 -"
done

for k in {1..100}
do
	#
	# In case we broke out of the loop above because we
	# couldn't fit 100 properties in the space left, make
	# sure to fill up the remaining space by setting one
	# property at a time
	#
	PROPVAL=$(dd if=/dev/urandom bs=6000 count=1 | base64 -w 0)
	PROP="user:prop-extra-$k=$PROPVAL"
	zfs set $PROP $FS1 || break
done

# for debugging purposes
log_must zpool list $NESTEDPOOL

#
# By the time we are done with the loops above
# we should be getting ENOSPC when trying to add
# new properties. As a sanity check though, try
# again (this time with log_mustnot).
#
log_mustnot zfs set user:proptest="should fail!" $FS0
log_mustnot zfs set user:proptest="should fail!" $FS1

# for debugging purposes
log_must zpool list $NESTEDPOOL

#
# We are about to destroy the first filesystem,
# but we want to do so in a way that generates
# as many entries as possible in the vdev's
# checkpoint space map. Thus, we reduce the
# number of checkpointed blocks that we "free"
# every txg.
#
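#
# 0t10000 is mdb notation for decimal 10000, i.e. async destroys
# will free at most 10000 blocks per txg.
#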
log_must mdb_ctf_set_int zfs_async_block_max_blocks 0t10000

log_must zfs destroy $FS0

#
# Keep looping until we reach the point where
# we are at the last slop space limit (32.0M)
# and async destroys are suspended.
#
wait_until_extra_reserved

# for debugging purposes
log_must zpool list $NESTEDPOOL

#
# At this point we shouldn't be allowed to
# destroy anything.
#
log_mustnot zfs destroy $FS1

#
# The only operation that should be allowed
# is discarding the checkpoint.
#
log_must zpool checkpoint -d $NESTEDPOOL

wait_discard_finish $NESTEDPOOL

#
# Now that we have space again, we should be
# able to destroy that filesystem.
#
log_must zfs destroy $FS1

log_pass "All levels of slop space work as expected."