#-
# SPDX-License-Identifier: BSD-2-Clause-FreeBSD
#
# Copyright (c) 2022 The FreeBSD Foundation
#
# This software was developed by Mark Johnston under sponsorship from
# the FreeBSD Foundation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
31MAKEFS="makefs -t zfs -o nowarn=true"
32ZFS_POOL_NAME="makefstest$$"
33TEST_ZFS_POOL_NAME="$TMPDIR/poolname"
34
35. "$(dirname "$0")/makefs_tests_common.sh"
36
# Shared cleanup routine: destroy the imported pool (if any) and detach the
# md(4) device backing the image (if any).  Both state files are created by
# import_image(), so guard against cleanup running after an earlier failure.
common_cleanup()
{
	local pool md

	# Try to force a TXG, this can help catch bugs by triggering a panic.
	sync

	if [ -f "$TEST_ZFS_POOL_NAME" ]; then
		pool=$(cat "$TEST_ZFS_POOL_NAME")
		# Suppress zpool's own noise; we only care whether the pool
		# is currently imported.
		if zpool list "$pool" >/dev/null 2>&1; then
			zpool destroy "$pool"
		fi
	fi

	if [ -f "$TEST_MD_DEVICE_FILE" ]; then
		md=$(cat "$TEST_MD_DEVICE_FILE")
		if [ -c /dev/"$md" ]; then
			mdconfig -d -u "$md"
		fi
	fi
}
54
# Attach the generated image to an md(4) device and import the pool with an
# altroot of $TEST_MOUNT_DIR so all mounts land under the test directory.
# Records the md device name (via atf_check -o save:) and the pool name so
# that common_cleanup() can tear both down.
import_image()
{
	atf_check -e empty -o save:"$TEST_MD_DEVICE_FILE" -s exit:0 \
	    mdconfig -a -f "$TEST_IMAGE"
	atf_check zpool import -R "$TEST_MOUNT_DIR" "$ZFS_POOL_NAME"
	echo "$ZFS_POOL_NAME" > "$TEST_ZFS_POOL_NAME"
}
62
#
# Test autoexpansion of the vdev.
#
# The pool is initially 10GB, so we get 10GB minus one metaslab's worth of
# usable space for data.  Then the pool is expanded to 50GB, and the amount of
# usable space is 50GB minus one metaslab.
#
atf_test_case autoexpand cleanup
autoexpand_body()
{
	local mssize poolsize poolsize1 newpoolsize

	create_test_inputs

	mssize=$((128 * 1024 * 1024))
	poolsize=$((10 * 1024 * 1024 * 1024))
	atf_check $MAKEFS -s $poolsize -o mssize=$mssize -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	# Grow the backing file before importing; the pool itself only
	# expands once the vdev is brought online with "zpool online -e".
	newpoolsize=$((50 * 1024 * 1024 * 1024))
	truncate -s $newpoolsize $TEST_IMAGE

	import_image

	check_image_contents

	# Before expansion: pool size == image size minus one metaslab.
	poolsize1=$(zpool list -Hp -o size $ZFS_POOL_NAME)
	atf_check [ $((poolsize1 + mssize)) -eq $poolsize ]

	atf_check zpool online -e $ZFS_POOL_NAME /dev/$(cat $TEST_MD_DEVICE_FILE)

	check_image_contents

	# After expansion: pool size == new image size minus one metaslab.
	poolsize1=$(zpool list -Hp -o size $ZFS_POOL_NAME)
	atf_check [ $((poolsize1 + mssize)) -eq $newpoolsize ]
}
autoexpand_cleanup()
{
	common_cleanup
}
104
#
# Test with some default layout defined by the common code.
#
atf_test_case basic cleanup
basic_body()
{
	create_test_inputs

	# Build a 10GB image from the standard input hierarchy and verify
	# that its contents match after import.
	atf_check $MAKEFS -s 10g \
	    -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image
	check_image_contents
}
basic_cleanup()
{
	common_cleanup
}
124
atf_test_case dataset_removal cleanup
dataset_removal_body()
{
	create_test_dirs

	# Stage a directory that makefs will turn into a child dataset.
	mkdir ${TEST_INPUTS_DIR}/dir

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image
	check_image_contents

	# A dataset generated by makefs must be destroyable like any other.
	atf_check zfs destroy ${ZFS_POOL_NAME}/dir
}
dataset_removal_cleanup()
{
	common_cleanup
}
148
#
# Make sure that we can create and remove an empty directory.
#
atf_test_case empty_dir cleanup
empty_dir_body()
{
	create_test_dirs

	# Stage a single empty directory.
	mkdir ${TEST_INPUTS_DIR}/dir

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image
	check_image_contents

	# The directory is empty, so rmdir(1) must succeed on the mounted
	# copy.
	atf_check rmdir ${TEST_MOUNT_DIR}/dir
}
empty_dir_cleanup()
{
	common_cleanup
}
174
atf_test_case empty_fs cleanup
empty_fs_body()
{
	create_test_dirs

	# An image built from an empty input tree must still import and
	# compare cleanly.
	atf_check $MAKEFS -s 10g \
	    -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image
	check_image_contents
}
empty_fs_cleanup()
{
	common_cleanup
}
191
atf_test_case file_sizes cleanup
file_sizes_body()
{
	local sz

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# For each power of two below 1MB, create sparse files of that size
	# and of the sizes one byte below and above it, exercising block
	# boundary handling.
	sz=1
	while [ $sz -lt $((1 << 20)) ]; do
		truncate -s $sz ${sz}.1
		truncate -s $((sz - 1)) ${sz}.2
		truncate -s $((sz + 1)) ${sz}.3
		sz=$((sz << 1))
	done

	cd -

	# XXXMJ this creates sparse files, make sure makefs doesn't
	#       preserve the sparseness.
	# XXXMJ need to test with larger files (at least 128MB for L2 indirs)
	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image
	check_image_contents
}
file_sizes_cleanup()
{
	common_cleanup
}
224
atf_test_case hard_links cleanup
hard_links_body()
{
	local link

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# One link group rooted in the top-level directory...
	mkdir dir
	echo "hello" > 1
	ln 1 2
	ln 1 dir/1

	# ...and one rooted in a subdirectory.
	echo "goodbye" > dir/a
	ln dir/a dir/b
	ln dir/a a

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image
	check_image_contents

	# Every member of a link group must share the inode number and link
	# count of the first member, and all must have identical contents.
	stat -f '%i' ${TEST_MOUNT_DIR}/1 > ./ino
	stat -f '%l' ${TEST_MOUNT_DIR}/1 > ./nlink
	for link in 1 2 dir/1; do
		atf_check -o file:./nlink -e empty -s exit:0 \
		    stat -f '%l' ${TEST_MOUNT_DIR}/${link}
		atf_check -o file:./ino -e empty -s exit:0 \
		    stat -f '%i' ${TEST_MOUNT_DIR}/${link}
		atf_check cmp -s ${TEST_INPUTS_DIR}/1 ${TEST_MOUNT_DIR}/${link}
	done

	stat -f '%i' ${TEST_MOUNT_DIR}/dir/a > ./ino
	stat -f '%l' ${TEST_MOUNT_DIR}/dir/a > ./nlink
	for link in dir/a dir/b a; do
		atf_check -o file:./nlink -e empty -s exit:0 \
		    stat -f '%l' ${TEST_MOUNT_DIR}/${link}
		atf_check -o file:./ino -e empty -s exit:0 \
		    stat -f '%i' ${TEST_MOUNT_DIR}/${link}
		atf_check cmp -s ${TEST_INPUTS_DIR}/dir/a ${TEST_MOUNT_DIR}/${link}
	done
}
hard_links_cleanup()
{
	common_cleanup
}
275
# Allocate enough dnodes from an object set that the meta dnode needs to use
# indirect blocks.
atf_test_case indirect_dnode_array cleanup
indirect_dnode_array_body()
{
	local count i

	# How many dnodes do we need to allocate?  Well, the data block size
	# for meta dnodes is always 16KB, so with a dnode size of 512B we get
	# 32 dnodes per direct block.  The maximum indirect block size is 128KB
	# and that can fit 1024 block pointers, so we need at least 32 * 1024
	# files to force the use of two levels of indirection.
	#
	# Unfortunately that number of files makes the test run quite slowly,
	# so we settle for a single indirect block for now...
	count=$(jot -r 1 32 1024)

	create_test_dirs
	cd $TEST_INPUTS_DIR
	i=1
	while [ $i -le $count ]; do
		touch $i
		i=$((i + 1))
	done
	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image
	check_image_contents
}
indirect_dnode_array_cleanup()
{
	common_cleanup
}
311
#
# Create some files with long names, so as to test fat ZAP handling.
#
atf_test_case long_file_name cleanup
long_file_name_body()
{
	local len subdir

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# micro ZAP keys can be at most 50 bytes, so names up to 60
	# characters long push the directory ZAP into its fat form.
	len=1
	while [ $len -le 60 ]; do
		touch $(jot -s '' $len 1 1)
		len=$((len + 1))
	done
	subdir=$(jot -s '' 61 1 1)
	mkdir $subdir
	len=1
	while [ $len -le 60 ]; do
		touch ${subdir}/$(jot -s '' $len 1 1)
		len=$((len + 1))
	done

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image
	check_image_contents

	# Add a directory entry in the hope that OpenZFS might catch a bug
	# in makefs' fat ZAP encoding.
	touch ${TEST_MOUNT_DIR}/foo
}
long_file_name_cleanup()
{
	common_cleanup
}
350
#
# Exercise handling of multiple datasets.
#
atf_test_case multi_dataset_1 cleanup
multi_dataset_1_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	# Each of these staged directories becomes its own dataset below.
	mkdir dir1
	echo a > dir1/a
	mkdir dir2
	echo b > dir2/b

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1 -o fs=${ZFS_POOL_NAME}/dir2 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Make sure that we have three datasets with the expected mount points.
	# (zfs list -H prints one tab-separated line per dataset, hence the
	# exact inline:...\n expectations.)
	atf_check -o inline:${ZFS_POOL_NAME}\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}
	atf_check -o inline:${TEST_MOUNT_DIR}\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}

	atf_check -o inline:${ZFS_POOL_NAME}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}/dir1
	atf_check -o inline:${TEST_MOUNT_DIR}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	atf_check -o inline:${ZFS_POOL_NAME}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}/dir2
	atf_check -o inline:${TEST_MOUNT_DIR}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir2
}
multi_dataset_1_cleanup()
{
	common_cleanup
}
395
#
# Create a pool with two datasets, where the root dataset is mounted below
# the child dataset.
#
atf_test_case multi_dataset_2 cleanup
multi_dataset_2_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a
	mkdir dir2
	echo b > dir2/b

	cd -

	# Per-dataset properties are appended to the fs= value with
	# backslash-escaped semicolons.  Here the mountpoints are swapped:
	# the child dataset is mounted at / and the root dataset at /dir1.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1\;mountpoint=/ \
	    -o fs=${ZFS_POOL_NAME}\;mountpoint=/dir1 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
multi_dataset_2_cleanup()
{
	common_cleanup
}
426
#
# Create a dataset with a non-existent mount point.
#
atf_test_case multi_dataset_3 cleanup
multi_dataset_3_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a

	cd -

	# Note: dir2 deliberately has no staged directory; makefs should
	# still create an (empty) dataset for it.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1 \
	    -o fs=${ZFS_POOL_NAME}/dir2 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	atf_check -o inline:${TEST_MOUNT_DIR}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir2

	# Mounting dir2 should have created a directory called dir2.  Go
	# back and create it in the staging tree before comparing.
	atf_check mkdir ${TEST_INPUTS_DIR}/dir2

	check_image_contents
}
multi_dataset_3_cleanup()
{
	common_cleanup
}
461
#
# Create an unmounted dataset.
#
atf_test_case multi_dataset_4 cleanup
multi_dataset_4_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a

	cd -

	# dir1 becomes a dataset that is neither mounted nor mountable by
	# default (canmount=noauto, mountpoint=none).
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1\;canmount=noauto\;mountpoint=none \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	atf_check -o inline:none\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	check_image_contents

	# Now give the dataset a mountpoint and mount it over /dir1.
	atf_check zfs set mountpoint=/dir1 ${ZFS_POOL_NAME}/dir1
	atf_check zfs mount ${ZFS_POOL_NAME}/dir1
	atf_check -o inline:${TEST_MOUNT_DIR}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	# dir1/a should be part of the root dataset, not dir1, so it must
	# now be hidden beneath the (empty) dir1 dataset just mounted.
	# The original check spelled the path ${TEST_MOUNT_DIR}dir1/a,
	# missing a "/": that named a nonexistent path, so the assertion
	# passed vacuously regardless of mount behavior.
	atf_check -s not-exit:0 -e not-empty stat ${TEST_MOUNT_DIR}/dir1/a
}
multi_dataset_4_cleanup()
{
	common_cleanup
}
499
500#
501# Rudimentary test to verify that two ZFS images created using the same
502# parameters and input hierarchy are byte-identical.  In particular, makefs(1)
503# does not preserve file access times.
504#
505atf_test_case reproducible cleanup
506reproducible_body()
507{
508	create_test_inputs
509
510	atf_check $MAKEFS -s 512m -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
511	    ${TEST_IMAGE}.1 $TEST_INPUTS_DIR
512
513	atf_check $MAKEFS -s 512m -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
514	    ${TEST_IMAGE}.2 $TEST_INPUTS_DIR
515
516	# XXX-MJ cmp(1) is really slow
517	atf_check cmp ${TEST_IMAGE}.1 ${TEST_IMAGE}.2
518}
519reproducible_cleanup()
520{
521}
522
#
# Verify that we can take a snapshot of a generated dataset.
#
atf_test_case snapshot cleanup
snapshot_body()
{
	create_test_dirs

	# Populate the staging tree with a couple of files.
	mkdir ${TEST_INPUTS_DIR}/dir
	echo "hello" > ${TEST_INPUTS_DIR}/dir/hello
	echo "goodbye" > ${TEST_INPUTS_DIR}/goodbye

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	atf_check zfs snapshot ${ZFS_POOL_NAME}@1
}
snapshot_cleanup()
{
	common_cleanup
}
549
#
# Check handling of symbolic links.
#
atf_test_case soft_links cleanup
soft_links_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	# Self-referential (dangling) symlink.
	ln -s a a
	# NOTE(review): "a" already exists at this point, so this ln fails
	# with "File exists" and the error is silently ignored — confirm
	# whether a different link name was intended.
	ln -s dir/../a a
	# Dangling symlink into the subdirectory.
	ln -s dir/b b
	# NOTE(review): "dir" is a directory, so this redirection fails with
	# "Is a directory" and creates nothing; presumably "> dir/c" was
	# intended, leaving the following symlink dangling — confirm.
	echo 'c' > dir
	ln -s dir/c c
	# XXX-MJ overflows bonus buffer ln -s $(jot -s '' 320 1 1) 1

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
soft_links_cleanup()
{
	common_cleanup
}
580
#
# Verify that we can set properties on the root dataset.
#
atf_test_case root_props cleanup
root_props_body()
{
	create_test_inputs

	# Properties for the root dataset are passed via an fs= option whose
	# dataset name is the pool itself, with backslash-escaped semicolons
	# separating the property assignments.
	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}\;atime=off\;setuid=off \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Each property must have the requested value, and its source must
	# be "local" (i.e. explicitly set, not inherited or default).
	atf_check -o inline:off\\n -e empty -s exit:0 \
	    zfs get -H -o value atime $ZFS_POOL_NAME
	atf_check -o inline:local\\n -e empty -s exit:0 \
	    zfs get -H -o source atime $ZFS_POOL_NAME
	atf_check -o inline:off\\n -e empty -s exit:0 \
	    zfs get -H -o value setuid $ZFS_POOL_NAME
	atf_check -o inline:local\\n -e empty -s exit:0 \
	    zfs get -H -o source setuid $ZFS_POOL_NAME
}
root_props_cleanup()
{
	common_cleanup
}
610
atf_init_test_cases()
{
	local tc

	# Registration order matches the order of definition above.
	for tc in \
	    autoexpand \
	    basic \
	    dataset_removal \
	    empty_dir \
	    empty_fs \
	    file_sizes \
	    hard_links \
	    indirect_dnode_array \
	    long_file_name \
	    multi_dataset_1 \
	    multi_dataset_2 \
	    multi_dataset_3 \
	    multi_dataset_4 \
	    reproducible \
	    snapshot \
	    soft_links \
	    root_props; do
		atf_add_test_case $tc
	done

	# XXXMJ tests:
	# - test with different ashifts (at least, 9 and 12), different image sizes
	# - create datasets in imported pool
}
635