xref: /freebsd/usr.sbin/makefs/tests/makefs_zfs_tests.sh (revision 24e4dcf4ba5e9dedcf89efd358ea3e1fe5867020)
1#-
2# SPDX-License-Identifier: BSD-2-Clause
3#
4# Copyright (c) 2022-2023 The FreeBSD Foundation
5#
6# This software was developed by Mark Johnston under sponsorship from
7# the FreeBSD Foundation.
8#
9# Redistribution and use in source and binary forms, with or without
10# modification, are permitted provided that the following conditions are
11# met:
12# 1. Redistributions of source code must retain the above copyright
13#    notice, this list of conditions and the following disclaimer.
14# 2. Redistributions in binary form must reproduce the above copyright
15#    notice, this list of conditions and the following disclaimer in
16#    the documentation and/or other materials provided with the distribution.
17#
18# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28# SUCH DAMAGE.
29#
30
31MAKEFS="makefs -t zfs -o verify-txgs=true -o poolguid=$$"
32ZFS_POOL_NAME="makefstest$$"
33TEST_ZFS_POOL_NAME="$TMPDIR/poolname"
34
35. "$(dirname "$0")/makefs_tests_common.sh"
36
# Best-effort teardown shared by all test cases: destroy the imported pool
# (if any) and detach the md(4) device backing the image.  Guards each step
# so that a test which failed before import_image ran doesn't make cleanup
# itself spew errors from cat(1) on missing state files.
common_cleanup()
{
	local pool md

	# Try to force a TXG, this can help catch bugs by triggering a panic.
	sync

	if [ -f "$TEST_ZFS_POOL_NAME" ]; then
		pool=$(cat "$TEST_ZFS_POOL_NAME")
		# Silence the expected error when the pool was already
		# destroyed (e.g. by the compression test's inner loop).
		if zpool list "$pool" >/dev/null 2>&1; then
			zpool destroy "$pool"
		fi
	fi

	if [ -f "$TEST_MD_DEVICE_FILE" ]; then
		md=$(cat "$TEST_MD_DEVICE_FILE")
		if [ -c /dev/"$md" ]; then
			mdconfig -d -u "$md"
		fi
	fi
}
54
# Attach the generated image to an md(4) device, sanity-check it with zdb,
# and import the pool under $TEST_MOUNT_DIR.  Records the md unit and pool
# name in state files consumed by common_cleanup.
import_image()
{
	atf_check -e empty -o save:$TEST_MD_DEVICE_FILE -s exit:0 \
	    mdconfig -a -f $TEST_IMAGE
	# zdb -mmm -ddddd walks metaslabs and all objects, catching on-disk
	# inconsistencies before we hand the pool to the kernel.
	atf_check -o ignore -e empty -s exit:0 \
	    zdb -e -p /dev/$(cat $TEST_MD_DEVICE_FILE) -mmm -ddddd $ZFS_POOL_NAME
	atf_check zpool import -R $TEST_MOUNT_DIR $ZFS_POOL_NAME
	echo "$ZFS_POOL_NAME" > $TEST_ZFS_POOL_NAME
}
64
#
# Test autoexpansion of the vdev.
#
# The pool is initially 10GB, so we get 10GB minus one metaslab's worth of
# usable space for data.  Then the pool is expanded to 50GB, and the amount of
# usable space is 50GB minus one metaslab.
#
atf_test_case autoexpand cleanup
autoexpand_body()
{
	local mssize poolsize poolsize1 newpoolsize

	create_test_inputs

	mssize=$((128 * 1024 * 1024))
	poolsize=$((10 * 1024 * 1024 * 1024))
	atf_check $MAKEFS -s $poolsize -o mssize=$mssize -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	# Grow the backing file; the pool labels still record the old size.
	newpoolsize=$((50 * 1024 * 1024 * 1024))
	truncate -s $newpoolsize $TEST_IMAGE

	import_image

	check_image_contents

	# Before expansion, the reported size is the original size minus
	# one metaslab.
	poolsize1=$(zpool list -Hp -o size $ZFS_POOL_NAME)
	atf_check [ $((poolsize1 + $mssize)) -eq $poolsize ]

	# Expand the vdev to use the newly available space.
	atf_check zpool online -e $ZFS_POOL_NAME /dev/$(cat $TEST_MD_DEVICE_FILE)

	check_image_contents

	poolsize1=$(zpool list -Hp -o size $ZFS_POOL_NAME)
	atf_check [ $((poolsize1 + $mssize)) -eq $newpoolsize ]
}
autoexpand_cleanup()
{
	common_cleanup
}
106
#
# Test with some default layout defined by the common code.
#
atf_test_case basic cleanup
basic_body()
{
	create_test_inputs

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	# Verify that the imported pool's contents match the staging tree.
	check_image_contents
}
basic_cleanup()
{
	common_cleanup
}
126
#
# Try configuring various compression algorithms.
#
atf_test_case compression cleanup
compression_body()
{
	create_test_inputs

	cd $TEST_INPUTS_DIR
	mkdir dir
	mkdir dir2
	cd -

	# Build an image per algorithm: the root dataset gets $alg, "dir"
	# inherits from the root, and "dir2" explicitly disables compression.
	for alg in off on lzjb gzip gzip-1 gzip-2 gzip-3 gzip-4 \
	    gzip-5 gzip-6 gzip-7 gzip-8 gzip-9 zle lz4 zstd; do
		atf_check $MAKEFS -s 1g -o rootpath=/ \
		    -o poolname=$ZFS_POOL_NAME \
		    -o fs=${ZFS_POOL_NAME}\;compression=$alg \
		    -o fs=${ZFS_POOL_NAME}/dir \
		    -o fs=${ZFS_POOL_NAME}/dir2\;compression=off \
		    $TEST_IMAGE $TEST_INPUTS_DIR

		import_image

		check_image_contents

		if [ $alg = gzip-6 ]; then
			# ZFS reports gzip-6 as just gzip since it uses
			# a default compression level of 6.
			alg=gzip
		fi
		# The "dir" dataset's compression algorithm should be
		# inherited from the root dataset.
		atf_check -o inline:$alg\\n -e empty -s exit:0 \
		    zfs get -H -o value compression ${ZFS_POOL_NAME}
		atf_check -o inline:$alg\\n -e empty -s exit:0 \
		    zfs get -H -o value compression ${ZFS_POOL_NAME}/dir
		atf_check -o inline:off\\n -e empty -s exit:0 \
		    zfs get -H -o value compression ${ZFS_POOL_NAME}/dir2

		# Write incompressible (random) and compressible (zero) data
		# so we can check below that compression actually happened.
		atf_check -e ignore dd if=/dev/random \
		    of=${TEST_MOUNT_DIR}/dir/random bs=1M count=10
		atf_check -e ignore dd if=/dev/zero \
		    of=${TEST_MOUNT_DIR}/dir/zero bs=1M count=10
		atf_check -e ignore dd if=/dev/zero \
		    of=${TEST_MOUNT_DIR}/dir2/zero bs=1M count=10

		# Export and reimport to ensure that everything is
		# flushed to disk.
		atf_check zpool export ${ZFS_POOL_NAME}
		atf_check -o ignore -e empty -s exit:0 \
		    zdb -e -p /dev/$(cat $TEST_MD_DEVICE_FILE) -mmm -ddddd \
		    $ZFS_POOL_NAME
		atf_check zpool import -R $TEST_MOUNT_DIR $ZFS_POOL_NAME

		if [ $alg = off ]; then
			# If compression is off, the files should be the
			# same size as the input.
			atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir/random" \
			    du -m ${TEST_MOUNT_DIR}/dir/random
			atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir/zero" \
			    du -m ${TEST_MOUNT_DIR}/dir/zero
			atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir2/zero" \
			    du -m ${TEST_MOUNT_DIR}/dir2/zero
		else
			# If compression is on, the dir/zero file ought
			# to be smaller.
			atf_check -o match:"^1[[:space:]]+${TEST_MOUNT_DIR}/dir/zero" \
			    du -m ${TEST_MOUNT_DIR}/dir/zero
			atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir/random" \
			    du -m ${TEST_MOUNT_DIR}/dir/random
			atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir2/zero" \
			    du -m ${TEST_MOUNT_DIR}/dir2/zero
		fi

		# Tear down so the next iteration rebuilds from scratch.
		atf_check zpool destroy ${ZFS_POOL_NAME}
		atf_check rm -f ${TEST_ZFS_POOL_NAME}
		atf_check mdconfig -d -u $(cat ${TEST_MD_DEVICE_FILE})
		atf_check rm -f ${TEST_MD_DEVICE_FILE}
	done
}
compression_cleanup()
{
	common_cleanup
}
212
#
# Try destroying a dataset that was created by makefs.
#
atf_test_case dataset_removal cleanup
dataset_removal_body()
{
	create_test_dirs

	cd $TEST_INPUTS_DIR
	mkdir dir
	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# The kernel must be able to free everything makefs allocated
	# for the child dataset.
	atf_check zfs destroy ${ZFS_POOL_NAME}/dir
}
dataset_removal_cleanup()
{
	common_cleanup
}
239
#
# Make sure that we can handle some special file types.  Anything other than
# regular files, symlinks and directories are ignored.
#
atf_test_case devfs cleanup
devfs_body()
{
	atf_check mkdir dev
	atf_check mount -t devfs none ./dev

	# makefs should warn about, but not fail on, device nodes.
	atf_check -e match:"skipping unhandled" $MAKEFS -s 1g -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME $TEST_IMAGE ./dev

	import_image
}
devfs_cleanup()
{
	common_cleanup
	# Unmount the devfs instance after the pool/md teardown.
	umount -f ./dev
}
260
#
# Make sure that we can create and remove an empty directory.
#
atf_test_case empty_dir cleanup
empty_dir_body()
{
	create_test_dirs

	cd $TEST_INPUTS_DIR
	mkdir dir
	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# The directory created by makefs must be removable by the kernel.
	atf_check rmdir ${TEST_MOUNT_DIR}/dir
}
empty_dir_cleanup()
{
	common_cleanup
}
286
# Build an image from an empty staging directory.
atf_test_case empty_fs cleanup
empty_fs_body()
{
	create_test_dirs

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
empty_fs_cleanup()
{
	common_cleanup
}
303
# Verify that a file created by makefs can be extended in-kernel without
# corrupting its original blocks.
atf_test_case file_extend cleanup
file_extend_body()
{
	local i start

	create_test_dirs

	# Create a file slightly longer than the maximum block size.
	start=132
	dd if=/dev/random of=${TEST_INPUTS_DIR}/foo bs=1k count=$start
	md5 -q ${TEST_INPUTS_DIR}/foo > foo.md5

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Append one 1KB block at a time, re-checking each time that the
	# original prefix is intact.
	i=0
	while [ $i -lt 1000 ]; do
		dd if=/dev/random of=${TEST_MOUNT_DIR}/foo bs=1k count=1 \
		    seek=$(($i + $start)) conv=notrunc
		# Make sure that the first $start blocks are unmodified.
		dd if=${TEST_MOUNT_DIR}/foo bs=1k count=$start of=foo.copy
		atf_check -o file:foo.md5 md5 -q foo.copy
		i=$(($i + 1))
	done
}
file_extend_cleanup()
{
	common_cleanup
}
337
# Exercise files whose sizes straddle power-of-two boundaries up to 1MB.
atf_test_case file_sizes cleanup
file_sizes_body()
{
	local i

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# For each power of two, create files of that size and one byte
	# either side of it.
	i=1
	while [ $i -lt $((1 << 20)) ]; do
		truncate -s $i ${i}.1
		truncate -s $(($i - 1)) ${i}.2
		truncate -s $(($i + 1)) ${i}.3
		i=$(($i << 1))
	done

	cd -

	# XXXMJ this creates sparse files, make sure makefs doesn't
	#       preserve the sparseness.
	# XXXMJ need to test with larger files (at least 128MB for L2 indirs)
	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
file_sizes_cleanup()
{
	common_cleanup
}
370
# Verify that hard links are preserved: linked names must share an inode
# number and link count, and all must have identical contents.
atf_test_case hard_links cleanup
hard_links_body()
{
	local f

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# Two link sets: one rooted at the top level, one inside a directory.
	mkdir dir
	echo "hello" > 1
	ln 1 2
	ln 1 dir/1

	echo "goodbye" > dir/a
	ln dir/a dir/b
	ln dir/a a

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	stat -f '%i' ${TEST_MOUNT_DIR}/1 > ./ino
	stat -f '%l' ${TEST_MOUNT_DIR}/1 > ./nlink
	for f in 1 2 dir/1; do
		atf_check -o file:./nlink -e empty -s exit:0 \
		    stat -f '%l' ${TEST_MOUNT_DIR}/${f}
		atf_check -o file:./ino -e empty -s exit:0 \
		    stat -f '%i' ${TEST_MOUNT_DIR}/${f}
		atf_check cmp -s ${TEST_INPUTS_DIR}/1 ${TEST_MOUNT_DIR}/${f}
	done

	stat -f '%i' ${TEST_MOUNT_DIR}/dir/a > ./ino
	stat -f '%l' ${TEST_MOUNT_DIR}/dir/a > ./nlink
	for f in dir/a dir/b a; do
		atf_check -o file:./nlink -e empty -s exit:0 \
		    stat -f '%l' ${TEST_MOUNT_DIR}/${f}
		atf_check -o file:./ino -e empty -s exit:0 \
		    stat -f '%i' ${TEST_MOUNT_DIR}/${f}
		atf_check cmp -s ${TEST_INPUTS_DIR}/dir/a ${TEST_MOUNT_DIR}/${f}
	done
}
hard_links_cleanup()
{
	common_cleanup
}
421
# Allocate enough dnodes from an object set that the meta dnode needs to use
# indirect blocks.
atf_test_case indirect_dnode_array cleanup
indirect_dnode_array_body()
{
	local count i

	# How many dnodes do we need to allocate?  Well, the data block size
	# for meta dnodes is always 16KB, so with a dnode size of 512B we get
	# 32 dnodes per direct block.  The maximum indirect block size is 128KB
	# and that can fit 1024 block pointers, so we need at least 32 * 1024
	# files to force the use of two levels of indirection.
	#
	# Unfortunately that number of files makes the test run quite slowly,
	# so we settle for a single indirect block for now...
	count=$(jot -r 1 32 1024)

	# Create $count empty files; their dnodes are what we care about.
	create_test_dirs
	cd $TEST_INPUTS_DIR
	for i in $(seq 1 $count); do
		touch $i
	done
	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
indirect_dnode_array_cleanup()
{
	common_cleanup
}
457
#
# Create some files with long names, so as to test fat ZAP handling.
#
atf_test_case long_file_name cleanup
long_file_name_body()
{
	local dir i

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# micro ZAP keys can be at most 50 bytes.
	# jot -s '' $i 1 1 produces a name of length $i ("111...1").
	for i in $(seq 1 60); do
		touch $(jot -s '' $i 1 1)
	done
	dir=$(jot -s '' 61 1 1)
	mkdir $dir
	for i in $(seq 1 60); do
		touch ${dir}/$(jot -s '' $i 1 1)
	done

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Add a directory entry in the hope that OpenZFS might catch a bug
	# in makefs' fat ZAP encoding.
	touch ${TEST_MOUNT_DIR}/foo
}
long_file_name_cleanup()
{
	common_cleanup
}
496
#
# Exercise handling of multiple datasets.
#
atf_test_case multi_dataset_1 cleanup
multi_dataset_1_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a
	mkdir dir2
	echo b > dir2/b

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1 -o fs=${ZFS_POOL_NAME}/dir2 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Make sure that we have three datasets with the expected mount points.
	atf_check -o inline:${ZFS_POOL_NAME}\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}
	atf_check -o inline:${TEST_MOUNT_DIR}\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}

	atf_check -o inline:${ZFS_POOL_NAME}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}/dir1
	atf_check -o inline:${TEST_MOUNT_DIR}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	atf_check -o inline:${ZFS_POOL_NAME}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o name ${ZFS_POOL_NAME}/dir2
	atf_check -o inline:${TEST_MOUNT_DIR}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir2
}
multi_dataset_1_cleanup()
{
	common_cleanup
}
541
#
# Create a pool with two datasets, where the root dataset is mounted below
# the child dataset.
#
atf_test_case multi_dataset_2 cleanup
multi_dataset_2_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a
	mkdir dir2
	echo b > dir2/b

	cd -

	# Mountpoints are inverted: the child dataset is the root of the
	# mounted hierarchy and the root dataset hangs beneath it.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1\;mountpoint=/ \
	    -o fs=${ZFS_POOL_NAME}\;mountpoint=/dir1 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
multi_dataset_2_cleanup()
{
	common_cleanup
}
572
#
# Create a dataset with a non-existent mount point.
#
atf_test_case multi_dataset_3 cleanup
multi_dataset_3_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a

	cd -

	# dir2 exists only as a dataset, not in the staging tree.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1 \
	    -o fs=${ZFS_POOL_NAME}/dir2 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	atf_check -o inline:${TEST_MOUNT_DIR}/dir2\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir2

	# Mounting dir2 should have created a directory called dir2.  Go
	# back and create it in the staging tree before comparing.
	atf_check mkdir ${TEST_INPUTS_DIR}/dir2

	check_image_contents
}
multi_dataset_3_cleanup()
{
	common_cleanup
}
607
#
# Create an unmounted dataset.
#
atf_test_case multi_dataset_4 cleanup
multi_dataset_4_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1\;canmount=noauto\;mountpoint=none \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	atf_check -o inline:none\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	check_image_contents

	# Now mount the dataset over the root dataset's dir1 directory.
	atf_check zfs set mountpoint=/dir1 ${ZFS_POOL_NAME}/dir1
	atf_check zfs mount ${ZFS_POOL_NAME}/dir1
	atf_check -o inline:${TEST_MOUNT_DIR}/dir1\\n -e empty -s exit:0 \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	# dir1/a should be part of the root dataset, not dir1, so it is now
	# shadowed by the newly mounted (empty) dir1 dataset and stat must
	# fail.  Note: the path previously lacked the "/" separator
	# ("${TEST_MOUNT_DIR}dir1/a"), which made this check pass vacuously.
	atf_check -s not-exit:0 -e not-empty stat ${TEST_MOUNT_DIR}/dir1/a
}
multi_dataset_4_cleanup()
{
	common_cleanup
}
645
#
# Validate handling of multiple staging directories.
#
atf_test_case multi_staging_1 cleanup
multi_staging_1_body()
{
	local tmpdir

	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > a
	echo a > dir1/a
	echo z > z

	cd -

	# Populate a second, independent staging directory.
	tmpdir=$(mktemp -d)
	cd $tmpdir

	mkdir dir2 dir2/dir3
	echo b > dir2/b
	echo c > dir2/dir3/c
	# NOTE(review): "dir2/dir3c" looks like a typo for "dir2/dir3/c";
	# as written this creates a dangling symlink, which makefs should
	# still copy verbatim — confirm intent.
	ln -s dir2/dir3c s

	cd -

	# Both directories are merged into a single image.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE ${TEST_INPUTS_DIR} $tmpdir

	import_image

	check_image_contents -d $tmpdir
}
multi_staging_1_cleanup()
{
	common_cleanup
}
685
# Validate merging of a directory that exists in both staging trees.
atf_test_case multi_staging_2 cleanup
multi_staging_2_body()
{
	local tmpdir

	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	echo a > dir/foo
	echo b > dir/bar

	cd -

	# Second staging tree contributes another file to the same "dir".
	tmpdir=$(mktemp -d)
	cd $tmpdir

	mkdir dir
	echo c > dir/baz

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE ${TEST_INPUTS_DIR} $tmpdir

	import_image

	# check_image_contents can't easily handle merged directories, so
	# just check that the merged directory contains the files we expect.
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/foo
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/bar
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/baz

	if [ "$(ls ${TEST_MOUNT_DIR}/dir | wc -l)" -ne 3 ]; then
		atf_fail "Expected 3 files in ${TEST_MOUNT_DIR}/dir"
	fi
}
multi_staging_2_cleanup()
{
	common_cleanup
}
727
#
# Rudimentary test to verify that two ZFS images created using the same
# parameters and input hierarchy are byte-identical.  In particular, makefs(1)
# does not preserve file access times.
#
atf_test_case reproducible cleanup
reproducible_body()
{
	create_test_inputs

	atf_check $MAKEFS -s 512m -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    ${TEST_IMAGE}.1 $TEST_INPUTS_DIR

	atf_check $MAKEFS -s 512m -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    ${TEST_IMAGE}.2 $TEST_INPUTS_DIR

	# XXX-MJ cmp(1) is really slow
	atf_check cmp ${TEST_IMAGE}.1 ${TEST_IMAGE}.2
}
reproducible_cleanup()
{
	# Nothing to do: no pool is imported and no md device is attached.
}
750
#
# Verify that we can take a snapshot of a generated dataset.
#
atf_test_case snapshot cleanup
snapshot_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	echo "hello" > dir/hello
	echo "goodbye" > goodbye

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	atf_check zfs snapshot ${ZFS_POOL_NAME}@1
}
snapshot_cleanup()
{
	common_cleanup
}
777
#
# Check handling of symbolic links.
#
atf_test_case soft_links cleanup
soft_links_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	# Self-referential dangling symlink.
	ln -s a a
	# NOTE(review): this ln fails ("a" already exists) and the failure
	# is deliberately unchecked; likewise "echo 'c' > dir" below fails
	# because dir is a directory.  The surviving tree is dir/, a->a,
	# b->dir/b, c->dir/c (all dangling) — confirm this is intended.
	ln -s dir/../a a
	ln -s dir/b b
	echo 'c' > dir
	ln -s dir/c c
	# XXX-MJ overflows bonus buffer ln -s $(jot -s '' 320 1 1) 1

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
soft_links_cleanup()
{
	common_cleanup
}
808
#
# Verify that we can set properties on the root dataset.
#
atf_test_case root_props cleanup
root_props_body()
{
	create_test_inputs

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}\;atime=off\;setuid=off \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Both properties must report the configured value with a "local"
	# source, i.e., stored in the dataset rather than inherited.
	atf_check -o inline:off\\n -e empty -s exit:0 \
	    zfs get -H -o value atime $ZFS_POOL_NAME
	atf_check -o inline:local\\n -e empty -s exit:0 \
	    zfs get -H -o source atime $ZFS_POOL_NAME
	atf_check -o inline:off\\n -e empty -s exit:0 \
	    zfs get -H -o value setuid $ZFS_POOL_NAME
	atf_check -o inline:local\\n -e empty -s exit:0 \
	    zfs get -H -o source setuid $ZFS_POOL_NAME
}
root_props_cleanup()
{
	common_cleanup
}
838
#
# Verify that usedds and usedchild props are set properly.
#
atf_test_case used_space_props cleanup
used_space_props_body()
{
	local used usedds usedchild
	local rootmb childmb totalmb fudge
	local status

	create_test_dirs
	cd $TEST_INPUTS_DIR
	mkdir dir

	# Root dataset holds $rootmb MB, child dataset $childmb MB; "fudge"
	# is the allowed metadata overhead per dataset.
	rootmb=17
	childmb=39
	totalmb=$(($rootmb + $childmb))
	fudge=$((2 * 1024 * 1024))

	atf_check -e ignore dd if=/dev/random of=foo bs=1M count=$rootmb
	atf_check -e ignore dd if=/dev/random of=dir/bar bs=1M count=$childmb

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	# Make sure that each dataset's space usage is no more than 2MB larger
	# than their files.  This number is magic and might need to change
	# someday.
	usedds=$(zfs list -o usedds -Hp ${ZFS_POOL_NAME})
	atf_check test $usedds -gt $(($rootmb * 1024 * 1024)) -a \
	    $usedds -le $(($rootmb * 1024 * 1024 + $fudge))
	usedds=$(zfs list -o usedds -Hp ${ZFS_POOL_NAME}/dir)
	atf_check test $usedds -gt $(($childmb * 1024 * 1024)) -a \
	    $usedds -le $(($childmb * 1024 * 1024 + $fudge))

	# Make sure that the usedchild property value makes sense: the parent's
	# value corresponds to the size of the child, and the child has no
	# children.
	usedchild=$(zfs list -o usedchild -Hp ${ZFS_POOL_NAME})
	atf_check test $usedchild -gt $(($childmb * 1024 * 1024)) -a \
	    $usedchild -le $(($childmb * 1024 * 1024 + $fudge))
	atf_check -o inline:'0\n' \
	    zfs list -Hp -o usedchild ${ZFS_POOL_NAME}/dir

	# Make sure that the used property value makes sense: the parent's
	# value is the sum of the two sizes, and the child's value is the
	# same as its usedds value, which has already been checked.
	used=$(zfs list -o used -Hp ${ZFS_POOL_NAME})
	atf_check test $used -gt $(($totalmb * 1024 * 1024)) -a \
	    $used -le $(($totalmb * 1024 * 1024 + 2 * $fudge))
	used=$(zfs list -o used -Hp ${ZFS_POOL_NAME}/dir)
	atf_check -o inline:$used'\n' \
	    zfs list -Hp -o usedds ${ZFS_POOL_NAME}/dir

	# Both datasets do not have snapshots.
	atf_check -o inline:'0\n' zfs list -Hp -o usedsnap ${ZFS_POOL_NAME}
	atf_check -o inline:'0\n' zfs list -Hp -o usedsnap ${ZFS_POOL_NAME}/dir
}
used_space_props_cleanup()
{
	common_cleanup
}
906
# Verify that file permissions are set properly.  Make sure that non-executable
# files can't be executed.
atf_test_case perms cleanup
perms_body()
{
	local mode

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# One script per permission combination, named after its octal mode
	# (seq counts 0..511 decimal == 0000..0777 octal).
	for mode in $(seq 0 511); do
		mode=$(printf "%04o\n" $mode)
		echo 'echo a' > $mode
		atf_check chmod $mode $mode
	done

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	for mode in $(seq 0 511); do
		mode=$(printf "%04o\n" $mode)
		# No execute bit at all: even root must be refused.
		if [ $(($mode & 0111)) -eq 0 ]; then
			atf_check -s not-exit:0 -e match:"Permission denied" \
			    ${TEST_INPUTS_DIR}/$mode
		fi
		# No "other" execute bit: the unprivileged "tests" user
		# must be refused.
		if [ $(($mode & 0001)) -eq 0 ]; then
			atf_check -s not-exit:0 -e match:"Permission denied" \
			    su -m tests -c ${TEST_INPUTS_DIR}/$mode
		fi
	done

}
perms_cleanup()
{
	common_cleanup
}
949
#
# Verify that -T timestamps are honored.
#
atf_test_case T_flag_dir cleanup
T_flag_dir_body()
{
	timestamp=1742574909
	create_test_dirs
	mkdir -p $TEST_INPUTS_DIR/dir1

	atf_check $MAKEFS -T $timestamp -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image
	# stat -s emits st_* shell assignments; eval brings them into scope.
	eval $(stat -s  $TEST_MOUNT_DIR/dir1)
	atf_check_equal $st_atime $timestamp
	atf_check_equal $st_mtime $timestamp
	atf_check_equal $st_ctime $timestamp
}

T_flag_dir_cleanup()
{
	common_cleanup
}
974
# Verify that timestamps from an -F mtree spec override -T.
atf_test_case T_flag_F_flag cleanup
T_flag_F_flag_body()
{
	atf_expect_fail "-F doesn't take precedence over -T"
	timestamp_F=1742574909
	timestamp_T=1742574910
	create_test_dirs
	mkdir -p $TEST_INPUTS_DIR/dir1

	atf_check -e empty -o save:$TEST_SPEC_FILE -s exit:0 \
	    mtree -c -k "type,time" -p $TEST_INPUTS_DIR
	change_mtree_timestamp $TEST_SPEC_FILE $timestamp_F
	atf_check -e empty -o not-empty -s exit:0 \
	    $MAKEFS -F $TEST_SPEC_FILE -T $timestamp_T -s 10g -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME $TEST_IMAGE $TEST_INPUTS_DIR

	# NOTE(review): this uses mount_image while every other case here
	# uses import_image; presumably copied from a non-ZFS variant, and
	# masked by the atf_expect_fail above — confirm.
	mount_image
	eval $(stat -s  $TEST_MOUNT_DIR/dir1)
	atf_check_equal $st_atime $timestamp_F
	atf_check_equal $st_mtime $timestamp_F
	atf_check_equal $st_ctime $timestamp_F
}

T_flag_F_flag_cleanup()
{
	common_cleanup
}
1002
# Verify that -T timestamps are applied when building from an mtree spec.
atf_test_case T_flag_mtree cleanup
T_flag_mtree_body()
{
	timestamp=1742574909
	create_test_dirs
	mkdir -p $TEST_INPUTS_DIR/dir1

	# The spec carries no "time" keyword, so -T must supply all times.
	atf_check -e empty -o save:$TEST_SPEC_FILE -s exit:0 \
	    mtree -c -k "type" -p $TEST_INPUTS_DIR
	atf_check $MAKEFS -T $timestamp -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_SPEC_FILE

	import_image
	eval $(stat -s  $TEST_MOUNT_DIR/dir1)
	atf_check_equal $st_atime $timestamp
	atf_check_equal $st_mtime $timestamp
	atf_check_equal $st_ctime $timestamp
}

T_flag_mtree_cleanup()
{
	common_cleanup
}
1026
# Register every test case with the ATF framework.
atf_init_test_cases()
{
	atf_add_test_case autoexpand
	atf_add_test_case basic
	atf_add_test_case compression
	atf_add_test_case dataset_removal
	atf_add_test_case devfs
	atf_add_test_case empty_dir
	atf_add_test_case empty_fs
	atf_add_test_case file_extend
	atf_add_test_case file_sizes
	atf_add_test_case hard_links
	atf_add_test_case indirect_dnode_array
	atf_add_test_case long_file_name
	atf_add_test_case multi_dataset_1
	atf_add_test_case multi_dataset_2
	atf_add_test_case multi_dataset_3
	atf_add_test_case multi_dataset_4
	atf_add_test_case multi_staging_1
	atf_add_test_case multi_staging_2
	atf_add_test_case reproducible
	atf_add_test_case snapshot
	atf_add_test_case soft_links
	atf_add_test_case root_props
	atf_add_test_case used_space_props
	atf_add_test_case perms
	atf_add_test_case T_flag_dir
	atf_add_test_case T_flag_F_flag
	atf_add_test_case T_flag_mtree

	# XXXMJ tests:
	# - test with different ashifts (at least, 9 and 12), different image sizes
	# - create datasets in imported pool
}
1061