xref: /freebsd/usr.sbin/makefs/tests/makefs_zfs_tests.sh (revision dafba19e42e78cd3d7c9264ece49ddd3d7d70da5)
1#-
2# SPDX-License-Identifier: BSD-2-Clause
3#
4# Copyright (c) 2022-2023 The FreeBSD Foundation
5#
6# This software was developed by Mark Johnston under sponsorship from
7# the FreeBSD Foundation.
8#
9# Redistribution and use in source and binary forms, with or without
10# modification, are permitted provided that the following conditions are
11# met:
12# 1. Redistributions of source code must retain the above copyright
13#    notice, this list of conditions and the following disclaimer.
14# 2. Redistributions in binary form must reproduce the above copyright
15#    notice, this list of conditions and the following disclaimer in
16#    the documentation and/or other materials provided with the distribution.
17#
18# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28# SUCH DAMAGE.
29#
30
31MAKEFS="makefs -t zfs -o verify-txgs=true -o poolguid=$$"
32ZFS_POOL_NAME="makefstest$$"
33TEST_ZFS_POOL_NAME="$TMPDIR/poolname"
34
35. "$(dirname "$0")/makefs_tests_common.sh"
36
#
# Shared cleanup: destroy the imported pool (if any) and detach the md(4)
# device backing the test image.  Both steps are best-effort since a test
# may fail before either resource exists.
#
common_cleanup()
{
	local pool md

	# Try to force a TXG, this can help catch bugs by triggering a panic.
	sync

	if [ -f "$TEST_ZFS_POOL_NAME" ]; then
		pool=$(cat "$TEST_ZFS_POOL_NAME")
		# Redirect stderr too: if the pool was already destroyed by
		# the test itself, "zpool list" would otherwise print noise.
		if zpool list "$pool" >/dev/null 2>&1; then
			zpool destroy "$pool"
		fi
	fi

	if [ -f "$TEST_MD_DEVICE_FILE" ]; then
		md=$(cat "$TEST_MD_DEVICE_FILE")
		if [ -c /dev/"$md" ]; then
			mdconfig -o force -d -u "$md"
		fi
	fi
}
58
#
# Attach the generated image to an md(4) device, sanity-check the on-disk
# pool with zdb before the kernel touches it, import the pool under
# $TEST_MOUNT_DIR, and record the pool name for common_cleanup().
#
import_image()
{
	atf_check -o save:$TEST_MD_DEVICE_FILE mdconfig -a -f $TEST_IMAGE
	# zdb -mmm -ddddd walks metaslabs and all dnodes; a non-zero exit
	# catches makefs-produced corruption before "zpool import" can panic.
	atf_check -o ignore \
	    zdb -e -p /dev/$(cat $TEST_MD_DEVICE_FILE) -mmm -ddddd $ZFS_POOL_NAME
	atf_check zpool import -R $TEST_MOUNT_DIR $ZFS_POOL_NAME
	echo "$ZFS_POOL_NAME" > $TEST_ZFS_POOL_NAME
}
67
#
# Test autoexpansion of the vdev.
#
# The pool is initially 10GB, so we get 10GB minus one metaslab's worth of
# usable space for data.  Then the pool is expanded to 50GB, and the amount of
# usable space is 50GB minus one metaslab.
#
atf_test_case autoexpand cleanup
autoexpand_body()
{
	local mssize poolsize poolsize1 newpoolsize

	create_test_inputs

	mssize=$((128 * 1024 * 1024))
	poolsize=$((10 * 1024 * 1024 * 1024))
	atf_check $MAKEFS -s $poolsize -o mssize=$mssize -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	# Grow the backing file before import; the pool itself still
	# believes it is $poolsize bytes until expanded below.
	newpoolsize=$((50 * 1024 * 1024 * 1024))
	truncate -s $newpoolsize $TEST_IMAGE

	import_image

	check_image_contents

	# Reported size should be the vdev size minus one metaslab.
	poolsize1=$(zpool list -Hp -o size $ZFS_POOL_NAME)
	atf_check [ $((poolsize1 + $mssize)) -eq $poolsize ]

	# "zpool online -e" expands the vdev to use the newly added space.
	atf_check zpool online -e $ZFS_POOL_NAME /dev/$(cat $TEST_MD_DEVICE_FILE)

	check_image_contents

	poolsize1=$(zpool list -Hp -o size $ZFS_POOL_NAME)
	atf_check [ $((poolsize1 + $mssize)) -eq $newpoolsize ]
}
autoexpand_cleanup()
{
	common_cleanup
}
109
#
# Test with some default layout defined by the common code.
#
atf_test_case basic cleanup
basic_body()
{
	create_test_inputs

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	# Compare the mounted pool against the staging tree.
	check_image_contents
}
basic_cleanup()
{
	common_cleanup
}
129
#
# Try configuring various compression algorithms.
#
atf_test_case compression cleanup
compression_head()
{
	# Double the default timeout to make it pass on emulated architectures
	# on ci.freebsd.org
	atf_set "timeout" 600
}
compression_body()
{
	create_test_inputs

	cd $TEST_INPUTS_DIR
	mkdir dir
	mkdir dir2
	cd -

	# Build and verify one image per supported compression setting.  The
	# root dataset uses $alg, "dir" inherits it, "dir2" forces it off.
	for alg in off on lzjb gzip gzip-1 gzip-2 gzip-3 gzip-4 \
	    gzip-5 gzip-6 gzip-7 gzip-8 gzip-9 zle lz4 zstd; do
		atf_check $MAKEFS -s 1g -o rootpath=/ \
		    -o poolname=$ZFS_POOL_NAME \
		    -o fs=${ZFS_POOL_NAME}\;compression=$alg \
		    -o fs=${ZFS_POOL_NAME}/dir \
		    -o fs=${ZFS_POOL_NAME}/dir2\;compression=off \
		    $TEST_IMAGE $TEST_INPUTS_DIR

		import_image

		check_image_contents

		if [ $alg = gzip-6 ]; then
			# ZFS reports gzip-6 as just gzip since it uses
			# a default compression level of 6.
			alg=gzip
		fi
		# The "dir" dataset's compression algorithm should be
		# inherited from the root dataset.
		atf_check -o inline:$alg\\n \
		    zfs get -H -o value compression ${ZFS_POOL_NAME}
		atf_check -o inline:$alg\\n \
		    zfs get -H -o value compression ${ZFS_POOL_NAME}/dir
		atf_check -o inline:off\\n \
		    zfs get -H -o value compression ${ZFS_POOL_NAME}/dir2

		# Write incompressible (random) and compressible (zero) data.
		atf_check -e ignore dd if=/dev/random \
		    of=${TEST_MOUNT_DIR}/dir/random bs=1M count=10
		atf_check -e ignore dd if=/dev/zero \
		    of=${TEST_MOUNT_DIR}/dir/zero bs=1M count=10
		atf_check -e ignore dd if=/dev/zero \
		    of=${TEST_MOUNT_DIR}/dir2/zero bs=1M count=10

		# Export and reimport to ensure that everything is
		# flushed to disk.
		atf_check zpool export ${ZFS_POOL_NAME}
		atf_check -o ignore \
		    zdb -e -p /dev/$(cat $TEST_MD_DEVICE_FILE) -mmm -ddddd \
		    $ZFS_POOL_NAME
		atf_check zpool import -R $TEST_MOUNT_DIR $ZFS_POOL_NAME

		if [ $alg = off ]; then
			# If compression is off, the files should be the
			# same size as the input.
			atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir/random" \
			    du -m ${TEST_MOUNT_DIR}/dir/random
			atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir/zero" \
			    du -m ${TEST_MOUNT_DIR}/dir/zero
			atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir2/zero" \
			    du -m ${TEST_MOUNT_DIR}/dir2/zero
		else
			# If compression is on, the dir/zero file ought
			# to be smaller.
			atf_check -o match:"^1[[:space:]]+${TEST_MOUNT_DIR}/dir/zero" \
			    du -m ${TEST_MOUNT_DIR}/dir/zero
			atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir/random" \
			    du -m ${TEST_MOUNT_DIR}/dir/random
			atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir2/zero" \
			    du -m ${TEST_MOUNT_DIR}/dir2/zero
		fi

		# Tear everything down by hand so the next loop iteration
		# starts from a clean slate.
		atf_check zpool destroy ${ZFS_POOL_NAME}
		atf_check rm -f ${TEST_ZFS_POOL_NAME}
		atf_check mdconfig -o force -d -u $(cat ${TEST_MD_DEVICE_FILE})
		atf_check rm -f ${TEST_MD_DEVICE_FILE}
	done
}
compression_cleanup()
{
	common_cleanup
}
221
#
# Try destroying a dataset that was created by makefs.
#
atf_test_case dataset_removal cleanup
dataset_removal_body()
{
	create_test_dirs

	cd $TEST_INPUTS_DIR
	mkdir dir
	cd -

	# "dir" becomes its own dataset rooted at /dir.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# The kernel must be able to destroy a makefs-created dataset.
	atf_check zfs destroy ${ZFS_POOL_NAME}/dir
}
dataset_removal_cleanup()
{
	common_cleanup
}
248
#
# Make sure that we can handle some special file types.  Anything other than
# regular files, symlinks and directories are ignored.
#
atf_test_case devfs cleanup
devfs_body()
{
	# Use a devfs mount as a convenient source of device nodes.
	atf_check mkdir dev
	atf_check mount -t devfs none ./dev

	# makefs must warn about (not fail on) the unsupported file types.
	atf_check -e match:"skipping unhandled" $MAKEFS -s 1g -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME $TEST_IMAGE ./dev

	import_image
}
devfs_cleanup()
{
	common_cleanup
	# Undo the devfs mount created in the body.
	umount -f ./dev
}
269
#
# Make sure that we can create and remove an empty directory.
#
atf_test_case empty_dir cleanup
empty_dir_body()
{
	create_test_dirs

	cd $TEST_INPUTS_DIR
	mkdir dir
	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# rmdir fails if the directory's link/entry bookkeeping is wrong.
	atf_check rmdir ${TEST_MOUNT_DIR}/dir
}
empty_dir_cleanup()
{
	common_cleanup
}
295
#
# Build an image from an empty staging directory.
#
atf_test_case empty_fs cleanup
empty_fs_body()
{
	create_test_dirs

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
empty_fs_cleanup()
{
	common_cleanup
}
312
#
# Extend a makefs-created file in-kernel, one 1KB block at a time, and make
# sure the original prefix written by makefs is never corrupted.
#
atf_test_case file_extend cleanup
file_extend_head()
{
	# Double the default timeout to make it pass on emulated architectures
	# on ci.freebsd.org
	atf_set "timeout" 600
}
file_extend_body()
{
	local i start

	create_test_dirs

	# Create a file slightly longer than the maximum block size.
	start=132
	dd if=/dev/random of=${TEST_INPUTS_DIR}/foo bs=1k count=$start
	md5 -q ${TEST_INPUTS_DIR}/foo > foo.md5

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Append 1000 blocks, re-hashing the original prefix each time.
	i=0
	while [ $i -lt 1000 ]; do
		dd if=/dev/random of=${TEST_MOUNT_DIR}/foo bs=1k count=1 \
		    seek=$(($i + $start)) conv=notrunc
		# Make sure that the first $start blocks are unmodified.
		dd if=${TEST_MOUNT_DIR}/foo bs=1k count=$start of=foo.copy
		atf_check -o file:foo.md5 md5 -q foo.copy
		i=$(($i + 1))
	done
}
file_extend_cleanup()
{
	common_cleanup
}
352
#
# Exercise block allocation for a range of file sizes: for each power of two
# up to 1MB, create files sized exactly at, just below and just above the
# boundary, then verify the image round-trips.
#
atf_test_case file_sizes cleanup
file_sizes_body()
{
	local sz

	create_test_dirs
	cd $TEST_INPUTS_DIR

	sz=1
	until [ $sz -ge $((1 << 20)) ]; do
		truncate -s $sz ${sz}.1
		truncate -s $(($sz - 1)) ${sz}.2
		truncate -s $(($sz + 1)) ${sz}.3
		sz=$(($sz * 2))
	done

	cd -

	# XXXMJ this creates sparse files, make sure makefs doesn't
	#       preserve the sparseness.
	# XXXMJ need to test with larger files (at least 128MB for L2 indirs)
	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
file_sizes_cleanup()
{
	common_cleanup
}
385
#
# Verify that hard links are preserved: all names of a linked file must share
# an inode number, report the right link count, and have identical contents.
#
atf_test_case hard_links cleanup
hard_links_body()
{
	local f

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# One link set in the root, one spanning root and a subdirectory.
	mkdir dir
	echo "hello" > 1
	ln 1 2
	ln 1 dir/1

	echo "goodbye" > dir/a
	ln dir/a dir/b
	ln dir/a a

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Record the first name's inode and link count, then require every
	# other name in the set to match them.
	stat -f '%i' ${TEST_MOUNT_DIR}/1 > ./ino
	stat -f '%l' ${TEST_MOUNT_DIR}/1 > ./nlink
	for f in 1 2 dir/1; do
		atf_check -o file:./nlink stat -f '%l' ${TEST_MOUNT_DIR}/${f}
		atf_check -o file:./ino stat -f '%i' ${TEST_MOUNT_DIR}/${f}
		atf_check cmp -s ${TEST_INPUTS_DIR}/1 ${TEST_MOUNT_DIR}/${f}
	done

	stat -f '%i' ${TEST_MOUNT_DIR}/dir/a > ./ino
	stat -f '%l' ${TEST_MOUNT_DIR}/dir/a > ./nlink
	for f in dir/a dir/b a; do
		atf_check -o file:./nlink stat -f '%l' ${TEST_MOUNT_DIR}/${f}
		atf_check -o file:./ino stat -f '%i' ${TEST_MOUNT_DIR}/${f}
		atf_check cmp -s ${TEST_INPUTS_DIR}/dir/a ${TEST_MOUNT_DIR}/${f}
	done
}
hard_links_cleanup()
{
	common_cleanup
}
432
# Allocate enough dnodes from an object set that the meta dnode needs to use
# indirect blocks.
atf_test_case indirect_dnode_array cleanup
indirect_dnode_array_body()
{
	local count i

	# How many dnodes do we need to allocate?  Well, the data block size
	# for meta dnodes is always 16KB, so with a dnode size of 512B we get
	# 32 dnodes per direct block.  The maximum indirect block size is 128KB
	# and that can fit 1024 block pointers, so we need at least 32 * 1024
	# files to force the use of two levels of indirection.
	#
	# Unfortunately that number of files makes the test run quite slowly,
	# so we settle for a single indirect block for now...
	count=$(jot -r 1 32 1024)

	create_test_dirs
	cd $TEST_INPUTS_DIR
	# Create $count empty files; names are just 1..$count.
	for i in $(seq 1 $count); do
		touch $i
	done
	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
indirect_dnode_array_cleanup()
{
	common_cleanup
}
468
#
# Create some files with long names, so as to test fat ZAP handling.
#
atf_test_case long_file_name cleanup
long_file_name_body()
{
	local dir i
	create_test_dirs
	cd $TEST_INPUTS_DIR

	# micro ZAP keys can be at most 50 bytes.
	# Name lengths 1..60 straddle that limit, forcing a fat ZAP.
	for i in $(seq 1 60); do
		touch $(jot -s '' $i 1 1)
	done
	dir=$(jot -s '' 61 1 1)
	mkdir $dir
	for i in $(seq 1 60); do
		touch ${dir}/$(jot -s '' $i 1 1)
	done

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Add a directory entry in the hope that OpenZFS might catch a bug
	# in makefs' fat ZAP encoding.
	touch ${TEST_MOUNT_DIR}/foo
}
long_file_name_cleanup()
{
	common_cleanup
}
507
#
# Exercise handling of multiple datasets.
#
atf_test_case multi_dataset_1 cleanup
multi_dataset_1_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a
	mkdir dir2
	echo b > dir2/b

	cd -

	# Each of dir1 and dir2 becomes its own dataset.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1 -o fs=${ZFS_POOL_NAME}/dir2 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Make sure that we have three datasets with the expected mount points.
	atf_check -o inline:${ZFS_POOL_NAME}\\n \
	    zfs list -H -o name ${ZFS_POOL_NAME}
	atf_check -o inline:${TEST_MOUNT_DIR}\\n \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}
	atf_check -o inline:${ZFS_POOL_NAME}/dir1\\n \
	    zfs list -H -o name ${ZFS_POOL_NAME}/dir1
	atf_check -o inline:${TEST_MOUNT_DIR}/dir1\\n \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	atf_check -o inline:${ZFS_POOL_NAME}/dir2\\n \
	    zfs list -H -o name ${ZFS_POOL_NAME}/dir2
	atf_check -o inline:${TEST_MOUNT_DIR}/dir2\\n \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir2
}
multi_dataset_1_cleanup()
{
	common_cleanup
}
552
#
# Create a pool with two datasets, where the root dataset is mounted below
# the child dataset.
#
atf_test_case multi_dataset_2 cleanup
multi_dataset_2_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a
	mkdir dir2
	echo b > dir2/b

	cd -

	# Mountpoints are swapped relative to the dataset hierarchy: the
	# child mounts at / and the root dataset at /dir1.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1\;mountpoint=/ \
	    -o fs=${ZFS_POOL_NAME}\;mountpoint=/dir1 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
multi_dataset_2_cleanup()
{
	common_cleanup
}
583
#
# Create a dataset with a non-existent mount point.
#
atf_test_case multi_dataset_3 cleanup
multi_dataset_3_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a

	cd -

	# dir2 has no counterpart in the staging tree.
	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1 \
	    -o fs=${ZFS_POOL_NAME}/dir2 \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	atf_check -o inline:${TEST_MOUNT_DIR}/dir2\\n \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir2

	# Mounting dir2 should have created a directory called dir2.  Go
	# back and create it in the staging tree before comparing.
	atf_check mkdir ${TEST_INPUTS_DIR}/dir2

	check_image_contents
}
multi_dataset_3_cleanup()
{
	common_cleanup
}
618
#
# Create an unmounted dataset.
#
atf_test_case multi_dataset_4 cleanup
multi_dataset_4_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > dir1/a

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir1\;canmount=noauto\;mountpoint=none \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	# canmount=noauto: the dataset must not be mounted after import.
	atf_check -o inline:none\\n \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	check_image_contents

	atf_check zfs set mountpoint=/dir1 ${ZFS_POOL_NAME}/dir1
	atf_check zfs mount ${ZFS_POOL_NAME}/dir1
	atf_check -o inline:${TEST_MOUNT_DIR}/dir1\\n \
	    zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1

	# dir1/a should be part of the root dataset, not dir1, so mounting
	# the (empty) dir1 dataset on top of it must hide it.  Note: the
	# path previously lacked the "/" separator, making this check pass
	# vacuously against a nonexistent path.
	atf_check -s not-exit:0 -e not-empty stat ${TEST_MOUNT_DIR}/dir1/a
}
multi_dataset_4_cleanup()
{
	common_cleanup
}
656
#
# Validate handling of multiple staging directories.
#
atf_test_case multi_staging_1 cleanup
multi_staging_1_body()
{
	local tmpdir

	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir1
	echo a > a
	echo a > dir1/a
	echo z > z

	cd -

	# Second, independent staging tree merged into the same image.
	tmpdir=$(mktemp -d)
	cd $tmpdir

	mkdir dir2 dir2/dir3
	echo b > dir2/b
	echo c > dir2/dir3/c
	# NOTE(review): target "dir2/dir3c" does not exist (possibly meant
	# dir2/dir3/c) — the dangling symlink is copied verbatim either
	# way, so the comparison still works; confirm intent.
	ln -s dir2/dir3c s

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE ${TEST_INPUTS_DIR} $tmpdir

	import_image

	check_image_contents -d $tmpdir
}
multi_staging_1_cleanup()
{
	common_cleanup
}
696
#
# Validate merging of a directory that appears in two staging trees.
#
atf_test_case multi_staging_2 cleanup
multi_staging_2_body()
{
	local tmpdir

	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	echo a > dir/foo
	echo b > dir/bar

	cd -

	tmpdir=$(mktemp -d)
	cd $tmpdir

	# Same directory name in the second tree; contents must be merged.
	mkdir dir
	echo c > dir/baz

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE ${TEST_INPUTS_DIR} $tmpdir

	import_image

	# check_image_contents can't easily handle merged directories, so
	# just check that the merged directory contains the files we expect.
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/foo
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/bar
	atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/baz

	if [ "$(ls ${TEST_MOUNT_DIR}/dir | wc -l)" -ne 3 ]; then
		atf_fail "Expected 3 files in ${TEST_MOUNT_DIR}/dir"
	fi
}
multi_staging_2_cleanup()
{
	common_cleanup
}
738
#
# Rudimentary test to verify that two ZFS images created using the same
# parameters and input hierarchy are byte-identical.  In particular, makefs(1)
# does not preserve file access times.
#
atf_test_case reproducible cleanup
reproducible_body()
{
	create_test_inputs

	atf_check $MAKEFS -s 512m -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    ${TEST_IMAGE}.1 $TEST_INPUTS_DIR

	atf_check $MAKEFS -s 512m -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    ${TEST_IMAGE}.2 $TEST_INPUTS_DIR

	# XXX-MJ cmp(1) is really slow
	atf_check cmp ${TEST_IMAGE}.1 ${TEST_IMAGE}.2
}
reproducible_cleanup()
{
	# Nothing to clean up: no pool was imported and no md device created.
}
761
#
# Verify that we can take a snapshot of a generated dataset.
#
atf_test_case snapshot cleanup
snapshot_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	echo "hello" > dir/hello
	echo "goodbye" > goodbye

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	# Snapshot creation exercises more of the pool's on-disk state.
	atf_check zfs snapshot ${ZFS_POOL_NAME}@1
}
snapshot_cleanup()
{
	common_cleanup
}
788
#
# Check handling of symbolic links.
#
atf_test_case soft_links cleanup
soft_links_body()
{
	create_test_dirs
	cd $TEST_INPUTS_DIR

	mkdir dir
	# NOTE(review): the commands below are deliberately not wrapped in
	# atf_check; some fail (the second "ln -s ... a" target exists, the
	# redirect into the directory "dir" fails) and the rest create
	# self-referential or dangling symlinks, which makefs must copy
	# verbatim — confirm this is the intended mix.
	ln -s a a
	ln -s dir/../a a
	ln -s dir/b b
	echo 'c' > dir
	ln -s dir/c c
	# XXX-MJ overflows bonus buffer ln -s $(jot -s '' 320 1 1) 1

	cd -

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents
}
soft_links_cleanup()
{
	common_cleanup
}
819
#
# Verify that we can set properties on the root dataset.
#
atf_test_case root_props cleanup
root_props_body()
{
	create_test_inputs

	atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}\;atime=off\;setuid=off \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	# Both values must be set, and recorded as "local" (not inherited).
	atf_check -o inline:off\\n zfs get -H -o value atime $ZFS_POOL_NAME
	atf_check -o inline:local\\n zfs get -H -o source atime $ZFS_POOL_NAME
	atf_check -o inline:off\\n zfs get -H -o value setuid $ZFS_POOL_NAME
	atf_check -o inline:local\\n zfs get -H -o source setuid $ZFS_POOL_NAME
}
root_props_cleanup()
{
	common_cleanup
}
845
#
# Verify that usedds and usedchild props are set properly.
#
atf_test_case used_space_props cleanup
used_space_props_body()
{
	local used usedds usedchild
	local rootmb childmb totalmb fudge
	local status

	create_test_dirs
	cd $TEST_INPUTS_DIR
	mkdir dir

	# Known data sizes for the root dataset (foo) and child (dir/bar);
	# "fudge" is the metadata overhead allowance per dataset.
	rootmb=17
	childmb=39
	totalmb=$(($rootmb + $childmb))
	fudge=$((2 * 1024 * 1024))

	atf_check -e ignore dd if=/dev/random of=foo bs=1M count=$rootmb
	atf_check -e ignore dd if=/dev/random of=dir/bar bs=1M count=$childmb

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    -o fs=${ZFS_POOL_NAME}/dir \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	# Make sure that each dataset's space usage is no more than 2MB larger
	# than their files.  This number is magic and might need to change
	# someday.
	usedds=$(zfs list -o usedds -Hp ${ZFS_POOL_NAME})
	atf_check test $usedds -gt $(($rootmb * 1024 * 1024)) -a \
	    $usedds -le $(($rootmb * 1024 * 1024 + $fudge))
	usedds=$(zfs list -o usedds -Hp ${ZFS_POOL_NAME}/dir)
	atf_check test $usedds -gt $(($childmb * 1024 * 1024)) -a \
	    $usedds -le $(($childmb * 1024 * 1024 + $fudge))

	# Make sure that the usedchild property value makes sense: the parent's
	# value corresponds to the size of the child, and the child has no
	# children.
	usedchild=$(zfs list -o usedchild -Hp ${ZFS_POOL_NAME})
	atf_check test $usedchild -gt $(($childmb * 1024 * 1024)) -a \
	    $usedchild -le $(($childmb * 1024 * 1024 + $fudge))
	atf_check -o inline:'0\n' zfs list -Hp -o usedchild ${ZFS_POOL_NAME}/dir

	# Make sure that the used property value makes sense: the parent's
	# value is the sum of the two sizes, and the child's value is the
	# same as its usedds value, which has already been checked.
	used=$(zfs list -o used -Hp ${ZFS_POOL_NAME})
	atf_check test $used -gt $(($totalmb * 1024 * 1024)) -a \
	    $used -le $(($totalmb * 1024 * 1024 + 2 * $fudge))
	used=$(zfs list -o used -Hp ${ZFS_POOL_NAME}/dir)
	atf_check -o inline:$used'\n' zfs list -Hp -o usedds ${ZFS_POOL_NAME}/dir

	# Both datasets do not have snapshots.
	atf_check -o inline:'0\n' zfs list -Hp -o usedsnap ${ZFS_POOL_NAME}
	atf_check -o inline:'0\n' zfs list -Hp -o usedsnap ${ZFS_POOL_NAME}/dir
}
used_space_props_cleanup()
{
	common_cleanup
}
911
# Verify that file permissions are set properly.  Make sure that non-executable
# files can't be executed.
atf_test_case perms cleanup
perms_body()
{
	local mode

	create_test_dirs
	cd $TEST_INPUTS_DIR

	# One file per mode 0000..0777; each file is named after its mode
	# and contains a trivial shell command.
	for mode in $(seq 0 511); do
		mode=$(printf "%04o\n" $mode)
		echo 'echo a' > $mode
		atf_check chmod $mode $mode
	done

	cd -

	atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image

	check_image_contents

	for mode in $(seq 0 511); do
		mode=$(printf "%04o\n" $mode)
		# No execute bit at all: even root (the test runner) is denied.
		if [ $(($mode & 0111)) -eq 0 ]; then
			atf_check -s not-exit:0 -e match:"Permission denied" \
			    ${TEST_INPUTS_DIR}/$mode
		fi
		# No other-execute bit: the unprivileged "tests" user is denied.
		if [ $(($mode & 0001)) -eq 0 ]; then
			atf_check -s not-exit:0 -e match:"Permission denied" \
			    su -m tests -c ${TEST_INPUTS_DIR}/$mode
		fi
	done
}
perms_cleanup()
{
	common_cleanup
}
953
#
# Verify that -T timestamps are honored for directories.
#
atf_test_case T_flag_dir cleanup
T_flag_dir_body()
{
	# Declare locally for consistency with the other test bodies.
	local timestamp

	timestamp=1742574909
	create_test_dirs
	mkdir -p $TEST_INPUTS_DIR/dir1

	atf_check $MAKEFS -T $timestamp -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_INPUTS_DIR

	import_image
	# "stat -s" prints st_*=value assignments suitable for eval.
	eval $(stat -s  $TEST_MOUNT_DIR/dir1)
	atf_check_equal $st_atime $timestamp
	atf_check_equal $st_mtime $timestamp
	atf_check_equal $st_ctime $timestamp
}

T_flag_dir_cleanup()
{
	common_cleanup
}
978
#
# Verify that timestamps from an mtree spec (-F) take precedence over -T.
#
atf_test_case T_flag_F_flag cleanup
T_flag_F_flag_body()
{
	# Declare locally for consistency with the other test bodies.
	local timestamp_F timestamp_T

	timestamp_F=1742574909
	timestamp_T=1742574910
	create_test_dirs
	mkdir -p $TEST_INPUTS_DIR/dir1

	atf_check -o save:$TEST_SPEC_FILE $MTREE -c -p $TEST_INPUTS_DIR
	change_mtree_timestamp $TEST_SPEC_FILE $timestamp_F
	atf_check \
	    $MAKEFS -F $TEST_SPEC_FILE -T $timestamp_T -s 10g -o rootpath=/ \
	    -o poolname=$ZFS_POOL_NAME $TEST_IMAGE $TEST_INPUTS_DIR

	import_image
	# "stat -s" prints st_*=value assignments suitable for eval.
	eval $(stat -s  $TEST_MOUNT_DIR/dir1)
	atf_check_equal $st_atime $timestamp_F
	atf_check_equal $st_mtime $timestamp_F
	# atf_check_equal $st_ctime $timestamp_F
}

T_flag_F_flag_cleanup()
{
	common_cleanup
}
1004
#
# Verify that -T timestamps are honored when building from an mtree spec.
#
atf_test_case T_flag_mtree cleanup
T_flag_mtree_body()
{
	# Declare locally for consistency with the other test bodies.
	local timestamp

	timestamp=1742574909
	create_test_dirs
	mkdir -p $TEST_INPUTS_DIR/dir1

	atf_check -o save:$TEST_SPEC_FILE $MTREE -c -p $TEST_INPUTS_DIR
	atf_check $MAKEFS -T $timestamp -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
	    $TEST_IMAGE $TEST_SPEC_FILE

	import_image
	# "stat -s" prints st_*=value assignments suitable for eval.
	eval $(stat -s  $TEST_MOUNT_DIR/dir1)
	atf_check_equal $st_atime $timestamp
	atf_check_equal $st_mtime $timestamp
	atf_check_equal $st_ctime $timestamp
}

T_flag_mtree_cleanup()
{
	common_cleanup
}
1027
#
# Register every test case; order matches the definitions above.
#
atf_init_test_cases()
{
	local tc

	for tc in \
	    autoexpand \
	    basic \
	    compression \
	    dataset_removal \
	    devfs \
	    empty_dir \
	    empty_fs \
	    file_extend \
	    file_sizes \
	    hard_links \
	    indirect_dnode_array \
	    long_file_name \
	    multi_dataset_1 \
	    multi_dataset_2 \
	    multi_dataset_3 \
	    multi_dataset_4 \
	    multi_staging_1 \
	    multi_staging_2 \
	    reproducible \
	    snapshot \
	    soft_links \
	    root_props \
	    used_space_props \
	    perms \
	    T_flag_dir \
	    T_flag_F_flag \
	    T_flag_mtree; do
		atf_add_test_case $tc
	done

	# XXXMJ tests:
	# - test with different ashifts (at least, 9 and 12), different image sizes
	# - create datasets in imported pool
}
1062