1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or https://opensource.org/licenses/CDDL-1.0.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
24# Copyright (c) 2012, 2020, Delphix. All rights reserved.
25# Copyright (c) 2017, Tim Chase. All rights reserved.
26# Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
27# Copyright (c) 2017, Lawrence Livermore National Security LLC.
28# Copyright (c) 2017, Datto Inc. All rights reserved.
29# Copyright (c) 2017, Open-E Inc. All rights reserved.
30# Copyright (c) 2021, The FreeBSD Foundation.
31# Use is subject to license terms.
32#
33
34. ${STF_SUITE}/include/tunables.cfg
35
36. ${STF_TOOLS}/include/logapi.shlib
37. ${STF_SUITE}/include/math.shlib
38. ${STF_SUITE}/include/blkdev.shlib
39
40#
41# Apply constrained path when available.  This is required since the
42# PATH may have been modified by sudo's secure_path behavior.
43#
44if [ -n "$STF_PATH" ]; then
45	export PATH="$STF_PATH"
46fi
47
48#
49# Generic dot version comparison function
50#
51# Returns success when version $1 is greater than or equal to $2.
52#
53function compare_version_gte
54{
55	[ "$(printf "$1\n$2" | sort -V | tail -n1)" = "$1" ]
56}
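
#
# Illustrative usage only (a sketch; the version strings below are arbitrary
# examples, not values used elsewhere in this suite):
#
#	compare_version_gte "2.1.4" "2.1.0" && echo "at least 2.1.0"
#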
57
58# Linux kernel version comparison function
59#
60# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
61#
62# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
63#
64function linux_version
65{
66	typeset ver="$1"
67
68	[ -z "$ver" ] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
69
70	typeset version major minor _
71	IFS='.' read -r version major minor _ <<<"$ver"
72
73	[ -z "$version" ] && version=0
74	[ -z "$major" ] && major=0
75	[ -z "$minor" ] && minor=0
76
77	echo $((version * 100000 + major * 1000 + minor))
78}
79
80# Determine if this is a Linux test system
81#
82# Return 0 if the platform is Linux, 1 otherwise
83
84function is_linux
85{
86	[ "$UNAME" = "Linux" ]
87}
88
89# Determine if this is an illumos test system
90#
91# Return 0 if the platform is illumos, 1 otherwise
92function is_illumos
93{
94	[ "$UNAME" = "illumos" ]
95}
96
97# Determine if this is a FreeBSD test system
98#
99# Return 0 if the platform is FreeBSD, 1 otherwise
100
101function is_freebsd
102{
103	[ "$UNAME" = "FreeBSD" ]
104}
105
106# Determine if this is a 32-bit system
107#
108# Return 0 if the platform is 32-bit, 1 otherwise
109
110function is_32bit
111{
112	[ $(getconf LONG_BIT) = "32" ]
113}
114
115# Determine if kmemleak is enabled
116#
117# Return 0 if kmemleak is enabled, 1 otherwise
118
119function is_kmemleak
120{
121	is_linux && [ -e /sys/kernel/debug/kmemleak ]
122}
123
124# Determine whether a dataset is mounted
125#
126# $1 dataset name
127# $2 filesystem type; optional - defaults to zfs
128#
129# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
130
131function ismounted
132{
133	typeset fstype=$2
134	[[ -z $fstype ]] && fstype=zfs
135	typeset out dir name
136
137	case $fstype in
138		zfs)
139			if [[ "$1" == "/"* ]] ; then
140				! zfs mount | awk -v fs="$1" '$2 == fs {exit 1}'
141			else
142				! zfs mount | awk -v ds="$1" '$1 == ds {exit 1}'
143			fi
144		;;
145		ufs|nfs)
146			if is_freebsd; then
147				mount -pt $fstype | while read dev dir _t _flags; do
148					[[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
149				done
150			else
151				out=$(df -F $fstype $1 2>/dev/null) || return
152
153				dir=${out%%\(*}
154				dir=${dir%% *}
155				name=${out##*\(}
156				name=${name%%\)*}
157				name=${name%% *}
158
159				[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
160			fi
161		;;
162		ext*)
163			df -t $fstype $1 > /dev/null 2>&1
164		;;
165		zvol)
166			if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
167				link=$(readlink -f $ZVOL_DEVDIR/$1)
168				[[ -n "$link" ]] && \
169					mount | grep -q "^$link" && \
170						return 0
171			fi
172		;;
173		*)
174			false
175		;;
176	esac
177}
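
#
# Illustrative usage only (a sketch; either a dataset name or a mountpoint may
# be passed, and $TESTPOOL/$TESTFS and $TESTDIR are the suite's defaults):
#
#	ismounted $TESTPOOL/$TESTFS && log_must zfs unmount $TESTPOOL/$TESTFS
#	ismounted $TESTDIR || log_note "$TESTDIR is not mounted"
#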
178
179# Return 0 if a dataset is mounted; 1 otherwise
180#
181# $1 dataset name
182# $2 filesystem type; optional - defaults to zfs
183
184function mounted
185{
186	ismounted $1 $2
187}
188
189# Return 0 if a dataset is unmounted; 1 otherwise
190#
191# $1 dataset name
192# $2 filesystem type; optional - defaults to zfs
193
194function unmounted
195{
196	! ismounted $1 $2
197}
198
199function default_setup
200{
201	default_setup_noexit "$@"
202
203	log_pass
204}
205
206function default_setup_no_mountpoint
207{
208	default_setup_noexit "$1" "$2" "$3" "yes"
209
210	log_pass
211}
212
213#
214# Given a list of disks, set up storage pools and datasets.
215#
216function default_setup_noexit
217{
218	typeset disklist=$1
219	typeset container=$2
220	typeset volume=$3
221	typeset no_mountpoint=$4
222	log_note begin default_setup_noexit
223
224	if is_global_zone; then
225		if poolexists $TESTPOOL ; then
226			destroy_pool $TESTPOOL
227		fi
228		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
229		log_must zpool create -f $TESTPOOL $disklist
230	else
231		reexport_pool
232	fi
233
234	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
235	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
236
237	log_must zfs create $TESTPOOL/$TESTFS
238	if [[ -z $no_mountpoint ]]; then
239		log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
240	fi
241
242	if [[ -n $container ]]; then
243		rm -rf $TESTDIR1  || \
244			log_unresolved Could not remove $TESTDIR1
245		mkdir -p $TESTDIR1 || \
246			log_unresolved Could not create $TESTDIR1
247
248		log_must zfs create $TESTPOOL/$TESTCTR
249		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
250		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
251		if [[ -z $no_mountpoint ]]; then
252			log_must zfs set mountpoint=$TESTDIR1 \
253			    $TESTPOOL/$TESTCTR/$TESTFS1
254		fi
255	fi
256
257	if [[ -n $volume ]]; then
258		if is_global_zone ; then
259			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
260			block_device_wait
261		else
262			log_must zfs create $TESTPOOL/$TESTVOL
263		fi
264	fi
265}
266
267#
268# Given a list of disks, set up a storage pool, a file system and
269# a container.
270#
271function default_container_setup
272{
273	typeset disklist=$1
274
275	default_setup "$disklist" "true"
276}
277
278#
279# Given a list of disks, set up a storage pool, a file system
280# and a volume.
281#
282function default_volume_setup
283{
284	typeset disklist=$1
285
286	default_setup "$disklist" "" "true"
287}
288
289#
290# Given a list of disks, set up a storage pool, a file system,
291# a container and a volume.
292#
293function default_container_volume_setup
294{
295	typeset disklist=$1
296
297	default_setup "$disklist" "true" "true"
298}
299
300#
301# Create a snapshot of a filesystem or volume. By default, create a snapshot of
302# the filesystem.
303#
304# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
305# $2 snapshot name. Default, $TESTSNAP
306#
307function create_snapshot
308{
309	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
310	typeset snap=${2:-$TESTSNAP}
311
312	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
313	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
314
315	if snapexists $fs_vol@$snap; then
316		log_fail "$fs_vol@$snap already exists."
317	fi
318	datasetexists $fs_vol || \
319		log_fail "$fs_vol must exist."
320
321	log_must zfs snapshot $fs_vol@$snap
322}
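
#
# Illustrative usage only (a sketch; "vsnap" is a hypothetical snapshot name):
#
#	create_snapshot				# $TESTPOOL/$TESTFS@$TESTSNAP
#	create_snapshot $TESTPOOL/$TESTVOL vsnap
#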
323
324#
325# Create a clone from a snapshot, default clone name is $TESTCLONE.
326#
327# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
328# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
329#
330function create_clone   # snapshot clone
331{
332	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
333	typeset clone=${2:-$TESTPOOL/$TESTCLONE}
334
335	[[ -z $snap ]] && \
336		log_fail "Snapshot name is undefined."
337	[[ -z $clone ]] && \
338		log_fail "Clone name is undefined."
339
340	log_must zfs clone $snap $clone
341}
342
343#
344# Create a bookmark of the given snapshot.  By default, create a bookmark on
345# the filesystem.
346#
347# $1 Existing filesystem or volume name. Default, $TESTFS
348# $2 Existing snapshot name. Default, $TESTSNAP
349# $3 bookmark name. Default, $TESTBKMARK
350#
351function create_bookmark
352{
353	typeset fs_vol=${1:-$TESTFS}
354	typeset snap=${2:-$TESTSNAP}
355	typeset bkmark=${3:-$TESTBKMARK}
356
357	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
358	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
359	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
360
361	if bkmarkexists $fs_vol#$bkmark; then
362		log_fail "$fs_vol#$bkmark already exists."
363	fi
364	datasetexists $fs_vol || \
365		log_fail "$fs_vol must exist."
366	snapexists $fs_vol@$snap || \
367		log_fail "$fs_vol@$snap must exist."
368
369	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
370}
371
372#
373# Create a temporary clone result of an interrupted resumable 'zfs receive'
374# $1 Destination filesystem name. Must not exist, will be created as the result
375#    of this function along with its %recv temporary clone
376# $2 Source filesystem name. Must not exist, will be created and destroyed
377#
378function create_recv_clone
379{
380	typeset recvfs="$1"
381	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
382	typeset snap="$sendfs@snap1"
383	typeset incr="$sendfs@snap2"
384	typeset mountpoint="$TESTDIR/create_recv_clone"
385	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
386
387	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
388
389	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
390	datasetexists $sendfs && log_fail "Send filesystem must not exist."
391
392	log_must zfs create -o compression=off -o mountpoint="$mountpoint" $sendfs
393	log_must zfs snapshot $snap
394	log_must eval "zfs send $snap | zfs recv -u $recvfs"
395	log_must mkfile 1m "$mountpoint/data"
396	log_must zfs snapshot $incr
397	log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
398	    iflag=fullblock > $sendfile"
399	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
400	destroy_dataset "$sendfs" "-r"
401	log_must rm -f "$sendfile"
402
403	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
404		log_fail "Error creating temporary $recvfs/%recv clone"
405	fi
406}
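
#
# Illustrative usage only (a sketch; "recvfs" is a hypothetical dataset name):
#
#	create_recv_clone $TESTPOOL/recvfs
#	destroy_dataset $TESTPOOL/recvfs "-r"	# clean up when done
#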
407
408function default_mirror_setup
409{
410	default_mirror_setup_noexit $1 $2 $3
411
412	log_pass
413}
414
415#
416# Given a pair of disks, set up a storage pool and dataset for the mirror
417# @parameters: $1 the primary side of the mirror
418#   $2 the secondary side of the mirror
419# @uses: ZPOOL ZFS TESTPOOL TESTFS
420function default_mirror_setup_noexit
421{
422	readonly func="default_mirror_setup_noexit"
423	typeset primary=$1
424	typeset secondary=$2
425
426	[[ -z $primary ]] && \
427		log_fail "$func: No parameters passed"
428	[[ -z $secondary ]] && \
429		log_fail "$func: No secondary partition passed"
430	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
431	log_must zpool create -f $TESTPOOL mirror $@
432	log_must zfs create $TESTPOOL/$TESTFS
433	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
434}
435
436#
437# Destroy the configured testpool mirrors.
438# The mirrors are of the form ${TESTPOOL}{number}.
439# @uses: ZPOOL ZFS TESTPOOL
440function destroy_mirrors
441{
442	default_cleanup_noexit
443
444	log_pass
445}
446
447function default_raidz_setup
448{
449	default_raidz_setup_noexit "$*"
450
451	log_pass
452}
453
454#
455# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
456# $1 the list of disks
457#
458function default_raidz_setup_noexit
459{
460	typeset disklist="$*"
461	disks=(${disklist[*]})
462
463	if [[ ${#disks[*]} -lt 2 ]]; then
464		log_fail "A raid-z requires a minimum of two disks."
465	fi
466
467	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
468	log_must zpool create -f $TESTPOOL raidz $disklist
469	log_must zfs create $TESTPOOL/$TESTFS
470	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
471}
472
473#
474# Common function used to cleanup storage pools and datasets.
475#
476# Invoked at the start of the test suite to ensure the system
477# is in a known state, and also at the end of each set of
478# sub-tests to ensure errors from one set of tests don't
479# impact the execution of the next set.
480
481function default_cleanup
482{
483	default_cleanup_noexit
484
485	log_pass
486}
487
488#
489# Utility function used to list all available pool names.
490#
491# NOTE: $KEEP is a variable containing pool names, separated by a newline
492# character, that must be excluded from the returned list.
493#
494function get_all_pools
495{
496	zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
497}
498
499function default_cleanup_noexit
500{
501	typeset pool=""
502	#
503	# Destroying the pool will also destroy any
504	# filesystems it contains.
505	#
506	if is_global_zone; then
507		zfs unmount -a > /dev/null 2>&1
508		ALL_POOLS=$(get_all_pools)
509		# Here, we loop through the pools we're allowed to
510		# destroy, only destroying them if it's safe to do
511		# so.
512		while [ -n "${ALL_POOLS}" ]
513		do
514			for pool in ${ALL_POOLS}
515			do
516				if safe_to_destroy_pool $pool ;
517				then
518					destroy_pool $pool
519				fi
520			done
521			ALL_POOLS=$(get_all_pools)
522		done
523
524		zfs mount -a
525	else
526		typeset fs=""
527		for fs in $(zfs list -H -o name \
528		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
529			destroy_dataset "$fs" "-Rf"
530		done
531
532		# Clean up here to avoid leaving garbage directories behind.
533		for fs in $(zfs list -H -o name); do
534			[[ $fs == /$ZONE_POOL ]] && continue
535			[[ -d $fs ]] && log_must rm -rf $fs/*
536		done
537
538		#
539		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
540		# the default value
541		#
542		for fs in $(zfs list -H -o name); do
543			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
544				log_must zfs set reservation=none $fs
545				log_must zfs set recordsize=128K $fs
546				log_must zfs set mountpoint=/$fs $fs
547				typeset enc=$(get_prop encryption $fs)
548				if [ -z "$enc" ] || [ "$enc" = "off" ]; then
549					log_must zfs set checksum=on $fs
550				fi
551				log_must zfs set compression=off $fs
552				log_must zfs set atime=on $fs
553				log_must zfs set devices=off $fs
554				log_must zfs set exec=on $fs
555				log_must zfs set setuid=on $fs
556				log_must zfs set readonly=off $fs
557				log_must zfs set snapdir=hidden $fs
558				log_must zfs set aclmode=groupmask $fs
559				log_must zfs set aclinherit=secure $fs
560			fi
561		done
562	fi
563
564	[[ -d $TESTDIR ]] && \
565		log_must rm -rf $TESTDIR
566
567	disk1=${DISKS%% *}
568	if is_mpath_device $disk1; then
569		delete_partitions
570	fi
571
572	rm -f $TEST_BASE_DIR/{err,out}
573}
574
575
576#
577# Common function used to cleanup storage pools, file systems
578# and containers.
579#
580function default_container_cleanup
581{
582	if ! is_global_zone; then
583		reexport_pool
584	fi
585
586	ismounted $TESTPOOL/$TESTCTR/$TESTFS1 &&
587	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
588
589	destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
590	destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
591
592	[[ -e $TESTDIR1 ]] && \
593	    log_must rm -rf $TESTDIR1
594
595	default_cleanup
596}
597
598#
599# Common function used to clean up a snapshot of a file system or volume.
600# Defaults to deleting the file system's snapshot.
601#
602# $1 snapshot name
603#
604function destroy_snapshot
605{
606	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
607
608	if ! snapexists $snap; then
609		log_fail "'$snap' does not exist."
610	fi
611
612	#
613	# The value returned by 'get_prop' does not reflect the real mountpoint
614	# when the snapshot is unmounted, so first check whether this snapshot
615	# is currently mounted on the system.
616	#
617	typeset mtpt=""
618	if ismounted $snap; then
619		mtpt=$(get_prop mountpoint $snap)
620	fi
621
622	destroy_dataset "$snap"
623	[[ $mtpt != "" && -d $mtpt ]] && \
624		log_must rm -rf $mtpt
625}
626
627#
628# Common function used to clean up a clone.
629#
630# $1 clone name
631#
632function destroy_clone
633{
634	typeset clone=${1:-$TESTPOOL/$TESTCLONE}
635
636	if ! datasetexists $clone; then
637		log_fail "'$clone' does not exist."
638	fi
639
640	# For the same reason as in destroy_snapshot
641	typeset mtpt=""
642	if ismounted $clone; then
643		mtpt=$(get_prop mountpoint $clone)
644	fi
645
646	destroy_dataset "$clone"
647	[[ $mtpt != "" && -d $mtpt ]] && \
648		log_must rm -rf $mtpt
649}
650
651#
652# Common function used to clean up a bookmark of a file system or volume.
653# Defaults to deleting the file system's bookmark.
654#
655# $1 bookmark name
656#
657function destroy_bookmark
658{
659	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
660
661	if ! bkmarkexists $bkmark; then
662		log_fail "'$bkmark' does not exist."
663	fi
664
665	destroy_dataset "$bkmark"
666}
667
668# Return 0 if a snapshot exists; $? otherwise
669#
670# $1 - snapshot name
671
672function snapexists
673{
674	zfs list -H -t snapshot "$1" > /dev/null 2>&1
675}
676
677#
678# Return 0 if a bookmark exists; $? otherwise
679#
680# $1 - bookmark name
681#
682function bkmarkexists
683{
684	zfs list -H -t bookmark "$1" > /dev/null 2>&1
685}
686
687#
688# Return 0 if a hold exists; $? otherwise
689#
690# $1 - hold tag
691# $2 - snapshot name
692#
693function holdexists
694{
695	! zfs holds "$2" | awk -v t="$1" '$2 ~ t { exit 1 }'
696}
697
698#
699# Set a property to a certain value on a dataset.
700# Sets a property of the dataset to the value as passed in.
701# @param:
702#	$1 dataset whose property is being set
703#	$2 property to set
704#	$3 value to set property to
705# @return:
706#	0 if the property could be set.
707#	non-zero otherwise.
708# @use: ZFS
709#
710function dataset_setprop
711{
712	typeset fn=dataset_setprop
713
714	if (($# < 3)); then
715		log_note "$fn: Insufficient parameters (need 3, had $#)"
716		return 1
717	fi
718	typeset output=
719	output=$(zfs set $2=$3 $1 2>&1)
720	typeset rv=$?
721	if ((rv != 0)); then
722		log_note "Setting property on $1 failed."
723		log_note "property $2=$3"
724		log_note "Return Code: $rv"
725		log_note "Output: $output"
726		return $rv
727	fi
728	return 0
729}
730
731#
732# Check a numeric assertion
733# @parameter: $@ the assertion to check
734# @output: big loud notice if assertion failed
735# @use: log_fail
736#
737function assert
738{
739	(($@)) || log_fail "$@"
740}
741
742#
743# Function to format the partition size of a disk.
744# Given a whole disk (cxtxdx), reduces all its partitions
745# to size 0.
746#
747function zero_partitions #<whole_disk_name>
748{
749	typeset diskname=$1
750	typeset i
751
752	if is_freebsd; then
753		gpart destroy -F $diskname
754	elif is_linux; then
755		DSK=$DEV_DSKDIR/$diskname
756		DSK=$(echo $DSK | sed -e "s|//|/|g")
757		log_must parted $DSK -s -- mklabel gpt
758		blockdev --rereadpt $DSK 2>/dev/null
759		block_device_wait
760	else
761		for i in 0 1 3 4 5 6 7
762		do
763			log_must set_partition $i "" 0mb $diskname
764		done
765	fi
766
767	return 0
768}
769
770#
771# Given a slice, size and disk, this function
772# formats the slice to the specified size.
773# Size should be specified with units as per
774# the `format` command requirements, e.g. 100mb, 3gb
775#
776# NOTE: This entire interface is problematic for the Linux parted utility
777# which requires the end of the partition to be specified.  It would be
778# best to retire this interface and replace it with something more flexible.
779# At the moment a best effort is made.
780#
781# arguments: <slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
782function set_partition
783{
784	typeset -i slicenum=$1
785	typeset start=$2
786	typeset size=$3
787	typeset disk=${4#$DEV_DSKDIR/}
788	disk=${disk#$DEV_RDSKDIR/}
789
790	case "$UNAME" in
791	Linux)
792		if [[ -z $size || -z $disk ]]; then
793			log_fail "The size or disk name is unspecified."
794		fi
795		disk=$DEV_DSKDIR/$disk
796		typeset size_mb=${size%%[mMgG]}
797
798		size_mb=${size_mb%%[mMgG][bB]}
799		if [[ ${size:1:1} == 'g' ]]; then
800			((size_mb = size_mb * 1024))
801		fi
802
803		# Create GPT partition table when setting slice 0 or
804		# when the device doesn't already contain a GPT label.
805		parted $disk -s -- print 1 >/dev/null
806		typeset ret_val=$?
807		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
808			if ! parted $disk -s -- mklabel gpt; then
809				log_note "Failed to create GPT partition table on $disk"
810				return 1
811			fi
812		fi
813
814		# When no start is given, align on the first cylinder.
815		if [[ -z "$start" ]]; then
816			start=1
817		fi
818
819		# Determine the cylinder size for the device and, using
820		# that, calculate the end offset in cylinders.
821		typeset -i cly_size_kb=0
822		cly_size_kb=$(parted -m $disk -s -- unit cyl print |
823			awk -F '[:k.]' 'NR == 3 {print $4}')
824		((end = (size_mb * 1024 / cly_size_kb) + start))
825
826		parted $disk -s -- \
827		    mkpart part$slicenum ${start}cyl ${end}cyl
828		typeset ret_val=$?
829		if [[ $ret_val -ne 0 ]]; then
830			log_note "Failed to create partition $slicenum on $disk"
831			return 1
832		fi
833
834		blockdev --rereadpt $disk 2>/dev/null
835		block_device_wait $disk
836		;;
837	FreeBSD)
838		if [[ -z $size || -z $disk ]]; then
839			log_fail "The size or disk name is unspecified."
840		fi
841		disk=$DEV_DSKDIR/$disk
842
843		if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
844			gpart destroy -F $disk >/dev/null 2>&1
845			if ! gpart create -s GPT $disk; then
846				log_note "Failed to create GPT partition table on $disk"
847				return 1
848			fi
849		fi
850
851		typeset index=$((slicenum + 1))
852
853		if [[ -n $start ]]; then
854			start="-b $start"
855		fi
856		gpart add -t freebsd-zfs $start -s $size -i $index $disk
		typeset ret_val=$?
857		if [[ $ret_val -ne 0 ]]; then
858			log_note "Failed to create partition $slicenum on $disk"
859			return 1
860		fi
861
862		block_device_wait $disk
863		;;
864	*)
865		if [[ -z $slicenum || -z $size || -z $disk ]]; then
866			log_fail "The slice, size or disk name is unspecified."
867		fi
868
869		typeset format_file=/var/tmp/format_in.$$
870
871		echo "partition" >$format_file
872		echo "$slicenum" >> $format_file
873		echo "" >> $format_file
874		echo "" >> $format_file
875		echo "$start" >> $format_file
876		echo "$size" >> $format_file
877		echo "label" >> $format_file
878		echo "" >> $format_file
879		echo "q" >> $format_file
880		echo "q" >> $format_file
881
882		format -e -s -d $disk -f $format_file
883		typeset ret_val=$?
884		rm -f $format_file
885		;;
886	esac
887
888	if [[ $ret_val -ne 0 ]]; then
889		log_note "Unable to format $disk slice $slicenum to $size"
890		return 1
891	fi
892	return 0
893}
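
#
# Illustrative usage only (a sketch; the first disk in $DISKS and the 2gb
# slice size are arbitrary example choices):
#
#	typeset disk=${DISKS%% *}
#	log_must set_partition 0 "" 2gb $disk
#	log_must set_partition 1 "$(get_endslice $disk 0)" 2gb $disk
#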
894
895#
896# Delete all partitions on all disks - this is specifically for the use of multipath
897# devices which currently can only be used in the test suite as raw/un-partitioned
898# devices (i.e. a zpool cannot be created on a whole mpath device that has partitions)
899#
900function delete_partitions
901{
902	typeset disk
903
904	if [[ -z $DISKSARRAY ]]; then
905		DISKSARRAY=$DISKS
906	fi
907
908	if is_linux; then
909		typeset -i part
910		for disk in $DISKSARRAY; do
911			for (( part = 1; part < MAX_PARTITIONS; part++ )); do
912				typeset partition=${disk}${SLICE_PREFIX}${part}
913				parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
914				if lsblk | grep -qF ${partition}; then
915					log_fail "Partition ${partition} not deleted"
916				else
917					log_note "Partition ${partition} deleted"
918				fi
919			done
920		done
921	elif is_freebsd; then
922		for disk in $DISKSARRAY; do
923			if gpart destroy -F $disk; then
924				log_note "Partitions for ${disk} deleted"
925			else
926				log_fail "Partitions for ${disk} not deleted"
927			fi
928		done
929	fi
930}
931
932#
933# Get the end cyl of the given slice
934#
935function get_endslice #<disk> <slice>
936{
937	typeset disk=$1
938	typeset slice=$2
939	if [[ -z $disk || -z $slice ]] ; then
940		log_fail "The disk name or slice number is unspecified."
941	fi
942
943	case "$UNAME" in
944	Linux)
945		endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
946			awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}')
947		((endcyl = (endcyl + 1)))
948		;;
949	FreeBSD)
950		disk=${disk#/dev/zvol/}
951		disk=${disk%p*}
952		slice=$((slice + 1))
953		endcyl=$(gpart show $disk | \
954			awk -v slice=$slice '$3 == slice { print $1 + $2 }')
955		;;
956	*)
957		disk=${disk#/dev/dsk/}
958		disk=${disk#/dev/rdsk/}
959		disk=${disk%s*}
960
961		typeset -i ratio=0
962		ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
963		    awk '/sectors\/cylinder/ {print $2}')
964
965		if ((ratio == 0)); then
966			return
967		fi
968
969		typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
970		    awk -v token="$slice" '$1 == token {print $6}')
971
972		((endcyl = (endcyl + 1) / ratio))
973		;;
974	esac
975
976	echo $endcyl
977}
978
979
980#
981# Given a size, disk and total slice number, this function formats the
982# disk slices from 0 to the total slice number with the same specified
983# size.
984#
985function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
986{
987	typeset -i i=0
988	typeset slice_size=$1
989	typeset disk_name=$2
990	typeset total_slices=$3
991	typeset cyl
992
993	zero_partitions $disk_name
994	while ((i < $total_slices)); do
995		if ! is_linux; then
996			if ((i == 2)); then
997				((i = i + 1))
998				continue
999			fi
1000		fi
1001		log_must set_partition $i "$cyl" $slice_size $disk_name
1002		cyl=$(get_endslice $disk_name $i)
1003		((i = i+1))
1004	done
1005}
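
#
# Illustrative usage only (a sketch; carves the first disk in $DISKS into
# three slices of an arbitrary 1gb each):
#
#	partition_disk 1gb ${DISKS%% *} 3
#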
1006
1007#
1008# This function continues to write to a filenum number of files into dirnum
1009# number of directories until either file_write returns an error or the
1010# maximum number of files per directory has been written.
1011#
1012# Usage:
1013# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1014#
1015# Return value: 0 on success
1016#		non 0 on error
1017#
1018# Where :
1019#	destdir:    is the directory where everything is to be created under
1020#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
1021#	filenum:    the maximum number of files per subdirectory
1022#	bytes:	    number of bytes to write
1023#	num_writes: number of times to write out bytes
1024#	data:	    the data that will be written
1025#
1026#	E.g.
1027#	fill_fs /testdir 20 25 1024 256 0
1028#
1029# Note: bytes * num_writes equals the size of the testfile
1030#
1031function fill_fs # destdir dirnum filenum bytes num_writes data
1032{
1033	typeset destdir=${1:-$TESTDIR}
1034	typeset -i dirnum=${2:-50}
1035	typeset -i filenum=${3:-50}
1036	typeset -i bytes=${4:-8192}
1037	typeset -i num_writes=${5:-10240}
1038	typeset data=${6:-0}
1039
1040	mkdir -p $destdir/{1..$dirnum}
1041	for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
1042		file_write -o create -f $f -b $bytes -c $num_writes -d $data \
1043		|| return
1044	done
1045}
1046
1047# Get the specified dataset property in parsable format or fail
1048function get_prop # property dataset
1049{
1050	typeset prop=$1
1051	typeset dataset=$2
1052
1053	zfs get -Hpo value "$prop" "$dataset" || log_fail "zfs get $prop $dataset"
1054}
1055
1056# Get the specified pool property in parsable format or fail
1057function get_pool_prop # property pool
1058{
1059	typeset prop=$1
1060	typeset pool=$2
1061
1062	zpool get -Hpo value "$prop" "$pool" || log_fail "zpool get $prop $pool"
1063}
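
#
# Illustrative usage only (a sketch using the suite's $TESTPOOL/$TESTFS names):
#
#	typeset used=$(get_prop used $TESTPOOL/$TESTFS)
#	typeset health=$(get_pool_prop health $TESTPOOL)
#	log_note "used=$used health=$health"
#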
1064
1065# Return 0 if a pool exists; $? otherwise
1066#
1067# $1 - pool name
1068
1069function poolexists
1070{
1071	typeset pool=$1
1072
1073	if [[ -z $pool ]]; then
1074		log_note "No pool name given."
1075		return 1
1076	fi
1077
1078	zpool get name "$pool" > /dev/null 2>&1
1079}
1080
1081# Return 0 if all the specified datasets exist; $? otherwise
1082#
1083# $1-n  dataset name
1084function datasetexists
1085{
1086	if (($# == 0)); then
1087		log_note "No dataset name given."
1088		return 1
1089	fi
1090
1091	zfs get name "$@" > /dev/null 2>&1
1092}
1093
1094# return 0 if none of the specified datasets exists, otherwise return 1.
1095#
1096# $1-n  dataset name
1097function datasetnonexists
1098{
1099	if (($# == 0)); then
1100		log_note "No dataset name given."
1101		return 1
1102	fi
1103
1104	while (($# > 0)); do
1105		zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1106		    && return 1
1107		shift
1108	done
1109
1110	return 0
1111}
1112
1113# FreeBSD breaks exports(5) at whitespace and doesn't process escapes
1114# Solaris just breaks
1115#
1116# cf. https://github.com/openzfs/zfs/pull/13165#issuecomment-1059845807
1117#
1118# Linux can have spaces (which are \OOO-escaped),
1119# but can't have backslashes because they're parsed recursively
1120function shares_can_have_whitespace
1121{
1122	is_linux
1123}
1124
1125function is_shared_freebsd
1126{
1127	typeset fs=$1
1128
1129	pgrep -q mountd && showmount -E | grep -qx "$fs"
1130}
1131
1132function is_shared_illumos
1133{
1134	typeset fs=$1
1135	typeset mtpt
1136
1137	for mtpt in `share | awk '{print $2}'` ; do
1138		if [[ $mtpt == $fs ]] ; then
1139			return 0
1140		fi
1141	done
1142
1143	typeset stat=$(svcs -H -o STA nfs/server:default)
1144	if [[ $stat != "ON" ]]; then
1145		log_note "Current nfs/server status: $stat"
1146	fi
1147
1148	return 1
1149}
1150
1151function is_shared_linux
1152{
1153	typeset fs=$1
1154	! exportfs -s | awk -v fs="${fs//\\/\\\\}" '/^\// && $1 == fs {exit 1}'
1155}
1156
1157#
1158# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1159#
1160# Returns 0 if shared, 1 otherwise.
1161#
1162function is_shared
1163{
1164	typeset fs=$1
1165	typeset mtpt
1166
1167	if [[ $fs != "/"* ]] ; then
1168		if datasetnonexists "$fs" ; then
1169			return 1
1170		else
1171			mtpt=$(get_prop mountpoint "$fs")
1172			case "$mtpt" in
1173				none|legacy|-) return 1
1174					;;
1175				*)	fs=$mtpt
1176					;;
1177			esac
1178		fi
1179	fi
1180
1181	case "$UNAME" in
1182	FreeBSD)	is_shared_freebsd "$fs"	;;
1183	Linux)		is_shared_linux "$fs"	;;
1184	*)		is_shared_illumos "$fs"	;;
1185	esac
1186}
1187
1188function is_exported_illumos
1189{
1190	typeset fs=$1
1191	typeset mtpt _
1192
1193	while read -r mtpt _; do
1194		[ "$mtpt" = "$fs" ] && return
1195	done < /etc/dfs/sharetab
1196
1197	return 1
1198}
1199
1200function is_exported_freebsd
1201{
1202	typeset fs=$1
1203	typeset mtpt _
1204
1205	while read -r mtpt _; do
1206		[ "$mtpt" = "$fs" ] && return
1207	done < /etc/zfs/exports
1208
1209	return 1
1210}
1211
1212function is_exported_linux
1213{
1214	typeset fs=$1
1215	typeset mtpt _
1216
1217	while read -r mtpt _; do
1218		[ "$(printf "$mtpt")" = "$fs" ] && return
1219	done < /etc/exports.d/zfs.exports
1220
1221	return 1
1222}
1223
1224#
1225# Given a mountpoint, or a dataset name, determine if it is exported via
1226# the os-specific NFS exports file.
1227#
1228# Returns 0 if exported, 1 otherwise.
1229#
1230function is_exported
1231{
1232	typeset fs=$1
1233	typeset mtpt
1234
1235	if [[ $fs != "/"* ]] ; then
1236		if datasetnonexists "$fs" ; then
1237			return 1
1238		else
1239			mtpt=$(get_prop mountpoint "$fs")
1240			case $mtpt in
1241				none|legacy|-) return 1
1242					;;
1243				*)	fs=$mtpt
1244					;;
1245			esac
1246		fi
1247	fi
1248
1249	case "$UNAME" in
1250	FreeBSD)	is_exported_freebsd "$fs"	;;
1251	Linux)		is_exported_linux "$fs"	;;
1252	*)		is_exported_illumos "$fs"	;;
1253	esac
1254}
1255
1256#
1257# Given a dataset name determine if it is shared via SMB.
1258#
1259# Returns 0 if shared, 1 otherwise.
1260#
1261function is_shared_smb
1262{
1263	typeset fs=$1
1264
1265	datasetexists "$fs" || return
1266
1267	if is_linux; then
1268		net usershare list | grep -xFq "${fs//[-\/]/_}"
1269	else
1270		log_note "SMB on $UNAME currently unsupported by the test framework"
1271		return 1
1272	fi
1273}
1274
1275#
1276# Given a mountpoint, determine if it is not shared via NFS.
1277#
1278# Returns 0 if not shared, 1 otherwise.
1279#
1280function not_shared
1281{
1282	! is_shared $1
1283}
1284
1285#
1286# Given a dataset determine if it is not shared via SMB.
1287#
1288# Returns 0 if not shared, 1 otherwise.
1289#
1290function not_shared_smb
1291{
1292	! is_shared_smb $1
1293}
1294
1295#
1296# Helper function to unshare a mountpoint.
1297#
1298function unshare_fs #fs
1299{
1300	typeset fs=$1
1301
1302	if is_shared $fs || is_shared_smb $fs; then
1303		log_must zfs unshare $fs
1304	fi
1305}
1306
1307#
1308# Helper function to share a NFS mountpoint.
1309#
1310function share_nfs #fs
1311{
1312	typeset fs=$1
1313
1314	is_shared "$fs" && return
1315
1316	case "$UNAME" in
1317	Linux)
1318		log_must exportfs "*:$fs"
1319		;;
1320	FreeBSD)
1321		typeset mountd
1322		read -r mountd < /var/run/mountd.pid
1323		log_must eval "printf '%s\t\n' \"$fs\" >> /etc/zfs/exports"
1324		log_must kill -s HUP "$mountd"
1325		;;
1326	*)
1327		log_must share -F nfs "$fs"
1328		;;
1329	esac
1330
1331	return 0
1332}
1333
1334#
1335# Helper function to unshare a NFS mountpoint.
1336#
1337function unshare_nfs #fs
1338{
1339	typeset fs=$1
1340
1341	! is_shared "$fs" && return
1342
1343	case "$UNAME" in
1344	Linux)
1345		log_must exportfs -u "*:$fs"
1346		;;
1347	FreeBSD)
1348		typeset mountd
1349		read -r mountd < /var/run/mountd.pid
1350		awk -v fs="${fs//\\/\\\\}" '$1 != fs' /etc/zfs/exports > /etc/zfs/exports.$$
1351		log_must mv /etc/zfs/exports.$$ /etc/zfs/exports
1352		log_must kill -s HUP "$mountd"
1353		;;
1354	*)
1355		log_must unshare -F nfs $fs
1356		;;
1357	esac
1358
1359	return 0
1360}
1361
1362#
1363# Helper function to show NFS shares.
1364#
1365function showshares_nfs
1366{
1367	case "$UNAME" in
1368	Linux)
1369		exportfs -v
1370		;;
1371	FreeBSD)
1372		showmount
1373		;;
1374	*)
1375		share -F nfs
1376		;;
1377	esac
1378}
1379
1380function check_nfs
1381{
1382	case "$UNAME" in
1383	Linux)
1384		exportfs -s
1385		;;
1386	FreeBSD)
1387		showmount -e
1388		;;
1389	*)
1390		log_unsupported "Unknown platform"
1391		;;
1392	esac || log_unsupported "The NFS utilities are not installed"
1393}
1394
1395#
1396# Check NFS server status and trigger it online.
1397#
1398function setup_nfs_server
1399{
1400	# Cannot share directory in non-global zone.
1401	#
1402	if ! is_global_zone; then
1403		log_note "Cannot trigger NFS server by sharing in LZ."
1404		return
1405	fi
1406
1407	if is_linux; then
1408		#
1409		# Re-synchronize /var/lib/nfs/etab with /etc/exports and
1410		# /etc/exports.d/* to provide a clean test environment.
1411		#
1412		log_must exportfs -r
1413
1414		log_note "NFS server must be started prior to running ZTS."
1415		return
1416	elif is_freebsd; then
1417		log_must kill -s HUP $(</var/run/mountd.pid)
1418
1419		log_note "NFS server must be started prior to running ZTS."
1420		return
1421	fi
1422
1423	typeset nfs_fmri="svc:/network/nfs/server:default"
1424	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1425		#
1426		# Only a real share operation can bring the NFS server
1427		# online permanently.
1428		#
1429		typeset dummy=/tmp/dummy
1430
1431		if [[ -d $dummy ]]; then
1432			log_must rm -rf $dummy
1433		fi
1434
1435		log_must mkdir $dummy
1436		log_must share $dummy
1437
1438		#
1439		# Wait for the FMRI's status to reach its final state.
1440		# While in transition, an asterisk (*) is appended to the
1441		# instance status and unsharing would revert it to 'DIS' again.
1442		#
1443		# Wait for at least 1 second.
1444		#
1445		log_must sleep 1
1446		timeout=10
1447		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1448		do
1449			log_must sleep 1
1450
1451			((timeout -= 1))
1452		done
1453
1454		log_must unshare $dummy
1455		log_must rm -rf $dummy
1456	fi
1457
1458	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1459}
1460
1461#
1462# Verify whether the calling process is in the global zone
1463#
1464# Return 0 if in the global zone, 1 if in a non-global zone
1465#
1466function is_global_zone
1467{
1468	if is_linux || is_freebsd; then
1469		return 0
1470	else
1471		typeset cur_zone=$(zonename 2>/dev/null)
1472		[ $cur_zone = "global" ]
1473	fi
1474}
1475
1476#
1477# Verify whether test is permitted to run from
1478# global zone, local zone, or both
1479#
1480# $1 zone limit, could be "global", "local", or "both"(no limit)
1481#
1482# Return 0 if permitted, otherwise exit with log_unsupported
1483#
1484function verify_runnable # zone limit
1485{
1486	typeset limit=$1
1487
1488	[[ -z $limit ]] && return 0
1489
1490	if is_global_zone ; then
1491		case $limit in
1492			global|both)
1493				;;
1494			local)	log_unsupported "Test is unable to run from "\
1495					"global zone."
1496				;;
1497			*)	log_note "Warning: unknown limit $limit - " \
1498					"use both."
1499				;;
1500		esac
1501	else
1502		case $limit in
1503			local|both)
1504				;;
1505			global)	log_unsupported "Test is unable to run from "\
1506					"local zone."
1507				;;
1508			*)	log_note "Warning: unknown limit $limit - " \
1509					"use both."
1510				;;
1511		esac
1512
1513		reexport_pool
1514	fi
1515
1516	return 0
1517}
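
#
# Illustrative usage only (a sketch of a typical test-script preamble; pick
# the zone limit that matches the test's requirements):
#
#	verify_runnable "global"
#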
1518
1519# Return 0 if created successfully or the pool already exists; $? otherwise
1520# Note: In local zones, this function should return 0 silently.
1521#
1522# $1 - pool name
1523# $2-n - [keyword] devs_list
1524
1525function create_pool #pool devs_list
1526{
1527	typeset pool=${1%%/*}
1528
1529	shift
1530
1531	if [[ -z $pool ]]; then
1532		log_note "Missing pool name."
1533		return 1
1534	fi
1535
1536	if poolexists $pool ; then
1537		destroy_pool $pool
1538	fi
1539
1540	if is_global_zone ; then
1541		[[ -d /$pool ]] && rm -rf /$pool
1542		log_must zpool create -f $pool $@
1543	fi
1544
1545	return 0
1546}
1547
1548# Return 0 if destroyed successfully; $? otherwise
1549# Note: In local zones, this function should return 0 silently.
1550#
1551# $1 - pool name
1552# Destroy pool with the given parameters.
1553
1554function destroy_pool #pool
1555{
1556	typeset pool=${1%%/*}
1557	typeset mtpt
1558
1559	if [[ -z $pool ]]; then
1560		log_note "No pool name given."
1561		return 1
1562	fi
1563
1564	if is_global_zone ; then
1565		if poolexists "$pool" ; then
1566			mtpt=$(get_prop mountpoint "$pool")
1567
1568			# At times, syseventd/udev activity can cause attempts
1569			# to destroy a pool to fail with EBUSY. We retry a few
1570			# times allowing failures before requiring the destroy
1571			# to succeed.
1572			log_must_busy zpool destroy -f $pool
1573
1574			[[ -d $mtpt ]] && \
1575				log_must rm -rf $mtpt
1576		else
1577			log_note "Pool does not exist. ($pool)"
1578			return 1
1579		fi
1580	fi
1581
1582	return 0
1583}
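
#
# Illustrative usage only (a sketch; creating a pool on the suite's $DISKS and
# destroying it again):
#
#	create_pool $TESTPOOL $DISKS
#	destroy_pool $TESTPOOL
#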
1584
1585# Return 0 if created successfully; $? otherwise
1586#
1587# $1 - dataset name
1588# $2-n - dataset options
1589
1590function create_dataset #dataset dataset_options
1591{
1592	typeset dataset=$1
1593
1594	shift
1595
1596	if [[ -z $dataset ]]; then
1597		log_note "Missing dataset name."
1598		return 1
1599	fi
1600
1601	if datasetexists $dataset ; then
1602		destroy_dataset $dataset
1603	fi
1604
1605	log_must zfs create $@ $dataset
1606
1607	return 0
1608}
1609
1610# Return 0 if destroyed successfully; $? otherwise
1611# Note: In local zones, this function should return 0 silently.
1612#
1613# $1 - dataset name
1614# $2 - custom arguments for zfs destroy
1615# Destroy dataset with the given parameters.
1616
1617function destroy_dataset # dataset [args]
1618{
1619	typeset dataset=$1
1620	typeset mtpt
1621	typeset args=${2:-""}
1622
1623	if [[ -z $dataset ]]; then
1624		log_note "No dataset name given."
1625		return 1
1626	fi
1627
1628	if is_global_zone ; then
1629		if datasetexists "$dataset" ; then
1630			mtpt=$(get_prop mountpoint "$dataset")
1631			log_must_busy zfs destroy $args $dataset
1632
1633			[ -d $mtpt ] && log_must rm -rf $mtpt
1634		else
1635			log_note "Dataset does not exist. ($dataset)"
1636			return 1
1637		fi
1638	fi
1639
1640	return 0
1641}
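
#
# Illustrative usage only (a sketch; the optional second argument is passed
# straight to 'zfs destroy'):
#
#	destroy_dataset $TESTPOOL/$TESTFS@$TESTSNAP
#	destroy_dataset $TESTPOOL/$TESTFS "-r"
#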
1642
1643#
1644# Reexport TESTPOOL & TESTPOOL(1-4)
1645#
1646function reexport_pool
1647{
1648	typeset -i cntctr=5
1649	typeset -i i=0
1650
1651	while ((i < cntctr)); do
1652		if ((i == 0)); then
1653			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1654			if ! ismounted $TESTPOOL; then
1655				log_must zfs mount $TESTPOOL
1656			fi
1657		else
1658			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1659			if eval ! ismounted \$TESTPOOL$i; then
1660				log_must eval zfs mount \$TESTPOOL$i
1661			fi
1662		fi
1663		((i += 1))
1664	done
1665}
1666
1667#
1668# Verify a given disk or pool state
1669#
1670# Return 0 if pool/disk matches the expected state, 1 otherwise
1671#
1672function check_state # pool disk state{online,offline,degraded}
1673{
1674	typeset pool=$1
1675	typeset disk=${2#$DEV_DSKDIR/}
1676	typeset state=$3
1677
1678	[[ -z $pool ]] || [[ -z $state ]] \
1679	    && log_fail "Arguments invalid or missing"
1680
1681	if [[ -z $disk ]]; then
1682		#check pool state only
1683		zpool get -H -o value health $pool | grep -qi "$state"
1684	else
1685		zpool status -v $pool | grep "$disk" | grep -qi "$state"
1686	fi
1687}
1688
1689#
1690# Get the mountpoint of a snapshot.
1691# A snapshot uses <mp_filesystem>/.zfs/snapshot/<snap>
1692# as its mountpoint.
1693#
1694function snapshot_mountpoint
1695{
1696	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1697
1698	if [[ $dataset != *@* ]]; then
1699		log_fail "Invalid snapshot name '$dataset'."
1700	fi
1701
1702	typeset fs=${dataset%@*}
1703	typeset snap=${dataset#*@}
1704
1705	if [[ -z $fs || -z $snap ]]; then
1706		log_fail "Invalid snapshot name '$dataset'."
1707	fi
1708
1709	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1710}
1711
1712#
1713# Given a device and 'ashift' value verify it's correctly set on every label
1714#
1715function verify_ashift # device ashift
1716{
1717	typeset device="$1"
1718	typeset ashift="$2"
1719
1720	zdb -e -lll $device | awk -v ashift=$ashift '
1721	    /ashift: / {
1722	        if (ashift != $2)
1723	            exit 1;
1724	        else
1725	            count++;
1726	    }
1727	    END {
1728	        exit (count != 4);
1729	    }'
1730}
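
#
# Illustrative usage only (a sketch; the ashift value of 12 and the use of the
# first disk in $DISKS are arbitrary example choices):
#
#	typeset disk=${DISKS%% *}
#	log_must zpool create -o ashift=12 $TESTPOOL $disk
#	log_must verify_ashift $disk 12
#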
1731
1732#
1733# Given a pool and file system, this function will verify the file system
1734# using the zdb internal tool. Note that the pool is exported and imported
1735# to ensure it has consistent state.
1736#
1737function verify_filesys # pool filesystem dir
1738{
1739	typeset pool="$1"
1740	typeset filesys="$2"
1741	typeset zdbout="/tmp/zdbout.$$"
1742
1743	shift
1744	shift
1745	typeset dirs=$@
1746	typeset search_path=""
1747
1748	log_note "Calling zdb to verify filesystem '$filesys'"
1749	zfs unmount -a > /dev/null 2>&1
1750	log_must zpool export $pool
1751
1752	if [[ -n $dirs ]] ; then
1753		for dir in $dirs ; do
1754			search_path="$search_path -d $dir"
1755		done
1756	fi
1757
1758	log_must zpool import $search_path $pool
1759
1760	if ! zdb -cudi $filesys > $zdbout 2>&1; then
1761		log_note "Output: zdb -cudi $filesys"
1762		cat $zdbout
1763		rm -f $zdbout
1764		log_fail "zdb detected errors with: '$filesys'"
1765	fi
1766
1767	log_must zfs mount -a
1768	log_must rm -rf $zdbout
1769}
1770
1771#
1772# Given a pool issue a scrub and verify that no checksum errors are reported.
1773#
1774function verify_pool
1775{
1776	typeset pool=${1:-$TESTPOOL}
1777
1778	log_must zpool scrub $pool
1779	log_must wait_scrubbed $pool
1780
1781	typeset -i cksum=$(zpool status $pool | awk '
1782	    !NF { isvdev = 0 }
1783	    isvdev { errors += $NF }
1784	    /CKSUM$/ { isvdev = 1 }
1785	    END { print errors }
1786	')
1787	if [[ $cksum != 0 ]]; then
1788		log_must zpool status -v
1789		log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
1790	fi
1791}
1792
1793#
1794# Given a pool, list all disks in the pool.
1795#
1796function get_disklist # pool
1797{
1798	echo $(zpool iostat -v $1 | awk '(NR > 4) {print $1}' | \
1799	    grep -vEe '^-----' -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$")
1800}
1801
1802#
1803# Given a pool, list all disks in the pool with their full
1804# path (like "/dev/sda" instead of "sda").
1805#
1806function get_disklist_fullpath # pool
1807{
1808	get_disklist "-P $1"
1809}
1810
1811
1812
1813# /**
1814#  This function kills a given list of processes after a time period. We use
1815#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
1816#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1817#  would be listed as FAIL, which we don't want: we're happy with stress tests
1818#  running for a certain amount of time, then finishing.
1819#
1820# @param $1 the time in seconds after which we should terminate these processes
1821# @param $2..$n the processes we wish to terminate.
1822# */
1823function stress_timeout
1824{
1825	typeset -i TIMEOUT=$1
1826	shift
1827	typeset cpids="$@"
1828
1829	log_note "Waiting for child processes($cpids). " \
1830		"It could last dozens of minutes, please be patient ..."
1831	log_must sleep $TIMEOUT
1832
1833	log_note "Killing child processes after ${TIMEOUT} stress timeout."
1834	typeset pid
1835	for pid in $cpids; do
1836		ps -p $pid > /dev/null 2>&1 &&
1837			log_must kill -USR1 $pid
1838	done
1839}
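
#
# Illustrative usage only (a sketch; some_stress_workload stands in for any
# hypothetical long-running child process):
#
#	some_stress_workload &
#	typeset pid=$!
#	stress_timeout 60 $pid
#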
1840
1841#
1842# Verify a given hotspare disk is inuse or avail
1843#
1844# Return 0 if pool/disk matches the expected state, 1 otherwise
1845#
1846function check_hotspare_state # pool disk state{inuse,avail}
1847{
1848	typeset pool=$1
1849	typeset disk=${2#$DEV_DSKDIR/}
1850	typeset state=$3
1851
1852	cur_state=$(get_device_state $pool $disk "spares")
1853
1854	[ $state = $cur_state ]
1855}
1856
1857#
1858# Wait until a hotspare transitions to a given state or times out.
1859#
1860# Return 0 when pool/disk matches the expected state, 1 on timeout.
1861#
1862function wait_hotspare_state # pool disk state timeout
1863{
1864	typeset pool=$1
1865	typeset disk=${2#*$DEV_DSKDIR/}
1866	typeset state=$3
1867	typeset timeout=${4:-60}
1868	typeset -i i=0
1869
1870	while [[ $i -lt $timeout ]]; do
1871		if check_hotspare_state $pool $disk $state; then
1872			return 0
1873		fi
1874
1875		i=$((i+1))
1876		sleep 1
1877	done
1878
1879	return 1
1880}
1881
1882#
1883# Verify a given vdev disk is inuse or avail
1884#
1885# Return 0 if pool/disk matches the expected state, 1 otherwise
1886#
1887function check_vdev_state # pool disk state{online,offline,unavail,removed}
1888{
1889	typeset pool=$1
1890	typeset disk=${2#*$DEV_DSKDIR/}
1891	typeset state=$3
1892
1893	cur_state=$(get_device_state $pool $disk)
1894
1895	[ $state = $cur_state ]
1896}
1897
1898#
1899# Wait until a vdev transitions to a given state or times out.
1900#
1901# Return 0 when pool/disk matches the expected state, 1 on timeout.
1902#
1903function wait_vdev_state # pool disk state timeout
1904{
1905	typeset pool=$1
1906	typeset disk=${2#*$DEV_DSKDIR/}
1907	typeset state=$3
1908	typeset timeout=${4:-60}
1909	typeset -i i=0
1910
1911	while [[ $i -lt $timeout ]]; do
1912		if check_vdev_state $pool $disk $state; then
1913			return 0
1914		fi
1915
1916		i=$((i+1))
1917		sleep 1
1918	done
1919
1920	return 1
1921}
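
#
# Illustrative usage only (a sketch; the first disk in $DISKS stands in for a
# vdev of $TESTPOOL and the 60 second timeout is an arbitrary choice):
#
#	typeset disk=${DISKS%% *}
#	log_must zpool offline $TESTPOOL $disk
#	log_must wait_vdev_state $TESTPOOL $disk "OFFLINE" 60
#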
1922
1923#
1924# Check the output of 'zpool status -v <pool>'
1925# to see if the content of <token> contains the specified <keyword>.
1926#
1927# Return 0 if it does, 1 otherwise
1928#
1929function check_pool_status # pool token keyword <verbose>
1930{
1931	typeset pool=$1
1932	typeset token=$2
1933	typeset keyword=$3
1934	typeset verbose=${4:-false}
1935
1936	scan=$(zpool status -v "$pool" 2>/dev/null | awk -v token="$token:" '$1==token')
1937	if [[ $verbose == true ]]; then
1938		log_note $scan
1939	fi
1940	echo $scan | grep -qi "$keyword"
1941}
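
#
# Illustrative usage only (a sketch; the token and keyword below mirror the
# helpers defined right after this function):
#
#	check_pool_status $TESTPOOL "scan" "scrub repaired" && \
#	    log_note "$TESTPOOL has been scrubbed"
#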
1942
1943#
1944# The following functions are instances of check_pool_status()
1945#	is_pool_resilvering - to check if the pool resilver is in progress
1946#	is_pool_resilvered - to check if the pool resilver is completed
1947#	is_pool_scrubbing - to check if the pool scrub is in progress
1948#	is_pool_scrubbed - to check if the pool scrub is completed
1949#	is_pool_scrub_stopped - to check if the pool scrub is stopped
1950#	is_pool_scrub_paused - to check if the pool scrub has paused
1951#	is_pool_removing - to check if the pool is removing a vdev
1952#	is_pool_removed - to check if the pool removal is completed
1953#	is_pool_discarding - to check if the pool checkpoint is being discarded
1954#
1955function is_pool_resilvering #pool <verbose>
1956{
1957	check_pool_status "$1" "scan" \
1958	    "resilver[ ()0-9A-Za-z:_-]* in progress since" $2
1959}
1960
1961function is_pool_resilvered #pool <verbose>
1962{
1963	check_pool_status "$1" "scan" "resilvered " $2
1964}
1965
1966function is_pool_scrubbing #pool <verbose>
1967{
1968	check_pool_status "$1" "scan" "scrub in progress since " $2
1969}
1970
1971function is_pool_scrubbed #pool <verbose>
1972{
1973	check_pool_status "$1" "scan" "scrub repaired" $2
1974}
1975
1976function is_pool_scrub_stopped #pool <verbose>
1977{
1978	check_pool_status "$1" "scan" "scrub canceled" $2
1979}
1980
1981function is_pool_scrub_paused #pool <verbose>
1982{
1983	check_pool_status "$1" "scan" "scrub paused since " $2
1984}
1985
1986function is_pool_removing #pool
1987{
1988	check_pool_status "$1" "remove" "in progress since "
1989}
1990
1991function is_pool_removed #pool
1992{
1993	check_pool_status "$1" "remove" "completed on"
1994}
1995
1996function is_pool_discarding #pool
1997{
1998	check_pool_status "$1" "checkpoint" "discarding"
1999}
2000
2001function wait_for_degraded
2002{
2003	typeset pool=$1
2004	typeset timeout=${2:-30}
2005	typeset t0=$SECONDS
2006
2007	while :; do
2008		[[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
2009		log_note "$pool is not yet degraded."
2010		sleep 1
2011		if ((SECONDS - t0 > $timeout)); then
2012			log_note "$pool not degraded after $timeout seconds."
2013			return 1
2014		fi
2015	done
2016
2017	return 0
2018}
2019
2020#
2021# Use create_pool()/destroy_pool() to clean up the information in
2022# the given disks to avoid slice overlapping.
2023#
2024function cleanup_devices #vdevs
2025{
2026	typeset pool="foopool$$"
2027
2028	for vdev in $@; do
2029		zero_partitions $vdev
2030	done
2031
2032	poolexists $pool && destroy_pool $pool
2033	create_pool $pool $@
2034	destroy_pool $pool
2035
2036	return 0
2037}
2038
2039#/**
2040# A function to find and locate free disks on a system or from given
2041# disks as the parameter. It works by excluding disks that are in use
2042# as swap or dump devices, as well as disks listed in /etc/vfstab.
2043#
2044# $@ given disks to find which are free, default is all disks in
2045# the test system
2046#
2047# @return a string containing the list of available disks
2048#*/
2049function find_disks
2050{
2051	# Trust provided list, no attempt is made to locate unused devices.
2052	if is_linux || is_freebsd; then
2053		echo "$@"
2054		return
2055	fi
2056
2057
2058	sfi=/tmp/swaplist.$$
2059	dmpi=/tmp/dumpdev.$$
2060	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2061
2062	swap -l > $sfi
2063	dumpadm > $dmpi 2>/dev/null
2064
2065	disks=${@:-$(echo "" | format -e 2>/dev/null | awk '
2066BEGIN { FS="."; }
2067
2068/^Specify disk/{
2069	searchdisks=0;
2070}
2071
2072{
2073	if (searchdisks && $2 !~ "^$"){
2074		split($2,arr," ");
2075		print arr[1];
2076	}
2077}
2078
2079/^AVAILABLE DISK SELECTIONS:/{
2080	searchdisks=1;
2081}
2082')}
2083
2084	unused=""
2085	for disk in $disks; do
2086	# Check for mounted
2087		grep -q "${disk}[sp]" /etc/mnttab && continue
2088	# Check for swap
2089		grep -q "${disk}[sp]" $sfi && continue
2090	# check for dump device
2091		grep -q "${disk}[sp]" $dmpi && continue
2092	# check to see if this disk hasn't been explicitly excluded
2093	# by a user-set environment variable
2094		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep -q "${disk}" && continue
2095		unused_candidates="$unused_candidates $disk"
2096	done
2097	rm $sfi $dmpi
2098
2099# now just check to see if those disks do actually exist
2100# by looking for a device pointing to the first slice in
2101# each case. limit the number to max_finddisksnum
2102	count=0
2103	for disk in $unused_candidates; do
2104		if is_disk_device $DEV_DSKDIR/${disk}s0 && \
2105		    [ $count -lt $max_finddisksnum ]; then
2106			unused="$unused $disk"
2107			# do not impose limit if $@ is provided
2108			[[ -z $@ ]] && ((count = count + 1))
2109		fi
2110	done
2111
2112# finally, return our disk list
2113	echo $unused
2114}
2115
2116function add_user_freebsd #<group_name> <user_name> <basedir>
2117{
2118	typeset group=$1
2119	typeset user=$2
2120	typeset basedir=$3
2121
2122	# Check to see if the user exists.
2123	if id $user > /dev/null 2>&1; then
2124		return 0
2125	fi
2126
2127	# Assign 1000 as the base uid
2128	typeset -i uid=1000
2129	while true; do
2130		pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
2131		case $? in
2132			0) break ;;
2133			# The uid is not unique
2134			65) ((uid += 1)) ;;
2135			*) return 1 ;;
2136		esac
2137		if [[ $uid == 65000 ]]; then
2138			log_fail "No user id available under 65000 for $user"
2139		fi
2140	done
2141
2142	# Silence MOTD
2143	touch $basedir/$user/.hushlogin
2144
2145	return 0
2146}
2147
2148#
2149# Delete the specified user.
2150#
2151# $1 login name
2152#
2153function del_user_freebsd #<logname>
2154{
2155	typeset user=$1
2156
2157	if id $user > /dev/null 2>&1; then
2158		log_must pw userdel $user
2159	fi
2160
2161	return 0
2162}
2163
2164#
2165# Select valid gid and create specified group.
2166#
2167# $1 group name
2168#
2169function add_group_freebsd #<group_name>
2170{
2171	typeset group=$1
2172
2173	# See if the group already exists.
2174	if pw groupshow $group >/dev/null 2>&1; then
2175		return 0
2176	fi
2177
2178	# Assign 1000 as the base gid
2179	typeset -i gid=1000
2180	while true; do
2181		pw groupadd -g $gid -n $group > /dev/null 2>&1
2182		case $? in
2183			0) return 0 ;;
2184			# The gid is not unique
2185			65) ((gid += 1)) ;;
2186			*) return 1 ;;
2187		esac
2188		if [[ $gid == 65000 ]]; then
2189			log_fail "No group id available under 65000 for $group"
2190		fi
2191	done
2192}
2193
2194#
2195# Delete the specified group.
2196#
2197# $1 group name
2198#
2199function del_group_freebsd #<group_name>
2200{
2201	typeset group=$1
2202
2203	pw groupdel -n $group > /dev/null 2>&1
2204	case $? in
2205		# Group does not exist, or was deleted successfully.
2206		0|6|65) return 0 ;;
2207		# Name already exists as a group name
2208		9) log_must pw groupdel $group ;;
2209		*) return 1 ;;
2210	esac
2211
2212	return 0
2213}
2214
2215function add_user_illumos #<group_name> <user_name> <basedir>
2216{
2217	typeset group=$1
2218	typeset user=$2
2219	typeset basedir=$3
2220
2221	log_must useradd -g $group -d $basedir/$user -m $user
2222
2223	return 0
2224}
2225
2226function del_user_illumos #<user_name>
2227{
2228	typeset user=$1
2229
2230	if id $user > /dev/null 2>&1; then
2231		log_must_retry "currently used" 6 userdel $user
2232	fi
2233
2234	return 0
2235}
2236
2237function add_group_illumos #<group_name>
2238{
2239	typeset group=$1
2240
2241	typeset -i gid=100
2242	while true; do
2243		groupadd -g $gid $group > /dev/null 2>&1
2244		case $? in
2245			0) return 0 ;;
2246			# The gid is not unique
2247			4) ((gid += 1)) ;;
2248			*) return 1 ;;
2249		esac
2250	done
2251}
2252
2253function del_group_illumos #<group_name>
2254{
2255	typeset group=$1
2256
2257	groupmod -n $group $group > /dev/null 2>&1
2258	case $? in
2259		# Group does not exist.
2260		6) return 0 ;;
2261		# Name already exists as a group name
2262		9) log_must groupdel $group ;;
2263		*) return 1 ;;
2264	esac
2265}
2266
2267function add_user_linux #<group_name> <user_name> <basedir>
2268{
2269	typeset group=$1
2270	typeset user=$2
2271	typeset basedir=$3
2272
2273	log_must useradd -g $group -d $basedir/$user -m $user
2274
2275	# Add new users to the same group as the command line utilities.
2276	# This allows them to be run out of the original user's home
2277	# directory as long as it is permissioned to be group readable.
2278	cmd_group=$(stat --format="%G" $(command -v zfs))
2279	log_must usermod -a -G $cmd_group $user
2280
2281	return 0
2282}
2283
2284function del_user_linux #<user_name>
2285{
2286	typeset user=$1
2287
2288	if id $user > /dev/null 2>&1; then
2289		log_must_retry "currently used" 6 userdel $user
2290	fi
2291}
2292
2293function add_group_linux #<group_name>
2294{
2295	typeset group=$1
2296
2297	# Let groupadd pick the gid itself; no explicit base gid is used on
2298	# Linux because many distributions reserve gids of 1000 and below.
2299	while true; do
2300		groupadd $group > /dev/null 2>&1
2301		case $? in
2302			0) return 0 ;;
2303			*) return 1 ;;
2304		esac
2305	done
2306}
2307
2308function del_group_linux #<group_name>
2309{
2310	typeset group=$1
2311
2312	getent group $group > /dev/null 2>&1
2313	case $? in
2314		# Group does not exist.
2315		2) return 0 ;;
2316		# Name already exists as a group name
2317		0) log_must groupdel $group ;;
2318		*) return 1 ;;
2319	esac
2320
2321	return 0
2322}
2323
2324#
2325# Add specified user to specified group
2326#
2327# $1 group name
2328# $2 user name
2329# $3 base of the homedir (optional)
2330#
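# A minimal usage sketch; "zgroup" and "zuser" are illustrative names,
# and the group is assumed to have been created beforehand (see
# add_group below):
#
#	log_must add_user zgroup zuser
#	log_must user_run zuser touch /var/tmp/zuser/testfile
#	log_must del_user zuser
#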
2331function add_user #<group_name> <user_name> <basedir>
2332{
2333	typeset group=$1
2334	typeset user=$2
2335	typeset basedir=${3:-"/var/tmp"}
2336
2337	if ((${#group} == 0 || ${#user} == 0)); then
2338		log_fail "group name or user name is not defined."
2339	fi
2340
2341	case "$UNAME" in
2342	FreeBSD)
2343		add_user_freebsd "$group" "$user" "$basedir"
2344		;;
2345	Linux)
2346		add_user_linux "$group" "$user" "$basedir"
2347		;;
2348	*)
2349		add_user_illumos "$group" "$user" "$basedir"
2350		;;
2351	esac
2352
2353	return 0
2354}
2355
2356#
2357# Delete the specified user.
2358#
2359# $1 login name
2360# $2 base of the homedir (optional)
2361#
2362function del_user #<logname> <basedir>
2363{
2364	typeset user=$1
2365	typeset basedir=${2:-"/var/tmp"}
2366
2367	if ((${#user} == 0)); then
2368		log_fail "login name is necessary."
2369	fi
2370
2371	case "$UNAME" in
2372	FreeBSD)
2373		del_user_freebsd "$user"
2374		;;
2375	Linux)
2376		del_user_linux "$user"
2377		;;
2378	*)
2379		del_user_illumos "$user"
2380		;;
2381	esac
2382
2383	[[ -d $basedir/$user ]] && rm -fr $basedir/$user
2384
2385	return 0
2386}
2387
2388#
2389# Select valid gid and create specified group.
2390#
2391# $1 group name
2392#
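# Illustrative pairing with del_group; "zgroup" is a placeholder name:
#
#	log_must add_group zgroup
#	... test body ...
#	log_must del_group zgroup
#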
2393function add_group #<group_name>
2394{
2395	typeset group=$1
2396
2397	if ((${#group} == 0)); then
2398		log_fail "group name is necessary."
2399	fi
2400
2401	case "$UNAME" in
2402	FreeBSD)
2403		add_group_freebsd "$group"
2404		;;
2405	Linux)
2406		add_group_linux "$group"
2407		;;
2408	*)
2409		add_group_illumos "$group"
2410		;;
2411	esac
2412
2413	return 0
2414}
2415
2416#
2417# Delete the specified group.
2418#
2419# $1 group name
2420#
2421function del_group #<group_name>
2422{
2423	typeset group=$1
2424
2425	if ((${#group} == 0)); then
2426		log_fail "group name is necessary."
2427	fi
2428
2429	case "$UNAME" in
2430	FreeBSD)
2431		del_group_freebsd "$group"
2432		;;
2433	Linux)
2434		del_group_linux "$group"
2435		;;
2436	*)
2437		del_group_illumos "$group"
2438		;;
2439	esac
2440
2441	return 0
2442}
2443
2444#
2445# This function will return true if it's safe to destroy the pool passed
2446# as argument 1. It checks for pools based on zvols and files, and also
2447# files contained in a pool that may have a different mountpoint.
2448#
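# A typical guard before tearing a pool down (the pool name is the
# conventional $TESTPOOL placeholder):
#
#	if safe_to_destroy_pool $TESTPOOL; then
#		log_must zpool destroy $TESTPOOL
#	fi
#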
2449function safe_to_destroy_pool { # $1 the pool name
2450
2451	typeset pool=""
2452	typeset DONT_DESTROY=""
2453
2454	# We check that by deleting the $1 pool, we're not
2455	# going to pull the rug out from under other pools. Do this
2456	# by looking at all other pools, ensuring that they
2457	# aren't built from files or zvols contained in this pool.
2458
2459	for pool in $(zpool list -H -o name)
2460	do
2461		ALTMOUNTPOOL=""
2462
2463		# this is a list of the file-based vdevs in $pool whose paths
2464		# lie under the top-level mountpoint of pool $1
2465		FILEPOOL=$(zpool status -v $pool | awk -v pool="/$1/" '$0 ~ pool {print $1}')
2466
2467		# this is a list of the zvols that make up the pool
2468		ZVOLPOOL=$(zpool status -v $pool | awk -v zvols="$ZVOL_DEVDIR/$1$" '$0 ~ zvols {print $1}')
2469
2470		# also want to determine if it's a file-based pool using an
2471		# alternate mountpoint...
2472		POOL_FILE_DIRS=$(zpool status -v $pool | \
2473					awk '/\// {print $1}' | \
2474					awk -F/ '!/dev/ {print $2}')
2475
2476		for pooldir in $POOL_FILE_DIRS
2477		do
2478			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2479					awk -v pd="${pooldir}$" '$0 ~ pd {print $1}')
2480
2481			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2482		done
2483
2484
2485		if [ ! -z "$ZVOLPOOL" ]
2486		then
2487			DONT_DESTROY="true"
2488			log_note "Pool $pool is built from $ZVOLPOOL on $1"
2489		fi
2490
2491		if [ ! -z "$FILEPOOL" ]
2492		then
2493			DONT_DESTROY="true"
2494			log_note "Pool $pool is built from $FILEPOOL on $1"
2495		fi
2496
2497		if [ ! -z "$ALTMOUNTPOOL" ]
2498		then
2499			DONT_DESTROY="true"
2500			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2501		fi
2502	done
2503
2504	if [ -z "${DONT_DESTROY}" ]
2505	then
2506		return 0
2507	else
2508		log_note "Warning: it is not safe to destroy $1!"
2509		return 1
2510	fi
2511}
2512
2513#
2514# Verify zfs operation with -p option works as expected
2515# $1 operation, value could be create, clone or rename
2516# $2 dataset type, value could be fs or vol
2517# $3 dataset name
2518# $4 new dataset name
2519#
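# Illustrative call; the dataset name is a placeholder and deliberately
# contains a nonexistent intermediate component so that -p has work to do:
#
#	verify_opt_p_ops create fs $TESTPOOL/parent/child
#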
2520function verify_opt_p_ops
2521{
2522	typeset ops=$1
2523	typeset datatype=$2
2524	typeset dataset=$3
2525	typeset newdataset=$4
2526
2527	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2528		log_fail "$datatype is not supported."
2529	fi
2530
2531	# check parameters accordingly
2532	case $ops in
2533		create)
2534			newdataset=$dataset
2535			dataset=""
2536			if [[ $datatype == "vol" ]]; then
2537				ops="create -V $VOLSIZE"
2538			fi
2539			;;
2540		clone)
2541			if [[ -z $newdataset ]]; then
2542				log_fail "newdataset should not be empty" \
2543					"when ops is $ops."
2544			fi
2545			log_must datasetexists $dataset
2546			log_must snapexists $dataset
2547			;;
2548		rename)
2549			if [[ -z $newdataset ]]; then
2550				log_fail "newdataset should not be empty" \
2551					"when ops is $ops."
2552			fi
2553			log_must datasetexists $dataset
2554			;;
2555		*)
2556			log_fail "$ops is not supported."
2557			;;
2558	esac
2559
2560	# make sure the upper level filesystem does not exist
2561	destroy_dataset "${newdataset%/*}" "-rRf"
2562
2563	# without -p option, operation will fail
2564	log_mustnot zfs $ops $dataset $newdataset
2565	log_mustnot datasetexists $newdataset ${newdataset%/*}
2566
2567	# with -p option, operation should succeed
2568	log_must zfs $ops -p $dataset $newdataset
2569	block_device_wait
2570
2571	if ! datasetexists $newdataset ; then
2572		log_fail "-p option does not work for $ops"
2573	fi
2574
2575	# when $ops is create or clone, redoing the operation should still succeed
2576	if [[ $ops != "rename" ]]; then
2577		log_must zfs $ops -p $dataset $newdataset
2578	fi
2579
2580	return 0
2581}
2582
2583#
2584# Get configuration of pool
2585# $1 pool name
2586# $2 config name
2587#
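# For example, assuming the zdb output contains a 'pool_guid' line (as it
# normally does), the pool guid can be fetched with:
#
#	typeset guid=$(get_config $TESTPOOL pool_guid)
#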
2588function get_config
2589{
2590	typeset pool=$1
2591	typeset config=$2
2592
2593	if ! poolexists "$pool" ; then
2594		return 1
2595	fi
2596	if [ "$(get_pool_prop cachefile "$pool")" = "none" ]; then
2597		zdb -e $pool
2598	else
2599		zdb -C $pool
2600	fi | awk -F: -v cfg="$config:" '$0 ~ cfg {gsub(/^ +|'\''/, "", $2); print $2}'
2601}
2602
2603#
2604# Private function. Randomly select one of the items from the arguments.
2605#
2606# $1 count
2607# $2-n string
2608#
2609function _random_get
2610{
2611	typeset cnt=$1
2612	shift
2613
2614	typeset str="$@"
2615	typeset -i ind
2616	((ind = RANDOM % cnt + 1))
2617
2618	echo "$str" | cut -f $ind -d ' '
2619}
2620
2621#
2622# Randomly select one of the arguments, or nothing at all (the NONE case)
2623#
2624function random_get_with_non
2625{
2626	typeset -i cnt=$#
2627	((cnt += 1))
2628
2629	_random_get "$cnt" "$@"
2630}
2631
2632#
2633# Randomly select one of the arguments, never the empty NONE case
2634#
2635function random_get
2636{
2637	_random_get "$#" "$@"
2638}
2639
2640#
2641# The function will generate a dataset name of (at least) the specified length
2642# $1, the length of the name
2643# $2, the base string to construct the name
2644#
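# For example, the following builds a name of at least 200 characters
# from repetitions of the illustrative base string "dataset":
#
#	typeset longname=$(gen_dataset_name 200 dataset)
#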
2645function gen_dataset_name
2646{
2647	typeset -i len=$1
2648	typeset basestr="$2"
2649	typeset -i baselen=${#basestr}
2650	typeset -i iter=0
2651	typeset l_name=""
2652
2653	if ((len % baselen == 0)); then
2654		((iter = len / baselen))
2655	else
2656		((iter = len / baselen + 1))
2657	fi
2658	while ((iter > 0)); do
2659		l_name="${l_name}$basestr"
2660
2661		((iter -= 1))
2662	done
2663
2664	echo $l_name
2665}
2666
2667#
2668# Get cksum tuple of dataset
2669# $1 dataset name
2670#
2671# sample zdb output:
2672# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2673# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2674# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2675# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2676function datasetcksum
2677{
2678	typeset cksum
2679	sync
2680	sync_all_pools
2681	zdb -vvv $1 | awk -F= -v ds="^Dataset $1 "'\\[' '$0 ~ ds && /cksum/ {print $7}'
2682}
2683
2684#
2685# Get the given disk/slice state from the specific field of the pool
2686#
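# Illustrative check that a disk is ONLINE ($DISK is a placeholder):
#
#	typeset state=$(get_device_state $TESTPOOL $DISK)
#	[[ $state == "ONLINE" ]] || log_fail "unexpected state: $state"
#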
2687function get_device_state #pool disk field("", "spares","logs")
2688{
2689	typeset pool=$1
2690	typeset disk=${2#$DEV_DSKDIR/}
2691	typeset field=${3:-$pool}
2692
2693	zpool status -v "$pool" 2>/dev/null | \
2694		awk -v device=$disk -v pool=$pool -v field=$field \
2695		'BEGIN {startconfig=0; startfield=0; }
2696		/config:/ {startconfig=1}
2697		(startconfig==1) && ($1==field) {startfield=1; next;}
2698		(startfield==1) && ($1==device) {print $2; exit;}
2699		(startfield==1) &&
2700		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}'
2701}
2702
2703#
2704# get the root filesystem name if it's a zfsroot system.
2705#
2706# return: root filesystem name
2707function get_rootfs
2708{
2709	typeset rootfs=""
2710
2711	if is_freebsd; then
2712		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
2713	elif ! is_linux; then
2714		rootfs=$(awk '$2 == "/" && $3 == "zfs" {print $1}' \
2715			/etc/mnttab)
2716	fi
2717	if [[ -z "$rootfs" ]]; then
2718		log_fail "Can not get rootfs"
2719	fi
2720	if datasetexists $rootfs; then
2721		echo $rootfs
2722	else
2723		log_fail "This is not a zfsroot system."
2724	fi
2725}
2726
2727#
2728# get the rootfs's pool name
2729# return:
2730#       rootpool name
2731#
2732function get_rootpool
2733{
2734	typeset rootfs=$(get_rootfs)
2735	echo ${rootfs%%/*}
2736}
2737
2738#
2739# To verify that the required number of disks is given
2740#
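# Typical setup-time check; "$DISKS" is the caller's space-separated
# disk list:
#
#	verify_disk_count "$DISKS" 3
#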
2741function verify_disk_count
2742{
2743	typeset -i min=${2:-1}
2744
2745	typeset -i count=$(echo "$1" | wc -w)
2746
2747	if ((count < min)); then
2748		log_untested "A minimum of $min disks is required to run." \
2749			" You specified $count disk(s)"
2750	fi
2751}
2752
2753function ds_is_volume
2754{
2755	typeset type=$(get_prop type $1)
2756	[ $type = "volume" ]
2757}
2758
2759function ds_is_filesystem
2760{
2761	typeset type=$(get_prop type $1)
2762	[ $type = "filesystem" ]
2763}
2764
2765#
2766# Check if Trusted Extensions are installed and enabled
2767#
2768function is_te_enabled
2769{
2770	svcs -H -o state labeld 2>/dev/null | grep -q "enabled"
2771}
2772
2773# Return the number of CPUs (cross-platform)
2774function get_num_cpus
2775{
2776	if is_linux ; then
2777		grep -c '^processor' /proc/cpuinfo
2778	elif is_freebsd; then
2779		sysctl -n kern.smp.cpus
2780	else
2781		psrinfo | wc -l
2782	fi
2783}
2784
2785# Utility function to determine if a system has multiple cpus.
2786function is_mp
2787{
2788	[[ $(get_num_cpus) -gt 1 ]]
2789}
2790
2791function get_cpu_freq
2792{
2793	if is_linux; then
2794		lscpu | awk '/CPU MHz/ { print $3 }'
2795	elif is_freebsd; then
2796		sysctl -n hw.clockrate
2797	else
2798		psrinfo -v 0 | awk '/processor operates at/ {print $6}'
2799	fi
2800}
2801
2802# Run the given command as the user provided.
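# The command is fed to ksh with the caller's PATH, and its stdout/stderr
# are echoed via log_note.  Illustrative call (names are placeholders):
#
#	log_must user_run zuser zfs snapshot $TESTPOOL/fs@snap
#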
2803function user_run
2804{
2805	typeset user=$1
2806	shift
2807
2808	log_note "user: $user"
2809	log_note "cmd: $*"
2810
2811	typeset out=$TEST_BASE_DIR/out
2812	typeset err=$TEST_BASE_DIR/err
2813
2814	sudo -Eu $user env PATH="$PATH" ksh <<<"$*" >$out 2>$err
2815	typeset res=$?
2816	log_note "out: $(<$out)"
2817	log_note "err: $(<$err)"
2818	return $res
2819}
2820
2821#
2822# Check if the pool contains the specified vdevs
2823#
2824# $1 pool
2825# $2..n <vdev> ...
2826#
2827# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2828# vdevs is not in the pool, and 2 if pool name is missing.
2829#
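# Illustrative check ($DISK1 and $DISK2 are placeholder device names):
#
#	log_must vdevs_in_pool $TESTPOOL $DISK1 $DISK2
#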
2830function vdevs_in_pool
2831{
2832	typeset pool=$1
2833	typeset vdev
2834
2835	if [[ -z $pool ]]; then
2836		log_note "Missing pool name."
2837		return 2
2838	fi
2839
2840	shift
2841
2842	# We could use 'zpool list' to only get the vdevs of the pool but we
2843	# can't reference a mirror/raidz vdev using its ID (e.g. mirror-0),
2844	# therefore we use the 'zpool status' output.
2845	typeset tmpfile=$(mktemp)
2846	zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
2847	for vdev in "$@"; do
2848		grep -wq ${vdev##*/} $tmpfile || { rm -f $tmpfile; return 1; }
2849	done
2850
2851	rm -f $tmpfile
2852	return 0
2853}
2854
2855function get_max
2856{
2857	typeset -l i max=$1
2858	shift
2859
2860	for i in "$@"; do
2861		max=$((max > i ? max : i))
2862	done
2863
2864	echo $max
2865}
2866
2867# Write data that can be compressed into a directory
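# $1 directory, $2 megabytes per file, $3 file count (default 1),
# $4 block size (default 1024k), $5 file name prefix (default "file").
# Illustrative call writing two 16M files ($TESTDIR is a placeholder):
#
#	write_compressible $TESTDIR 16m 2
#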
2868function write_compressible
2869{
2870	typeset dir=$1
2871	typeset megs=$2
2872	typeset nfiles=${3:-1}
2873	typeset bs=${4:-1024k}
2874	typeset fname=${5:-file}
2875
2876	[[ -d $dir ]] || log_fail "No directory: $dir"
2877
2878	# Under Linux fio is not currently used since its behavior can
2879	# differ significantly across versions.  This includes missing
2880	# command line options and cases where the --buffer_compress_*
2881	# options fail to behave as expected.
2882	if is_linux; then
2883		typeset file_bytes=$(to_bytes $megs)
2884		typeset bs_bytes=4096
2885		typeset blocks=$(($file_bytes / $bs_bytes))
2886
2887		for (( i = 0; i < $nfiles; i++ )); do
2888			truncate -s $file_bytes $dir/$fname.$i
2889
2890			# Write every third block to get 66% compression.
2891			for (( j = 0; j < $blocks; j += 3 )); do
2892				dd if=/dev/urandom of=$dir/$fname.$i \
2893				    seek=$j bs=$bs_bytes count=1 \
2894				    conv=notrunc >/dev/null 2>&1
2895			done
2896		done
2897	else
2898		command -v fio > /dev/null || log_unsupported "fio missing"
2899		log_must eval fio \
2900		    --name=job \
2901		    --fallocate=0 \
2902		    --minimal \
2903		    --randrepeat=0 \
2904		    --buffer_compress_percentage=66 \
2905		    --buffer_compress_chunk=4096 \
2906		    --directory="$dir" \
2907		    --numjobs="$nfiles" \
2908		    --nrfiles="$nfiles" \
2909		    --rw=write \
2910		    --bs="$bs" \
2911		    --filesize="$megs" \
2912		    "--filename_format='$fname.\$jobnum' >/dev/null"
2913	fi
2914}
2915
2916function get_objnum
2917{
2918	typeset pathname=$1
2919	typeset objnum
2920
2921	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
2922	if is_freebsd; then
2923		objnum=$(stat -f "%i" $pathname)
2924	else
2925		objnum=$(stat -c %i $pathname)
2926	fi
2927	echo $objnum
2928}
2929
2930#
2931# Sync data to the pool
2932#
2933# $1 pool name
2934# $2 boolean to force uberblock (and config including zpool cache file) update
2935#
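# Example: force a txg sync (and a config/cache file update) on the
# default test pool:
#
#	sync_pool $TESTPOOL true
#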
2936function sync_pool #pool <force>
2937{
2938	typeset pool=${1:-$TESTPOOL}
2939	typeset force=${2:-false}
2940
2941	if [[ $force == true ]]; then
2942		log_must zpool sync -f $pool
2943	else
2944		log_must zpool sync $pool
2945	fi
2946
2947	return 0
2948}
2949
2950#
2951# Sync all pools
2952#
2953# $1 boolean to force uberblock (and config including zpool cache file) update
2954#
2955function sync_all_pools #<force>
2956{
2957	typeset force=${1:-false}
2958
2959	if [[ $force == true ]]; then
2960		log_must zpool sync -f
2961	else
2962		log_must zpool sync
2963	fi
2964
2965	return 0
2966}
2967
2968#
2969# Wait for the zpool 'freeing' property to drop to zero.
2970#
2971# $1 pool name
2972#
2973function wait_freeing #pool
2974{
2975	typeset pool=${1:-$TESTPOOL}
2976	while true; do
2977		[[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
2978		log_must sleep 1
2979	done
2980}
2981
2982#
2983# Wait for every device replace operation to complete
2984#
2985# $1 pool name
2986#
2987function wait_replacing #pool
2988{
2989	typeset pool=${1:-$TESTPOOL}
2990	while zpool status $pool | grep -qE 'replacing-[0-9]+'; do
2991		log_must sleep 1
2992	done
2993}
2994
2995# Wait for a pool to be scrubbed
2996#
2997# $1 pool name
2998# $2 timeout
2999#
3000function wait_scrubbed #pool timeout
3001{
3002	typeset timeout=${2:-300}
3003	typeset pool=${1:-$TESTPOOL}
3004	for (( timer = 0; timer < $timeout; timer++ )); do
3005		is_pool_scrubbed $pool && break
3006		sleep 1
3007	done
3008}
3009
3010# Back up the zed.rc in our test directory so that we can edit it for our test.
3011#
3012# Returns: Backup file name.  You will need to pass this to zed_rc_restore().
3013function zed_rc_backup
3014{
3015	zedrc_backup="$(mktemp)"
3016	cp $ZEDLET_DIR/zed.rc $zedrc_backup
3017	echo $zedrc_backup
3018}
3019
3020function zed_rc_restore
3021{
3022	mv $1 $ZEDLET_DIR/zed.rc
3023}
3024
3025#
3026# Set up a custom environment for the ZED.
3027#
3028# $@ Optional list of zedlets to run under zed.
3029function zed_setup
3030{
3031	if ! is_linux; then
3032		log_unsupported "No zed on $UNAME"
3033	fi
3034
3035	if [[ ! -d $ZEDLET_DIR ]]; then
3036		log_must mkdir $ZEDLET_DIR
3037	fi
3038
3039	if [[ ! -e $VDEVID_CONF ]]; then
3040		log_must touch $VDEVID_CONF
3041	fi
3042
3043	if [[ -e $VDEVID_CONF_ETC ]]; then
3044		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
3045	fi
3046	EXTRA_ZEDLETS=$@
3047
3048	# Create a symlink for /etc/zfs/vdev_id.conf file.
3049	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
3050
3051	# Setup minimal ZED configuration.  Individual test cases should
3052	# add additional ZEDLETs as needed for their specific test.
3053	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
3054	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
3055
3056	# Scripts must only be user writable.
3057	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3058		saved_umask=$(umask)
3059		log_must umask 0022
3060		for i in $EXTRA_ZEDLETS ; do
3061			log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
3062		done
3063		log_must umask $saved_umask
3064	fi
3065
3066	# Customize the zed.rc file to enable the full debug log.
3067	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
3068	echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
3069
3070}
3071
3072#
3073# Cleanup custom ZED environment.
3074#
3075# $@ Optional list of zedlets to remove from our test zed.d directory.
3076function zed_cleanup
3077{
3078	if ! is_linux; then
3079		return
3080	fi
3081
3082	for extra_zedlet; do
3083		log_must rm -f ${ZEDLET_DIR}/$extra_zedlet
3084	done
3085	log_must rm -fd ${ZEDLET_DIR}/zed.rc ${ZEDLET_DIR}/zed-functions.sh ${ZEDLET_DIR}/all-syslog.sh ${ZEDLET_DIR}/all-debug.sh ${ZEDLET_DIR}/state \
3086	                $ZED_LOG $ZED_DEBUG_LOG $VDEVID_CONF_ETC $VDEVID_CONF \
3087	                $ZEDLET_DIR
3088}
3089
3090#
3091# Check if ZED is currently running; if so, returns PIDs
3092#
3093function zed_check
3094{
3095	if ! is_linux; then
3096		return
3097	fi
3098	zedpids="$(pgrep -x zed)"
3099	zedpids2="$(pgrep -x lt-zed)"
3100	echo ${zedpids} ${zedpids2}
3101}
3102
3103#
3104# Check if ZED is currently running, if not start ZED.
3105#
3106function zed_start
3107{
3108	if ! is_linux; then
3109		return
3110	fi
3111
3112	# ZEDLET_DIR=/var/tmp/zed
3113	if [[ ! -d $ZEDLET_DIR ]]; then
3114		log_must mkdir $ZEDLET_DIR
3115	fi
3116
3117	# Verify the ZED is not already running.
3118	zedpids=$(zed_check)
3119	if [ -n "$zedpids" ]; then
3120		# We never, ever want to just keep going if zed is
3121		# already running: usually this implies that our test
3122		# cases will break very strangely, because whatever we
3123		# wanted to configure zed for won't be listening to our
3124		# changes in the tmpdir.
3125		log_fail "ZED already running - ${zedpids}"
3126	else
3127		log_note "Starting ZED"
3128		# run ZED in the background and redirect foreground logging
3129		# output to $ZED_LOG.
3130		log_must truncate -s 0 $ZED_DEBUG_LOG
3131		log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \
3132		    "-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &"
3133	fi
3134
3135	return 0
3136}
3137
3138#
3139# Kill ZED process
3140#
3141function zed_stop
3142{
3143	if ! is_linux; then
3144		return
3145	fi
3146
3147	log_note "Stopping ZED"
3148	while true; do
3149		zedpids=$(zed_check)
3150		[ -z "$zedpids" ] && break
3151
3152		log_must kill $zedpids
3153		sleep 1
3154	done
3155	return 0
3156}
3157
3158#
3159# Drain all zevents
3160#
3161function zed_events_drain
3162{
3163	while [ $(zpool events -H | wc -l) -ne 0 ]; do
3164		sleep 1
3165		zpool events -c >/dev/null
3166	done
3167}
3168
3169# Set a variable in zed.rc to something, un-commenting it in the process.
3170#
3171# $1 variable
3172# $2 value
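#
# Typical pattern together with zed_rc_backup/zed_rc_restore above (the
# variable and value are illustrative):
#
#	typeset zedrc_backup=$(zed_rc_backup)
#	zed_rc_set ZED_NOTIFY_INTERVAL_SECS 3
#	... test body ...
#	zed_rc_restore $zedrc_backup
#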
3173function zed_rc_set
3174{
3175	var="$1"
3176	val="$2"
3177	# Remove the line
3178	cmd="'/$var/d'"
3179	eval sed -i $cmd $ZEDLET_DIR/zed.rc
3180
3181	# Add it at the end
3182	echo "$var=$val" >> $ZEDLET_DIR/zed.rc
3183}
3184
3185
3186#
3187# Check if the provided device is actively being used as a swap device.
3188#
3189function is_swap_inuse
3190{
3191	typeset device=$1
3192
3193	if [[ -z $device ]] ; then
3194		log_note "No device specified."
3195		return 1
3196	fi
3197
3198	case "$UNAME" in
3199	Linux)
3200		swapon -s | grep -wq $(readlink -f $device)
3201		;;
3202	FreeBSD)
3203		swapctl -l | grep -wq $device
3204		;;
3205	*)
3206		swap -l | grep -wq $device
3207		;;
3208	esac
3209}
3210
3211#
3212# Set up a swap device using the provided device.
3213#
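# Illustrative pairing with swap_cleanup (the zvol device path is a
# placeholder):
#
#	log_must swap_setup $ZVOL_DEVDIR/$TESTPOOL/$TESTVOL
#	... test body ...
#	log_must swap_cleanup $ZVOL_DEVDIR/$TESTPOOL/$TESTVOL
#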
3214function swap_setup
3215{
3216	typeset swapdev=$1
3217
3218	case "$UNAME" in
3219	Linux)
3220		log_must eval "mkswap $swapdev > /dev/null 2>&1"
3221		log_must swapon $swapdev
3222		;;
3223	FreeBSD)
3224		log_must swapctl -a $swapdev
3225		;;
3226	*)
3227		log_must swap -a $swapdev
3228		;;
3229	esac
3230
3231	return 0
3232}
3233
3234#
3235# Cleanup a swap device on the provided device.
3236#
3237function swap_cleanup
3238{
3239	typeset swapdev=$1
3240
3241	if is_swap_inuse $swapdev; then
3242		if is_linux; then
3243			log_must swapoff $swapdev
3244		elif is_freebsd; then
3245			log_must swapoff $swapdev
3246		else
3247			log_must swap -d $swapdev
3248		fi
3249	fi
3250
3251	return 0
3252}
3253
3254#
3255# Set a global system tunable (64-bit value)
3256#
3257# $1 tunable name (use a NAME defined in tunables.cfg)
3258# $2 tunable value
3259#
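# Typical save/modify/restore pattern; TXG_TIMEOUT is one of the NAMEs
# defined in tunables.cfg and the value 1 is illustrative:
#
#	typeset saved=$(get_tunable TXG_TIMEOUT)
#	log_must set_tunable64 TXG_TIMEOUT 1
#	... test body ...
#	log_must set_tunable64 TXG_TIMEOUT $saved
#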
3260function set_tunable64
3261{
3262	set_tunable_impl "$1" "$2" Z
3263}
3264
3265#
3266# Set a global system tunable (32-bit value)
3267#
3268# $1 tunable name (use a NAME defined in tunables.cfg)
3269# $2 tunable value
3270#
3271function set_tunable32
3272{
3273	set_tunable_impl "$1" "$2" W
3274}
3275
3276function set_tunable_impl
3277{
3278	typeset name="$1"
3279	typeset value="$2"
3280	typeset mdb_cmd="$3"
3281
3282	eval "typeset tunable=\$$name"
3283	case "$tunable" in
3284	UNSUPPORTED)
3285		log_unsupported "Tunable '$name' is unsupported on $UNAME"
3286		;;
3287	"")
3288		log_fail "Tunable '$name' must be added to tunables.cfg"
3289		;;
3290	*)
3291		;;
3292	esac
3293
3294	[[ -z "$value" ]] && return 1
3295	[[ -z "$mdb_cmd" ]] && return 1
3296
3297	case "$UNAME" in
3298	Linux)
3299		typeset zfs_tunables="/sys/module/zfs/parameters"
3300		echo "$value" >"$zfs_tunables/$tunable"
3301		;;
3302	FreeBSD)
3303		sysctl vfs.zfs.$tunable=$value
3304		;;
3305	SunOS)
3306		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
3307		;;
3308	esac
3309}
3310
3311#
3312# Get a global system tunable
3313#
3314# $1 tunable name (use a NAME defined in tunables.cfg)
3315#
3316function get_tunable
3317{
3318	get_tunable_impl "$1"
3319}
3320
3321function get_tunable_impl
3322{
3323	typeset name="$1"
3324	typeset module="${2:-zfs}"
3325	typeset check_only="$3"
3326
3327	eval "typeset tunable=\$$name"
3328	case "$tunable" in
3329	UNSUPPORTED)
3330		if [ -z "$check_only" ] ; then
3331			log_unsupported "Tunable '$name' is unsupported on $UNAME"
3332		else
3333			return 1
3334		fi
3335		;;
3336	"")
3337		if [ -z "$check_only" ] ; then
3338			log_fail "Tunable '$name' must be added to tunables.cfg"
3339		else
3340			return 1
3341		fi
3342		;;
3343	*)
3344		;;
3345	esac
3346
3347	case "$UNAME" in
3348	Linux)
3349		typeset zfs_tunables="/sys/module/$module/parameters"
3350		cat $zfs_tunables/$tunable
3351		;;
3352	FreeBSD)
3353		sysctl -n vfs.zfs.$tunable
3354		;;
3355	SunOS)
3356		[[ "$module" = "zfs" ]] || return 1
3357		;;
3358	esac
3359}
3360
3361# Does a tunable exist?
3362#
3363# $1: Tunable name
3364function tunable_exists
3365{
3366	get_tunable_impl $1 "zfs" 1
3367}
3368
3369#
3370# Compute MD5 digest for given file or stdin if no file given.
3371# Note: file path must not contain spaces
3372#
3373function md5digest
3374{
3375	typeset file=$1
3376
3377	case "$UNAME" in
3378	FreeBSD)
3379		md5 -q $file
3380		;;
3381	*)
3382		typeset sum _
3383		read -r sum _ < <(md5sum -b $file)
3384		echo $sum
3385		;;
3386	esac
3387}
3388
3389#
3390# Compute SHA256 digest for given file or stdin if no file given.
3391# Note: file path must not contain spaces
3392#
3393function sha256digest
3394{
3395	typeset file=$1
3396
3397	case "$UNAME" in
3398	FreeBSD)
3399		sha256 -q $file
3400		;;
3401	*)
3402		typeset sum _
3403		read -r sum _ < <(sha256sum -b $file)
3404		echo $sum
3405		;;
3406	esac
3407}
3408
3409function new_fs #<args>
3410{
3411	case "$UNAME" in
3412	FreeBSD)
3413		newfs "$@"
3414		;;
3415	*)
3416		echo y | newfs -v "$@"
3417		;;
3418	esac
3419}
3420
3421function stat_size #<path>
3422{
3423	typeset path=$1
3424
3425	case "$UNAME" in
3426	FreeBSD)
3427		stat -f %z "$path"
3428		;;
3429	*)
3430		stat -c %s "$path"
3431		;;
3432	esac
3433}
3434
3435function stat_mtime #<path>
3436{
3437	typeset path=$1
3438
3439	case "$UNAME" in
3440	FreeBSD)
3441		stat -f %m "$path"
3442		;;
3443	*)
3444		stat -c %Y "$path"
3445		;;
3446	esac
3447}
3448
3449function stat_ctime #<path>
3450{
3451	typeset path=$1
3452
3453	case "$UNAME" in
3454	FreeBSD)
3455		stat -f %c "$path"
3456		;;
3457	*)
3458		stat -c %Z "$path"
3459		;;
3460	esac
3461}
3462
3463function stat_crtime #<path>
3464{
3465	typeset path=$1
3466
3467	case "$UNAME" in
3468	FreeBSD)
3469		stat -f %B "$path"
3470		;;
3471	*)
3472		stat -c %W "$path"
3473		;;
3474	esac
3475}
3476
3477function stat_generation #<path>
3478{
3479	typeset path=$1
3480
3481	case "$UNAME" in
3482	Linux)
3483		getversion "${path}"
3484		;;
3485	*)
3486		stat -f %v "${path}"
3487		;;
3488	esac
3489}
3490
3491# Run a command as if it were being run in a TTY.
3492#
3493# Usage:
3494#
3495#    faketty command
3496#
3497function faketty
3498{
3499	if is_freebsd; then
3500		script -q /dev/null env "$@"
3501	else
3502		script --return --quiet -c "$*" /dev/null
3503	fi
3504}
3505
3506#
3507# Produce a random permutation of the integers in a given range (inclusive).
3508#
3509function range_shuffle # begin end
3510{
3511	typeset -i begin=$1
3512	typeset -i end=$2
3513
3514	seq ${begin} ${end} | sort -R
3515}
3516
3517#
3518# Cross-platform xattr helpers
3519#
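# Illustrative round trip (attribute name, value, and file are placeholders):
#
#	log_must set_xattr testattr testvalue $TESTDIR/testfile
#	[ "$(get_xattr testattr $TESTDIR/testfile)" = "testvalue" ] || \
#	    log_fail "unexpected xattr value"
#	log_must rm_xattr testattr $TESTDIR/testfile
#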
3520
3521function get_xattr # name path
3522{
3523	typeset name=$1
3524	typeset path=$2
3525
3526	case "$UNAME" in
3527	FreeBSD)
3528		getextattr -qq user "${name}" "${path}"
3529		;;
3530	*)
3531		attr -qg "${name}" "${path}"
3532		;;
3533	esac
3534}
3535
3536function set_xattr # name value path
3537{
3538	typeset name=$1
3539	typeset value=$2
3540	typeset path=$3
3541
3542	case "$UNAME" in
3543	FreeBSD)
3544		setextattr user "${name}" "${value}" "${path}"
3545		;;
3546	*)
3547		attr -qs "${name}" -V "${value}" "${path}"
3548		;;
3549	esac
3550}
3551
3552function set_xattr_stdin # name path
3553{
3554	typeset name=$1
3555	typeset path=$2
3556
3557	case "$UNAME" in
3558	FreeBSD)
3559		setextattr -i user "${name}" "${path}"
3560		;;
3561	*)
3562		attr -qs "${name}" "${path}"
3563		;;
3564	esac
3565}
3566
3567function rm_xattr # name path
3568{
3569	typeset name=$1
3570	typeset path=$2
3571
3572	case "$UNAME" in
3573	FreeBSD)
3574		rmextattr -q user "${name}" "${path}"
3575		;;
3576	*)
3577		attr -qr "${name}" "${path}"
3578		;;
3579	esac
3580}
3581
3582function ls_xattr # path
3583{
3584	typeset path=$1
3585
3586	case "$UNAME" in
3587	FreeBSD)
3588		lsextattr -qq user "${path}"
3589		;;
3590	*)
3591		attr -ql "${path}"
3592		;;
3593	esac
3594}
3595
3596function kstat # stat flags?
3597{
3598	typeset stat=$1
3599	typeset flags=${2-"-n"}
3600
3601	case "$UNAME" in
3602	FreeBSD)
3603		sysctl $flags kstat.zfs.misc.$stat
3604		;;
3605	Linux)
3606		cat "/proc/spl/kstat/zfs/$stat" 2>/dev/null
3607		;;
3608	*)
3609		false
3610		;;
3611	esac
3612}
3613
3614function get_arcstat # stat
3615{
3616	typeset stat=$1
3617
3618	case "$UNAME" in
3619	FreeBSD)
3620		kstat arcstats.$stat
3621		;;
3622	Linux)
3623		kstat arcstats | awk "/$stat/"' { print $3 }'
3624		;;
3625	*)
3626		false
3627		;;
3628	esac
3629}
3630
3631function punch_hole # offset length file
3632{
3633	typeset offset=$1
3634	typeset length=$2
3635	typeset file=$3
3636
3637	case "$UNAME" in
3638	FreeBSD)
3639		truncate -d -o $offset -l $length "$file"
3640		;;
3641	Linux)
3642		fallocate --punch-hole --offset $offset --length $length "$file"
3643		;;
3644	*)
3645		false
3646		;;
3647	esac
3648}
3649
3650function zero_range # offset length file
3651{
3652	typeset offset=$1
3653	typeset length=$2
3654	typeset file=$3
3655
3656	case "$UNAME" in
3657	Linux)
3658		fallocate --zero-range --offset $offset --length $length "$file"
3659		;;
3660	*)
3661		false
3662		;;
3663	esac
3664}
3665
3666#
3667# Wait for the specified arcstat to reach non-zero quiescence.
3668# If echo is 1, print the value once quiescence is reached; if echo is
3669# 0, print a note naming the arcstat being waited on instead.
3670#
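# Example: wait for the (illustrative) l2_feeds arcstat to settle and
# capture its value via the arcstat_quiescence_echo wrapper below:
#
#	typeset feeds=$(arcstat_quiescence_echo l2_feeds)
#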
3671function arcstat_quiescence # stat echo
3672{
3673	typeset stat=$1
3674	typeset echo=$2
3675	typeset do_once=true
3676
3677	if [[ $echo -eq 0 ]]; then
3678		echo "Waiting for arcstat $1 quiescence."
3679	fi
3680
3681	while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
3682		typeset stat1=$(get_arcstat $stat)
3683		sleep 2
3684		typeset stat2=$(get_arcstat $stat)
3685		do_once=false
3686	done
3687
3688	if [[ $echo -eq 1 ]]; then
3689		echo $stat2
3690	fi
3691}
3692
3693function arcstat_quiescence_noecho # stat
3694{
3695	typeset stat=$1
3696	arcstat_quiescence $stat 0
3697}
3698
3699function arcstat_quiescence_echo # stat
3700{
3701	typeset stat=$1
3702	arcstat_quiescence $stat 1
3703}
3704
3705#
3706# Given an array of pids, wait until all processes
3707# have completed and check their return status.
3708#
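# Illustrative use with background jobs (the commands are placeholders):
#
#	typeset -a pids
#	sleep 1 & pids+=($!)
#	sleep 2 & pids+=($!)
#	wait_for_children "${pids[@]}" || log_fail "a child process failed"
#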
3709function wait_for_children #children
3710{
3711	rv=0
3712	children=("$@")
3713	for child in "${children[@]}"
3714	do
3715		child_exit=0
3716		wait ${child} || child_exit=$?
3717		if [ $child_exit -ne 0 ]; then
3718			echo "child ${child} failed with ${child_exit}"
3719			rv=1
3720		fi
3721	done
3722	return $rv
3723}
3724
3725#
3726# Compare two directory trees recursively in a manner similar to diff(1), but
3727# using rsync. If there are any discrepancies, a summary of the differences is
3728# output and a non-zero error is returned.
3729#
3730# If you're comparing a directory after a ZIL replay, you should set
3731# LIBTEST_DIFF_ZIL_REPLAY=1 or use replay_directory_diff which will cause
3732# directory_diff to ignore mtime changes (the ZIL replay won't fix up mtime
3733# information).
3734#
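# Example: verify that a copied tree matches its source (paths are
# placeholders):
#
#	log_must directory_diff /$TESTPOOL/src /$TESTPOOL/copy
#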
3735function directory_diff # dir_a dir_b
3736{
3737	dir_a="$1"
3738	dir_b="$2"
3739	zil_replay="${LIBTEST_DIFF_ZIL_REPLAY:-0}"
3740
3741	# If one of the directories doesn't exist, return 2. This is to match the
3742	# semantics of diff.
3743	if ! [ -d "$dir_a" -a -d "$dir_b" ]; then
3744		return 2
3745	fi
3746
3747	# Run rsync with --dry-run --itemize-changes to get something akin to diff
3748	# output, but rsync is far more thorough in detecting differences (diff
3749	# doesn't compare file metadata, and cannot handle special files).
3750	#
3751	# Also make sure to filter out non-user.* xattrs when comparing. On
3752	# SELinux-enabled systems the copied tree will probably have different
3753	# SELinux labels.
3754	args=("-nicaAHX" '--filter=-x! user.*' "--delete")
3755
3756	# NOTE: Quite a few rsync builds do not support --crtimes which would be
3757	# necessary to verify that creation times are being maintained properly.
3758	# Unfortunately because of this we cannot use it unconditionally but we can
3759	# check if this rsync build supports it and use it then. This check is
3760	# based on the same check in the rsync test suite (testsuite/crtimes.test).
3761	#
3762	# We check ctimes even with zil_replay=1 because the ZIL does store
3763	# creation times and we should make sure they match (if the creation times
3764	# do not match there is a "c" entry in one of the columns).
3765	if rsync --version | grep -q "[, ] crtimes"; then
3766		args+=("--crtimes")
3767	else
3768		log_note "This rsync package does not support --crtimes (-N)."
3769	fi
3770
3771	# If we are testing a ZIL replay, we need to ignore timestamp changes.
3772	# Unfortunately --no-times doesn't do what we want -- it will still tell
3773	# you if the timestamps don't match but rsync will set the timestamps to
3774	# the current time (leading to an itemised change entry). It's simpler to
3775	# just filter out those lines.
3776	if [ "$zil_replay" -eq 0 ]; then
3777		filter=("cat")
3778	else
3779		# Different rsync versions have different numbers of columns. So just
3780		# require that aside from the first two, all other columns must be
3781		# blank (literal ".") or a timestamp field ("[tT]").
3782		filter=("grep" "-v" '^\..[.Tt]\+ ')
3783	fi
3784
3785	diff="$(rsync "${args[@]}" "$dir_a/" "$dir_b/" | "${filter[@]}")"
3786	rv=0
3787	if [ -n "$diff" ]; then
3788		echo "$diff"
3789		rv=1
3790	fi
3791	return $rv
3792}
3793
3794#
3795# Compare two directory trees recursively, without checking whether the mtimes
3796# match (creation times will be checked if the available rsync binary supports
3797# it). This is necessary for ZIL replay checks (because the ZIL does not
3798# contain mtimes and thus after a ZIL replay, mtimes won't match).
3799#
3800# This is shorthand for LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff <...>.
3801#
3802function replay_directory_diff # dir_a dir_b
3803{
3804	LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff "$@"
3805}
3806
3807#
3808# Put coredumps into $1/core.{basename}
3809#
3810# Output must be saved and passed to pop_coredump_pattern on cleanup
3811#
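# Typical usage; the state file and directory are placeholders:
#
#	typeset corestate=$TEST_BASE_DIR/coredump_state
#	push_coredump_pattern $TEST_BASE_DIR > $corestate
#	... test body ...
#	pop_coredump_pattern $corestate
#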
3812function push_coredump_pattern # dir
3813{
3814	ulimit -c unlimited
3815	case "$UNAME" in
3816	Linux)
3817		cat /proc/sys/kernel/core_pattern /proc/sys/kernel/core_uses_pid
3818		echo "$1/core.%e" >/proc/sys/kernel/core_pattern &&
3819		    echo 0 >/proc/sys/kernel/core_uses_pid
3820		;;
3821	FreeBSD)
3822		sysctl -n kern.corefile
3823		sysctl kern.corefile="$1/core.%N" >/dev/null
3824		;;
3825	*)
3826		# Nothing to save; coreadm -p only affects this process and its children
3827		coreadm -p "$1/core.%f"
3828		;;
3829	esac
3830}
3831
3832#
3833# Put coredumps back into the default location
3834#
3835function pop_coredump_pattern
3836{
3837	[ -s "$1" ] || return 0
3838	case "$UNAME" in
3839	Linux)
3840		typeset pat pid
3841		{ read -r pat; read -r pid; } < "$1"
3842		echo "$pat" >/proc/sys/kernel/core_pattern &&
3843		    echo "$pid" >/proc/sys/kernel/core_uses_pid
3844		;;
3845	FreeBSD)
3846		sysctl kern.corefile="$(<"$1")" >/dev/null
3847		;;
3848	esac
3849}
3850