xref: /freebsd/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib (revision b1c5f60ce87cc2f179dfb81de507d9b7bf59564c)
1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
24# Copyright (c) 2012, 2020, Delphix. All rights reserved.
25# Copyright (c) 2017, Tim Chase. All rights reserved.
26# Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
27# Copyright (c) 2017, Lawrence Livermore National Security LLC.
28# Copyright (c) 2017, Datto Inc. All rights reserved.
29# Copyright (c) 2017, Open-E Inc. All rights reserved.
30# Copyright (c) 2021, The FreeBSD Foundation.
31# Use is subject to license terms.
32#
33
34. ${STF_TOOLS}/include/logapi.shlib
35. ${STF_SUITE}/include/math.shlib
36. ${STF_SUITE}/include/blkdev.shlib
37
38. ${STF_SUITE}/include/tunables.cfg
39
40#
41# Apply constrained path when available.  This is required since the
42# PATH may have been modified by sudo's secure_path behavior.
43#
44if [ -n "$STF_PATH" ]; then
45	export PATH="$STF_PATH"
46fi
47
48#
49# Generic dot version comparison function
50#
51# Returns success when version $1 is greater than or equal to $2.
52#
53function compare_version_gte
54{
55	if [[ "$(printf "$1\n$2" | sort -V | tail -n1)" == "$1" ]]; then
56		return 0
57	else
58		return 1
59	fi
60}
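#
# Illustrative usage of compare_version_gte (not part of the original
# framework; the version strings below are arbitrary examples):
#
#	if compare_version_gte "2.1.5" "2.1.0"; then
#		echo "2.1.5 is greater than or equal to 2.1.0"
#	fi
#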
61
62# Linux kernel version comparison function
63#
64# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
65#
66# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
67#
68function linux_version
69{
70	typeset ver="$1"
71
72	[[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
73
74	typeset version=$(echo $ver | cut -d '.' -f 1)
75	typeset major=$(echo $ver | cut -d '.' -f 2)
76	typeset minor=$(echo $ver | cut -d '.' -f 3)
77
78	[[ -z "$version" ]] && version=0
79	[[ -z "$major" ]] && major=0
80	[[ -z "$minor" ]] && minor=0
81
82	echo $((version * 10000 + major * 100 + minor))
83}
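#
# Illustrative examples for linux_version (hypothetical kernel versions),
# based on the encoding above (version * 10000 + major * 100 + minor):
#
#	linux_version "4.18"	# prints 41800
#	linux_version "2.6.32"	# prints 20632
#	if [ $(linux_version) -ge $(linux_version "3.10") ]; then
#		echo "running kernel 3.10 or newer"
#	fi
#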
84
85# Determine if this is a Linux test system
86#
87# Return 0 if the platform is Linux, 1 otherwise
88
89function is_linux
90{
91	if [[ $(uname -o) == "GNU/Linux" ]]; then
92		return 0
93	else
94		return 1
95	fi
96}
97
98# Determine if this is an illumos test system
99#
100# Return 0 if the platform is illumos, 1 otherwise
101function is_illumos
102{
103	if [[ $(uname -o) == "illumos" ]]; then
104		return 0
105	else
106		return 1
107	fi
108}
109
110# Determine if this is a FreeBSD test system
111#
112# Return 0 if the platform is FreeBSD, 1 otherwise
113
114function is_freebsd
115{
116	if [[ $(uname -o) == "FreeBSD" ]]; then
117		return 0
118	else
119		return 1
120	fi
121}
122
123# Determine if this is a DilOS test system
124#
125# Return 0 if the platform is DilOS, 1 otherwise
126
127function is_dilos
128{
129	typeset ID=""
130	[[ -f /etc/os-release ]] && . /etc/os-release
131	if [[ $ID == "dilos" ]]; then
132		return 0
133	else
134		return 1
135	fi
136}
137
138# Determine if this is a 32-bit system
139#
140# Return 0 if the platform is 32-bit, 1 otherwise
141
142function is_32bit
143{
144	if [[ $(getconf LONG_BIT) == "32" ]]; then
145		return 0
146	else
147		return 1
148	fi
149}
150
151# Determine if kmemleak is enabled
152#
153# Return 0 if kmemleak is enabled, 1 otherwise
154
155function is_kmemleak
156{
157	if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
158		return 0
159	else
160		return 1
161	fi
162}
163
164# Determine whether a dataset is mounted
165#
166# $1 dataset name
167# $2 filesystem type; optional - defaulted to zfs
168#
169# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
170
171function ismounted
172{
173	typeset fstype=$2
174	[[ -z $fstype ]] && fstype=zfs
175	typeset out dir name ret
176
177	case $fstype in
178		zfs)
179			if [[ "$1" == "/"* ]] ; then
180				for out in $(zfs mount | awk '{print $2}'); do
181					[[ $1 == $out ]] && return 0
182				done
183			else
184				for out in $(zfs mount | awk '{print $1}'); do
185					[[ $1 == $out ]] && return 0
186				done
187			fi
188		;;
189		ufs|nfs)
190			if is_freebsd; then
191				mount -pt $fstype | while read dev dir _t _flags; do
192					[[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
193				done
194			else
195				out=$(df -F $fstype $1 2>/dev/null)
196				ret=$?
197				(($ret != 0)) && return $ret
198
199				dir=${out%%\(*}
200				dir=${dir%% *}
201				name=${out##*\(}
202				name=${name%%\)*}
203				name=${name%% *}
204
205				[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
206			fi
207		;;
208		ext*)
209			out=$(df -t $fstype $1 2>/dev/null)
210			return $?
211		;;
212		zvol)
213			if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
214				link=$(readlink -f $ZVOL_DEVDIR/$1)
215				[[ -n "$link" ]] && \
216					mount | grep -q "^$link" && \
217						return 0
218			fi
219		;;
220	esac
221
222	return 1
223}
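#
# Illustrative usage of ismounted (a sketch; the dataset and mountpoint
# names are hypothetical):
#
#	ismounted "$TESTPOOL/$TESTFS" && echo "dataset is mounted"
#	ismounted "/mnt/nfsdir" nfs || echo "nothing NFS-mounted there"
#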
224
225# Return 0 if a dataset is mounted; 1 otherwise
226#
227# $1 dataset name
228# $2 filesystem type; optional - defaulted to zfs
229
230function mounted
231{
232	ismounted $1 $2
233	(($? == 0)) && return 0
234	return 1
235}
236
237# Return 0 if a dataset is unmounted; 1 otherwise
238#
239# $1 dataset name
240# $2 filesystem type; optional - defaulted to zfs
241
242function unmounted
243{
244	ismounted $1 $2
245	(($? == 1)) && return 0
246	return 1
247}
248
249# split line on ","
250#
251# $1 - line to split
252
253function splitline
254{
255	echo $1 | tr ',' ' '
256}
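#
# Illustrative usage of splitline (hypothetical device list):
#
#	for dev in $(splitline "sdb,sdc,sdd"); do
#		log_note "$dev"
#	done
#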
257
258function default_setup
259{
260	default_setup_noexit "$@"
261
262	log_pass
263}
264
265function default_setup_no_mountpoint
266{
267	default_setup_noexit "$1" "$2" "$3" "yes"
268
269	log_pass
270}
271
272#
273# Given a list of disks, setup storage pools and datasets.
274#
275function default_setup_noexit
276{
277	typeset disklist=$1
278	typeset container=$2
279	typeset volume=$3
280	typeset no_mountpoint=$4
281	log_note begin default_setup_noexit
282
283	if is_global_zone; then
284		if poolexists $TESTPOOL ; then
285			destroy_pool $TESTPOOL
286		fi
287		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
288		log_must zpool create -f $TESTPOOL $disklist
289	else
290		reexport_pool
291	fi
292
293	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
294	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
295
296	log_must zfs create $TESTPOOL/$TESTFS
297	if [[ -z $no_mountpoint ]]; then
298		log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
299	fi
300
301	if [[ -n $container ]]; then
302		rm -rf $TESTDIR1  || \
303			log_unresolved Could not remove $TESTDIR1
304		mkdir -p $TESTDIR1 || \
305			log_unresolved Could not create $TESTDIR1
306
307		log_must zfs create $TESTPOOL/$TESTCTR
308		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
309		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
310		if [[ -z $no_mountpoint ]]; then
311			log_must zfs set mountpoint=$TESTDIR1 \
312			    $TESTPOOL/$TESTCTR/$TESTFS1
313		fi
314	fi
315
316	if [[ -n $volume ]]; then
317		if is_global_zone ; then
318			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
319			block_device_wait
320		else
321			log_must zfs create $TESTPOOL/$TESTVOL
322		fi
323	fi
324}
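#
# Illustrative usage of default_setup_noexit (a sketch; assumes $DISKS holds
# the disks assigned to the test run):
#
#	default_setup_noexit "$DISKS"		# pool and $TESTFS only
#	default_setup_noexit "$DISKS" "true"	# additionally create the container
#	default_setup_noexit "$DISKS" "" "true"	# additionally create the volume
#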
325
326#
327# Given a list of disks, setup a storage pool, file system and
328# a container.
329#
330function default_container_setup
331{
332	typeset disklist=$1
333
334	default_setup "$disklist" "true"
335}
336
337#
338# Given a list of disks, setup a storage pool, file system
339# and a volume.
340#
341function default_volume_setup
342{
343	typeset disklist=$1
344
345	default_setup "$disklist" "" "true"
346}
347
348#
349# Given a list of disks, setup a storage pool, file system,
350# a container and a volume.
351#
352function default_container_volume_setup
353{
354	typeset disklist=$1
355
356	default_setup "$disklist" "true" "true"
357}
358
359#
360# Create a snapshot on a filesystem or volume. By default, create a snapshot
361# on the filesystem.
362#
363# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
364# $2 snapshot name. Default, $TESTSNAP
365#
366function create_snapshot
367{
368	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
369	typeset snap=${2:-$TESTSNAP}
370
371	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
372	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
373
374	if snapexists $fs_vol@$snap; then
375		log_fail "$fs_vol@$snap already exists."
376	fi
377	datasetexists $fs_vol || \
378		log_fail "$fs_vol must exist."
379
380	log_must zfs snapshot $fs_vol@$snap
381}
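#
# Illustrative usage of create_snapshot (the explicit names below are
# hypothetical):
#
#	create_snapshot					# $TESTPOOL/$TESTFS@$TESTSNAP
#	create_snapshot "$TESTPOOL/$TESTVOL" "snap1"	# snapshot of a volume
#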
382
383#
384# Create a clone from a snapshot, default clone name is $TESTCLONE.
385#
386# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
387# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
388#
389function create_clone   # snapshot clone
390{
391	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
392	typeset clone=${2:-$TESTPOOL/$TESTCLONE}
393
394	[[ -z $snap ]] && \
395		log_fail "Snapshot name is undefined."
396	[[ -z $clone ]] && \
397		log_fail "Clone name is undefined."
398
399	log_must zfs clone $snap $clone
400}
401
402#
403# Create a bookmark of the given snapshot.  By default, create a bookmark on
404# the filesystem.
405#
406# $1 Existing filesystem or volume name. Default, $TESTFS
407# $2 Existing snapshot name. Default, $TESTSNAP
408# $3 bookmark name. Default, $TESTBKMARK
409#
410function create_bookmark
411{
412	typeset fs_vol=${1:-$TESTFS}
413	typeset snap=${2:-$TESTSNAP}
414	typeset bkmark=${3:-$TESTBKMARK}
415
416	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
417	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
418	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
419
420	if bkmarkexists $fs_vol#$bkmark; then
421		log_fail "$fs_vol#$bkmark already exists."
422	fi
423	datasetexists $fs_vol || \
424		log_fail "$fs_vol must exist."
425	snapexists $fs_vol@$snap || \
426		log_fail "$fs_vol@$snap must exist."
427
428	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
429}
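#
# Illustrative usage of create_bookmark (assumes the snapshot already exists;
# the explicit names below are hypothetical):
#
#	create_bookmark				# $TESTFS#$TESTBKMARK from @$TESTSNAP
#	create_bookmark "$TESTFS" "snap1" "bm1"
#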
430
431#
432# Create the temporary clone that results from an interrupted resumable 'zfs receive'
433# $1 Destination filesystem name. Must not exist, will be created as the result
434#    of this function along with its %recv temporary clone
435# $2 Source filesystem name. Must not exist, will be created and destroyed
436#
437function create_recv_clone
438{
439	typeset recvfs="$1"
440	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
441	typeset snap="$sendfs@snap1"
442	typeset incr="$sendfs@snap2"
443	typeset mountpoint="$TESTDIR/create_recv_clone"
444	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
445
446	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
447
448	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
449	datasetexists $sendfs && log_fail "Send filesystem must not exist."
450
451	log_must zfs create -o compression=off -o mountpoint="$mountpoint" $sendfs
452	log_must zfs snapshot $snap
453	log_must eval "zfs send $snap | zfs recv -u $recvfs"
454	log_must mkfile 1m "$mountpoint/data"
455	log_must zfs snapshot $incr
456	log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
457	    iflag=fullblock > $sendfile"
458	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
459	destroy_dataset "$sendfs" "-r"
460	log_must rm -f "$sendfile"
461
462	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
463		log_fail "Error creating temporary $recvfs/%recv clone"
464	fi
465}
466
467function default_mirror_setup
468{
469	default_mirror_setup_noexit $1 $2 $3
470
471	log_pass
472}
473
474#
475# Given a pair of disks, set up a storage pool and dataset for the mirror
476# @parameters: $1 the primary side of the mirror
477#   $2 the secondary side of the mirror
478# @uses: ZPOOL ZFS TESTPOOL TESTFS
479function default_mirror_setup_noexit
480{
481	readonly func="default_mirror_setup_noexit"
482	typeset primary=$1
483	typeset secondary=$2
484
485	[[ -z $primary ]] && \
486		log_fail "$func: No parameters passed"
487	[[ -z $secondary ]] && \
488		log_fail "$func: No secondary partition passed"
489	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
490	log_must zpool create -f $TESTPOOL mirror $@
491	log_must zfs create $TESTPOOL/$TESTFS
492	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
493}
494
495#
496# Create a number of mirrors.
497# We create a number ($1) of two-way mirrors using the pairs of disks named
498# on the command line. These mirrors are *not* mounted.
499# @parameters: $1 the number of mirrors to create
500#  $... the devices to use to create the mirrors on
501# @uses: ZPOOL ZFS TESTPOOL
502function setup_mirrors
503{
504	typeset -i nmirrors=$1
505
506	shift
507	while ((nmirrors > 0)); do
508		log_must test -n "$1" -a -n "$2"
509		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
510		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
511		shift 2
512		((nmirrors = nmirrors - 1))
513	done
514}
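#
# Illustrative usage of setup_mirrors (a sketch; the disk names are
# hypothetical):
#
#	setup_mirrors 2 sdb sdc sdd sde		# creates pools $TESTPOOL2 and
#						# $TESTPOOL1, each a 2-way mirror
#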
515
516#
517# Create a number of raidz pools.
518# We create a number ($1) of two-disk raidz pools using the pairs of disks named
519# on the command line. These pools are *not* mounted.
520# @parameters: $1 the number of pools to create
521#  $... the devices to use to create the pools on
522# @uses: ZPOOL ZFS TESTPOOL
523function setup_raidzs
524{
525	typeset -i nraidzs=$1
526
527	shift
528	while ((nraidzs > 0)); do
529		log_must test -n "$1" -a -n "$2"
530		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
531		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
532		shift 2
533		((nraidzs = nraidzs - 1))
534	done
535}
536
537#
538# Destroy the configured testpool mirrors.
539# the mirrors are of the form ${TESTPOOL}{number}
540# @uses: ZPOOL ZFS TESTPOOL
541function destroy_mirrors
542{
543	default_cleanup_noexit
544
545	log_pass
546}
547
548function default_raidz_setup
549{
550	default_raidz_setup_noexit "$*"
551
552	log_pass
553}
554
555#
556# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
557# $1 the list of disks
558#
559function default_raidz_setup_noexit
560{
561	typeset disklist="$*"
562	disks=(${disklist[*]})
563
564	if [[ ${#disks[*]} -lt 2 ]]; then
565		log_fail "A raid-z requires a minimum of two disks."
566	fi
567
568	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
569	log_must zpool create -f $TESTPOOL raidz $disklist
570	log_must zfs create $TESTPOOL/$TESTFS
571	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
572}
573
574#
575# Common function used to clean up storage pools and datasets.
576#
577# Invoked at the start of the test suite to ensure the system
578# is in a known state, and also at the end of each set of
579# sub-tests to ensure errors from one set of tests don't
580# impact the execution of the next set.
581
582function default_cleanup
583{
584	default_cleanup_noexit
585
586	log_pass
587}
588
589#
590# Utility function used to list all available pool names.
591#
592# NOTE: $KEEP is a variable containing pool names, separated by a newline
593# character, that must be excluded from the returned list.
594#
595function get_all_pools
596{
597	zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
598}
599
600function default_cleanup_noexit
601{
602	typeset pool=""
603	#
604	# Destroying the pool will also destroy any
605	# filesystems it contains.
606	#
607	if is_global_zone; then
608		zfs unmount -a > /dev/null 2>&1
609		ALL_POOLS=$(get_all_pools)
610		# Here, we loop through the pools we're allowed to
611		# destroy, only destroying them if it's safe to do
612		# so.
613		while [ -n "${ALL_POOLS}" ]
614		do
615			for pool in ${ALL_POOLS}
616			do
617				if safe_to_destroy_pool $pool ;
618				then
619					destroy_pool $pool
620				fi
621			done
622			ALL_POOLS=$(get_all_pools)
623		done
624
625		zfs mount -a
626	else
627		typeset fs=""
628		for fs in $(zfs list -H -o name \
629		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
630			destroy_dataset "$fs" "-Rf"
631		done
632
633		# Clean up here to avoid leaving garbage directories behind.
634		for fs in $(zfs list -H -o name); do
635			[[ $fs == /$ZONE_POOL ]] && continue
636			[[ -d $fs ]] && log_must rm -rf $fs/*
637		done
638
639		#
640		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
641		# the default value
642		#
643		for fs in $(zfs list -H -o name); do
644			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
645				log_must zfs set reservation=none $fs
646				log_must zfs set recordsize=128K $fs
647				log_must zfs set mountpoint=/$fs $fs
648				typeset enc=""
649				enc=$(get_prop encryption $fs)
650				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
651					[[ "$enc" == "off" ]]; then
652					log_must zfs set checksum=on $fs
653				fi
654				log_must zfs set compression=off $fs
655				log_must zfs set atime=on $fs
656				log_must zfs set devices=off $fs
657				log_must zfs set exec=on $fs
658				log_must zfs set setuid=on $fs
659				log_must zfs set readonly=off $fs
660				log_must zfs set snapdir=hidden $fs
661				log_must zfs set aclmode=groupmask $fs
662				log_must zfs set aclinherit=secure $fs
663			fi
664		done
665	fi
666
667	[[ -d $TESTDIR ]] && \
668		log_must rm -rf $TESTDIR
669
670	disk1=${DISKS%% *}
671	if is_mpath_device $disk1; then
672		delete_partitions
673	fi
674
675	rm -f $TEST_BASE_DIR/{err,out}
676}
677
678
679#
680# Common function used to clean up storage pools, file systems
681# and containers.
682#
683function default_container_cleanup
684{
685	if ! is_global_zone; then
686		reexport_pool
687	fi
688
689	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
690	[[ $? -eq 0 ]] && \
691	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
692
693	destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
694	destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
695
696	[[ -e $TESTDIR1 ]] && \
697	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1
698
699	default_cleanup
700}
701
702#
703# Common function used to cleanup snapshot of file system or volume. Default to
704# delete the file system's snapshot
705#
706# $1 snapshot name
707#
708function destroy_snapshot
709{
710	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
711
712	if ! snapexists $snap; then
713		log_fail "'$snap' does not exist."
714	fi
715
716	#
717	# The value returned by 'get_prop' is not the real mountpoint when the
718	# snapshot is unmounted. So, first check and make sure this snapshot
719	# is mounted on the current system.
720	#
721	typeset mtpt=""
722	if ismounted $snap; then
723		mtpt=$(get_prop mountpoint $snap)
724		(($? != 0)) && \
725			log_fail "get_prop mountpoint $snap failed."
726	fi
727
728	destroy_dataset "$snap"
729	[[ $mtpt != "" && -d $mtpt ]] && \
730		log_must rm -rf $mtpt
731}
732
733#
734# Common function used to clean up a clone.
735#
736# $1 clone name
737#
738function destroy_clone
739{
740	typeset clone=${1:-$TESTPOOL/$TESTCLONE}
741
742	if ! datasetexists $clone; then
743		log_fail "'$clone' does not exist."
744	fi
745
746	# For the same reason as in destroy_snapshot
747	typeset mtpt=""
748	if ismounted $clone; then
749		mtpt=$(get_prop mountpoint $clone)
750		(($? != 0)) && \
751			log_fail "get_prop mountpoint $clone failed."
752	fi
753
754	destroy_dataset "$clone"
755	[[ $mtpt != "" && -d $mtpt ]] && \
756		log_must rm -rf $mtpt
757}
758
759#
760# Common function used to clean up a bookmark of a file system or volume.
761# By default, delete the file system's bookmark.
762#
763# $1 bookmark name
764#
765function destroy_bookmark
766{
767	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
768
769	if ! bkmarkexists $bkmark; then
770		log_fail "'$bkmark' does not exist."
771	fi
772
773	destroy_dataset "$bkmark"
774}
775
776# Return 0 if a snapshot exists; $? otherwise
777#
778# $1 - snapshot name
779
780function snapexists
781{
782	zfs list -H -t snapshot "$1" > /dev/null 2>&1
783	return $?
784}
785
786#
787# Return 0 if a bookmark exists; $? otherwise
788#
789# $1 - bookmark name
790#
791function bkmarkexists
792{
793	zfs list -H -t bookmark "$1" > /dev/null 2>&1
794	return $?
795}
796
797#
798# Return 0 if a hold exists; $? otherwise
799#
800# $1 - hold tag
801# $2 - snapshot name
802#
803function holdexists
804{
805	zfs holds "$2" | awk '{ print $2 }' | grep "$1" > /dev/null 2>&1
806	return $?
807}
808
809#
810# Set a property to a certain value on a dataset.
811# Sets a property of the dataset to the value as passed in.
812# @param:
813#	$1 dataset whose property is being set
814#	$2 property to set
815#	$3 value to set property to
816# @return:
817#	0 if the property could be set.
818#	non-zero otherwise.
819# @use: ZFS
820#
821function dataset_setprop
822{
823	typeset fn=dataset_setprop
824
825	if (($# < 3)); then
826		log_note "$fn: Insufficient parameters (need 3, had $#)"
827		return 1
828	fi
829	typeset output=
830	output=$(zfs set $2=$3 $1 2>&1)
831	typeset rv=$?
832	if ((rv != 0)); then
833		log_note "Setting property on $1 failed."
834		log_note "property $2=$3"
835		log_note "Return Code: $rv"
836		log_note "Output: $output"
837		return $rv
838	fi
839	return 0
840}
841
842#
843# Assign suite defined dataset properties.
844# This function is used to apply the suite's defined default set of
845# properties to a dataset.
846# @parameters: $1 dataset to use
847# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
848# @returns:
849#   0 if the dataset has been altered.
850#   1 if no pool name was passed in.
851#   2 if the dataset could not be found.
852#   3 if the dataset could not have its properties set.
853#
854function dataset_set_defaultproperties
855{
856	typeset dataset="$1"
857
858	[[ -z $dataset ]] && return 1
859
860	typeset confset=
861	typeset -i found=0
862	for confset in $(zfs list); do
863		if [[ $dataset = $confset ]]; then
864			found=1
865			break
866		fi
867	done
868	[[ $found -eq 0 ]] && return 2
869	if [[ -n $COMPRESSION_PROP ]]; then
870		dataset_setprop $dataset compression $COMPRESSION_PROP || \
871			return 3
872		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
873	fi
874	if [[ -n $CHECKSUM_PROP ]]; then
875		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
876			return 3
877		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
878	fi
879	return 0
880}
881
882#
883# Check a numeric assertion
884# @parameter: $@ the assertion to check
885# @output: big loud notice if assertion failed
886# @use: log_fail
887#
888function assert
889{
890	(($@)) || log_fail "$@"
891}
892
893#
894# Function to format the partition sizes of a disk.
895# Given a disk cxtxdx, reduce all of its partitions
896# to 0 size.
897#
898function zero_partitions #<whole_disk_name>
899{
900	typeset diskname=$1
901	typeset i
902
903	if is_freebsd; then
904		gpart destroy -F $diskname
905	elif is_linux; then
906		DSK=$DEV_DSKDIR/$diskname
907		DSK=$(echo $DSK | sed -e "s|//|/|g")
908		log_must parted $DSK -s -- mklabel gpt
909		blockdev --rereadpt $DSK 2>/dev/null
910		block_device_wait
911	else
912		for i in 0 1 3 4 5 6 7
913		do
914			log_must set_partition $i "" 0mb $diskname
915		done
916	fi
917
918	return 0
919}
920
921#
922# Given a slice, size and disk, this function
923# formats the slice to the specified size.
924# Size should be specified with units as per
925# the `format` command requirements, e.g. 100mb, 3gb
926#
927# NOTE: This entire interface is problematic for the Linux parted utility
928# which requires the end of the partition to be specified.  It would be
929# best to retire this interface and replace it with something more flexible.
930# At the moment a best effort is made.
931#
932# arguments: <slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
933function set_partition
934{
935	typeset -i slicenum=$1
936	typeset start=$2
937	typeset size=$3
938	typeset disk=${4#$DEV_DSKDIR/}
939	disk=${disk#$DEV_RDSKDIR/}
940
941	case "$(uname)" in
942	Linux)
943		if [[ -z $size || -z $disk ]]; then
944			log_fail "The size or disk name is unspecified."
945		fi
946		disk=$DEV_DSKDIR/$disk
947		typeset size_mb=${size%%[mMgG]}
948
949		size_mb=${size_mb%%[mMgG][bB]}
950		if [[ $size == *[gG]* ]]; then
951			((size_mb = size_mb * 1024))
952		fi
953
954		# Create GPT partition table when setting slice 0 or
955		# when the device doesn't already contain a GPT label.
956		parted $disk -s -- print 1 >/dev/null
957		typeset ret_val=$?
958		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
959			parted $disk -s -- mklabel gpt
960			if [[ $? -ne 0 ]]; then
961				log_note "Failed to create GPT partition table on $disk"
962				return 1
963			fi
964		fi
965
966		# When no start is given align on the first cylinder.
967		if [[ -z "$start" ]]; then
968			start=1
969		fi
970
971		# Determine the cylinder size for the device and using
972		# that calculate the end offset in cylinders.
973		typeset -i cly_size_kb=0
974		cly_size_kb=$(parted -m $disk -s -- \
975			unit cyl print | head -3 | tail -1 | \
976			awk -F '[:k.]' '{print $4}')
977		((end = (size_mb * 1024 / cly_size_kb) + start))
978
979		parted $disk -s -- \
980		    mkpart part$slicenum ${start}cyl ${end}cyl
981		typeset ret_val=$?
982		if [[ $ret_val -ne 0 ]]; then
983			log_note "Failed to create partition $slicenum on $disk"
984			return 1
985		fi
986
987		blockdev --rereadpt $disk 2>/dev/null
988		block_device_wait $disk
989		;;
990	FreeBSD)
991		if [[ -z $size || -z $disk ]]; then
992			log_fail "The size or disk name is unspecified."
993		fi
994		disk=$DEV_DSKDIR/$disk
995
996		if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
997			gpart destroy -F $disk >/dev/null 2>&1
998			gpart create -s GPT $disk
999			if [[ $? -ne 0 ]]; then
1000				log_note "Failed to create GPT partition table on $disk"
1001				return 1
1002			fi
1003		fi
1004
1005		typeset index=$((slicenum + 1))
1006
1007		if [[ -n $start ]]; then
1008			start="-b $start"
1009		fi
1010		gpart add -t freebsd-zfs $start -s $size -i $index $disk
1011		if [[ $? -ne 0 ]]; then
1012			log_note "Failed to create partition $slicenum on $disk"
1013			return 1
1014		fi
1015
1016		block_device_wait $disk
1017		;;
1018	*)
1019		if [[ -z $slicenum || -z $size || -z $disk ]]; then
1020			log_fail "The slice, size or disk name is unspecified."
1021		fi
1022
1023		typeset format_file=/var/tmp/format_in.$$
1024
1025		echo "partition" >$format_file
1026		echo "$slicenum" >> $format_file
1027		echo "" >> $format_file
1028		echo "" >> $format_file
1029		echo "$start" >> $format_file
1030		echo "$size" >> $format_file
1031		echo "label" >> $format_file
1032		echo "" >> $format_file
1033		echo "q" >> $format_file
1034		echo "q" >> $format_file
1035
1036		format -e -s -d $disk -f $format_file
1037		typeset ret_val=$?
1038		rm -f $format_file
1039		;;
1040	esac
1041
1042	if [[ $ret_val -ne 0 ]]; then
1043		log_note "Unable to format $disk slice $slicenum to $size"
1044		return 1
1045	fi
1046	return 0
1047}
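#
# Illustrative usage of set_partition (a sketch; the disk name and sizes
# are hypothetical):
#
#	log_must set_partition 0 "" 100mb sdb	# first slice, default start
#	cyl=$(get_endslice sdb 0)
#	log_must set_partition 1 "$cyl" 2gb sdb	# next slice starts at $cyl
#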
1048
1049#
1050# Delete all partitions on all disks - this is specifically for the use of multipath
1051# devices which currently can only be used in the test suite as raw/un-partitioned
1052# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
1053#
1054function delete_partitions
1055{
1056	typeset disk
1057
1058	if [[ -z $DISKSARRAY ]]; then
1059		DISKSARRAY=$DISKS
1060	fi
1061
1062	if is_linux; then
1063		typeset -i part
1064		for disk in $DISKSARRAY; do
1065			for (( part = 1; part < MAX_PARTITIONS; part++ )); do
1066				typeset partition=${disk}${SLICE_PREFIX}${part}
1067				parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
1068				if lsblk | grep -qF ${partition}; then
1069					log_fail "Partition ${partition} not deleted"
1070				else
1071					log_note "Partition ${partition} deleted"
1072				fi
1073			done
1074		done
1075	elif is_freebsd; then
1076		for disk in $DISKSARRAY; do
1077			if gpart destroy -F $disk; then
1078				log_note "Partitions for ${disk} deleted"
1079			else
1080				log_fail "Partitions for ${disk} not deleted"
1081			fi
1082		done
1083	fi
1084}
1085
1086#
1087# Get the end cyl of the given slice
1088#
1089function get_endslice #<disk> <slice>
1090{
1091	typeset disk=$1
1092	typeset slice=$2
1093	if [[ -z $disk || -z $slice ]] ; then
1094		log_fail "The disk name or slice number is unspecified."
1095	fi
1096
1097	case "$(uname)" in
1098	Linux)
1099		endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
1100			awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}')
1101		((endcyl = (endcyl + 1)))
1102		;;
1103	FreeBSD)
1104		disk=${disk#/dev/zvol/}
1105		disk=${disk%p*}
1106		slice=$((slice + 1))
1107		endcyl=$(gpart show $disk | \
1108			awk -v slice=$slice '$3 == slice { print $1 + $2 }')
1109		;;
1110	*)
1111		disk=${disk#/dev/dsk/}
1112		disk=${disk#/dev/rdsk/}
1113		disk=${disk%s*}
1114
1115		typeset -i ratio=0
1116		ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
1117		    grep "sectors\/cylinder" | \
1118		    awk '{print $2}')
1119
1120		if ((ratio == 0)); then
1121			return
1122		fi
1123
1124		typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
1125		    nawk -v token="$slice" '{if ($1==token) print $6}')
1126
1127		((endcyl = (endcyl + 1) / ratio))
1128		;;
1129	esac
1130
1131	echo $endcyl
1132}
1133
1134
1135#
1136# Given a size, disk and total slice number, this function formats the
1137# disk slices from 0 to the total slice number, each with the same specified
1138# size.
1139#
1140function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
1141{
1142	typeset -i i=0
1143	typeset slice_size=$1
1144	typeset disk_name=$2
1145	typeset total_slices=$3
1146	typeset cyl
1147
1148	zero_partitions $disk_name
1149	while ((i < $total_slices)); do
1150		if ! is_linux; then
1151			if ((i == 2)); then
1152				((i = i + 1))
1153				continue
1154			fi
1155		fi
1156		log_must set_partition $i "$cyl" $slice_size $disk_name
1157		cyl=$(get_endslice $disk_name $i)
1158		((i = i+1))
1159	done
1160}
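#
# Illustrative usage of partition_disk (a sketch; the disk name is
# hypothetical):
#
#	partition_disk 200m sdb 6	# lay out 200m slices on disk sdb
#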
1161
1162#
1163# This function continues to write filenum files into each of dirnum
1164# directories until either file_write returns an error or the
1165# maximum number of files per directory has been written.
1166#
1167# Usage:
1168# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1169#
1170# Return value: 0 on success
1171#		non 0 on error
1172#
1173# Where :
1174#	destdir:    the directory under which everything is to be created
1175#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
1176#	filenum:    the maximum number of files per subdirectory
1177#	bytes:	    number of bytes to write
1178#	num_writes: number of times to write out bytes
1179#	data:	    the data that will be written
1180#
1181#	E.g.
1182#	fill_fs /testdir 20 25 1024 256 0
1183#
1184# Note: bytes * num_writes equals the size of the testfile
1185#
1186function fill_fs # destdir dirnum filenum bytes num_writes data
1187{
1188	typeset destdir=${1:-$TESTDIR}
1189	typeset -i dirnum=${2:-50}
1190	typeset -i filenum=${3:-50}
1191	typeset -i bytes=${4:-8192}
1192	typeset -i num_writes=${5:-10240}
1193	typeset data=${6:-0}
1194
1195	mkdir -p $destdir/{1..$dirnum}
1196	for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
1197		file_write -o create -f $f -b $bytes -c $num_writes -d $data \
1198		|| return $?
1199	done
1200	return 0
1201}
1202
1203#
1204# Simple function to get the specified property. If unable to
1205# get the property, return nonzero.
1206#
1207# Note property is in 'parsable' format (-p)
1208#
1209function get_prop # property dataset
1210{
1211	typeset prop_val
1212	typeset prop=$1
1213	typeset dataset=$2
1214
1215	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
1216	if [[ $? -ne 0 ]]; then
1217		log_note "Unable to get $prop property for dataset " \
1218		"$dataset"
1219		return 1
1220	fi
1221
1222	echo "$prop_val"
1223	return 0
1224}
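#
# Illustrative usage of get_prop (a sketch):
#
#	used=$(get_prop used "$TESTPOOL/$TESTFS") ||
#		log_fail "cannot read 'used' for $TESTPOOL/$TESTFS"
#	log_note "used: $used bytes"
#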
1225
1226#
1227# Simple function to get the specified property of a pool. If unable to
1228# get the property, return nonzero.
1229#
1230# Note property is in 'parsable' format (-p)
1231#
1232function get_pool_prop # property pool
1233{
1234	typeset prop_val
1235	typeset prop=$1
1236	typeset pool=$2
1237
1238	if poolexists $pool ; then
1239		prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
1240			awk '{print $3}')
1241		if [[ $? -ne 0 ]]; then
1242			log_note "Unable to get $prop property for pool " \
1243			"$pool"
1244			return 1
1245		fi
1246	else
1247		log_note "Pool $pool does not exist."
1248		return 1
1249	fi
1250
1251	echo "$prop_val"
1252	return 0
1253}
1254
1255# Return 0 if a pool exists; $? otherwise
1256#
1257# $1 - pool name
1258
1259function poolexists
1260{
1261	typeset pool=$1
1262
1263	if [[ -z $pool ]]; then
1264		log_note "No pool name given."
1265		return 1
1266	fi
1267
1268	zpool get name "$pool" > /dev/null 2>&1
1269	return $?
1270}
1271
1272# Return 0 if all the specified datasets exist; $? otherwise
1273#
1274# $1-n  dataset name
1275function datasetexists
1276{
1277	if (($# == 0)); then
1278		log_note "No dataset name given."
1279		return 1
1280	fi
1281
1282	while (($# > 0)); do
1283		zfs get name $1 > /dev/null 2>&1 || \
1284			return $?
1285		shift
1286	done
1287
1288	return 0
1289}
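#
# Illustrative usage of poolexists/datasetexists (a sketch):
#
#	poolexists "$TESTPOOL" || log_fail "$TESTPOOL does not exist"
#	datasetexists "$TESTPOOL/$TESTFS" "$TESTPOOL/$TESTVOL" &&
#		log_note "both datasets exist"
#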
1290
1291# return 0 if none of the specified datasets exists, otherwise return 1.
1292#
1293# $1-n  dataset name
1294function datasetnonexists
1295{
1296	if (($# == 0)); then
1297		log_note "No dataset name given."
1298		return 1
1299	fi
1300
1301	while (($# > 0)); do
1302		zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1303		    && return 1
1304		shift
1305	done
1306
1307	return 0
1308}
1309
1310function is_shared_freebsd
1311{
1312	typeset fs=$1
1313
1314	pgrep -q mountd && showmount -E | grep -qx $fs
1315}
1316
1317function is_shared_illumos
1318{
1319	typeset fs=$1
1320	typeset mtpt
1321
1322	for mtpt in `share | awk '{print $2}'` ; do
1323		if [[ $mtpt == $fs ]] ; then
1324			return 0
1325		fi
1326	done
1327
1328	typeset stat=$(svcs -H -o STA nfs/server:default)
1329	if [[ $stat != "ON" ]]; then
1330		log_note "Current nfs/server status: $stat"
1331	fi
1332
1333	return 1
1334}
1335
1336function is_shared_linux
1337{
1338	typeset fs=$1
1339	typeset mtpt
1340
1341	for mtpt in `share | awk '{print $1}'` ; do
1342		if [[ $mtpt == $fs ]] ; then
1343			return 0
1344		fi
1345	done
1346	return 1
1347}
1348
1349#
1350# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1351#
1352# Returns 0 if shared, 1 otherwise.
1353#
1354function is_shared
1355{
1356	typeset fs=$1
1357	typeset mtpt
1358
1359	if [[ $fs != "/"* ]] ; then
1360		if datasetnonexists "$fs" ; then
1361			return 1
1362		else
1363			mtpt=$(get_prop mountpoint "$fs")
1364			case $mtpt in
1365				none|legacy|-) return 1
1366					;;
1367				*)	fs=$mtpt
1368					;;
1369			esac
1370		fi
1371	fi
1372
1373	case $(uname) in
1374	FreeBSD)	is_shared_freebsd "$fs"	;;
1375	Linux)		is_shared_linux "$fs"	;;
1376	*)		is_shared_illumos "$fs"	;;
1377	esac
1378}
1379
1380function is_exported_illumos
1381{
1382	typeset fs=$1
1383	typeset mtpt
1384
1385	for mtpt in `awk '{print $1}' /etc/dfs/sharetab` ; do
1386		if [[ $mtpt == $fs ]] ; then
1387			return 0
1388		fi
1389	done
1390
1391	return 1
1392}
1393
1394function is_exported_freebsd
1395{
1396	typeset fs=$1
1397	typeset mtpt
1398
1399	for mtpt in `awk '{print $1}' /etc/zfs/exports` ; do
1400		if [[ $mtpt == $fs ]] ; then
1401			return 0
1402		fi
1403	done
1404
1405	return 1
1406}
1407
1408function is_exported_linux
1409{
1410	typeset fs=$1
1411	typeset mtpt
1412
1413	for mtpt in `awk '{print $1}' /etc/exports.d/zfs.exports` ; do
1414		if [[ $mtpt == $fs ]] ; then
1415			return 0
1416		fi
1417	done
1418
1419	return 1
1420}
1421
1422#
1423# Given a mountpoint, or a dataset name, determine if it is exported via
1424# the os-specific NFS exports file.
1425#
1426# Returns 0 if exported, 1 otherwise.
1427#
1428function is_exported
1429{
1430	typeset fs=$1
1431	typeset mtpt
1432
1433	if [[ $fs != "/"* ]] ; then
1434		if datasetnonexists "$fs" ; then
1435			return 1
1436		else
1437			mtpt=$(get_prop mountpoint "$fs")
1438			case $mtpt in
1439				none|legacy|-) return 1
1440					;;
1441				*)	fs=$mtpt
1442					;;
1443			esac
1444		fi
1445	fi
1446
1447	case $(uname) in
1448	FreeBSD)	is_exported_freebsd "$fs"	;;
1449	Linux)		is_exported_linux "$fs"	;;
1450	*)		is_exported_illumos "$fs"	;;
1451	esac
1452}
1453
1454#
1455# Given a dataset name determine if it is shared via SMB.
1456#
1457# Returns 0 if shared, 1 otherwise.
1458#
1459function is_shared_smb
1460{
1461	typeset fs=$1
1462	typeset mtpt
1463
1464	if datasetnonexists "$fs" ; then
1465		return 1
1466	else
1467		fs=$(echo $fs | tr / _)
1468	fi
1469
1470	if is_linux; then
1471		for mtpt in `net usershare list | awk '{print $1}'` ; do
1472			if [[ $mtpt == $fs ]] ; then
1473				return 0
1474			fi
1475		done
1476		return 1
1477	else
1478		log_note "Currently unsupported by the test framework"
1479		return 1
1480	fi
1481}
1482
1483#
1484# Given a mountpoint, determine if it is not shared via NFS.
1485#
1486# Returns 0 if not shared, 1 otherwise.
1487#
1488function not_shared
1489{
1490	typeset fs=$1
1491
1492	is_shared $fs
1493	if (($? == 0)); then
1494		return 1
1495	fi
1496
1497	return 0
1498}
1499
1500#
1501# Given a dataset determine if it is not shared via SMB.
1502#
1503# Returns 0 if not shared, 1 otherwise.
1504#
1505function not_shared_smb
1506{
1507	typeset fs=$1
1508
1509	is_shared_smb $fs
1510	if (($? == 0)); then
1511		return 1
1512	fi
1513
1514	return 0
1515}
1516
1517#
1518# Helper function to unshare a mountpoint.
1519#
1520function unshare_fs #fs
1521{
1522	typeset fs=$1
1523
1524	is_shared $fs || is_shared_smb $fs
1525	if (($? == 0)); then
1526		zfs unshare $fs || log_fail "zfs unshare $fs failed"
1527	fi
1528
1529	return 0
1530}
1531
1532#
1533# Helper function to share a NFS mountpoint.
1534#
1535function share_nfs #fs
1536{
1537	typeset fs=$1
1538
1539	if is_linux; then
1540		is_shared $fs
1541		if (($? != 0)); then
1542			log_must share "*:$fs"
1543		fi
1544	else
1545		is_shared $fs
1546		if (($? != 0)); then
1547			log_must share -F nfs $fs
1548		fi
1549	fi
1550
1551	return 0
1552}
1553
1554#
1555# Helper function to unshare a NFS mountpoint.
1556#
1557function unshare_nfs #fs
1558{
1559	typeset fs=$1
1560
1561	if is_linux; then
1562		is_shared $fs
1563		if (($? == 0)); then
1564			log_must unshare -u "*:$fs"
1565		fi
1566	else
1567		is_shared $fs
1568		if (($? == 0)); then
1569			log_must unshare -F nfs $fs
1570		fi
1571	fi
1572
1573	return 0
1574}
1575
1576#
1577# Helper function to show NFS shares.
1578#
1579function showshares_nfs
1580{
1581	if is_linux; then
1582		share -v
1583	else
1584		share -F nfs
1585	fi
1586
1587	return 0
1588}
1589
1590#
1591# Helper function to show SMB shares.
1592#
1593function showshares_smb
1594{
1595	if is_linux; then
1596		net usershare list
1597	else
1598		share -F smb
1599	fi
1600
1601	return 0
1602}
1603
1604function check_nfs
1605{
1606	if is_linux; then
1607		share -s
1608	elif is_freebsd; then
1609		showmount -e
1610	else
1611		log_unsupported "Unknown platform"
1612	fi
1613
1614	if [[ $? -ne 0 ]]; then
1615		log_unsupported "The NFS utilities are not installed"
1616	fi
1617}
1618
1619#
1620# Check NFS server status and trigger it online.
1621#
1622function setup_nfs_server
1623{
1624	# Cannot share directory in non-global zone.
1625	#
1626	if ! is_global_zone; then
1627		log_note "Cannot trigger NFS server by sharing in LZ."
1628		return
1629	fi
1630
1631	if is_linux; then
1632		#
1633		# Re-synchronize /var/lib/nfs/etab with /etc/exports and
1634		# /etc/exports.d./* to provide a clean test environment.
1635		#
1636		log_must share -r
1637
1638		log_note "NFS server must be started prior to running ZTS."
1639		return
1640	elif is_freebsd; then
1641		kill -s HUP $(cat /var/run/mountd.pid)
1642
1643		log_note "NFS server must be started prior to running ZTS."
1644		return
1645	fi
1646
1647	typeset nfs_fmri="svc:/network/nfs/server:default"
1648	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1649		#
1650		# Only a real sharing operation can bring the NFS server
1651		# online permanently.
1652		#
1653		typeset dummy=/tmp/dummy
1654
1655		if [[ -d $dummy ]]; then
1656			log_must rm -rf $dummy
1657		fi
1658
1659		log_must mkdir $dummy
1660		log_must share $dummy
1661
1662		#
1663		# Wait for the FMRI's status to reach its final state.
1664		# Otherwise, while in transition, an asterisk (*) is appended to
1665		# the instance status and the unshare would flip the status back
1666		# to 'DIS' again.
1667		# Wait at least 1 second.
1668		#
1669		log_must sleep 1
1670		timeout=10
1671		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1672		do
1673			log_must sleep 1
1674
1675			((timeout -= 1))
1676		done
1677
1678		log_must unshare $dummy
1679		log_must rm -rf $dummy
1680	fi
1681
1682	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1683}
1684
1685#
1686# Verify whether the calling process is in the global zone
1687#
1688# Return 0 if in global zone, 1 in non-global zone
1689#
1690function is_global_zone
1691{
1692	if is_linux || is_freebsd; then
1693		return 0
1694	else
1695		typeset cur_zone=$(zonename 2>/dev/null)
1696		if [[ $cur_zone != "global" ]]; then
1697			return 1
1698		fi
1699		return 0
1700	fi
1701}
1702
1703#
1704# Verify whether test is permitted to run from
1705# global zone, local zone, or both
1706#
1707# $1 zone limit, could be "global", "local", or "both"(no limit)
1708#
1709# Return 0 if permitted, otherwise exit with log_unsupported
1710#
1711function verify_runnable # zone limit
1712{
1713	typeset limit=$1
1714
1715	[[ -z $limit ]] && return 0
1716
1717	if is_global_zone ; then
1718		case $limit in
1719			global|both)
1720				;;
1721			local)	log_unsupported "Test is unable to run from "\
1722					"global zone."
1723				;;
1724			*)	log_note "Warning: unknown limit $limit - " \
1725					"use both."
1726				;;
1727		esac
1728	else
1729		case $limit in
1730			local|both)
1731				;;
1732			global)	log_unsupported "Test is unable to run from "\
1733					"local zone."
1734				;;
1735			*)	log_note "Warning: unknown limit $limit - " \
1736					"use both."
1737				;;
1738		esac
1739
1740		reexport_pool
1741	fi
1742
1743	return 0
1744}
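#
# Illustrative usage of verify_runnable:
#
#	verify_runnable "global"	# skip the test outside the global zone
#	verify_runnable "both"		# runnable from either zone type
#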
1745
1746# Return 0 if created successfully or the pool already exists; $? otherwise
1747# Note: In local zones, this function should return 0 silently.
1748#
1749# $1 - pool name
1750# $2-n - [keyword] devs_list
1751
1752function create_pool #pool devs_list
1753{
1754	typeset pool=${1%%/*}
1755
1756	shift
1757
1758	if [[ -z $pool ]]; then
1759		log_note "Missing pool name."
1760		return 1
1761	fi
1762
1763	if poolexists $pool ; then
1764		destroy_pool $pool
1765	fi
1766
1767	if is_global_zone ; then
1768		[[ -d /$pool ]] && rm -rf /$pool
1769		log_must zpool create -f $pool $@
1770	fi
1771
1772	return 0
1773}
1774
1775# Return 0 if the pool is destroyed successfully; $? otherwise
1776# Note: In local zones, this function should return 0 silently.
1777#
1778# $1 - pool name
1779# Destroy pool with the given parameters.
1780
1781function destroy_pool #pool
1782{
1783	typeset pool=${1%%/*}
1784	typeset mtpt
1785
1786	if [[ -z $pool ]]; then
1787		log_note "No pool name given."
1788		return 1
1789	fi
1790
1791	if is_global_zone ; then
1792		if poolexists "$pool" ; then
1793			mtpt=$(get_prop mountpoint "$pool")
1794
1795			# At times, syseventd/udev activity can cause attempts
1796			# to destroy a pool to fail with EBUSY. We retry a few
1797			# times allowing failures before requiring the destroy
1798			# to succeed.
1799			log_must_busy zpool destroy -f $pool
1800
1801			[[ -d $mtpt ]] && \
1802				log_must rm -rf $mtpt
1803		else
1804			log_note "Pool does not exist. ($pool)"
1805			return 1
1806		fi
1807	fi
1808
1809	return 0
1810}
1811
1812# Return 0 if created successfully; $? otherwise
1813#
1814# $1 - dataset name
1815# $2-n - dataset options
1816
1817function create_dataset #dataset dataset_options
1818{
1819	typeset dataset=$1
1820
1821	shift
1822
1823	if [[ -z $dataset ]]; then
1824		log_note "Missing dataset name."
1825		return 1
1826	fi
1827
1828	if datasetexists $dataset ; then
1829		destroy_dataset $dataset
1830	fi
1831
1832	log_must zfs create $@ $dataset
1833
1834	return 0
1835}
1836
1837# Return 0 if the dataset is destroyed successfully; $? otherwise
1838# Note: In local zones, this function should return 0 silently.
1839#
1840# $1 - dataset name
1841# $2 - custom arguments for zfs destroy
1842# Destroy dataset with the given parameters.
1843
1844function destroy_dataset #dataset #args
1845{
1846	typeset dataset=$1
1847	typeset mtpt
1848	typeset args=${2:-""}
1849
1850	if [[ -z $dataset ]]; then
1851		log_note "No dataset name given."
1852		return 1
1853	fi
1854
1855	if is_global_zone ; then
1856		if datasetexists "$dataset" ; then
1857			mtpt=$(get_prop mountpoint "$dataset")
1858			log_must_busy zfs destroy $args $dataset
1859
1860			[[ -d $mtpt ]] && \
1861				log_must rm -rf $mtpt
1862		else
1863			log_note "Dataset does not exist. ($dataset)"
1864			return 1
1865		fi
1866	fi
1867
1868	return 0
1869}
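#
# Illustrative usage of the pool/dataset helpers above (a sketch; the vdev
# names and the property are hypothetical):
#
#	create_pool "$TESTPOOL" mirror sdb sdc
#	create_dataset "$TESTPOOL/$TESTFS" "-o compression=on"
#	destroy_dataset "$TESTPOOL/$TESTFS" "-r"
#	destroy_pool "$TESTPOOL"
#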
1870
1871#
1872# First, create a pool with 5 datasets. Then, create a single zone and
1873# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1874# and a zvol device to the zone.
1875#
1876# $1 zone name
1877# $2 zone root directory prefix
1878# $3 zone ip
1879#
1880function zfs_zones_setup #zone_name zone_root zone_ip
1881{
1882	typeset zone_name=${1:-$(hostname)-z}
1883	typeset zone_root=${2:-"/zone_root"}
1884	typeset zone_ip=${3:-"10.1.1.10"}
1885	typeset prefix_ctr=$ZONE_CTR
1886	typeset pool_name=$ZONE_POOL
1887	typeset -i cntctr=5
1888	typeset -i i=0
1889
1890	# Create a pool and 5 containers within it
1891	#
1892	[[ -d /$pool_name ]] && rm -rf /$pool_name
1893	log_must zpool create -f $pool_name $DISKS
1894	while ((i < cntctr)); do
1895		log_must zfs create $pool_name/$prefix_ctr$i
1896		((i += 1))
1897	done
1898
1899	# create a zvol
1900	log_must zfs create -V 1g $pool_name/zone_zvol
1901	block_device_wait
1902
1903	#
1904	# Add slog device for pool
1905	#
1906	typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
1907	log_must mkfile $MINVDEVSIZE $sdevs
1908	log_must zpool add $pool_name log mirror $sdevs
1909
1910	# this isn't supported just yet.
1911	# Create a filesystem. In order to add this to
1912	# the zone, it must have its mountpoint set to 'legacy'
1913	# log_must zfs create $pool_name/zfs_filesystem
1914	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
1915
1916	[[ -d $zone_root ]] && \
1917		log_must rm -rf $zone_root/$zone_name
1918	[[ ! -d $zone_root ]] && \
1919		log_must mkdir -p -m 0700 $zone_root/$zone_name
1920
1921	# Create zone configure file and configure the zone
1922	#
1923	typeset zone_conf=/tmp/zone_conf.$$
1924	echo "create" > $zone_conf
1925	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
1926	echo "set autoboot=true" >> $zone_conf
1927	i=0
1928	while ((i < cntctr)); do
1929		echo "add dataset" >> $zone_conf
1930		echo "set name=$pool_name/$prefix_ctr$i" >> \
1931			$zone_conf
1932		echo "end" >> $zone_conf
1933		((i += 1))
1934	done
1935
1936	# add our zvol to the zone
1937	echo "add device" >> $zone_conf
1938	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
1939	echo "end" >> $zone_conf
1940
1941	# add a corresponding zvol rdsk to the zone
1942	echo "add device" >> $zone_conf
1943	echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1944	echo "end" >> $zone_conf
1945
1946	# once it's supported, we'll add our filesystem to the zone
1947	# echo "add fs" >> $zone_conf
1948	# echo "set type=zfs" >> $zone_conf
1949	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
1950	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
1951	# echo "end" >> $zone_conf
1952
1953	echo "verify" >> $zone_conf
1954	echo "commit" >> $zone_conf
1955	log_must zonecfg -z $zone_name -f $zone_conf
1956	log_must rm -f $zone_conf
1957
1958	# Install the zone
1959	zoneadm -z $zone_name install
1960	if (($? == 0)); then
1961		log_note "SUCCESS: zoneadm -z $zone_name install"
1962	else
1963		log_fail "FAIL: zoneadm -z $zone_name install"
1964	fi
1965
1966	# Install sysidcfg file
1967	#
1968	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1969	echo "system_locale=C" > $sysidcfg
1970	echo  "terminal=dtterm" >> $sysidcfg
1971	echo  "network_interface=primary {" >> $sysidcfg
1972	echo  "hostname=$zone_name" >> $sysidcfg
1973	echo  "}" >> $sysidcfg
1974	echo  "name_service=NONE" >> $sysidcfg
1975	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
1976	echo  "security_policy=NONE" >> $sysidcfg
1977	echo  "timezone=US/Eastern" >> $sysidcfg
1978
1979	# Boot this zone
1980	log_must zoneadm -z $zone_name boot
1981}
1982
1983#
1984# Reexport TESTPOOL & TESTPOOL(1-4)
1985#
1986function reexport_pool
1987{
1988	typeset -i cntctr=5
1989	typeset -i i=0
1990
1991	while ((i < cntctr)); do
1992		if ((i == 0)); then
1993			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1994			if ! ismounted $TESTPOOL; then
1995				log_must zfs mount $TESTPOOL
1996			fi
1997		else
1998			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1999			if eval ! ismounted \$TESTPOOL$i; then
2000				log_must eval zfs mount \$TESTPOOL$i
2001			fi
2002		fi
2003		((i += 1))
2004	done
2005}
2006
2007#
2008# Verify a given disk or pool state
2009#
2010# Return 0 if pool/disk matches expected state, 1 otherwise
2011#
2012function check_state # pool disk state{online,offline,degraded}
2013{
2014	typeset pool=$1
2015	typeset disk=${2#$DEV_DSKDIR/}
2016	typeset state=$3
2017
2018	[[ -z $pool ]] || [[ -z $state ]] \
2019	    && log_fail "Arguments invalid or missing"
2020
2021	if [[ -z $disk ]]; then
2022		#check pool state only
2023		zpool get -H -o value health $pool \
2024		    | grep -i "$state" > /dev/null 2>&1
2025	else
2026		zpool status -v $pool | grep "$disk"  \
2027		    | grep -i "$state" > /dev/null 2>&1
2028	fi
2029
2030	return $?
2031}
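#
# Illustrative usage of check_state (a sketch; the disk name is
# hypothetical):
#
#	check_state "$TESTPOOL" "" "ONLINE"	# overall pool health
#	check_state "$TESTPOOL" sdb "OFFLINE"	# state of a single vdev
#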
2032
2033#
2034# Get the mountpoint of snapshot
2035# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
2036# as its mountpoint
2037#
2038function snapshot_mountpoint
2039{
2040	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
2041
2042	if [[ $dataset != *@* ]]; then
2043		log_fail "Invalid snapshot name '$dataset'."
2044	fi
2045
2046	typeset fs=${dataset%@*}
2047	typeset snap=${dataset#*@}
2048
2049	if [[ -z $fs || -z $snap ]]; then
2050		log_fail "Invalid snapshot name '$dataset'."
2051	fi
2052
2053	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
2054}
2055
2056#
2057# Given a device and 'ashift' value verify it's correctly set on every label
2058#
2059function verify_ashift # device ashift
2060{
2061	typeset device="$1"
2062	typeset ashift="$2"
2063
2064	zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
2065	    if (ashift != $2)
2066	        exit 1;
2067	    else
2068	        count++;
2069	    } END {
2070	    if (count != 4)
2071	        exit 1;
2072	    else
2073	        exit 0;
2074	    }'
2075
2076	return $?
2077}
2078
2079#
2080# Given a pool and file system, this function will verify the file system
2081# using the zdb internal tool. Note that the pool is exported and imported
2082# to ensure it has consistent state.
2083#
2084function verify_filesys # pool filesystem dir
2085{
2086	typeset pool="$1"
2087	typeset filesys="$2"
2088	typeset zdbout="/tmp/zdbout.$$"
2089
2090	shift
2091	shift
2092	typeset dirs=$@
2093	typeset search_path=""
2094
2095	log_note "Calling zdb to verify filesystem '$filesys'"
2096	zfs unmount -a > /dev/null 2>&1
2097	log_must zpool export $pool
2098
2099	if [[ -n $dirs ]] ; then
2100		for dir in $dirs ; do
2101			search_path="$search_path -d $dir"
2102		done
2103	fi
2104
2105	log_must zpool import $search_path $pool
2106
2107	zdb -cudi $filesys > $zdbout 2>&1
2108	if [[ $? != 0 ]]; then
2109		log_note "Output: zdb -cudi $filesys"
2110		cat $zdbout
2111		log_fail "zdb detected errors with: '$filesys'"
2112	fi
2113
2114	log_must zfs mount -a
2115	log_must rm -rf $zdbout
2116}
2117
2118#
2119# Given a pool, issue a scrub and verify that no checksum errors are reported.
2120#
2121function verify_pool
2122{
2123	typeset pool=${1:-$TESTPOOL}
2124
2125	log_must zpool scrub $pool
2126	log_must wait_scrubbed $pool
2127
2128	typeset -i cksum=$(zpool status $pool | awk '
2129	    !NF { isvdev = 0 }
2130	    isvdev { errors += $NF }
2131	    /CKSUM$/ { isvdev = 1 }
2132	    END { print errors }
2133	')
2134	if [[ $cksum != 0 ]]; then
2135		log_must zpool status -v
2136		log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
2137	fi
2138}
2139
2140#
2141# Given a pool, list all disks in the pool
2142#
2143function get_disklist # pool
2144{
2145	typeset disklist=""
2146
2147	disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
2148	    grep -v "\-\-\-\-\-" | \
2149	    egrep -v -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$")
2150
2151	echo $disklist
2152}
2153
2154#
2155# Given a pool, list all disks in the pool with their full
2156# path (like "/dev/sda" instead of "sda").
2157#
2158function get_disklist_fullpath # pool
2159{
2160	args="-P $1"
2161	get_disklist $args
2162}
2163
2164
2165
2166# /**
2167#  This function kills a given list of processes after a time period. We use
2168#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
2169#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
2170#  would be listed as FAIL, which we don't want: we're happy with stress tests
2171#  running for a certain amount of time, then finishing.
2172#
2173# @param $1 the time in seconds after which we should terminate these processes
2174# @param $2..$n the processes we wish to terminate.
2175# */
2176function stress_timeout
2177{
2178	typeset -i TIMEOUT=$1
2179	shift
2180	typeset cpids="$@"
2181
2182	log_note "Waiting for child processes($cpids). " \
2183		"It could last dozens of minutes, please be patient ..."
2184	log_must sleep $TIMEOUT
2185
2186	log_note "Killing child processes after ${TIMEOUT} stress timeout."
2187	typeset pid
2188	for pid in $cpids; do
2189		ps -p $pid > /dev/null 2>&1
2190		if (($? == 0)); then
2191			log_must kill -USR1 $pid
2192		fi
2193	done
2194}
2195
2196#
2197# Verify a given hotspare disk is inuse or avail
2198#
2199# Return 0 if pool/disk matches expected state, 1 otherwise
2200#
2201function check_hotspare_state # pool disk state{inuse,avail}
2202{
2203	typeset pool=$1
2204	typeset disk=${2#$DEV_DSKDIR/}
2205	typeset state=$3
2206
2207	cur_state=$(get_device_state $pool $disk "spares")
2208
2209	if [[ $state != ${cur_state} ]]; then
2210		return 1
2211	fi
2212	return 0
2213}
2214
2215#
2216# Wait until a hotspare transitions to a given state or times out.
2217#
2218# Return 0 when pool/disk matches expected state, 1 on timeout.
2219#
2220function wait_hotspare_state # pool disk state timeout
2221{
2222	typeset pool=$1
2223	typeset disk=${2#*$DEV_DSKDIR/}
2224	typeset state=$3
2225	typeset timeout=${4:-60}
2226	typeset -i i=0
2227
2228	while [[ $i -lt $timeout ]]; do
2229		if check_hotspare_state $pool $disk $state; then
2230			return 0
2231		fi
2232
2233		i=$((i+1))
2234		sleep 1
2235	done
2236
2237	return 1
2238}
2239
2240#
2241# Verify a given slog disk is in the expected state
2242#
2243# Return 0 if pool/disk matches expected state, 1 otherwise
2244#
2245function check_slog_state # pool disk state{online,offline,unavail}
2246{
2247	typeset pool=$1
2248	typeset disk=${2#$DEV_DSKDIR/}
2249	typeset state=$3
2250
2251	cur_state=$(get_device_state $pool $disk "logs")
2252
2253	if [[ $state != ${cur_state} ]]; then
2254		return 1
2255	fi
2256	return 0
2257}
2258
2259#
2260# Verify a given vdev disk is in the expected state
2261#
2262# Return 0 if pool/disk matches expected state, 1 otherwise
2263#
2264function check_vdev_state # pool disk state{online,offline,unavail}
2265{
2266	typeset pool=$1
2267	typeset disk=${2#*$DEV_DSKDIR/}
2268	typeset state=$3
2269
2270	cur_state=$(get_device_state $pool $disk)
2271
2272	if [[ $state != ${cur_state} ]]; then
2273		return 1
2274	fi
2275	return 0
2276}
2277
2278#
2279# Wait until a vdev transitions to a given state or times out.
2280#
2281# Return 0 when pool/disk matches expected state, 1 on timeout.
2282#
2283function wait_vdev_state # pool disk state timeout
2284{
2285	typeset pool=$1
2286	typeset disk=${2#*$DEV_DSKDIR/}
2287	typeset state=$3
2288	typeset timeout=${4:-60}
2289	typeset -i i=0
2290
2291	while [[ $i -lt $timeout ]]; do
2292		if check_vdev_state $pool $disk $state; then
2293			return 0
2294		fi
2295
2296		i=$((i+1))
2297		sleep 1
2298	done
2299
2300	return 1
2301}
2302
2303#
2304	# Check the output of 'zpool status -v <pool>'
2305	# to see if the content of <token> contains the specified <keyword>.
2306	#
2307	# Return 0 if it does, 1 otherwise
2308#
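# Example (pool name is illustrative):
#	check_pool_status mypool "scan" "scrub repaired" && echo "scrub done"
#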
2309function check_pool_status # pool token keyword <verbose>
2310{
2311	typeset pool=$1
2312	typeset token=$2
2313	typeset keyword=$3
2314	typeset verbose=${4:-false}
2315
2316	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
2317		($1==token) {print $0}')
2318	if [[ $verbose == true ]]; then
2319		log_note $scan
2320	fi
2321	echo $scan | egrep -i "$keyword" > /dev/null 2>&1
2322
2323	return $?
2324}
2325
2326#
2327	# The following functions are instances of check_pool_status()
2328#	is_pool_resilvering - to check if the pool resilver is in progress
2329#	is_pool_resilvered - to check if the pool resilver is completed
2330#	is_pool_scrubbing - to check if the pool scrub is in progress
2331#	is_pool_scrubbed - to check if the pool scrub is completed
2332#	is_pool_scrub_stopped - to check if the pool scrub is stopped
2333#	is_pool_scrub_paused - to check if the pool scrub has paused
2334	#	is_pool_removing - to check if the pool is removing a vdev
2335#	is_pool_removed - to check if the pool remove is completed
2336#	is_pool_discarding - to check if the pool checkpoint is being discarded
2337#
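# For example, a test might poll with something like:
#	while is_pool_resilvering $TESTPOOL; do sleep 1; done
#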
2338function is_pool_resilvering #pool <verbose>
2339{
2340	check_pool_status "$1" "scan" \
2341	    "resilver[ ()0-9A-Za-z:_-]* in progress since" $2
2342	return $?
2343}
2344
2345function is_pool_resilvered #pool <verbose>
2346{
2347	check_pool_status "$1" "scan" "resilvered " $2
2348	return $?
2349}
2350
2351function is_pool_scrubbing #pool <verbose>
2352{
2353	check_pool_status "$1" "scan" "scrub in progress since " $2
2354	return $?
2355}
2356
2357function is_pool_scrubbed #pool <verbose>
2358{
2359	check_pool_status "$1" "scan" "scrub repaired" $2
2360	return $?
2361}
2362
2363function is_pool_scrub_stopped #pool <verbose>
2364{
2365	check_pool_status "$1" "scan" "scrub canceled" $2
2366	return $?
2367}
2368
2369function is_pool_scrub_paused #pool <verbose>
2370{
2371	check_pool_status "$1" "scan" "scrub paused since " $2
2372	return $?
2373}
2374
2375function is_pool_removing #pool
2376{
2377	check_pool_status "$1" "remove" "in progress since "
2378	return $?
2379}
2380
2381function is_pool_removed #pool
2382{
2383	check_pool_status "$1" "remove" "completed on"
2384	return $?
2385}
2386
2387function is_pool_discarding #pool
2388{
2389	check_pool_status "$1" "checkpoint" "discarding"
2390	return $?
2391}
2392
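#
# Wait up to $2 seconds (default 30) for the health of pool $1 to become
# DEGRADED, e.g. "wait_for_degraded $TESTPOOL 60".
#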
2393function wait_for_degraded
2394{
2395	typeset pool=$1
2396	typeset timeout=${2:-30}
2397	typeset t0=$SECONDS
2398
2399	while :; do
2400		[[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
2401		log_note "$pool is not yet degraded."
2402		sleep 1
2403		if ((SECONDS - t0 > $timeout)); then
2404			log_note "$pool not degraded after $timeout seconds."
2405			return 1
2406		fi
2407	done
2408
2409	return 0
2410}
2411
2412#
2413	# Use create_pool()/destroy_pool() to clean up the information on
2414	# the given disks to avoid slice overlapping.
2415#
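# Example (device names are illustrative):
#	cleanup_devices /dev/sdb /dev/sdc
#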
2416function cleanup_devices #vdevs
2417{
2418	typeset pool="foopool$$"
2419
2420	for vdev in $@; do
2421		zero_partitions $vdev
2422	done
2423
2424	poolexists $pool && destroy_pool $pool
2425	create_pool $pool $@
2426	destroy_pool $pool
2427
2428	return 0
2429}
2430
2431#/**
2432	# A function to find free disks on a system, or from the disks given
2433	# as parameters. It works by excluding disks that are mounted, in use
2434	# as swap or dump devices, or listed in ZFS_HOST_DEVICES_IGNORE.
2435#
2436# $@ given disks to find which are free, default is all disks in
2437# the test system
2438#
2439# @return a string containing the list of available disks
2440#*/
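#
# Example: pick unused disks from those exported in $DISKS (if any):
#	unused=$(find_disks $DISKS)
#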
2441function find_disks
2442{
2443	# Trust provided list, no attempt is made to locate unused devices.
2444	if is_linux || is_freebsd; then
2445		echo "$@"
2446		return
2447	fi
2448
2449
2450	sfi=/tmp/swaplist.$$
2451	dmpi=/tmp/dumpdev.$$
2452	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2453
2454	swap -l > $sfi
2455	dumpadm > $dmpi 2>/dev/null
2456
2457# write an awk script that can process the output of format
2458# to produce a list of disks we know about. Note that we have
2459# to escape "$2" so that the shell doesn't interpret it while
2460# we're creating the awk script.
2461# -------------------
2462	cat > /tmp/find_disks.awk <<EOF
2463#!/bin/nawk -f
2464	BEGIN { FS="."; }
2465
2466	/^Specify disk/{
2467		searchdisks=0;
2468	}
2469
2470	{
2471		if (searchdisks && \$2 !~ "^$"){
2472			split(\$2,arr," ");
2473			print arr[1];
2474		}
2475	}
2476
2477	/^AVAILABLE DISK SELECTIONS:/{
2478		searchdisks=1;
2479	}
2480EOF
2481#---------------------
2482
2483	chmod 755 /tmp/find_disks.awk
2484	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
2485	rm /tmp/find_disks.awk
2486
2487	unused=""
2488	for disk in $disks; do
2489	# Check for mounted
2490		grep "${disk}[sp]" /etc/mnttab >/dev/null
2491		(($? == 0)) && continue
2492	# Check for swap
2493		grep "${disk}[sp]" $sfi >/dev/null
2494		(($? == 0)) && continue
2495	# check for dump device
2496		grep "${disk}[sp]" $dmpi >/dev/null
2497		(($? == 0)) && continue
2498	# check to see if this disk hasn't been explicitly excluded
2499	# by a user-set environment variable
2500		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
2501		(($? == 0)) && continue
2502		unused_candidates="$unused_candidates $disk"
2503	done
2504	rm $sfi
2505	rm $dmpi
2506
2507# now just check to see if those disks do actually exist
2508# by looking for a device pointing to the first slice in
2509# each case. limit the number to max_finddisksnum
2510	count=0
2511	for disk in $unused_candidates; do
2512		if is_disk_device $DEV_DSKDIR/${disk}s0 && \
2513		    [ $count -lt $max_finddisksnum ]; then
2514			unused="$unused $disk"
2515			# do not impose limit if $@ is provided
2516			[[ -z $@ ]] && ((count = count + 1))
2517		fi
2518	done
2519
2520# finally, return our disk list
2521	echo $unused
2522}
2523
2524function add_user_freebsd #<group_name> <user_name> <basedir>
2525{
2526	typeset group=$1
2527	typeset user=$2
2528	typeset basedir=$3
2529
2530	# Check to see if the user exists.
2531	if id $user > /dev/null 2>&1; then
2532		return 0
2533	fi
2534
2535	# Assign 1000 as the base uid
2536	typeset -i uid=1000
2537	while true; do
2538		typeset -i ret
2539		pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
2540		ret=$?
2541		case $ret in
2542			0) break ;;
2543			# The uid is not unique
2544			65) ((uid += 1)) ;;
2545			*) return 1 ;;
2546		esac
2547		if [[ $uid == 65000 ]]; then
2548			log_fail "No user id available under 65000 for $user"
2549		fi
2550	done
2551
2552	# Silence MOTD
2553	touch $basedir/$user/.hushlogin
2554
2555	return 0
2556}
2557
2558#
2559# Delete the specified user.
2560#
2561# $1 login name
2562#
2563function del_user_freebsd #<logname>
2564{
2565	typeset user=$1
2566
2567	if id $user > /dev/null 2>&1; then
2568		log_must pw userdel $user
2569	fi
2570
2571	return 0
2572}
2573
2574#
2575# Select valid gid and create specified group.
2576#
2577# $1 group name
2578#
2579function add_group_freebsd #<group_name>
2580{
2581	typeset group=$1
2582
2583	# See if the group already exists.
2584	if pw groupshow $group >/dev/null 2>&1; then
2585		return 0
2586	fi
2587
2588	# Assign 1000 as the base gid
2589	typeset -i gid=1000
2590	while true; do
2591		pw groupadd -g $gid -n $group > /dev/null 2>&1
2592		typeset -i ret=$?
2593		case $ret in
2594			0) return 0 ;;
2595			# The gid is not unique
2596			65) ((gid += 1)) ;;
2597			*) return 1 ;;
2598		esac
2599		if [[ $gid == 65000 ]]; then
2600			log_fail "No group id available under 65000 for $group"
2601		fi
2602	done
2603}
2604
2605#
2606# Delete the specified group.
2607#
2608# $1 group name
2609#
2610function del_group_freebsd #<group_name>
2611{
2612	typeset group=$1
2613
2614	pw groupdel -n $group > /dev/null 2>&1
2615	typeset -i ret=$?
2616	case $ret in
2617		# Group does not exist, or was deleted successfully.
2618		0|6|65) return 0 ;;
2619		# Name already exists as a group name
2620		9) log_must pw groupdel $group ;;
2621		*) return 1 ;;
2622	esac
2623
2624	return 0
2625}
2626
2627function add_user_illumos #<group_name> <user_name> <basedir>
2628{
2629	typeset group=$1
2630	typeset user=$2
2631	typeset basedir=$3
2632
2633	log_must useradd -g $group -d $basedir/$user -m $user
2634
2635	return 0
2636}
2637
2638function del_user_illumos #<user_name>
2639{
2640	typeset user=$1
2641
2642	if id $user > /dev/null 2>&1; then
2643		log_must_retry "currently used" 6 userdel $user
2644	fi
2645
2646	return 0
2647}
2648
2649function add_group_illumos #<group_name>
2650{
2651	typeset group=$1
2652
2653	typeset -i gid=100
2654	while true; do
2655		groupadd -g $gid $group > /dev/null 2>&1
2656		typeset -i ret=$?
2657		case $ret in
2658			0) return 0 ;;
2659			# The gid is not unique
2660			4) ((gid += 1)) ;;
2661			*) return 1 ;;
2662		esac
2663	done
2664}
2665
2666function del_group_illumos #<group_name>
2667{
2668	typeset group=$1
2669
2670	groupmod -n $group $group > /dev/null 2>&1
2671	typeset -i ret=$?
2672	case $ret in
2673		# Group does not exist.
2674		6) return 0 ;;
2675		# Name already exists as a group name
2676		9) log_must groupdel $group ;;
2677		*) return 1 ;;
2678	esac
2679}
2680
2681function add_user_linux #<group_name> <user_name> <basedir>
2682{
2683	typeset group=$1
2684	typeset user=$2
2685	typeset basedir=$3
2686
2687	log_must useradd -g $group -d $basedir/$user -m $user
2688
2689	# Add new users to the same group as the command line utilities.
2690	# This allows them to be run out of the original user's home
2691	# directory as long as it is permissioned to be group readable.
2692	cmd_group=$(stat --format="%G" $(which zfs))
2693	log_must usermod -a -G $cmd_group $user
2694
2695	return 0
2696}
2697
2698function del_user_linux #<user_name>
2699{
2700	typeset user=$1
2701
2702	if id $user > /dev/null 2>&1; then
2703		log_must_retry "currently used" 6 userdel $user
2704	fi
2705
2706	return 0
2707}
2708
2709function add_group_linux #<group_name>
2710{
2711	typeset group=$1
2712
2713	# Let groupadd select the gid automatically; no explicit base is used
2714	# because on many Linux distributions gids 1000 and under are reserved.
2715	while true; do
2716		groupadd $group > /dev/null 2>&1
2717		typeset -i ret=$?
2718		case $ret in
2719			0) return 0 ;;
2720			*) return 1 ;;
2721		esac
2722	done
2723}
2724
2725function del_group_linux #<group_name>
2726{
2727	typeset group=$1
2728
2729	getent group $group > /dev/null 2>&1
2730	typeset -i ret=$?
2731	case $ret in
2732		# Group does not exist.
2733		2) return 0 ;;
2734		# Name already exists as a group name
2735		0) log_must groupdel $group ;;
2736		*) return 1 ;;
2737	esac
2738
2739	return 0
2740}
2741
2742#
2743# Add specified user to specified group
2744#
2745# $1 group name
2746# $2 user name
2747# $3 base of the homedir (optional)
2748#
2749function add_user #<group_name> <user_name> <basedir>
2750{
2751	typeset group=$1
2752	typeset user=$2
2753	typeset basedir=${3:-"/var/tmp"}
2754
2755	if ((${#group} == 0 || ${#user} == 0)); then
2756		log_fail "group name or user name are not defined."
2757	fi
2758
2759	case $(uname) in
2760	FreeBSD)
2761		add_user_freebsd "$group" "$user" "$basedir"
2762		;;
2763	Linux)
2764		add_user_linux "$group" "$user" "$basedir"
2765		;;
2766	*)
2767		add_user_illumos "$group" "$user" "$basedir"
2768		;;
2769	esac
2770
2771	return 0
2772}
2773
2774#
2775# Delete the specified user.
2776#
2777# $1 login name
2778# $2 base of the homedir (optional)
2779#
2780function del_user #<logname> <basedir>
2781{
2782	typeset user=$1
2783	typeset basedir=${2:-"/var/tmp"}
2784
2785	if ((${#user} == 0)); then
2786		log_fail "login name is necessary."
2787	fi
2788
2789	case $(uname) in
2790	FreeBSD)
2791		del_user_freebsd "$user"
2792		;;
2793	Linux)
2794		del_user_linux "$user"
2795		;;
2796	*)
2797		del_user_illumos "$user"
2798		;;
2799	esac
2800
2801	[[ -d $basedir/$user ]] && rm -fr $basedir/$user
2802
2803	return 0
2804}
2805
2806#
2807# Select valid gid and create specified group.
2808#
2809# $1 group name
2810#
2811function add_group #<group_name>
2812{
2813	typeset group=$1
2814
2815	if ((${#group} == 0)); then
2816		log_fail "group name is necessary."
2817	fi
2818
2819	case $(uname) in
2820	FreeBSD)
2821		add_group_freebsd "$group"
2822		;;
2823	Linux)
2824		add_group_linux "$group"
2825		;;
2826	*)
2827		add_group_illumos "$group"
2828		;;
2829	esac
2830
2831	return 0
2832}
2833
2834#
2835# Delete the specified group.
2836#
2837# $1 group name
2838#
2839function del_group #<group_name>
2840{
2841	typeset group=$1
2842
2843	if ((${#group} == 0)); then
2844		log_fail "group name is necessary."
2845	fi
2846
2847	case $(uname) in
2848	FreeBSD)
2849		del_group_freebsd "$group"
2850		;;
2851	Linux)
2852		del_group_linux "$group"
2853		;;
2854	*)
2855		del_group_illumos "$group"
2856		;;
2857	esac
2858
2859	return 0
2860}
2861
2862#
2863# This function will return true if it's safe to destroy the pool passed
2864# as argument 1. It checks for pools based on zvols and files, and also
2865# files contained in a pool that may have a different mountpoint.
2866#
2867function safe_to_destroy_pool { # $1 the pool name
2868
2869	typeset pool=""
2870	typeset DONT_DESTROY=""
2871
2872	# We check that by deleting the $1 pool, we're not
2873	# going to pull the rug out from other pools. Do this
2874	# by looking at all other pools, ensuring that they
2875	# aren't built from files or zvols contained in this pool.
2876
2877	for pool in $(zpool list -H -o name)
2878	do
2879		ALTMOUNTPOOL=""
2880
2881		# this is a list of the file vdevs in this pool whose paths
2882		# contain the name of the pool we want to destroy
2883		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2884			awk '{print $1}')
2885
2886		# this is a list of the zvols that make up the pool
2887		ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2888		    | awk '{print $1}')
2889
2890		# also want to determine if it's a file-based pool using an
2891		# alternate mountpoint...
2892		POOL_FILE_DIRS=$(zpool status -v $pool | \
2893					grep / | awk '{print $1}' | \
2894					awk -F/ '{print $2}' | grep -v "dev")
2895
2896		for pooldir in $POOL_FILE_DIRS
2897		do
2898			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2899					grep "${pooldir}$" | awk '{print $1}')
2900
2901			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2902		done
2903
2904
2905		if [ ! -z "$ZVOLPOOL" ]
2906		then
2907			DONT_DESTROY="true"
2908			log_note "Pool $pool is built from $ZVOLPOOL on $1"
2909		fi
2910
2911		if [ ! -z "$FILEPOOL" ]
2912		then
2913			DONT_DESTROY="true"
2914			log_note "Pool $pool is built from $FILEPOOL on $1"
2915		fi
2916
2917		if [ ! -z "$ALTMOUNTPOOL" ]
2918		then
2919			DONT_DESTROY="true"
2920			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2921		fi
2922	done
2923
2924	if [ -z "${DONT_DESTROY}" ]
2925	then
2926		return 0
2927	else
2928		log_note "Warning: it is not safe to destroy $1!"
2929		return 1
2930	fi
2931}
2932
2933#
2934	# Verify zfs operation with the -p option works as expected
2935# $1 operation, value could be create, clone or rename
2936# $2 dataset type, value could be fs or vol
2937# $3 dataset name
2938# $4 new dataset name
2939#
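# Example (dataset names are illustrative):
#	verify_opt_p_ops "create" "fs" "$TESTPOOL/dir1/dir2/newfs"
#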
2940function verify_opt_p_ops
2941{
2942	typeset ops=$1
2943	typeset datatype=$2
2944	typeset dataset=$3
2945	typeset newdataset=$4
2946
2947	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2948		log_fail "$datatype is not supported."
2949	fi
2950
2951	# check parameters accordingly
2952	case $ops in
2953		create)
2954			newdataset=$dataset
2955			dataset=""
2956			if [[ $datatype == "vol" ]]; then
2957				ops="create -V $VOLSIZE"
2958			fi
2959			;;
2960		clone)
2961			if [[ -z $newdataset ]]; then
2962				log_fail "newdataset should not be empty" \
2963					"when ops is $ops."
2964			fi
2965			log_must datasetexists $dataset
2966			log_must snapexists $dataset
2967			;;
2968		rename)
2969			if [[ -z $newdataset ]]; then
2970				log_fail "newdataset should not be empty" \
2971					"when ops is $ops."
2972			fi
2973			log_must datasetexists $dataset
2974			;;
2975		*)
2976			log_fail "$ops is not supported."
2977			;;
2978	esac
2979
2980	# make sure the upper level filesystem does not exist
2981	destroy_dataset "${newdataset%/*}" "-rRf"
2982
2983	# without -p option, operation will fail
2984	log_mustnot zfs $ops $dataset $newdataset
2985	log_mustnot datasetexists $newdataset ${newdataset%/*}
2986
2987	# with -p option, operation should succeed
2988	log_must zfs $ops -p $dataset $newdataset
2989	block_device_wait
2990
2991	if ! datasetexists $newdataset ; then
2992		log_fail "-p option does not work for $ops"
2993	fi
2994
2995	# when $ops is create or clone, redoing the operation should still return zero
2996	if [[ $ops != "rename" ]]; then
2997		log_must zfs $ops -p $dataset $newdataset
2998	fi
2999
3000	return 0
3001}
3002
3003#
3004# Get configuration of pool
3005# $1 pool name
3006# $2 config name
3007#
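# Example (a sketch; prints e.g. the pool's on-disk version, pool name illustrative):
#	typeset version=$(get_config mypool version)
#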
3008function get_config
3009{
3010	typeset pool=$1
3011	typeset config=$2
3012	typeset alt_root
3013
3014	if ! poolexists "$pool" ; then
3015		return 1
3016	fi
3017	alt_root=$(zpool list -H $pool | awk '{print $NF}')
3018	if [[ $alt_root == "-" ]]; then
3019		value=$(zdb -C $pool | grep "$config:" | awk -F: \
3020		    '{print $2}')
3021	else
3022		value=$(zdb -e $pool | grep "$config:" | awk -F: \
3023		    '{print $2}')
3024	fi
3025	if [[ -n $value ]] ; then
3026		value=${value#'}
3027		value=${value%'}
3028	fi
3029	echo $value
3030
3031	return 0
3032}
3033
3034#
3035	# Private function. Randomly select one of the items from the arguments.
3036#
3037# $1 count
3038# $2-n string
3039#
3040function _random_get
3041{
3042	typeset cnt=$1
3043	shift
3044
3045	typeset str="$@"
3046	typeset -i ind
3047	((ind = RANDOM % cnt + 1))
3048
3049	typeset ret=$(echo "$str" | cut -f $ind -d ' ')
3050	echo $ret
3051}
3052
3053#
3054	# Randomly select one of the arguments, allowing NONE (empty) as a possible result
3055#
3056function random_get_with_non
3057{
3058	typeset -i cnt=$#
3059	((cnt += 1))
3060
3061	_random_get "$cnt" "$@"
3062}
3063
3064#
3065	# Randomly select one of the arguments, without the NONE option
3066#
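# Example: print one of the three arguments at random:
#	random_get "on" "off" "legacy"
#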
3067function random_get
3068{
3069	_random_get "$#" "$@"
3070}
3071
3072#
3073	# The function will generate a dataset name with a specific length
3074# $1, the length of the name
3075# $2, the base string to construct the name
3076#
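# Example: gen_dataset_name 16 "abcd" prints "abcdabcdabcdabcd".
#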
3077function gen_dataset_name
3078{
3079	typeset -i len=$1
3080	typeset basestr="$2"
3081	typeset -i baselen=${#basestr}
3082	typeset -i iter=0
3083	typeset l_name=""
3084
3085	if ((len % baselen == 0)); then
3086		((iter = len / baselen))
3087	else
3088		((iter = len / baselen + 1))
3089	fi
3090	while ((iter > 0)); do
3091		l_name="${l_name}$basestr"
3092
3093		((iter -= 1))
3094	done
3095
3096	echo $l_name
3097}
3098
3099#
3100# Get cksum tuple of dataset
3101# $1 dataset name
3102#
3103# sample zdb output:
3104# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
3105# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
3106# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
3107# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
3108function datasetcksum
3109{
3110	typeset cksum
3111	sync
3112	sync_all_pools
3113	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
3114		| awk -F= '{print $7}')
3115	echo $cksum
3116}
3117
3118#
3119# Get cksum of file
3120	# $1 file path
3121#
3122function checksum
3123{
3124	typeset cksum
3125	cksum=$(cksum $1 | awk '{print $1}')
3126	echo $cksum
3127}
3128
3129#
3130# Get the given disk/slice state from the specific field of the pool
3131#
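# Example (pool and disk names are illustrative):
#	get_device_state mypool sdb "spares"	# prints e.g. "AVAIL"
#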
3132function get_device_state #pool disk field("", "spares","logs")
3133{
3134	typeset pool=$1
3135	typeset disk=${2#$DEV_DSKDIR/}
3136	typeset field=${3:-$pool}
3137
3138	state=$(zpool status -v "$pool" 2>/dev/null | \
3139		nawk -v device=$disk -v pool=$pool -v field=$field \
3140		'BEGIN {startconfig=0; startfield=0; }
3141		/config:/ {startconfig=1}
3142		(startconfig==1) && ($1==field) {startfield=1; next;}
3143		(startfield==1) && ($1==device) {print $2; exit;}
3144		(startfield==1) &&
3145		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
3146	echo $state
3147}
3148
3149
3150#
3151# print the given directory filesystem type
3152#
3153# $1 directory name
3154#
3155function get_fstype
3156{
3157	typeset dir=$1
3158
3159	if [[ -z $dir ]]; then
3160		log_fail "Usage: get_fstype <directory>"
3161	fi
3162
3163	#
3164	#  $ df -n /
3165	#  /		  : ufs
3166	#
3167	df -n $dir | awk '{print $3}'
3168}
3169
3170#
3171# Given a disk, label it to VTOC regardless what label was on the disk
3172# $1 disk
3173#
3174function labelvtoc
3175{
3176	typeset disk=$1
3177	if [[ -z $disk ]]; then
3178		log_fail "The disk name is unspecified."
3179	fi
3180	typeset label_file=/var/tmp/labelvtoc.$$
3181	typeset arch=$(uname -p)
3182
3183	if is_linux || is_freebsd; then
3184		log_note "Currently unsupported by the test framework"
3185		return 1
3186	fi
3187
3188	if [[ $arch == "i386" ]]; then
3189		echo "label" > $label_file
3190		echo "0" >> $label_file
3191		echo "" >> $label_file
3192		echo "q" >> $label_file
3193		echo "q" >> $label_file
3194
3195		fdisk -B $disk >/dev/null 2>&1
3196		# wait a while for fdisk to finish
3197		sleep 60
3198	elif [[ $arch == "sparc" ]]; then
3199		echo "label" > $label_file
3200		echo "0" >> $label_file
3201		echo "" >> $label_file
3202		echo "" >> $label_file
3203		echo "" >> $label_file
3204		echo "q" >> $label_file
3205	else
3206		log_fail "unknown arch type"
3207	fi
3208
3209	format -e -s -d $disk -f $label_file
3210	typeset -i ret_val=$?
3211	rm -f $label_file
3212	#
3213	# wait for the format to finish
3214	#
3215	sleep 60
3216	if ((ret_val != 0)); then
3217		log_fail "unable to label $disk as VTOC."
3218	fi
3219
3220	return 0
3221}
3222
3223#
3224# check if the system was installed as zfsroot or not
3225# return: 0 if zfsroot, non-zero if not
3226#
3227function is_zfsroot
3228{
3229	df -n / | grep zfs > /dev/null 2>&1
3230	return $?
3231}
3232
3233#
3234	# get the root filesystem name if it's a zfsroot system.
3235#
3236# return: root filesystem name
3237function get_rootfs
3238{
3239	typeset rootfs=""
3240
3241	if is_freebsd; then
3242		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
3243	elif ! is_linux; then
3244		rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
3245			/etc/mnttab)
3246	fi
3247	if [[ -z "$rootfs" ]]; then
3248		log_fail "Can not get rootfs"
3249	fi
3250	zfs list $rootfs > /dev/null 2>&1
3251	if (($? == 0)); then
3252		echo $rootfs
3253	else
3254		log_fail "This is not a zfsroot system."
3255	fi
3256}
3257
3258#
3259# get the rootfs's pool name
3260# return:
3261#       rootpool name
3262#
3263function get_rootpool
3264{
3265	typeset rootfs=""
3266	typeset rootpool=""
3267
3268	if is_freebsd; then
3269		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
3270	elif ! is_linux; then
3271		rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
3272			 /etc/mnttab)
3273	fi
3274	if [[ -z "$rootfs" ]]; then
3275		log_fail "Can not get rootpool"
3276	fi
3277	zfs list $rootfs > /dev/null 2>&1
3278	if (($? == 0)); then
3279		echo ${rootfs%%/*}
3280	else
3281		log_fail "This is not a zfsroot system."
3282	fi
3283}
3284
3285#
3286	# Get the number of words in a white-space separated string
3287#
3288function get_word_count
3289{
3290	echo $1 | wc -w
3291}
3292
3293#
3294	# Verify that the required number of disks is given
3295#
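# Example: require at least two disks from the $DISKS variable:
#	verify_disk_count "$DISKS" 2
#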
3296function verify_disk_count
3297{
3298	typeset -i min=${2:-1}
3299
3300	typeset -i count=$(get_word_count "$1")
3301
3302	if ((count < min)); then
3303		log_untested "A minimum of $min disks is required to run." \
3304			" You specified $count disk(s)"
3305	fi
3306}
3307
3308function ds_is_volume
3309{
3310	typeset type=$(get_prop type $1)
3311	[[ $type = "volume" ]] && return 0
3312	return 1
3313}
3314
3315function ds_is_filesystem
3316{
3317	typeset type=$(get_prop type $1)
3318	[[ $type = "filesystem" ]] && return 0
3319	return 1
3320}
3321
3322function ds_is_snapshot
3323{
3324	typeset type=$(get_prop type $1)
3325	[[ $type = "snapshot" ]] && return 0
3326	return 1
3327}
3328
3329#
3330# Check if Trusted Extensions are installed and enabled
3331#
3332function is_te_enabled
3333{
3334	svcs -H -o state labeld 2>/dev/null | grep "enabled"
3335	if (($? != 0)); then
3336		return 1
3337	else
3338		return 0
3339	fi
3340}
3341
3342# Utility function to determine if a system has multiple cpus.
3343function is_mp
3344{
3345	if is_linux; then
3346		(($(nproc) > 1))
3347	elif is_freebsd; then
3348		(($(sysctl -n kern.smp.cpus) > 1))
3349	else
3350		(($(psrinfo | wc -l) > 1))
3351	fi
3352
3353	return $?
3354}
3355
3356function get_cpu_freq
3357{
3358	if is_linux; then
3359		lscpu | awk '/CPU MHz/ { print $3 }'
3360	elif is_freebsd; then
3361		sysctl -n hw.clockrate
3362	else
3363		psrinfo -v 0 | awk '/processor operates at/ {print $6}'
3364	fi
3365}
3366
3367# Run the given command as the user provided.
3368function user_run
3369{
3370	typeset user=$1
3371	shift
3372
3373	log_note "user: $user"
3374	log_note "cmd: $*"
3375
3376	typeset out=$TEST_BASE_DIR/out
3377	typeset err=$TEST_BASE_DIR/err
3378
3379	sudo -Eu $user env PATH="$PATH" ksh <<<"$*" >$out 2>$err
3380	typeset res=$?
3381	log_note "out: $(<$out)"
3382	log_note "err: $(<$err)"
3383	return $res
3384}
3385
3386#
3387# Check if the pool contains the specified vdevs
3388#
3389# $1 pool
3390# $2..n <vdev> ...
3391#
3392# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3393# vdevs is not in the pool, and 2 if pool name is missing.
3394#
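# Example (vdev name is illustrative):
#	vdevs_in_pool $TESTPOOL mirror-0 || log_fail "mirror-0 not in $TESTPOOL"
#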
3395function vdevs_in_pool
3396{
3397	typeset pool=$1
3398	typeset vdev
3399
3400	if [[ -z $pool ]]; then
3401		log_note "Missing pool name."
3402		return 2
3403	fi
3404
3405	shift
3406
3407	# We could use 'zpool list' to only get the vdevs of the pool but we
3408	# can't reference a mirror/raidz vdev using its ID (i.e. mirror-0),
3409	# therefore we use the 'zpool status' output.
3410	typeset tmpfile=$(mktemp)
3411	zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
3412	for vdev in $@; do
3413		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
3414		[[ $? -ne 0 ]] && return 1
3415	done
3416
3417	rm -f $tmpfile
3418
3419	return 0;
3420}
3421
3422function get_max
3423{
3424	typeset -l i max=$1
3425	shift
3426
3427	for i in "$@"; do
3428		max=$((max > i ? max : i))
3429	done
3430
3431	echo $max
3432}
3433
3434function get_min
3435{
3436	typeset -l i min=$1
3437	shift
3438
3439	for i in "$@"; do
3440		min=$((min < i ? min : i))
3441	done
3442
3443	echo $min
3444}
3445
3446# Write data that can be compressed into a directory
3447function write_compressible
3448{
3449	typeset dir=$1
3450	typeset megs=$2
3451	typeset nfiles=${3:-1}
3452	typeset bs=${4:-1024k}
3453	typeset fname=${5:-file}
3454
3455	[[ -d $dir ]] || log_fail "No directory: $dir"
3456
3457	# Under Linux fio is not currently used since its behavior can
3458	# differ significantly across versions.  This includes missing
3459	# command line options and cases where the --buffer_compress_*
3460	# options fail to behave as expected.
3461	if is_linux; then
3462		typeset file_bytes=$(to_bytes $megs)
3463		typeset bs_bytes=4096
3464		typeset blocks=$(($file_bytes / $bs_bytes))
3465
3466		for (( i = 0; i < $nfiles; i++ )); do
3467			truncate -s $file_bytes $dir/$fname.$i
3468
3469			# Write every third block to get 66% compression.
3470			for (( j = 0; j < $blocks; j += 3 )); do
3471				dd if=/dev/urandom of=$dir/$fname.$i \
3472				    seek=$j bs=$bs_bytes count=1 \
3473				    conv=notrunc >/dev/null 2>&1
3474			done
3475		done
3476	else
3477		log_must eval "fio \
3478		    --name=job \
3479		    --fallocate=0 \
3480		    --minimal \
3481		    --randrepeat=0 \
3482		    --buffer_compress_percentage=66 \
3483		    --buffer_compress_chunk=4096 \
3484		    --directory=$dir \
3485		    --numjobs=$nfiles \
3486		    --nrfiles=$nfiles \
3487		    --rw=write \
3488		    --bs=$bs \
3489		    --filesize=$megs \
3490		    --filename_format='$fname.\$jobnum' >/dev/null"
3491	fi
3492}
3493
3494function get_objnum
3495{
3496	typeset pathname=$1
3497	typeset objnum
3498
3499	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
3500	if is_freebsd; then
3501		objnum=$(stat -f "%i" $pathname)
3502	else
3503		objnum=$(stat -c %i $pathname)
3504	fi
3505	echo $objnum
3506}
3507
3508#
3509# Sync data to the pool
3510#
3511# $1 pool name
3512# $2 boolean to force uberblock (and config including zpool cache file) update
3513#
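# Example: force a config/uberblock update on $TESTPOOL:
#	sync_pool $TESTPOOL true
#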
3514function sync_pool #pool <force>
3515{
3516	typeset pool=${1:-$TESTPOOL}
3517	typeset force=${2:-false}
3518
3519	if [[ $force == true ]]; then
3520		log_must zpool sync -f $pool
3521	else
3522		log_must zpool sync $pool
3523	fi
3524
3525	return 0
3526}
3527
3528#
3529# Sync all pools
3530#
3531# $1 boolean to force uberblock (and config including zpool cache file) update
3532#
3533function sync_all_pools #<force>
3534{
3535	typeset force=${1:-false}
3536
3537	if [[ $force == true ]]; then
3538		log_must zpool sync -f
3539	else
3540		log_must zpool sync
3541	fi
3542
3543	return 0
3544}
3545
3546#
3547	# Wait for the zpool 'freeing' property to drop to zero.
3548#
3549# $1 pool name
3550#
3551function wait_freeing #pool
3552{
3553	typeset pool=${1:-$TESTPOOL}
3554	while true; do
3555		[[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3556		log_must sleep 1
3557	done
3558}
3559
3560#
3561# Wait for every device replace operation to complete
3562#
3563# $1 pool name
3564#
3565function wait_replacing #pool
3566{
3567	typeset pool=${1:-$TESTPOOL}
3568	while true; do
3569		[[ "" == "$(zpool status $pool |
3570		    awk '/replacing-[0-9]+/ {print $1}')" ]] && break
3571		log_must sleep 1
3572	done
3573}
3574
3575# Wait for a pool to be scrubbed
3576#
3577# $1 pool name
3578# $2 timeout
3579#
3580function wait_scrubbed #pool timeout
3581{
3582	typeset timeout=${2:-300}
3583	typeset pool=${1:-$TESTPOOL}
3584	for (( timer = 0; timer < $timeout; timer++ )); do
3585		is_pool_scrubbed $pool && break
3586		sleep 1
3587	done
3588}
3589
3590	# Back up the zed.rc in our test directory so that we can edit it for our test.
3591#
3592# Returns: Backup file name.  You will need to pass this to zed_rc_restore().
3593function zed_rc_backup
3594{
3595	zedrc_backup="$(mktemp)"
3596	cp $ZEDLET_DIR/zed.rc $zedrc_backup
3597	echo $zedrc_backup
3598}
3599
3600function zed_rc_restore
3601{
3602	mv $1 $ZEDLET_DIR/zed.rc
3603}
3604
3605#
3606# Setup custom environment for the ZED.
3607#
3608# $@ Optional list of zedlets to run under zed.
3609function zed_setup
3610{
3611	if ! is_linux; then
3612		log_unsupported "No zed on $(uname)"
3613	fi
3614
3615	if [[ ! -d $ZEDLET_DIR ]]; then
3616		log_must mkdir $ZEDLET_DIR
3617	fi
3618
3619	if [[ ! -e $VDEVID_CONF ]]; then
3620		log_must touch $VDEVID_CONF
3621	fi
3622
3623	if [[ -e $VDEVID_CONF_ETC ]]; then
3624		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
3625	fi
3626	EXTRA_ZEDLETS=$@
3627
3628	# Create a symlink for /etc/zfs/vdev_id.conf file.
3629	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
3630
3631	# Setup minimal ZED configuration.  Individual test cases should
3632	# add additional ZEDLETs as needed for their specific test.
3633	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
3634	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
3635
3636	# Scripts must only be user writable.
3637	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3638		saved_umask=$(umask)
3639		log_must umask 0022
3640		for i in $EXTRA_ZEDLETS ; do
3641			log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
3642		done
3643		log_must umask $saved_umask
3644	fi
3645
3646	# Customize the zed.rc file to enable the full debug log.
3647	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
3648	echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
3649
3650}
3651
3652#
3653# Cleanup custom ZED environment.
3654#
3655# $@ Optional list of zedlets to remove from our test zed.d directory.
3656function zed_cleanup
3657{
3658	if ! is_linux; then
3659		return
3660	fi
3661	EXTRA_ZEDLETS=$@
3662
3663	log_must rm -f ${ZEDLET_DIR}/zed.rc
3664	log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
3665	log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
3666	log_must rm -f ${ZEDLET_DIR}/all-debug.sh
3667	log_must rm -f ${ZEDLET_DIR}/state
3668
3669	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3670		for i in $EXTRA_ZEDLETS ; do
3671			log_must rm -f ${ZEDLET_DIR}/$i
3672		done
3673	fi
3674	log_must rm -f $ZED_LOG
3675	log_must rm -f $ZED_DEBUG_LOG
3676	log_must rm -f $VDEVID_CONF_ETC
3677	log_must rm -f $VDEVID_CONF
3678	rmdir $ZEDLET_DIR
3679}
3680
3681#
3682# Check if ZED is currently running; if so, returns PIDs
3683#
3684function zed_check
3685{
3686	if ! is_linux; then
3687		return
3688	fi
3689	zedpids="$(pgrep -x zed)"
3690#	ret1=$?
3691	zedpids2="$(pgrep -x lt-zed)"
3692#	ret2=$?
3693	echo ${zedpids} ${zedpids2}
3694}
3695
3696#
3697# Check if ZED is currently running, if not start ZED.
3698#
3699function zed_start
3700{
3701	if ! is_linux; then
3702		return
3703	fi
3704
3705	# ZEDLET_DIR=/var/tmp/zed
3706	if [[ ! -d $ZEDLET_DIR ]]; then
3707		log_must mkdir $ZEDLET_DIR
3708	fi
3709
3710	# Verify the ZED is not already running.
3711	zedpids=$(zed_check)
3712	if [ -n "$zedpids" ]; then
3713		# We never, ever, really want it to just keep going if zed
3714		# is already running - usually this implies our test cases
3715		# will break very strangely because whatever we wanted to
3716		# configure zed for won't be listening to our changes in the
3717		# tmpdir
3718		log_fail "ZED already running - ${zedpids}"
3719	else
3720		log_note "Starting ZED"
3721		# run ZED in the background and redirect foreground logging
3722		# output to $ZED_LOG.
3723		log_must truncate -s 0 $ZED_DEBUG_LOG
3724		log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \
3725		    "-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &"
3726	fi
3727
3728	return 0
3729}
3730
3731#
3732# Kill ZED process
3733#
3734function zed_stop
3735{
3736	if ! is_linux; then
3737		return
3738	fi
3739
3740	log_note "Stopping ZED"
3741	while true; do
3742		zedpids=$(zed_check)
3743		[ ! -n "$zedpids" ] && break
3744
3745		log_must kill $zedpids
3746		sleep 1
3747	done
3748	return 0
3749}
3750
3751#
3752# Drain all zevents
3753#
3754function zed_events_drain
3755{
3756	while [ $(zpool events -H | wc -l) -ne 0 ]; do
3757		sleep 1
3758		zpool events -c >/dev/null
3759	done
3760}
3761
3762# Set a variable in zed.rc to something, un-commenting it in the process.
3763#
3764# $1 variable
3765# $2 value
3766function zed_rc_set
3767{
3768	var="$1"
3769	val="$2"
3770	# Remove the line
3771	cmd="'/$var/d'"
3772	eval sed -i $cmd $ZEDLET_DIR/zed.rc
3773
3774	# Add it at the end
3775	echo "$var=$val" >> $ZEDLET_DIR/zed.rc
3776}
3777
3778
3779#
3780	# Check if the provided device is actively being used as a swap device.
3781#
3782function is_swap_inuse
3783{
3784	typeset device=$1
3785
3786	if [[ -z $device ]] ; then
3787		log_note "No device specified."
3788		return 1
3789	fi
3790
3791	if is_linux; then
3792		swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
3793	elif is_freebsd; then
3794		swapctl -l | grep -w $device
3795	else
3796		swap -l | grep -w $device > /dev/null 2>&1
3797	fi
3798
3799	return $?
3800}
3801
3802#
3803# Setup a swap device using the provided device.
3804#
3805function swap_setup
3806{
3807	typeset swapdev=$1
3808
3809	if is_linux; then
3810		log_must eval "mkswap $swapdev > /dev/null 2>&1"
3811		log_must swapon $swapdev
3812	elif is_freebsd; then
3813		log_must swapctl -a $swapdev
3814	else
3815		log_must swap -a $swapdev
3816	fi
3817
3818	return 0
3819}
3820
3821#
3822# Cleanup a swap device on the provided device.
3823#
3824function swap_cleanup
3825{
3826	typeset swapdev=$1
3827
3828	if is_swap_inuse $swapdev; then
3829		if is_linux; then
3830			log_must swapoff $swapdev
3831		elif is_freebsd; then
3832			log_must swapoff $swapdev
3833		else
3834			log_must swap -d $swapdev
3835		fi
3836	fi
3837
3838	return 0
3839}
3840
3841#
3842# Set a global system tunable (64-bit value)
3843#
3844# $1 tunable name (use a NAME defined in tunables.cfg)
3845# $2 tunable values
3846#
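# Example (the NAME must be defined in tunables.cfg):
#	set_tunable64 TXG_TIMEOUT 10
#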
3847function set_tunable64
3848{
3849	set_tunable_impl "$1" "$2" Z
3850}
3851
3852#
3853# Set a global system tunable (32-bit value)
3854#
3855# $1 tunable name (use a NAME defined in tunables.cfg)
3856# $2 tunable values
3857#
3858function set_tunable32
3859{
3860	set_tunable_impl "$1" "$2" W
3861}
3862
3863function set_tunable_impl
3864{
3865	typeset name="$1"
3866	typeset value="$2"
3867	typeset mdb_cmd="$3"
3868	typeset module="${4:-zfs}"
3869
3870	eval "typeset tunable=\$$name"
3871	case "$tunable" in
3872	UNSUPPORTED)
3873		log_unsupported "Tunable '$name' is unsupported on $(uname)"
3874		;;
3875	"")
3876		log_fail "Tunable '$name' must be added to tunables.cfg"
3877		;;
3878	*)
3879		;;
3880	esac
3881
3882	[[ -z "$value" ]] && return 1
3883	[[ -z "$mdb_cmd" ]] && return 1
3884
3885	case "$(uname)" in
3886	Linux)
3887		typeset zfs_tunables="/sys/module/$module/parameters"
3888		[[ -w "$zfs_tunables/$tunable" ]] || return 1
3889		cat >"$zfs_tunables/$tunable" <<<"$value"
3890		return $?
3891		;;
3892	FreeBSD)
3893		sysctl vfs.zfs.$tunable=$value
3894		return "$?"
3895		;;
3896	SunOS)
3897		[[ "$module" == "zfs" ]] || return 1
3898		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
3899		return $?
3900		;;
3901	esac
3902}
3903
3904#
3905# Get a global system tunable
3906#
3907# $1 tunable name (use a NAME defined in tunables.cfg)
3908#
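# Example: save the current value so it can be restored later:
#	typeset saved_timeout=$(get_tunable TXG_TIMEOUT)
#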
3909function get_tunable
3910{
3911	get_tunable_impl "$1"
3912}
3913
3914function get_tunable_impl
3915{
3916	typeset name="$1"
3917	typeset module="${2:-zfs}"
3918
3919	eval "typeset tunable=\$$name"
3920	case "$tunable" in
3921	UNSUPPORTED)
3922		log_unsupported "Tunable '$name' is unsupported on $(uname)"
3923		;;
3924	"")
3925		log_fail "Tunable '$name' must be added to tunables.cfg"
3926		;;
3927	*)
3928		;;
3929	esac
3930
3931	case "$(uname)" in
3932	Linux)
3933		typeset zfs_tunables="/sys/module/$module/parameters"
3934		[[ -f "$zfs_tunables/$tunable" ]] || return 1
3935		cat $zfs_tunables/$tunable
3936		return $?
3937		;;
3938	FreeBSD)
3939		sysctl -n vfs.zfs.$tunable
3940		;;
3941	SunOS)
3942		[[ "$module" == "zfs" ]] || return 1
3943		;;
3944	esac
3945
3946	return 1
3947}
3948
3949#
3950# Prints the current time in seconds since UNIX Epoch.
3951#
3952function current_epoch
3953{
3954	printf '%(%s)T'
3955}
3956
3957#
3958# Get decimal value of global uint32_t variable using mdb.
3959#
3960function mdb_get_uint32
3961{
3962	typeset variable=$1
3963	typeset value
3964
3965	value=$(mdb -k -e "$variable/X | ::eval .=U")
3966	if [[ $? -ne 0 ]]; then
3967		log_fail "Failed to get value of '$variable' from mdb."
3968		return 1
3969	fi
3970
3971	echo $value
3972	return 0
3973}
3974
3975#
3976# Set global uint32_t variable to a decimal value using mdb.
3977#
3978function mdb_set_uint32
3979{
3980	typeset variable=$1
3981	typeset value=$2
3982
3983	mdb -kw -e "$variable/W 0t$value" > /dev/null
3984	if [[ $? -ne 0 ]]; then
3985		echo "Failed to set '$variable' to '$value' in mdb."
3986		return 1
3987	fi
3988
3989	return 0
3990}
3991
3992#
3993# Set global scalar integer variable to a hex value using mdb.
3994# Note: Target should have CTF data loaded.
3995#
3996function mdb_ctf_set_int
3997{
3998	typeset variable=$1
3999	typeset value=$2
4000
4001	mdb -kw -e "$variable/z $value" > /dev/null
4002	if [[ $? -ne 0 ]]; then
4003		echo "Failed to set '$variable' to '$value' in mdb."
4004		return 1
4005	fi
4006
4007	return 0
4008}
4009
4010#
4011# Compute MD5 digest for given file or stdin if no file given.
4012# Note: file path must not contain spaces
4013#
4014function md5digest
4015{
4016	typeset file=$1
4017
4018	case $(uname) in
4019	FreeBSD)
4020		md5 -q $file
4021		;;
4022	*)
4023		md5sum -b $file | awk '{ print $1 }'
4024		;;
4025	esac
4026}
4027
4028#
4029# Compute SHA256 digest for given file or stdin if no file given.
4030# Note: file path must not contain spaces
4031#
4032function sha256digest
4033{
4034	typeset file=$1
4035
4036	case $(uname) in
4037	FreeBSD)
4038		sha256 -q $file
4039		;;
4040	*)
4041		sha256sum -b $file | awk '{ print $1 }'
4042		;;
4043	esac
4044}
4045
4046function new_fs #<args>
4047{
4048	case $(uname) in
4049	FreeBSD)
4050		newfs "$@"
4051		;;
4052	*)
4053		echo y | newfs -v "$@"
4054		;;
4055	esac
4056}
4057
4058function stat_size #<path>
4059{
4060	typeset path=$1
4061
4062	case $(uname) in
4063	FreeBSD)
4064		stat -f %z "$path"
4065		;;
4066	*)
4067		stat -c %s "$path"
4068		;;
4069	esac
4070}
4071
4072function stat_ctime #<path>
4073{
4074	typeset path=$1
4075
4076	case $(uname) in
4077	FreeBSD)
4078		stat -f %c "$path"
4079		;;
4080	*)
4081		stat -c %Z "$path"
4082		;;
4083	esac
4084}
4085
4086function stat_crtime #<path>
4087{
4088	typeset path=$1
4089
4090	case $(uname) in
4091	FreeBSD)
4092		stat -f %B "$path"
4093		;;
4094	*)
4095		stat -c %W "$path"
4096		;;
4097	esac
4098}
4099
4100function stat_generation #<path>
4101{
4102	typeset path=$1
4103
4104	case $(uname) in
4105	Linux)
4106		getversion "${path}"
4107		;;
4108	*)
4109		stat -f %v "${path}"
4110		;;
4111	esac
4112}
4113
4114# Run a command as if it was being run in a TTY.
4115#
4116# Usage:
4117#
4118#    faketty command
4119#
4120function faketty
4121{
4122    if is_freebsd; then
4123        script -q /dev/null env "$@"
4124    else
4125        script --return --quiet -c "$*" /dev/null
4126    fi
4127}
4128
4129#
4130# Produce a random permutation of the integers in a given range (inclusive).
4131#
4132function range_shuffle # begin end
4133{
4134	typeset -i begin=$1
4135	typeset -i end=$2
4136
4137	seq ${begin} ${end} | sort -R
4138}
4139
4140#
4141# Cross-platform xattr helpers
4142#
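# Example (file path and attribute name are illustrative):
#	set_xattr testattr testval /mnt/testfile
#	get_xattr testattr /mnt/testfile	# prints "testval"
#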
4143
4144function get_xattr # name path
4145{
4146	typeset name=$1
4147	typeset path=$2
4148
4149	case $(uname) in
4150	FreeBSD)
4151		getextattr -qq user "${name}" "${path}"
4152		;;
4153	*)
4154		attr -qg "${name}" "${path}"
4155		;;
4156	esac
4157}
4158
4159function set_xattr # name value path
4160{
4161	typeset name=$1
4162	typeset value=$2
4163	typeset path=$3
4164
4165	case $(uname) in
4166	FreeBSD)
4167		setextattr user "${name}" "${value}" "${path}"
4168		;;
4169	*)
4170		attr -qs "${name}" -V "${value}" "${path}"
4171		;;
4172	esac
4173}
4174
4175function set_xattr_stdin # name value
4176{
4177	typeset name=$1
4178	typeset path=$2
4179
4180	case $(uname) in
4181	FreeBSD)
4182		setextattr -i user "${name}" "${path}"
4183		;;
4184	*)
4185		attr -qs "${name}" "${path}"
4186		;;
4187	esac
4188}
4189
4190function rm_xattr # name path
4191{
4192	typeset name=$1
4193	typeset path=$2
4194
4195	case $(uname) in
4196	FreeBSD)
4197		rmextattr -q user "${name}" "${path}"
4198		;;
4199	*)
4200		attr -qr "${name}" "${path}"
4201		;;
4202	esac
4203}
4204
4205function ls_xattr # path
4206{
4207	typeset path=$1
4208
4209	case $(uname) in
4210	FreeBSD)
4211		lsextattr -qq user "${path}"
4212		;;
4213	*)
4214		attr -ql "${path}"
4215		;;
4216	esac
4217}
4218
4219function kstat # stat flags?
4220{
4221	typeset stat=$1
4222	typeset flags=${2-"-n"}
4223
4224	case $(uname) in
4225	FreeBSD)
4226		sysctl $flags kstat.zfs.misc.$stat
4227		;;
4228	Linux)
4229		typeset zfs_kstat="/proc/spl/kstat/zfs/$stat"
4230		[[ -f "$zfs_kstat" ]] || return 1
4231		cat $zfs_kstat
4232		;;
4233	*)
4234		false
4235		;;
4236	esac
4237}
4238
4239function get_arcstat # stat
4240{
4241	typeset stat=$1
4242
4243	case $(uname) in
4244	FreeBSD)
4245		kstat arcstats.$stat
4246		;;
4247	Linux)
4248		kstat arcstats | awk "/$stat/ { print \$3 }"
4249		;;
4250	*)
4251		false
4252		;;
4253	esac
4254}
4255
4256function punch_hole # offset length file
4257{
4258	typeset offset=$1
4259	typeset length=$2
4260	typeset file=$3
4261
4262	case $(uname) in
4263	FreeBSD)
4264		truncate -d -o $offset -l $length "$file"
4265		;;
4266	Linux)
4267		fallocate --punch-hole --offset $offset --length $length "$file"
4268		;;
4269	*)
4270		false
4271		;;
4272	esac
4273}
4274
4275#
4276# Wait for the specified arcstat to reach non-zero quiescence.
4277	# If echo is 1, echo the value after reaching quiescence; otherwise,
4278	# if echo is 0, print the name of the arcstat we are waiting on.
4279#
4280function arcstat_quiescence # stat echo
4281{
4282	typeset stat=$1
4283	typeset echo=$2
4284	typeset do_once=true
4285
4286	if [[ $echo -eq 0 ]]; then
4287		echo "Waiting for arcstat $1 quiescence."
4288	fi
4289
4290	while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
4291		typeset stat1=$(get_arcstat $stat)
4292		sleep 2
4293		typeset stat2=$(get_arcstat $stat)
4294		do_once=false
4295	done
4296
4297	if [[ $echo -eq 1 ]]; then
4298		echo $stat2
4299	fi
4300}
4301
4302function arcstat_quiescence_noecho # stat
4303{
4304	typeset stat=$1
4305	arcstat_quiescence $stat 0
4306}
4307
4308function arcstat_quiescence_echo # stat
4309{
4310	typeset stat=$1
4311	arcstat_quiescence $stat 1
4312}
4313
4314#
4315# Given an array of pids, wait until all processes
4316# have completed and check their return status.
4317#
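# Example (a sketch; the pids come from background jobs started by the caller):
#	typeset -a pids
#	... launch background work, appending each $! to pids ...
#	wait_for_children "${pids[@]}" || log_fail "a child process failed"
#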
4318function wait_for_children #children
4319{
4320	rv=0
4321	children=("$@")
4322	for child in "${children[@]}"
4323	do
4324		child_exit=0
4325		wait ${child} || child_exit=$?
4326		if [ $child_exit -ne 0 ]; then
4327			echo "child ${child} failed with ${child_exit}"
4328			rv=1
4329		fi
4330	done
4331	return $rv
4332}
4333
4334#
4335# Compare two directory trees recursively in a manner similar to diff(1), but
4336# using rsync. If there are any discrepancies, a summary of the differences are
4337# output and a non-zero error is returned.
4338#
4339# If you're comparing a directory after a ZIL replay, you should set
4340# LIBTEST_DIFF_ZIL_REPLAY=1 or use replay_directory_diff which will cause
4341# directory_diff to ignore mtime changes (the ZIL replay won't fix up mtime
4342# information).
4343#
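# Example (directories are illustrative):
#	directory_diff /mnt/src /mnt/dst || log_fail "directory trees differ"
#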
4344function directory_diff # dir_a dir_b
4345{
4346	dir_a="$1"
4347	dir_b="$2"
4348	zil_replay="${LIBTEST_DIFF_ZIL_REPLAY:-0}"
4349
4350	# If one of the directories doesn't exist, return 2. This is to match the
4351	# semantics of diff.
4352	if ! [ -d "$dir_a" -a -d "$dir_b" ]; then
4353		return 2
4354	fi
4355
4356	# Run rsync with --dry-run --itemize-changes to get something akin to diff
4357	# output, but rsync is far more thorough in detecting differences (diff
4358	# doesn't compare file metadata, and cannot handle special files).
4359	#
4360	# Also make sure to filter out non-user.* xattrs when comparing. On
4361	# SELinux-enabled systems the copied tree will probably have different
4362	# SELinux labels.
4363	args=("-nicaAHX" '--filter=-x! user.*' "--delete")
4364
4365	# NOTE: Quite a few rsync builds do not support --crtimes which would be
4366	# necessary to verify that creation times are being maintained properly.
4367	# Unfortunately because of this we cannot use it unconditionally but we can
4368	# check if this rsync build supports it and use it then. This check is
4369	# based on the same check in the rsync test suite (testsuite/crtimes.test).
4370	#
4371	# We check ctimes even with zil_replay=1 because the ZIL does store
4372	# creation times and we should make sure they match (if the creation times
4373	# do not match there is a "c" entry in one of the columns).
4374	if ( rsync --version | grep -q "[, ] crtimes" >/dev/null ); then
4375		args+=("--crtimes")
4376	else
4377		echo "NOTE: This rsync package does not support --crtimes (-N)."
4378	fi
4379
4380	# If we are testing a ZIL replay, we need to ignore timestamp changes.
4381	# Unfortunately --no-times doesn't do what we want -- it will still tell
4382	# you if the timestamps don't match but rsync will set the timestamps to
4383	# the current time (leading to an itemised change entry). It's simpler to
4384	# just filter out those lines.
4385	if [ "$zil_replay" -eq 0 ]; then
4386		filter=("cat")
4387	else
4388		# Different rsync versions have different numbers of columns. So just
4389		# require that aside from the first two, all other columns must be
4390		# blank (literal ".") or a timestamp field ("[tT]").
4391		filter=("grep" "-v" '^\..[.Tt]\+ ')
4392	fi
4393
4394	diff="$(rsync "${args[@]}" "$dir_a/" "$dir_b/" | "${filter[@]}")"
4395	rv=0
4396	if [ -n "$diff" ]; then
4397		echo "$diff"
4398		rv=1
4399	fi
4400	return $rv
4401}
4402
4403#
4404# Compare two directory trees recursively, without checking whether the mtimes
4405# match (creation times will be checked if the available rsync binary supports
4406# it). This is necessary for ZIL replay checks (because the ZIL does not
4407# contain mtimes and thus after a ZIL replay, mtimes won't match).
4408#
4409# This is shorthand for LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff <...>.
4410#
4411function replay_directory_diff # dir_a dir_b
4412{
4413	LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff "$@"
4414	return $?
4415}
4416