1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
24# Copyright (c) 2012, 2020, Delphix. All rights reserved.
25# Copyright (c) 2017, Tim Chase. All rights reserved.
26# Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
27# Copyright (c) 2017, Lawrence Livermore National Security LLC.
28# Copyright (c) 2017, Datto Inc. All rights reserved.
29# Copyright (c) 2017, Open-E Inc. All rights reserved.
30# Use is subject to license terms.
31#
32
33. ${STF_TOOLS}/include/logapi.shlib
34. ${STF_SUITE}/include/math.shlib
35. ${STF_SUITE}/include/blkdev.shlib
36
37. ${STF_SUITE}/include/tunables.cfg
38
39#
40# Apply constrained path when available.  This is required since the
41# PATH may have been modified by sudo's secure_path behavior.
42#
43if [ -n "$STF_PATH" ]; then
44	PATH="$STF_PATH"
45fi
46
47#
48# Generic dot version comparison function
49#
50# Returns success when version $1 is greater than or equal to $2.
51#
52function compare_version_gte
53{
54	if [[ "$(printf "$1\n$2" | sort -V | tail -n1)" == "$1" ]]; then
55		return 0
56	else
57		return 1
58	fi
59}
60
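#
# Illustrative sketch (not used by any test): gate a test on a minimum
# version string using compare_version_gte above.  The function name and
# the "3.0" floor are assumptions made for this example only.
#
function example_require_version # installed_version
{
	typeset installed="$1"

	if ! compare_version_gte "$installed" "3.0"; then
		log_unsupported "Version $installed is older than 3.0"
	fi
}
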
61# Linux kernel version comparison function
62#
63# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
64#
65# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
66#
67function linux_version
68{
69	typeset ver="$1"
70
71	[[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
72
73	typeset version=$(echo $ver | cut -d '.' -f 1)
74	typeset major=$(echo $ver | cut -d '.' -f 2)
75	typeset minor=$(echo $ver | cut -d '.' -f 3)
76
77	[[ -z "$version" ]] && version=0
78	[[ -z "$major" ]] && major=0
79	[[ -z "$minor" ]] && minor=0
80
81	echo $((version * 10000 + major * 100 + minor))
82}
83
84# Determine if this is a Linux test system
85#
86# Return 0 if platform Linux, 1 if otherwise
87
88function is_linux
89{
90	if [[ $(uname -o) == "GNU/Linux" ]]; then
91		return 0
92	else
93		return 1
94	fi
95}
96
97# Determine if this is an illumos test system
98#
99# Return 0 if platform illumos, 1 if otherwise
100function is_illumos
101{
102	if [[ $(uname -o) == "illumos" ]]; then
103		return 0
104	else
105		return 1
106	fi
107}
108
109# Determine if this is a FreeBSD test system
110#
111# Return 0 if platform FreeBSD, 1 if otherwise
112
113function is_freebsd
114{
115	if [[ $(uname -o) == "FreeBSD" ]]; then
116		return 0
117	else
118		return 1
119	fi
120}
121
122# Determine if this is a DilOS test system
123#
124# Return 0 if platform DilOS, 1 if otherwise
125
126function is_dilos
127{
128	typeset ID=""
129	[[ -f /etc/os-release ]] && . /etc/os-release
130	if [[ $ID == "dilos" ]]; then
131		return 0
132	else
133		return 1
134	fi
135}
136
137# Determine if this is a 32-bit system
138#
139# Return 0 if platform is 32-bit, 1 if otherwise
140
141function is_32bit
142{
143	if [[ $(getconf LONG_BIT) == "32" ]]; then
144		return 0
145	else
146		return 1
147	fi
148}
149
150# Determine if kmemleak is enabled
151#
152# Return 0 if kmemleak is enabled, 1 if otherwise
153
154function is_kmemleak
155{
156	if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
157		return 0
158	else
159		return 1
160	fi
161}
162
163# Determine whether a dataset is mounted
164#
165# $1 dataset name
166# $2 filesystem type; optional - defaulted to zfs
167#
168# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
169
170function ismounted
171{
172	typeset fstype=$2
173	[[ -z $fstype ]] && fstype=zfs
174	typeset out dir name ret
175
176	case $fstype in
177		zfs)
178			if [[ "$1" == "/"* ]] ; then
179				for out in $(zfs mount | awk '{print $2}'); do
180					[[ $1 == $out ]] && return 0
181				done
182			else
183				for out in $(zfs mount | awk '{print $1}'); do
184					[[ $1 == $out ]] && return 0
185				done
186			fi
187		;;
188		ufs|nfs)
189			if is_freebsd; then
190				mount -pt $fstype | while read dev dir _t _flags; do
191					[[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
192				done
193			else
194				out=$(df -F $fstype $1 2>/dev/null)
195				ret=$?
196				(($ret != 0)) && return $ret
197
198				dir=${out%%\(*}
199				dir=${dir%% *}
200				name=${out##*\(}
201				name=${name%%\)*}
202				name=${name%% *}
203
204				[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
205			fi
206		;;
207		ext*)
208			out=$(df -t $fstype $1 2>/dev/null)
209			return $?
210		;;
211		zvol)
212			if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
213				link=$(readlink -f $ZVOL_DEVDIR/$1)
214				[[ -n "$link" ]] && \
215					mount | grep -q "^$link" && \
216						return 0
217			fi
218		;;
219	esac
220
221	return 1
222}
223
224# Return 0 if a dataset is mounted; 1 otherwise
225#
226# $1 dataset name
227# $2 filesystem type; optional - defaulted to zfs
228
229function mounted
230{
231	ismounted $1 $2
232	(($? == 0)) && return 0
233	return 1
234}
235
236# Return 0 if a dataset is unmounted; 1 otherwise
237#
238# $1 dataset name
239# $2 filesystem type; optional - defaulted to zfs
240
241function unmounted
242{
243	ismounted $1 $2
244	(($? == 1)) && return 0
245	return 1
246}
247
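#
# Illustrative sketch (not part of the original API): poll ismounted() until
# a dataset shows up as mounted or a timeout expires.  The name and default
# timeout below are assumptions for this example.
#
function example_wait_mounted # dataset [timeout]
{
	typeset ds=$1
	typeset -i timeout=${2:-30}
	typeset -i i=0

	while ((i < timeout)); do
		ismounted $ds && return 0
		sleep 1
		((i += 1))
	done
	return 1
}
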
248# split line on ","
249#
250# $1 - line to split
251
252function splitline
253{
254	echo $1 | sed "s/,/ /g"
255}
256
257function default_setup
258{
259	default_setup_noexit "$@"
260
261	log_pass
262}
263
264function default_setup_no_mountpoint
265{
266	default_setup_noexit "$1" "$2" "$3" "yes"
267
268	log_pass
269}
270
271#
272# Given a list of disks, setup storage pools and datasets.
273#
274function default_setup_noexit
275{
276	typeset disklist=$1
277	typeset container=$2
278	typeset volume=$3
279	typeset no_mountpoint=$4
280	log_note begin default_setup_noexit
281
282	if is_global_zone; then
283		if poolexists $TESTPOOL ; then
284			destroy_pool $TESTPOOL
285		fi
286		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
287		log_must zpool create -f $TESTPOOL $disklist
288	else
289		reexport_pool
290	fi
291
292	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
293	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
294
295	log_must zfs create $TESTPOOL/$TESTFS
296	if [[ -z $no_mountpoint ]]; then
297		log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
298	fi
299
300	if [[ -n $container ]]; then
301		rm -rf $TESTDIR1  || \
302			log_unresolved Could not remove $TESTDIR1
303		mkdir -p $TESTDIR1 || \
304			log_unresolved Could not create $TESTDIR1
305
306		log_must zfs create $TESTPOOL/$TESTCTR
307		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
308		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
309		if [[ -z $no_mountpoint ]]; then
310			log_must zfs set mountpoint=$TESTDIR1 \
311			    $TESTPOOL/$TESTCTR/$TESTFS1
312		fi
313	fi
314
315	if [[ -n $volume ]]; then
316		if is_global_zone ; then
317			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
318			block_device_wait
319		else
320			log_must zfs create $TESTPOOL/$TESTVOL
321		fi
322	fi
323}
324
325#
326# Given a list of disks, setup a storage pool, file system and
327# a container.
328#
329function default_container_setup
330{
331	typeset disklist=$1
332
333	default_setup "$disklist" "true"
334}
335
336#
# Given a list of disks, setup a storage pool, file system
338# and a volume.
339#
340function default_volume_setup
341{
342	typeset disklist=$1
343
344	default_setup "$disklist" "" "true"
345}
346
347#
# Given a list of disks, setup a storage pool, file system,
349# a container and a volume.
350#
351function default_container_volume_setup
352{
353	typeset disklist=$1
354
355	default_setup "$disklist" "true" "true"
356}
357
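#
# Illustrative sketch only: a typical setup.ksh builds everything it needs
# with a single call to one of the wrappers above; the function below is
# the equivalent of default_container_volume_setup without the log_pass
# exit.  The name is an assumption for this example.
#
function example_setup_all # disklist
{
	typeset disklist=$1

	# pool + filesystem + container + volume, mountpoints included
	default_setup_noexit "$disklist" "true" "true"
}
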
358#
# Create a snapshot on a filesystem or volume. By default, creates a
# snapshot on the filesystem.
361#
362# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
363# $2 snapshot name. Default, $TESTSNAP
364#
365function create_snapshot
366{
367	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
368	typeset snap=${2:-$TESTSNAP}
369
370	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
371	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
372
373	if snapexists $fs_vol@$snap; then
374		log_fail "$fs_vol@$snap already exists."
375	fi
376	datasetexists $fs_vol || \
377		log_fail "$fs_vol must exist."
378
379	log_must zfs snapshot $fs_vol@$snap
380}
381
382#
383# Create a clone from a snapshot, default clone name is $TESTCLONE.
384#
385# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
386# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
387#
388function create_clone   # snapshot clone
389{
390	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
391	typeset clone=${2:-$TESTPOOL/$TESTCLONE}
392
393	[[ -z $snap ]] && \
394		log_fail "Snapshot name is undefined."
395	[[ -z $clone ]] && \
396		log_fail "Clone name is undefined."
397
398	log_must zfs clone $snap $clone
399}
400
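#
# Illustrative sketch (not used by any test): exercise the snapshot/clone
# helpers above as a matched create/destroy cycle.  destroy_clone() and
# destroy_snapshot() are defined later in this file.
#
function example_snapshot_clone_cycle
{
	create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
	create_clone $TESTPOOL/$TESTFS@$TESTSNAP $TESTPOOL/$TESTCLONE
	destroy_clone $TESTPOOL/$TESTCLONE
	destroy_snapshot $TESTPOOL/$TESTFS@$TESTSNAP
}
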
401#
# Create a bookmark of the given snapshot. By default, creates a bookmark
# on the filesystem.
404#
405# $1 Existing filesystem or volume name. Default, $TESTFS
406# $2 Existing snapshot name. Default, $TESTSNAP
407# $3 bookmark name. Default, $TESTBKMARK
408#
409function create_bookmark
410{
411	typeset fs_vol=${1:-$TESTFS}
412	typeset snap=${2:-$TESTSNAP}
413	typeset bkmark=${3:-$TESTBKMARK}
414
415	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
416	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
417	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
418
419	if bkmarkexists $fs_vol#$bkmark; then
420		log_fail "$fs_vol#$bkmark already exists."
421	fi
422	datasetexists $fs_vol || \
423		log_fail "$fs_vol must exist."
424	snapexists $fs_vol@$snap || \
425		log_fail "$fs_vol@$snap must exist."
426
427	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
428}
429
430#
431# Create a temporary clone result of an interrupted resumable 'zfs receive'
432# $1 Destination filesystem name. Must not exist, will be created as the result
433#    of this function along with its %recv temporary clone
434# $2 Source filesystem name. Must not exist, will be created and destroyed
435#
436function create_recv_clone
437{
438	typeset recvfs="$1"
439	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
440	typeset snap="$sendfs@snap1"
441	typeset incr="$sendfs@snap2"
442	typeset mountpoint="$TESTDIR/create_recv_clone"
443	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
444
445	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
446
447	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
448	datasetexists $sendfs && log_fail "Send filesystem must not exist."
449
450	log_must zfs create -o mountpoint="$mountpoint" $sendfs
451	log_must zfs snapshot $snap
452	log_must eval "zfs send $snap | zfs recv -u $recvfs"
453	log_must mkfile 1m "$mountpoint/data"
454	log_must zfs snapshot $incr
455	log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
456	    iflag=fullblock > $sendfile"
457	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
458	destroy_dataset "$sendfs" "-r"
459	log_must rm -f "$sendfile"
460
461	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
462		log_fail "Error creating temporary $recvfs/%recv clone"
463	fi
464}
465
466function default_mirror_setup
467{
468	default_mirror_setup_noexit $1 $2 $3
469
470	log_pass
471}
472
473#
474# Given a pair of disks, set up a storage pool and dataset for the mirror
475# @parameters: $1 the primary side of the mirror
476#   $2 the secondary side of the mirror
477# @uses: ZPOOL ZFS TESTPOOL TESTFS
478function default_mirror_setup_noexit
479{
480	readonly func="default_mirror_setup_noexit"
481	typeset primary=$1
482	typeset secondary=$2
483
484	[[ -z $primary ]] && \
485		log_fail "$func: No parameters passed"
486	[[ -z $secondary ]] && \
487		log_fail "$func: No secondary partition passed"
488	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
489	log_must zpool create -f $TESTPOOL mirror $@
490	log_must zfs create $TESTPOOL/$TESTFS
491	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
492}
493
494#
495# create a number of mirrors.
# We create a number ($1) of 2-way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted.
498# @parameters: $1 the number of mirrors to create
499#  $... the devices to use to create the mirrors on
500# @uses: ZPOOL ZFS TESTPOOL
501function setup_mirrors
502{
503	typeset -i nmirrors=$1
504
505	shift
506	while ((nmirrors > 0)); do
507		log_must test -n "$1" -a -n "$2"
508		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
509		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
510		shift 2
511		((nmirrors = nmirrors - 1))
512	done
513}
514
515#
516# create a number of raidz pools.
# We create a number ($1) of 2-disk raidz pools using the pairs of disks
# named on the command line. These pools are *not* mounted.
519# @parameters: $1 the number of pools to create
520#  $... the devices to use to create the pools on
521# @uses: ZPOOL ZFS TESTPOOL
522function setup_raidzs
523{
524	typeset -i nraidzs=$1
525
526	shift
527	while ((nraidzs > 0)); do
528		log_must test -n "$1" -a -n "$2"
529		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
530		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
531		shift 2
532		((nraidzs = nraidzs - 1))
533	done
534}
535
536#
537# Destroy the configured testpool mirrors.
538# the mirrors are of the form ${TESTPOOL}{number}
539# @uses: ZPOOL ZFS TESTPOOL
540function destroy_mirrors
541{
542	default_cleanup_noexit
543
544	log_pass
545}
546
547#
548# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
549# $1 the list of disks
550#
551function default_raidz_setup
552{
553	typeset disklist="$*"
554	disks=(${disklist[*]})
555
556	if [[ ${#disks[*]} -lt 2 ]]; then
557		log_fail "A raid-z requires a minimum of two disks."
558	fi
559
560	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
561	log_must zpool create -f $TESTPOOL raidz $disklist
562	log_must zfs create $TESTPOOL/$TESTFS
563	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
564
565	log_pass
566}
567
568#
569# Common function used to cleanup storage pools and datasets.
570#
571# Invoked at the start of the test suite to ensure the system
572# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests don't
# impact the execution of the next set.
575
576function default_cleanup
577{
578	default_cleanup_noexit
579
580	log_pass
581}
582
583#
584# Utility function used to list all available pool names.
585#
586# NOTE: $KEEP is a variable containing pool names, separated by a newline
587# character, that must be excluded from the returned list.
588#
589function get_all_pools
590{
591	zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
592}
593
594function default_cleanup_noexit
595{
596	typeset pool=""
597	#
598	# Destroying the pool will also destroy any
599	# filesystems it contains.
600	#
601	if is_global_zone; then
602		zfs unmount -a > /dev/null 2>&1
603		ALL_POOLS=$(get_all_pools)
604		# Here, we loop through the pools we're allowed to
605		# destroy, only destroying them if it's safe to do
606		# so.
		while [ -n "${ALL_POOLS}" ]
608		do
609			for pool in ${ALL_POOLS}
610			do
611				if safe_to_destroy_pool $pool ;
612				then
613					destroy_pool $pool
614				fi
615			done
616			ALL_POOLS=$(get_all_pools)
617		done
618
619		zfs mount -a
620	else
621		typeset fs=""
622		for fs in $(zfs list -H -o name \
623		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
624			destroy_dataset "$fs" "-Rf"
625		done
626
		# Clean up here to avoid leaving garbage directories behind.
628		for fs in $(zfs list -H -o name); do
629			[[ $fs == /$ZONE_POOL ]] && continue
630			[[ -d $fs ]] && log_must rm -rf $fs/*
631		done
632
633		#
634		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
635		# the default value
636		#
637		for fs in $(zfs list -H -o name); do
638			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
639				log_must zfs set reservation=none $fs
640				log_must zfs set recordsize=128K $fs
641				log_must zfs set mountpoint=/$fs $fs
642				typeset enc=""
643				enc=$(get_prop encryption $fs)
644				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
645					[[ "$enc" == "off" ]]; then
646					log_must zfs set checksum=on $fs
647				fi
648				log_must zfs set compression=off $fs
649				log_must zfs set atime=on $fs
650				log_must zfs set devices=off $fs
651				log_must zfs set exec=on $fs
652				log_must zfs set setuid=on $fs
653				log_must zfs set readonly=off $fs
654				log_must zfs set snapdir=hidden $fs
655				log_must zfs set aclmode=groupmask $fs
656				log_must zfs set aclinherit=secure $fs
657			fi
658		done
659	fi
660
661	[[ -d $TESTDIR ]] && \
662		log_must rm -rf $TESTDIR
663
664	disk1=${DISKS%% *}
665	if is_mpath_device $disk1; then
666		delete_partitions
667	fi
668
669	rm -f $TEST_BASE_DIR/{err,out}
670}
671
672
673#
674# Common function used to cleanup storage pools, file systems
675# and containers.
676#
677function default_container_cleanup
678{
679	if ! is_global_zone; then
680		reexport_pool
681	fi
682
683	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
684	[[ $? -eq 0 ]] && \
685	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
686
687	destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
688	destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
689
690	[[ -e $TESTDIR1 ]] && \
691	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1
692
693	default_cleanup
694}
695
696#
# Common function used to clean up a snapshot of a file system or volume.
# By default, deletes the file system's snapshot.
699#
700# $1 snapshot name
701#
702function destroy_snapshot
703{
704	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
705
706	if ! snapexists $snap; then
707		log_fail "'$snap' does not exist."
708	fi
709
710	#
	# The mountpoint value returned by 'get_prop' does not match the real
	# mountpoint when the snapshot is unmounted, so first check that the
	# snapshot is actually mounted on the current system.
714	#
715	typeset mtpt=""
716	if ismounted $snap; then
717		mtpt=$(get_prop mountpoint $snap)
718		(($? != 0)) && \
719			log_fail "get_prop mountpoint $snap failed."
720	fi
721
722	destroy_dataset "$snap"
723	[[ $mtpt != "" && -d $mtpt ]] && \
724		log_must rm -rf $mtpt
725}
726
727#
# Common function used to clean up a clone.
729#
730# $1 clone name
731#
732function destroy_clone
733{
734	typeset clone=${1:-$TESTPOOL/$TESTCLONE}
735
736	if ! datasetexists $clone; then
		log_fail "'$clone' does not exist."
738	fi
739
	# Same reasoning as in destroy_snapshot
741	typeset mtpt=""
742	if ismounted $clone; then
743		mtpt=$(get_prop mountpoint $clone)
744		(($? != 0)) && \
745			log_fail "get_prop mountpoint $clone failed."
746	fi
747
748	destroy_dataset "$clone"
749	[[ $mtpt != "" && -d $mtpt ]] && \
750		log_must rm -rf $mtpt
751}
752
753#
# Common function used to clean up a bookmark of a file system or volume.
# By default, deletes the file system's bookmark.
756#
757# $1 bookmark name
758#
759function destroy_bookmark
760{
761	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
762
763	if ! bkmarkexists $bkmark; then
		log_fail "'$bkmark' does not exist."
765	fi
766
767	destroy_dataset "$bkmark"
768}
769
770# Return 0 if a snapshot exists; $? otherwise
771#
772# $1 - snapshot name
773
774function snapexists
775{
776	zfs list -H -t snapshot "$1" > /dev/null 2>&1
777	return $?
778}
779
780#
781# Return 0 if a bookmark exists; $? otherwise
782#
783# $1 - bookmark name
784#
785function bkmarkexists
786{
787	zfs list -H -t bookmark "$1" > /dev/null 2>&1
788	return $?
789}
790
791#
792# Return 0 if a hold exists; $? otherwise
793#
794# $1 - hold tag
795# $2 - snapshot name
796#
797function holdexists
798{
799	zfs holds "$2" | awk '{ print $2 }' | grep "$1" > /dev/null 2>&1
800	return $?
801}
802
803#
804# Set a property to a certain value on a dataset.
805# Sets a property of the dataset to the value as passed in.
806# @param:
#	$1 dataset whose property is being set
808#	$2 property to set
809#	$3 value to set property to
810# @return:
811#	0 if the property could be set.
812#	non-zero otherwise.
813# @use: ZFS
814#
815function dataset_setprop
816{
817	typeset fn=dataset_setprop
818
819	if (($# < 3)); then
820		log_note "$fn: Insufficient parameters (need 3, had $#)"
821		return 1
822	fi
823	typeset output=
824	output=$(zfs set $2=$3 $1 2>&1)
825	typeset rv=$?
826	if ((rv != 0)); then
827		log_note "Setting property on $1 failed."
828		log_note "property $2=$3"
829		log_note "Return Code: $rv"
830		log_note "Output: $output"
831		return $rv
832	fi
833	return 0
834}
835
836#
837# Assign suite defined dataset properties.
838# This function is used to apply the suite's defined default set of
839# properties to a dataset.
840# @parameters: $1 dataset to use
841# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
842# @returns:
843#   0 if the dataset has been altered.
844#   1 if no pool name was passed in.
845#   2 if the dataset could not be found.
#   3 if the dataset could not have its properties set.
847#
848function dataset_set_defaultproperties
849{
850	typeset dataset="$1"
851
852	[[ -z $dataset ]] && return 1
853
854	typeset confset=
855	typeset -i found=0
856	for confset in $(zfs list); do
857		if [[ $dataset = $confset ]]; then
858			found=1
859			break
860		fi
861	done
862	[[ $found -eq 0 ]] && return 2
863	if [[ -n $COMPRESSION_PROP ]]; then
864		dataset_setprop $dataset compression $COMPRESSION_PROP || \
865			return 3
866		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
867	fi
868	if [[ -n $CHECKSUM_PROP ]]; then
869		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
870			return 3
871		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
872	fi
873	return 0
874}
875
876#
877# Check a numeric assertion
878# @parameter: $@ the assertion to check
879# @output: big loud notice if assertion failed
880# @use: log_fail
881#
882function assert
883{
884	(($@)) || log_fail "$@"
885}
886
887#
# Function to zero out the partitions of a disk.
# Given a disk (cxtxdx), reduces all partitions to 0 size.
891#
892function zero_partitions #<whole_disk_name>
893{
894	typeset diskname=$1
895	typeset i
896
897	if is_freebsd; then
898		gpart destroy -F $diskname
899	elif is_linux; then
900		DSK=$DEV_DSKDIR/$diskname
901		DSK=$(echo $DSK | sed -e "s|//|/|g")
902		log_must parted $DSK -s -- mklabel gpt
903		blockdev --rereadpt $DSK 2>/dev/null
904		block_device_wait
905	else
906		for i in 0 1 3 4 5 6 7
907		do
908			log_must set_partition $i "" 0mb $diskname
909		done
910	fi
911
912	return 0
913}
914
915#
916# Given a slice, size and disk, this function
917# formats the slice to the specified size.
918# Size should be specified with units as per
919# the `format` command requirements eg. 100mb 3gb
920#
921# NOTE: This entire interface is problematic for the Linux parted utility
922# which requires the end of the partition to be specified.  It would be
923# best to retire this interface and replace it with something more flexible.
924# At the moment a best effort is made.
925#
926# arguments: <slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
927function set_partition
928{
929	typeset -i slicenum=$1
930	typeset start=$2
931	typeset size=$3
932	typeset disk=${4#$DEV_DSKDIR/}
933	disk=${disk#$DEV_RDSKDIR/}
934
935	case "$(uname)" in
936	Linux)
937		if [[ -z $size || -z $disk ]]; then
938			log_fail "The size or disk name is unspecified."
939		fi
940		disk=$DEV_DSKDIR/$disk
941		typeset size_mb=${size%%[mMgG]}
942
943		size_mb=${size_mb%%[mMgG][bB]}
944		if [[ ${size:1:1} == 'g' ]]; then
945			((size_mb = size_mb * 1024))
946		fi
947
948		# Create GPT partition table when setting slice 0 or
949		# when the device doesn't already contain a GPT label.
950		parted $disk -s -- print 1 >/dev/null
951		typeset ret_val=$?
952		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
953			parted $disk -s -- mklabel gpt
954			if [[ $? -ne 0 ]]; then
955				log_note "Failed to create GPT partition table on $disk"
956				return 1
957			fi
958		fi
959
960		# When no start is given align on the first cylinder.
961		if [[ -z "$start" ]]; then
962			start=1
963		fi
964
		# Determine the cylinder size for the device and use
		# that to calculate the end offset in cylinders.
967		typeset -i cly_size_kb=0
968		cly_size_kb=$(parted -m $disk -s -- \
969			unit cyl print | head -3 | tail -1 | \
970			awk -F '[:k.]' '{print $4}')
971		((end = (size_mb * 1024 / cly_size_kb) + start))
972
973		parted $disk -s -- \
974		    mkpart part$slicenum ${start}cyl ${end}cyl
975		typeset ret_val=$?
976		if [[ $ret_val -ne 0 ]]; then
977			log_note "Failed to create partition $slicenum on $disk"
978			return 1
979		fi
980
981		blockdev --rereadpt $disk 2>/dev/null
982		block_device_wait $disk
983		;;
984	FreeBSD)
985		if [[ -z $size || -z $disk ]]; then
986			log_fail "The size or disk name is unspecified."
987		fi
988		disk=$DEV_DSKDIR/$disk
989
990		if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
991			gpart destroy -F $disk >/dev/null 2>&1
992			gpart create -s GPT $disk
993			if [[ $? -ne 0 ]]; then
994				log_note "Failed to create GPT partition table on $disk"
995				return 1
996			fi
997		fi
998
999		typeset index=$((slicenum + 1))
1000
1001		if [[ -n $start ]]; then
1002			start="-b $start"
1003		fi
		gpart add -t freebsd-zfs $start -s $size -i $index $disk
		typeset ret_val=$?
		if [[ $ret_val -ne 0 ]]; then
1006			log_note "Failed to create partition $slicenum on $disk"
1007			return 1
1008		fi
1009
1010		block_device_wait $disk
1011		;;
1012	*)
1013		if [[ -z $slicenum || -z $size || -z $disk ]]; then
1014			log_fail "The slice, size or disk name is unspecified."
1015		fi
1016
1017		typeset format_file=/var/tmp/format_in.$$
1018
1019		echo "partition" >$format_file
1020		echo "$slicenum" >> $format_file
1021		echo "" >> $format_file
1022		echo "" >> $format_file
1023		echo "$start" >> $format_file
1024		echo "$size" >> $format_file
1025		echo "label" >> $format_file
1026		echo "" >> $format_file
1027		echo "q" >> $format_file
1028		echo "q" >> $format_file
1029
1030		format -e -s -d $disk -f $format_file
1031		typeset ret_val=$?
1032		rm -f $format_file
1033		;;
1034	esac
1035
1036	if [[ $ret_val -ne 0 ]]; then
1037		log_note "Unable to format $disk slice $slicenum to $size"
1038		return 1
1039	fi
1040	return 0
1041}
1042
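#
# Illustrative sketch: carve two 100mb slices out of a scratch disk using
# set_partition() and get_endslice(), mirroring what partition_disk() does
# below.  The function name is an assumption for this example.
#
function example_two_slices # disk
{
	typeset disk=$1
	typeset cyl

	log_must set_partition 0 "" 100mb $disk
	cyl=$(get_endslice $disk 0)
	log_must set_partition 1 "$cyl" 100mb $disk
}
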
1043#
1044# Delete all partitions on all disks - this is specifically for the use of multipath
1045# devices which currently can only be used in the test suite as raw/un-partitioned
1046# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
1047#
1048function delete_partitions
1049{
1050	typeset disk
1051
1052	if [[ -z $DISKSARRAY ]]; then
1053		DISKSARRAY=$DISKS
1054	fi
1055
1056	if is_linux; then
1057		typeset -i part
1058		for disk in $DISKSARRAY; do
1059			for (( part = 1; part < MAX_PARTITIONS; part++ )); do
1060				typeset partition=${disk}${SLICE_PREFIX}${part}
1061				parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
1062				if lsblk | grep -qF ${partition}; then
1063					log_fail "Partition ${partition} not deleted"
1064				else
1065					log_note "Partition ${partition} deleted"
1066				fi
1067			done
1068		done
1069	elif is_freebsd; then
1070		for disk in $DISKSARRAY; do
1071			if gpart destroy -F $disk; then
1072				log_note "Partitions for ${disk} deleted"
1073			else
1074				log_fail "Partitions for ${disk} not deleted"
1075			fi
1076		done
1077	fi
1078}
1079
1080#
1081# Get the end cyl of the given slice
1082#
1083function get_endslice #<disk> <slice>
1084{
1085	typeset disk=$1
1086	typeset slice=$2
1087	if [[ -z $disk || -z $slice ]] ; then
1088		log_fail "The disk name or slice number is unspecified."
1089	fi
1090
1091	case "$(uname)" in
1092	Linux)
1093		endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
1094			grep "part${slice}" | \
1095			awk '{print $3}' | \
1096			sed 's,cyl,,')
1097		((endcyl = (endcyl + 1)))
1098		;;
1099	FreeBSD)
1100		disk=${disk#/dev/zvol/}
1101		disk=${disk%p*}
1102		slice=$((slice + 1))
1103		endcyl=$(gpart show $disk | \
1104			awk -v slice=$slice '$3 == slice { print $1 + $2 }')
1105		;;
1106	*)
1107		disk=${disk#/dev/dsk/}
1108		disk=${disk#/dev/rdsk/}
1109		disk=${disk%s*}
1110
1111		typeset -i ratio=0
1112		ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
1113		    grep "sectors\/cylinder" | \
1114		    awk '{print $2}')
1115
1116		if ((ratio == 0)); then
1117			return
1118		fi
1119
1120		typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
1121		    nawk -v token="$slice" '{if ($1==token) print $6}')
1122
1123		((endcyl = (endcyl + 1) / ratio))
1124		;;
1125	esac
1126
1127	echo $endcyl
1128}
1129
1130
1131#
# Given a size, disk and total slice number, this function formats the
1133# disk slices from 0 to the total slice number with the same specified
1134# size.
1135#
1136function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
1137{
1138	typeset -i i=0
1139	typeset slice_size=$1
1140	typeset disk_name=$2
1141	typeset total_slices=$3
1142	typeset cyl
1143
1144	zero_partitions $disk_name
1145	while ((i < $total_slices)); do
1146		if ! is_linux; then
1147			if ((i == 2)); then
1148				((i = i + 1))
1149				continue
1150			fi
1151		fi
1152		log_must set_partition $i "$cyl" $slice_size $disk_name
1153		cyl=$(get_endslice $disk_name $i)
1154		((i = i+1))
1155	done
1156}
1157
1158#
# This function writes filenum files into each of dirnum directories
# until either file_write returns an error or the maximum number of
# files per directory has been written.
1162#
1163# Usage:
1164# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1165#
1166# Return value: 0 on success
1167#		non 0 on error
1168#
1169# Where :
1170#	destdir:    is the directory where everything is to be created under
1171#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
1172#	filenum:    the maximum number of files per subdirectory
1173#	bytes:	    number of bytes to write
#	num_writes: number of times to write out bytes
1175#	data:	    the data that will be written
1176#
1177#	E.g.
1178#	fill_fs /testdir 20 25 1024 256 0
1179#
1180# Note: bytes * num_writes equals the size of the testfile
1181#
1182function fill_fs # destdir dirnum filenum bytes num_writes data
1183{
1184	typeset destdir=${1:-$TESTDIR}
1185	typeset -i dirnum=${2:-50}
1186	typeset -i filenum=${3:-50}
1187	typeset -i bytes=${4:-8192}
1188	typeset -i num_writes=${5:-10240}
1189	typeset data=${6:-0}
1190
1191	mkdir -p $destdir/{1..$dirnum}
1192	for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
1193		file_write -o create -f $f -b $bytes -c $num_writes -d $data \
1194		|| return $?
1195	done
1196	return 0
1197}
1198
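#
# Illustrative sketch: populate a directory with a small, fixed amount of
# data via fill_fs() -- 10 subdirectories, 5 files each, 8192 bytes written
# once per file with a data byte of 0.  The name and sizes are arbitrary
# choices for this example.
#
function example_fill_small # [destdir]
{
	typeset dir=${1:-$TESTDIR}

	log_must fill_fs $dir 10 5 8192 1 0
}
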
1199#
# Simple function to get the specified property. If unable to
# get the property then a note is logged and 1 is returned.
1202#
1203# Note property is in 'parsable' format (-p)
1204#
1205function get_prop # property dataset
1206{
1207	typeset prop_val
1208	typeset prop=$1
1209	typeset dataset=$2
1210
1211	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
1212	if [[ $? -ne 0 ]]; then
1213		log_note "Unable to get $prop property for dataset " \
1214		"$dataset"
1215		return 1
1216	fi
1217
1218	echo "$prop_val"
1219	return 0
1220}
1221
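#
# Illustrative sketch: read a single property with get_prop() and report it.
# The function name is an assumption for this example.
#
function example_log_recordsize # dataset
{
	typeset ds=$1
	typeset rs

	rs=$(get_prop recordsize $ds) || return 1
	log_note "$ds recordsize is $rs bytes"
}
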
1222#
# Simple function to get the specified property of a pool. If unable to
# get the property then a note is logged and 1 is returned.
1225#
1226# Note property is in 'parsable' format (-p)
1227#
1228function get_pool_prop # property pool
1229{
1230	typeset prop_val
1231	typeset prop=$1
1232	typeset pool=$2
1233
1234	if poolexists $pool ; then
1235		prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
1236			awk '{print $3}')
1237		if [[ $? -ne 0 ]]; then
1238			log_note "Unable to get $prop property for pool " \
1239			"$pool"
1240			return 1
1241		fi
1242	else
		log_note "Pool $pool does not exist."
1244		return 1
1245	fi
1246
1247	echo "$prop_val"
1248	return 0
1249}
1250
1251# Return 0 if a pool exists; $? otherwise
1252#
1253# $1 - pool name
1254
1255function poolexists
1256{
1257	typeset pool=$1
1258
1259	if [[ -z $pool ]]; then
1260		log_note "No pool name given."
1261		return 1
1262	fi
1263
1264	zpool get name "$pool" > /dev/null 2>&1
1265	return $?
1266}
1267
1268# Return 0 if all the specified datasets exist; $? otherwise
1269#
1270# $1-n  dataset name
1271function datasetexists
1272{
1273	if (($# == 0)); then
1274		log_note "No dataset name given."
1275		return 1
1276	fi
1277
1278	while (($# > 0)); do
1279		zfs get name $1 > /dev/null 2>&1 || \
1280			return $?
1281		shift
1282	done
1283
1284	return 0
1285}
1286
1287# return 0 if none of the specified datasets exists, otherwise return 1.
1288#
1289# $1-n  dataset name
1290function datasetnonexists
1291{
1292	if (($# == 0)); then
1293		log_note "No dataset name given."
1294		return 1
1295	fi
1296
1297	while (($# > 0)); do
1298		zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1299		    && return 1
1300		shift
1301	done
1302
1303	return 0
1304}
1305
1306function is_shared_freebsd
1307{
1308	typeset fs=$1
1309
1310	pgrep -q mountd && showmount -E | grep -qx $fs
1311}
1312
1313function is_shared_illumos
1314{
1315	typeset fs=$1
1316	typeset mtpt
1317
1318	for mtpt in `share | awk '{print $2}'` ; do
1319		if [[ $mtpt == $fs ]] ; then
1320			return 0
1321		fi
1322	done
1323
1324	typeset stat=$(svcs -H -o STA nfs/server:default)
1325	if [[ $stat != "ON" ]]; then
1326		log_note "Current nfs/server status: $stat"
1327	fi
1328
1329	return 1
1330}
1331
1332function is_shared_linux
1333{
1334	typeset fs=$1
1335	typeset mtpt
1336
1337	for mtpt in `share | awk '{print $1}'` ; do
1338		if [[ $mtpt == $fs ]] ; then
1339			return 0
1340		fi
1341	done
1342	return 1
1343}
1344
1345#
1346# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1347#
1348# Returns 0 if shared, 1 otherwise.
1349#
1350function is_shared
1351{
1352	typeset fs=$1
1353	typeset mtpt
1354
1355	if [[ $fs != "/"* ]] ; then
1356		if datasetnonexists "$fs" ; then
1357			return 1
1358		else
1359			mtpt=$(get_prop mountpoint "$fs")
1360			case $mtpt in
1361				none|legacy|-) return 1
1362					;;
1363				*)	fs=$mtpt
1364					;;
1365			esac
1366		fi
1367	fi
1368
1369	case $(uname) in
1370	FreeBSD)	is_shared_freebsd "$fs"	;;
1371	Linux)		is_shared_linux "$fs"	;;
1372	*)		is_shared_illumos "$fs"	;;
1373	esac
1374}
1375
1376function is_exported_illumos
1377{
1378	typeset fs=$1
1379	typeset mtpt
1380
1381	for mtpt in `awk '{print $1}' /etc/dfs/sharetab` ; do
1382		if [[ $mtpt == $fs ]] ; then
1383			return 0
1384		fi
1385	done
1386
1387	return 1
1388}
1389
1390function is_exported_freebsd
1391{
1392	typeset fs=$1
1393	typeset mtpt
1394
1395	for mtpt in `awk '{print $1}' /etc/zfs/exports` ; do
1396		if [[ $mtpt == $fs ]] ; then
1397			return 0
1398		fi
1399	done
1400
1401	return 1
1402}
1403
1404function is_exported_linux
1405{
1406	typeset fs=$1
1407	typeset mtpt
1408
1409	for mtpt in `awk '{print $1}' /etc/exports.d/zfs.exports` ; do
1410		if [[ $mtpt == $fs ]] ; then
1411			return 0
1412		fi
1413	done
1414
1415	return 1
1416}
1417
1418#
1419# Given a mountpoint, or a dataset name, determine if it is exported via
1420# the os-specific NFS exports file.
1421#
1422# Returns 0 if exported, 1 otherwise.
1423#
1424function is_exported
1425{
1426	typeset fs=$1
1427	typeset mtpt
1428
1429	if [[ $fs != "/"* ]] ; then
1430		if datasetnonexists "$fs" ; then
1431			return 1
1432		else
1433			mtpt=$(get_prop mountpoint "$fs")
1434			case $mtpt in
1435				none|legacy|-) return 1
1436					;;
1437				*)	fs=$mtpt
1438					;;
1439			esac
1440		fi
1441	fi
1442
1443	case $(uname) in
1444	FreeBSD)	is_exported_freebsd "$fs"	;;
1445	Linux)		is_exported_linux "$fs"	;;
1446	*)		is_exported_illumos "$fs"	;;
1447	esac
1448}
1449
1450#
1451# Given a dataset name determine if it is shared via SMB.
1452#
1453# Returns 0 if shared, 1 otherwise.
1454#
1455function is_shared_smb
1456{
1457	typeset fs=$1
1458	typeset mtpt
1459
1460	if datasetnonexists "$fs" ; then
1461		return 1
1462	else
1463		fs=$(echo $fs | sed 's@/@_@g')
1464	fi
1465
1466	if is_linux; then
1467		for mtpt in `net usershare list | awk '{print $1}'` ; do
1468			if [[ $mtpt == $fs ]] ; then
1469				return 0
1470			fi
1471		done
1472		return 1
1473	else
1474		log_note "Currently unsupported by the test framework"
1475		return 1
1476	fi
1477}
1478
1479#
1480# Given a mountpoint, determine if it is not shared via NFS.
1481#
1482# Returns 0 if not shared, 1 otherwise.
1483#
1484function not_shared
1485{
1486	typeset fs=$1
1487
1488	is_shared $fs
1489	if (($? == 0)); then
1490		return 1
1491	fi
1492
1493	return 0
1494}
1495
1496#
1497# Given a dataset determine if it is not shared via SMB.
1498#
1499# Returns 0 if not shared, 1 otherwise.
1500#
1501function not_shared_smb
1502{
1503	typeset fs=$1
1504
1505	is_shared_smb $fs
1506	if (($? == 0)); then
1507		return 1
1508	fi
1509
1510	return 0
1511}
1512
1513#
1514# Helper function to unshare a mountpoint.
1515#
1516function unshare_fs #fs
1517{
1518	typeset fs=$1
1519
1520	is_shared $fs || is_shared_smb $fs
1521	if (($? == 0)); then
1522		zfs unshare $fs || log_fail "zfs unshare $fs failed"
1523	fi
1524
1525	return 0
1526}
1527
1528#
1529# Helper function to share a NFS mountpoint.
1530#
1531function share_nfs #fs
1532{
1533	typeset fs=$1
1534
1535	if is_linux; then
1536		is_shared $fs
1537		if (($? != 0)); then
1538			log_must share "*:$fs"
1539		fi
1540	else
1541		is_shared $fs
1542		if (($? != 0)); then
1543			log_must share -F nfs $fs
1544		fi
1545	fi
1546
1547	return 0
1548}
1549
1550#
1551# Helper function to unshare a NFS mountpoint.
1552#
1553function unshare_nfs #fs
1554{
1555	typeset fs=$1
1556
1557	if is_linux; then
1558		is_shared $fs
1559		if (($? == 0)); then
1560			log_must unshare -u "*:$fs"
1561		fi
1562	else
1563		is_shared $fs
1564		if (($? == 0)); then
1565			log_must unshare -F nfs $fs
1566		fi
1567	fi
1568
1569	return 0
1570}
1571
1572#
1573# Helper function to show NFS shares.
1574#
1575function showshares_nfs
1576{
1577	if is_linux; then
1578		share -v
1579	else
1580		share -F nfs
1581	fi
1582
1583	return 0
1584}
1585
1586#
1587# Helper function to show SMB shares.
1588#
1589function showshares_smb
1590{
1591	if is_linux; then
1592		net usershare list
1593	else
1594		share -F smb
1595	fi
1596
1597	return 0
1598}
1599
1600function check_nfs
1601{
1602	if is_linux; then
1603		share -s
1604	elif is_freebsd; then
1605		showmount -e
1606	else
1607		log_unsupported "Unknown platform"
1608	fi
1609
1610	if [[ $? -ne 0 ]]; then
1611		log_unsupported "The NFS utilities are not installed"
1612	fi
1613}
1614
1615#
1616# Check NFS server status and trigger it online.
1617#
1618function setup_nfs_server
1619{
1620	# Cannot share directory in non-global zone.
1621	#
1622	if ! is_global_zone; then
1623		log_note "Cannot trigger NFS server by sharing in LZ."
1624		return
1625	fi
1626
1627	if is_linux; then
1628		#
1629		# Re-synchronize /var/lib/nfs/etab with /etc/exports and
		# /etc/exports.d/* to provide a clean test environment.
1631		#
1632		log_must share -r
1633
1634		log_note "NFS server must be started prior to running ZTS."
1635		return
1636	elif is_freebsd; then
1637		kill -s HUP $(cat /var/run/mountd.pid)
1638
1639		log_note "NFS server must be started prior to running ZTS."
1640		return
1641	fi
1642
1643	typeset nfs_fmri="svc:/network/nfs/server:default"
1644	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1645		#
		# Only a real sharing operation can bring the NFS server
		# online permanently.
1648		#
1649		typeset dummy=/tmp/dummy
1650
1651		if [[ -d $dummy ]]; then
1652			log_must rm -rf $dummy
1653		fi
1654
1655		log_must mkdir $dummy
1656		log_must share $dummy
1657
1658		#
		# Wait for the fmri's status to settle into its final state.
		# While in transition an asterisk (*) is appended to the
		# instance status, and unshare would revert the status to
		# 'DIS' again.
		#
		# Wait at least 1 second.
1664		#
1665		log_must sleep 1
1666		timeout=10
1667		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1668		do
1669			log_must sleep 1
1670
1671			((timeout -= 1))
1672		done
1673
1674		log_must unshare $dummy
1675		log_must rm -rf $dummy
1676	fi
1677
1678	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1679}
1680
1681#
1682# To verify whether calling process is in global zone
1683#
1684# Return 0 if in global zone, 1 in non-global zone
1685#
1686function is_global_zone
1687{
1688	if is_linux || is_freebsd; then
1689		return 0
1690	else
1691		typeset cur_zone=$(zonename 2>/dev/null)
1692		if [[ $cur_zone != "global" ]]; then
1693			return 1
1694		fi
1695		return 0
1696	fi
1697}
1698
1699#
1700# Verify whether test is permitted to run from
1701# global zone, local zone, or both
1702#
1703# $1 zone limit, could be "global", "local", or "both"(no limit)
1704#
1705# Return 0 if permitted, otherwise exit with log_unsupported
1706#
1707function verify_runnable # zone limit
1708{
1709	typeset limit=$1
1710
1711	[[ -z $limit ]] && return 0
1712
1713	if is_global_zone ; then
1714		case $limit in
1715			global|both)
1716				;;
1717			local)	log_unsupported "Test is unable to run from "\
1718					"global zone."
1719				;;
1720			*)	log_note "Warning: unknown limit $limit - " \
1721					"use both."
1722				;;
1723		esac
1724	else
1725		case $limit in
1726			local|both)
1727				;;
1728			global)	log_unsupported "Test is unable to run from "\
1729					"local zone."
1730				;;
1731			*)	log_note "Warning: unknown limit $limit - " \
1732					"use both."
1733				;;
1734		esac
1735
1736		reexport_pool
1737	fi
1738
1739	return 0
1740}
1741
# Return 0 if the pool is created successfully or already exists; $? otherwise
1743# Note: In local zones, this function should return 0 silently.
1744#
1745# $1 - pool name
1746# $2-n - [keyword] devs_list
1747
1748function create_pool #pool devs_list
1749{
1750	typeset pool=${1%%/*}
1751
1752	shift
1753
1754	if [[ -z $pool ]]; then
1755		log_note "Missing pool name."
1756		return 1
1757	fi
1758
1759	if poolexists $pool ; then
1760		destroy_pool $pool
1761	fi
1762
1763	if is_global_zone ; then
1764		[[ -d /$pool ]] && rm -rf /$pool
1765		log_must zpool create -f $pool $@
1766	fi
1767
1768	return 0
1769}
1770
# Return 0 if the pool is destroyed successfully; $? otherwise
1772# Note: In local zones, this function should return 0 silently.
1773#
1774# $1 - pool name
1775# Destroy pool with the given parameters.
1776
1777function destroy_pool #pool
1778{
1779	typeset pool=${1%%/*}
1780	typeset mtpt
1781
1782	if [[ -z $pool ]]; then
1783		log_note "No pool name given."
1784		return 1
1785	fi
1786
1787	if is_global_zone ; then
1788		if poolexists "$pool" ; then
1789			mtpt=$(get_prop mountpoint "$pool")
1790
1791			# At times, syseventd/udev activity can cause attempts
1792			# to destroy a pool to fail with EBUSY. We retry a few
1793			# times allowing failures before requiring the destroy
1794			# to succeed.
1795			log_must_busy zpool destroy -f $pool
1796
1797			[[ -d $mtpt ]] && \
1798				log_must rm -rf $mtpt
1799		else
1800			log_note "Pool does not exist. ($pool)"
1801			return 1
1802		fi
1803	fi
1804
1805	return 0
1806}
1807
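#
# Illustrative sketch: create and tear down a scratch pool on the first
# disk in $DISKS using the helpers above.  The pool name is a throwaway
# value chosen for this example.
#
function example_pool_cycle
{
	typeset pool="scratchpool$$"
	typeset disk=${DISKS%% *}

	create_pool $pool $disk
	poolexists $pool || log_fail "$pool was not created"
	destroy_pool $pool
}
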
1808# Return 0 if created successfully; $? otherwise
1809#
1810# $1 - dataset name
1811# $2-n - dataset options
1812
1813function create_dataset #dataset dataset_options
1814{
1815	typeset dataset=$1
1816
1817	shift
1818
1819	if [[ -z $dataset ]]; then
1820		log_note "Missing dataset name."
1821		return 1
1822	fi
1823
1824	if datasetexists $dataset ; then
1825		destroy_dataset $dataset
1826	fi
1827
1828	log_must zfs create $@ $dataset
1829
1830	return 0
1831}
1832
# Return 0 if the dataset is destroyed successfully; $? otherwise
1834# Note: In local zones, this function should return 0 silently.
1835#
1836# $1 - dataset name
1837# $2 - custom arguments for zfs destroy
1838# Destroy dataset with the given parameters.
1839
1840function destroy_dataset #dataset #args
1841{
1842	typeset dataset=$1
1843	typeset mtpt
1844	typeset args=${2:-""}
1845
1846	if [[ -z $dataset ]]; then
1847		log_note "No dataset name given."
1848		return 1
1849	fi
1850
1851	if is_global_zone ; then
1852		if datasetexists "$dataset" ; then
1853			mtpt=$(get_prop mountpoint "$dataset")
1854			log_must_busy zfs destroy $args $dataset
1855
1856			[[ -d $mtpt ]] && \
1857				log_must rm -rf $mtpt
1858		else
1859			log_note "Dataset does not exist. ($dataset)"
1860			return 1
1861		fi
1862	fi
1863
1864	return 0
1865}
1866
1867#
# First, create a pool with 5 datasets. Then, create a single zone and
1869# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1870# and a zvol device to the zone.
1871#
1872# $1 zone name
1873# $2 zone root directory prefix
1874# $3 zone ip
1875#
1876function zfs_zones_setup #zone_name zone_root zone_ip
1877{
1878	typeset zone_name=${1:-$(hostname)-z}
1879	typeset zone_root=${2:-"/zone_root"}
1880	typeset zone_ip=${3:-"10.1.1.10"}
1881	typeset prefix_ctr=$ZONE_CTR
1882	typeset pool_name=$ZONE_POOL
1883	typeset -i cntctr=5
1884	typeset -i i=0
1885
	# Create a pool and 5 containers within it
1887	#
1888	[[ -d /$pool_name ]] && rm -rf /$pool_name
1889	log_must zpool create -f $pool_name $DISKS
1890	while ((i < cntctr)); do
1891		log_must zfs create $pool_name/$prefix_ctr$i
1892		((i += 1))
1893	done
1894
1895	# create a zvol
1896	log_must zfs create -V 1g $pool_name/zone_zvol
1897	block_device_wait
1898
1899	#
	# If the current system supports slog, add a slog device to the pool
1901	#
1902	if verify_slog_support ; then
1903		typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
1904		log_must mkfile $MINVDEVSIZE $sdevs
1905		log_must zpool add $pool_name log mirror $sdevs
1906	fi
1907
1908	# this isn't supported just yet.
1909	# Create a filesystem. In order to add this to
	# the zone, it must have its mountpoint set to 'legacy'
1911	# log_must zfs create $pool_name/zfs_filesystem
1912	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
1913
1914	[[ -d $zone_root ]] && \
1915		log_must rm -rf $zone_root/$zone_name
1916	[[ ! -d $zone_root ]] && \
1917		log_must mkdir -p -m 0700 $zone_root/$zone_name
1918
1919	# Create zone configure file and configure the zone
1920	#
1921	typeset zone_conf=/tmp/zone_conf.$$
1922	echo "create" > $zone_conf
1923	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
1924	echo "set autoboot=true" >> $zone_conf
1925	i=0
1926	while ((i < cntctr)); do
1927		echo "add dataset" >> $zone_conf
1928		echo "set name=$pool_name/$prefix_ctr$i" >> \
1929			$zone_conf
1930		echo "end" >> $zone_conf
1931		((i += 1))
1932	done
1933
1934	# add our zvol to the zone
1935	echo "add device" >> $zone_conf
1936	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
1937	echo "end" >> $zone_conf
1938
1939	# add a corresponding zvol rdsk to the zone
1940	echo "add device" >> $zone_conf
1941	echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1942	echo "end" >> $zone_conf
1943
1944	# once it's supported, we'll add our filesystem to the zone
1945	# echo "add fs" >> $zone_conf
1946	# echo "set type=zfs" >> $zone_conf
1947	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
1948	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
1949	# echo "end" >> $zone_conf
1950
1951	echo "verify" >> $zone_conf
1952	echo "commit" >> $zone_conf
1953	log_must zonecfg -z $zone_name -f $zone_conf
1954	log_must rm -f $zone_conf
1955
1956	# Install the zone
1957	zoneadm -z $zone_name install
1958	if (($? == 0)); then
1959		log_note "SUCCESS: zoneadm -z $zone_name install"
1960	else
1961		log_fail "FAIL: zoneadm -z $zone_name install"
1962	fi
1963
1964	# Install sysidcfg file
1965	#
1966	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1967	echo "system_locale=C" > $sysidcfg
1968	echo  "terminal=dtterm" >> $sysidcfg
1969	echo  "network_interface=primary {" >> $sysidcfg
1970	echo  "hostname=$zone_name" >> $sysidcfg
1971	echo  "}" >> $sysidcfg
1972	echo  "name_service=NONE" >> $sysidcfg
1973	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
1974	echo  "security_policy=NONE" >> $sysidcfg
1975	echo  "timezone=US/Eastern" >> $sysidcfg
1976
1977	# Boot this zone
1978	log_must zoneadm -z $zone_name boot
1979}
1980
1981#
1982# Reexport TESTPOOL & TESTPOOL(1-4)
1983#
1984function reexport_pool
1985{
1986	typeset -i cntctr=5
1987	typeset -i i=0
1988
1989	while ((i < cntctr)); do
1990		if ((i == 0)); then
1991			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1992			if ! ismounted $TESTPOOL; then
1993				log_must zfs mount $TESTPOOL
1994			fi
1995		else
1996			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1997			if eval ! ismounted \$TESTPOOL$i; then
1998				log_must eval zfs mount \$TESTPOOL$i
1999			fi
2000		fi
2001		((i += 1))
2002	done
2003}
2004
2005#
2006# Verify a given disk or pool state
2007#
# Return 0 if pool/disk matches expected state, 1 otherwise
2009#
2010function check_state # pool disk state{online,offline,degraded}
2011{
2012	typeset pool=$1
2013	typeset disk=${2#$DEV_DSKDIR/}
2014	typeset state=$3
2015
2016	[[ -z $pool ]] || [[ -z $state ]] \
2017	    && log_fail "Arguments invalid or missing"
2018
2019	if [[ -z $disk ]]; then
2020		#check pool state only
2021		zpool get -H -o value health $pool \
2022		    | grep -i "$state" > /dev/null 2>&1
2023	else
2024		zpool status -v $pool | grep "$disk"  \
2025		    | grep -i "$state" > /dev/null 2>&1
2026	fi
2027
2028	return $?
2029}
2030
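#
# Illustrative sketch: assert every leaf vdev of a pool is ONLINE by
# combining check_state() with get_disklist() (defined below).  The
# function name is an assumption for this example.
#
function example_all_vdevs_online # pool
{
	typeset pool=${1:-$TESTPOOL}
	typeset disk

	for disk in $(get_disklist $pool); do
		check_state $pool $disk "online" || \
		    log_fail "$disk in $pool is not ONLINE"
	done
}
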
2031#
# Get the mountpoint of a snapshot
# A snapshot uses <mp_filesystem>/.zfs/snapshot/<snap>
# as its mountpoint
2035#
2036function snapshot_mountpoint
2037{
2038	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
2039
2040	if [[ $dataset != *@* ]]; then
		log_fail "Invalid snapshot name '$dataset'."
2042	fi
2043
2044	typeset fs=${dataset%@*}
2045	typeset snap=${dataset#*@}
2046
2047	if [[ -z $fs || -z $snap ]]; then
		log_fail "Invalid snapshot name '$dataset'."
2049	fi
2050
2051	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
2052}
2053
2054#
2055# Given a device and 'ashift' value verify it's correctly set on every label
2056#
2057function verify_ashift # device ashift
2058{
2059	typeset device="$1"
2060	typeset ashift="$2"
2061
2062	zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
2063	    if (ashift != $2)
2064	        exit 1;
2065	    else
2066	        count++;
2067	    } END {
2068	    if (count != 4)
2069	        exit 1;
2070	    else
2071	        exit 0;
2072	    }'
2073
2074	return $?
2075}
2076
2077#
2078# Given a pool and file system, this function will verify the file system
2079# using the zdb internal tool. Note that the pool is exported and imported
2080# to ensure it has consistent state.
2081#
2082function verify_filesys # pool filesystem dir
2083{
2084	typeset pool="$1"
2085	typeset filesys="$2"
2086	typeset zdbout="/tmp/zdbout.$$"
2087
2088	shift
2089	shift
2090	typeset dirs=$@
2091	typeset search_path=""
2092
2093	log_note "Calling zdb to verify filesystem '$filesys'"
2094	zfs unmount -a > /dev/null 2>&1
2095	log_must zpool export $pool
2096
2097	if [[ -n $dirs ]] ; then
2098		for dir in $dirs ; do
2099			search_path="$search_path -d $dir"
2100		done
2101	fi
2102
2103	log_must zpool import $search_path $pool
2104
2105	zdb -cudi $filesys > $zdbout 2>&1
2106	if [[ $? != 0 ]]; then
2107		log_note "Output: zdb -cudi $filesys"
2108		cat $zdbout
2109		log_fail "zdb detected errors with: '$filesys'"
2110	fi
2111
2112	log_must zfs mount -a
2113	log_must rm -rf $zdbout
2114}
2115
2116#
2117# Given a pool issue a scrub and verify that no checksum errors are reported.
2118#
2119function verify_pool
2120{
2121	typeset pool=${1:-$TESTPOOL}
2122
2123	log_must zpool scrub $pool
2124	log_must wait_scrubbed $pool
2125
2126	typeset -i cksum=$(zpool status $pool | awk '
2127	    !NF { isvdev = 0 }
2128	    isvdev { errors += $NF }
2129	    /CKSUM$/ { isvdev = 1 }
2130	    END { print errors }
2131	')
2132	if [[ $cksum != 0 ]]; then
2133		log_must zpool status -v
		log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
2135	fi
2136}
2137
2138#
# Given a pool, list all disks in the pool
2140#
2141function get_disklist # pool
2142{
2143	typeset disklist=""
2144
2145	disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
2146	    grep -v "\-\-\-\-\-" | \
2147	    egrep -v -e "^(mirror|raidz[1-3]|spare|log|cache|special|dedup)$")
2148
2149	echo $disklist
2150}
2151
2152#
# Given a pool, list all disks in the pool with their full
# path (like "/dev/sda" instead of "sda").
2155#
2156function get_disklist_fullpath # pool
2157{
2158	args="-P $1"
2159	get_disklist $args
2160}
2161
2162
2163
2164# /**
2165#  This function kills a given list of processes after a time period. We use
2166#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
2167#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
#  would be listed as FAIL, which we don't want: we're happy with stress tests
2169#  running for a certain amount of time, then finishing.
2170#
2171# @param $1 the time in seconds after which we should terminate these processes
2172# @param $2..$n the processes we wish to terminate.
2173# */
2174function stress_timeout
2175{
2176	typeset -i TIMEOUT=$1
2177	shift
2178	typeset cpids="$@"
2179
2180	log_note "Waiting for child processes($cpids). " \
2181		"It could last dozens of minutes, please be patient ..."
2182	log_must sleep $TIMEOUT
2183
2184	log_note "Killing child processes after ${TIMEOUT} stress timeout."
2185	typeset pid
2186	for pid in $cpids; do
2187		ps -p $pid > /dev/null 2>&1
2188		if (($? == 0)); then
2189			log_must kill -USR1 $pid
2190		fi
2191	done
2192}
2193
2194#
2195# Verify a given hotspare disk is inuse or avail
2196#
# Return 0 if pool/disk matches expected state, 1 otherwise
2198#
2199function check_hotspare_state # pool disk state{inuse,avail}
2200{
2201	typeset pool=$1
2202	typeset disk=${2#$DEV_DSKDIR/}
2203	typeset state=$3
2204
2205	cur_state=$(get_device_state $pool $disk "spares")
2206
2207	if [[ $state != ${cur_state} ]]; then
2208		return 1
2209	fi
2210	return 0
2211}
2212
2213#
2214# Wait until a hotspare transitions to a given state or times out.
2215#
# Return 0 when pool/disk matches expected state, 1 on timeout.
2217#
2218function wait_hotspare_state # pool disk state timeout
2219{
2220	typeset pool=$1
2221	typeset disk=${2#*$DEV_DSKDIR/}
2222	typeset state=$3
2223	typeset timeout=${4:-60}
2224	typeset -i i=0
2225
2226	while [[ $i -lt $timeout ]]; do
2227		if check_hotspare_state $pool $disk $state; then
2228			return 0
2229		fi
2230
2231		i=$((i+1))
2232		sleep 1
2233	done
2234
2235	return 1
2236}
2237
2238#
# Verify a given slog device is in the expected state
#
# Return 0 if pool/disk matches expected state, 1 otherwise
2242#
2243function check_slog_state # pool disk state{online,offline,unavail}
2244{
2245	typeset pool=$1
2246	typeset disk=${2#$DEV_DSKDIR/}
2247	typeset state=$3
2248
2249	cur_state=$(get_device_state $pool $disk "logs")
2250
2251	if [[ $state != ${cur_state} ]]; then
2252		return 1
2253	fi
2254	return 0
2255}
2256
2257#
# Verify a given vdev disk is in the expected state
#
# Return 0 if pool/disk matches expected state, 1 otherwise
2261#
2262function check_vdev_state # pool disk state{online,offline,unavail}
2263{
2264	typeset pool=$1
2265	typeset disk=${2#*$DEV_DSKDIR/}
2266	typeset state=$3
2267
2268	cur_state=$(get_device_state $pool $disk)
2269
2270	if [[ $state != ${cur_state} ]]; then
2271		return 1
2272	fi
2273	return 0
2274}
2275
2276#
2277# Wait until a vdev transitions to a given state or times out.
2278#
# Return 0 when pool/disk matches expected state, 1 on timeout.
2280#
2281function wait_vdev_state # pool disk state timeout
2282{
2283	typeset pool=$1
2284	typeset disk=${2#*$DEV_DSKDIR/}
2285	typeset state=$3
2286	typeset timeout=${4:-60}
2287	typeset -i i=0
2288
2289	while [[ $i -lt $timeout ]]; do
2290		if check_vdev_state $pool $disk $state; then
2291			return 0
2292		fi
2293
2294		i=$((i+1))
2295		sleep 1
2296	done
2297
2298	return 1
2299}
2300
2301#
2302# Check the output of 'zpool status -v <pool>' to see if the content of
2303# <token> contains the specified <keyword>.
2304#
2305# Return 0 if it does, 1 otherwise
2306#
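# Example usage (illustrative sketch; $TESTPOOL is assumed to exist):
#
#	if check_pool_status $TESTPOOL "errors" "No known data errors"; then
#		log_note "$TESTPOOL reports no data errors"
#	fi
#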
2307function check_pool_status # pool token keyword <verbose>
2308{
2309	typeset pool=$1
2310	typeset token=$2
2311	typeset keyword=$3
2312	typeset verbose=${4:-false}
2313
2314	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
2315		($1==token) {print $0}')
2316	if [[ $verbose == true ]]; then
2317		log_note $scan
2318	fi
2319	echo $scan | egrep -i "$keyword" > /dev/null 2>&1
2320
2321	return $?
2322}
2323
2324#
2325# The following functions are instances of check_pool_status(); example usage follows the list.
2326#	is_pool_resilvering - to check if the pool resilver is in progress
2327#	is_pool_resilvered - to check if the pool resilver is completed
2328#	is_pool_scrubbing - to check if the pool scrub is in progress
2329#	is_pool_scrubbed - to check if the pool scrub is completed
2330#	is_pool_scrub_stopped - to check if the pool scrub is stopped
2331#	is_pool_scrub_paused - to check if the pool scrub has paused
2332#	is_pool_removing - to check if the pool is removing a vdev
2333#	is_pool_removed - to check if the pool remove is completed
2334#	is_pool_discarding - to check if the pool checkpoint is being discarded
2335#
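# Example usage (illustrative sketch; $TESTPOOL is assumed to exist):
#
#	log_must zpool scrub $TESTPOOL
#	while is_pool_scrubbing $TESTPOOL; do
#		sleep 1
#	done
#	log_must is_pool_scrubbed $TESTPOOL
#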
2336function is_pool_resilvering #pool <verbose>
2337{
2338	check_pool_status "$1" "scan" \
2339	    "resilver[ ()0-9A-Za-z:_-]* in progress since" $2
2340	return $?
2341}
2342
2343function is_pool_resilvered #pool <verbose>
2344{
2345	check_pool_status "$1" "scan" "resilvered " $2
2346	return $?
2347}
2348
2349function is_pool_scrubbing #pool <verbose>
2350{
2351	check_pool_status "$1" "scan" "scrub in progress since " $2
2352	return $?
2353}
2354
2355function is_pool_scrubbed #pool <verbose>
2356{
2357	check_pool_status "$1" "scan" "scrub repaired" $2
2358	return $?
2359}
2360
2361function is_pool_scrub_stopped #pool <verbose>
2362{
2363	check_pool_status "$1" "scan" "scrub canceled" $2
2364	return $?
2365}
2366
2367function is_pool_scrub_paused #pool <verbose>
2368{
2369	check_pool_status "$1" "scan" "scrub paused since " $2
2370	return $?
2371}
2372
2373function is_pool_removing #pool
2374{
2375	check_pool_status "$1" "remove" "in progress since "
2376	return $?
2377}
2378
2379function is_pool_removed #pool
2380{
2381	check_pool_status "$1" "remove" "completed on"
2382	return $?
2383}
2384
2385function is_pool_discarding #pool
2386{
2387	check_pool_status "$1" "checkpoint" "discarding"
2388	return $?
2389}
2390
2391function wait_for_degraded
2392{
2393	typeset pool=$1
2394	typeset timeout=${2:-30}
2395	typeset t0=$SECONDS
2396
2397	while :; do
2398		[[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
2399		log_note "$pool is not yet degraded."
2400		sleep 1
2401		if ((SECONDS - t0 > $timeout)); then
2402			log_note "$pool not degraded after $timeout seconds."
2403			return 1
2404		fi
2405	done
2406
2407	return 0
2408}
2409
2410#
2411# Use create_pool()/destroy_pool() to clean up the information on
2412# the given disks to avoid slice overlapping.
2413#
2414function cleanup_devices #vdevs
2415{
2416	typeset pool="foopool$$"
2417
2418	for vdev in $@; do
2419		zero_partitions $vdev
2420	done
2421
2422	poolexists $pool && destroy_pool $pool
2423	create_pool $pool $@
2424	destroy_pool $pool
2425
2426	return 0
2427}
2428
2429#/**
2430# A function to find free disks on a system, or among the disks given as
2431# parameters. It works by excluding disks that are in use as swap or dump
2432# devices, as well as disks with mounted slices.
2433#
2434# $@ given disks to find which are free, default is all disks in
2435# the test system
2436#
2437# @return a string containing the list of available disks
2438#*/
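# Example usage (illustrative sketch; $DISKS is normally exported by the
# test runner):
#
#	typeset free_disks=$(find_disks $DISKS)
#	verify_disk_count "$free_disks" 2
#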
2439function find_disks
2440{
2441	# Trust provided list, no attempt is made to locate unused devices.
2442	if is_linux || is_freebsd; then
2443		echo "$@"
2444		return
2445	fi
2446
2447
2448	sfi=/tmp/swaplist.$$
2449	dmpi=/tmp/dumpdev.$$
2450	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2451
2452	swap -l > $sfi
2453	dumpadm > $dmpi 2>/dev/null
2454
2455# write an awk script that can process the output of format
2456# to produce a list of disks we know about. Note that we have
2457# to escape "$2" so that the shell doesn't interpret it while
2458# we're creating the awk script.
2459# -------------------
2460	cat > /tmp/find_disks.awk <<EOF
2461#!/bin/nawk -f
2462	BEGIN { FS="."; }
2463
2464	/^Specify disk/{
2465		searchdisks=0;
2466	}
2467
2468	{
2469		if (searchdisks && \$2 !~ "^$"){
2470			split(\$2,arr," ");
2471			print arr[1];
2472		}
2473	}
2474
2475	/^AVAILABLE DISK SELECTIONS:/{
2476		searchdisks=1;
2477	}
2478EOF
2479#---------------------
2480
2481	chmod 755 /tmp/find_disks.awk
2482	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
2483	rm /tmp/find_disks.awk
2484
2485	unused=""
2486	for disk in $disks; do
2487	# Check for mounted
2488		grep "${disk}[sp]" /etc/mnttab >/dev/null
2489		(($? == 0)) && continue
2490	# Check for swap
2491		grep "${disk}[sp]" $sfi >/dev/null
2492		(($? == 0)) && continue
2493	# check for dump device
2494		grep "${disk}[sp]" $dmpi >/dev/null
2495		(($? == 0)) && continue
2496	# check to see if this disk hasn't been explicitly excluded
2497	# by a user-set environment variable
2498		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
2499		(($? == 0)) && continue
2500		unused_candidates="$unused_candidates $disk"
2501	done
2502	rm $sfi
2503	rm $dmpi
2504
2505# now just check to see if those disks do actually exist
2506# by looking for a device pointing to the first slice in
2507# each case. Limit the number to max_finddisksnum
2508	count=0
2509	for disk in $unused_candidates; do
2510		if is_disk_device $DEV_DSKDIR/${disk}s0 && \
2511		    [ $count -lt $max_finddisksnum ]; then
2512			unused="$unused $disk"
2513			# do not impose limit if $@ is provided
2514			[[ -z $@ ]] && ((count = count + 1))
2515		fi
2516	done
2517
2518# finally, return our disk list
2519	echo $unused
2520}
2521
2522function add_user_freebsd #<group_name> <user_name> <basedir>
2523{
2524	typeset group=$1
2525	typeset user=$2
2526	typeset basedir=$3
2527
2528	# Check to see if the user exists.
2529	if id $user > /dev/null 2>&1; then
2530		return 0
2531	fi
2532
2533	# Assign 1000 as the base uid
2534	typeset -i uid=1000
2535	while true; do
2536		typeset -i ret
2537		pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
2538		ret=$?
2539		case $ret in
2540			0) break ;;
2541			# The uid is not unique
2542			65) ((uid += 1)) ;;
2543			*) return 1 ;;
2544		esac
2545		if [[ $uid == 65000 ]]; then
2546			log_fail "No user id available under 65000 for $user"
2547		fi
2548	done
2549
2550	# Silence MOTD
2551	touch $basedir/$user/.hushlogin
2552
2553	return 0
2554}
2555
2556#
2557# Delete the specified user.
2558#
2559# $1 login name
2560#
2561function del_user_freebsd #<logname>
2562{
2563	typeset user=$1
2564
2565	if id $user > /dev/null 2>&1; then
2566		log_must pw userdel $user
2567	fi
2568
2569	return 0
2570}
2571
2572#
2573# Select valid gid and create specified group.
2574#
2575# $1 group name
2576#
2577function add_group_freebsd #<group_name>
2578{
2579	typeset group=$1
2580
2581	# See if the group already exists.
2582	if pw groupshow $group >/dev/null 2>&1; then
2583		return 0
2584	fi
2585
2586	# Assign 1000 as the base gid
2587	typeset -i gid=1000
2588	while true; do
2589		pw groupadd -g $gid -n $group > /dev/null 2>&1
2590		typeset -i ret=$?
2591		case $ret in
2592			0) return 0 ;;
2593			# The gid is not unique
2594			65) ((gid += 1)) ;;
2595			*) return 1 ;;
2596		esac
2597		if [[ $gid == 65000 ]]; then
2598			log_fail "No group id available under 65000 for $group"
2599		fi
2600	done
2601}
2602
2603#
2604# Delete the specified group.
2605#
2606# $1 group name
2607#
2608function del_group_freebsd #<group_name>
2609{
2610	typeset group=$1
2611
2612	pw groupdel -n $group > /dev/null 2>&1
2613	typeset -i ret=$?
2614	case $ret in
2615		# Group does not exist, or was deleted successfully.
2616		0|6|65) return 0 ;;
2617		# Name already exists as a group name
2618		9) log_must pw groupdel $group ;;
2619		*) return 1 ;;
2620	esac
2621
2622	return 0
2623}
2624
2625function add_user_illumos #<group_name> <user_name> <basedir>
2626{
2627	typeset group=$1
2628	typeset user=$2
2629	typeset basedir=$3
2630
2631	log_must useradd -g $group -d $basedir/$user -m $user
2632
2633	return 0
2634}
2635
2636function del_user_illumos #<user_name>
2637{
2638	typeset user=$1
2639
2640	if id $user > /dev/null 2>&1; then
2641		log_must_retry "currently used" 6 userdel $user
2642	fi
2643
2644	return 0
2645}
2646
2647function add_group_illumos #<group_name>
2648{
2649	typeset group=$1
2650
2651	typeset -i gid=100
2652	while true; do
2653		groupadd -g $gid $group > /dev/null 2>&1
2654		typeset -i ret=$?
2655		case $ret in
2656			0) return 0 ;;
2657			# The gid is not unique
2658			4) ((gid += 1)) ;;
2659			*) return 1 ;;
2660		esac
2661	done
2662}
2663
2664function del_group_illumos #<group_name>
2665{
2666	typeset group=$1
2667
2668	groupmod -n $group $group > /dev/null 2>&1
2669	typeset -i ret=$?
2670	case $ret in
2671		# Group does not exist.
2672		6) return 0 ;;
2673		# Name already exists as a group name
2674		9) log_must groupdel $group ;;
2675		*) return 1 ;;
2676	esac
2677}
2678
2679function add_user_linux #<group_name> <user_name> <basedir>
2680{
2681	typeset group=$1
2682	typeset user=$2
2683	typeset basedir=$3
2684
2685	log_must useradd -g $group -d $basedir/$user -m $user
2686
2687	# Add new users to the same group as the command line utilities.
2688	# This allows them to be run out of the original user's home
2689	# directory as long as it is group readable.
2690	cmd_group=$(stat --format="%G" $(which zfs))
2691	log_must usermod -a -G $cmd_group $user
2692
2693	return 0
2694}
2695
2696function del_user_linux #<user_name>
2697{
2698	typeset user=$1
2699
2700	if id $user > /dev/null 2>&1; then
2701		log_must_retry "currently used" 6 userdel $user
2702	fi
2703
2704	return 0
2705}
2706
2707function add_group_linux #<group_name>
2708{
2709	typeset group=$1
2710
2711	# No explicit gid is assigned here; on many Linux distributions gids
2712	# 1000 and under are reserved, so let the system pick a free one.
2713	while true; do
2714		groupadd $group > /dev/null 2>&1
2715		typeset -i ret=$?
2716		case $ret in
2717			0) return 0 ;;
2718			*) return 1 ;;
2719		esac
2720	done
2721}
2722
2723function del_group_linux #<group_name>
2724{
2725	typeset group=$1
2726
2727	getent group $group > /dev/null 2>&1
2728	typeset -i ret=$?
2729	case $ret in
2730		# Group does not exist.
2731		2) return 0 ;;
2732		# Name already exists as a group name
2733		0) log_must groupdel $group ;;
2734		*) return 1 ;;
2735	esac
2736
2737	return 0
2738}
2739
2740#
2741# Add specified user to specified group
2742#
2743# $1 group name
2744# $2 user name
2745# $3 base of the homedir (optional)
2746#
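# Example usage (illustrative sketch; the group and user names are arbitrary,
# and user_run is defined later in this library):
#
#	log_must add_group zgrp
#	log_must add_user zgrp zusr
#	log_must user_run zusr "zfs list"
#	log_must del_user zusr
#	log_must del_group zgrp
#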
2747function add_user #<group_name> <user_name> <basedir>
2748{
2749	typeset group=$1
2750	typeset user=$2
2751	typeset basedir=${3:-"/var/tmp"}
2752
2753	if ((${#group} == 0 || ${#user} == 0)); then
2754		log_fail "group name or user name are not defined."
2755	fi
2756
2757	case $(uname) in
2758	FreeBSD)
2759		add_user_freebsd "$group" "$user" "$basedir"
2760		;;
2761	Linux)
2762		add_user_linux "$group" "$user" "$basedir"
2763		;;
2764	*)
2765		add_user_illumos "$group" "$user" "$basedir"
2766		;;
2767	esac
2768
2769	echo "export PATH=\"$STF_PATH\"" >>$basedir/$user/.profile
2770	echo "export PATH=\"$STF_PATH\"" >>$basedir/$user/.bash_profile
2771	echo "export PATH=\"$STF_PATH\"" >>$basedir/$user/.login
2772
2773	return 0
2774}
2775
2776#
2777# Delete the specified user.
2778#
2779# $1 login name
2780# $2 base of the homedir (optional)
2781#
2782function del_user #<logname> <basedir>
2783{
2784	typeset user=$1
2785	typeset basedir=${2:-"/var/tmp"}
2786
2787	if ((${#user} == 0)); then
2788		log_fail "login name is necessary."
2789	fi
2790
2791	case $(uname) in
2792	FreeBSD)
2793		del_user_freebsd "$user"
2794		;;
2795	Linux)
2796		del_user_linux "$user"
2797		;;
2798	*)
2799		del_user_illumos "$user"
2800		;;
2801	esac
2802
2803	[[ -d $basedir/$user ]] && rm -fr $basedir/$user
2804
2805	return 0
2806}
2807
2808#
2809# Select valid gid and create specified group.
2810#
2811# $1 group name
2812#
2813function add_group #<group_name>
2814{
2815	typeset group=$1
2816
2817	if ((${#group} == 0)); then
2818		log_fail "group name is necessary."
2819	fi
2820
2821	case $(uname) in
2822	FreeBSD)
2823		add_group_freebsd "$group"
2824		;;
2825	Linux)
2826		add_group_linux "$group"
2827		;;
2828	*)
2829		add_group_illumos "$group"
2830		;;
2831	esac
2832
2833	return 0
2834}
2835
2836#
2837# Delete the specified group.
2838#
2839# $1 group name
2840#
2841function del_group #<group_name>
2842{
2843	typeset group=$1
2844
2845	if ((${#group} == 0)); then
2846		log_fail "group name is necessary."
2847	fi
2848
2849	case $(uname) in
2850	FreeBSD)
2851		del_group_freebsd "$group"
2852		;;
2853	Linux)
2854		del_group_linux "$group"
2855		;;
2856	*)
2857		del_group_illumos "$group"
2858		;;
2859	esac
2860
2861	return 0
2862}
2863
2864#
2865# This function will return true if it's safe to destroy the pool passed
2866# as argument 1. It checks for pools based on zvols and files, and also
2867# files contained in a pool that may have a different mountpoint.
2868#
2869function safe_to_destroy_pool { # $1 the pool name
2870
2871	typeset pool=""
2872	typeset DONT_DESTROY=""
2873
2874	# We check that by deleting the $1 pool, we're not
2875	# going to pull the rug out from other pools. Do this
2876	# by looking at all other pools, ensuring that they
2877	# aren't built from files or zvols contained in this pool.
2878
2879	for pool in $(zpool list -H -o name)
2880	do
2881		ALTMOUNTPOOL=""
2882
2883		# this is a list of the file-backed vdevs in $pool whose
2884		# backing files live under datasets of the $1 pool
2885		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2886			awk '{print $1}')
2887
2888		# this is a list of the zvols that make up the pool
2889		ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2890		    | awk '{print $1}')
2891
2892		# also want to determine if it's a file-based pool using an
2893		# alternate mountpoint...
2894		POOL_FILE_DIRS=$(zpool status -v $pool | \
2895					grep / | awk '{print $1}' | \
2896					awk -F/ '{print $2}' | grep -v "dev")
2897
2898		for pooldir in $POOL_FILE_DIRS
2899		do
2900			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2901					grep "${pooldir}$" | awk '{print $1}')
2902
2903			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2904		done
2905
2906
2907		if [ ! -z "$ZVOLPOOL" ]
2908		then
2909			DONT_DESTROY="true"
2910			log_note "Pool $pool is built from $ZVOLPOOL on $1"
2911		fi
2912
2913		if [ ! -z "$FILEPOOL" ]
2914		then
2915			DONT_DESTROY="true"
2916			log_note "Pool $pool is built from $FILEPOOL on $1"
2917		fi
2918
2919		if [ ! -z "$ALTMOUNTPOOL" ]
2920		then
2921			DONT_DESTROY="true"
2922			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2923		fi
2924	done
2925
2926	if [ -z "${DONT_DESTROY}" ]
2927	then
2928		return 0
2929	else
2930		log_note "Warning: it is not safe to destroy $1!"
2931		return 1
2932	fi
2933}
2934
2935#
2936# Verify the zfs operation with the -p option works as expected
2937# $1 operation, value could be create, clone or rename
2938# $2 dataset type, value could be fs or vol
2939# $3 dataset name
2940# $4 new dataset name
2941#
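# Example usage (illustrative sketch; $TESTPOOL/$TESTFS is assumed to exist
# and the nested dataset names are arbitrary):
#
#	verify_opt_p_ops "create" "fs" $TESTPOOL/$TESTFS/a/b/c
#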
2942function verify_opt_p_ops
2943{
2944	typeset ops=$1
2945	typeset datatype=$2
2946	typeset dataset=$3
2947	typeset newdataset=$4
2948
2949	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2950		log_fail "$datatype is not supported."
2951	fi
2952
2953	# check parameters accordingly
2954	case $ops in
2955		create)
2956			newdataset=$dataset
2957			dataset=""
2958			if [[ $datatype == "vol" ]]; then
2959				ops="create -V $VOLSIZE"
2960			fi
2961			;;
2962		clone)
2963			if [[ -z $newdataset ]]; then
2964				log_fail "newdataset should not be empty" \
2965					"when ops is $ops."
2966			fi
2967			log_must datasetexists $dataset
2968			log_must snapexists $dataset
2969			;;
2970		rename)
2971			if [[ -z $newdataset ]]; then
2972				log_fail "newdataset should not be empty" \
2973					"when ops is $ops."
2974			fi
2975			log_must datasetexists $dataset
2976			;;
2977		*)
2978			log_fail "$ops is not supported."
2979			;;
2980	esac
2981
2982	# make sure the upper level filesystem does not exist
2983	destroy_dataset "${newdataset%/*}" "-rRf"
2984
2985	# without -p option, operation will fail
2986	log_mustnot zfs $ops $dataset $newdataset
2987	log_mustnot datasetexists $newdataset ${newdataset%/*}
2988
2989	# with -p option, operation should succeed
2990	log_must zfs $ops -p $dataset $newdataset
2991	block_device_wait
2992
2993	if ! datasetexists $newdataset ; then
2994		log_fail "-p option does not work for $ops"
2995	fi
2996
2997	# when $ops is create or clone, redoing the operation should still return zero
2998	if [[ $ops != "rename" ]]; then
2999		log_must zfs $ops -p $dataset $newdataset
3000	fi
3001
3002	return 0
3003}
3004
3005#
3006# Get configuration of pool
3007# $1 pool name
3008# $2 config name
3009#
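# Example usage (illustrative sketch; $TESTPOOL is assumed to exist and
# "pool_guid" is one of the labels printed by zdb):
#
#	typeset guid=$(get_config $TESTPOOL pool_guid)
#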
3010function get_config
3011{
3012	typeset pool=$1
3013	typeset config=$2
3014	typeset alt_root
3015
3016	if ! poolexists "$pool" ; then
3017		return 1
3018	fi
3019	alt_root=$(zpool list -H $pool | awk '{print $NF}')
3020	if [[ $alt_root == "-" ]]; then
3021		value=$(zdb -C $pool | grep "$config:" | awk -F: \
3022		    '{print $2}')
3023	else
3024		value=$(zdb -e $pool | grep "$config:" | awk -F: \
3025		    '{print $2}')
3026	fi
3027	if [[ -n $value ]] ; then
3028		value=${value#\'}
3029		value=${value%\'}
3030	fi
3031	echo $value
3032
3033	return 0
3034}
3035
3036#
3037# Private function. Randomly select one of the items from the arguments.
3038#
3039# $1 count
3040# $2-n string
3041#
3042function _random_get
3043{
3044	typeset cnt=$1
3045	shift
3046
3047	typeset str="$@"
3048	typeset -i ind
3049	((ind = RANDOM % cnt + 1))
3050
3051	typeset ret=$(echo "$str" | cut -f $ind -d ' ')
3052	echo $ret
3053}
3054
3055#
3056# Randomly select one item from the arguments; the choices include a NONE string
3057#
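# Example usage (illustrative sketch; an empty result represents the NONE
# choice, and $TESTPOOL/$TESTFS is assumed to exist):
#
#	typeset recurse=$(random_get_with_non "-r")
#	log_must zfs snapshot $recurse $TESTPOOL/$TESTFS@snap
#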
3058function random_get_with_non
3059{
3060	typeset -i cnt=$#
3061	((cnt += 1))
3062
3063	_random_get "$cnt" "$@"
3064}
3065
3066#
3067# Randomly select one item from the arguments; the choices do not include NONE
3068#
3069function random_get
3070{
3071	_random_get "$#" "$@"
3072}
3073
3074#
3075# Detect if the current system supports a separate log device (slog)
3076#
3077function verify_slog_support
3078{
3079	typeset dir=$TEST_BASE_DIR/disk.$$
3080	typeset pool=foo.$$
3081	typeset vdev=$dir/a
3082	typeset sdev=$dir/b
3083
3084	mkdir -p $dir
3085	mkfile $MINVDEVSIZE $vdev $sdev
3086
3087	typeset -i ret=0
3088	if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
3089		ret=1
3090	fi
3091	rm -r $dir
3092
3093	return $ret
3094}
3095
3096#
3097# The function will generate a dataset name with a specific length
3098# $1, the length of the name
3099# $2, the base string to construct the name
3100#
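# Example usage (illustrative sketch; builds a 64-character name from the
# base string "abcd"):
#
#	typeset name=$(gen_dataset_name 64 "abcd")
#	log_must zfs create $TESTPOOL/$name
#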
3101function gen_dataset_name
3102{
3103	typeset -i len=$1
3104	typeset basestr="$2"
3105	typeset -i baselen=${#basestr}
3106	typeset -i iter=0
3107	typeset l_name=""
3108
3109	if ((len % baselen == 0)); then
3110		((iter = len / baselen))
3111	else
3112		((iter = len / baselen + 1))
3113	fi
3114	while ((iter > 0)); do
3115		l_name="${l_name}$basestr"
3116
3117		((iter -= 1))
3118	done
3119
3120	echo $l_name
3121}
3122
3123#
3124# Get cksum tuple of dataset
3125# $1 dataset name
3126#
3127# sample zdb output:
3128# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
3129# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
3130# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
3131# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
3132function datasetcksum
3133{
3134	typeset cksum
3135	sync
3136	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
3137		| awk -F= '{print $7}')
3138	echo $cksum
3139}
3140
3141#
3142# Get cksum of file
3143# $1 file path
3144#
3145function checksum
3146{
3147	typeset cksum
3148	cksum=$(cksum $1 | awk '{print $1}')
3149	echo $cksum
3150}
3151
3152#
3153# Get the given disk/slice state from the specific field of the pool
3154#
3155function get_device_state #pool disk field("", "spares","logs")
3156{
3157	typeset pool=$1
3158	typeset disk=${2#$DEV_DSKDIR/}
3159	typeset field=${3:-$pool}
3160
3161	state=$(zpool status -v "$pool" 2>/dev/null | \
3162		nawk -v device=$disk -v pool=$pool -v field=$field \
3163		'BEGIN {startconfig=0; startfield=0; }
3164		/config:/ {startconfig=1}
3165		(startconfig==1) && ($1==field) {startfield=1; next;}
3166		(startfield==1) && ($1==device) {print $2; exit;}
3167		(startfield==1) &&
3168		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
3169	echo $state
3170}
3171
3172
3173#
3174# print the given directory filesystem type
3175#
3176# $1 directory name
3177#
3178function get_fstype
3179{
3180	typeset dir=$1
3181
3182	if [[ -z $dir ]]; then
3183		log_fail "Usage: get_fstype <directory>"
3184	fi
3185
3186	#
3187	#  $ df -n /
3188	#  /		  : ufs
3189	#
3190	df -n $dir | awk '{print $3}'
3191}
3192
3193#
3194# Given a disk, label it to VTOC regardless of what label was on the disk
3195# $1 disk
3196#
3197function labelvtoc
3198{
3199	typeset disk=$1
3200	if [[ -z $disk ]]; then
3201		log_fail "The disk name is unspecified."
3202	fi
3203	typeset label_file=/var/tmp/labelvtoc.$$
3204	typeset arch=$(uname -p)
3205
3206	if is_linux || is_freebsd; then
3207		log_note "Currently unsupported by the test framework"
3208		return 1
3209	fi
3210
3211	if [[ $arch == "i386" ]]; then
3212		echo "label" > $label_file
3213		echo "0" >> $label_file
3214		echo "" >> $label_file
3215		echo "q" >> $label_file
3216		echo "q" >> $label_file
3217
3218		fdisk -B $disk >/dev/null 2>&1
3219		# wait a while for fdisk to finish
3220		sleep 60
3221	elif [[ $arch == "sparc" ]]; then
3222		echo "label" > $label_file
3223		echo "0" >> $label_file
3224		echo "" >> $label_file
3225		echo "" >> $label_file
3226		echo "" >> $label_file
3227		echo "q" >> $label_file
3228	else
3229		log_fail "unknown arch type"
3230	fi
3231
3232	format -e -s -d $disk -f $label_file
3233	typeset -i ret_val=$?
3234	rm -f $label_file
3235	#
3236	# wait for format to finish
3237	#
3238	sleep 60
3239	if ((ret_val != 0)); then
3240		log_fail "unable to label $disk as VTOC."
3241	fi
3242
3243	return 0
3244}
3245
3246#
3247# check if the system was installed as zfsroot or not
3248# return: 0 if zfsroot, non-zero if not
3249#
3250function is_zfsroot
3251{
3252	df -n / | grep zfs > /dev/null 2>&1
3253	return $?
3254}
3255
3256#
3257# get the root filesystem name if it's a zfsroot system.
3258#
3259# return: root filesystem name
3260function get_rootfs
3261{
3262	typeset rootfs=""
3263
3264	if is_freebsd; then
3265		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
3266	elif ! is_linux; then
3267		rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
3268			/etc/mnttab)
3269	fi
3270	if [[ -z "$rootfs" ]]; then
3271		log_fail "Can not get rootfs"
3272	fi
3273	zfs list $rootfs > /dev/null 2>&1
3274	if (($? == 0)); then
3275		echo $rootfs
3276	else
3277		log_fail "This is not a zfsroot system."
3278	fi
3279}
3280
3281#
3282# get the root filesystem's pool name
3283# return:
3284#       rootpool name
3285#
3286function get_rootpool
3287{
3288	typeset rootfs=""
3289	typeset rootpool=""
3290
3291	if is_freebsd; then
3292		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
3293	elif ! is_linux; then
3294		rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
3295			 /etc/mnttab)
3296	fi
3297	if [[ -z "$rootfs" ]]; then
3298		log_fail "Can not get rootpool"
3299	fi
3300	zfs list $rootfs > /dev/null 2>&1
3301	if (($? == 0)); then
3302		echo ${rootfs%%/*}
3303	else
3304		log_fail "This is not a zfsroot system."
3305	fi
3306}
3307
3308#
3309# Get the number of words in a whitespace-separated string
3310#
3311function get_word_count
3312{
3313	echo $1 | wc -w
3314}
3315
3316#
3317# Verify that the required number of disks is given
3318#
3319function verify_disk_count
3320{
3321	typeset -i min=${2:-1}
3322
3323	typeset -i count=$(get_word_count "$1")
3324
3325	if ((count < min)); then
3326		log_untested "A minimum of $min disks is required to run." \
3327			" You specified $count disk(s)"
3328	fi
3329}
3330
3331function ds_is_volume
3332{
3333	typeset type=$(get_prop type $1)
3334	[[ $type = "volume" ]] && return 0
3335	return 1
3336}
3337
3338function ds_is_filesystem
3339{
3340	typeset type=$(get_prop type $1)
3341	[[ $type = "filesystem" ]] && return 0
3342	return 1
3343}
3344
3345function ds_is_snapshot
3346{
3347	typeset type=$(get_prop type $1)
3348	[[ $type = "snapshot" ]] && return 0
3349	return 1
3350}
3351
3352#
3353# Check if Trusted Extensions are installed and enabled
3354#
3355function is_te_enabled
3356{
3357	svcs -H -o state labeld 2>/dev/null | grep "enabled"
3358	if (($? != 0)); then
3359		return 1
3360	else
3361		return 0
3362	fi
3363}
3364
3365# Utility function to determine if a system has multiple cpus.
3366function is_mp
3367{
3368	if is_linux; then
3369		(($(nproc) > 1))
3370	elif is_freebsd; then
3371		(($(sysctl -n kern.smp.cpus) > 1))
3372	else
3373		(($(psrinfo | wc -l) > 1))
3374	fi
3375
3376	return $?
3377}
3378
3379function get_cpu_freq
3380{
3381	if is_linux; then
3382		lscpu | awk '/CPU MHz/ { print $3 }'
3383	elif is_freebsd; then
3384		sysctl -n hw.clockrate
3385	else
3386		psrinfo -v 0 | awk '/processor operates at/ {print $6}'
3387	fi
3388}
3389
3390# Run the given command as the user provided.
3391function user_run
3392{
3393	typeset user=$1
3394	shift
3395
3396	log_note "user:$user $@"
3397	eval su - \$user -c \"$@\" > $TEST_BASE_DIR/out 2>$TEST_BASE_DIR/err
3398}
3399
3400#
3401# Check if the pool contains the specified vdevs
3402#
3403# $1 pool
3404# $2..n <vdev> ...
3405#
3406# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3407# vdevs is not in the pool, and 2 if pool name is missing.
3408#
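# Example usage (illustrative sketch; $TESTPOOL, $DISK1 and $DISK2 are
# assumed to be provided by the calling test case):
#
#	log_must zpool create $TESTPOOL mirror $DISK1 $DISK2
#	log_must vdevs_in_pool $TESTPOOL mirror-0 $DISK1 $DISK2
#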
3409function vdevs_in_pool
3410{
3411	typeset pool=$1
3412	typeset vdev
3413
3414	if [[ -z $pool ]]; then
3415		log_note "Missing pool name."
3416		return 2
3417	fi
3418
3419	shift
3420
3421	# We could use 'zpool list' to only get the vdevs of the pool but we
3422	# can't reference a mirror/raidz vdev using its ID (e.g. mirror-0),
3423	# therefore we use the 'zpool status' output.
3424	typeset tmpfile=$(mktemp)
3425	zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
3426	for vdev in $@; do
3427		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
3428		[[ $? -ne 0 ]] && return 1
3429	done
3430
3431	rm -f $tmpfile
3432
3433	return 0;
3434}
3435
3436function get_max
3437{
3438	typeset -l i max=$1
3439	shift
3440
3441	for i in "$@"; do
3442		max=$((max > i ? max : i))
3443	done
3444
3445	echo $max
3446}
3447
3448function get_min
3449{
3450	typeset -l i min=$1
3451	shift
3452
3453	for i in "$@"; do
3454		min=$((min < i ? min : i))
3455	done
3456
3457	echo $min
3458}
3459
3460# Write compressible data into a directory
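# Example usage (illustrative sketch; writes three ~10 MB compressible files
# named cfile.0 .. cfile.2 into $TESTDIR, which is assumed to be mounted):
#
#	write_compressible $TESTDIR 10m 3 1024k cfile
#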
3461function write_compressible
3462{
3463	typeset dir=$1
3464	typeset megs=$2
3465	typeset nfiles=${3:-1}
3466	typeset bs=${4:-1024k}
3467	typeset fname=${5:-file}
3468
3469	[[ -d $dir ]] || log_fail "No directory: $dir"
3470
3471	# Under Linux fio is not currently used since its behavior can
3472	# differ significantly across versions.  This includes missing
3473	# command line options and cases where the --buffer_compress_*
3474	# options fail to behave as expected.
3475	if is_linux; then
3476		typeset file_bytes=$(to_bytes $megs)
3477		typeset bs_bytes=4096
3478		typeset blocks=$(($file_bytes / $bs_bytes))
3479
3480		for (( i = 0; i < $nfiles; i++ )); do
3481			truncate -s $file_bytes $dir/$fname.$i
3482
3483			# Write every third block to get 66% compression.
3484			for (( j = 0; j < $blocks; j += 3 )); do
3485				dd if=/dev/urandom of=$dir/$fname.$i \
3486				    seek=$j bs=$bs_bytes count=1 \
3487				    conv=notrunc >/dev/null 2>&1
3488			done
3489		done
3490	else
3491		log_must eval "fio \
3492		    --name=job \
3493		    --fallocate=0 \
3494		    --minimal \
3495		    --randrepeat=0 \
3496		    --buffer_compress_percentage=66 \
3497		    --buffer_compress_chunk=4096 \
3498		    --directory=$dir \
3499		    --numjobs=$nfiles \
3500		    --nrfiles=$nfiles \
3501		    --rw=write \
3502		    --bs=$bs \
3503		    --filesize=$megs \
3504		    --filename_format='$fname.\$jobnum' >/dev/null"
3505	fi
3506}
3507
3508function get_objnum
3509{
3510	typeset pathname=$1
3511	typeset objnum
3512
3513	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
3514	if is_freebsd; then
3515		objnum=$(stat -f "%i" $pathname)
3516	else
3517		objnum=$(stat -c %i $pathname)
3518	fi
3519	echo $objnum
3520}
3521
3522#
3523# Sync data to the pool
3524#
3525# $1 pool name
3526# $2 boolean to force uberblock (and config including zpool cache file) update
3527#
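# Example usage (illustrative sketch; $TESTPOOL/$TESTFS is assumed to be
# mounted at its default mountpoint):
#
#	log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS/f bs=1024k count=4
#	sync_pool $TESTPOOL
#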
3528function sync_pool #pool <force>
3529{
3530	typeset pool=${1:-$TESTPOOL}
3531	typeset force=${2:-false}
3532
3533	if [[ $force == true ]]; then
3534		log_must zpool sync -f $pool
3535	else
3536		log_must zpool sync $pool
3537	fi
3538
3539	return 0
3540}
3541
3542#
3543# Wait for the zpool 'freeing' property to drop to zero.
3544#
3545# $1 pool name
3546#
3547function wait_freeing #pool
3548{
3549	typeset pool=${1:-$TESTPOOL}
3550	while true; do
3551		[[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3552		log_must sleep 1
3553	done
3554}
3555
3556#
3557# Wait for every device replace operation to complete
3558#
3559# $1 pool name
3560#
3561function wait_replacing #pool
3562{
3563	typeset pool=${1:-$TESTPOOL}
3564	while true; do
3565		[[ "" == "$(zpool status $pool |
3566		    awk '/replacing-[0-9]+/ {print $1}')" ]] && break
3567		log_must sleep 1
3568	done
3569}
3570
3571#
3572# Wait for a pool to be scrubbed
3573#
3574# $1 pool name
3575# $2 number of seconds to wait (currently ignored)
3576#
3577# Blocks until the pool reports as scrubbed; since no timeout is
3578# implemented, the scrub must complete for this function to return.
3579#
3580function wait_scrubbed
3581{
3582	typeset pool=${1:-$TESTPOOL}
3583	while true ; do
3584		is_pool_scrubbed $pool && break
3585		sleep 1
3586	done
3587}
3588
3589# Back up the zed.rc in our test directory so that we can edit it for our test.
3590#
3591# Returns: Backup file name.  You will need to pass this to zed_rc_restore().
3592function zed_rc_backup
3593{
3594	zedrc_backup="$(mktemp)"
3595	cp $ZEDLET_DIR/zed.rc $zedrc_backup
3596	echo $zedrc_backup
3597}
3598
3599function zed_rc_restore
3600{
3601	mv $1 $ZEDLET_DIR/zed.rc
3602}
3603
3604#
3605# Setup custom environment for the ZED.
3606#
3607# $@ Optional list of zedlets to run under zed.
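# Example usage (illustrative sketch; "all-debug.sh" is assumed to be one of
# the stock zedlets installed in $ZEDLET_LIBEXEC_DIR):
#
#	log_must zed_setup "all-debug.sh"
#	log_must zed_start
#	# ... trigger and verify zevents here ...
#	log_must zed_stop
#	zed_cleanup "all-debug.sh"
#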
3608function zed_setup
3609{
3610	if ! is_linux; then
3611		log_unsupported "No zed on $(uname)"
3612	fi
3613
3614	if [[ ! -d $ZEDLET_DIR ]]; then
3615		log_must mkdir $ZEDLET_DIR
3616	fi
3617
3618	if [[ ! -e $VDEVID_CONF ]]; then
3619		log_must touch $VDEVID_CONF
3620	fi
3621
3622	if [[ -e $VDEVID_CONF_ETC ]]; then
3623		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
3624	fi
3625	EXTRA_ZEDLETS=$@
3626
3627	# Create a symlink for /etc/zfs/vdev_id.conf file.
3628	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
3629
3630	# Setup minimal ZED configuration.  Individual test cases should
3631	# add additional ZEDLETs as needed for their specific test.
3632	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
3633	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
3634
3635	# Scripts must only be user writable.
3636	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3637		saved_umask=$(umask)
3638		log_must umask 0022
3639		for i in $EXTRA_ZEDLETS ; do
3640			log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
3641		done
3642		log_must umask $saved_umask
3643	fi
3644
3645	# Customize the zed.rc file to enable the full debug log.
3646	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
3647	echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
3648
3649}
3650
3651#
3652# Cleanup custom ZED environment.
3653#
3654# $@ Optional list of zedlets to remove from our test zed.d directory.
3655function zed_cleanup
3656{
3657	if ! is_linux; then
3658		return
3659	fi
3660	EXTRA_ZEDLETS=$@
3661
3662	log_must rm -f ${ZEDLET_DIR}/zed.rc
3663	log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
3664	log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
3665	log_must rm -f ${ZEDLET_DIR}/all-debug.sh
3666	log_must rm -f ${ZEDLET_DIR}/state
3667
3668	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3669		for i in $EXTRA_ZEDLETS ; do
3670			log_must rm -f ${ZEDLET_DIR}/$i
3671		done
3672	fi
3673	log_must rm -f $ZED_LOG
3674	log_must rm -f $ZED_DEBUG_LOG
3675	log_must rm -f $VDEVID_CONF_ETC
3676	log_must rm -f $VDEVID_CONF
3677	rmdir $ZEDLET_DIR
3678}
3679
3680#
3681# Check if ZED is currently running, if not start ZED.
3682#
3683function zed_start
3684{
3685	if ! is_linux; then
3686		return
3687	fi
3688
3689	# ZEDLET_DIR=/var/tmp/zed
3690	if [[ ! -d $ZEDLET_DIR ]]; then
3691		log_must mkdir $ZEDLET_DIR
3692	fi
3693
3694	# Verify the ZED is not already running.
3695	pgrep -x zed > /dev/null
3696	if (($? == 0)); then
3697		log_note "ZED already running"
3698	else
3699		log_note "Starting ZED"
3700		# run ZED in the background and redirect foreground logging
3701		# output to $ZED_LOG.
3702		log_must truncate -s 0 $ZED_DEBUG_LOG
3703		log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid -P $PATH" \
3704		    "-s $ZEDLET_DIR/state 2>$ZED_LOG &"
3705	fi
3706
3707	return 0
3708}
3709
3710#
3711# Kill ZED process
3712#
3713function zed_stop
3714{
3715	if ! is_linux; then
3716		return
3717	fi
3718
3719	log_note "Stopping ZED"
3720	if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
3721		zedpid=$(<${ZEDLET_DIR}/zed.pid)
3722		kill $zedpid
3723		while ps -p $zedpid > /dev/null; do
3724			sleep 1
3725		done
3726		rm -f ${ZEDLET_DIR}/zed.pid
3727	fi
3728	return 0
3729}
3730
3731#
3732# Drain all zevents
3733#
3734function zed_events_drain
3735{
3736	while [ $(zpool events -H | wc -l) -ne 0 ]; do
3737		sleep 1
3738		zpool events -c >/dev/null
3739	done
3740}
3741
3742# Set a variable in zed.rc to something, un-commenting it in the process.
3743#
3744# $1 variable
3745# $2 value
3746function zed_rc_set
3747{
3748	var="$1"
3749	val="$2"
3750	# Remove the line
3751	cmd="'/$var/d'"
3752	eval sed -i $cmd $ZEDLET_DIR/zed.rc
3753
3754	# Add it at the end
3755	echo "$var=$val" >> $ZEDLET_DIR/zed.rc
3756}
3757
3758
3759#
3760# Check if the provided device is actively being used as a swap device.
3761#
3762function is_swap_inuse
3763{
3764	typeset device=$1
3765
3766	if [[ -z $device ]] ; then
3767		log_note "No device specified."
3768		return 1
3769	fi
3770
3771	if is_linux; then
3772		swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
3773	elif is_freebsd; then
3774		swapctl -l | grep -w $device
3775	else
3776		swap -l | grep -w $device > /dev/null 2>&1
3777	fi
3778
3779	return $?
3780}
3781
3782#
3783# Setup a swap device using the provided device.
3784#
3785function swap_setup
3786{
3787	typeset swapdev=$1
3788
3789	if is_linux; then
3790		log_must eval "mkswap $swapdev > /dev/null 2>&1"
3791		log_must swapon $swapdev
3792	elif is_freebsd; then
3793		log_must swapctl -a $swapdev
3794	else
3795		log_must swap -a $swapdev
3796	fi
3797
3798	return 0
3799}
3800
3801#
3802# Cleanup a swap device on the provided device.
3803#
3804function swap_cleanup
3805{
3806	typeset swapdev=$1
3807
3808	if is_swap_inuse $swapdev; then
3809		if is_linux; then
3810			log_must swapoff $swapdev
3811		elif is_freebsd; then
3812			log_must swapoff $swapdev
3813		else
3814			log_must swap -d $swapdev
3815		fi
3816	fi
3817
3818	return 0
3819}
3820
3821#
3822# Set a global system tunable (64-bit value)
3823#
3824# $1 tunable name (use a NAME defined in tunables.cfg)
3825# $2 tunable values
3826#
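# Example usage (illustrative sketch; TXG_TIMEOUT is assumed to be one of the
# NAMEs defined in tunables.cfg):
#
#	typeset saved=$(get_tunable TXG_TIMEOUT)
#	log_must set_tunable64 TXG_TIMEOUT 1
#	# ... exercise the test case ...
#	log_must set_tunable64 TXG_TIMEOUT $saved
#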
3827function set_tunable64
3828{
3829	set_tunable_impl "$1" "$2" Z
3830}
3831
3832#
3833# Set a global system tunable (32-bit value)
3834#
3835# $1 tunable name (use a NAME defined in tunables.cfg)
3836# $2 tunable values
3837#
3838function set_tunable32
3839{
3840	set_tunable_impl "$1" "$2" W
3841}
3842
3843function set_tunable_impl
3844{
3845	typeset name="$1"
3846	typeset value="$2"
3847	typeset mdb_cmd="$3"
3848	typeset module="${4:-zfs}"
3849
3850	eval "typeset tunable=\$$name"
3851	case "$tunable" in
3852	UNSUPPORTED)
3853		log_unsupported "Tunable '$name' is unsupported on $(uname)"
3854		;;
3855	"")
3856		log_fail "Tunable '$name' must be added to tunables.cfg"
3857		;;
3858	*)
3859		;;
3860	esac
3861
3862	[[ -z "$value" ]] && return 1
3863	[[ -z "$mdb_cmd" ]] && return 1
3864
3865	case "$(uname)" in
3866	Linux)
3867		typeset zfs_tunables="/sys/module/$module/parameters"
3868		[[ -w "$zfs_tunables/$tunable" ]] || return 1
3869		cat >"$zfs_tunables/$tunable" <<<"$value"
3870		return $?
3871		;;
3872	FreeBSD)
3873		sysctl vfs.zfs.$tunable=$value
3874		return "$?"
3875		;;
3876	SunOS)
3877		[[ "$module" == "zfs" ]] || return 1
3878		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
3879		return $?
3880		;;
3881	esac
3882}
3883
3884#
3885# Get a global system tunable
3886#
3887# $1 tunable name (use a NAME defined in tunables.cfg)
3888#
3889function get_tunable
3890{
3891	get_tunable_impl "$1"
3892}
3893
3894function get_tunable_impl
3895{
3896	typeset name="$1"
3897	typeset module="${2:-zfs}"
3898
3899	eval "typeset tunable=\$$name"
3900	case "$tunable" in
3901	UNSUPPORTED)
3902		log_unsupported "Tunable '$name' is unsupported on $(uname)"
3903		;;
3904	"")
3905		log_fail "Tunable '$name' must be added to tunables.cfg"
3906		;;
3907	*)
3908		;;
3909	esac
3910
3911	case "$(uname)" in
3912	Linux)
3913		typeset zfs_tunables="/sys/module/$module/parameters"
3914		[[ -f "$zfs_tunables/$tunable" ]] || return 1
3915		cat $zfs_tunables/$tunable
3916		return $?
3917		;;
3918	FreeBSD)
3919		sysctl -n vfs.zfs.$tunable
3920		;;
3921	SunOS)
3922		[[ "$module" == "zfs" ]] || return 1
3923		;;
3924	esac
3925
3926	return 1
3927}
3928
3929#
3930# Prints the current time in seconds since UNIX Epoch.
3931#
3932function current_epoch
3933{
3934	printf '%(%s)T'
3935}
3936
3937#
3938# Get decimal value of global uint32_t variable using mdb.
3939#
3940function mdb_get_uint32
3941{
3942	typeset variable=$1
3943	typeset value
3944
3945	value=$(mdb -k -e "$variable/X | ::eval .=U")
3946	if [[ $? -ne 0 ]]; then
3947		log_fail "Failed to get value of '$variable' from mdb."
3948		return 1
3949	fi
3950
3951	echo $value
3952	return 0
3953}
3954
3955#
3956# Set global uint32_t variable to a decimal value using mdb.
3957#
3958function mdb_set_uint32
3959{
3960	typeset variable=$1
3961	typeset value=$2
3962
3963	mdb -kw -e "$variable/W 0t$value" > /dev/null
3964	if [[ $? -ne 0 ]]; then
3965		echo "Failed to set '$variable' to '$value' in mdb."
3966		return 1
3967	fi
3968
3969	return 0
3970}
3971
3972#
3973# Set global scalar integer variable to a hex value using mdb.
3974# Note: Target should have CTF data loaded.
3975#
3976function mdb_ctf_set_int
3977{
3978	typeset variable=$1
3979	typeset value=$2
3980
3981	mdb -kw -e "$variable/z $value" > /dev/null
3982	if [[ $? -ne 0 ]]; then
3983		echo "Failed to set '$variable' to '$value' in mdb."
3984		return 1
3985	fi
3986
3987	return 0
3988}
3989
3990#
3991# Compute MD5 digest for given file or stdin if no file given.
3992# Note: file path must not contain spaces
3993#
3994function md5digest
3995{
3996	typeset file=$1
3997
3998	case $(uname) in
3999	FreeBSD)
4000		md5 -q $file
4001		;;
4002	*)
4003		md5sum -b $file | awk '{ print $1 }'
4004		;;
4005	esac
4006}
4007
4008#
4009# Compute SHA256 digest for given file or stdin if no file given.
4010# Note: file path must not contain spaces
4011#
4012function sha256digest
4013{
4014	typeset file=$1
4015
4016	case $(uname) in
4017	FreeBSD)
4018		sha256 -q $file
4019		;;
4020	*)
4021		sha256sum -b $file | awk '{ print $1 }'
4022		;;
4023	esac
4024}
4025
4026function new_fs #<args>
4027{
4028	case $(uname) in
4029	FreeBSD)
4030		newfs "$@"
4031		;;
4032	*)
4033		echo y | newfs -v "$@"
4034		;;
4035	esac
4036}
4037
4038function stat_size #<path>
4039{
4040	typeset path=$1
4041
4042	case $(uname) in
4043	FreeBSD)
4044		stat -f %z "$path"
4045		;;
4046	*)
4047		stat -c %s "$path"
4048		;;
4049	esac
4050}
4051
4052# Run a command as if it were being run in a TTY.
4053#
4054# Usage:
4055#
4056#    faketty command
4057#
4058function faketty
4059{
4060    if is_freebsd; then
4061        script -q /dev/null env "$@"
4062    else
4063        script --return --quiet -c "$*" /dev/null
4064    fi
4065}
4066
4067#
4068# Produce a random permutation of the integers in a given range (inclusive).
4069#
4070function range_shuffle # begin end
4071{
4072	typeset -i begin=$1
4073	typeset -i end=$2
4074
4075	seq ${begin} ${end} | sort -R
4076}
4077
4078#
4079# Cross-platform xattr helpers
4080#
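# Example usage (illustrative sketch; the attribute name, value and test file
# are arbitrary):
#
#	log_must set_xattr testattr testval $TESTDIR/file
#	[[ $(get_xattr testattr $TESTDIR/file) == "testval" ]] ||
#	    log_fail "unexpected xattr value"
#	log_must rm_xattr testattr $TESTDIR/file
#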
4081
4082function get_xattr # name path
4083{
4084	typeset name=$1
4085	typeset path=$2
4086
4087	case $(uname) in
4088	FreeBSD)
4089		getextattr -qq user "${name}" "${path}"
4090		;;
4091	*)
4092		attr -qg "${name}" "${path}"
4093		;;
4094	esac
4095}
4096
4097function set_xattr # name value path
4098{
4099	typeset name=$1
4100	typeset value=$2
4101	typeset path=$3
4102
4103	case $(uname) in
4104	FreeBSD)
4105		setextattr user "${name}" "${value}" "${path}"
4106		;;
4107	*)
4108		attr -qs "${name}" -V "${value}" "${path}"
4109		;;
4110	esac
4111}
4112
4113function set_xattr_stdin # name value
4114{
4115	typeset name=$1
4116	typeset path=$2
4117
4118	case $(uname) in
4119	FreeBSD)
4120		setextattr -i user "${name}" "${path}"
4121		;;
4122	*)
4123		attr -qs "${name}" "${path}"
4124		;;
4125	esac
4126}
4127
4128function rm_xattr # name path
4129{
4130	typeset name=$1
4131	typeset path=$2
4132
4133	case $(uname) in
4134	FreeBSD)
4135		rmextattr -q user "${name}" "${path}"
4136		;;
4137	*)
4138		attr -qr "${name}" "${path}"
4139		;;
4140	esac
4141}
4142
4143function ls_xattr # path
4144{
4145	typeset path=$1
4146
4147	case $(uname) in
4148	FreeBSD)
4149		lsextattr -qq user "${path}"
4150		;;
4151	*)
4152		attr -ql "${path}"
4153		;;
4154	esac
4155}
4156
4157function kstat # stat flags?
4158{
4159	typeset stat=$1
4160	typeset flags=${2-"-n"}
4161
4162	case $(uname) in
4163	FreeBSD)
4164		sysctl $flags kstat.zfs.misc.$stat
4165		;;
4166	Linux)
4167		typeset zfs_kstat="/proc/spl/kstat/zfs/$stat"
4168		[[ -f "$zfs_kstat" ]] || return 1
4169		cat $zfs_kstat
4170		;;
4171	*)
4172		false
4173		;;
4174	esac
4175}
4176
4177function get_arcstat # stat
4178{
4179	typeset stat=$1
4180
4181	case $(uname) in
4182	FreeBSD)
4183		kstat arcstats.$stat
4184		;;
4185	Linux)
4186		kstat arcstats | awk "/$stat/ { print \$3 }"
4187		;;
4188	*)
4189		false
4190		;;
4191	esac
4192}
4193
4194#
4195# Wait for the specified arcstat to reach non-zero quiescence.
4196# If echo is 1, echo the value after reaching quiescence; otherwise,
4197# if echo is 0, print the name of the arcstat we are waiting on.
4198#
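# Example usage (illustrative sketch; "l2_size" is one of the statistics
# reported by the arcstats kstat and is only meaningful with an L2ARC
# device configured):
#
#	arcstat_quiescence_noecho l2_size
#	typeset l2sz=$(arcstat_quiescence_echo l2_size)
#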
4199function arcstat_quiescence # stat echo
4200{
4201	typeset stat=$1
4202	typeset echo=$2
4203	typeset do_once=true
4204
4205	if [[ $echo -eq 0 ]]; then
4206		echo "Waiting for arcstat $1 quiescence."
4207	fi
4208
4209	while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
4210		typeset stat1=$(get_arcstat $stat)
4211		sleep 2
4212		typeset stat2=$(get_arcstat $stat)
4213		do_once=false
4214	done
4215
4216	if [[ $echo -eq 1 ]]; then
4217		echo $stat2
4218	fi
4219}
4220
4221function arcstat_quiescence_noecho # stat
4222{
4223	typeset stat=$1
4224	arcstat_quiescence $stat 0
4225}
4226
4227function arcstat_quiescence_echo # stat
4228{
4229	typeset stat=$1
4230	arcstat_quiescence $stat 1
4231}
4232
4233#
4234# Given an array of pids, wait until all processes
4235# have completed and check their return status.
4236#
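# Example usage (illustrative sketch; "do_work" stands in for any helper that
# runs in the background):
#
#	typeset -a pids
#	for i in 1 2 3; do
#		do_work $TESTDIR/file.$i &
#		pids+=($!)
#	done
#	log_must wait_for_children "${pids[@]}"
#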
4237function wait_for_children #children
4238{
4239	rv=0
4240	children=("$@")
4241	for child in "${children[@]}"
4242	do
4243		child_exit=0
4244		wait ${child} || child_exit=$?
4245		if [ $child_exit -ne 0 ]; then
4246			echo "child ${child} failed with ${child_exit}"
4247			rv=1
4248		fi
4249	done
4250	return $rv
4251}
4252