xref: /freebsd/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib (revision 5def4c47d4bd90b209b9b4a4ba9faec15846d8fd)
1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
24# Copyright (c) 2012, 2020, Delphix. All rights reserved.
25# Copyright (c) 2017, Tim Chase. All rights reserved.
26# Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
27# Copyright (c) 2017, Lawrence Livermore National Security LLC.
28# Copyright (c) 2017, Datto Inc. All rights reserved.
29# Copyright (c) 2017, Open-E Inc. All rights reserved.
30# Use is subject to license terms.
31#
32
33. ${STF_TOOLS}/include/logapi.shlib
34. ${STF_SUITE}/include/math.shlib
35. ${STF_SUITE}/include/blkdev.shlib
36
37. ${STF_SUITE}/include/tunables.cfg
38
39#
40# Apply constrained path when available.  This is required since the
41# PATH may have been modified by sudo's secure_path behavior.
42#
43if [ -n "$STF_PATH" ]; then
44	export PATH="$STF_PATH"
45fi
46
47#
48# Generic dot version comparison function
49#
50# Returns success when version $1 is greater than or equal to $2.
51#
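# Example (illustrative version strings):
#
#   compare_version_gte "2.1.5" "2.1.0" && echo "version is new enough"
#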
52function compare_version_gte
53{
54	if [[ "$(printf "$1\n$2" | sort -V | tail -n1)" == "$1" ]]; then
55		return 0
56	else
57		return 1
58	fi
59}
60
61# Linux kernel version comparison function
62#
63# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
64#
65# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
66#
67function linux_version
68{
69	typeset ver="$1"
70
71	[[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
72
73	typeset version=$(echo $ver | cut -d '.' -f 1)
74	typeset major=$(echo $ver | cut -d '.' -f 2)
75	typeset minor=$(echo $ver | cut -d '.' -f 3)
76
77	[[ -z "$version" ]] && version=0
78	[[ -z "$major" ]] && major=0
79	[[ -z "$minor" ]] && minor=0
80
81	echo $((version * 10000 + major * 100 + minor))
82}
83
84# Determine if this is a Linux test system
85#
86# Return 0 if the platform is Linux, 1 otherwise
87
88function is_linux
89{
90	if [[ $(uname -o) == "GNU/Linux" ]]; then
91		return 0
92	else
93		return 1
94	fi
95}
96
97# Determine if this is an illumos test system
98#
99# Return 0 if the platform is illumos, 1 otherwise
100function is_illumos
101{
102	if [[ $(uname -o) == "illumos" ]]; then
103		return 0
104	else
105		return 1
106	fi
107}
108
109# Determine if this is a FreeBSD test system
110#
111# Return 0 if the platform is FreeBSD, 1 otherwise
112
113function is_freebsd
114{
115	if [[ $(uname -o) == "FreeBSD" ]]; then
116		return 0
117	else
118		return 1
119	fi
120}
121
122# Determine if this is a DilOS test system
123#
124# Return 0 if the platform is DilOS, 1 otherwise
125
126function is_dilos
127{
128	typeset ID=""
129	[[ -f /etc/os-release ]] && . /etc/os-release
130	if [[ $ID == "dilos" ]]; then
131		return 0
132	else
133		return 1
134	fi
135}
136
137# Determine if this is a 32-bit system
138#
139# Return 0 if the platform is 32-bit, 1 otherwise
140
141function is_32bit
142{
143	if [[ $(getconf LONG_BIT) == "32" ]]; then
144		return 0
145	else
146		return 1
147	fi
148}
149
150# Determine if kmemleak is enabled
151#
152# Return 0 if kmemleak is enabled, 1 otherwise
153
154function is_kmemleak
155{
156	if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
157		return 0
158	else
159		return 1
160	fi
161}
162
163# Determine whether a dataset is mounted
164#
165# $1 dataset name
166# $2 filesystem type; optional - defaulted to zfs
167#
168# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
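#
# Examples (suite variables; the zvol check is only illustrative):
#
#   ismounted $TESTPOOL/$TESTFS        && echo "filesystem is mounted"
#   ismounted $TESTPOOL/$TESTVOL zvol  || echo "zvol is not mounted"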
169
170function ismounted
171{
172	typeset fstype=$2
173	[[ -z $fstype ]] && fstype=zfs
174	typeset out dir name ret
175
176	case $fstype in
177		zfs)
178			if [[ "$1" == "/"* ]] ; then
179				for out in $(zfs mount | awk '{print $2}'); do
180					[[ $1 == $out ]] && return 0
181				done
182			else
183				for out in $(zfs mount | awk '{print $1}'); do
184					[[ $1 == $out ]] && return 0
185				done
186			fi
187		;;
188		ufs|nfs)
189			if is_freebsd; then
190				mount -pt $fstype | while read dev dir _t _flags; do
191					[[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
192				done
193			else
194				out=$(df -F $fstype $1 2>/dev/null)
195				ret=$?
196				(($ret != 0)) && return $ret
197
198				dir=${out%%\(*}
199				dir=${dir%% *}
200				name=${out##*\(}
201				name=${name%%\)*}
202				name=${name%% *}
203
204				[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
205			fi
206		;;
207		ext*)
208			out=$(df -t $fstype $1 2>/dev/null)
209			return $?
210		;;
211		zvol)
212			if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
213				link=$(readlink -f $ZVOL_DEVDIR/$1)
214				[[ -n "$link" ]] && \
215					mount | grep -q "^$link" && \
216						return 0
217			fi
218		;;
219	esac
220
221	return 1
222}
223
224# Return 0 if a dataset is mounted; 1 otherwise
225#
226# $1 dataset name
227# $2 filesystem type; optional - defaulted to zfs
228
229function mounted
230{
231	ismounted $1 $2
232	(($? == 0)) && return 0
233	return 1
234}
235
236# Return 0 if a dataset is unmounted; 1 otherwise
237#
238# $1 dataset name
239# $2 filesystem type; optional - defaulted to zfs
240
241function unmounted
242{
243	ismounted $1 $2
244	(($? == 1)) && return 0
245	return 1
246}
247
248# split line on ","
249#
250# $1 - line to split
251
252function splitline
253{
254	echo $1 | sed "s/,/ /g"
255}
256
257function default_setup
258{
259	default_setup_noexit "$@"
260
261	log_pass
262}
263
264function default_setup_no_mountpoint
265{
266	default_setup_noexit "$1" "$2" "$3" "yes"
267
268	log_pass
269}
270
271#
272# Given a list of disks, set up storage pools and datasets.
273#
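# Example (illustrative; $DISKS is the suite-provided disk list):
#
#   default_setup_noexit "$DISKS"                 # pool and filesystem only
#   default_setup_noexit "$DISKS" "true" "true"   # also a container and volume
#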
274function default_setup_noexit
275{
276	typeset disklist=$1
277	typeset container=$2
278	typeset volume=$3
279	typeset no_mountpoint=$4
280	log_note begin default_setup_noexit
281
282	if is_global_zone; then
283		if poolexists $TESTPOOL ; then
284			destroy_pool $TESTPOOL
285		fi
286		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
287		log_must zpool create -f $TESTPOOL $disklist
288	else
289		reexport_pool
290	fi
291
292	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
293	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
294
295	log_must zfs create $TESTPOOL/$TESTFS
296	if [[ -z $no_mountpoint ]]; then
297		log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
298	fi
299
300	if [[ -n $container ]]; then
301		rm -rf $TESTDIR1  || \
302			log_unresolved Could not remove $TESTDIR1
303		mkdir -p $TESTDIR1 || \
304			log_unresolved Could not create $TESTDIR1
305
306		log_must zfs create $TESTPOOL/$TESTCTR
307		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
308		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
309		if [[ -z $no_mountpoint ]]; then
310			log_must zfs set mountpoint=$TESTDIR1 \
311			    $TESTPOOL/$TESTCTR/$TESTFS1
312		fi
313	fi
314
315	if [[ -n $volume ]]; then
316		if is_global_zone ; then
317			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
318			block_device_wait
319		else
320			log_must zfs create $TESTPOOL/$TESTVOL
321		fi
322	fi
323}
324
325#
326# Given a list of disks, set up a storage pool, a file system and
327# a container.
328#
329function default_container_setup
330{
331	typeset disklist=$1
332
333	default_setup "$disklist" "true"
334}
335
336#
337# Given a list of disks, set up a storage pool, a file system
338# and a volume.
339#
340function default_volume_setup
341{
342	typeset disklist=$1
343
344	default_setup "$disklist" "" "true"
345}
346
347#
348# Given a list of disks, set up a storage pool, a file system,
349# a container and a volume.
350#
351function default_container_volume_setup
352{
353	typeset disklist=$1
354
355	default_setup "$disklist" "true" "true"
356}
357
358#
359# Create a snapshot on a filesystem or volume. By default, create the
360# snapshot on a filesystem.
361#
362# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
363# $2 snapshot name. Default, $TESTSNAP
364#
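# Example (suite defaults; the volume snapshot name is only illustrative):
#
#   create_snapshot                           # $TESTPOOL/$TESTFS@$TESTSNAP
#   create_snapshot $TESTPOOL/$TESTVOL snap1
#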
365function create_snapshot
366{
367	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
368	typeset snap=${2:-$TESTSNAP}
369
370	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
371	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
372
373	if snapexists $fs_vol@$snap; then
374		log_fail "$fs_vol@$snap already exists."
375	fi
376	datasetexists $fs_vol || \
377		log_fail "$fs_vol must exist."
378
379	log_must zfs snapshot $fs_vol@$snap
380}
381
382#
383# Create a clone from a snapshot, default clone name is $TESTCLONE.
384#
385# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
386# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
387#
388function create_clone   # snapshot clone
389{
390	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
391	typeset clone=${2:-$TESTPOOL/$TESTCLONE}
392
393	[[ -z $snap ]] && \
394		log_fail "Snapshot name is undefined."
395	[[ -z $clone ]] && \
396		log_fail "Clone name is undefined."
397
398	log_must zfs clone $snap $clone
399}
400
401#
402# Create a bookmark of the given snapshot.  By default, create the bookmark
403# on a filesystem.
404#
405# $1 Existing filesystem or volume name. Default, $TESTFS
406# $2 Existing snapshot name. Default, $TESTSNAP
407# $3 bookmark name. Default, $TESTBKMARK
408#
409function create_bookmark
410{
411	typeset fs_vol=${1:-$TESTFS}
412	typeset snap=${2:-$TESTSNAP}
413	typeset bkmark=${3:-$TESTBKMARK}
414
415	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
416	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
417	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
418
419	if bkmarkexists $fs_vol#$bkmark; then
420		log_fail "$fs_vol#$bkmark already exists."
421	fi
422	datasetexists $fs_vol || \
423		log_fail "$fs_vol must exist."
424	snapexists $fs_vol@$snap || \
425		log_fail "$fs_vol@$snap must exist."
426
427	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
428}
429
430#
431# Create a temporary clone result of an interrupted resumable 'zfs receive'
432# $1 Destination filesystem name. Must not exist, will be created as the result
433#    of this function along with its %recv temporary clone
434# $2 Source filesystem name. Must not exist, will be created and destroyed
435#
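# Example (the destination filesystem name below is only illustrative):
#
#   create_recv_clone $TESTPOOL/recvfs
#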
436function create_recv_clone
437{
438	typeset recvfs="$1"
439	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
440	typeset snap="$sendfs@snap1"
441	typeset incr="$sendfs@snap2"
442	typeset mountpoint="$TESTDIR/create_recv_clone"
443	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
444
445	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
446
447	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
448	datasetexists $sendfs && log_fail "Send filesystem must not exist."
449
450	log_must zfs create -o mountpoint="$mountpoint" $sendfs
451	log_must zfs snapshot $snap
452	log_must eval "zfs send $snap | zfs recv -u $recvfs"
453	log_must mkfile 1m "$mountpoint/data"
454	log_must zfs snapshot $incr
455	log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
456	    iflag=fullblock > $sendfile"
457	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
458	destroy_dataset "$sendfs" "-r"
459	log_must rm -f "$sendfile"
460
461	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
462		log_fail "Error creating temporary $recvfs/%recv clone"
463	fi
464}
465
466function default_mirror_setup
467{
468	default_mirror_setup_noexit $1 $2 $3
469
470	log_pass
471}
472
473#
474# Given a pair of disks, set up a storage pool and dataset for the mirror
475# @parameters: $1 the primary side of the mirror
476#   $2 the secondary side of the mirror
477# @uses: ZPOOL ZFS TESTPOOL TESTFS
478function default_mirror_setup_noexit
479{
480	readonly func="default_mirror_setup_noexit"
481	typeset primary=$1
482	typeset secondary=$2
483
484	[[ -z $primary ]] && \
485		log_fail "$func: No parameters passed"
486	[[ -z $secondary ]] && \
487		log_fail "$func: No secondary partition passed"
488	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
489	log_must zpool create -f $TESTPOOL mirror $@
490	log_must zfs create $TESTPOOL/$TESTFS
491	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
492}
493
494#
495# Create a number of mirrors.
496# We create a number ($1) of two-way mirrors using the pairs of disks named
497# on the command line. These mirrors are *not* mounted.
498# @parameters: $1 the number of mirrors to create
499#  $... the devices to use to create the mirrors on
500# @uses: ZPOOL ZFS TESTPOOL
501function setup_mirrors
502{
503	typeset -i nmirrors=$1
504
505	shift
506	while ((nmirrors > 0)); do
507		log_must test -n "$1" -a -n "$2"
508		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
509		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
510		shift 2
511		((nmirrors = nmirrors - 1))
512	done
513}
514
515#
516# Create a number of raidz pools.
517# We create a number ($1) of two-disk raidz pools using the pairs of disks
518# named on the command line. These pools are *not* mounted.
519# @parameters: $1 the number of pools to create
520#  $... the devices to use to create the pools on
521# @uses: ZPOOL ZFS TESTPOOL
522function setup_raidzs
523{
524	typeset -i nraidzs=$1
525
526	shift
527	while ((nraidzs > 0)); do
528		log_must test -n "$1" -a -n "$2"
529		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
530		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
531		shift 2
532		((nraidzs = nraidzs - 1))
533	done
534}
535
536#
537# Destroy the configured testpool mirrors.
538# the mirrors are of the form ${TESTPOOL}{number}
539# @uses: ZPOOL ZFS TESTPOOL
540function destroy_mirrors
541{
542	default_cleanup_noexit
543
544	log_pass
545}
546
547#
548# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
549# $1 the list of disks
550#
551function default_raidz_setup
552{
553	typeset disklist="$*"
554	disks=(${disklist[*]})
555
556	if [[ ${#disks[*]} -lt 2 ]]; then
557		log_fail "A raid-z requires a minimum of two disks."
558	fi
559
560	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
561	log_must zpool create -f $TESTPOOL raidz $disklist
562	log_must zfs create $TESTPOOL/$TESTFS
563	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
564
565	log_pass
566}
567
568#
569# Common function used to clean up storage pools and datasets.
570#
571# Invoked at the start of the test suite to ensure the system
572# is in a known state, and also at the end of each set of
573# sub-tests to ensure errors from one set of tests don't
574# impact the execution of the next set.
575
576function default_cleanup
577{
578	default_cleanup_noexit
579
580	log_pass
581}
582
583#
584# Utility function used to list all available pool names.
585#
586# NOTE: $KEEP is a variable containing pool names, separated by a newline
587# character, that must be excluded from the returned list.
588#
589function get_all_pools
590{
591	zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
592}
593
594function default_cleanup_noexit
595{
596	typeset pool=""
597	#
598	# Destroying the pool will also destroy any
599	# filesystems it contains.
600	#
601	if is_global_zone; then
602		zfs unmount -a > /dev/null 2>&1
603		ALL_POOLS=$(get_all_pools)
604		# Here, we loop through the pools we're allowed to
605		# destroy, only destroying them if it's safe to do
606		# so.
607		while [ -n "${ALL_POOLS}" ]
608		do
609			for pool in ${ALL_POOLS}
610			do
611				if safe_to_destroy_pool $pool ;
612				then
613					destroy_pool $pool
614				fi
615			done
616			ALL_POOLS=$(get_all_pools)
617		done
618
619		zfs mount -a
620	else
621		typeset fs=""
622		for fs in $(zfs list -H -o name \
623		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
624			destroy_dataset "$fs" "-Rf"
625		done
626
627		# Need cleanup here to avoid garbage dir left.
628		for fs in $(zfs list -H -o name); do
629			[[ $fs == /$ZONE_POOL ]] && continue
630			[[ -d $fs ]] && log_must rm -rf $fs/*
631		done
632
633		#
634		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
635		# the default value
636		#
637		for fs in $(zfs list -H -o name); do
638			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
639				log_must zfs set reservation=none $fs
640				log_must zfs set recordsize=128K $fs
641				log_must zfs set mountpoint=/$fs $fs
642				typeset enc=""
643				enc=$(get_prop encryption $fs)
644				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
645					[[ "$enc" == "off" ]]; then
646					log_must zfs set checksum=on $fs
647				fi
648				log_must zfs set compression=off $fs
649				log_must zfs set atime=on $fs
650				log_must zfs set devices=off $fs
651				log_must zfs set exec=on $fs
652				log_must zfs set setuid=on $fs
653				log_must zfs set readonly=off $fs
654				log_must zfs set snapdir=hidden $fs
655				log_must zfs set aclmode=groupmask $fs
656				log_must zfs set aclinherit=secure $fs
657			fi
658		done
659	fi
660
661	[[ -d $TESTDIR ]] && \
662		log_must rm -rf $TESTDIR
663
664	disk1=${DISKS%% *}
665	if is_mpath_device $disk1; then
666		delete_partitions
667	fi
668
669	rm -f $TEST_BASE_DIR/{err,out}
670}
671
672
673#
674# Common function used to clean up storage pools, file systems
675# and containers.
676#
677function default_container_cleanup
678{
679	if ! is_global_zone; then
680		reexport_pool
681	fi
682
683	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
684	[[ $? -eq 0 ]] && \
685	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
686
687	destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
688	destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
689
690	[[ -e $TESTDIR1 ]] && \
691	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1
692
693	default_cleanup
694}
695
696#
697# Common function used to clean up a snapshot of a file system or volume.
698# Defaults to deleting the file system's snapshot.
699#
700# $1 snapshot name
701#
702function destroy_snapshot
703{
704	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
705
706	if ! snapexists $snap; then
707		log_fail "'$snap' does not exist."
708	fi
709
710	#
711# The value returned by 'get_prop' is not the real mountpoint when the
712# snapshot is unmounted. So, first check and make sure this snapshot has
713# been mounted on the current system.
714	#
715	typeset mtpt=""
716	if ismounted $snap; then
717		mtpt=$(get_prop mountpoint $snap)
718		(($? != 0)) && \
719			log_fail "get_prop mountpoint $snap failed."
720	fi
721
722	destroy_dataset "$snap"
723	[[ $mtpt != "" && -d $mtpt ]] && \
724		log_must rm -rf $mtpt
725}
726
727#
728# Common function used to clean up a clone.
729#
730# $1 clone name
731#
732function destroy_clone
733{
734	typeset clone=${1:-$TESTPOOL/$TESTCLONE}
735
736	if ! datasetexists $clone; then
737		log_fail "'$clone' does not exist."
738	fi
739
740	# For the same reason as in destroy_snapshot
741	typeset mtpt=""
742	if ismounted $clone; then
743		mtpt=$(get_prop mountpoint $clone)
744		(($? != 0)) && \
745			log_fail "get_prop mountpoint $clone failed."
746	fi
747
748	destroy_dataset "$clone"
749	[[ $mtpt != "" && -d $mtpt ]] && \
750		log_must rm -rf $mtpt
751}
752
753#
754# Common function used to clean up a bookmark of a file system or volume.
755# Defaults to deleting the file system's bookmark.
756#
757# $1 bookmark name
758#
759function destroy_bookmark
760{
761	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
762
763	if ! bkmarkexists $bkmark; then
764		log_fail "'$bkmark' does not exist."
765	fi
766
767	destroy_dataset "$bkmark"
768}
769
770# Return 0 if a snapshot exists; $? otherwise
771#
772# $1 - snapshot name
773
774function snapexists
775{
776	zfs list -H -t snapshot "$1" > /dev/null 2>&1
777	return $?
778}
779
780#
781# Return 0 if a bookmark exists; $? otherwise
782#
783# $1 - bookmark name
784#
785function bkmarkexists
786{
787	zfs list -H -t bookmark "$1" > /dev/null 2>&1
788	return $?
789}
790
791#
792# Return 0 if a hold exists; $? otherwise
793#
794# $1 - hold tag
795# $2 - snapshot name
796#
797function holdexists
798{
799	zfs holds "$2" | awk '{ print $2 }' | grep "$1" > /dev/null 2>&1
800	return $?
801}
802
803#
804# Set a property to a certain value on a dataset.
805# Sets a property of the dataset to the value as passed in.
806# @param:
807#	$1 dataset whose property is being set
808#	$2 property to set
809#	$3 value to set property to
810# @return:
811#	0 if the property could be set.
812#	non-zero otherwise.
813# @use: ZFS
814#
815function dataset_setprop
816{
817	typeset fn=dataset_setprop
818
819	if (($# < 3)); then
820		log_note "$fn: Insufficient parameters (need 3, had $#)"
821		return 1
822	fi
823	typeset output=
824	output=$(zfs set $2=$3 $1 2>&1)
825	typeset rv=$?
826	if ((rv != 0)); then
827		log_note "Setting property on $1 failed."
828		log_note "property $2=$3"
829		log_note "Return Code: $rv"
830		log_note "Output: $output"
831		return $rv
832	fi
833	return 0
834}
835
836#
837# Assign suite defined dataset properties.
838# This function is used to apply the suite's defined default set of
839# properties to a dataset.
840# @parameters: $1 dataset to use
841# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
842# @returns:
843#   0 if the dataset has been altered.
844#   1 if no pool name was passed in.
845#   2 if the dataset could not be found.
846#   3 if the dataset could not have its properties set.
847#
848function dataset_set_defaultproperties
849{
850	typeset dataset="$1"
851
852	[[ -z $dataset ]] && return 1
853
854	typeset confset=
855	typeset -i found=0
856	for confset in $(zfs list); do
857		if [[ $dataset = $confset ]]; then
858			found=1
859			break
860		fi
861	done
862	[[ $found -eq 0 ]] && return 2
863	if [[ -n $COMPRESSION_PROP ]]; then
864		dataset_setprop $dataset compression $COMPRESSION_PROP || \
865			return 3
866		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
867	fi
868	if [[ -n $CHECKSUM_PROP ]]; then
869		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
870			return 3
871		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
872	fi
873	return 0
874}
875
876#
877# Check a numeric assertion
878# @parameter: $@ the assertion to check
879# @output: big loud notice if assertion failed
880# @use: log_fail
881#
882function assert
883{
884	(($@)) || log_fail "$@"
885}
886
887#
888# Function to format the partition size of a disk.
889# Given a disk (cxtxdx), reduce all of its partitions
890# to size 0.
891#
892function zero_partitions #<whole_disk_name>
893{
894	typeset diskname=$1
895	typeset i
896
897	if is_freebsd; then
898		gpart destroy -F $diskname
899	elif is_linux; then
900		DSK=$DEV_DSKDIR/$diskname
901		DSK=$(echo $DSK | sed -e "s|//|/|g")
902		log_must parted $DSK -s -- mklabel gpt
903		blockdev --rereadpt $DSK 2>/dev/null
904		block_device_wait
905	else
906		for i in 0 1 3 4 5 6 7
907		do
908			log_must set_partition $i "" 0mb $diskname
909		done
910	fi
911
912	return 0
913}
914
915#
916# Given a slice, size and disk, this function
917# formats the slice to the specified size.
918# Size should be specified with units as per
919# the `format` command requirements, e.g. 100mb 3gb
920#
921# NOTE: This entire interface is problematic for the Linux parted utility
922# which requires the end of the partition to be specified.  It would be
923# best to retire this interface and replace it with something more flexible.
924# At the moment a best effort is made.
925#
926# arguments: <slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
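#
# Examples (hypothetical disk name; "$cyl" as returned by get_endslice):
#
#   set_partition 0 "" 100mb sdb      # first slice, default start
#   set_partition 1 "$cyl" 2gb sdb    # next slice, starting at cylinder $cyl
#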
927function set_partition
928{
929	typeset -i slicenum=$1
930	typeset start=$2
931	typeset size=$3
932	typeset disk=${4#$DEV_DSKDIR/}
933	disk=${disk#$DEV_RDSKDIR/}
934
935	case "$(uname)" in
936	Linux)
937		if [[ -z $size || -z $disk ]]; then
938			log_fail "The size or disk name is unspecified."
939		fi
940		disk=$DEV_DSKDIR/$disk
941		typeset size_mb=${size%%[mMgG]}
942
943		size_mb=${size_mb%%[mMgG][bB]}
944		if [[ $size == *[gG]* ]]; then
945			((size_mb = size_mb * 1024))
946		fi
947
948		# Create GPT partition table when setting slice 0 or
949		# when the device doesn't already contain a GPT label.
950		parted $disk -s -- print 1 >/dev/null
951		typeset ret_val=$?
952		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
953			parted $disk -s -- mklabel gpt
954			if [[ $? -ne 0 ]]; then
955				log_note "Failed to create GPT partition table on $disk"
956				return 1
957			fi
958		fi
959
960		# When no start is given align on the first cylinder.
961		if [[ -z "$start" ]]; then
962			start=1
963		fi
964
965		# Determine the cylinder size for the device and using
966		# that calculate the end offset in cylinders.
967		typeset -i cly_size_kb=0
968		cly_size_kb=$(parted -m $disk -s -- \
969			unit cyl print | head -3 | tail -1 | \
970			awk -F '[:k.]' '{print $4}')
971		((end = (size_mb * 1024 / cly_size_kb) + start))
972
973		parted $disk -s -- \
974		    mkpart part$slicenum ${start}cyl ${end}cyl
975		typeset ret_val=$?
976		if [[ $ret_val -ne 0 ]]; then
977			log_note "Failed to create partition $slicenum on $disk"
978			return 1
979		fi
980
981		blockdev --rereadpt $disk 2>/dev/null
982		block_device_wait $disk
983		;;
984	FreeBSD)
985		if [[ -z $size || -z $disk ]]; then
986			log_fail "The size or disk name is unspecified."
987		fi
988		disk=$DEV_DSKDIR/$disk
989
990		if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
991			gpart destroy -F $disk >/dev/null 2>&1
992			gpart create -s GPT $disk
993			if [[ $? -ne 0 ]]; then
994				log_note "Failed to create GPT partition table on $disk"
995				return 1
996			fi
997		fi
998
999		typeset index=$((slicenum + 1))
1000
1001		if [[ -n $start ]]; then
1002			start="-b $start"
1003		fi
1004		gpart add -t freebsd-zfs $start -s $size -i $index $disk
1005		if [[ $? -ne 0 ]]; then
1006			log_note "Failed to create partition $slicenum on $disk"
1007			return 1
1008		fi
1009
1010		block_device_wait $disk
1011		;;
1012	*)
1013		if [[ -z $slicenum || -z $size || -z $disk ]]; then
1014			log_fail "The slice, size or disk name is unspecified."
1015		fi
1016
1017		typeset format_file=/var/tmp/format_in.$$
1018
1019		echo "partition" >$format_file
1020		echo "$slicenum" >> $format_file
1021		echo "" >> $format_file
1022		echo "" >> $format_file
1023		echo "$start" >> $format_file
1024		echo "$size" >> $format_file
1025		echo "label" >> $format_file
1026		echo "" >> $format_file
1027		echo "q" >> $format_file
1028		echo "q" >> $format_file
1029
1030		format -e -s -d $disk -f $format_file
1031		typeset ret_val=$?
1032		rm -f $format_file
1033		;;
1034	esac
1035
1036	if [[ $ret_val -ne 0 ]]; then
1037		log_note "Unable to format $disk slice $slicenum to $size"
1038		return 1
1039	fi
1040	return 0
1041}
1042
1043#
1044# Delete all partitions on all disks - this is specifically for the use of multipath
1045# devices which currently can only be used in the test suite as raw/un-partitioned
1046# devices (i.e. a zpool cannot be created on a whole mpath device that has partitions)
1047#
1048function delete_partitions
1049{
1050	typeset disk
1051
1052	if [[ -z $DISKSARRAY ]]; then
1053		DISKSARRAY=$DISKS
1054	fi
1055
1056	if is_linux; then
1057		typeset -i part
1058		for disk in $DISKSARRAY; do
1059			for (( part = 1; part < MAX_PARTITIONS; part++ )); do
1060				typeset partition=${disk}${SLICE_PREFIX}${part}
1061				parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
1062				if lsblk | grep -qF ${partition}; then
1063					log_fail "Partition ${partition} not deleted"
1064				else
1065					log_note "Partition ${partition} deleted"
1066				fi
1067			done
1068		done
1069	elif is_freebsd; then
1070		for disk in $DISKSARRAY; do
1071			if gpart destroy -F $disk; then
1072				log_note "Partitions for ${disk} deleted"
1073			else
1074				log_fail "Partitions for ${disk} not deleted"
1075			fi
1076		done
1077	fi
1078}
1079
1080#
1081# Get the end cyl of the given slice
1082#
1083function get_endslice #<disk> <slice>
1084{
1085	typeset disk=$1
1086	typeset slice=$2
1087	if [[ -z $disk || -z $slice ]] ; then
1088		log_fail "The disk name or slice number is unspecified."
1089	fi
1090
1091	case "$(uname)" in
1092	Linux)
1093		endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
1094			grep "part${slice}" | \
1095			awk '{print $3}' | \
1096			sed 's,cyl,,')
1097		((endcyl = (endcyl + 1)))
1098		;;
1099	FreeBSD)
1100		disk=${disk#/dev/zvol/}
1101		disk=${disk%p*}
1102		slice=$((slice + 1))
1103		endcyl=$(gpart show $disk | \
1104			awk -v slice=$slice '$3 == slice { print $1 + $2 }')
1105		;;
1106	*)
1107		disk=${disk#/dev/dsk/}
1108		disk=${disk#/dev/rdsk/}
1109		disk=${disk%s*}
1110
1111		typeset -i ratio=0
1112		ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
1113		    grep "sectors\/cylinder" | \
1114		    awk '{print $2}')
1115
1116		if ((ratio == 0)); then
1117			return
1118		fi
1119
1120		typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
1121		    nawk -v token="$slice" '{if ($1==token) print $6}')
1122
1123		((endcyl = (endcyl + 1) / ratio))
1124		;;
1125	esac
1126
1127	echo $endcyl
1128}
1129
1130
1131#
1132# Given a size, disk and total slice number, this function formats the
1133# disk slices from 0 to the total slice number with the same specified
1134# size.
1135#
1136function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
1137{
1138	typeset -i i=0
1139	typeset slice_size=$1
1140	typeset disk_name=$2
1141	typeset total_slices=$3
1142	typeset cyl
1143
1144	zero_partitions $disk_name
1145	while ((i < $total_slices)); do
1146		if ! is_linux; then
1147			if ((i == 2)); then
1148				((i = i + 1))
1149				continue
1150			fi
1151		fi
1152		log_must set_partition $i "$cyl" $slice_size $disk_name
1153		cyl=$(get_endslice $disk_name $i)
1154		((i = i+1))
1155	done
1156}
1157
1158#
1159# This function continues to write filenum files into dirnum directories
1160# until either file_write returns an error or the maximum number of files
1161# per directory has been written.
1162#
1163# Usage:
1164# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1165#
1166# Return value: 0 on success
1167#		non 0 on error
1168#
1169# Where :
1170#	destdir:    is the directory where everything is to be created under
1171#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
1172#	filenum:    the maximum number of files per subdirectory
1173#	bytes:	    number of bytes to write
1174#	num_writes: number of times to write out bytes
1175#	data:	    the data that will be written
1176#
1177#	E.g.
1178#	fill_fs /testdir 20 25 1024 256 0
1179#
1180# Note: bytes * num_writes equals the size of the testfile
1181#
1182function fill_fs # destdir dirnum filenum bytes num_writes data
1183{
1184	typeset destdir=${1:-$TESTDIR}
1185	typeset -i dirnum=${2:-50}
1186	typeset -i filenum=${3:-50}
1187	typeset -i bytes=${4:-8192}
1188	typeset -i num_writes=${5:-10240}
1189	typeset data=${6:-0}
1190
1191	mkdir -p $destdir/{1..$dirnum}
1192	for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
1193		file_write -o create -f $f -b $bytes -c $num_writes -d $data \
1194		|| return $?
1195	done
1196	return 0
1197}
1198
1199#
1200# Simple function to get the specified property. If unable to
1201# get the property, log a note and return 1.
1202#
1203# Note property is in 'parsable' format (-p)
1204#
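# Example:
#
#   mtpt=$(get_prop mountpoint $TESTPOOL/$TESTFS) || log_fail "get_prop failed"
#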
1205function get_prop # property dataset
1206{
1207	typeset prop_val
1208	typeset prop=$1
1209	typeset dataset=$2
1210
1211	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
1212	if [[ $? -ne 0 ]]; then
1213		log_note "Unable to get $prop property for dataset " \
1214		"$dataset"
1215		return 1
1216	fi
1217
1218	echo "$prop_val"
1219	return 0
1220}
1221
1222#
1223# Simple function to get the specified property of a pool. If unable to
1224# get the property, log a note and return 1.
1225#
1226# Note property is in 'parsable' format (-p)
1227#
1228function get_pool_prop # property pool
1229{
1230	typeset prop_val
1231	typeset prop=$1
1232	typeset pool=$2
1233
1234	if poolexists $pool ; then
1235		prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
1236			awk '{print $3}')
1237		if [[ $? -ne 0 ]]; then
1238			log_note "Unable to get $prop property for pool " \
1239			"$pool"
1240			return 1
1241		fi
1242	else
1243		log_note "Pool $pool does not exist."
1244		return 1
1245	fi
1246
1247	echo "$prop_val"
1248	return 0
1249}
1250
1251# Return 0 if a pool exists; $? otherwise
1252#
1253# $1 - pool name
1254
1255function poolexists
1256{
1257	typeset pool=$1
1258
1259	if [[ -z $pool ]]; then
1260		log_note "No pool name given."
1261		return 1
1262	fi
1263
1264	zpool get name "$pool" > /dev/null 2>&1
1265	return $?
1266}
1267
1268# Return 0 if all the specified datasets exist; $? otherwise
1269#
1270# $1-n  dataset name
1271function datasetexists
1272{
1273	if (($# == 0)); then
1274		log_note "No dataset name given."
1275		return 1
1276	fi
1277
1278	while (($# > 0)); do
1279		zfs get name $1 > /dev/null 2>&1 || \
1280			return $?
1281		shift
1282	done
1283
1284	return 0
1285}
1286
1287# return 0 if none of the specified datasets exists, otherwise return 1.
1288#
1289# $1-n  dataset name
1290function datasetnonexists
1291{
1292	if (($# == 0)); then
1293		log_note "No dataset name given."
1294		return 1
1295	fi
1296
1297	while (($# > 0)); do
1298		zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1299		    && return 1
1300		shift
1301	done
1302
1303	return 0
1304}
1305
1306function is_shared_freebsd
1307{
1308	typeset fs=$1
1309
1310	pgrep -q mountd && showmount -E | grep -qx $fs
1311}
1312
1313function is_shared_illumos
1314{
1315	typeset fs=$1
1316	typeset mtpt
1317
1318	for mtpt in `share | awk '{print $2}'` ; do
1319		if [[ $mtpt == $fs ]] ; then
1320			return 0
1321		fi
1322	done
1323
1324	typeset stat=$(svcs -H -o STA nfs/server:default)
1325	if [[ $stat != "ON" ]]; then
1326		log_note "Current nfs/server status: $stat"
1327	fi
1328
1329	return 1
1330}
1331
1332function is_shared_linux
1333{
1334	typeset fs=$1
1335	typeset mtpt
1336
1337	for mtpt in `share | awk '{print $1}'` ; do
1338		if [[ $mtpt == $fs ]] ; then
1339			return 0
1340		fi
1341	done
1342	return 1
1343}
1344
1345#
1346# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1347#
1348# Returns 0 if shared, 1 otherwise.
1349#
1350function is_shared
1351{
1352	typeset fs=$1
1353	typeset mtpt
1354
1355	if [[ $fs != "/"* ]] ; then
1356		if datasetnonexists "$fs" ; then
1357			return 1
1358		else
1359			mtpt=$(get_prop mountpoint "$fs")
1360			case $mtpt in
1361				none|legacy|-) return 1
1362					;;
1363				*)	fs=$mtpt
1364					;;
1365			esac
1366		fi
1367	fi
1368
1369	case $(uname) in
1370	FreeBSD)	is_shared_freebsd "$fs"	;;
1371	Linux)		is_shared_linux "$fs"	;;
1372	*)		is_shared_illumos "$fs"	;;
1373	esac
1374}
1375
1376function is_exported_illumos
1377{
1378	typeset fs=$1
1379	typeset mtpt
1380
1381	for mtpt in `awk '{print $1}' /etc/dfs/sharetab` ; do
1382		if [[ $mtpt == $fs ]] ; then
1383			return 0
1384		fi
1385	done
1386
1387	return 1
1388}
1389
1390function is_exported_freebsd
1391{
1392	typeset fs=$1
1393	typeset mtpt
1394
1395	for mtpt in `awk '{print $1}' /etc/zfs/exports` ; do
1396		if [[ $mtpt == $fs ]] ; then
1397			return 0
1398		fi
1399	done
1400
1401	return 1
1402}
1403
1404function is_exported_linux
1405{
1406	typeset fs=$1
1407	typeset mtpt
1408
1409	for mtpt in `awk '{print $1}' /etc/exports.d/zfs.exports` ; do
1410		if [[ $mtpt == $fs ]] ; then
1411			return 0
1412		fi
1413	done
1414
1415	return 1
1416}
1417
1418#
1419# Given a mountpoint, or a dataset name, determine if it is exported via
1420# the os-specific NFS exports file.
1421#
1422# Returns 0 if exported, 1 otherwise.
1423#
1424function is_exported
1425{
1426	typeset fs=$1
1427	typeset mtpt
1428
1429	if [[ $fs != "/"* ]] ; then
1430		if datasetnonexists "$fs" ; then
1431			return 1
1432		else
1433			mtpt=$(get_prop mountpoint "$fs")
1434			case $mtpt in
1435				none|legacy|-) return 1
1436					;;
1437				*)	fs=$mtpt
1438					;;
1439			esac
1440		fi
1441	fi
1442
1443	case $(uname) in
1444	FreeBSD)	is_exported_freebsd "$fs"	;;
1445	Linux)		is_exported_linux "$fs"	;;
1446	*)		is_exported_illumos "$fs"	;;
1447	esac
1448}
1449
1450#
1451# Given a dataset name determine if it is shared via SMB.
1452#
1453# Returns 0 if shared, 1 otherwise.
1454#
1455function is_shared_smb
1456{
1457	typeset fs=$1
1458	typeset mtpt
1459
1460	if datasetnonexists "$fs" ; then
1461		return 1
1462	else
1463		fs=$(echo $fs | sed 's@/@_@g')
1464	fi
1465
1466	if is_linux; then
1467		for mtpt in `net usershare list | awk '{print $1}'` ; do
1468			if [[ $mtpt == $fs ]] ; then
1469				return 0
1470			fi
1471		done
1472		return 1
1473	else
1474		log_note "Currently unsupported by the test framework"
1475		return 1
1476	fi
1477}
1478
1479#
1480# Given a mountpoint, determine if it is not shared via NFS.
1481#
1482# Returns 0 if not shared, 1 otherwise.
1483#
1484function not_shared
1485{
1486	typeset fs=$1
1487
1488	is_shared $fs
1489	if (($? == 0)); then
1490		return 1
1491	fi
1492
1493	return 0
1494}
1495
1496#
1497# Given a dataset determine if it is not shared via SMB.
1498#
1499# Returns 0 if not shared, 1 otherwise.
1500#
1501function not_shared_smb
1502{
1503	typeset fs=$1
1504
1505	is_shared_smb $fs
1506	if (($? == 0)); then
1507		return 1
1508	fi
1509
1510	return 0
1511}
1512
1513#
1514# Helper function to unshare a mountpoint.
1515#
1516function unshare_fs #fs
1517{
1518	typeset fs=$1
1519
1520	is_shared $fs || is_shared_smb $fs
1521	if (($? == 0)); then
1522		zfs unshare $fs || log_fail "zfs unshare $fs failed"
1523	fi
1524
1525	return 0
1526}
1527
1528#
1529# Helper function to share a NFS mountpoint.
1530#
1531function share_nfs #fs
1532{
1533	typeset fs=$1
1534
1535	if is_linux; then
1536		is_shared $fs
1537		if (($? != 0)); then
1538			log_must share "*:$fs"
1539		fi
1540	else
1541		is_shared $fs
1542		if (($? != 0)); then
1543			log_must share -F nfs $fs
1544		fi
1545	fi
1546
1547	return 0
1548}
1549
1550#
1551# Helper function to unshare a NFS mountpoint.
1552#
1553function unshare_nfs #fs
1554{
1555	typeset fs=$1
1556
1557	if is_linux; then
1558		is_shared $fs
1559		if (($? == 0)); then
1560			log_must unshare -u "*:$fs"
1561		fi
1562	else
1563		is_shared $fs
1564		if (($? == 0)); then
1565			log_must unshare -F nfs $fs
1566		fi
1567	fi
1568
1569	return 0
1570}
1571
1572#
1573# Helper function to show NFS shares.
1574#
1575function showshares_nfs
1576{
1577	if is_linux; then
1578		share -v
1579	else
1580		share -F nfs
1581	fi
1582
1583	return 0
1584}
1585
1586#
1587# Helper function to show SMB shares.
1588#
1589function showshares_smb
1590{
1591	if is_linux; then
1592		net usershare list
1593	else
1594		share -F smb
1595	fi
1596
1597	return 0
1598}
1599
1600function check_nfs
1601{
1602	if is_linux; then
1603		share -s
1604	elif is_freebsd; then
1605		showmount -e
1606	else
1607		log_unsupported "Unknown platform"
1608	fi
1609
1610	if [[ $? -ne 0 ]]; then
1611		log_unsupported "The NFS utilities are not installed"
1612	fi
1613}
1614
1615#
1616# Check NFS server status and trigger it online.
1617#
1618function setup_nfs_server
1619{
1620	# Cannot share directory in non-global zone.
1621	#
1622	if ! is_global_zone; then
1623		log_note "Cannot trigger NFS server by sharing in LZ."
1624		return
1625	fi
1626
1627	if is_linux; then
1628		#
1629		# Re-synchronize /var/lib/nfs/etab with /etc/exports and
1630# /etc/exports.d/* to provide a clean test environment.
1631		#
1632		log_must share -r
1633
1634		log_note "NFS server must be started prior to running ZTS."
1635		return
1636	elif is_freebsd; then
1637		kill -s HUP $(cat /var/run/mountd.pid)
1638
1639		log_note "NFS server must be started prior to running ZTS."
1640		return
1641	fi
1642
1643	typeset nfs_fmri="svc:/network/nfs/server:default"
1644	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1645		#
1646		# Only a real share operation can bring the NFS server
1647		# online permanently.
1648		#
1649		typeset dummy=/tmp/dummy
1650
1651		if [[ -d $dummy ]]; then
1652			log_must rm -rf $dummy
1653		fi
1654
1655		log_must mkdir $dummy
1656		log_must share $dummy
1657
1658		#
1659		# Wait for the fmri's status to reach its final state. While it
1660		# is in transition, an asterisk (*) is appended to the instance
1661		# state, and an unshare would revert the status to 'DIS' again.
1662		#
1663		# Wait for at least 1 second.
1664		#
1665		log_must sleep 1
1666		timeout=10
1667		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1668		do
1669			log_must sleep 1
1670
1671			((timeout -= 1))
1672		done
1673
1674		log_must unshare $dummy
1675		log_must rm -rf $dummy
1676	fi
1677
1678	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1679}
1680
1681#
1682# Verify whether the calling process is in the global zone
1683#
1684# Return 0 if in the global zone, 1 if in a non-global zone
1685#
1686function is_global_zone
1687{
1688	if is_linux || is_freebsd; then
1689		return 0
1690	else
1691		typeset cur_zone=$(zonename 2>/dev/null)
1692		if [[ $cur_zone != "global" ]]; then
1693			return 1
1694		fi
1695		return 0
1696	fi
1697}
1698
1699#
1700# Verify whether test is permitted to run from
1701# global zone, local zone, or both
1702#
1703# $1 zone limit, could be "global", "local", or "both"(no limit)
1704#
1705# Return 0 if permitted, otherwise exit with log_unsupported
1706#
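# Example (typical use at the top of a test script):
#
#   verify_runnable "global"
#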
1707function verify_runnable # zone limit
1708{
1709	typeset limit=$1
1710
1711	[[ -z $limit ]] && return 0
1712
1713	if is_global_zone ; then
1714		case $limit in
1715			global|both)
1716				;;
1717			local)	log_unsupported "Test is unable to run from "\
1718					"global zone."
1719				;;
1720			*)	log_note "Warning: unknown limit $limit - " \
1721					"using 'both'."
1722				;;
1723		esac
1724	else
1725		case $limit in
1726			local|both)
1727				;;
1728			global)	log_unsupported "Test is unable to run from "\
1729					"local zone."
1730				;;
1731			*)	log_note "Warning: unknown limit $limit - " \
1732					"using 'both'."
1733				;;
1734		esac
1735
1736		reexport_pool
1737	fi
1738
1739	return 0
1740}
1741
1742# Return 0 if the pool is created successfully; $? otherwise
1743# Note: In local zones, this function should return 0 silently.
1744#
1745# $1 - pool name
1746# $2-n - [keyword] devs_list
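#
# Examples (device names are hypothetical):
#
#   create_pool $TESTPOOL $DISKS
#   create_pool $TESTPOOL mirror sdb sdc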
1747
1748function create_pool #pool devs_list
1749{
1750	typeset pool=${1%%/*}
1751
1752	shift
1753
1754	if [[ -z $pool ]]; then
1755		log_note "Missing pool name."
1756		return 1
1757	fi
1758
1759	if poolexists $pool ; then
1760		destroy_pool $pool
1761	fi
1762
1763	if is_global_zone ; then
1764		[[ -d /$pool ]] && rm -rf /$pool
1765		log_must zpool create -f $pool $@
1766	fi
1767
1768	return 0
1769}
1770
1771# Return 0 if the pool is destroyed successfully; $? otherwise
1772# Note: In local zones, this function should return 0 silently.
1773#
1774# $1 - pool name
1775# Destroy pool with the given parameters.
1776
1777function destroy_pool #pool
1778{
1779	typeset pool=${1%%/*}
1780	typeset mtpt
1781
1782	if [[ -z $pool ]]; then
1783		log_note "No pool name given."
1784		return 1
1785	fi
1786
1787	if is_global_zone ; then
1788		if poolexists "$pool" ; then
1789			mtpt=$(get_prop mountpoint "$pool")
1790
1791			# At times, syseventd/udev activity can cause attempts
1792			# to destroy a pool to fail with EBUSY. We retry a few
1793			# times allowing failures before requiring the destroy
1794			# to succeed.
1795			log_must_busy zpool destroy -f $pool
1796
1797			[[ -d $mtpt ]] && \
1798				log_must rm -rf $mtpt
1799		else
1800			log_note "Pool does not exist. ($pool)"
1801			return 1
1802		fi
1803	fi
1804
1805	return 0
1806}
1807
1808# Return 0 if created successfully; $? otherwise
1809#
1810# $1 - dataset name
1811# $2-n - dataset options
1812
1813function create_dataset #dataset dataset_options
1814{
1815	typeset dataset=$1
1816
1817	shift
1818
1819	if [[ -z $dataset ]]; then
1820		log_note "Missing dataset name."
1821		return 1
1822	fi
1823
1824	if datasetexists $dataset ; then
1825		destroy_dataset $dataset
1826	fi
1827
1828	log_must zfs create $@ $dataset
1829
1830	return 0
1831}
1832
1833# Return 0 if the dataset is destroyed successfully; $? otherwise
1834# Note: In local zones, this function should return 0 silently.
1835#
1836# $1 - dataset name
1837# $2 - custom arguments for zfs destroy
1838# Destroy dataset with the given parameters.
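#
# Example (recursively destroy a dataset and its children):
#
#   destroy_dataset "$TESTPOOL/$TESTFS" "-r"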
1839
1840function destroy_dataset #dataset #args
1841{
1842	typeset dataset=$1
1843	typeset mtpt
1844	typeset args=${2:-""}
1845
1846	if [[ -z $dataset ]]; then
1847		log_note "No dataset name given."
1848		return 1
1849	fi
1850
1851	if is_global_zone ; then
1852		if datasetexists "$dataset" ; then
1853			mtpt=$(get_prop mountpoint "$dataset")
1854			log_must_busy zfs destroy $args $dataset
1855
1856			[[ -d $mtpt ]] && \
1857				log_must rm -rf $mtpt
1858		else
1859			log_note "Dataset does not exist. ($dataset)"
1860			return 1
1861		fi
1862	fi
1863
1864	return 0
1865}
1866
1867#
1868# First, create a pool with 5 datasets. Then, create a single zone and
1869# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1870# and a zvol device to the zone.
1871#
1872# $1 zone name
1873# $2 zone root directory prefix
1874# $3 zone ip
1875#
1876function zfs_zones_setup #zone_name zone_root zone_ip
1877{
1878	typeset zone_name=${1:-$(hostname)-z}
1879	typeset zone_root=${2:-"/zone_root"}
1880	typeset zone_ip=${3:-"10.1.1.10"}
1881	typeset prefix_ctr=$ZONE_CTR
1882	typeset pool_name=$ZONE_POOL
1883	typeset -i cntctr=5
1884	typeset -i i=0
1885
1886	# Create a pool and 5 containers within it
1887	#
1888	[[ -d /$pool_name ]] && rm -rf /$pool_name
1889	log_must zpool create -f $pool_name $DISKS
1890	while ((i < cntctr)); do
1891		log_must zfs create $pool_name/$prefix_ctr$i
1892		((i += 1))
1893	done
1894
1895	# create a zvol
1896	log_must zfs create -V 1g $pool_name/zone_zvol
1897	block_device_wait
1898
1899	#
1900	# Add slog device for pool
1901	#
1902	typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
1903	log_must mkfile $MINVDEVSIZE $sdevs
1904	log_must zpool add $pool_name log mirror $sdevs
1905
1906	# this isn't supported just yet.
1907	# Create a filesystem. In order to add this to
1908	# the zone, it must have its mountpoint set to 'legacy'
1909	# log_must zfs create $pool_name/zfs_filesystem
1910	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
1911
1912	[[ -d $zone_root ]] && \
1913		log_must rm -rf $zone_root/$zone_name
1914	[[ ! -d $zone_root ]] && \
1915		log_must mkdir -p -m 0700 $zone_root/$zone_name
1916
1917	# Create zone configure file and configure the zone
1918	#
1919	typeset zone_conf=/tmp/zone_conf.$$
1920	echo "create" > $zone_conf
1921	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
1922	echo "set autoboot=true" >> $zone_conf
1923	i=0
1924	while ((i < cntctr)); do
1925		echo "add dataset" >> $zone_conf
1926		echo "set name=$pool_name/$prefix_ctr$i" >> \
1927			$zone_conf
1928		echo "end" >> $zone_conf
1929		((i += 1))
1930	done
1931
1932	# add our zvol to the zone
1933	echo "add device" >> $zone_conf
1934	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
1935	echo "end" >> $zone_conf
1936
1937	# add a corresponding zvol rdsk to the zone
1938	echo "add device" >> $zone_conf
1939	echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1940	echo "end" >> $zone_conf
1941
1942	# once it's supported, we'll add our filesystem to the zone
1943	# echo "add fs" >> $zone_conf
1944	# echo "set type=zfs" >> $zone_conf
1945	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
1946	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
1947	# echo "end" >> $zone_conf
1948
1949	echo "verify" >> $zone_conf
1950	echo "commit" >> $zone_conf
1951	log_must zonecfg -z $zone_name -f $zone_conf
1952	log_must rm -f $zone_conf
1953
1954	# Install the zone
1955	zoneadm -z $zone_name install
1956	if (($? == 0)); then
1957		log_note "SUCCESS: zoneadm -z $zone_name install"
1958	else
1959		log_fail "FAIL: zoneadm -z $zone_name install"
1960	fi
1961
1962	# Install sysidcfg file
1963	#
1964	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1965	echo "system_locale=C" > $sysidcfg
1966	echo  "terminal=dtterm" >> $sysidcfg
1967	echo  "network_interface=primary {" >> $sysidcfg
1968	echo  "hostname=$zone_name" >> $sysidcfg
1969	echo  "}" >> $sysidcfg
1970	echo  "name_service=NONE" >> $sysidcfg
1971	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
1972	echo  "security_policy=NONE" >> $sysidcfg
1973	echo  "timezone=US/Eastern" >> $sysidcfg
1974
1975	# Boot this zone
1976	log_must zoneadm -z $zone_name boot
1977}
1978
1979#
1980# Reexport TESTPOOL & TESTPOOL(1-4)
1981#
1982function reexport_pool
1983{
1984	typeset -i cntctr=5
1985	typeset -i i=0
1986
1987	while ((i < cntctr)); do
1988		if ((i == 0)); then
1989			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1990			if ! ismounted $TESTPOOL; then
1991				log_must zfs mount $TESTPOOL
1992			fi
1993		else
1994			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1995			if eval ! ismounted \$TESTPOOL$i; then
1996				log_must eval zfs mount \$TESTPOOL$i
1997			fi
1998		fi
1999		((i += 1))
2000	done
2001}
2002
2003#
2004# Verify a given disk or pool state
2005#
2006# Return 0 if the pool/disk matches the expected state, 1 otherwise
2007#
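# Examples (the vdev name is hypothetical):
#
#   check_state $TESTPOOL "" "ONLINE"       # whole-pool health
#   check_state $TESTPOOL sdb1 "OFFLINE"    # single vdev state
#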
2008function check_state # pool disk state{online,offline,degraded}
2009{
2010	typeset pool=$1
2011	typeset disk=${2#$DEV_DSKDIR/}
2012	typeset state=$3
2013
2014	[[ -z $pool ]] || [[ -z $state ]] \
2015	    && log_fail "Arguments invalid or missing"
2016
2017	if [[ -z $disk ]]; then
2018		#check pool state only
2019		zpool get -H -o value health $pool \
2020		    | grep -i "$state" > /dev/null 2>&1
2021	else
2022		zpool status -v $pool | grep "$disk"  \
2023		    | grep -i "$state" > /dev/null 2>&1
2024	fi
2025
2026	return $?
2027}
2028
2029#
2030# Get the mountpoint of snapshot
2031# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
2032# as its mountpoint
2033#
2034function snapshot_mountpoint
2035{
2036	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
2037
2038	if [[ $dataset != *@* ]]; then
2039		log_fail "Invalid snapshot name '$dataset'."
2040	fi
2041
2042	typeset fs=${dataset%@*}
2043	typeset snap=${dataset#*@}
2044
2045	if [[ -z $fs || -z $snap ]]; then
2046		log_fail "Invalid snapshot name '$dataset'."
2047	fi
2048
2049	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
2050}
2051
2052#
2053# Given a device and 'ashift' value verify it's correctly set on every label
2054#
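# Example (hypothetical device path):
#
#   log_must verify_ashift /dev/sdb1 12
#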
2055function verify_ashift # device ashift
2056{
2057	typeset device="$1"
2058	typeset ashift="$2"
2059
2060	zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
2061	    if (ashift != $2)
2062	        exit 1;
2063	    else
2064	        count++;
2065	    } END {
2066	    if (count != 4)
2067	        exit 1;
2068	    else
2069	        exit 0;
2070	    }'
2071
2072	return $?
2073}
2074
2075#
2076# Given a pool and file system, this function will verify the file system
2077# using the zdb internal tool. Note that the pool is exported and imported
2078# to ensure it has consistent state.
2079#
2080function verify_filesys # pool filesystem dir
2081{
2082	typeset pool="$1"
2083	typeset filesys="$2"
2084	typeset zdbout="/tmp/zdbout.$$"
2085
2086	shift
2087	shift
2088	typeset dirs=$@
2089	typeset search_path=""
2090
2091	log_note "Calling zdb to verify filesystem '$filesys'"
2092	zfs unmount -a > /dev/null 2>&1
2093	log_must zpool export $pool
2094
2095	if [[ -n $dirs ]] ; then
2096		for dir in $dirs ; do
2097			search_path="$search_path -d $dir"
2098		done
2099	fi
2100
2101	log_must zpool import $search_path $pool
2102
2103	zdb -cudi $filesys > $zdbout 2>&1
2104	if [[ $? != 0 ]]; then
2105		log_note "Output: zdb -cudi $filesys"
2106		cat $zdbout
2107		log_fail "zdb detected errors with: '$filesys'"
2108	fi
2109
2110	log_must zfs mount -a
2111	log_must rm -rf $zdbout
2112}
2113
2114#
2115# Given a pool issue a scrub and verify that no checksum errors are reported.
2116#
2117function verify_pool
2118{
2119	typeset pool=${1:-$TESTPOOL}
2120
2121	log_must zpool scrub $pool
2122	log_must wait_scrubbed $pool
2123
2124	typeset -i cksum=$(zpool status $pool | awk '
2125	    !NF { isvdev = 0 }
2126	    isvdev { errors += $NF }
2127	    /CKSUM$/ { isvdev = 1 }
2128	    END { print errors }
2129	')
2130	if [[ $cksum != 0 ]]; then
2131		log_must zpool status -v
2132	        log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
2133	fi
2134}
2135
2136#
2137# Given a pool, this function lists all disks in the pool
2138#
2139function get_disklist # pool
2140{
2141	typeset disklist=""
2142
2143	disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
2144	    grep -v "\-\-\-\-\-" | \
2145	    egrep -v -e "^(mirror|raidz[1-3]|spare|log|cache|special|dedup)$")
2146
2147	echo $disklist
2148}
2149
2150#
2151# Given a pool, this function lists all disks in the pool with their full
2152# path (like "/dev/sda" instead of "sda").
2153#
2154function get_disklist_fullpath # pool
2155{
2156	args="-P $1"
2157	get_disklist $args
2158}
2159
2160
2161
2162# /**
2163#  This function kills a given list of processes after a time period. We use
2164#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
2165#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
2166#  would be listed as FAIL, which we don't want: we're happy with stress tests
2167#  running for a certain amount of time, then finishing.
2168#
2169# @param $1 the time in seconds after which we should terminate these processes
2170# @param $2..$n the processes we wish to terminate.
2171# */
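#
# Example (hypothetical background worker PIDs):
#
#   stress_timeout 300 $pid1 $pid2
#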
2172function stress_timeout
2173{
2174	typeset -i TIMEOUT=$1
2175	shift
2176	typeset cpids="$@"
2177
2178	log_note "Waiting for child processes ($cpids). " \
2179		"This could take many minutes, please be patient ..."
2180	log_must sleep $TIMEOUT
2181
2182	log_note "Killing child processes after ${TIMEOUT} stress timeout."
2183	typeset pid
2184	for pid in $cpids; do
2185		ps -p $pid > /dev/null 2>&1
2186		if (($? == 0)); then
2187			log_must kill -USR1 $pid
2188		fi
2189	done
2190}
2191
2192#
2193# Verify a given hotspare disk is inuse or avail
2194#
2195# Return 0 if the pool/disk matches the expected state, 1 otherwise
2196#
2197function check_hotspare_state # pool disk state{inuse,avail}
2198{
2199	typeset pool=$1
2200	typeset disk=${2#$DEV_DSKDIR/}
2201	typeset state=$3
2202
2203	cur_state=$(get_device_state $pool $disk "spares")
2204
2205	if [[ $state != ${cur_state} ]]; then
2206		return 1
2207	fi
2208	return 0
2209}
2210
2211#
2212# Wait until a hotspare transitions to a given state or times out.
2213#
2214# Return 0 when  pool/disk matches expected state, 1 on timeout.
2215#
2216function wait_hotspare_state # pool disk state timeout
2217{
2218	typeset pool=$1
2219	typeset disk=${2#*$DEV_DSKDIR/}
2220	typeset state=$3
2221	typeset timeout=${4:-60}
2222	typeset -i i=0
2223
2224	while [[ $i -lt $timeout ]]; do
2225		if check_hotspare_state $pool $disk $state; then
2226			return 0
2227		fi
2228
2229		i=$((i+1))
2230		sleep 1
2231	done
2232
2233	return 1
2234}
2235
2236#
2237# Verify a given slog disk is online, offline or unavail
2238#
2239# Return 0 if the pool/disk matches the expected state, 1 otherwise
2240#
2241function check_slog_state # pool disk state{online,offline,unavail}
2242{
2243	typeset pool=$1
2244	typeset disk=${2#$DEV_DSKDIR/}
2245	typeset state=$3
2246
2247	cur_state=$(get_device_state $pool $disk "logs")
2248
2249	if [[ $state != ${cur_state} ]]; then
2250		return 1
2251	fi
2252	return 0
2253}
2254
2255#
2256# Verify a given vdev disk is online, offline or unavail
2257#
2258# Return 0 if the pool/disk matches the expected state, 1 otherwise
2259#
2260function check_vdev_state # pool disk state{online,offline,unavail}
2261{
2262	typeset pool=$1
2263	typeset disk=${2#*$DEV_DSKDIR/}
2264	typeset state=$3
2265
2266	cur_state=$(get_device_state $pool $disk)
2267
2268	if [[ $state != ${cur_state} ]]; then
2269		return 1
2270	fi
2271	return 0
2272}
2273
2274#
2275# Wait until a vdev transitions to a given state or times out.
2276#
2277# Return 0 when  pool/disk matches expected state, 1 on timeout.
2278#
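# Example (hypothetical device; wait up to 60 seconds):
#
#   log_must wait_vdev_state $TESTPOOL sdb "ONLINE" 60
#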
2279function wait_vdev_state # pool disk state timeout
2280{
2281	typeset pool=$1
2282	typeset disk=${2#*$DEV_DSKDIR/}
2283	typeset state=$3
2284	typeset timeout=${4:-60}
2285	typeset -i i=0
2286
2287	while [[ $i -lt $timeout ]]; do
2288		if check_vdev_state $pool $disk $state; then
2289			return 0
2290		fi
2291
2292		i=$((i+1))
2293		sleep 1
2294	done
2295
2296	return 1
2297}
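
# Illustrative usage (editor's sketch; $TESTPOOL and $DISK1 are assumed to be
# provided by the calling test's setup):
#
#	log_must zpool offline $TESTPOOL $DISK1
#	wait_vdev_state $TESTPOOL $DISK1 "OFFLINE" 60 || \
#	    log_fail "$DISK1 never reported OFFLINE"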
2298
2299#
2300# Check the output of 'zpool status -v <pool>' to see if the content
2301# of <token> contains the <keyword> specified.
2302#
2303# Return 0 if it does, 1 otherwise
2304#
2305function check_pool_status # pool token keyword <verbose>
2306{
2307	typeset pool=$1
2308	typeset token=$2
2309	typeset keyword=$3
2310	typeset verbose=${4:-false}
2311
2312	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
2313		($1==token) {print $0}')
2314	if [[ $verbose == true ]]; then
2315		log_note $scan
2316	fi
2317	echo $scan | egrep -i "$keyword" > /dev/null 2>&1
2318
2319	return $?
2320}
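
# Illustrative usage (editor's sketch; assumes $TESTPOOL exists):
#
#	# Succeeds while "errors: No known data errors" appears in the output.
#	check_pool_status $TESTPOOL "errors" "No known data errors" || \
#	    log_fail "$TESTPOOL reported data errors"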
2321
2322#
2323# The following functions are instance of check_pool_status()
2324#	is_pool_resilvering - to check if the pool resilver is in progress
2325#	is_pool_resilvered - to check if the pool resilver is completed
2326#	is_pool_scrubbing - to check if the pool scrub is in progress
2327#	is_pool_scrubbed - to check if the pool scrub is completed
2328#	is_pool_scrub_stopped - to check if the pool scrub is stopped
2329#	is_pool_scrub_paused - to check if the pool scrub has paused
2330#	is_pool_removing - to check if the pool is removing a vdev
2331#	is_pool_removed - to check if the pool remove is completed
2332#	is_pool_discarding - to check if the pool checkpoint is being discarded
2333#
2334function is_pool_resilvering #pool <verbose>
2335{
2336	check_pool_status "$1" "scan" \
2337	    "resilver[ ()0-9A-Za-z:_-]* in progress since" $2
2338	return $?
2339}
2340
2341function is_pool_resilvered #pool <verbose>
2342{
2343	check_pool_status "$1" "scan" "resilvered " $2
2344	return $?
2345}
2346
2347function is_pool_scrubbing #pool <verbose>
2348{
2349	check_pool_status "$1" "scan" "scrub in progress since " $2
2350	return $?
2351}
2352
2353function is_pool_scrubbed #pool <verbose>
2354{
2355	check_pool_status "$1" "scan" "scrub repaired" $2
2356	return $?
2357}
2358
2359function is_pool_scrub_stopped #pool <verbose>
2360{
2361	check_pool_status "$1" "scan" "scrub canceled" $2
2362	return $?
2363}
2364
2365function is_pool_scrub_paused #pool <verbose>
2366{
2367	check_pool_status "$1" "scan" "scrub paused since " $2
2368	return $?
2369}
2370
2371function is_pool_removing #pool
2372{
2373	check_pool_status "$1" "remove" "in progress since "
2374	return $?
2375}
2376
2377function is_pool_removed #pool
2378{
2379	check_pool_status "$1" "remove" "completed on"
2380	return $?
2381}
2382
2383function is_pool_discarding #pool
2384{
2385	check_pool_status "$1" "checkpoint" "discarding"
2386	return $?
2387}
2388
2389function wait_for_degraded
2390{
2391	typeset pool=$1
2392	typeset timeout=${2:-30}
2393	typeset t0=$SECONDS
2394
2395	while :; do
2396		[[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
2397		log_note "$pool is not yet degraded."
2398		sleep 1
2399		if ((SECONDS - t0 > $timeout)); then
2400			log_note "$pool not degraded after $timeout seconds."
2401			return 1
2402		fi
2403	done
2404
2405	return 0
2406}
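
# Illustrative usage (editor's sketch; assumes faulting $DISK1 leaves
# $TESTPOOL importable but DEGRADED):
#
#	log_must zpool offline -f $TESTPOOL $DISK1
#	log_must wait_for_degraded $TESTPOOL 60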
2407
2408#
2409# Use create_pool()/destroy_pool() to clean up the information
2410# in the given disk to avoid slice overlapping.
2411#
2412function cleanup_devices #vdevs
2413{
2414	typeset pool="foopool$$"
2415
2416	for vdev in $@; do
2417		zero_partitions $vdev
2418	done
2419
2420	poolexists $pool && destroy_pool $pool
2421	create_pool $pool $@
2422	destroy_pool $pool
2423
2424	return 0
2425}
2426
2427#/**
2428# A function to find free disks on a system, or among the disks given
2429# as parameters. It works by excluding disks that are mounted or in
2430# use as swap or dump devices.
2431#
2432# $@ given disks to find which are free, default is all disks in
2433# the test system
2434#
2435# @return a string containing the list of available disks
2436#*/
2437function find_disks
2438{
2439	# Trust provided list, no attempt is made to locate unused devices.
2440	if is_linux || is_freebsd; then
2441		echo "$@"
2442		return
2443	fi
2444
2445
2446	sfi=/tmp/swaplist.$$
2447	dmpi=/tmp/dumpdev.$$
2448	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2449
2450	swap -l > $sfi
2451	dumpadm > $dmpi 2>/dev/null
2452
2453# write an awk script that can process the output of format
2454# to produce a list of disks we know about. Note that we have
2455# to escape "$2" so that the shell doesn't interpret it while
2456# we're creating the awk script.
2457# -------------------
2458	cat > /tmp/find_disks.awk <<EOF
2459#!/bin/nawk -f
2460	BEGIN { FS="."; }
2461
2462	/^Specify disk/{
2463		searchdisks=0;
2464	}
2465
2466	{
2467		if (searchdisks && \$2 !~ "^$"){
2468			split(\$2,arr," ");
2469			print arr[1];
2470		}
2471	}
2472
2473	/^AVAILABLE DISK SELECTIONS:/{
2474		searchdisks=1;
2475	}
2476EOF
2477#---------------------
2478
2479	chmod 755 /tmp/find_disks.awk
2480	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
2481	rm /tmp/find_disks.awk
2482
2483	unused=""
2484	for disk in $disks; do
2485	# Check for mounted
2486		grep "${disk}[sp]" /etc/mnttab >/dev/null
2487		(($? == 0)) && continue
2488	# Check for swap
2489		grep "${disk}[sp]" $sfi >/dev/null
2490		(($? == 0)) && continue
2491	# check for dump device
2492		grep "${disk}[sp]" $dmpi >/dev/null
2493		(($? == 0)) && continue
2494	# check to see if this disk hasn't been explicitly excluded
2495	# by a user-set environment variable
2496		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
2497		(($? == 0)) && continue
2498		unused_candidates="$unused_candidates $disk"
2499	done
2500	rm $sfi
2501	rm $dmpi
2502
2503# now just check to see if those disks do actually exist
2504# by looking for a device pointing to the first slice in
2505# each case. limit the number to max_finddisksnum
2506	count=0
2507	for disk in $unused_candidates; do
2508		if is_disk_device $DEV_DSKDIR/${disk}s0 && \
2509		    [ $count -lt $max_finddisksnum ]; then
2510			unused="$unused $disk"
2511			# do not impose limit if $@ is provided
2512			[[ -z $@ ]] && ((count = count + 1))
2513		fi
2514	done
2515
2516# finally, return our disk list
2517	echo $unused
2518}
2519
2520function add_user_freebsd #<group_name> <user_name> <basedir>
2521{
2522	typeset group=$1
2523	typeset user=$2
2524	typeset basedir=$3
2525
2526	# Check to see if the user exists.
2527	if id $user > /dev/null 2>&1; then
2528		return 0
2529	fi
2530
2531	# Assign 1000 as the base uid
2532	typeset -i uid=1000
2533	while true; do
2534		typeset -i ret
2535		pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
2536		ret=$?
2537		case $ret in
2538			0) break ;;
2539			# The uid is not unique
2540			65) ((uid += 1)) ;;
2541			*) return 1 ;;
2542		esac
2543		if [[ $uid == 65000 ]]; then
2544			log_fail "No user id available under 65000 for $user"
2545		fi
2546	done
2547
2548	# Silence MOTD
2549	touch $basedir/$user/.hushlogin
2550
2551	return 0
2552}
2553
2554#
2555# Delete the specified user.
2556#
2557# $1 login name
2558#
2559function del_user_freebsd #<logname>
2560{
2561	typeset user=$1
2562
2563	if id $user > /dev/null 2>&1; then
2564		log_must pw userdel $user
2565	fi
2566
2567	return 0
2568}
2569
2570#
2571# Select valid gid and create specified group.
2572#
2573# $1 group name
2574#
2575function add_group_freebsd #<group_name>
2576{
2577	typeset group=$1
2578
2579	# See if the group already exists.
2580	if pw groupshow $group >/dev/null 2>&1; then
2581		return 0
2582	fi
2583
2584	# Assign 1000 as the base gid
2585	typeset -i gid=1000
2586	while true; do
2587		pw groupadd -g $gid -n $group > /dev/null 2>&1
2588		typeset -i ret=$?
2589		case $ret in
2590			0) return 0 ;;
2591			# The gid is not unique
2592			65) ((gid += 1)) ;;
2593			*) return 1 ;;
2594		esac
2595		if [[ $gid == 65000 ]]; then
2596			log_fail "No group id available under 65000 for $group"
2597		fi
2598	done
2599}
2600
2601#
2602# Delete the specified group.
2603#
2604# $1 group name
2605#
2606function del_group_freebsd #<group_name>
2607{
2608	typeset group=$1
2609
2610	pw groupdel -n $group > /dev/null 2>&1
2611	typeset -i ret=$?
2612	case $ret in
2613		# Group does not exist, or was deleted successfully.
2614		0|6|65) return 0 ;;
2615		# Name already exists as a group name
2616		9) log_must pw groupdel $group ;;
2617		*) return 1 ;;
2618	esac
2619
2620	return 0
2621}
2622
2623function add_user_illumos #<group_name> <user_name> <basedir>
2624{
2625	typeset group=$1
2626	typeset user=$2
2627	typeset basedir=$3
2628
2629	log_must useradd -g $group -d $basedir/$user -m $user
2630
2631	return 0
2632}
2633
2634function del_user_illumos #<user_name>
2635{
2636	typeset user=$1
2637
2638	if id $user > /dev/null 2>&1; then
2639		log_must_retry "currently used" 6 userdel $user
2640	fi
2641
2642	return 0
2643}
2644
2645function add_group_illumos #<group_name>
2646{
2647	typeset group=$1
2648
2649	typeset -i gid=100
2650	while true; do
2651		groupadd -g $gid $group > /dev/null 2>&1
2652		typeset -i ret=$?
2653		case $ret in
2654			0) return 0 ;;
2655			# The gid is not unique
2656			4) ((gid += 1)) ;;
2657			*) return 1 ;;
2658		esac
2659	done
2660}
2661
2662function del_group_illumos #<group_name>
2663{
2664	typeset group=$1
2665
2666	groupmod -n $group $group > /dev/null 2>&1
2667	typeset -i ret=$?
2668	case $ret in
2669		# Group does not exist.
2670		6) return 0 ;;
2671		# Name already exists as a group name
2672		9) log_must groupdel $group ;;
2673		*) return 1 ;;
2674	esac
2675}
2676
2677function add_user_linux #<group_name> <user_name> <basedir>
2678{
2679	typeset group=$1
2680	typeset user=$2
2681	typeset basedir=$3
2682
2683	log_must useradd -g $group -d $basedir/$user -m $user
2684
2685	# Add new users to the same group as the command line utils.
2686	# This allows them to be run out of the original user's home
2687	# directory as long as it is permissioned to be group readable.
2688	cmd_group=$(stat --format="%G" $(which zfs))
2689	log_must usermod -a -G $cmd_group $user
2690
2691	return 0
2692}
2693
2694function del_user_linux #<user_name>
2695{
2696	typeset user=$1
2697
2698	if id $user > /dev/null 2>&1; then
2699		log_must_retry "currently used" 6 userdel $user
2700	fi
2701
2702	return 0
2703}
2704
2705function add_group_linux #<group_name>
2706{
2707	typeset group=$1
2708
2709	# Let groupadd select the gid; it is not forced here because on
2710	# many Linux distributions gids of 1000 and under are reserved.
2711	while true; do
2712		groupadd $group > /dev/null 2>&1
2713		typeset -i ret=$?
2714		case $ret in
2715			0) return 0 ;;
2716			*) return 1 ;;
2717		esac
2718	done
2719}
2720
2721function del_group_linux #<group_name>
2722{
2723	typeset group=$1
2724
2725	getent group $group > /dev/null 2>&1
2726	typeset -i ret=$?
2727	case $ret in
2728		# Group does not exist.
2729		2) return 0 ;;
2730		# Name already exists as a group name
2731		0) log_must groupdel $group ;;
2732		*) return 1 ;;
2733	esac
2734
2735	return 0
2736}
2737
2738#
2739# Add specified user to specified group
2740#
2741# $1 group name
2742# $2 user name
2743# $3 base of the homedir (optional)
2744#
2745function add_user #<group_name> <user_name> <basedir>
2746{
2747	typeset group=$1
2748	typeset user=$2
2749	typeset basedir=${3:-"/var/tmp"}
2750
2751	if ((${#group} == 0 || ${#user} == 0)); then
2752		log_fail "group name or user name are not defined."
2753	fi
2754
2755	case $(uname) in
2756	FreeBSD)
2757		add_user_freebsd "$group" "$user" "$basedir"
2758		;;
2759	Linux)
2760		add_user_linux "$group" "$user" "$basedir"
2761		;;
2762	*)
2763		add_user_illumos "$group" "$user" "$basedir"
2764		;;
2765	esac
2766
2767	return 0
2768}
2769
2770#
2771# Delete the specified user.
2772#
2773# $1 login name
2774# $2 base of the homedir (optional)
2775#
2776function del_user #<logname> <basedir>
2777{
2778	typeset user=$1
2779	typeset basedir=${2:-"/var/tmp"}
2780
2781	if ((${#user} == 0)); then
2782		log_fail "login name is necessary."
2783	fi
2784
2785	case $(uname) in
2786	FreeBSD)
2787		del_user_freebsd "$user"
2788		;;
2789	Linux)
2790		del_user_linux "$user"
2791		;;
2792	*)
2793		del_user_illumos "$user"
2794		;;
2795	esac
2796
2797	[[ -d $basedir/$user ]] && rm -fr $basedir/$user
2798
2799	return 0
2800}
2801
2802#
2803# Select valid gid and create specified group.
2804#
2805# $1 group name
2806#
2807function add_group #<group_name>
2808{
2809	typeset group=$1
2810
2811	if ((${#group} == 0)); then
2812		log_fail "group name is necessary."
2813	fi
2814
2815	case $(uname) in
2816	FreeBSD)
2817		add_group_freebsd "$group"
2818		;;
2819	Linux)
2820		add_group_linux "$group"
2821		;;
2822	*)
2823		add_group_illumos "$group"
2824		;;
2825	esac
2826
2827	return 0
2828}
2829
2830#
2831# Delete the specified group.
2832#
2833# $1 group name
2834#
2835function del_group #<group_name>
2836{
2837	typeset group=$1
2838
2839	if ((${#group} == 0)); then
2840		log_fail "group name is necessary."
2841	fi
2842
2843	case $(uname) in
2844	FreeBSD)
2845		del_group_freebsd "$group"
2846		;;
2847	Linux)
2848		del_group_linux "$group"
2849		;;
2850	*)
2851		del_group_illumos "$group"
2852		;;
2853	esac
2854
2855	return 0
2856}
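
# Illustrative usage of the platform-independent wrappers above (editor's
# sketch; "zgroup" and "zuser" are arbitrary names chosen for the example):
#
#	log_must add_group zgroup
#	log_must add_user zgroup zuser
#	...
#	log_must del_user zuser
#	log_must del_group zgroup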
2857
2858#
2859# This function will return true if it's safe to destroy the pool passed
2860# as argument 1. It checks for pools based on zvols and files, and also
2861# files contained in a pool that may have a different mountpoint.
2862#
2863function safe_to_destroy_pool { # $1 the pool name
2864
2865	typeset pool=""
2866	typeset DONT_DESTROY=""
2867
2868	# We check that by deleting the $1 pool, we're not
2869	# going to pull the rug out from other pools. Do this
2870	# by looking at all other pools, ensuring that they
2871	# aren't built from files or zvols contained in this pool.
2872
2873	for pool in $(zpool list -H -o name)
2874	do
2875		ALTMOUNTPOOL=""
2876
2877		# this is a list of the file vdevs in this pool whose paths
2878		# live under the pool we're checking ($1)
2879		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2880			awk '{print $1}')
2881
2882		# this is a list of the zvols that make up the pool
2883		ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2884		    | awk '{print $1}')
2885
2886		# also want to determine if it's a file-based pool using an
2887		# alternate mountpoint...
2888		POOL_FILE_DIRS=$(zpool status -v $pool | \
2889					grep / | awk '{print $1}' | \
2890					awk -F/ '{print $2}' | grep -v "dev")
2891
2892		for pooldir in $POOL_FILE_DIRS
2893		do
2894			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2895					grep "${pooldir}$" | awk '{print $1}')
2896
2897			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2898		done
2899
2900
2901		if [ ! -z "$ZVOLPOOL" ]
2902		then
2903			DONT_DESTROY="true"
2904			log_note "Pool $pool is built from $ZVOLPOOL on $1"
2905		fi
2906
2907		if [ ! -z "$FILEPOOL" ]
2908		then
2909			DONT_DESTROY="true"
2910			log_note "Pool $pool is built from $FILEPOOL on $1"
2911		fi
2912
2913		if [ ! -z "$ALTMOUNTPOOL" ]
2914		then
2915			DONT_DESTROY="true"
2916			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2917		fi
2918	done
2919
2920	if [ -z "${DONT_DESTROY}" ]
2921	then
2922		return 0
2923	else
2924		log_note "Warning: it is not safe to destroy $1!"
2925		return 1
2926	fi
2927}
2928
2929#
2930# Verify zfs operation with -p option works as expected
2931# $1 operation, value could be create, clone or rename
2932# $2 dataset type, value could be fs or vol
2933# $3 dataset name
2934# $4 new dataset name
2935#
2936function verify_opt_p_ops
2937{
2938	typeset ops=$1
2939	typeset datatype=$2
2940	typeset dataset=$3
2941	typeset newdataset=$4
2942
2943	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2944		log_fail "$datatype is not supported."
2945	fi
2946
2947	# check parameters accordingly
2948	case $ops in
2949		create)
2950			newdataset=$dataset
2951			dataset=""
2952			if [[ $datatype == "vol" ]]; then
2953				ops="create -V $VOLSIZE"
2954			fi
2955			;;
2956		clone)
2957			if [[ -z $newdataset ]]; then
2958				log_fail "newdataset should not be empty" \
2959					"when ops is $ops."
2960			fi
2961			log_must datasetexists $dataset
2962			log_must snapexists $dataset
2963			;;
2964		rename)
2965			if [[ -z $newdataset ]]; then
2966				log_fail "newdataset should not be empty" \
2967					"when ops is $ops."
2968			fi
2969			log_must datasetexists $dataset
2970			;;
2971		*)
2972			log_fail "$ops is not supported."
2973			;;
2974	esac
2975
2976	# make sure the upper level filesystem does not exist
2977	destroy_dataset "${newdataset%/*}" "-rRf"
2978
2979	# without -p option, operation will fail
2980	log_mustnot zfs $ops $dataset $newdataset
2981	log_mustnot datasetexists $newdataset ${newdataset%/*}
2982
2983	# with -p option, operation should succeed
2984	log_must zfs $ops -p $dataset $newdataset
2985	block_device_wait
2986
2987	if ! datasetexists $newdataset ; then
2988		log_fail "-p option does not work for $ops"
2989	fi
2990
2991	# when $ops is create or clone, redoing the operation should still succeed
2992	if [[ $ops != "rename" ]]; then
2993		log_must zfs $ops -p $dataset $newdataset
2994	fi
2995
2996	return 0
2997}
2998
2999#
3000# Get configuration of pool
3001# $1 pool name
3002# $2 config name
3003#
3004function get_config
3005{
3006	typeset pool=$1
3007	typeset config=$2
3008	typeset alt_root
3009
3010	if ! poolexists "$pool" ; then
3011		return 1
3012	fi
3013	alt_root=$(zpool list -H $pool | awk '{print $NF}')
3014	if [[ $alt_root == "-" ]]; then
3015		value=$(zdb -C $pool | grep "$config:" | awk -F: \
3016		    '{print $2}')
3017	else
3018		value=$(zdb -e $pool | grep "$config:" | awk -F: \
3019		    '{print $2}')
3020	fi
3021	if [[ -n $value ]] ; then
3022		value=${value#\'}
3023		value=${value%\'}
3024	fi
3025	echo $value
3026
3027	return 0
3028}
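
# Illustrative usage (editor's sketch; assumes $TESTPOOL exists):
#
#	typeset guid=$(get_config $TESTPOOL pool_guid)
#	log_note "$TESTPOOL pool_guid is $guid"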
3029
3030#
3031# Private function. Randomly select one of the items from the arguments.
3032#
3033# $1 count
3034# $2-n string
3035#
3036function _random_get
3037{
3038	typeset cnt=$1
3039	shift
3040
3041	typeset str="$@"
3042	typeset -i ind
3043	((ind = RANDOM % cnt + 1))
3044
3045	typeset ret=$(echo "$str" | cut -f $ind -d ' ')
3046	echo $ret
3047}
3048
3049#
3050# Randomly select one item from the arguments, including the possibility
3050# of NONE (an empty string)
3051#
3052function random_get_with_non
3053{
3054	typeset -i cnt=$#
3055	((cnt += 1))
3056
3057	_random_get "$cnt" "$@"
3058}
3059
3060#
3061# Random select one of item from arguments which doesn't include NONE string
3062#
3063function random_get
3064{
3065	_random_get "$#" "$@"
3066}
3067
3068#
3069# Generate a dataset name with the specified length
3070# $1, the length of the name
3071# $2, the base string to construct the name
3072#
3073function gen_dataset_name
3074{
3075	typeset -i len=$1
3076	typeset basestr="$2"
3077	typeset -i baselen=${#basestr}
3078	typeset -i iter=0
3079	typeset l_name=""
3080
3081	if ((len % baselen == 0)); then
3082		((iter = len / baselen))
3083	else
3084		((iter = len / baselen + 1))
3085	fi
3086	while ((iter > 0)); do
3087		l_name="${l_name}$basestr"
3088
3089		((iter -= 1))
3090	done
3091
3092	echo $l_name
3093}
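
# Illustrative usage (editor's sketch):
#
#	# Build a 256-character name from repetitions of "abcdefgh" for
#	# name-length limit tests.
#	typeset longname=$(gen_dataset_name 256 "abcdefgh")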
3094
3095#
3096# Get cksum tuple of dataset
3097# $1 dataset name
3098#
3099# sample zdb output:
3100# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
3101# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
3102# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
3103# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
3104function datasetcksum
3105{
3106	typeset cksum
3107	sync
3108	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
3109		| awk -F= '{print $7}')
3110	echo $cksum
3111}
3112
3113#
3114# Get cksum of file
3115# $1 file path
3116#
3117function checksum
3118{
3119	typeset cksum
3120	cksum=$(cksum $1 | awk '{print $1}')
3121	echo $cksum
3122}
3123
3124#
3125# Get the given disk/slice state from the specified field of the pool
3126#
3127function get_device_state #pool disk field("", "spares","logs")
3128{
3129	typeset pool=$1
3130	typeset disk=${2#$DEV_DSKDIR/}
3131	typeset field=${3:-$pool}
3132
3133	state=$(zpool status -v "$pool" 2>/dev/null | \
3134		nawk -v device=$disk -v pool=$pool -v field=$field \
3135		'BEGIN {startconfig=0; startfield=0; }
3136		/config:/ {startconfig=1}
3137		(startconfig==1) && ($1==field) {startfield=1; next;}
3138		(startfield==1) && ($1==device) {print $2; exit;}
3139		(startfield==1) &&
3140		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
3141	echo $state
3142}
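
# Illustrative usage (editor's sketch; assumes $TESTPOOL has a log device
# $LOG_DISK provided by the calling test's setup):
#
#	typeset state=$(get_device_state $TESTPOOL $LOG_DISK "logs")
#	[[ $state == "ONLINE" ]] || log_fail "log device state is $state"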
3143
3144
3145#
3146# print the given directory filesystem type
3147#
3148# $1 directory name
3149#
3150function get_fstype
3151{
3152	typeset dir=$1
3153
3154	if [[ -z $dir ]]; then
3155		log_fail "Usage: get_fstype <directory>"
3156	fi
3157
3158	#
3159	#  $ df -n /
3160	#  /		  : ufs
3161	#
3162	df -n $dir | awk '{print $3}'
3163}
3164
3165#
3166# Given a disk, label it to VTOC regardless what label was on the disk
3167# $1 disk
3168#
3169function labelvtoc
3170{
3171	typeset disk=$1
3172	if [[ -z $disk ]]; then
3173		log_fail "The disk name is unspecified."
3174	fi
3175	typeset label_file=/var/tmp/labelvtoc.$$
3176	typeset arch=$(uname -p)
3177
3178	if is_linux || is_freebsd; then
3179		log_note "Currently unsupported by the test framework"
3180		return 1
3181	fi
3182
3183	if [[ $arch == "i386" ]]; then
3184		echo "label" > $label_file
3185		echo "0" >> $label_file
3186		echo "" >> $label_file
3187		echo "q" >> $label_file
3188		echo "q" >> $label_file
3189
3190		fdisk -B $disk >/dev/null 2>&1
3191	# wait a while for fdisk to finish
3192		sleep 60
3193	elif [[ $arch == "sparc" ]]; then
3194		echo "label" > $label_file
3195		echo "0" >> $label_file
3196		echo "" >> $label_file
3197		echo "" >> $label_file
3198		echo "" >> $label_file
3199		echo "q" >> $label_file
3200	else
3201		log_fail "unknown arch type"
3202	fi
3203
3204	format -e -s -d $disk -f $label_file
3205	typeset -i ret_val=$?
3206	rm -f $label_file
3207	#
3208	# wait for format to finish
3209	#
3210	sleep 60
3211	if ((ret_val != 0)); then
3212		log_fail "unable to label $disk as VTOC."
3213	fi
3214
3215	return 0
3216}
3217
3218#
3219# check if the system was installed as zfsroot or not
3220# return: 0 if zfsroot, non-zero if not
3221#
3222function is_zfsroot
3223{
3224	df -n / | grep zfs > /dev/null 2>&1
3225	return $?
3226}
3227
3228#
3229# get the root filesystem name if it's zfsroot system.
3230#
3231# return: root filesystem name
3232function get_rootfs
3233{
3234	typeset rootfs=""
3235
3236	if is_freebsd; then
3237		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
3238	elif ! is_linux; then
3239		rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
3240			/etc/mnttab)
3241	fi
3242	if [[ -z "$rootfs" ]]; then
3243		log_fail "Can not get rootfs"
3244	fi
3245	zfs list $rootfs > /dev/null 2>&1
3246	if (($? == 0)); then
3247		echo $rootfs
3248	else
3249		log_fail "This is not a zfsroot system."
3250	fi
3251}
3252
3253#
3254# get the rootfs's pool name
3255# return:
3256#       rootpool name
3257#
3258function get_rootpool
3259{
3260	typeset rootfs=""
3261	typeset rootpool=""
3262
3263	if is_freebsd; then
3264		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
3265	elif ! is_linux; then
3266		rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
3267			 /etc/mnttab)
3268	fi
3269	if [[ -z "$rootfs" ]]; then
3270		log_fail "Can not get rootpool"
3271	fi
3272	zfs list $rootfs > /dev/null 2>&1
3273	if (($? == 0)); then
3274		echo ${rootfs%%/*}
3275	else
3276		log_fail "This is not a zfsroot system."
3277	fi
3278}
3279
3280#
3281# Get the number of words in a whitespace-separated string
3282#
3283function get_word_count
3284{
3285	echo $1 | wc -w
3286}
3287
3288#
3289# Verify that the required number of disks is given
3290#
3291function verify_disk_count
3292{
3293	typeset -i min=${2:-1}
3294
3295	typeset -i count=$(get_word_count "$1")
3296
3297	if ((count < min)); then
3298		log_untested "A minimum of $min disks is required to run." \
3299			" You specified $count disk(s)"
3300	fi
3301}
3302
3303function ds_is_volume
3304{
3305	typeset type=$(get_prop type $1)
3306	[[ $type = "volume" ]] && return 0
3307	return 1
3308}
3309
3310function ds_is_filesystem
3311{
3312	typeset type=$(get_prop type $1)
3313	[[ $type = "filesystem" ]] && return 0
3314	return 1
3315}
3316
3317function ds_is_snapshot
3318{
3319	typeset type=$(get_prop type $1)
3320	[[ $type = "snapshot" ]] && return 0
3321	return 1
3322}
3323
3324#
3325# Check if Trusted Extensions are installed and enabled
3326#
3327function is_te_enabled
3328{
3329	svcs -H -o state labeld 2>/dev/null | grep "enabled"
3330	if (($? != 0)); then
3331		return 1
3332	else
3333		return 0
3334	fi
3335}
3336
3337# Utility function to determine if a system has multiple cpus.
3338function is_mp
3339{
3340	if is_linux; then
3341		(($(nproc) > 1))
3342	elif is_freebsd; then
3343		(($(sysctl -n kern.smp.cpus) > 1))
3344	else
3345		(($(psrinfo | wc -l) > 1))
3346	fi
3347
3348	return $?
3349}
3350
3351function get_cpu_freq
3352{
3353	if is_linux; then
3354		lscpu | awk '/CPU MHz/ { print $3 }'
3355	elif is_freebsd; then
3356		sysctl -n hw.clockrate
3357	else
3358		psrinfo -v 0 | awk '/processor operates at/ {print $6}'
3359	fi
3360}
3361
3362# Run the given command as the user provided.
3363function user_run
3364{
3365	typeset user=$1
3366	shift
3367
3368	log_note "user: $user"
3369	log_note "cmd: $*"
3370
3371	typeset out=$TEST_BASE_DIR/out
3372	typeset err=$TEST_BASE_DIR/err
3373
3374	sudo -Eu $user env PATH="$PATH" ksh <<<"$*" >$out 2>$err
3375	typeset res=$?
3376	log_note "out: $(<$out)"
3377	log_note "err: $(<$err)"
3378	return $res
3379}
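
# Illustrative usage (editor's sketch; "staff1" is a placeholder user created
# with add_user, and $TESTPOOL/$TESTFS comes from the calling test):
#
#	log_must zfs allow staff1 snapshot $TESTPOOL/$TESTFS
#	log_must user_run staff1 "zfs snapshot $TESTPOOL/$TESTFS@snap1"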
3380
3381#
3382# Check if the pool contains the specified vdevs
3383#
3384# $1 pool
3385# $2..n <vdev> ...
3386#
3387# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3388# vdevs is not in the pool, and 2 if pool name is missing.
3389#
3390function vdevs_in_pool
3391{
3392	typeset pool=$1
3393	typeset vdev
3394
3395	if [[ -z $pool ]]; then
3396		log_note "Missing pool name."
3397		return 2
3398	fi
3399
3400	shift
3401
3402	# We could use 'zpool list' to only get the vdevs of the pool but we
3403	# can't reference a mirror/raidz vdev using its ID (i.e mirror-0),
3404	# therefore we use the 'zpool status' output.
3405	typeset tmpfile=$(mktemp)
3406	zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
3407	for vdev in $@; do
3408		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
3409		[[ $? -ne 0 ]] && return 1
3410	done
3411
3412	rm -f $tmpfile
3413
3414	return 0;
3415}
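
# Illustrative usage (editor's sketch; assumes $TESTPOOL was created over
# $DISK1 and $DISK2):
#
#	log_must vdevs_in_pool $TESTPOOL $DISK1 $DISK2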
3416
3417function get_max
3418{
3419	typeset -l i max=$1
3420	shift
3421
3422	for i in "$@"; do
3423		max=$((max > i ? max : i))
3424	done
3425
3426	echo $max
3427}
3428
3429function get_min
3430{
3431	typeset -l i min=$1
3432	shift
3433
3434	for i in "$@"; do
3435		min=$((min < i ? min : i))
3436	done
3437
3438	echo $min
3439}
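
# Illustrative usage (editor's sketch):
#
#	get_max 17 3 42 8	# prints 42
#	get_min 17 3 42 8	# prints 3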
3440
3441# Write compressible data into a directory
3442function write_compressible
3443{
3444	typeset dir=$1
3445	typeset megs=$2
3446	typeset nfiles=${3:-1}
3447	typeset bs=${4:-1024k}
3448	typeset fname=${5:-file}
3449
3450	[[ -d $dir ]] || log_fail "No directory: $dir"
3451
3452	# Under Linux fio is not currently used since its behavior can
3453	# differ significantly across versions.  This includes missing
3454	# command line options and cases where the --buffer_compress_*
3455	# options fail to behave as expected.
3456	if is_linux; then
3457		typeset file_bytes=$(to_bytes $megs)
3458		typeset bs_bytes=4096
3459		typeset blocks=$(($file_bytes / $bs_bytes))
3460
3461		for (( i = 0; i < $nfiles; i++ )); do
3462			truncate -s $file_bytes $dir/$fname.$i
3463
3464			# Write every third block to get 66% compression.
3465			for (( j = 0; j < $blocks; j += 3 )); do
3466				dd if=/dev/urandom of=$dir/$fname.$i \
3467				    seek=$j bs=$bs_bytes count=1 \
3468				    conv=notrunc >/dev/null 2>&1
3469			done
3470		done
3471	else
3472		log_must eval "fio \
3473		    --name=job \
3474		    --fallocate=0 \
3475		    --minimal \
3476		    --randrepeat=0 \
3477		    --buffer_compress_percentage=66 \
3478		    --buffer_compress_chunk=4096 \
3479		    --directory=$dir \
3480		    --numjobs=$nfiles \
3481		    --nrfiles=$nfiles \
3482		    --rw=write \
3483		    --bs=$bs \
3484		    --filesize=$megs \
3485		    --filename_format='$fname.\$jobnum' >/dev/null"
3486	fi
3487}
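
# Illustrative usage (editor's sketch; assumes $TESTDIR is a mounted test
# filesystem):
#
#	# Write two 64 MiB files of roughly 66% compressible data.
#	write_compressible $TESTDIR 64m 2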
3488
3489function get_objnum
3490{
3491	typeset pathname=$1
3492	typeset objnum
3493
3494	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
3495	if is_freebsd; then
3496		objnum=$(stat -f "%i" $pathname)
3497	else
3498		objnum=$(stat -c %i $pathname)
3499	fi
3500	echo $objnum
3501}
3502
3503#
3504# Sync data to the pool
3505#
3506# $1 pool name
3507# $2 boolean to force uberblock (and config including zpool cache file) update
3508#
3509function sync_pool #pool <force>
3510{
3511	typeset pool=${1:-$TESTPOOL}
3512	typeset force=${2:-false}
3513
3514	if [[ $force == true ]]; then
3515		log_must zpool sync -f $pool
3516	else
3517		log_must zpool sync $pool
3518	fi
3519
3520	return 0
3521}
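
# Illustrative usage (editor's sketch; assumes $TESTPOOL exists):
#
#	# Flush dirty data before inspecting on-disk state.
#	sync_pool $TESTPOOL
#	# Additionally force an uberblock and config update.
#	sync_pool $TESTPOOL true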
3522
3523#
3524# Wait for the zpool 'freeing' property to drop to zero.
3525#
3526# $1 pool name
3527#
3528function wait_freeing #pool
3529{
3530	typeset pool=${1:-$TESTPOOL}
3531	while true; do
3532		[[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3533		log_must sleep 1
3534	done
3535}
3536
3537#
3538# Wait for every device replace operation to complete
3539#
3540# $1 pool name
3541#
3542function wait_replacing #pool
3543{
3544	typeset pool=${1:-$TESTPOOL}
3545	while true; do
3546		[[ "" == "$(zpool status $pool |
3547		    awk '/replacing-[0-9]+/ {print $1}')" ]] && break
3548		log_must sleep 1
3549	done
3550}
3551
3552#
3553# Wait for a pool to be scrubbed
3554#
3555# $1 pool name
3556#
3557function wait_scrubbed
3558{
3559	typeset pool=${1:-$TESTPOOL}
3560	while ! is_pool_scrubbed $pool ; do
3561		sleep 1
3562	done
3563}
3564
3565# Backup the zed.rc in our test directory so that we can edit it for our test.
3566#
3567# Returns: Backup file name.  You will need to pass this to zed_rc_restore().
3568function zed_rc_backup
3569{
3570	zedrc_backup="$(mktemp)"
3571	cp $ZEDLET_DIR/zed.rc $zedrc_backup
3572	echo $zedrc_backup
3573}
3574
3575function zed_rc_restore
3576{
3577	mv $1 $ZEDLET_DIR/zed.rc
3578}
3579
3580#
3581# Setup custom environment for the ZED.
3582#
3583# $@ Optional list of zedlets to run under zed.
3584function zed_setup
3585{
3586	if ! is_linux; then
3587		log_unsupported "No zed on $(uname)"
3588	fi
3589
3590	if [[ ! -d $ZEDLET_DIR ]]; then
3591		log_must mkdir $ZEDLET_DIR
3592	fi
3593
3594	if [[ ! -e $VDEVID_CONF ]]; then
3595		log_must touch $VDEVID_CONF
3596	fi
3597
3598	if [[ -e $VDEVID_CONF_ETC ]]; then
3599		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
3600	fi
3601	EXTRA_ZEDLETS=$@
3602
3603	# Create a symlink for /etc/zfs/vdev_id.conf file.
3604	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
3605
3606	# Setup minimal ZED configuration.  Individual test cases should
3607	# add additional ZEDLETs as needed for their specific test.
3608	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
3609	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
3610
3611	# Scripts must only be user writable.
3612	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3613		saved_umask=$(umask)
3614		log_must umask 0022
3615		for i in $EXTRA_ZEDLETS ; do
3616			log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
3617		done
3618		log_must umask $saved_umask
3619	fi
3620
3621	# Customize the zed.rc file to enable the full debug log.
3622	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
3623	echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
3624
3625}
3626
3627#
3628# Cleanup custom ZED environment.
3629#
3630# $@ Optional list of zedlets to remove from our test zed.d directory.
3631function zed_cleanup
3632{
3633	if ! is_linux; then
3634		return
3635	fi
3636	EXTRA_ZEDLETS=$@
3637
3638	log_must rm -f ${ZEDLET_DIR}/zed.rc
3639	log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
3640	log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
3641	log_must rm -f ${ZEDLET_DIR}/all-debug.sh
3642	log_must rm -f ${ZEDLET_DIR}/state
3643
3644	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3645		for i in $EXTRA_ZEDLETS ; do
3646			log_must rm -f ${ZEDLET_DIR}/$i
3647		done
3648	fi
3649	log_must rm -f $ZED_LOG
3650	log_must rm -f $ZED_DEBUG_LOG
3651	log_must rm -f $VDEVID_CONF_ETC
3652	log_must rm -f $VDEVID_CONF
3653	rmdir $ZEDLET_DIR
3654}
3655
3656#
3657# Check if ZED is currently running, if not start ZED.
3658#
3659function zed_start
3660{
3661	if ! is_linux; then
3662		return
3663	fi
3664
3665	# ZEDLET_DIR=/var/tmp/zed
3666	if [[ ! -d $ZEDLET_DIR ]]; then
3667		log_must mkdir $ZEDLET_DIR
3668	fi
3669
3670	# Verify the ZED is not already running.
3671	pgrep -x zed > /dev/null
3672	if (($? == 0)); then
3673		log_note "ZED already running"
3674	else
3675		log_note "Starting ZED"
3676		# run ZED in the background and redirect foreground logging
3677		# output to $ZED_LOG.
3678		log_must truncate -s 0 $ZED_DEBUG_LOG
3679		log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \
3680		    "-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &"
3681	fi
3682
3683	return 0
3684}
3685
3686#
3687# Kill ZED process
3688#
3689function zed_stop
3690{
3691	if ! is_linux; then
3692		return
3693	fi
3694
3695	log_note "Stopping ZED"
3696	while true; do
3697		zedpids="$(pgrep -x zed)"
3698		[ "$?" -ne 0 ] && break
3699
3700		log_must kill $zedpids
3701		sleep 1
3702	done
3703	return 0
3704}
3705
3706#
3707# Drain all zevents
3708#
3709function zed_events_drain
3710{
3711	while [ $(zpool events -H | wc -l) -ne 0 ]; do
3712		sleep 1
3713		zpool events -c >/dev/null
3714	done
3715}
3716
3717# Set a variable in zed.rc to something, un-commenting it in the process.
3718#
3719# $1 variable
3720# $2 value
3721function zed_rc_set
3722{
3723	var="$1"
3724	val="$2"
3725	# Remove the line
3726	cmd="'/$var/d'"
3727	eval sed -i $cmd $ZEDLET_DIR/zed.rc
3728
3729	# Add it at the end
3730	echo "$var=$val" >> $ZEDLET_DIR/zed.rc
3731}
3732
3733
3734#
3735# Check if the provided device is actively being used as a swap device.
3736#
3737function is_swap_inuse
3738{
3739	typeset device=$1
3740
3741	if [[ -z $device ]] ; then
3742		log_note "No device specified."
3743		return 1
3744	fi
3745
3746	if is_linux; then
3747		swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
3748	elif is_freebsd; then
3749		swapctl -l | grep -w $device
3750	else
3751		swap -l | grep -w $device > /dev/null 2>&1
3752	fi
3753
3754	return $?
3755}
3756
3757#
3758# Setup a swap device using the provided device.
3759#
3760function swap_setup
3761{
3762	typeset swapdev=$1
3763
3764	if is_linux; then
3765		log_must eval "mkswap $swapdev > /dev/null 2>&1"
3766		log_must swapon $swapdev
3767	elif is_freebsd; then
3768		log_must swapctl -a $swapdev
3769	else
3770		log_must swap -a $swapdev
3771	fi
3772
3773	return 0
3774}
3775
3776#
3777# Cleanup a swap device on the provided device.
3778#
3779function swap_cleanup
3780{
3781	typeset swapdev=$1
3782
3783	if is_swap_inuse $swapdev; then
3784		if is_linux; then
3785			log_must swapoff $swapdev
3786		elif is_freebsd; then
3787			log_must swapoff $swapdev
3788		else
3789			log_must swap -d $swapdev
3790		fi
3791	fi
3792
3793	return 0
3794}
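
# Illustrative usage (editor's sketch; assumes the calling test created the
# zvol $TESTPOOL/$TESTVOL):
#
#	log_must swap_setup $ZVOL_DEVDIR/$TESTPOOL/$TESTVOL
#	...
#	log_must swap_cleanup $ZVOL_DEVDIR/$TESTPOOL/$TESTVOL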
3795
3796#
3797# Set a global system tunable (64-bit value)
3798#
3799# $1 tunable name (use a NAME defined in tunables.cfg)
3800# $2 tunable values
3801#
3802function set_tunable64
3803{
3804	set_tunable_impl "$1" "$2" Z
3805}
3806
3807#
3808# Set a global system tunable (32-bit value)
3809#
3810# $1 tunable name (use a NAME defined in tunables.cfg)
3811# $2 tunable values
3812#
3813function set_tunable32
3814{
3815	set_tunable_impl "$1" "$2" W
3816}
3817
3818function set_tunable_impl
3819{
3820	typeset name="$1"
3821	typeset value="$2"
3822	typeset mdb_cmd="$3"
3823	typeset module="${4:-zfs}"
3824
3825	eval "typeset tunable=\$$name"
3826	case "$tunable" in
3827	UNSUPPORTED)
3828		log_unsupported "Tunable '$name' is unsupported on $(uname)"
3829		;;
3830	"")
3831		log_fail "Tunable '$name' must be added to tunables.cfg"
3832		;;
3833	*)
3834		;;
3835	esac
3836
3837	[[ -z "$value" ]] && return 1
3838	[[ -z "$mdb_cmd" ]] && return 1
3839
3840	case "$(uname)" in
3841	Linux)
3842		typeset zfs_tunables="/sys/module/$module/parameters"
3843		[[ -w "$zfs_tunables/$tunable" ]] || return 1
3844		cat >"$zfs_tunables/$tunable" <<<"$value"
3845		return $?
3846		;;
3847	FreeBSD)
3848		sysctl vfs.zfs.$tunable=$value
3849		return "$?"
3850		;;
3851	SunOS)
3852		[[ "$module" == "zfs" ]] || return 1
3853		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
3854		return $?
3855		;;
3856	esac
3857}
3858
3859#
3860# Get a global system tunable
3861#
3862# $1 tunable name (use a NAME defined in tunables.cfg)
3863#
3864function get_tunable
3865{
3866	get_tunable_impl "$1"
3867}
3868
3869function get_tunable_impl
3870{
3871	typeset name="$1"
3872	typeset module="${2:-zfs}"
3873
3874	eval "typeset tunable=\$$name"
3875	case "$tunable" in
3876	UNSUPPORTED)
3877		log_unsupported "Tunable '$name' is unsupported on $(uname)"
3878		;;
3879	"")
3880		log_fail "Tunable '$name' must be added to tunables.cfg"
3881		;;
3882	*)
3883		;;
3884	esac
3885
3886	case "$(uname)" in
3887	Linux)
3888		typeset zfs_tunables="/sys/module/$module/parameters"
3889		[[ -f "$zfs_tunables/$tunable" ]] || return 1
3890		cat $zfs_tunables/$tunable
3891		return $?
3892		;;
3893	FreeBSD)
3894		sysctl -n vfs.zfs.$tunable
3895		;;
3896	SunOS)
3897		[[ "$module" -eq "zfs" ]] || return 1
3898		[[ "$module" == "zfs" ]] || return 1
3899	esac
3900
3901	return 1
3902}
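
# Illustrative usage (editor's sketch; TXG_TIMEOUT is one of the NAMEs defined
# in tunables.cfg):
#
#	typeset saved=$(get_tunable TXG_TIMEOUT)
#	log_must set_tunable64 TXG_TIMEOUT 1
#	...
#	log_must set_tunable64 TXG_TIMEOUT $saved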
3903
3904#
3905# Prints the current time in seconds since UNIX Epoch.
3906#
3907function current_epoch
3908{
3909	printf '%(%s)T'
3910}
3911
3912#
3913# Get decimal value of global uint32_t variable using mdb.
3914#
3915function mdb_get_uint32
3916{
3917	typeset variable=$1
3918	typeset value
3919
3920	value=$(mdb -k -e "$variable/X | ::eval .=U")
3921	if [[ $? -ne 0 ]]; then
3922		log_fail "Failed to get value of '$variable' from mdb."
3923		return 1
3924	fi
3925
3926	echo $value
3927	return 0
3928}
3929
3930#
3931# Set global uint32_t variable to a decimal value using mdb.
3932#
3933function mdb_set_uint32
3934{
3935	typeset variable=$1
3936	typeset value=$2
3937
3938	mdb -kw -e "$variable/W 0t$value" > /dev/null
3939	if [[ $? -ne 0 ]]; then
3940		echo "Failed to set '$variable' to '$value' in mdb."
3941		return 1
3942	fi
3943
3944	return 0
3945}
3946
3947#
3948# Set global scalar integer variable to a hex value using mdb.
3949# Note: Target should have CTF data loaded.
3950#
3951function mdb_ctf_set_int
3952{
3953	typeset variable=$1
3954	typeset value=$2
3955
3956	mdb -kw -e "$variable/z $value" > /dev/null
3957	if [[ $? -ne 0 ]]; then
3958		echo "Failed to set '$variable' to '$value' in mdb."
3959		return 1
3960	fi
3961
3962	return 0
3963}
3964
3965#
3966# Compute MD5 digest for given file or stdin if no file given.
3967# Note: file path must not contain spaces
3968#
3969function md5digest
3970{
3971	typeset file=$1
3972
3973	case $(uname) in
3974	FreeBSD)
3975		md5 -q $file
3976		;;
3977	*)
3978		md5sum -b $file | awk '{ print $1 }'
3979		;;
3980	esac
3981}
3982
3983#
3984# Compute SHA256 digest for given file or stdin if no file given.
3985# Note: file path must not contain spaces
3986#
3987function sha256digest
3988{
3989	typeset file=$1
3990
3991	case $(uname) in
3992	FreeBSD)
3993		sha256 -q $file
3994		;;
3995	*)
3996		sha256sum -b $file | awk '{ print $1 }'
3997		;;
3998	esac
3999}
4000
4001function new_fs #<args>
4002{
4003	case $(uname) in
4004	FreeBSD)
4005		newfs "$@"
4006		;;
4007	*)
4008		echo y | newfs -v "$@"
4009		;;
4010	esac
4011}
4012
4013function stat_size #<path>
4014{
4015	typeset path=$1
4016
4017	case $(uname) in
4018	FreeBSD)
4019		stat -f %z "$path"
4020		;;
4021	*)
4022		stat -c %s "$path"
4023		;;
4024	esac
4025}
4026
4027# Run a command as if it was being run in a TTY.
4028#
4029# Usage:
4030#
4031#    faketty command
4032#
4033function faketty
4034{
4035    if is_freebsd; then
4036        script -q /dev/null env "$@"
4037    else
4038        script --return --quiet -c "$*" /dev/null
4039    fi
4040}
4041
4042#
4043# Produce a random permutation of the integers in a given range (inclusive).
4044#
4045function range_shuffle # begin end
4046{
4047	typeset -i begin=$1
4048	typeset -i end=$2
4049
4050	seq ${begin} ${end} | sort -R
4051}
4052
4053#
4054# Cross-platform xattr helpers
4055#
4056
4057function get_xattr # name path
4058{
4059	typeset name=$1
4060	typeset path=$2
4061
4062	case $(uname) in
4063	FreeBSD)
4064		getextattr -qq user "${name}" "${path}"
4065		;;
4066	*)
4067		attr -qg "${name}" "${path}"
4068		;;
4069	esac
4070}
4071
4072function set_xattr # name value path
4073{
4074	typeset name=$1
4075	typeset value=$2
4076	typeset path=$3
4077
4078	case $(uname) in
4079	FreeBSD)
4080		setextattr user "${name}" "${value}" "${path}"
4081		;;
4082	*)
4083		attr -qs "${name}" -V "${value}" "${path}"
4084		;;
4085	esac
4086}
4087
4088function set_xattr_stdin # name value
4089{
4090	typeset name=$1
4091	typeset path=$2
4092
4093	case $(uname) in
4094	FreeBSD)
4095		setextattr -i user "${name}" "${path}"
4096		;;
4097	*)
4098		attr -qs "${name}" "${path}"
4099		;;
4100	esac
4101}
4102
4103function rm_xattr # name path
4104{
4105	typeset name=$1
4106	typeset path=$2
4107
4108	case $(uname) in
4109	FreeBSD)
4110		rmextattr -q user "${name}" "${path}"
4111		;;
4112	*)
4113		attr -qr "${name}" "${path}"
4114		;;
4115	esac
4116}
4117
4118function ls_xattr # path
4119{
4120	typeset path=$1
4121
4122	case $(uname) in
4123	FreeBSD)
4124		lsextattr -qq user "${path}"
4125		;;
4126	*)
4127		attr -ql "${path}"
4128		;;
4129	esac
4130}
4131
4132function kstat # stat flags?
4133{
4134	typeset stat=$1
4135	typeset flags=${2-"-n"}
4136
4137	case $(uname) in
4138	FreeBSD)
4139		sysctl $flags kstat.zfs.misc.$stat
4140		;;
4141	Linux)
4142		typeset zfs_kstat="/proc/spl/kstat/zfs/$stat"
4143		[[ -f "$zfs_kstat" ]] || return 1
4144		cat $zfs_kstat
4145		;;
4146	*)
4147		false
4148		;;
4149	esac
4150}
4151
4152function get_arcstat # stat
4153{
4154	typeset stat=$1
4155
4156	case $(uname) in
4157	FreeBSD)
4158		kstat arcstats.$stat
4159		;;
4160	Linux)
4161		kstat arcstats | awk "/$stat/ { print \$3 }"
4162		;;
4163	*)
4164		false
4165		;;
4166	esac
4167}
4168
4169#
4170# Wait for the specified arcstat to reach non-zero quiescence.
4171# If echo is 1 echo the value after reaching quiescence, otherwise
4172# if echo is 0 print the arcstat we are waiting on.
4173#
4174function arcstat_quiescence # stat echo
4175{
4176	typeset stat=$1
4177	typeset echo=$2
4178	typeset do_once=true
4179
4180	if [[ $echo -eq 0 ]]; then
4181		echo "Waiting for arcstat $1 quiescence."
4182	fi
4183
4184	while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
4185		typeset stat1=$(get_arcstat $stat)
4186		sleep 2
4187		typeset stat2=$(get_arcstat $stat)
4188		do_once=false
4189	done
4190
4191	if [[ $echo -eq 1 ]]; then
4192		echo $stat2
4193	fi
4194}
4195
4196function arcstat_quiescence_noecho # stat
4197{
4198	typeset stat=$1
4199	arcstat_quiescence $stat 0
4200}
4201
4202function arcstat_quiescence_echo # stat
4203{
4204	typeset stat=$1
4205	arcstat_quiescence $stat 1
4206}
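
# Illustrative usage (editor's sketch; "c_max" and "l2_size" are names that
# appear in the arcstats kstat):
#
#	log_note "ARC c_max is $(get_arcstat c_max) bytes"
#	# Wait for the L2ARC feed thread to settle before checking l2_size.
#	arcstat_quiescence_noecho l2_size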
4207
4208#
4209# Given an array of pids, wait until all processes
4210# have completed and check their return status.
4211#
4212function wait_for_children #children
4213{
4214	rv=0
4215	children=("$@")
4216	for child in "${children[@]}"
4217	do
4218		child_exit=0
4219		wait ${child} || child_exit=$?
4220		if [ $child_exit -ne 0 ]; then
4221			echo "child ${child} failed with ${child_exit}"
4222			rv=1
4223		fi
4224	done
4225	return $rv
4226}
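
# Illustrative usage (editor's sketch; "worker_func" is a hypothetical function
# defined by the calling test):
#
#	typeset -a pids
#	for i in 1 2 3; do
#		worker_func $i &
#		pids+=($!)
#	done
#	log_must wait_for_children "${pids[@]}"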
4227