xref: /illumos-gate/usr/src/test/zfs-tests/include/libtest.shlib (revision 9d1bdd5320f2dd3350d31369cc595ea2862da380)
1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24# Use is subject to license terms.
25# Copyright (c) 2012, 2017 by Delphix. All rights reserved.
26# Copyright (c) 2017 by Tim Chase. All rights reserved.
27# Copyright (c) 2017 by Nexenta Systems, Inc. All rights reserved.
28# Copyright (c) 2017 Datto Inc.
29# Copyright 2020 Joyent, Inc.
30# Copyright 2024 MNX Cloud, Inc.
31#
32
33. ${STF_TOOLS}/contrib/include/logapi.shlib
34. ${STF_SUITE}/include/math.shlib
35. ${STF_SUITE}/include/blkdev.shlib
36
37# Determine if this is a Linux test system
38#
39# Return 0 if platform Linux, 1 if otherwise
40
function is_linux
{
	# The test expression's own exit status is the answer: 0 when
	# uname reports GNU/Linux, 1 otherwise.
	[[ $(uname -o) == "GNU/Linux" ]]
}
49
50# Determine whether a dataset is mounted
51#
52# $1 dataset name
53# $2 filesystem type; optional - defaulted to zfs
54#
55# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
56
function ismounted
{
	# Default the filesystem type to zfs when $2 is omitted.
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			# 'zfs mount' prints "<dataset> <mountpoint>" pairs.
			# A leading '/' means $1 is a mountpoint; otherwise
			# it is treated as a dataset name.
			if [[ "$1" == "/"* ]] ; then
				for out in $(zfs mount | awk '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $(zfs mount | awk '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			# df prints "<dir> (<name>): ..."; on failure,
			# propagate df's status (documented as "2 on error",
			# though strictly this is whatever df returned).
			out=$(df -F $fstype $1 2>/dev/null)
			ret=$?
			(($ret != 0)) && return $ret

			# Carve the mount directory and the device/resource
			# name out of the df output.
			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			# $1 may be given as either the directory or the
			# device name.
			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	esac

	return 1
}
92
93# Return 0 if a dataset is mounted; 1 otherwise
94#
95# $1 dataset name
96# $2 filesystem type; optional - defaulted to zfs
97
function mounted
{
	# Succeed only when ismounted reports "mounted" (exit code 0);
	# both "unmounted" and errors count as failure here.
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
104
105# Return 0 if a dataset is unmounted; 1 otherwise
106#
107# $1 dataset name
108# $2 filesystem type; optional - defaulted to zfs
109
function unmounted
{
	# Succeed only when ismounted reports exactly "not mounted"
	# (exit code 1); "mounted" (0) and error codes both fail.
	typeset -i rc
	ismounted $1 $2
	rc=$?
	((rc == 1)) && return 0
	return 1
}
116
117# split line on ","
118#
119# $1 - line to split
120
function splitline
{
	# Turn each comma into a space so callers can word-split the
	# result, e.g.  for dev in $(splitline $list)
	echo $1 | tr ',' ' '
}
125
function default_setup
{
	# Build the standard pool/dataset layout, then record a PASS
	# result (log_pass terminates the caller).
	default_setup_noexit "$@"
	log_pass
}
132
133#
134# Given a list of disks, setup storage pools and datasets.
135#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3

	# Pools can only be created/destroyed in the global zone; a
	# local zone just re-imports what was delegated to it.
	if is_global_zone; then
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		reexport_pool
	fi

	# Recreate the default mountpoint directory from scratch.
	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	# Optionally add an unmountable container dataset holding a
	# second filesystem mounted at $TESTDIR1.
	if [[ -n $container ]]; then
		rm -rf $TESTDIR1  || \
			log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	# Optionally add a volume.  zvols can only be created in the
	# global zone, so local zones get a plain dataset of that name.
	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
179
180#
181# Given a list of disks, setup a storage pool, file system and
182# a container.
183#
function default_container_setup
{
	# Delegate to default_setup with the container flag enabled.
	default_setup "$1" "true"
}
190
191#
192# Given a list of disks, setup a storage pool,file system
193# and a volume.
194#
function default_volume_setup
{
	# Delegate to default_setup with the volume flag enabled and no
	# container.
	default_setup "$1" "" "true"
}
201
202#
203# Given a list of disks, setup a storage pool,file system,
204# a container and a volume.
205#
function default_container_volume_setup
{
	# Delegate to default_setup with both the container and volume
	# flags enabled.
	default_setup "$1" "true" "true"
}
212
213#
# Create a snapshot of a filesystem or volume. By default, snapshot the
# standard test filesystem.
216#
217# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
218# $2 snapshot name. Default, $TESTSNAP
219#
function create_snapshot
{
	# Snapshot the given filesystem/volume (default: the standard
	# test filesystem) under the given snapshot name.
	typeset target=${1:-$TESTPOOL/$TESTFS}
	typeset snapname=${2:-$TESTSNAP}

	if [[ -z $target ]]; then
		log_fail "Filesystem or volume's name is undefined."
	fi
	if [[ -z $snapname ]]; then
		log_fail "Snapshot's name is undefined."
	fi

	# Refuse to clobber an existing snapshot, and insist that the
	# dataset being snapshotted actually exists.
	snapexists $target@$snapname && \
		log_fail "$target@$snapname already exists."
	datasetexists $target || \
		log_fail "$target must exist."

	log_must zfs snapshot $target@$snapname
}
236
237#
238# Create a clone from a snapshot, default clone name is $TESTCLONE.
239#
240# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
241# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
242#
function create_clone   # snapshot clone
{
	# Clone the given snapshot (default: the standard test
	# snapshot) into the given clone dataset.
	typeset src=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset dst=${2:-$TESTPOOL/$TESTCLONE}

	if [[ -z $src ]]; then
		log_fail "Snapshot name is undefined."
	fi
	if [[ -z $dst ]]; then
		log_fail "Clone name is undefined."
	fi

	log_must zfs clone $src $dst
}
255
256#
# Create a bookmark of the given snapshot.  By default, bookmark a snapshot
# of the standard test filesystem.
259#
260# $1 Existing filesystem or volume name. Default, $TESTFS
261# $2 Existing snapshot name. Default, $TESTSNAP
262# $3 bookmark name. Default, $TESTBKMARK
263#
function create_bookmark
{
	# Bookmark the given snapshot of the given dataset, defaulting
	# every component to the suite's standard names.
	typeset ds=${1:-$TESTFS}
	typeset snapname=${2:-$TESTSNAP}
	typeset mark=${3:-$TESTBKMARK}

	[[ -z $ds ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snapname ]] && log_fail "Snapshot's name is undefined."
	[[ -z $mark ]] && log_fail "Bookmark's name is undefined."

	# The bookmark must be new, while both the dataset and the
	# snapshot it is taken from must already exist.
	bkmarkexists $ds#$mark && \
		log_fail "$ds#$mark already exists."
	datasetexists $ds || \
		log_fail "$ds must exist."
	snapexists $ds@$snapname || \
		log_fail "$ds@$snapname must exist."

	log_must zfs bookmark $ds@$snapname $ds#$mark
}
284
285#
286# Create a temporary clone result of an interrupted resumable 'zfs receive'
287# $1 Destination filesystem name. Must not exist, will be created as the result
288#    of this function along with its %recv temporary clone
289# $2 Source filesystem name. Must not exist, will be created and destroyed
290#
function create_recv_clone
{
	typeset recvfs="$1"
	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
	typeset snap="$sendfs@snap1"
	typeset incr="$sendfs@snap2"
	typeset mountpoint="$TESTDIR/create_recv_clone"
	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"

	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."

	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
	datasetexists $sendfs && log_fail "Send filesystem must not exist."

	# Build a source fs with two snapshots, fully receive the first
	# one, then feed only the first 10k of the incremental stream
	# into a resumable receive (-s) so it is deliberately left
	# interrupted.
	log_must zfs create -o mountpoint="$mountpoint" $sendfs
	log_must zfs snapshot $snap
	log_must eval "zfs send $snap | zfs recv -u $recvfs"
	log_must mkfile 1m "$mountpoint/data"
	log_must zfs snapshot $incr
	log_must eval "zfs send -i $snap $incr | dd bs=10k count=1 > $sendfile"
	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
	log_must zfs destroy -r $sendfs
	log_must rm -f "$sendfile"

	# The interrupted receive must have left the %recv temporary
	# clone behind, flagged 'inconsistent'.
	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
		log_fail "Error creating temporary $recvfs/%recv clone"
	fi
}
319
function default_mirror_setup
{
	# Build the mirrored pool/dataset, then record a PASS result
	# (log_pass terminates the caller).
	default_mirror_setup_noexit $1 $2 $3
	log_pass
}
326
function default_mirror_2way_setup
{
	# Two-way variant: only the first two disks form the mirror.
	default_mirror_setup_noexit $1 $2
	log_pass
}
333
334#
335# Given a pair of disks, set up a storage pool and dataset for the mirror
336# @parameters: $1 the primary side of the mirror
337#   $2 the secondary side of the mirror
338# @uses: ZPOOL ZFS TESTPOOL TESTFS
339function default_mirror_setup_noexit
340{
341	readonly func="default_mirror_setup_noexit"
342	typeset primary=$1
343	typeset secondary=$2
344
345	[[ -z $primary ]] && \
346		log_fail "$func: No parameters passed"
347	[[ -z $secondary ]] && \
348		log_fail "$func: No secondary partition passed"
349	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
350	log_must zpool create -f $TESTPOOL mirror $@
351	log_must zfs create $TESTPOOL/$TESTFS
352	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
353}
354
355#
356# create a number of mirrors.
357# We create a number($1) of 2 way mirrors using the pairs of disks named
358# on the command line. These mirrors are *not* mounted
359# @parameters: $1 the number of mirrors to create
360#  $... the devices to use to create the mirrors on
361# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i count=$1

	shift
	# Consume the device list two disks at a time, creating one
	# two-way mirror pool named ${TESTPOOL}<n> per iteration.
	while ((count > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$count ]] && rm -rf /$TESTPOOL$count
		log_must zpool create -f $TESTPOOL$count mirror $1 $2
		shift 2
		((count -= 1))
	done
}
375
376#
377# create a number of raidz pools.
378# We create a number($1) of 2 raidz pools  using the pairs of disks named
379# on the command line. These pools are *not* mounted
380# @parameters: $1 the number of pools to create
381#  $... the devices to use to create the pools on
382# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i count=$1

	shift
	# Consume the device list two disks at a time, creating one
	# two-disk raidz pool named ${TESTPOOL}<n> per iteration.
	while ((count > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$count ]] && rm -rf /$TESTPOOL$count
		log_must zpool create -f $TESTPOOL$count raidz $1 $2
		shift 2
		((count -= 1))
	done
}
396
397#
398# Destroy the configured testpool mirrors.
399# the mirrors are of the form ${TESTPOOL}{number}
400# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
	# The generic cleanup already tears down every destroyable test
	# pool, which includes the ${TESTPOOL}<n> mirrors.
	default_cleanup_noexit
	log_pass
}
407
408#
409# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
410# $1 the list of disks
411#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	# A raidz vdev needs at least two member devices.
	((${#disks[*]} < 2)) && \
		log_fail "A raid-z requires a minimum of two disks."

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
428
429#
430# Common function used to cleanup storage pools and datasets.
431#
432# Invoked at the start of the test suite to ensure the system
433# is in a known state, and also at the end of each set of
434# sub-tests to ensure errors from one set of tests doesn't
435# impact the execution of the next set.
436
function default_cleanup
{
	# Do the real work in the noexit variant, then terminate the
	# test with a PASS result.
	default_cleanup_noexit
	log_pass
}
443
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		# Build an egrep pattern from $KEEP naming pools that
		# must survive cleanup.
		exclude=`eval echo \"'(${KEEP})'\"`
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		#
		# Use [[ -n ... ]] here: the previous
		# '[ ! -z ${ALL_POOLS} ]' left the expansion unquoted,
		# so as soon as more than one pool was listed the test
		# failed with "too many arguments" and the destroy loop
		# was silently skipped.
		while [[ -n $ALL_POOLS ]]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				# Re-list after each destroy; taking one
				# pool down may make others destroyable.
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		typeset fs=""
		# In a local zone: destroy every dataset delegated into
		# the zone's containers.
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must zfs destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# Only reset checksum when the dataset is
				# not encrypted (property lookup failed,
				# returned empty, or reported "off").
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must rm -rf $TESTDIR
}
520
521
522#
523# Common function used to cleanup storage pools, file systems
524# and containers.
525#
function default_container_cleanup
{
	# A local zone must re-import its delegated pool before any
	# dataset operations will work.
	if ! is_global_zone; then
		reexport_pool
	fi

	# Unmount, then recursively destroy, the contained filesystem
	# and finally the container itself.
	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must zfs destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	# Finish with the standard cleanup; it exits via log_pass.
	default_cleanup
}
547
548#
549# Common function used to cleanup snapshot of file system or volume. Default to
550# delete the file system's snapshot
551#
552# $1 snapshot name
553#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		# Fixed grammar of the failure message ("does not
		# existed" -> "does not exist").
		log_fail "'$snap' does not exist."
	fi

	#
	# The mountpoint reported by get_prop is only meaningful while
	# the snapshot is actually mounted, so record it (for removal
	# of the directory afterwards) only when the snapshot is
	# currently mounted.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must zfs destroy $snap
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
578
579#
580# Common function used to cleanup clone.
581#
582# $1 clone name
583#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		# Fixed grammar of the failure message ("does not
		# existed" -> "does not exist").
		log_fail "'$clone' does not exist."
	fi

	# As in destroy_snapshot: only trust get_prop's mountpoint
	# while the clone is actually mounted.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	log_must zfs destroy $clone
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
604
605#
606# Common function used to cleanup bookmark of file system or volume.  Default
607# to delete the file system's bookmark.
608#
609# $1 bookmark name
610#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Previously referenced the undefined variable
		# "$bkmarkp", so the failure message printed an empty
		# bookmark name; also fixed the grammar.
		log_fail "'$bkmark' does not exist."
	fi

	log_must zfs destroy $bkmark
}
621
622# Return 0 if a snapshot exists; $? otherwise
623#
624# $1 - snapshot name
625
function snapexists
{
	# The lookup's exit status is the function's exit status.
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
631
632#
633# Return 0 if a bookmark exists; $? otherwise
634#
635# $1 - bookmark name
636#
function bkmarkexists
{
	# The lookup's exit status is the function's exit status.
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
642
643#
644# Set a property to a certain value on a dataset.
645# Sets a property of the dataset to the value as passed in.
646# @param:
647#	$1 dataset who's property is being set
648#	$2 property to set
649#	$3 value to set property to
650# @return:
651#	0 if the property could be set.
652#	non-zero otherwise.
653# @use: ZFS
654#
function dataset_setprop
{
	typeset fn=dataset_setprop

	# Require dataset, property and value.
	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi

	# Capture both stdout and stderr so failures can be reported
	# verbatim; the zfs exit status becomes our return value.
	typeset output rv
	output=$(zfs set $2=$3 $1 2>&1)
	rv=$?
	if ((rv != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
	fi
	return $rv
}
675
676#
677# Assign suite defined dataset properties.
678# This function is used to apply the suite's defined default set of
679# properties to a dataset.
680# @parameters: $1 dataset to use
681# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
682# @returns:
683#   0 if the dataset has been altered.
684#   1 if no pool name was passed in.
685#   2 if the dataset could not be found.
686#   3 if the dataset could not have it's properties set.
687#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	# NOTE(review): this iterates over every whitespace-separated
	# token of unadorned 'zfs list' output (header words included),
	# relying on the dataset name appearing as one such token.
	for confset in $(zfs list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	# Apply the suite-wide defaults only for those properties the
	# configuration actually defines.
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
715
716#
717# Check a numeric assertion
718# @parameter: $@ the assertion to check
719# @output: big loud notice if assertion failed
720# @use: log_fail
721#
function assert
{
	# Evaluate the arguments as an arithmetic expression; on a
	# false result report the expression itself via log_fail.
	if ! (($@)); then
		log_fail "$@"
	fi
}
726
727#
728# Function to format partition size of a disk
729# Given a disk cxtxdx reduces all partitions
730# to 0 size
731#
function zero_partitions #<whole_disk_name>
{
	typeset disk=$1
	typeset slice

	# Shrink every slice to zero size.  Slice 2 (the whole-disk
	# slice) is deliberately left untouched.
	for slice in 0 1 3 4 5 6 7; do
		set_partition $slice "" 0mb $disk
	done
}
742
743#
744# Given a slice, size and disk, this function
745# formats the slice to the specified size.
746# Size should be specified with units as per
747# the `format` command requirements eg. 100mb 3gb
748#
749function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
750{
751	typeset -i slicenum=$1
752	typeset start=$2
753	typeset size=$3
754	typeset disk=$4
755	[[ -z $slicenum || -z $size || -z $disk ]] && \
756	    log_fail "The slice, size or disk name is unspecified."
757	typeset format_file=/var/tmp/format_in.$$
758
759	echo "partition" >$format_file
760	echo "$slicenum" >> $format_file
761	echo "" >> $format_file
762	echo "" >> $format_file
763	echo "$start" >> $format_file
764	echo "$size" >> $format_file
765	echo "label" >> $format_file
766	echo "" >> $format_file
767	echo "q" >> $format_file
768	echo "q" >> $format_file
769
770	format -e -s -d $disk -f $format_file
771	typeset ret_val=$?
772	rm -f $format_file
773	[[ $ret_val -ne 0 ]] && \
774	    log_fail "Unable to format $disk slice $slicenum to $size"
775	return 0
776}
777
778#
779# Get the end cyl of the given slice
780#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	# Normalize to the bare cXtXdX name: strip any /dev prefix and
	# a trailing slice suffix.
	disk=${disk#/dev/dsk/}
	disk=${disk#/dev/rdsk/}
	disk=${disk%s*}

	# Sectors per cylinder, read from the VTOC via the backup
	# slice (s2).
	typeset -i ratio=0
	ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		grep "sectors\/cylinder" | \
		awk '{print $2}')

	if ((ratio == 0)); then
		return
	fi

	# Field 6 of 'prtvtoc -h' is the slice's last sector; convert
	# it to an end cylinder.
	typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		nawk -v token="$slice" '{if ($1==token) print $6}')

	((endcyl = (endcyl + 1) / ratio))
	echo $endcyl
}
808
809
810#
811# Given a size,disk and total slice number,  this function formats the
812# disk slices from 0 to the total slice number with the same specified
813# size.
814#
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	# Wipe any existing layout first, then create equal-sized
	# slices, each starting at the cylinder where the previous one
	# ended.
	zero_partitions $disk_name
	while ((i < $total_slices)); do
		if ((i == 2)); then
			# Slice 2 is the whole-disk (backup) slice;
			# leave it alone.
			((i = i + 1))
			continue
		fi
		set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
		((i = i+1))
	done
}
834
835#
836# This function continues to write to a filenum number of files into dirnum
837# number of directories until either file_write returns an error or the
838# maximum number of files per directory have been written.
839#
840# Usage:
841# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
842#
843# Return value: 0 on success
844#		non 0 on error
845#
846# Where :
847#	destdir:    is the directory where everything is to be created under
848#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
849#	filenum:    the maximum number of files per subdirectory
850#	bytes:	    number of bytes to write
#	num_writes: number of times to write out bytes
#	data:	    the data that will be written
853#
854#	E.g.
#	fill_fs /testdir 20 25 1024 256 0
856#
857# Note: bytes * num_writes equals the size of the testfile
858#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset data=${6:-0}

	typeset -i odirnum=1	# loop flag: cleared to stop writing
	typeset -i idirnum=0	# index of the current subdirectory
	typeset -i fn=0		# index of the next file in that dir
	typeset -i retval=0

	mkdir -p $destdir/$idirnum
	# Keep writing files, rolling over to a new subdirectory every
	# $filenum files, until file_write fails or the directory limit
	# is reached (dirnum < 0 means no limit).
	while (($odirnum > 0)); do
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		if (($retval != 0)); then
			# Stop on the first write failure and report
			# its status to the caller.
			odirnum=0
			break
		fi
		if (($fn >= $filenum)); then
			# Current directory is full; start the next one.
			fn=0
			((idirnum = idirnum + 1))
			mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
896
897#
898# Simple function to get the specified property. If unable to
899# get the property then exits.
900#
901# Note property is in 'parsable' format (-p)
902#
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset prop_val

	# -p gives a parsable value, -H strips the header line.
	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
	if (($? != 0)); then
		log_note "Unable to get $prop property for dataset " \
		"$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
919
920#
921# Simple function to get the specified property of pool. If unable to
922# get the property then exits.
923#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		# Last line of 'zpool get' output, third column, is the
		# property value.
		prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
			awk '{print $3}')
		# NOTE(review): $? here reflects the final (awk) stage
		# of the pipeline, not 'zpool get', so this branch is
		# unlikely ever to fire; the poolexists guard above is
		# the real protection.
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			"$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	echo $prop_val
	return 0
}
946
947# Return 0 if a pool exists; $? otherwise
948#
949# $1 - pool name
950
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# Probe with a cheap property read; its exit status is ours.
	zpool get name "$pool" > /dev/null 2>&1
}
963
964# Return 0 if all the specified datasets exist; $? otherwise
965#
966# $1-n  dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Every argument must name an existing dataset; propagate the
	# status of the first lookup that misses.
	typeset ds
	for ds in "$@"; do
		zfs get name $ds > /dev/null 2>&1 || \
			return $?
	done

	return 0
}
982
983# return 0 if none of the specified datasets exists, otherwise return 1.
984#
985# $1-n  dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Fail as soon as any of the named datasets is found.
	typeset ds
	for ds in "$@"; do
		zfs list -H -t filesystem,snapshot,volume $ds > /dev/null 2>&1 \
		    && return 1
	done

	return 0
}
1001
1002#
1003# Given a mountpoint, or a dataset name, determine if it is shared.
1004#
1005# Returns 0 if shared, 1 otherwise.
1006#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A dataset name (no leading '/') is first translated to its
	# mountpoint; datasets with no usable mountpoint can't be
	# shared.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	# Column 2 of share(1M) output is the shared pathname.
	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Not shared; log the NFS server state to help diagnose why.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
1039
1040#
1041# Given a mountpoint, determine if it is not shared.
1042#
1043# Returns 0 if not shared, 1 otherwise.
1044#
function not_shared
{
	typeset fs=$1

	# Simply invert is_shared's verdict.
	if is_shared $fs; then
		return 1
	fi
	return 0
}
1056
1057#
1058# Helper function to unshare a mountpoint.
1059#
function unshare_fs #fs
{
	typeset fs=$1

	# Only unshare when actually shared; always report success.
	if is_shared $fs; then
		log_must zfs unshare $fs
	fi

	return 0
}
1071
1072#
1073# Check NFS server status and trigger it online.
1074#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		timeout=10
		# Note: bare 'timeout' (no $) is evaluated
		# arithmetically by ksh's [[ ... -ne ... ]].  Poll until
		# the service leaves its transitional ('*') state or the
		# 10-second budget is exhausted.
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1121
1122#
1123# To verify whether calling process is in global zone
1124#
1125# Return 0 if in global zone, 1 in non-global zone
1126#
function is_global_zone
{
	# The comparison's exit status is the answer: 0 in the global
	# zone, 1 anywhere else (including when zonename is missing).
	[[ $(zonename 2>/dev/null) == "global" ]]
}
1135
1136#
1137# Verify whether test is permitted to run from
1138# global zone, local zone, or both
1139#
1140# $1 zone limit, could be "global", "local", or "both"(no limit)
1141#
1142# Return 0 if permitted, otherwise exit with log_unsupported
1143#
function verify_runnable # zone limit
{
	typeset limit=$1

	# No limit given: anything may run.
	[[ -z $limit ]] && return 0

	# log_unsupported terminates the test; an unknown limit only
	# produces a warning and is treated as "both".
	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from "\
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from "\
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		# Local zones must re-import their delegated pool before
		# the test proper starts.
		reexport_pool
	fi

	return 0
}
1178
# Return 0 if the pool is (re)created successfully (or when not in the global
# zone); 1 if no pool name was given.
1180# Note: In local zones, this function should return 0 silently.
1181#
1182# $1 - pool name
1183# $2-n - [keyword] devs_list
1184
function create_pool #pool devs_list
{
	# Strip any dataset component so a full dataset name may be
	# passed as $1.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Start from a clean slate if the pool already exists.
	if poolexists $pool ; then
		destroy_pool $pool
	fi

	# Only the global zone may create pools; in a local zone this
	# silently succeeds so shared setup code works in both contexts.
	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1207
# Return 0 if the pool is destroyed successfully (or when not in the global
# zone); 1 if no pool name was given or the pool does not exist.
1209# Note: In local zones, this function should return 0 silently.
1210#
1211# $1 - pool name
1212# Destroy pool with the given parameters.
1213
function destroy_pool #pool
{
	# Strip any dataset component so a full dataset name may be
	# passed as $1.
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			# Remember the mountpoint so its directory can be
			# removed after the pool is gone.
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			must=""
			# After 7 failed attempts, prefix the command with
			# log_must so the next failure aborts the test.
			# Note: bare 'count++' is evaluated arithmetically
			# by ksh's [[ ... -ge ... ]], so it increments too.
			while [[ $ret -ne 0 ]]; do
				$must zpool destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				[[ count++ -ge 7 ]] && must=log_must
				sleep $wait_time
			done

			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1253
1254# Return 0 if created successfully; $? otherwise
1255#
1256# $1 - dataset name
1257# $2-n - dataset options
1258
function create_dataset #dataset dataset_options
{
	typeset ds=$1

	shift

	if [[ -z $ds ]]; then
		log_note "Missing dataset name."
		return 1
	fi

	# Start from a clean slate if the dataset already exists.
	datasetexists $ds && destroy_dataset $ds

	log_must zfs create $@ $ds

	return 0
}
1278
# Return 0 if the dataset is destroyed successfully; 1 if no dataset name was
# given or the dataset does not exist.
1280# Note: In local zones, this function should return 0 silently.
1281#
1282# $1 - dataset name
1283
function destroy_dataset #dataset
{
	typeset ds=$1
	typeset mntp

	if [[ -z $ds ]]; then
		log_note "No dataset name given."
		return 1
	fi

	if ! datasetexists "$ds" ; then
		log_note "Dataset does not exist. ($ds)"
		return 1
	fi

	# Remember the mountpoint so its directory can be removed after
	# the recursive destroy.
	mntp=$(get_prop mountpoint "$ds")
	log_must zfs destroy -r $ds
	[[ -d $mntp ]] && log_must rm -rf $mntp

	return 0
}
1305
1306#
1307# Firstly, create a pool with 5 datasets. Then, create a single zone and
1308# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1309# and a zvol device to the zone.
1310#
1311# $1 zone name
1312# $2 zone root directory prefix
1313# $3 zone ip
1314#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 containers within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have its mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	# Recreate the zone's root directory with restrictive permissions,
	# as zoneadm requires.
	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the five containers to the zone.
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file so the zone boots without interactive
	# prompting; root_password is a fixed crypt hash for test use only.
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo  "terminal=dtterm" >> $sysidcfg
	echo  "network_interface=primary {" >> $sysidcfg
	echo  "hostname=$zone_name" >> $sysidcfg
	echo  "}" >> $sysidcfg
	echo  "name_service=NONE" >> $sysidcfg
	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo  "security_policy=NONE" >> $sysidcfg
	echo  "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1418
1419#
1420# Reexport TESTPOOL & TESTPOOL(1-4)
1421#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	# Repoint TESTPOOL and TESTPOOL1..TESTPOOL4 at the per-zone
	# containers and ensure each one is mounted.
	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# TESTPOOL1..4 are named dynamically, so both the
			# assignment and the mount check go through eval
			# for the variable indirection.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1442
1443#
1444# Verify a given disk is online or offline
1445#
1446# Return 0 is pool/disk matches expected state, 1 otherwise
1447#
function check_state # pool disk state{online,offline}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Succeed only when the disk's line in 'zpool status' carries
	# the expected state (case-insensitive).
	zpool status -v $pool | grep "$disk" | \
	    grep -i "$state" > /dev/null 2>&1
}
1459
1460#
1461# Get the mountpoint of snapshot
1462# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1463# as its mountpoint
1464#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# A snapshot name must contain '@' with non-empty parts on
	# both sides.
	if [[ $dataset != *@* ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	[[ -n $fs && -n $snap ]] || \
		log_fail "Error name of snapshot '$dataset'."

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1482
1483#
1484# Given a device and 'ashift' value verify it's correctly set on every label
1485#
1486function verify_ashift # device ashift
1487{
1488	typeset device="$1"
1489	typeset ashift="$2"
1490
1491	zdb -e -lll $device | nawk -v ashift=$ashift '/ashift: / {
1492	    if (ashift != $2)
1493	        exit 1;
1494	    else
1495	        count++;
1496	    } END {
1497	    if (count != 4)
1498	        exit 1;
1499	    else
1500	        exit 0;
1501	    }'
1502
1503	return $?
1504}
1505
1506#
1507# Given a pool and file system, this function will verify the file system
1508# using the zdb internal tool. Note that the pool is exported and imported
1509# to ensure it has consistent state.
1510#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	# Build the '-d <dir>' search arguments for the import.
	for dir in $dirs ; do
		search_path="$search_path -d $dir"
	done

	log_must zpool import $search_path $pool

	if ! zdb -cudi $filesys > $zdbout 2>&1; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1544
1545#
1546# Given a pool issue a scrub and verify that no checksum errors are reported.
1547#
function verify_pool
{
	typeset pool=${1:-$TESTPOOL}

	# Scrub the pool, wait for completion, then pull the value that
	# appears under the CKSUM column header out of 'zpool status'.
	log_must zpool scrub $pool
	log_must wait_scrubbed $pool

	cksum=$(zpool status $pool | \
	    awk '{if ($5 == "CKSUM"){L=1; next} if (L) {print $NF;L=0}}')
	[[ $cksum == 0 ]] && return

	log_must zpool status -v
	log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
}
1562
1563#
1564# Given a pool, and this function list all disks in the pool
1565#
function get_disklist # pool
{
	typeset dl

	# Strip the iostat header/separator rows and the vdev grouping
	# keywords, leaving only the leaf device names.
	dl=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
	    grep -v "\-\-\-\-\-" | \
	    egrep -v -e "^(mirror|raidz[1-3]|spare|log|cache|special|dedup)$")

	echo $dl
}
1576
1577# /**
1578#  This function kills a given list of processes after a time period. We use
1579#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
1580#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1581#  would be listed as FAIL, which we don't want : we're happy with stress tests
1582#  running for a certain amount of time, then finishing.
1583#
1584# @param $1 the time in seconds after which we should terminate these processes
1585# @param $2..$n the processes we wish to terminate.
1586# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"
	typeset pid

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	# Only signal processes that are still alive.
	for pid in $cpids; do
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
1606
1607#
1608# Verify a given hotspare disk is inuse or avail
1609#
1610# Return 0 is pool/disk matches expected state, 1 otherwise
1611#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Look the device up in the 'spares' section of zpool status
	# and compare its reported state with the expectation.
	cur_state=$(get_device_state $pool $disk "spares")

	[[ $state == ${cur_state} ]]
}
1625
1626#
1627# Wait until a hotspare transitions to a given state or times out.
1628#
1629# Return 0 when  pool/disk matches expected state, 1 on timeout.
1630#
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	# Strip an optional /dev/dsk/ prefix. The previous expansion
	# used the malformed pattern ${2#$/DEV_DSKDIR/} ('$/' is a
	# literal, not an expansion) which never matched anything;
	# check_hotspare_state strips the prefix itself, so behavior
	# for full paths is unchanged.
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the spare reaches the requested
	# state or the timeout (in seconds) expires.
	while ((i < timeout)); do
		if check_hotspare_state $pool $disk $state; then
			return 0
		fi

		((i += 1))
		sleep 1
	done

	return 1
}
1650
1651#
1652# Verify a given slog disk is inuse or avail
1653#
1654# Return 0 is pool/disk matches expected state, 1 otherwise
1655#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Look the device up in the 'logs' section of zpool status and
	# compare its reported state with the expectation.
	cur_state=$(get_device_state $pool $disk "logs")

	[[ $state == ${cur_state} ]]
}
1669
1670#
1671# Verify a given vdev disk is inuse or avail
1672#
1673# Return 0 is pool/disk matches expected state, 1 otherwise
1674#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Look the device up in the main pool config of zpool status
	# and compare its reported state with the expectation.
	cur_state=$(get_device_state $pool $disk)

	[[ $state == ${cur_state} ]]
}
1688
1689#
1690# Wait until a vdev transitions to a given state or times out.
1691#
1692# Return 0 when  pool/disk matches expected state, 1 on timeout.
1693#
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	# Strip an optional /dev/dsk/ prefix. The previous expansion
	# used the malformed pattern ${2#$/DEV_DSKDIR/} ('$/' is a
	# literal, not an expansion) which never matched anything;
	# check_vdev_state strips the prefix itself, so behavior for
	# full paths is unchanged.
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the vdev reaches the requested
	# state or the timeout (in seconds) expires.
	while ((i < timeout)); do
		if check_vdev_state $pool $disk $state; then
			return 0
		fi

		((i += 1))
		sleep 1
	done

	return 1
}
1713
1714#
1715# Check the output of 'zpool status -v <pool>',
1716# and to see if the content of <token> contain the <keyword> specified.
1717#
1718# Return 0 is contain, 1 otherwise
1719#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}

	# Pull the "<token>:" line out of the status output, then test
	# whether it mentions the keyword (case-insensitive).
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	[[ $verbose == true ]] && log_note $scan
	echo $scan | grep -i "$keyword" > /dev/null 2>&1
}
1736
1737#
1738# These 6 following functions are instance of check_pool_status()
1739#	is_pool_resilvering - to check if the pool is resilver in progress
1740#	is_pool_resilvered - to check if the pool is resilver completed
1741#	is_pool_scrubbing - to check if the pool is scrub in progress
1742#	is_pool_scrubbed - to check if the pool is scrub completed
1743#	is_pool_scrub_stopped - to check if the pool is scrub stopped
1744#	is_pool_scrub_paused - to check if the pool has scrub paused
1745#	is_pool_removing - to check if the pool is removing a vdev
1746#	is_pool_removed - to check if the pool is remove completed
1747#
function is_pool_resilvering #pool <verbose>
{
	# True while 'zpool status' reports an active resilver.
	check_pool_status "$1" "scan" "resilver in progress since " $2
}
1753
function is_pool_resilvered #pool <verbose>
{
	# True once 'zpool status' reports a completed resilver.
	check_pool_status "$1" "scan" "resilvered " $2
}
1759
function is_pool_scrubbing #pool <verbose>
{
	# True while 'zpool status' reports an active scrub.
	check_pool_status "$1" "scan" "scrub in progress since " $2
}
1765
function is_pool_scrubbed #pool <verbose>
{
	# True once 'zpool status' reports a completed scrub.
	check_pool_status "$1" "scan" "scrub repaired" $2
}
1771
function is_pool_scrub_stopped #pool <verbose>
{
	# True once 'zpool status' reports a canceled scrub.
	check_pool_status "$1" "scan" "scrub canceled" $2
}
1777
function is_pool_scrub_paused #pool <verbose>
{
	# True while 'zpool status' reports a paused scrub.
	check_pool_status "$1" "scan" "scrub paused since " $2
}
1783
function is_pool_removing #pool
{
	# True while 'zpool status' reports an active device removal.
	check_pool_status "$1" "remove" "in progress since "
}
1789
function is_pool_removed #pool
{
	# True once 'zpool status' reports a completed device removal.
	check_pool_status "$1" "remove" "completed on"
}
1795
function wait_for_degraded
{
	typeset pool=$1
	typeset timeout=${2:-30}
	typeset t0=$SECONDS

	# Poll the pool health once per second until it reports
	# DEGRADED, giving up after $timeout seconds.
	until [[ $(get_pool_prop health $pool) == "DEGRADED" ]]; do
		log_note "$pool is not yet degraded."
		sleep 1
		if ((SECONDS - t0 > $timeout)); then
			log_note "$pool not degraded after $timeout seconds."
			return 1
		fi
	done

	return 0
}
1814
1815#
1816# Wait for a pool to be scrubbed
1817#
1818# $1 pool name
1819# $2 number of seconds to wait (optional)
1820#
1821# Returns true when pool has been scrubbed, or false if there's a timeout or if
1822# no scrub was done.
1823#
function wait_scrubbed
{
	typeset pool=${1:-$TESTPOOL}
	typeset timeout=$2
	typeset -i i=0

	# The header above documents an optional $2 timeout (seconds)
	# and a false return when it expires, but the old loop ignored
	# the argument and could spin forever. Honor the timeout when
	# one is given; with no argument, wait indefinitely as before.
	while ! is_pool_scrubbed $pool; do
		if [[ -n $timeout ]] && ((i >= timeout)); then
			return 1
		fi
		((i += 1))
		log_must sleep 1
	done

	return 0
}
1832
1833#
# Use create_pool()/destroy_pool() to clean up the information on
# the given disks to avoid slice overlapping.
1836#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Remove any leftover pool with our scratch name, then cycle a
	# throwaway pool across the given devices to reinitialize
	# their labels.
	poolexists $pool && destroy_pool $pool

	create_pool $pool $@
	destroy_pool $pool

	return 0
}
1850
1851#/**
1852# A function to find and locate free disks on a system or from given
1853# disks as the parameter. It works by locating disks that are in use
1854# as swap devices and dump devices, and also disks listed in /etc/vfstab
1855#
1856# $@ given disks to find which are free, default is all disks in
1857# the test system
1858#
1859# @return a string containing the list of available disks
1860#*/
1861function find_disks
1862{
1863	sfi=/tmp/swaplist.$$
1864	dmpi=/tmp/dumpdev.$$
1865	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1866
1867	swap -l > $sfi
1868	dumpadm > $dmpi 2>/dev/null
1869
1870# write an awk script that can process the output of format
1871# to produce a list of disks we know about. Note that we have
1872# to escape "$2" so that the shell doesn't interpret it while
1873# we're creating the awk script.
1874# -------------------
1875	cat > /tmp/find_disks.awk <<EOF
1876#!/bin/nawk -f
1877	BEGIN { FS="."; }
1878
1879	/^Specify disk/{
1880		searchdisks=0;
1881	}
1882
1883	{
1884		if (searchdisks && \$2 !~ "^$"){
1885			split(\$2,arr," ");
1886			print arr[1];
1887		}
1888	}
1889
1890	/^AVAILABLE DISK SELECTIONS:/{
1891		searchdisks=1;
1892	}
1893EOF
1894#---------------------
1895
1896	chmod 755 /tmp/find_disks.awk
1897	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
1898	rm /tmp/find_disks.awk
1899
1900	unused=""
1901	for disk in $disks; do
1902	# Check for mounted
1903		grep "${disk}[sp]" /etc/mnttab >/dev/null
1904		(($? == 0)) && continue
1905	# Check for swap
1906		grep "${disk}[sp]" $sfi >/dev/null
1907		(($? == 0)) && continue
1908	# check for dump device
1909		grep "${disk}[sp]" $dmpi >/dev/null
1910		(($? == 0)) && continue
1911	# check to see if this disk hasn't been explicitly excluded
1912	# by a user-set environment variable
1913		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
1914		(($? == 0)) && continue
1915		unused_candidates="$unused_candidates $disk"
1916	done
1917	rm $sfi
1918	rm $dmpi
1919
1920# now just check to see if those disks do actually exist
1921# by looking for a device pointing to the first slice in
1922# each case. limit the number to max_finddisksnum
1923	count=0
1924	for disk in $unused_candidates; do
1925		if [ -b /dev/dsk/${disk}s0 ]; then
1926		if [ $count -lt $max_finddisksnum ]; then
1927			unused="$unused $disk"
1928			# do not impose limit if $@ is provided
1929			[[ -z $@ ]] && ((count = count + 1))
1930		fi
1931		fi
1932	done
1933
1934# finally, return our disk list
1935	echo $unused
1936}
1937
1938#
1939# Add specified user to specified group
1940#
1941# $1 group name
1942# $2 user name
1943# $3 base of the homedir (optional)
1944#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	# Both a group name and a user name are mandatory.
	if [[ -z $gname || -z $uname ]]; then
		log_fail "group name or user name are not defined."
	fi

	log_must useradd -g $gname -d $basedir/$uname -m $uname
	log_must passwd -N $uname

	return 0
}
1960
1961#
1962# Delete the specified user.
1963#
1964# $1 login name
1965# $2 base of the homedir (optional)
1966#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	[[ -n $user ]] || log_fail "login name is necessary."

	# Only attempt removal when the account actually exists.
	if id $user > /dev/null 2>&1; then
		log_must userdel $user
	fi

	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
1984
1985#
1986# Select valid gid and create specified group.
1987#
1988# $1 group name
1989#
function add_group #<group_name>
{
	typeset group=$1

	[[ -n $group ]] || log_fail "group name is necessary."

	# Start from gid 100 and walk upward until groupadd accepts
	# one; exit code 4 means the gid is already taken.
	typeset -i gid=100
	typeset -i rc
	while :; do
		groupadd -g $gid $group > /dev/null 2>&1
		rc=$?
		case $rc in
			0) return 0 ;;
			# The gid is not unique; try the next one.
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
2011
2012#
2013# Delete the specified group.
2014#
2015# $1 group name
2016#
function del_group #<group_name>
{
	typeset grp=$1

	[[ -n $grp ]] || log_fail "group name is necessary."

	# 'groupmod -n <g> <g>' is a no-op rename used purely to probe
	# whether the group exists (exit 6: no such group; exit 9: the
	# name is already in use, i.e. the group exists).
	groupmod -n $grp $grp > /dev/null 2>&1
	case $? in
		# Group does not exist: nothing to remove.
		6) return 0 ;;
		# Group exists: delete it.
		9) log_must groupdel $grp ;;
		*) return 1 ;;
	esac

	return 0
}
2036
2037#
2038# This function will return true if it's safe to destroy the pool passed
2039# as argument 1. It checks for pools based on zvols and files, and also
2040# files contained in a pool that may have a different mountpoint.
2041#
function safe_to_destroy_pool { # $1 the pool name

	typeset pool=""
	typeset DONT_DESTROY=""

	# Deleting pool $1 must not pull the rug out from under other
	# pools: verify that no other pool is built from files or
	# zvols that live inside $1.
	for pool in $(zpool list -H -o name)
	do
		ALTMOUNTPOOL=""

		# top-level directories in each of the files that make
		# up the path to the files this pool is based on
		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
			awk '{print $1}')

		# zvols that make up this pool
		ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
		    | awk '{print $1}')

		# also want to determine if it's a file-based pool using
		# an alternate mountpoint...
		POOL_FILE_DIRS=$(zpool status -v $pool | \
					grep / | awk '{print $1}' | \
					awk -F/ '{print $2}' | grep -v "dev")

		for pooldir in $POOL_FILE_DIRS
		do
			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
					grep "${pooldir}$" | awk '{print $1}')

			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
		done

		if [[ -n "$ZVOLPOOL" ]]; then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ZVOLPOOL on $1"
		fi

		if [[ -n "$FILEPOOL" ]]; then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $FILEPOOL on $1"
		fi

		if [[ -n "$ALTMOUNTPOOL" ]]; then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
		fi
	done

	if [[ -z "${DONT_DESTROY}" ]]; then
		return 0
	fi

	log_note "Warning: it is not safe to destroy $1!"
	return 1
}
2107
2108#
2109# Get the available ZFS compression options
2110# $1 option type zfs_set|zfs_compress
2111#
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
			gzip-6 gzip-7 gzip-8 gzip-9"

	# 'zfs_set' includes "off" since it is a settable value; the
	# 'zfs_compress' list names only real compression algorithms.
	case $1 in
		zfs_compress) COMPRESS_OPTS="on lzjb" ;;
		zfs_set) COMPRESS_OPTS="on off lzjb" ;;
	esac

	typeset valid_opts="$COMPRESS_OPTS"
	# Append the gzip variants only when this build of zfs knows
	# about gzip compression.
	if zfs get 2>&1 | grep gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
2130
2131#
2132# Verify zfs operation with -p option work as expected
2133# $1 operation, value could be create, clone or rename
2134# $2 dataset type, value could be fs or vol
2135# $3 dataset name
2136# $4 new dataset name
2137#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# 'create' takes no source dataset: the target
			# name is the one passed in $3.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# Cloning requires an existing snapshot source.
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# Renaming requires an existing, non-snapshot source.
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must zfs destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2201
2202#
2203# Get configuration of pool
2204# $1 pool name
2205# $2 config name
2206#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# The last column of 'zpool list -H' is the altroot; '-' means
	# the pool is imported normally and 'zdb -C' can read the
	# cached config, otherwise fall back to 'zdb -e'.
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	# NOTE(review): the two expansions below strip a leading and a
	# trailing single-quote from zdb's output. The lone quote
	# characters pair up across the two lines and rely on ksh93
	# parsing behavior -- do not reformat or split these lines.
	if [[ -n $value ]] ; then
		value=${value#'}
		value=${value%'}
	fi
	echo $value

	return 0
}
2232
2233#
2234# Privated function. Random select one of items from arguments.
2235#
2236# $1 count
2237# $2-n string
2238#
function _random_get
{
	typeset cnt=$1
	shift

	typeset str="$@"
	typeset -i ind
	# Pick a 1-based field index at random and emit that field.
	((ind = RANDOM % cnt + 1))

	echo "$str" | cut -f $ind -d ' '
}
2251
2252#
# Randomly select one item from the arguments, allowing an empty (NONE) result
2254#
function random_get_with_non
{
	typeset -i cnt=$#

	# Add one extra slot beyond the argument count so the random
	# pick can land past the last argument, which cut(1) renders
	# as an empty ("NONE") result. The old code used '((cnt =+ 1))',
	# which assigned +1 to cnt instead of incrementing it, so the
	# first argument was always selected.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2262
2263#
# Randomly select one item from the arguments; never yields the NONE string
2265#
function random_get
{
	# Uniformly pick one of the supplied arguments.
	typeset -i argc=$#

	_random_get "$argc" "$@"
}
2270
2271#
2272# Detect if the current system support slog
2273#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	# A dry-run ('-n') pool creation with a log vdev tells us
	# whether this build supports separate log devices.
	typeset -i ret=0
	zpool create -n $pool $vdev log $sdev > /dev/null 2>&1 || ret=1
	rm -r $dir

	return $ret
}
2292
2293#
2294# The function will generate a dataset name with specific length
2295# $1, the length of the name
2296# $2, the base string to construct the name
2297#
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i reps
	typeset name=""

	# Repeat the base string ceil(len / baselen) times; the result
	# is therefore at least $len characters long.
	((reps = (len + baselen - 1) / baselen))
	while ((reps > 0)); do
		name="${name}$basestr"
		((reps -= 1))
	done

	echo $name
}
2319
2320#
2321# Get cksum tuple of dataset
2322# $1 dataset name
2323#
2324# sample zdb output:
2325# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2326# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2327# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2328# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
	typeset c
	sync
	sync_all_pools
	# The rootbp line that zdb prints for the dataset carries
	# cksum=<tuple> in its 7th '='-separated field.
	c=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
		| awk -F= '{print $7}')
	echo $c
}
2338
2339#
2340# Get cksum of file
2341# #1 file path
2342#
function checksum
{
	# The first field of cksum(1) output is the CRC of the file.
	typeset sum
	sum=$(cksum $1 | awk '{print $1}')
	echo $sum
}
2349
2350#
2351# Get the given disk/slice state from the specific field of the pool
2352#
2353function get_device_state #pool disk field("", "spares","logs")
2354{
2355	typeset pool=$1
2356	typeset disk=${2#/dev/dsk/}
2357	typeset field=${3:-$pool}
2358
2359	state=$(zpool status -v "$pool" 2>/dev/null | \
2360		nawk -v device=$disk -v pool=$pool -v field=$field \
2361		'BEGIN {startconfig=0; startfield=0; }
2362		/config:/ {startconfig=1}
2363		(startconfig==1) && ($1==field) {startfield=1; next;}
2364		(startfield==1) && ($1==device) {print $2; exit;}
2365		(startfield==1) &&
2366		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2367	echo $state
2368}
2369
2370
2371#
2372# print the given directory filesystem type
2373#
2374# $1 directory name
2375#
function get_fstype
{
	typeset dir=$1

	[[ -n $dir ]] || log_fail "Usage: get_fstype <directory>"

	#
	# 'df -n' prints "<mountpoint> : <fstype>"; the filesystem
	# type is the third field, e.g.:
	#  $ df -n /
	#  /		  : ufs
	#
	df -n $dir | awk '{print $3}'
}
2390
2391#
2392# Given a disk, label it to VTOC regardless what label was on the disk
2393# $1 disk
2394#
function labelvtoc
{
	typeset disk=$1
	typeset -i iter=120
	typeset -i ret_val=1

	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	# Build a format(8) command script appropriate to the platform;
	# x86 additionally needs a Solaris fdisk partition first.
	if [[ $arch == "i386" ]]; then
		log_must fdisk -B ${disk}p0

		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file
	elif [[ $arch == "sparc" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	# Disk update from fdisk -B may be delayed, so retry the
	# labeling once per second for up to two minutes.
	while ((iter > 0)); do
		if format -e -s -d $disk -f $label_file ; then
			iter=0
			ret_val=0
		else
			sleep 1
			((iter -= 1))
		fi
	done
	rm -f $label_file
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2443
2444#
2445# check if the system was installed as zfsroot or not
# return: 0 if true, otherwise false
2447#
function is_zfsroot
{
	# Root is on ZFS iff 'df -n /' reports a zfs filesystem type.
	df -n / | grep zfs > /dev/null 2>&1
}
2453
2454#
2455# get the root filesystem name if it's zfsroot system.
2456#
2457# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	# The dataset mounted at '/' with type zfs in mnttab is the
	# root filesystem.
	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootfs"

	# Sanity check that the dataset is actually visible to zfs.
	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2473
2474#
2475# get the rootfs's pool name
2476# return:
2477#       rootpool name
2478#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	# Find the dataset mounted at '/' with type zfs in mnttab.
	rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		 /etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootpool"

	if zfs list $rootfs > /dev/null 2>&1; then
		# The pool name is everything before the first '/'.
		rootpool=${rootfs%%/*}
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2496
2497#
2498# Check if the given device is physical device
2499#
function is_physical_device #device
{
	typeset dev=${1#/dev/dsk/}
	dev=${dev#/dev/rdsk/}

	# Physical devices look like c#t#d# or c#d# (hex digits),
	# with no slice or partition suffix.
	echo $dev | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
}
2508
2509#
2510# Get the directory path of given device
2511#
function get_device_dir #device
{
	typeset device=$1

	# Call the predicate directly. The old code wrapped it in a
	# command substitution ('if ! $(is_physical_device ...)'),
	# which only worked by accident: the substitution expands to
	# an empty command whose exit status happens to be that of
	# the function.
	if ! is_physical_device $device ; then
		# Non-physical (e.g. file-backed) device: return its
		# parent directory.
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		echo $device
	else
		echo "/dev/dsk"
	fi
}
2525
2526#
2527# Get the package name
2528#
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	# Turn the path into a package name: a/b -> SUNWstc-a-b.
	echo "SUNWstc-${dirpath//\//-}"
}
2535
2536#
2537# Get the word numbers from a string separated by white space
2538#
function get_word_count
{
	typeset words="$1"

	# Let wc(1) count whitespace-separated words so the output
	# format matches the platform's wc exactly.
	echo $words | wc -w
}
2543
2544#
2545# To verify if the require numbers of disks is given
2546#
function verify_disk_count
{
	typeset -i min=${2:-1}
	typeset -i count=$(get_word_count "$1")

	# Bail out of the test (untested, not failed) when too few
	# disks were supplied.
	if ((count < min)); then
		log_untested "A minimum of $min disks is required to run." \
			" You specified $count disk(s)"
	fi
}
2558
function ds_is_volume
{
	# Succeed exactly when the dataset's type property is "volume".
	[[ $(get_prop type $1) == "volume" ]]
}
2565
function ds_is_filesystem
{
	# Succeed exactly when the dataset's type property is "filesystem".
	[[ $(get_prop type $1) == "filesystem" ]]
}
2572
function ds_is_snapshot
{
	# Succeed exactly when the dataset's type property is "snapshot".
	[[ $(get_prop type $1) == "snapshot" ]]
}
2579
2580#
2581# Check if Trusted Extensions are installed and enabled
2582#
#
# Check if Trusted Extensions are installed and enabled.
#
# Return 0 if the labeld service is enabled, 1 otherwise.
#
function is_te_enabled
{
	# Discard the matched line: a predicate should report only via its
	# exit status, not write "enabled" to stdout as the original did.
	svcs -H -o state labeld 2>/dev/null | grep "enabled" > /dev/null 2>&1
}
2592
2593# Utility function to determine if a system has multiple cpus.
# Utility function to determine if a system has multiple cpus.
function is_mp
{
	typeset -i ncpu=$(psrinfo | wc -l)

	# psrinfo prints one line per processor.
	((ncpu > 1))
}
2598
function get_cpu_freq
{
	# Field 6 of the matching verbose psrinfo line is the frequency.
	psrinfo -v 0 | awk '{ if ($0 ~ /processor operates at/) print $6 }'
}
2603
2604# Run the given command as the user provided.
2605function user_run
2606{
2607	typeset user=$1
2608	shift
2609
2610	eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
2611	return $?
2612}
2613
2614#
2615# Check if the pool contains the specified vdevs
2616#
2617# $1 pool
2618# $2..n <vdev> ...
2619#
2620# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2621# vdevs is not in the pool, and 2 if pool name is missing.
2622#
#
# Check if the pool contains the specified vdevs
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
# vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev
	typeset tmpfile
	typeset ret=0

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	tmpfile=$(mktemp)
	zpool list -Hv "$pool" >$tmpfile
	for vdev in "$@"; do
		# Match on the basename only; config lists bare device names.
		if ! grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1; then
			ret=1
			break
		fi
	done

	# Always remove the temp file; the original leaked it when a vdev
	# was missing because it returned before the rm.
	rm -f $tmpfile

	return $ret
}
2646
#
# Print the maximum of the given integer arguments.
#
function get_max
{
	# -i (integer) replaces the original -l, which declares a
	# *lowercase* string, not an integer; drop the useless echo fork.
	typeset -i i max=$1
	shift

	for i in "$@"; do
		max=$((max > i ? max : i))
	done

	echo $max
}
2658
#
# Print the minimum of the given integer arguments.
#
function get_min
{
	# -i (integer) replaces the original -l, which declares a
	# *lowercase* string, not an integer; drop the useless echo fork.
	typeset -i i min=$1
	shift

	for i in "$@"; do
		min=$((min < i ? min : i))
	done

	echo $min
}
2670
2671#
2672# Generate a random number between 1 and the argument.
2673#
function random
{
	typeset -i ceil=$1

	# RANDOM modulo the ceiling gives 0..ceil-1; shift into 1..ceil.
	echo $((RANDOM % ceil + 1))
}
2679
2680# Write data that can be compressed into a directory
2681function write_compressible
2682{
2683	typeset dir=$1
2684	typeset megs=$2
2685	typeset nfiles=${3:-1}
2686	typeset bs=${4:-1024k}
2687	typeset fname=${5:-file}
2688
2689	[[ -d $dir ]] || log_fail "No directory: $dir"
2690
2691	log_must eval "fio \
2692	    --name=job \
2693	    --fallocate=0 \
2694	    --minimal \
2695	    --randrepeat=0 \
2696	    --buffer_compress_percentage=66 \
2697	    --buffer_compress_chunk=4096 \
2698	    --directory=$dir \
2699	    --numjobs=$nfiles \
2700	    --rw=write \
2701	    --bs=$bs \
2702	    --filesize=$megs \
2703	    --filename_format='$fname.\$jobnum' >/dev/null"
2704}
2705
function get_objnum
{
	typeset target=$1

	# The inode number of a file doubles as its ZFS object number.
	[[ -e $target ]] || log_fail "No such file or directory: $target"
	stat -c %i $target
}
2715
2716#
2717# Sync data to the pool
2718#
2719# $1 pool name
2720# $2 boolean to force uberblock (and config including zpool cache file) update
2721#
function sync_pool #pool <force>
{
	typeset pool=${1:-$TESTPOOL}
	typeset flags=""

	# A "true" second argument forces an uberblock (and config,
	# including zpool cache file) update.
	[[ ${2:-false} == true ]] && flags="-f"

	log_must zpool sync $flags $pool

	return 0
}
2735
2736#
2737# Sync all pools
2738#
2739# $1 boolean to force uberblock (and config including zpool cache file) update
2740#
function sync_all_pools #<force>
{
	typeset flags=""

	# A "true" argument forces an uberblock (and config, including
	# zpool cache file) update.
	[[ ${1:-false} == true ]] && flags="-f"

	log_must zpool sync $flags

	return 0
}
2753
2754#
2755# Prints the current time in seconds since UNIX Epoch.
2756#
2757function current_epoch
2758{
2759	printf '%(%s)T'
2760}
2761
2762#
2763# Get decimal value of global uint32_t variable using mdb.
2764#
function mdb_get_uint32
{
	typeset variable=$1
	typeset value

	# Read the variable as a 32-bit hex word and have mdb re-print the
	# current dot as an unsigned decimal value.
	if ! value=$(mdb -k -e "$variable/X | ::eval .=U"); then
		# log_fail normally terminates the test; the return is a
		# defensive fallback.
		log_fail "Failed to get value of '$variable' from mdb."
		return 1
	fi

	echo $value
	return 0
}
2779
2780#
2781# Wait for every device replace operation to complete
2782#
2783# $1 pool name
2784#
function wait_replacing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll once a second until zpool status no longer reports a
	# replacing-N vdev in the pool.
	while [[ -n $(zpool status $pool |
	    awk '/replacing-[0-9]+/ {print $1}') ]]; do
		log_must sleep 1
	done
}
2794
2795#
2796# Set global uint32_t variable to a decimal value using mdb.
2797#
function mdb_set_uint32
{
	typeset variable=$1
	typeset value=$2

	# /W writes a 32-bit word; the 0t prefix marks a decimal value.
	if ! mdb -kw -e "$variable/W 0t$value" > /dev/null; then
		echo "Failed to set '$variable' to '$value' in mdb."
		return 1
	fi

	return 0
}
2811
2812#
2813# Set global scalar integer variable to a hex value using mdb.
2814# Note: Target should have CTF data loaded.
2815#
function mdb_ctf_set_int
{
	typeset variable=$1
	typeset value=$2

	# /z writes an integer sized from the target's CTF type data.
	if ! mdb -kw -e "$variable/z $value" > /dev/null; then
		echo "Failed to set '$variable' to '$value' in mdb."
		return 1
	fi

	return 0
}
2829
2830#
2831# Set a global system tunable (64-bit value)
2832#
2833# $1 tunable name
2834# $2 tunable values
2835#
function set_tunable64
{
	typeset tunable="$1"
	typeset value="$2"

	# Z selects a 64-bit write in mdb.
	set_tunable_impl "$tunable" "$value" Z
}
2840
2841#
2842# Set a global system tunable (32-bit value)
2843#
2844# $1 tunable name
2845# $2 tunable values
2846#
function set_tunable32
{
	typeset tunable="$1"
	typeset value="$2"

	# W selects a 32-bit write in mdb.
	set_tunable_impl "$tunable" "$value" W
}
2851
#
# Set a global system tunable.
#
# $1 tunable name
# $2 tunable value
# $3 mdb write command (W = 32-bit, Z = 64-bit); unused on Linux
# $4 kernel module owning the tunable; default "zfs"
#
# Return 0 on success, non-zero on bad arguments or write failure.
#
function set_tunable_impl
{
	typeset tunable="$1"
	typeset value="$2"
	typeset mdb_cmd="$3"
	typeset module="${4:-zfs}"

	[[ -z "$tunable" ]] && return 1
	[[ -z "$value" ]] && return 1
	[[ -z "$mdb_cmd" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -w "$zfs_tunables/$tunable" ]] || return 1
		cat >"$zfs_tunables/$tunable" <<<"$value"
		return $?
		;;
	SunOS)
		# String comparison: the original used -eq, which evaluates
		# both sides arithmetically and therefore always matched.
		[[ "$module" == "zfs" ]] || return 1
		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
		return $?
		;;
	esac
}
2877
2878#
2879# Get a global system tunable
2880#
2881# $1 tunable name
2882#
function get_tunable
{
	typeset tunable="$1"

	# Use the default module ("zfs").
	get_tunable_impl "$tunable"
}
2887
#
# Get a global system tunable.
#
# $1 tunable name
# $2 kernel module owning the tunable; default "zfs"
#
# Prints the value on stdout; returns non-zero on failure.
#
function get_tunable_impl
{
	typeset tunable="$1"
	typeset module="${2:-zfs}"
	typeset value

	[[ -z "$tunable" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -f "$zfs_tunables/$tunable" ]] || return 1
		cat $zfs_tunables/$tunable
		return $?
		;;
	SunOS)
		# Assignment split from declaration: the original
		# "typeset value=$(mdb ...)" made $? reflect typeset's
		# (always 0) status, so the error check below never fired.
		value=$(mdb -k -e "$tunable/X | ::eval .=U")
		if [[ $? -ne 0 ]]; then
			log_fail "Failed to get value of '$tunable' from mdb."
			return 1
		fi
		echo $value
		return 0
		;;
	esac

	return 1
}
2915
2916#
2917# Wait for the specified arcstat to reach non-zero quiescence.
2918# If echo is 1 echo the value after reaching quiescence, otherwise
2919# if echo is 0 print the arcstat we are waiting on.
2920#
function arcstat_quiescence # stat echo
{
	typeset stat=$1
	typeset echo=$2
	typeset do_once=true

	if [[ $echo -eq 0 ]]; then
		echo "Waiting for arcstat $1 quiescence."
	fi

	# Sample the stat twice, two seconds apart, until two consecutive
	# samples agree on a non-zero value.  $do_once short-circuits the
	# condition on the first pass, before stat1/stat2 exist; on later
	# passes it is "false", so the [ ] tests decide.
	while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
		typeset stat1=$(get_arcstat $stat)
		sleep 2
		typeset stat2=$(get_arcstat $stat)
		do_once=false
	done

	# With echo=1 print the settled value; with echo=0 only the banner
	# above was printed.
	if [[ $echo -eq 1 ]]; then
		echo $stat2
	fi
}
2942
function arcstat_quiescence_noecho # stat
{
	# Wait for the stat to settle without echoing the final value.
	arcstat_quiescence $1 0
}
2948
function arcstat_quiescence_echo # stat
{
	# Wait for the stat to settle, then echo the settled value.
	arcstat_quiescence $1 1
}
2954
2955#
2956# Compute SHA256 digest for given file or stdin if no file given.
2957# Note: file path must not contain spaces
2958#
function sha256digest
{
	typeset file=$1

	# Prefer the native digest(1); fall back to GNU sha256sum, which
	# prints "<hash> <file>" and so needs the hash field extracted.
	if [ -x /usr/bin/digest ]; then
		/usr/bin/digest -a sha256 $file
		return 0
	fi
	if [ -x /usr/bin/sha256sum ]; then
		/usr/bin/sha256sum -b $file | awk '{ print $1 }'
		return 0
	fi
	echo "Cannot calculate SHA256 digest"
	return 1
}
2973
function get_arcstat # stat
{
	typeset stat=$1

	if is_linux; then
		# On Linux the ARC stats live in a procfs kstat table;
		# column 3 holds the value.
		typeset arcstats="/proc/spl/kstat/zfs/arcstats"
		[[ -f "$arcstats" ]] || return 1
		grep $stat $arcstats | awk '{print $3}'
		return $?
	else
		# On illumos, kstat -p prints "name<TAB>value".
		kstat -p zfs::arcstats:$stat | awk '{ print $2 }'
		return $?
	fi
}
2987