xref: /illumos-gate/usr/src/test/zfs-tests/include/libtest.shlib (revision d8109ce4330e1b8ad6c29f9fccacec969066bb9d)
1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24# Use is subject to license terms.
25# Copyright (c) 2012, 2017 by Delphix. All rights reserved.
26# Copyright 2016 Nexenta Systems, Inc.
27# Copyright (c) 2017 Datto Inc.
28#
29
30. ${STF_TOOLS}/contrib/include/logapi.shlib
31. ${STF_SUITE}/include/blkdev.shlib
32
33# Determine if this is a Linux test system
34#
35# Return 0 if platform Linux, 1 if otherwise
36
function is_linux
{
	# The exit status of the test is the function's return value.
	typeset os=$(uname -o)
	[[ $os == "GNU/Linux" ]]
}
45
46# Determine whether a dataset is mounted
47#
48# $1 dataset name
49# $2 filesystem type; optional - defaulted to zfs
50#
51# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
52
function ismounted
{
	typeset fstype=$2
	# No fs type supplied: assume a zfs dataset/mountpoint.
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			# A leading '/' means $1 is a mountpoint; otherwise
			# it is a dataset name.  Compare against the
			# corresponding column of 'zfs mount' output
			# (col 1 = dataset, col 2 = mountpoint).
			if [[ "$1" == "/"* ]] ; then
				for out in $(zfs mount | awk '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $(zfs mount | awk '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			# df exit status (may be >1) is propagated as the
			# documented "2 on error" case.
			out=$(df -F $fstype $1 2>/dev/null)
			ret=$?
			(($ret != 0)) && return $ret

			# df output: "<mountpoint> (<special>): ..." —
			# strip down to the mountpoint and the special name.
			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	esac

	return 1
}
88
89# Return 0 if a dataset is mounted; 1 otherwise
90#
91# $1 dataset name
92# $2 filesystem type; optional - defaulted to zfs
93
function mounted
{
	# Thin positive wrapper around ismounted.
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
100
101# Return 0 if a dataset is unmounted; 1 otherwise
102#
103# $1 dataset name
104# $2 filesystem type; optional - defaulted to zfs
105
function unmounted
{
	# Success only when ismounted reports exactly "unmounted" (1);
	# an error status (2) from ismounted still yields failure here.
	ismounted $1 $2
	if (($? == 1)); then
		return 0
	fi
	return 1
}
112
113# split line on ","
114#
115# $1 - line to split
116
function splitline
{
	# Turn every comma into a space; $1 is intentionally unquoted to
	# preserve the original word-splitting behaviour.
	echo $1 | tr ',' ' '
}
121
122function default_setup
123{
124	default_setup_noexit "$@"
125
126	log_pass
127}
128
129#
130# Given a list of disks, setup storage pools and datasets.
131#
function default_setup_noexit
{
	typeset disklist=$1
	# $2 non-empty: also create the $TESTCTR container and $TESTFS1.
	typeset container=$2
	# $3 non-empty: also create the $TESTVOL volume.
	typeset volume=$3

	if is_global_zone; then
		# Start from a clean slate: remove any leftover pool and
		# its stale mountpoint directory.
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		# Non-global zones cannot create pools; re-import instead.
		reexport_pool
	fi

	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		rm -rf $TESTDIR1  || \
			log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		# The container itself is never mounted (canmount=off);
		# only its child filesystem gets a mountpoint.
		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		# zvols (-V) can only be created in the global zone.
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
175
176#
177# Given a list of disks, setup a storage pool, file system and
178# a container.
179#
function default_container_setup
{
	# Same as default_setup, but also request a container.
	default_setup "$1" "true"
}
186
187#
# Given a list of disks, set up a storage pool, a file system
# and a volume.
190#
function default_volume_setup
{
	# Same as default_setup, but also request a volume (no container).
	default_setup "$1" "" "true"
}
197
198#
199# Given a list of disks, setup a storage pool,file system,
200# a container and a volume.
201#
function default_container_volume_setup
{
	# Same as default_setup, but request both a container and a volume.
	default_setup "$1" "true" "true"
}
208
209#
# Create a snapshot on a filesystem or volume. By default, the snapshot is
# created on a filesystem.
212#
213# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
214# $2 snapshot name. Default, $TESTSNAP
215#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	# Guard against unset defaults ($TESTPOOL/$TESTFS/$TESTSNAP empty).
	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	# The snapshot must not pre-exist, and its parent dataset must.
	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}
232
233#
234# Create a clone from a snapshot, default clone name is $TESTCLONE.
235#
236# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
237# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
238#
function create_clone   # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	# Both names must be non-empty (defaults may expand to "").
	if [[ -z $snap ]]; then
		log_fail "Snapshot name is undefined."
	fi
	if [[ -z $clone ]]; then
		log_fail "Clone name is undefined."
	fi

	log_must zfs clone $snap $clone
}
251
252#
# Create a bookmark of the given snapshot.  By default, the bookmark is
# created on a filesystem.
255#
256# $1 Existing filesystem or volume name. Default, $TESTFS
257# $2 Existing snapshot name. Default, $TESTSNAP
258# $3 bookmark name. Default, $TESTBKMARK
259#
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	# Guard against unset defaults.
	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	# The bookmark must not pre-exist; the dataset and the source
	# snapshot both must.
	if bkmarkexists $fs_vol#$bkmark; then
		log_fail "$fs_vol#$bkmark already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."
	snapexists $fs_vol@$snap || \
		log_fail "$fs_vol@$snap must exist."

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
280
281#
282# Create a temporary clone result of an interrupted resumable 'zfs receive'
283# $1 Destination filesystem name. Must not exist, will be created as the result
284#    of this function along with its %recv temporary clone
285# $2 Source filesystem name. Must not exist, will be created and destroyed
286#
function create_recv_clone
{
	typeset recvfs="$1"
	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
	typeset snap="$sendfs@snap1"
	typeset incr="$sendfs@snap2"
	typeset mountpoint="$TESTDIR/create_recv_clone"
	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"

	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."

	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
	datasetexists $sendfs && log_fail "Send filesystem must not exist."

	# Seed the destination with a full receive of snap1.
	log_must zfs create -o mountpoint="$mountpoint" $sendfs
	log_must zfs snapshot $snap
	log_must eval "zfs send $snap | zfs recv -u $recvfs"
	log_must mkfile 1m "$mountpoint/data"
	log_must zfs snapshot $incr
	# Truncate the incremental stream to 10k so the resumable
	# receive (-s) fails partway, leaving the %recv clone behind.
	log_must eval "zfs send -i $snap $incr | dd bs=10k count=1 > $sendfile"
	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
	log_must zfs destroy -r $sendfs
	log_must rm -f "$sendfile"

	# Verify the interrupted receive really left an inconsistent
	# temporary clone.
	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
		log_fail "Error creating temporary $recvfs/%recv clone"
	fi
}
315
316function default_mirror_setup
317{
318	default_mirror_setup_noexit $1 $2 $3
319
320	log_pass
321}
322
323#
324# Given a pair of disks, set up a storage pool and dataset for the mirror
325# @parameters: $1 the primary side of the mirror
326#   $2 the secondary side of the mirror
327# @uses: ZPOOL ZFS TESTPOOL TESTFS
328function default_mirror_setup_noexit
329{
330	readonly func="default_mirror_setup_noexit"
331	typeset primary=$1
332	typeset secondary=$2
333
334	[[ -z $primary ]] && \
335		log_fail "$func: No parameters passed"
336	[[ -z $secondary ]] && \
337		log_fail "$func: No secondary partition passed"
338	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
339	log_must zpool create -f $TESTPOOL mirror $@
340	log_must zfs create $TESTPOOL/$TESTFS
341	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
342}
343
344#
345# create a number of mirrors.
346# We create a number($1) of 2 way mirrors using the pairs of disks named
347# on the command line. These mirrors are *not* mounted
348# @parameters: $1 the number of mirrors to create
349#  $... the devices to use to create the mirrors on
350# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	# Consume devices two at a time, building pools named
	# $TESTPOOL<n>, counting n down from $1 to 1.
	typeset -i remaining=$1

	shift
	while ((remaining > 0)); do
		# Both sides of the mirror must be supplied.
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$remaining ]] && rm -rf /$TESTPOOL$remaining
		log_must zpool create -f $TESTPOOL$remaining mirror $1 $2
		shift 2
		((remaining -= 1))
	done
}
364
365#
366# create a number of raidz pools.
367# We create a number($1) of 2 raidz pools  using the pairs of disks named
368# on the command line. These pools are *not* mounted
369# @parameters: $1 the number of pools to create
370#  $... the devices to use to create the pools on
371# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	# Consume devices two at a time, building raidz pools named
	# $TESTPOOL<n>, counting n down from $1 to 1.
	typeset -i remaining=$1

	shift
	while ((remaining > 0)); do
		# Each pool needs a pair of devices.
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$remaining ]] && rm -rf /$TESTPOOL$remaining
		log_must zpool create -f $TESTPOOL$remaining raidz $1 $2
		shift 2
		((remaining -= 1))
	done
}
385
386#
387# Destroy the configured testpool mirrors.
388# the mirrors are of the form ${TESTPOOL}{number}
389# @uses: ZPOOL ZFS TESTPOOL
390function destroy_mirrors
391{
392	default_cleanup_noexit
393
394	log_pass
395}
396
397#
398# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
399# $1 the list of disks
400#
#
# Given a minimum of two disks, set up a storage pool and dataset for the
# raid-z.  Logs the test as passed on success (log_pass exits).
# $1 the list of disks
#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	# Use the entire disk list rather than only the first three
	# positional parameters, so callers can build raidz vdevs wider
	# than three disks without the extras being silently dropped.
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
417
418#
419# Common function used to cleanup storage pools and datasets.
420#
421# Invoked at the start of the test suite to ensure the system
422# is in a known state, and also at the end of each set of
423# sub-tests to ensure errors from one set of tests doesn't
424# impact the execution of the next set.
425
426function default_cleanup
427{
428	default_cleanup_noexit
429
430	log_pass
431}
432
# Destroy every test pool (global zone) or reset the delegated datasets
# (non-global zone), and remove $TESTDIR.  Pools matching $KEEP or
# $NO_POOLS are preserved.
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		exclude=`eval echo \"'(${KEEP})'\"`
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		# NOTE: the list must be quoted.  The previous unquoted
		# '[ ! -z ${ALL_POOLS} ]' expanded multiple pool names into
		# separate test arguments, making '[' fail and skipping the
		# loop entirely whenever more than one pool existed.
		while [ -n "$ALL_POOLS" ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		typeset fs=""
		# Destroy everything beneath the delegated containers.
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must zfs destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# checksum cannot be reset on encrypted
				# datasets, so only touch it when
				# encryption is absent or off.
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must rm -rf $TESTDIR
}
509
510
511#
512# Common function used to cleanup storage pools, file systems
513# and containers.
514#
function default_container_cleanup
{
	# Pools are only visible in non-global zones via re-export.
	if ! is_global_zone; then
		reexport_pool
	fi

	# Unmount, then recursively destroy the container contents and
	# the container itself, before falling through to the common
	# cleanup (which exits via log_pass).
	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must zfs destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}
536
537#
# Common function used to clean up a snapshot of a file system or volume.
# By default, deletes the file system's snapshot.
540#
541# $1 snapshot name
542#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
	fi

	#
	# 'get_prop mountpoint' is only meaningful while the snapshot is
	# mounted, so record the mountpoint only when the snapshot is
	# actually mounted on the current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must zfs destroy $snap
	# Remove the now-stale mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
567
568#
569# Common function used to cleanup clone.
570#
571# $1 clone name
572#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		log_fail "'$clone' does not exist."
	fi

	# As in destroy_snapshot: only query the mountpoint while the
	# clone is actually mounted.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	log_must zfs destroy $clone
	# Remove the now-stale mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
593
594#
595# Common function used to cleanup bookmark of file system or volume.  Default
596# to delete the file system's bookmark.
597#
598# $1 bookmark name
599#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Fixed: the message previously referenced the undefined
		# variable '$bkmarkp', so it printed an empty name.
		log_fail "'$bkmark' does not exist."
	fi

	log_must zfs destroy $bkmark
}
610
611# Return 0 if a snapshot exists; $? otherwise
612#
613# $1 - snapshot name
614
function snapexists
{
	# The listing's exit status is the function's result; the
	# trailing 'return $?' was redundant.
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
620
621#
622# Return 0 if a bookmark exists; $? otherwise
623#
624# $1 - bookmark name
625#
function bkmarkexists
{
	# The listing's exit status is the function's result; the
	# trailing 'return $?' was redundant.
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
631
632#
633# Set a property to a certain value on a dataset.
634# Sets a property of the dataset to the value as passed in.
635# @param:
636#	$1 dataset who's property is being set
637#	$2 property to set
638#	$3 value to set property to
639# @return:
640#	0 if the property could be set.
641#	non-zero otherwise.
642# @use: ZFS
643#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi

	typeset output
	output=$(zfs set $2=$3 $1 2>&1)
	typeset rv=$?
	if ((rv == 0)); then
		return 0
	fi

	# Diagnose the failure before propagating zfs's exit status.
	log_note "Setting property on $1 failed."
	log_note "property $2=$3"
	log_note "Return Code: $rv"
	log_note "Output: $output"
	return $rv
}
664
665#
666# Assign suite defined dataset properties.
667# This function is used to apply the suite's defined default set of
668# properties to a dataset.
669# @parameters: $1 dataset to use
670# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
671# @returns:
672#   0 if the dataset has been altered.
673#   1 if no pool name was passed in.
674#   2 if the dataset could not be found.
675#   3 if the dataset could not have it's properties set.
676#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	# NOTE(review): this iterates every whitespace-separated token of
	# the full 'zfs list' output, not just the NAME column; the match
	# happens to work because the dataset name appears as a token.
	for confset in $(zfs list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	# Apply the suite defaults only when the corresponding env var
	# is set; either failure yields return code 3.
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
704
705#
706# Check a numeric assertion
707# @parameter: $@ the assertion to check
708# @output: big loud notice if assertion failed
709# @use: log_fail
710#
function assert
{
	# Evaluate the arguments as an arithmetic expression; abort the
	# test with the expression text when it is false.
	if ! (($@)); then
		log_fail "$@"
	fi
}
715
716#
717# Function to format partition size of a disk
718# Given a disk cxtxdx reduces all partitions
719# to 0 size
720#
function zero_partitions #<whole_disk_name>
{
	typeset disk=$1
	typeset slice

	# Slice 2 conventionally represents the whole disk and is left alone.
	for slice in 0 1 3 4 5 6 7; do
		set_partition $slice "" 0mb $disk
	done
}
731
732#
733# Given a slice, size and disk, this function
734# formats the slice to the specified size.
735# Size should be specified with units as per
736# the `format` command requirements eg. 100mb 3gb
737#
function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	[[ -z $slicenum || -z $size || -z $disk ]] && \
	    log_fail "The slice, size or disk name is unspecified."
	typeset format_file=/var/tmp/format_in.$$

	# Build the script that drives format(1M) non-interactively:
	# enter the partition menu, pick the slice, accept the tag and
	# flag defaults, set start/size, write the label, and quit.
	{
		echo "partition"
		echo "$slicenum"
		echo ""
		echo ""
		echo "$start"
		echo "$size"
		echo "label"
		echo ""
		echo "q"
		echo "q"
	} > $format_file

	format -e -s -d $disk -f $format_file
	typeset status=$?
	rm -f $format_file
	[[ $status -ne 0 ]] && \
	    log_fail "Unable to format $disk slice $slicenum to $size"
	return 0
}
766
767#
768# Get the end cyl of the given slice
769#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	# Normalize to a bare device name: strip /dev/[r]dsk/ prefixes
	# and any trailing slice suffix.
	disk=${disk#/dev/dsk/}
	disk=${disk#/dev/rdsk/}
	disk=${disk%s*}

	# Sectors per cylinder, taken from the whole-disk slice (s2).
	typeset -i ratio=0
	ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		grep "sectors\/cylinder" | \
		awk '{print $2}')

	# No usable geometry: emit nothing.
	if ((ratio == 0)); then
		return
	fi

	# Column 6 of prtvtoc -h is the slice's last sector; convert to
	# a cylinder count.
	typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		nawk -v token="$slice" '{if ($1==token) print $6}')

	((endcyl = (endcyl + 1) / ratio))
	echo $endcyl
}
797
798
799#
800# Given a size,disk and total slice number,  this function formats the
801# disk slices from 0 to the total slice number with the same specified
802# size.
803#
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset -i i
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	zero_partitions $disk_name
	for ((i = 0; i < total_slices; i++)); do
		# Slice 2 is reserved for the whole disk.
		if ((i == 2)); then
			continue
		fi
		# Each slice starts where the previous one ended.
		set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
	done
}
823
824#
825# This function continues to write to a filenum number of files into dirnum
826# number of directories until either file_write returns an error or the
827# maximum number of files per directory have been written.
828#
829# Usage:
830# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
831#
832# Return value: 0 on success
833#		non 0 on error
834#
835# Where :
836#	destdir:    is the directory where everything is to be created under
837#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
838#	filenum:    the maximum number of files per subdirectory
839#	bytes:	    number of bytes to write
840#	num_writes: numer of types to write out bytes
841#	data:	    the data that will be writen
842#
843#	E.g.
844#	file_fs /testdir 20 25 1024 256 0
845#
846# Note: bytes * num_writes equals the size of the testfile
847#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	# odirnum acts as the outer-loop "keep going" flag; idirnum is
	# the current subdirectory index, fn the current file index.
	typeset -i odirnum=1
	typeset -i idirnum=0
	typeset -i fn=0
	typeset -i retval=0

	log_must mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# Stop once the directory cap is reached (dirnum < 0
		# means unlimited).
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# Any write error (e.g. ENOSPC) ends the fill; the error
		# status is returned to the caller.
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		# Roll over to a fresh subdirectory after filenum files.
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			log_must mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
885
886#
887# Simple function to get the specified property. If unable to
888# get the property then exits.
889#
890# Note property is in 'parsable' format (-p)
891#
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset prop_val

	# Emit the parsable (-p) property value on stdout; note the
	# failure and return non-zero when zfs cannot resolve it.
	if ! prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null); then
		log_note "Unable to get $prop property for dataset " \
		"$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
908
909#
910# Simple function to get the specified property of pool. If unable to
911# get the property then exits.
912#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		# Parse the VALUE column (field 3) from the last line of
		# 'zpool get' output.
		prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
			awk '{print $3}')
		# NOTE(review): $? here reflects the final pipeline stage
		# (awk), not zpool itself, so a zpool failure may go
		# undetected — confirm before relying on this branch.
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			"$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	echo $prop_val
	return 0
}
935
936# Return 0 if a pool exists; $? otherwise
937#
938# $1 - pool name
939
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# zpool's exit status is the function's result.
	zpool get name "$pool" > /dev/null 2>&1
}
952
953# Return 0 if all the specified datasets exist; $? otherwise
954#
955# $1-n  dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Fail fast with zfs's status on the first missing dataset.
	typeset ds
	for ds in "$@"; do
		zfs get name $ds > /dev/null 2>&1 || \
			return $?
	done

	return 0
}
971
972# return 0 if none of the specified datasets exists, otherwise return 1.
973#
974# $1-n  dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Any dataset that does exist makes the whole check fail.
	typeset ds
	for ds in "$@"; do
		zfs list -H -t filesystem,snapshot,volume $ds > /dev/null 2>&1 \
		    && return 1
	done

	return 0
}
990
991#
992# Given a mountpoint, or a dataset name, determine if it is shared.
993#
994# Returns 0 if shared, 1 otherwise.
995#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A dataset name (no leading '/') is resolved to its mountpoint
	# first; unshareable mountpoints short-circuit to "not shared".
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	# Column 2 of share(1M) output is the shared pathname.
	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Not found: note the NFS server state to aid debugging, since a
	# disabled server makes everything appear unshared.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
1028
1029#
1030# Given a mountpoint, determine if it is not shared.
1031#
1032# Returns 0 if not shared, 1 otherwise.
1033#
function not_shared
{
	# Simply invert is_shared's result.
	if is_shared $1; then
		return 1
	fi

	return 0
}
1045
1046#
1047# Helper function to unshare a mountpoint.
1048#
function unshare_fs #fs
{
	typeset fs=$1

	# Only unshare when currently shared; always succeed otherwise.
	if is_shared $fs; then
		log_must zfs unshare $fs
	fi

	return 0
}
1060
1061#
1062# Check NFS server status and trigger it online.
1063#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		# Share a throwaway directory just to kick the service.
		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		# Poll up to 10 more seconds for the '*' (transitioning)
		# marker to disappear from the service status.
		timeout=10
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		# The service stays online after the dummy share is gone.
		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1110
1111#
1112# To verify whether calling process is in global zone
1113#
1114# Return 0 if in global zone, 1 in non-global zone
1115#
function is_global_zone
{
	# The test's exit status is the function's return value; a
	# missing zonename(1) command also yields "not global" (1).
	[[ $(zonename 2>/dev/null) == "global" ]]
}
1124
1125#
1126# Verify whether test is permitted to run from
1127# global zone, local zone, or both
1128#
1129# $1 zone limit, could be "global", "local", or "both"(no limit)
1130#
1131# Return 0 if permitted, otherwise exit with log_unsupported
1132#
function verify_runnable # zone limit
{
	typeset limit=$1

	# No limit given: always runnable.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from "\
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from "\
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		# Local-zone tests need the delegated pool re-imported.
		reexport_pool
	fi

	return 0
}
1167
1168# Return 0 if create successfully or the pool exists; $? otherwise
1169# Note: In local zones, this function should return 0 silently.
1170#
1171# $1 - pool name
1172# $2-n - [keyword] devs_list
1173
function create_pool #pool devs_list
{
	# Accept a dataset path too: only the pool component is used.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Recreate from scratch when the pool already exists.
	if poolexists $pool ; then
		destroy_pool $pool
	fi

	# Pools can only be created in the global zone; in a local zone
	# this silently succeeds as documented.
	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1196
1197# Return 0 if destroy successfully or the pool exists; $? otherwise
1198# Note: In local zones, this function should return 0 silently.
1199#
1200# $1 - pool name
1201# Destroy pool with the given parameters.
1202
function destroy_pool #pool
{
	# Accept a dataset path too: only the pool component is used.
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			# Remember the mountpoint so the stale directory
			# can be removed after the pool is gone.
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			must=""
			while [[ $ret -ne 0 ]]; do
				# After 8 attempts, wrap with log_must so a
				# persistent failure aborts the test.
				$must zpool destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				[[ count++ -ge 7 ]] && must=log_must
				sleep $wait_time
			done

			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1242
1243# Return 0 if created successfully; $? otherwise
1244#
1245# $1 - dataset name
1246# $2-n - dataset options
1247
function create_dataset #dataset dataset_options
{
	typeset dataset=$1

	shift

	if [[ -z $dataset ]]; then
		log_note "Missing dataset name."
		return 1
	fi

	# Recreate from scratch when the dataset already exists.
	datasetexists $dataset && destroy_dataset $dataset

	# Remaining arguments are passed through as zfs create options.
	log_must zfs create $@ $dataset

	return 0
}
1267
1268# Return 0 if destroy successfully or the dataset exists; $? otherwise
1269# Note: In local zones, this function should return 0 silently.
1270#
1271# $1 - dataset name
1272
function destroy_dataset #dataset
{
	typeset dataset=$1
	typeset mtpt

	if [[ -z $dataset ]]; then
		log_note "No dataset name given."
		return 1
	fi

	if datasetexists "$dataset" ; then
		# Record the mountpoint first so the stale directory can
		# be removed after the recursive destroy.
		mtpt=$(get_prop mountpoint "$dataset")
		log_must zfs destroy -r $dataset
		[[ -d $mtpt ]] && log_must rm -rf $mtpt
	else
		log_note "Dataset does not exist. ($dataset)"
		return 1
	fi

	return 0
}
1294
1295#
1296# Firstly, create a pool with 5 datasets. Then, create a single zone and
1297# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1298# and a zvol device to the zone.
1299#
1300# $1 zone name
1301# $2 zone root directory prefix
1302# $3 zone ip
1303#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	# Defaults: "<hostname>-z", /zone_root, 10.1.1.10.
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	# Start with a fresh zone path directory.
	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the 5 container datasets into the zone.
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file so the zone boots without interactive
	# system identification. The root_password value is a fixed
	# crypt(3C) hash used only for testing.
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo  "terminal=dtterm" >> $sysidcfg
	echo  "network_interface=primary {" >> $sysidcfg
	echo  "hostname=$zone_name" >> $sysidcfg
	echo  "}" >> $sysidcfg
	echo  "name_service=NONE" >> $sysidcfg
	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo  "security_policy=NONE" >> $sysidcfg
	echo  "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1407
1408#
1409# Reexport TESTPOOL & TESTPOOL(1-4)
1410#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	# Point TESTPOOL and TESTPOOL1..TESTPOOL4 at the zone containers
	# and make sure each is mounted. eval is needed to build and
	# dereference the numbered TESTPOOL<i> global variable names.
	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1431
1432#
1433# Verify a given disk is online or offline
1434#
1435# Return 0 is pool/disk matches expected state, 1 otherwise
1436#
function check_state # pool disk state{online,offline}
{
	typeset target=$1
	typeset dev=${2#/dev/dsk/}
	typeset expect=$3

	# Succeed iff the device's line in 'zpool status' mentions the
	# expected state (case-insensitive).
	zpool status -v $target | grep "$dev" | grep -i "$expect" \
	    > /dev/null 2>&1
}
1448
1449#
1450# Get the mountpoint of snapshot
1451# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1452# as its mountpoint
1453#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	# A valid snapshot name must contain '@' with non-empty
	# filesystem and snapshot components.
	if [[ $dataset != *@* || -z $fs || -z $snap ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1471
1472#
1473# Given a device and 'ashift' value verify it's correctly set on every label
1474#
1475function verify_ashift # device ashift
1476{
1477	typeset device="$1"
1478	typeset ashift="$2"
1479
1480	zdb -e -lll $device | nawk -v ashift=$ashift '/ashift: / {
1481	    if (ashift != $2)
1482	        exit 1;
1483	    else
1484	        count++;
1485	    } END {
1486	    if (count != 4)
1487	        exit 1;
1488	    else
1489	        exit 0;
1490	    }'
1491
1492	return $?
1493}
1494
1495#
1496# Given a pool and file system, this function will verify the file system
1497# using the zdb internal tool. Note that the pool is exported and imported
1498# to ensure it has consistent state.
1499#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	# Remaining arguments are optional search directories for the
	# exported pool's devices.
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	# Export the pool first so zdb sees a consistent on-disk state.
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1533
1534#
# Given a pool, list all of the disks in that pool
1536#
1537function get_disklist # pool
1538{
1539	typeset disklist=""
1540
1541	disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1542	    grep -v "\-\-\-\-\-" | \
1543	    egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1544
1545	echo $disklist
1546}
1547
1548# /**
1549#  This function kills a given list of processes after a time period. We use
1550#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
1551#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1552#  would be listed as FAIL, which we don't want : we're happy with stress tests
1553#  running for a certain amount of time, then finishing.
1554#
1555# @param $1 the time in seconds after which we should terminate these processes
1556# @param $2..$n the processes we wish to terminate.
1557# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"
	typeset pid

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	# Only signal processes that are still alive.
	for pid in $cpids; do
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
1577
1578#
1579# Verify a given hotspare disk is inuse or avail
1580#
1581# Return 0 is pool/disk matches expected state, 1 otherwise
1582#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is global (not typeset), preserving the original
	# side effect of leaving the queried state visible to callers.
	cur_state=$(get_device_state $pool $disk "spares")

	[[ $state == $cur_state ]]
}
1596
1597#
1598# Wait until a hotspare transitions to a given state or times out.
1599#
1600# Return 0 when  pool/disk matches expected state, 1 on timeout.
1601#
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	# Strip an optional /dev/dsk/ prefix, as check_hotspare_state
	# does. (The previous pattern '$/DEV_DSKDIR/' was a literal
	# string -- '$/' is not a variable expansion -- and never
	# matched, so the prefix was never stripped.)
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the spare reaches the requested
	# state or the timeout (default 60s) expires.
	while [[ $i -lt $timeout ]]; do
		if check_hotspare_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1621
1622#
1623# Verify a given slog disk is inuse or avail
1624#
1625# Return 0 is pool/disk matches expected state, 1 otherwise
1626#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is global (not typeset), preserving the original
	# side effect of leaving the queried state visible to callers.
	cur_state=$(get_device_state $pool $disk "logs")

	[[ $state == $cur_state ]]
}
1640
1641#
1642# Verify a given vdev disk is inuse or avail
1643#
1644# Return 0 is pool/disk matches expected state, 1 otherwise
1645#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is global (not typeset), preserving the original
	# side effect of leaving the queried state visible to callers.
	cur_state=$(get_device_state $pool $disk)

	[[ $state == $cur_state ]]
}
1659
1660#
1661# Wait until a vdev transitions to a given state or times out.
1662#
1663# Return 0 when  pool/disk matches expected state, 1 on timeout.
1664#
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	# Strip an optional /dev/dsk/ prefix, as check_vdev_state does.
	# (The previous pattern '$/DEV_DSKDIR/' was a literal string --
	# '$/' is not a variable expansion -- and never matched, so the
	# prefix was never stripped.)
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the vdev reaches the requested
	# state or the timeout (default 60s) expires.
	while [[ $i -lt $timeout ]]; do
		if check_vdev_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1684
1685#
1686# Check the output of 'zpool status -v <pool>',
1687# and to see if the content of <token> contain the <keyword> specified.
1688#
1689# Return 0 is contain, 1 otherwise
1690#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}

	# Extract the "<token>:" line from zpool status.
	# NOTE: 'scan' is assigned without typeset and is global.
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	# Case-insensitive keyword match determines the return status.
	echo $scan | grep -i "$keyword" > /dev/null 2>&1

	return $?
}
1707
1708#
1709# These 6 following functions are instance of check_pool_status()
1710#	is_pool_resilvering - to check if the pool is resilver in progress
1711#	is_pool_resilvered - to check if the pool is resilver completed
1712#	is_pool_scrubbing - to check if the pool is scrub in progress
1713#	is_pool_scrubbed - to check if the pool is scrub completed
1714#	is_pool_scrub_stopped - to check if the pool is scrub stopped
1715#	is_pool_scrub_paused - to check if the pool has scrub paused
1716#	is_pool_removing - to check if the pool is removing a vdev
1717#	is_pool_removed - to check if the pool is remove completed
1718#
function is_pool_resilvering #pool <verbose>
{
	# Delegate to check_pool_status; its status is returned implicitly.
	check_pool_status "$1" "scan" "resilver in progress since " $2
}
1724
function is_pool_resilvered #pool <verbose>
{
	# Delegate to check_pool_status; its status is returned implicitly.
	check_pool_status "$1" "scan" "resilvered " $2
}
1730
function is_pool_scrubbing #pool <verbose>
{
	# Delegate to check_pool_status; its status is returned implicitly.
	check_pool_status "$1" "scan" "scrub in progress since " $2
}
1736
function is_pool_scrubbed #pool <verbose>
{
	# Delegate to check_pool_status; its status is returned implicitly.
	check_pool_status "$1" "scan" "scrub repaired" $2
}
1742
function is_pool_scrub_stopped #pool <verbose>
{
	# Delegate to check_pool_status; its status is returned implicitly.
	check_pool_status "$1" "scan" "scrub canceled" $2
}
1748
function is_pool_scrub_paused #pool <verbose>
{
	# Delegate to check_pool_status; its status is returned implicitly.
	check_pool_status "$1" "scan" "scrub paused since " $2
}
1754
function is_pool_removing #pool
{
	# Delegate to check_pool_status; its status is returned implicitly.
	check_pool_status "$1" "remove" "in progress since "
}
1760
function is_pool_removed #pool
{
	# Delegate to check_pool_status; its status is returned implicitly.
	check_pool_status "$1" "remove" "completed on"
}
1766
# Wait (up to $2 seconds, default 30) for the pool's health property
# to become DEGRADED. Returns 0 on success, 1 on timeout.
function wait_for_degraded
{
	typeset pool=$1
	typeset timeout=${2:-30}
	typeset t0=$SECONDS

	until [[ $(get_pool_prop health $pool) == "DEGRADED" ]]; do
		log_note "$pool is not yet degraded."
		sleep 1
		if ((SECONDS - t0 > $timeout)); then
			log_note "$pool not degraded after $timeout seconds."
			return 1
		fi
	done

	return 0
}
1785
1786#
1787# Wait for a pool to be scrubbed
1788#
1789# $1 pool name
1790# $2 number of seconds to wait (optional)
1791#
1792# Returns true when pool has been scrubbed, or false if there's a timeout or if
1793# no scrub was done.
1794#
function wait_scrubbed
{
	typeset pool=${1:-$TESTPOOL}
	# The timeout argument was previously documented but ignored,
	# making a hung scrub loop forever; honor it (default 300s).
	typeset timeout=${2:-300}
	typeset -i t0=$SECONDS

	while ! is_pool_scrubbed $pool ; do
		if ((SECONDS - t0 > timeout)); then
			log_note "$pool not scrubbed after $timeout seconds."
			return 1
		fi
		log_must sleep 1
	done

	return 0
}
1803
1804#
# Use create_pool()/destroy_pool() to clean up the information on
# the given disks to avoid slice overlapping.
1807#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Remove any leftover pool of the same name, then create and
	# destroy a scratch pool over the vdevs to rewrite their labels.
	poolexists $pool && destroy_pool $pool

	create_pool $pool $@
	destroy_pool $pool

	return 0
}
1821
1822#/**
1823# A function to find and locate free disks on a system or from given
1824# disks as the parameter. It works by locating disks that are in use
1825# as swap devices and dump devices, and also disks listed in /etc/vfstab
1826#
1827# $@ given disks to find which are free, default is all disks in
1828# the test system
1829#
1830# @return a string containing the list of available disks
1831#*/
1832function find_disks
1833{
1834	sfi=/tmp/swaplist.$$
1835	dmpi=/tmp/dumpdev.$$
1836	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1837
1838	swap -l > $sfi
1839	dumpadm > $dmpi 2>/dev/null
1840
1841# write an awk script that can process the output of format
1842# to produce a list of disks we know about. Note that we have
1843# to escape "$2" so that the shell doesn't interpret it while
1844# we're creating the awk script.
1845# -------------------
1846	cat > /tmp/find_disks.awk <<EOF
1847#!/bin/nawk -f
1848	BEGIN { FS="."; }
1849
1850	/^Specify disk/{
1851		searchdisks=0;
1852	}
1853
1854	{
1855		if (searchdisks && \$2 !~ "^$"){
1856			split(\$2,arr," ");
1857			print arr[1];
1858		}
1859	}
1860
1861	/^AVAILABLE DISK SELECTIONS:/{
1862		searchdisks=1;
1863	}
1864EOF
1865#---------------------
1866
1867	chmod 755 /tmp/find_disks.awk
1868	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
1869	rm /tmp/find_disks.awk
1870
1871	unused=""
1872	for disk in $disks; do
1873	# Check for mounted
1874		grep "${disk}[sp]" /etc/mnttab >/dev/null
1875		(($? == 0)) && continue
1876	# Check for swap
1877		grep "${disk}[sp]" $sfi >/dev/null
1878		(($? == 0)) && continue
1879	# check for dump device
1880		grep "${disk}[sp]" $dmpi >/dev/null
1881		(($? == 0)) && continue
1882	# check to see if this disk hasn't been explicitly excluded
1883	# by a user-set environment variable
1884		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
1885		(($? == 0)) && continue
1886		unused_candidates="$unused_candidates $disk"
1887	done
1888	rm $sfi
1889	rm $dmpi
1890
1891# now just check to see if those disks do actually exist
1892# by looking for a device pointing to the first slice in
1893# each case. limit the number to max_finddisksnum
1894	count=0
1895	for disk in $unused_candidates; do
1896		if [ -b /dev/dsk/${disk}s0 ]; then
1897		if [ $count -lt $max_finddisksnum ]; then
1898			unused="$unused $disk"
1899			# do not impose limit if $@ is provided
1900			[[ -z $@ ]] && ((count = count + 1))
1901		fi
1902		fi
1903	done
1904
1905# finally, return our disk list
1906	echo $unused
1907}
1908
1909#
1910# Add specified user to specified group
1911#
1912# $1 group name
1913# $2 user name
1914# $3 base of the homedir (optional)
1915#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	if [[ -z $gname || -z $uname ]]; then
		log_fail "group name or user name are not defined."
	fi

	# Create the account with a home dir under $basedir and unlock
	# it (passwd -N: no password, non-login-locked).
	log_must useradd -g $gname -d $basedir/$uname -m $uname
	log_must passwd -N $uname

	return 0
}
1931
1932#
1933# Delete the specified user.
1934#
1935# $1 login name
1936# $2 base of the homedir (optional)
1937#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	[[ -n $user ]] || log_fail "login name is necessary."

	# Remove the account only if it exists, then clean up the
	# home directory regardless.
	if id $user > /dev/null 2>&1; then
		log_must userdel $user
	fi

	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
1955
1956#
1957# Select valid gid and create specified group.
1958#
1959# $1 group name
1960#
function add_group #<group_name>
{
	typeset group=$1

	if ((${#group} == 0)); then
		log_fail "group name is necessary."
	fi

	# Assign 100 as the base gid, then probe upward until groupadd
	# finds a free gid.
	typeset -i gid=100
	while true; do
		groupadd -g $gid $group > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
			0) return 0 ;;
			# groupadd exit 4: the gid is not unique; try the
			# next one.
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
1982
1983#
1984# Delete the specified group.
1985#
1986# $1 group name
1987#
function del_group #<group_name>
{
	typeset grp=$1
	if ((${#grp} == 0)); then
		log_fail "group name is necessary."
	fi

	# Probe for the group's existence by "renaming" it to itself.
	groupmod -n $grp $grp > /dev/null 2>&1
	typeset -i ret=$?
	case $ret in
		# Group does not exist - nothing to delete.
		6) return 0 ;;
		# Name already exists as a group name - i.e. the group is
		# present, so delete it.
		9) log_must groupdel $grp ;;
		*) return 1 ;;
	esac

	return 0
}
2007
2008#
2009# This function will return true if it's safe to destroy the pool passed
2010# as argument 1. It checks for pools based on zvols and files, and also
2011# files contained in a pool that may have a different mountpoint.
2012#
2013function safe_to_destroy_pool { # $1 the pool name
2014
2015	typeset pool=""
2016	typeset DONT_DESTROY=""
2017
2018	# We check that by deleting the $1 pool, we're not
2019	# going to pull the rug out from other pools. Do this
2020	# by looking at all other pools, ensuring that they
2021	# aren't built from files or zvols contained in this pool.
2022
2023	for pool in $(zpool list -H -o name)
2024	do
2025		ALTMOUNTPOOL=""
2026
2027		# this is a list of the top-level directories in each of the
2028		# files that make up the path to the files the pool is based on
2029		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2030			awk '{print $1}')
2031
2032		# this is a list of the zvols that make up the pool
2033		ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
2034		    | awk '{print $1}')
2035
2036		# also want to determine if it's a file-based pool using an
2037		# alternate mountpoint...
2038		POOL_FILE_DIRS=$(zpool status -v $pool | \
2039					grep / | awk '{print $1}' | \
2040					awk -F/ '{print $2}' | grep -v "dev")
2041
2042		for pooldir in $POOL_FILE_DIRS
2043		do
2044			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2045					grep "${pooldir}$" | awk '{print $1}')
2046
2047			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2048		done
2049
2050
2051		if [ ! -z "$ZVOLPOOL" ]
2052		then
2053			DONT_DESTROY="true"
2054			log_note "Pool $pool is built from $ZVOLPOOL on $1"
2055		fi
2056
2057		if [ ! -z "$FILEPOOL" ]
2058		then
2059			DONT_DESTROY="true"
2060			log_note "Pool $pool is built from $FILEPOOL on $1"
2061		fi
2062
2063		if [ ! -z "$ALTMOUNTPOOL" ]
2064		then
2065			DONT_DESTROY="true"
2066			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2067		fi
2068	done
2069
2070	if [ -z "${DONT_DESTROY}" ]
2071	then
2072		return 0
2073	else
2074		log_note "Warning: it is not safe to destroy $1!"
2075		return 1
2076	fi
2077}
2078
2079#
2080# Get the available ZFS compression options
2081# $1 option type zfs_set|zfs_compress
2082#
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
			gzip-6 gzip-7 gzip-8 gzip-9"

	# Base option list depends on the requested option type.
	case $1 in
		zfs_compress)
			COMPRESS_OPTS="on lzjb" ;;
		zfs_set)
			COMPRESS_OPTS="on off lzjb" ;;
	esac

	typeset valid_opts="$COMPRESS_OPTS"
	# Append the gzip variants when this zfs supports them.
	if zfs get 2>&1 | grep gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
2101
2102#
2103# Verify zfs operation with -p option work as expected
2104# $1 operation, value could be create, clone or rename
2105# $2 dataset type, value could be fs or vol
2106# $3 dataset name
2107# $4 new dataset name
2108#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# 'create' takes only the new dataset name; volumes
			# additionally need a size.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			# clone source must be an existing snapshot
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			# rename source must exist and must not be a snapshot
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must zfs destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2172
2173#
2174# Get configuration of pool
2175# $1 pool name
2176# $2 config name
2177#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# The last column of 'zpool list -H' is the altroot; "-" means
	# the pool is imported normally, so zdb can use the cachefile.
	# NOTE: 'value' is assigned without typeset and is global.
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	if [[ -n $value ]] ; then
		# Strip surrounding quote characters from the zdb output.
		# NOTE(review): these patterns contain lone quotes that ksh
		# appears to treat literally here -- confirm before editing.
		value=${value#'}
		value=${value%'}
	fi
	echo $value

	return 0
}
2203
2204#
# Private function. Randomly select one of the items from the arguments.
2206#
2207# $1 count
2208# $2-n string
2209#
function _random_get
{
	typeset cnt=$1
	shift

	# Pick a 1-based field index in [1, cnt].
	typeset -i ind=$((RANDOM % cnt + 1))

	# The arguments join into one space-separated string; emit the
	# selected field.
	echo "$@" | cut -f $ind -d ' '
}
2222
2223#
# Randomly select one item from the arguments, with one extra empty
# (NONE) choice included
2225#
function random_get_with_non
{
	typeset -i cnt=$#

	# Add one extra slot for the empty (NONE) choice. The previous
	# expression '((cnt =+ 1))' assigned positive 1 to cnt instead
	# of incrementing it, so the first argument was always selected.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2233
2234#
# Randomly select one item from the arguments, with no NONE choice
2236#
function random_get
{
	# Every argument is a candidate; no empty (NONE) slot is added.
	typeset -i cnt=$#

	_random_get "$cnt" "$@"
}
2241
2242#
2243# Detect if the current system support slog
2244#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	typeset -i ret=0
	# 'zpool create -n' is a dry run: it validates the config
	# (including the log vdev) without creating anything.
	if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
		ret=1
	fi
	rm -r $dir

	return $ret
}
2263
2264#
2265# The function will generate a dataset name with specific length
2266# $1, the length of the name
2267# $2, the base string to construct the name
2268#
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i iter=0
	typeset l_name=""

	# Number of copies required: ceil(len / baselen). The result may
	# overshoot len by up to baselen - 1 characters.
	((iter = (len + baselen - 1) / baselen))

	while ((iter > 0)); do
		l_name="${l_name}$basestr"
		((iter -= 1))
	done

	echo $l_name
}
2290
2291#
2292# Get cksum tuple of dataset
2293# $1 dataset name
2294#
2295# sample zdb output:
2296# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2297# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2298# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2299# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2300function datasetcksum
2301{
2302	typeset cksum
2303	sync
2304	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2305		| awk -F= '{print $7}')
2306	echo $cksum
2307}
2308
2309#
2310# Get cksum of file
# $1 file path
2312#
function checksum
{
	# Emit only the CRC field of cksum(1) for the given file.
	cksum $1 | awk '{print $1}'
}
2319
2320#
2321# Get the given disk/slice state from the specific field of the pool
2322#
2323function get_device_state #pool disk field("", "spares","logs")
2324{
2325	typeset pool=$1
2326	typeset disk=${2#/dev/dsk/}
2327	typeset field=${3:-$pool}
2328
2329	state=$(zpool status -v "$pool" 2>/dev/null | \
2330		nawk -v device=$disk -v pool=$pool -v field=$field \
2331		'BEGIN {startconfig=0; startfield=0; }
2332		/config:/ {startconfig=1}
2333		(startconfig==1) && ($1==field) {startfield=1; next;}
2334		(startfield==1) && ($1==device) {print $2; exit;}
2335		(startfield==1) &&
2336		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2337	echo $state
2338}
2339
2340
2341#
2342# print the given directory filesystem type
2343#
2344# $1 directory name
2345#
function get_fstype
{
	typeset dir=$1

	[[ -n $dir ]] || log_fail "Usage: get_fstype <directory>"

	#
	# df -n prints "<mountpoint> : <fstype>"; the type is field 3.
	#
	df -n $dir | awk '{print $3}'
}
2360
2361#
2362# Given a disk, label it to VTOC regardless what label was on the disk
2363# $1 disk
2364#
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	# Build a format(1M) command script appropriate to the platform.
	if [[ $arch == "i386" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file

		# x86 additionally needs a default fdisk partition first.
		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	# Drive format non-interactively with the generated script.
	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	#
	# wait the format to finish
	#
	sleep 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2408
2409#
2410# check if the system was installed as zfsroot or not
# return: 0 if true, otherwise false
2412#
function is_zfsroot
{
	# True when df reports the root filesystem type as zfs.
	df -n / | grep zfs > /dev/null 2>&1
}
2418
2419#
2420# get the root filesystem name if it's zfsroot system.
2421#
2422# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	# The mnttab entry mounted at "/" with type "zfs" is the root
	# dataset.
	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootfs"

	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2438
2439#
2440# get the rootfs's pool name
2441# return:
2442#       rootpool name
2443#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	# The mnttab entry mounted at "/" with type "zfs" is the root
	# dataset.
	rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		 /etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootpool"
	fi
	if zfs list $rootfs > /dev/null 2>&1; then
		# The pool is the first component of the dataset name.
		rootpool=${rootfs%%/*}
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2461
2462#
2463# Check if the given device is physical device
2464#
function is_physical_device #device
{
	# Strip any /dev/dsk or /dev/rdsk prefix, then match the
	# cXtXdX / cXdX controller-target-disk naming scheme.
	typeset dev=${1#/dev/dsk/}
	dev=${dev#/dev/rdsk/}

	echo $dev | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
}
2473
2474#
2475# Get the directory path of given device
2476#
function get_device_dir #device
{
	typeset device=$1

	# Call the predicate directly. The old form wrapped it in
	# $(...), which only worked because the function produces no
	# output; the substitution was pointless and fragile.
	if ! is_physical_device $device ; then
		# Non-physical devices (files): return the directory part.
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		echo $device
	else
		echo "/dev/dsk"
	fi
}
2490
2491#
2492# Get the package name
2493#
2494function get_package_name
2495{
2496	typeset dirpath=${1:-$STC_NAME}
2497
2498	echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
2499}
2500
2501#
2502# Get the word numbers from a string separated by white space
2503#
2504function get_word_count
2505{
2506	echo $1 | wc -w
2507}
2508
2509#
2510# To verify if the require numbers of disks is given
2511#
function verify_disk_count
{
	typeset -i min=${2:-1}
	typeset -i count=$(get_word_count "$1")

	# Mark the test untested (not failed) when too few disks were
	# supplied.
	if ((count < min)); then
		log_untested "A minimum of $min disks is required to run." \
			" You specified $count disk(s)"
	fi
}
2523
function ds_is_volume
{
	# True when the dataset's ZFS 'type' property is 'volume'.
	[[ $(get_prop type $1) == "volume" ]]
}
2530
function ds_is_filesystem
{
	# True when the dataset's ZFS 'type' property is 'filesystem'.
	[[ $(get_prop type $1) == "filesystem" ]]
}
2537
function ds_is_snapshot
{
	# True when the dataset's ZFS 'type' property is 'snapshot'.
	[[ $(get_prop type $1) == "snapshot" ]]
}
2544
2545#
2546# Check if Trusted Extensions are installed and enabled
2547#
function is_te_enabled
{
	# grep's exit status reports whether the labeld service is
	# enabled; the matching line, if any, still goes to stdout.
	svcs -H -o state labeld 2>/dev/null | grep "enabled"
}
2557
# Utility function to determine if a system has multiple cpus.
# Return 0 when psrinfo reports more than one processor line.
function is_mp
{
	typeset -i ncpu=$(psrinfo | wc -l)

	((ncpu > 1))
}
2563
# Print the clock frequency of cpu 0, taken from field 6 of the
# "... processor operates at NNNN MHz" line of psrinfo -v output.
function get_cpu_freq
{
	psrinfo -v 0 | awk '$0 ~ /processor operates at/ { print $6 }'
}
2568
2569# Run the given command as the user provided.
2570function user_run
2571{
2572	typeset user=$1
2573	shift
2574
2575	eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
2576	return $?
2577}
2578
2579#
2580# Check if the pool contains the specified vdevs
2581#
2582# $1 pool
2583# $2..n <vdev> ...
2584#
2585# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2586# vdevs is not in the pool, and 2 if pool name is missing.
2587#
2588function vdevs_in_pool
2589{
2590	typeset pool=$1
2591	typeset vdev
2592
2593        if [[ -z $pool ]]; then
2594                log_note "Missing pool name."
2595                return 2
2596        fi
2597
2598	shift
2599
2600	typeset tmpfile=$(mktemp)
2601	zpool list -Hv "$pool" >$tmpfile
2602	for vdev in $@; do
2603		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
2604		[[ $? -ne 0 ]] && return 1
2605	done
2606
2607	rm -f $tmpfile
2608
2609	return 0;
2610}
2611
# Print the largest of the integer arguments.
#
# $1..n integers to compare
function get_max
{
	# NOTE: the original used "typeset -l", which is the *lowercase*
	# attribute in ksh93/bash (presumably a typo for an integer
	# attribute); plain typeset is used here, and the useless
	# per-iteration $(echo $((...))) subshell is dropped.
	typeset i max=$1
	shift

	for i in "$@"; do
		if ((i > max)); then
			max=$i
		fi
	done

	echo $max
}
2623
# Print the smallest of the integer arguments.
#
# $1..n integers to compare
function get_min
{
	# NOTE: the original used "typeset -l" (lowercase attribute,
	# presumably a typo for an integer attribute); plain typeset is
	# used here, and the useless per-iteration $(echo $((...)))
	# subshell is dropped.
	typeset i min=$1
	shift

	for i in "$@"; do
		if ((i < min)); then
			min=$i
		fi
	done

	echo $min
}
2635
2636#
2637# Generate a random number between 1 and the argument.
2638#
2639function random
2640{
2641        typeset max=$1
2642        echo $(( ($RANDOM % $max) + 1 ))
2643}
2644
2645# Write data that can be compressed into a directory
2646function write_compressible
2647{
2648	typeset dir=$1
2649	typeset megs=$2
2650	typeset nfiles=${3:-1}
2651	typeset bs=${4:-1024k}
2652	typeset fname=${5:-file}
2653
2654	[[ -d $dir ]] || log_fail "No directory: $dir"
2655
2656	log_must eval "fio \
2657	    --name=job \
2658	    --fallocate=0 \
2659	    --minimal \
2660	    --randrepeat=0 \
2661	    --buffer_compress_percentage=66 \
2662	    --buffer_compress_chunk=4096 \
2663	    --directory=$dir \
2664	    --numjobs=$nfiles \
2665	    --rw=write \
2666	    --bs=$bs \
2667	    --filesize=$megs \
2668	    --filename_format='$fname.\$jobnum' >/dev/null"
2669}
2670
# Print the object (inode) number of the given file or directory.
#
# $1 path to inspect; log_fail if it does not exist
function get_objnum
{
	typeset target=$1

	[[ -e $target ]] || log_fail "No such file or directory: $target"
	stat -c %i $target
}
2680
2681#
2682# Sync data to the pool
2683#
2684# $1 pool name
2685# $2 boolean to force uberblock (and config including zpool cache file) update
2686#
2687function sync_pool #pool <force>
2688{
2689	typeset pool=${1:-$TESTPOOL}
2690	typeset force=${2:-false}
2691
2692	if [[ $force == true ]]; then
2693		log_must zpool sync -f $pool
2694	else
2695		log_must zpool sync $pool
2696	fi
2697
2698	return 0
2699}
2700
2701#
2702# Prints the current time in seconds since UNIX Epoch.
2703#
2704function current_epoch
2705{
2706	printf '%(%s)T'
2707}
2708
2709#
2710# Get decimal value of global uint32_t variable using mdb.
2711#
2712function mdb_get_uint32
2713{
2714	typeset variable=$1
2715	typeset value
2716
2717	value=$(mdb -k -e "$variable/X | ::eval .=U")
2718	if [[ $? -ne 0 ]]; then
2719		log_fail "Failed to get value of '$variable' from mdb."
2720		return 1
2721	fi
2722
2723	echo $value
2724	return 0
2725}
2726
2727#
2728# Wait for every device replace operation to complete
2729#
2730# $1 pool name
2731#
2732function wait_replacing #pool
2733{
2734	typeset pool=${1:-$TESTPOOL}
2735	while true; do
2736		[[ "" == "$(zpool status $pool |
2737		    awk '/replacing-[0-9]+/ {print $1}')" ]] && break
2738		log_must sleep 1
2739	done
2740}
2741
2742#
2743# Set global uint32_t variable to a decimal value using mdb.
2744#
2745function mdb_set_uint32
2746{
2747	typeset variable=$1
2748	typeset value=$2
2749
2750	mdb -kw -e "$variable/W 0t$value" > /dev/null
2751	if [[ $? -ne 0 ]]; then
2752		echo "Failed to set '$variable' to '$value' in mdb."
2753		return 1
2754	fi
2755
2756	return 0
2757}
2758
2759#
2760# Set global scalar integer variable to a hex value using mdb.
2761# Note: Target should have CTF data loaded.
2762#
2763function mdb_ctf_set_int
2764{
2765	typeset variable=$1
2766	typeset value=$2
2767
2768	mdb -kw -e "$variable/z $value" > /dev/null
2769	if [[ $? -ne 0 ]]; then
2770		echo "Failed to set '$variable' to '$value' in mdb."
2771		return 1
2772	fi
2773
2774	return 0
2775}
2776
2777#
2778# Set a global system tunable (64-bit value)
2779#
2780# $1 tunable name
2781# $2 tunable values
2782#
2783function set_tunable64
2784{
2785	set_tunable_impl "$1" "$2" Z
2786}
2787
2788#
2789# Set a global system tunable (32-bit value)
2790#
2791# $1 tunable name
2792# $2 tunable values
2793#
2794function set_tunable32
2795{
2796	set_tunable_impl "$1" "$2" W
2797}
2798
# Platform-specific helper to set a tunable.
#
# $1 tunable name
# $2 value to set
# $3 mdb write format character (W = 32-bit, Z = 64-bit); SunOS only
# $4 kernel module owning the tunable; optional - defaulted to zfs
#
# Return 0 on success, 1 on missing arguments or failure to set.
function set_tunable_impl
{
	typeset tunable="$1"
	typeset value="$2"
	typeset mdb_cmd="$3"
	typeset module="${4:-zfs}"

	[[ -z "$tunable" ]] && return 1
	[[ -z "$value" ]] && return 1
	[[ -z "$mdb_cmd" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -w "$zfs_tunables/$tunable" ]] || return 1
		echo -n "$value" > "$zfs_tunables/$tunable"
		return "$?"
		;;
	SunOS)
		# Only the zfs module is supported via mdb.  This must be a
		# string comparison: the original "-eq" arithmetic test
		# evaluated both bare words to 0 and therefore always passed.
		[[ "$module" == "zfs" ]] || return 1
		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
		return "$?"
		;;
	esac
}
2824
2825#
2826# Get a global system tunable
2827#
2828# $1 tunable name
2829#
2830function get_tunable
2831{
2832	get_tunable_impl "$1"
2833}
2834
# Platform-specific helper to read a tunable.
#
# $1 tunable name
# $2 kernel module owning the tunable; optional - defaulted to zfs
#
# Prints the value and returns 0 on success; returns 1 on missing
# argument or unreadable tunable (SunOS failures go through log_fail).
function get_tunable_impl
{
	typeset tunable="$1"
	typeset module="${2:-zfs}"
	typeset value

	[[ -z "$tunable" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -f "$zfs_tunables/$tunable" ]] || return 1
		cat $zfs_tunables/$tunable
		return "$?"
		;;
	SunOS)
		# Assignment is kept separate from the typeset declaration
		# so that $? reflects mdb's exit status; "typeset v=$(cmd)"
		# always succeeded and masked mdb failures.
		value=$(mdb -k -e "$tunable/X | ::eval .=U")
		if [[ $? -ne 0 ]]; then
			log_fail "Failed to get value of '$tunable' from mdb."
			return 1
		fi
		echo $value
		return 0
		;;
	esac

	return 1
}
2862