xref: /illumos-gate/usr/src/test/zfs-tests/include/libtest.shlib (revision 45a4b79d042e642c2ed7090ec290469ccf8fc563)
1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24# Use is subject to license terms.
25# Copyright (c) 2012, 2017 by Delphix. All rights reserved.
26# Copyright 2016 Nexenta Systems, Inc.
27# Copyright (c) 2017 Datto Inc.
28#
29
30. ${STF_TOOLS}/contrib/include/logapi.shlib
31. ${STF_SUITE}/include/blkdev.shlib
32
33# Determine if this is a Linux test system
34#
35# Return 0 if platform Linux, 1 if otherwise
36
function is_linux
{
	# `uname -o` reports "GNU/Linux" on Linux systems; on anything
	# else it either fails or prints a different operating system.
	typeset os
	os=$(uname -o)
	if [[ $os == "GNU/Linux" ]]; then
		return 0
	fi
	return 1
}
45
46# Determine whether a dataset is mounted
47#
48# $1 dataset name
49# $2 filesystem type; optional - defaulted to zfs
50#
51# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
52
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			# $1 may be either a mountpoint (leading '/') or a
			# dataset name; match it against the corresponding
			# column of 'zfs mount' output.
			if [[ "$1" == "/"* ]] ; then
				for out in $(zfs mount | awk '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $(zfs mount | awk '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			# df output looks like: <mountpoint> (<device> ): ...
			# Extract both fields and accept a match on either.
			out=$(df -F $fstype $1 2>/dev/null)
			ret=$?
			# df failed: propagate its status (caller sees != 0/1).
			(($ret != 0)) && return $ret

			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	esac

	return 1
}
88
89# Return 0 if a dataset is mounted; 1 otherwise
90#
91# $1 dataset name
92# $2 filesystem type; optional - defaulted to zfs
93
function mounted
{
	# Positive wrapper: succeed only when ismounted reports mounted.
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
100
101# Return 0 if a dataset is unmounted; 1 otherwise
102#
103# $1 dataset name
104# $2 filesystem type; optional - defaulted to zfs
105
function unmounted
{
	# Succeed only on ismounted status 1 ("not mounted"); status 2
	# (error) is treated as failure, exactly as before.
	typeset rc
	ismounted $1 $2
	rc=$?
	[[ $rc -eq 1 ]] && return 0
	return 1
}
112
113# split line on ","
114#
115# $1 - line to split
116
function splitline
{
	# Emit $1 with every comma replaced by a space.  $1 is left
	# unquoted deliberately so existing whitespace collapses, as in
	# the original implementation.
	echo $1 | tr ',' ' '
}
121
122function default_setup
123{
124	default_setup_noexit "$@"
125
126	log_pass
127}
128
129#
130# Given a list of disks, setup storage pools and datasets.
131#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2	# non-empty: also create $TESTCTR/$TESTFS1
	typeset volume=$3	# non-empty: also create $TESTVOL

	# Pools can only be created from the global zone; a local zone
	# just re-imports the datasets delegated to it.
	if is_global_zone; then
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		reexport_pool
	fi

	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	# Optional container: an unmountable parent dataset holding a
	# second filesystem mounted at $TESTDIR1.
	if [[ -n $container ]]; then
		rm -rf $TESTDIR1  || \
			log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	# Optional volume; only the global zone can create a real zvol
	# (-V), a local zone gets a plain dataset of the same name.
	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
175
176#
177# Given a list of disks, setup a storage pool, file system and
178# a container.
179#
function default_container_setup
{
	# Standard setup plus the container dataset ($2 = "true").
	default_setup "$1" "true"
}
186
187#
188# Given a list of disks, setup a storage pool,file system
189# and a volume.
190#
function default_volume_setup
{
	# Standard setup plus the volume ($3 = "true", no container).
	default_setup "$1" "" "true"
}
197
198#
199# Given a list of disks, setup a storage pool,file system,
200# a container and a volume.
201#
function default_container_volume_setup
{
	# Standard setup plus both the container and the volume.
	default_setup "$1" "true" "true"
}
208
209#
210# Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
211# filesystem
212#
213# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
214# $2 snapshot name. Default, $TESTSNAP
215#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	# Refuse to clobber an existing snapshot, and require that the
	# target dataset actually exists before snapshotting it.
	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}
232
233#
234# Create a clone from a snapshot, default clone name is $TESTCLONE.
235#
236# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
237# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
238#
239function create_clone   # snapshot clone
240{
241	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
242	typeset clone=${2:-$TESTPOOL/$TESTCLONE}
243
244	[[ -z $snap ]] && \
245		log_fail "Snapshot name is undefined."
246	[[ -z $clone ]] && \
247		log_fail "Clone name is undefined."
248
249	log_must zfs clone $snap $clone
250}
251
252#
253# Create a bookmark of the given snapshot.  Defaultly create a bookmark on
254# filesystem.
255#
256# $1 Existing filesystem or volume name. Default, $TESTFS
257# $2 Existing snapshot name. Default, $TESTSNAP
258# $3 bookmark name. Default, $TESTBKMARK
259#
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	# Refuse to clobber an existing bookmark; require both the
	# dataset and the source snapshot to exist.
	if bkmarkexists $fs_vol#$bkmark; then
		log_fail "$fs_vol#$bkmark already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."
	snapexists $fs_vol@$snap || \
		log_fail "$fs_vol@$snap must exist."

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
280
281#
282# Create a temporary clone result of an interrupted resumable 'zfs receive'
283# $1 Destination filesystem name. Must not exist, will be created as the result
284#    of this function along with its %recv temporary clone
285# $2 Source filesystem name. Must not exist, will be created and destroyed
286#
function create_recv_clone
{
	typeset recvfs="$1"
	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
	typeset snap="$sendfs@snap1"
	typeset incr="$sendfs@snap2"
	typeset mountpoint="$TESTDIR/create_recv_clone"
	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"

	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."

	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
	datasetexists $sendfs && log_fail "Send filesystem must not exist."

	# Seed $recvfs with a full receive of the first snapshot.
	log_must zfs create -o mountpoint="$mountpoint" $sendfs
	log_must zfs snapshot $snap
	log_must eval "zfs send $snap | zfs recv -u $recvfs"
	log_must mkfile 1m "$mountpoint/data"
	log_must zfs snapshot $incr
	# Truncate the incremental stream (10k) so the resumable receive
	# (-s) fails partway through, leaving the %recv clone behind.
	log_must eval "zfs send -i $snap $incr | dd bs=10k count=1 > $sendfile"
	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
	log_must zfs destroy -r $sendfs
	log_must rm -f "$sendfile"

	# A successfully interrupted receive leaves %recv inconsistent.
	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
		log_fail "Error creating temporary $recvfs/%recv clone"
	fi
}
315
316function default_mirror_setup
317{
318	default_mirror_setup_noexit $1 $2 $3
319
320	log_pass
321}
322
323#
324# Given a pair of disks, set up a storage pool and dataset for the mirror
325# @parameters: $1 the primary side of the mirror
326#   $2 the secondary side of the mirror
327# @uses: ZPOOL ZFS TESTPOOL TESTFS
328function default_mirror_setup_noexit
329{
330	readonly func="default_mirror_setup_noexit"
331	typeset primary=$1
332	typeset secondary=$2
333
334	[[ -z $primary ]] && \
335		log_fail "$func: No parameters passed"
336	[[ -z $secondary ]] && \
337		log_fail "$func: No secondary partition passed"
338	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
339	log_must zpool create -f $TESTPOOL mirror $@
340	log_must zfs create $TESTPOOL/$TESTFS
341	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
342}
343
344#
345# create a number of mirrors.
346# We create a number($1) of 2 way mirrors using the pairs of disks named
347# on the command line. These mirrors are *not* mounted
348# @parameters: $1 the number of mirrors to create
349#  $... the devices to use to create the mirrors on
350# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	# Consume the device list two at a time; pool names count down
	# from $TESTPOOL$nmirrors to $TESTPOOL1.
	while ((nmirrors > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		((nmirrors = nmirrors - 1))
	done
}
364
365#
366# create a number of raidz pools.
367# We create a number($1) of 2 raidz pools  using the pairs of disks named
368# on the command line. These pools are *not* mounted
369# @parameters: $1 the number of pools to create
370#  $... the devices to use to create the pools on
371# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	# Consume the device list two at a time; pool names count down
	# from $TESTPOOL$nraidzs to $TESTPOOL1.
	while ((nraidzs > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		((nraidzs = nraidzs - 1))
	done
}
385
386#
387# Destroy the configured testpool mirrors.
388# the mirrors are of the form ${TESTPOOL}{number}
389# @uses: ZPOOL ZFS TESTPOOL
390function destroy_mirrors
391{
392	default_cleanup_noexit
393
394	log_pass
395}
396
397#
398# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
399# $1 the list of disks
400#
function default_raidz_setup
{
	typeset disklist="$*"
	# Keep the split list local; previously 'disks' leaked globally.
	typeset -a disks
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	# Use the entire disk list; the original only ever passed the
	# first three disks ($1 $2 $3) to zpool create.
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
417
418#
419# Common function used to cleanup storage pools and datasets.
420#
421# Invoked at the start of the test suite to ensure the system
422# is in a known state, and also at the end of each set of
423# sub-tests to ensure errors from one set of tests doesn't
424# impact the execution of the next set.
425
426function default_cleanup
427{
428	default_cleanup_noexit
429
430	log_pass
431}
432
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		# $KEEP lists pools that must survive cleanup; build an
		# egrep pattern so they (and $NO_POOLS) are filtered out
		# of the candidate list below.
		exclude=`eval echo \"'(${KEEP})'\"`
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		while [ ! -z ${ALL_POOLS} ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				# Re-query after each destroy so pools that
				# became safe (e.g. freed dependents) are
				# picked up on the next pass.
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		# Local zone: we cannot destroy pools, only the datasets
		# delegated into this zone ($ZONE_POOL/$ZONE_CTR[0-4]/...).
		typeset fs=""
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must zfs destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# checksum cannot be reset on encrypted
				# datasets; only touch it when encryption
				# is absent or off.
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must rm -rf $TESTDIR
}
509
510
511#
512# Common function used to cleanup storage pools, file systems
513# and containers.
514#
function default_container_cleanup
{
	# In a local zone, re-import the delegated datasets first so the
	# teardown below can see them.
	if ! is_global_zone; then
		reexport_pool
	fi

	# Unmount, then recursively destroy the contained filesystem and
	# its container, and remove the second mountpoint directory.
	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must zfs destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	# Finish with the standard cleanup (exits via log_pass).
	default_cleanup
}
536
537#
538# Common function used to cleanup snapshot of file system or volume. Default to
539# delete the file system's snapshot
540#
541# $1 snapshot name
542#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		# Fixed wording; was "does not existed."
		log_fail "'$snap' does not exist."
	fi

	#
	# The mountpoint reported by get_prop is only meaningful while
	# the snapshot is mounted, so record it for cleanup only then.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must zfs destroy $snap
	# Remove the now-stale mountpoint directory, if one was recorded.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
567
568#
569# Common function used to cleanup clone.
570#
571# $1 clone name
572#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		# Fixed wording; was "does not existed."
		log_fail "'$clone' does not exist."
	fi

	# As in destroy_snapshot: the mountpoint is only valid while the
	# clone is mounted, so record it for cleanup only in that case.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	log_must zfs destroy $clone
	# Remove the now-stale mountpoint directory, if one was recorded.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
593
594#
595# Common function used to cleanup bookmark of file system or volume.  Default
596# to delete the file system's bookmark.
597#
598# $1 bookmark name
599#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Fix: the message referenced the undefined variable
		# $bkmarkp, so it always printed an empty name.
		log_fail "'$bkmark' does not exist."
	fi

	log_must zfs destroy $bkmark
}
610
611# Return 0 if a snapshot exists; $? otherwise
612#
613# $1 - snapshot name
614
function snapexists
{
	# The listing succeeds exactly when the snapshot exists; its
	# status is the function's status.
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
620
621#
622# Return 0 if a bookmark exists; $? otherwise
623#
624# $1 - bookmark name
625#
function bkmarkexists
{
	# The listing succeeds exactly when the bookmark exists; its
	# status is the function's status.
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
631
632#
633# Set a property to a certain value on a dataset.
634# Sets a property of the dataset to the value as passed in.
635# @param:
636#	$1 dataset who's property is being set
637#	$2 property to set
638#	$3 value to set property to
639# @return:
640#	0 if the property could be set.
641#	non-zero otherwise.
642# @use: ZFS
643#
function dataset_setprop
{
	typeset fn=dataset_setprop

	# Require dataset, property and value.
	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi

	typeset output rv
	output=$(zfs set $2=$3 $1 2>&1)
	rv=$?
	if ((rv == 0)); then
		return 0
	fi

	# Report the failure in detail before propagating zfs's status.
	log_note "Setting property on $1 failed."
	log_note "property $2=$3"
	log_note "Return Code: $rv"
	log_note "Output: $output"
	return $rv
}
664
665#
666# Assign suite defined dataset properties.
667# This function is used to apply the suite's defined default set of
668# properties to a dataset.
669# @parameters: $1 dataset to use
670# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
671# @returns:
672#   0 if the dataset has been altered.
673#   1 if no pool name was passed in.
674#   2 if the dataset could not be found.
675#   3 if the dataset could not have it's properties set.
676#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	# Use parsable output (-H -o name) so we only compare against
	# dataset names, never header text or other columns of the
	# default 'zfs list' table.
	for confset in $(zfs list -H -o name); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
704
705#
706# Check a numeric assertion
707# @parameter: $@ the assertion to check
708# @output: big loud notice if assertion failed
709# @use: log_fail
710#
function assert
{
	# Evaluate the arguments as an arithmetic expression; on a false
	# result, fail the test echoing the expression itself.
	if ! (($@)); then
		log_fail "$@"
	fi
}
715
716#
717# Function to format partition size of a disk
718# Given a disk cxtxdx reduces all partitions
719# to 0 size
720#
721function zero_partitions #<whole_disk_name>
722{
723	typeset diskname=$1
724	typeset i
725
726	for i in 0 1 3 4 5 6 7
727	do
728		set_partition $i "" 0mb $diskname
729	done
730}
731
732#
733# Given a slice, size and disk, this function
734# formats the slice to the specified size.
735# Size should be specified with units as per
736# the `format` command requirements eg. 100mb 3gb
737#
738function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
739{
740	typeset -i slicenum=$1
741	typeset start=$2
742	typeset size=$3
743	typeset disk=$4
744	[[ -z $slicenum || -z $size || -z $disk ]] && \
745	    log_fail "The slice, size or disk name is unspecified."
746	typeset format_file=/var/tmp/format_in.$$
747
748	echo "partition" >$format_file
749	echo "$slicenum" >> $format_file
750	echo "" >> $format_file
751	echo "" >> $format_file
752	echo "$start" >> $format_file
753	echo "$size" >> $format_file
754	echo "label" >> $format_file
755	echo "" >> $format_file
756	echo "q" >> $format_file
757	echo "q" >> $format_file
758
759	format -e -s -d $disk -f $format_file
760	typeset ret_val=$?
761	rm -f $format_file
762	[[ $ret_val -ne 0 ]] && \
763	    log_fail "Unable to format $disk slice $slicenum to $size"
764	return 0
765}
766
767#
768# Get the end cyl of the given slice
769#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	# Normalize to the bare device name: strip any /dev/[r]dsk/
	# prefix and a trailing sNN slice suffix.
	disk=${disk#/dev/dsk/}
	disk=${disk#/dev/rdsk/}
	disk=${disk%s*}

	# Sectors per cylinder, from the s2 (whole-disk) VTOC.
	typeset -i ratio=0
	ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		grep "sectors\/cylinder" | \
		awk '{print $2}')

	# No geometry available: emit nothing rather than divide by 0.
	if ((ratio == 0)); then
		return
	fi

	# Column 6 of 'prtvtoc -h' is the slice's last sector; convert
	# that to a cylinder count.
	typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		nawk -v token="$slice" '{if ($1==token) print $6}')

	((endcyl = (endcyl + 1) / ratio))
	echo $endcyl
}
797
798
799#
800# Given a size,disk and total slice number,  this function formats the
801# disk slices from 0 to the total slice number with the same specified
802# size.
803#
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	# Wipe existing slices, then lay out each new slice starting
	# where the previous one ended.
	zero_partitions $disk_name
	while ((i < $total_slices)); do
		# Slice 2 is reserved (whole-disk slice); skip it.
		if ((i == 2)); then
			((i = i + 1))
			continue
		fi
		set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
		((i = i+1))
	done
}
823
824#
825# This function continues to write to a filenum number of files into dirnum
826# number of directories until either file_write returns an error or the
827# maximum number of files per directory have been written.
828#
829# Usage:
830# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
831#
832# Return value: 0 on success
833#		non 0 on error
834#
835# Where :
836#	destdir:    is the directory where everything is to be created under
837#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
838#	filenum:    the maximum number of files per subdirectory
839#	bytes:	    number of bytes to write
840#	num_writes: numer of types to write out bytes
841#	data:	    the data that will be writen
842#
843#	E.g.
844#	file_fs /testdir 20 25 1024 256 0
845#
846# Note: bytes * num_writes equals the size of the testfile
847#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}	# -1 means no directory limit
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1	# outer-loop flag: 0 stops the loop
	typeset -i idirnum=0	# index of the directory being filled
	typeset -i fn=0		# index of the file within that directory
	typeset -i retval=0

	log_must mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# Stop once the directory limit is reached (unless -1).
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# A write failure (e.g. ENOSPC) also ends the loop; the
		# failing status is returned to the caller.
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		# Advance to the next directory once this one is full.
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			log_must mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
885
886#
887# Simple function to get the specified property. If unable to
888# get the property then exits.
889#
890# Note property is in 'parsable' format (-p)
891#
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset prop_val

	# -p gives parsable values, -H strips headers.
	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
	if (($? != 0)); then
		log_note "Unable to get $prop property for dataset " \
		"$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
908
909#
910# Simple function to get the specified property of pool. If unable to
911# get the property then exits.
912#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		# The value is the third column of the last output line.
		prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
			awk '{print $3}')
		# NOTE(review): $? here is the exit status of the awk
		# stage of the pipeline, not of 'zpool get', so this
		# branch is unlikely ever to fire — confirm before
		# relying on it.
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			"$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	echo $prop_val
	return 0
}
935
936# Return 0 if a pool exists; $? otherwise
937#
938# $1 - pool name
939
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# Any property query succeeds exactly when the pool exists; its
	# status becomes the function's status.
	zpool get name "$pool" > /dev/null 2>&1
}
952
953# Return 0 if all the specified datasets exist; $? otherwise
954#
955# $1-n  dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Every named dataset must be queryable; fail on the first that
	# is not, propagating zfs's status.
	typeset ds
	for ds in "$@"; do
		zfs get name $ds > /dev/null 2>&1 || \
			return $?
	done

	return 0
}
971
972# return 0 if none of the specified datasets exists, otherwise return 1.
973#
974# $1-n  dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Fail as soon as any named dataset turns out to exist.
	typeset ds
	for ds in "$@"; do
		if zfs list -H -t filesystem,snapshot,volume $ds \
		    > /dev/null 2>&1; then
			return 1
		fi
	done

	return 0
}
990
991#
992# Given a mountpoint, or a dataset name, determine if it is shared.
993#
994# Returns 0 if shared, 1 otherwise.
995#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A dataset name is translated to its mountpoint first; datasets
	# without a usable mountpoint can never be shared.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	# share(1M) lists one shared path per line in column 2.
	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Not shared; log the NFS server state as a debugging aid since
	# a disabled server makes every share check fail.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
1028
1029#
1030# Given a mountpoint, determine if it is not shared.
1031#
1032# Returns 0 if not shared, 1 otherwise.
1033#
function not_shared
{
	# Simple inversion of is_shared.
	if is_shared $1; then
		return 1
	fi

	return 0
}
1045
1046#
1047# Helper function to unshare a mountpoint.
1048#
function unshare_fs #fs
{
	typeset fs=$1

	# Only attempt the unshare when the fs is currently shared;
	# always report success otherwise.
	if is_shared $fs; then
		log_must zfs unshare $fs
	fi

	return 0
}
1060
1061#
1062# Check NFS server status and trigger it online.
1063#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		timeout=10
		# Note: inside [[ ]] with -ne, 'timeout' is evaluated
		# arithmetically, so the bare name refers to the variable.
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		# The service stays online even after the dummy share is
		# removed; clean up the scaffolding.
		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1110
1111#
1112# To verify whether calling process is in global zone
1113#
1114# Return 0 if in global zone, 1 in non-global zone
1115#
function is_global_zone
{
	# zonename(1) prints "global" in the global zone; anything else
	# (including zonename being unavailable) means non-global.
	[[ $(zonename 2>/dev/null) == "global" ]]
}
1124
1125#
1126# Verify whether test is permitted to run from
1127# global zone, local zone, or both
1128#
1129# $1 zone limit, could be "global", "local", or "both"(no limit)
1130#
1131# Return 0 if permitted, otherwise exit with log_unsupported
1132#
function verify_runnable # zone limit
{
	typeset limit=$1

	# No limit given: runnable anywhere.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from "\
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from "\
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		# Local zones must re-import their delegated datasets
		# before any test can use them.
		reexport_pool
	fi

	return 0
}
1167
1168# Return 0 if create successfully or the pool exists; $? otherwise
1169# Note: In local zones, this function should return 0 silently.
1170#
1171# $1 - pool name
1172# $2-n - [keyword] devs_list
1173
function create_pool #pool devs_list
{
	# Strip any dataset component so $pool is a bare pool name.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Start from a clean slate: tear down a pre-existing pool.
	poolexists $pool && destroy_pool $pool

	# Only the global zone may create pools; local zones silently
	# succeed.
	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1196
1197# Return 0 if destroy successfully or the pool exists; $? otherwise
1198# Note: In local zones, this function should return 0 silently.
1199#
1200# $1 - pool name
1201# Destroy pool with the given parameters.
1202
function destroy_pool #pool
{
	# Strip any dataset component so $pool is a bare pool name.
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			# Record the mountpoint before the pool is gone.
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			must=""
			while [[ $ret -ne 0 ]]; do
				# After 8 failed attempts, wrap the destroy in
				# log_must so the next failure aborts the test.
				$must zpool destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				[[ count++ -ge 7 ]] && must=log_must
				sleep $wait_time
			done

			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1242
1243# Return 0 if created successfully; $? otherwise
1244#
1245# $1 - dataset name
1246# $2-n - dataset options
1247
function create_dataset #dataset dataset_options
{
	typeset dataset=$1

	shift

	if [[ -z $dataset ]]; then
		log_note "Missing dataset name."
		return 1
	fi

	# Recreate from scratch if the dataset already exists.
	datasetexists $dataset && destroy_dataset $dataset

	log_must zfs create $@ $dataset

	return 0
}
1267
1268# Return 0 if destroy successfully or the dataset exists; $? otherwise
1269# Note: In local zones, this function should return 0 silently.
1270#
1271# $1 - dataset name
1272
function destroy_dataset #dataset
{
	typeset dataset=$1
	typeset mtpt

	if [[ -z $dataset ]]; then
		log_note "No dataset name given."
		return 1
	fi

	if datasetexists "$dataset" ; then
		# Record the mountpoint first so the (now stale)
		# directory can be removed after the destroy.
		mtpt=$(get_prop mountpoint "$dataset")
		log_must zfs destroy -r $dataset
		[[ -d $mtpt ]] && log_must rm -rf $mtpt
	else
		log_note "Dataset does not exist. ($dataset)"
		return 1
	fi

	return 0
}
1294
1295#
1296# Firstly, create a pool with 5 datasets. Then, create a single zone and
1297# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1298# and a zvol device to the zone.
1299#
1300# $1 zone name
1301# $2 zone root directory prefix
1302# $3 zone ip
1303#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	# ZONE_CTR, ZONE_POOL, DISKS and MINVDEVSIZE come from the test
	# environment.
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	# Remove any previous zone of this name, then make sure the zone
	# root directory exists with zoneadm-acceptable permissions.
	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo  "terminal=dtterm" >> $sysidcfg
	echo  "network_interface=primary {" >> $sysidcfg
	echo  "hostname=$zone_name" >> $sysidcfg
	echo  "}" >> $sysidcfg
	echo  "name_service=NONE" >> $sysidcfg
	# Pre-hashed root password for unattended first boot.
	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo  "security_policy=NONE" >> $sysidcfg
	echo  "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1407
1408#
1409# Reexport TESTPOOL & TESTPOOL(1-4)
1410#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	# Redefine TESTPOOL and TESTPOOL1..TESTPOOL4 to point at the
	# per-zone containers, mounting any that are not yet mounted.
	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# eval is required so the variable NAME (TESTPOOL$i)
			# is constructed dynamically; the escaped \$ defers
			# expansion until eval runs.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1431
1432#
1433# Verify a given disk is online or offline
1434#
# Return 0 if pool/disk matches expected state, 1 otherwise
1436#
function check_state # pool disk state{online,offline}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Find the device line in the status output and match the expected
	# state case-insensitively; the function's status is the result.
	zpool status -v $pool | grep "$disk" | grep -i "$state" \
	    > /dev/null 2>&1
}
1448
1449#
1450# Get the mountpoint of snapshot
1451# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1452# as its mountpoint
1453#
function snapshot_mountpoint
{
	typeset snapname=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# Both the filesystem part and the snapshot part must be present
	# and non-empty.
	typeset fs=${snapname%@*}
	typeset snap=${snapname#*@}

	if [[ $snapname != *@* || -z $fs || -z $snap ]]; then
		log_fail "Error name of snapshot '$snapname'."
	fi

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1471
1472#
1473# Given a pool and file system, this function will verify the file system
1474# using the zdb internal tool. Note that the pool is exported and imported
1475# to ensure it has consistent state.
1476#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	# Remaining arguments are extra directories to search for vdev
	# files on re-import.
	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	# Export/import cycle guarantees zdb sees a consistent on-disk state.
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	# -cudi: checksum all metadata, dump uberblock, datasets and
	# intent logs; any non-zero exit means corruption was detected.
	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1510
1511#
1512# Given a pool, and this function list all disks in the pool
1513#
1514function get_disklist # pool
1515{
1516	typeset disklist=""
1517
1518	disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1519	    grep -v "\-\-\-\-\-" | \
1520	    egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1521
1522	echo $disklist
1523}
1524
1525# /**
1526#  This function kills a given list of processes after a time period. We use
1527#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
1528#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1529#  would be listed as FAIL, which we don't want : we're happy with stress tests
1530#  running for a certain amount of time, then finishing.
1531#
1532# @param $1 the time in seconds after which we should terminate these processes
1533# @param $2..$n the processes we wish to terminate.
1534# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	for pid in $cpids; do
		# Only signal processes that are still running.
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
1554
1555#
1556# Verify a given hotspare disk is inuse or avail
1557#
# Return 0 if pool/disk matches expected state, 1 otherwise
1559#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset expected=$3

	# Look the device up in the "spares" section of zpool status.
	cur_state=$(get_device_state $pool $disk "spares")

	if [[ $expected == ${cur_state} ]]; then
		return 0
	fi
	return 1
}
1573
1574#
1575# Wait until a hotspare transitions to a given state or times out.
1576#
1577# Return 0 when  pool/disk matches expected state, 1 on timeout.
1578#
# Wait until a hotspare transitions to a given state or times out.
#
# $1 - pool name
# $2 - disk (an optional /dev/dsk/ prefix is stripped)
# $3 - expected state (inuse|avail)
# $4 - timeout in seconds (default 60)
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	# The original '${2#$/DEV_DSKDIR/}' was a typo: '$/' is a literal
	# dollar sign, so the device-path prefix was never stripped. Use
	# the same prefix handling as the check_*_state helpers.
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the state matches or time runs out.
	while [[ $i -lt $timeout ]]; do
		if check_hotspare_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1598
1599#
1600# Verify a given slog disk is inuse or avail
1601#
# Return 0 if pool/disk matches expected state, 1 otherwise
1603#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset expected=$3

	# Look the device up in the "logs" section of zpool status.
	cur_state=$(get_device_state $pool $disk "logs")

	if [[ $expected == ${cur_state} ]]; then
		return 0
	fi
	return 1
}
1617
1618#
1619# Verify a given vdev disk is inuse or avail
1620#
# Return 0 if pool/disk matches expected state, 1 otherwise
1622#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset expected=$3

	# Look the device up in the main pool section of zpool status.
	cur_state=$(get_device_state $pool $disk)

	if [[ $expected == ${cur_state} ]]; then
		return 0
	fi
	return 1
}
1636
1637#
1638# Wait until a vdev transitions to a given state or times out.
1639#
1640# Return 0 when  pool/disk matches expected state, 1 on timeout.
1641#
# Wait until a vdev transitions to a given state or times out.
#
# $1 - pool name
# $2 - disk (an optional /dev/dsk/ prefix is stripped)
# $3 - expected state (online|offline|unavail)
# $4 - timeout in seconds (default 60)
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	# The original '${2#$/DEV_DSKDIR/}' was a typo: '$/' is a literal
	# dollar sign, so the device-path prefix was never stripped. Use
	# the same prefix handling as the check_*_state helpers.
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the state matches or time runs out.
	while [[ $i -lt $timeout ]]; do
		if check_vdev_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1661
1662#
1663# Check the output of 'zpool status -v <pool>',
1664# and to see if the content of <token> contain the <keyword> specified.
1665#
# Return 0 if the token's line contains the keyword, 1 otherwise
1667#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}

	# Extract the status line labeled "<token>:" (e.g. "scan:",
	# "remove:"); 'scan' is assigned globally, not typeset.
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	# Case-insensitive keyword match decides the return status.
	echo $scan | grep -i "$keyword" > /dev/null 2>&1

	return $?
}
1684
1685#
1686# These 6 following functions are instance of check_pool_status()
1687#	is_pool_resilvering - to check if the pool is resilver in progress
1688#	is_pool_resilvered - to check if the pool is resilver completed
1689#	is_pool_scrubbing - to check if the pool is scrub in progress
1690#	is_pool_scrubbed - to check if the pool is scrub completed
1691#	is_pool_scrub_stopped - to check if the pool is scrub stopped
1692#	is_pool_scrub_paused - to check if the pool has scrub paused
1693#	is_pool_removing - to check if the pool is removing a vdev
1694#	is_pool_removed - to check if the pool is remove completed
1695#
function is_pool_resilvering #pool <verbose>
{
	# Status of the last command is the function's status.
	check_pool_status "$1" "scan" "resilver in progress since " $2
}
1701
function is_pool_resilvered #pool <verbose>
{
	# Status of the last command is the function's status.
	check_pool_status "$1" "scan" "resilvered " $2
}
1707
function is_pool_scrubbing #pool <verbose>
{
	# Status of the last command is the function's status.
	check_pool_status "$1" "scan" "scrub in progress since " $2
}
1713
function is_pool_scrubbed #pool <verbose>
{
	# Status of the last command is the function's status.
	check_pool_status "$1" "scan" "scrub repaired" $2
}
1719
function is_pool_scrub_stopped #pool <verbose>
{
	# Status of the last command is the function's status.
	check_pool_status "$1" "scan" "scrub canceled" $2
}
1725
function is_pool_scrub_paused #pool <verbose>
{
	# Status of the last command is the function's status.
	check_pool_status "$1" "scan" "scrub paused since " $2
}
1731
function is_pool_removing #pool
{
	# Status of the last command is the function's status.
	check_pool_status "$1" "remove" "in progress since "
}
1737
function is_pool_removed #pool
{
	# Status of the last command is the function's status.
	check_pool_status "$1" "remove" "completed on"
}
1743
# Poll the pool's health property until it reports DEGRADED, or give up
# after $2 (default 30) seconds.
function wait_for_degraded
{
	typeset pool=$1
	typeset timeout=${2:-30}
	typeset t0=$SECONDS

	until [[ $(get_pool_prop health $pool) == "DEGRADED" ]]; do
		log_note "$pool is not yet degraded."
		sleep 1
		if ((SECONDS - t0 > $timeout)); then
			log_note "$pool not degraded after $timeout seconds."
			return 1
		fi
	done

	return 0
}
1762
1763#
1764# Wait for a pool to be scrubbed
1765#
1766# $1 pool name
1767# $2 number of seconds to wait (optional)
1768#
1769# Returns true when pool has been scrubbed, or false if there's a timeout or if
1770# no scrub was done.
1771#
function wait_scrubbed
{
	typeset pool=${1:-$TESTPOOL}
	# The header above documents an optional timeout in $2 and a false
	# return on timeout, but the original loop ignored it and could
	# spin forever. Honor it, defaulting to 300 seconds.
	typeset -i timeout=${2:-300}
	typeset -i elapsed=0

	while ! is_pool_scrubbed $pool ; do
		if ((elapsed >= timeout)); then
			return 1
		fi
		log_must sleep 1
		((elapsed += 1))
	done

	return 0
}
1780
1781#
# Use create_pool()/destroy_pool() to clean up the information in
# the given disks to avoid slice overlapping.
1784#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Remove any leftover pool of the same name, then create and
	# destroy a throwaway pool to rewrite the labels on the vdevs.
	poolexists $pool && destroy_pool $pool

	create_pool $pool $@
	destroy_pool $pool

	return 0
}
1798
1799#/**
1800# A function to find and locate free disks on a system or from given
1801# disks as the parameter. It works by locating disks that are in use
1802# as swap devices and dump devices, and also disks listed in /etc/vfstab
1803#
1804# $@ given disks to find which are free, default is all disks in
1805# the test system
1806#
1807# @return a string containing the list of available disks
1808#*/
1809function find_disks
1810{
1811	sfi=/tmp/swaplist.$$
1812	dmpi=/tmp/dumpdev.$$
1813	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1814
1815	swap -l > $sfi
1816	dumpadm > $dmpi 2>/dev/null
1817
1818# write an awk script that can process the output of format
1819# to produce a list of disks we know about. Note that we have
1820# to escape "$2" so that the shell doesn't interpret it while
1821# we're creating the awk script.
1822# -------------------
1823	cat > /tmp/find_disks.awk <<EOF
1824#!/bin/nawk -f
1825	BEGIN { FS="."; }
1826
1827	/^Specify disk/{
1828		searchdisks=0;
1829	}
1830
1831	{
1832		if (searchdisks && \$2 !~ "^$"){
1833			split(\$2,arr," ");
1834			print arr[1];
1835		}
1836	}
1837
1838	/^AVAILABLE DISK SELECTIONS:/{
1839		searchdisks=1;
1840	}
1841EOF
1842#---------------------
1843
1844	chmod 755 /tmp/find_disks.awk
1845	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
1846	rm /tmp/find_disks.awk
1847
1848	unused=""
1849	for disk in $disks; do
1850	# Check for mounted
1851		grep "${disk}[sp]" /etc/mnttab >/dev/null
1852		(($? == 0)) && continue
1853	# Check for swap
1854		grep "${disk}[sp]" $sfi >/dev/null
1855		(($? == 0)) && continue
1856	# check for dump device
1857		grep "${disk}[sp]" $dmpi >/dev/null
1858		(($? == 0)) && continue
1859	# check to see if this disk hasn't been explicitly excluded
1860	# by a user-set environment variable
1861		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
1862		(($? == 0)) && continue
1863		unused_candidates="$unused_candidates $disk"
1864	done
1865	rm $sfi
1866	rm $dmpi
1867
1868# now just check to see if those disks do actually exist
1869# by looking for a device pointing to the first slice in
1870# each case. limit the number to max_finddisksnum
1871	count=0
1872	for disk in $unused_candidates; do
1873		if [ -b /dev/dsk/${disk}s0 ]; then
1874		if [ $count -lt $max_finddisksnum ]; then
1875			unused="$unused $disk"
1876			# do not impose limit if $@ is provided
1877			[[ -z $@ ]] && ((count = count + 1))
1878		fi
1879		fi
1880	done
1881
1882# finally, return our disk list
1883	echo $unused
1884}
1885
1886#
1887# Add specified user to specified group
1888#
1889# $1 group name
1890# $2 user name
1891# $3 base of the homedir (optional)
1892#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	if [[ -z $gname || -z $uname ]]; then
		log_fail "group name or user name are not defined."
	fi

	log_must useradd -g $gname -d $basedir/$uname -m $uname
	# Give the account a no-password (unlocked) login.
	log_must passwd -N $uname

	return 0
}
1908
1909#
1910# Delete the specified user.
1911#
1912# $1 login name
1913# $2 base of the homedir (optional)
1914#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	[[ -z $user ]] && log_fail "login name is necessary."

	# Only remove accounts that actually exist.
	if id $user > /dev/null 2>&1; then
		log_must userdel $user
	fi

	# Clean up the home directory as well.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
1932
1933#
1934# Select valid gid and create specified group.
1935#
1936# $1 group name
1937#
function add_group #<group_name>
{
	typeset group=$1

	[[ -z $group ]] && log_fail "group name is necessary."

	# Assign 100 as the base gid and walk upward until groupadd finds
	# a free one; exit code 4 means the gid was already taken.
	typeset -i gid=100
	while : ; do
		groupadd -g $gid $group > /dev/null 2>&1
		case $? in
			0) return 0 ;;
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
1959
1960#
1961# Delete the specified group.
1962#
1963# $1 group name
1964#
function del_group #<group_name>
{
	typeset grp=$1

	[[ -z $grp ]] && log_fail "group name is necessary."

	# groupmod doubles as an existence probe: exit 6 means the group
	# does not exist, exit 9 means the name exists and may be removed.
	groupmod -n $grp $grp > /dev/null 2>&1
	case $? in
		6) return 0 ;;
		9) log_must groupdel $grp ;;
		*) return 1 ;;
	esac

	return 0
}
1984
1985#
1986# This function will return true if it's safe to destroy the pool passed
1987# as argument 1. It checks for pools based on zvols and files, and also
1988# files contained in a pool that may have a different mountpoint.
1989#
1990function safe_to_destroy_pool { # $1 the pool name
1991
1992	typeset pool=""
1993	typeset DONT_DESTROY=""
1994
1995	# We check that by deleting the $1 pool, we're not
1996	# going to pull the rug out from other pools. Do this
1997	# by looking at all other pools, ensuring that they
1998	# aren't built from files or zvols contained in this pool.
1999
2000	for pool in $(zpool list -H -o name)
2001	do
2002		ALTMOUNTPOOL=""
2003
2004		# this is a list of the top-level directories in each of the
2005		# files that make up the path to the files the pool is based on
2006		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2007			awk '{print $1}')
2008
2009		# this is a list of the zvols that make up the pool
2010		ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
2011		    | awk '{print $1}')
2012
2013		# also want to determine if it's a file-based pool using an
2014		# alternate mountpoint...
2015		POOL_FILE_DIRS=$(zpool status -v $pool | \
2016					grep / | awk '{print $1}' | \
2017					awk -F/ '{print $2}' | grep -v "dev")
2018
2019		for pooldir in $POOL_FILE_DIRS
2020		do
2021			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2022					grep "${pooldir}$" | awk '{print $1}')
2023
2024			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2025		done
2026
2027
2028		if [ ! -z "$ZVOLPOOL" ]
2029		then
2030			DONT_DESTROY="true"
2031			log_note "Pool $pool is built from $ZVOLPOOL on $1"
2032		fi
2033
2034		if [ ! -z "$FILEPOOL" ]
2035		then
2036			DONT_DESTROY="true"
2037			log_note "Pool $pool is built from $FILEPOOL on $1"
2038		fi
2039
2040		if [ ! -z "$ALTMOUNTPOOL" ]
2041		then
2042			DONT_DESTROY="true"
2043			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2044		fi
2045	done
2046
2047	if [ -z "${DONT_DESTROY}" ]
2048	then
2049		return 0
2050	else
2051		log_note "Warning: it is not safe to destroy $1!"
2052		return 1
2053	fi
2054}
2055
2056#
2057# Get the available ZFS compression options
2058# $1 option type zfs_set|zfs_compress
2059#
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
			gzip-6 gzip-7 gzip-8 gzip-9"

	case $1 in
		zfs_compress)	COMPRESS_OPTS="on lzjb" ;;
		zfs_set)	COMPRESS_OPTS="on off lzjb" ;;
	esac

	typeset valid_opts="$COMPRESS_OPTS"
	# Append the gzip variants when this zfs build advertises them.
	if zfs get 2>&1 | grep gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
2078
2079#
2080# Verify zfs operation with -p option work as expected
2081# $1 operation, value could be create, clone or rename
2082# $2 dataset type, value could be fs or vol
2083# $3 dataset name
2084# $4 new dataset name
2085#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# For create there is no source dataset; the target
			# is the name that should come into existence.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# Cloning requires an existing snapshot source.
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# Renaming requires an existing non-snapshot source.
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must zfs destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2149
2150#
2151# Get configuration of pool
2152# $1 pool name
2153# $2 config name
2154#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# The last column of 'zpool list' is the altroot ("-" when unset);
	# exported/altroot pools need 'zdb -e' instead of 'zdb -C'.
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	# Strip the single quotes zdb wraps around string values.
	# NOTE(review): the bare quote character in these two patterns
	# depends on ksh quoting subtleties -- confirm before touching.
	if [[ -n $value ]] ; then
		value=${value#'}
		value=${value%'}
	fi
	echo $value

	return 0
}
2180
2181#
# Private function. Randomly select one of the items from the arguments.
2183#
2184# $1 count
2185# $2-n string
2186#
function _random_get
{
	typeset -i total=$1
	shift

	typeset list="$@"
	typeset -i pick
	((pick = RANDOM % total + 1))

	# Fields are space-separated; a pick past the last field yields "".
	echo "$list" | cut -f $pick -d ' '
}
2199
2200#
# Randomly select one item from the arguments, with one extra slot that
# selects none of them (an empty "NONE" result)
2202#
function random_get_with_non
{
	typeset -i cnt=$#
	# Add one extra slot beyond the last argument; when it is picked,
	# _random_get's cut produces an empty string (the "NONE" choice).
	# The original '((cnt =+ 1))' assigned +1 to cnt instead of
	# incrementing it, so only the first argument could ever be chosen.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2210
2211#
# Randomly select one item from the arguments (no empty/NONE choice)
2213#
function random_get
{
	typeset -i argc=$#
	_random_get "$argc" "$@"
}
2218
2219#
2220# Detect if the current system support slog
2221#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	# A dry-run pool creation with a log vdev reveals whether slogs
	# are supported without actually creating a pool.
	typeset -i ret=0
	zpool create -n $pool $vdev log $sdev > /dev/null 2>&1 || ret=1
	rm -r $dir

	return $ret
}
2240
2241#
2242# The function will generate a dataset name with specific length
2243# $1, the length of the name
2244# $2, the base string to construct the name
2245#
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i reps
	typeset result=""

	# Number of copies of the base string needed to reach the
	# requested length, rounded up (ceiling division).
	((reps = (len + baselen - 1) / baselen))

	while ((reps > 0)); do
		result="${result}$basestr"
		((reps -= 1))
	done

	echo $result
}
2267
2268#
2269# Get cksum tuple of dataset
2270# $1 dataset name
2271#
2272# sample zdb output:
2273# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2274# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2275# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2276# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
	typeset result
	# Flush outstanding writes so zdb sees the current on-disk state,
	# then pull the cksum field off the dataset's rootbp line.
	sync
	result=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
		| awk -F= '{print $7}')
	echo $result
}
2285
2286#
2287# Get cksum of file
# $1 file path
2289#
function checksum
{
	# Print only the CRC field of cksum(1)'s output.
	cksum $1 | awk '{print $1}'
}
2296
2297#
2298# Get the given disk/slice state from the specific field of the pool
2299#
2300function get_device_state #pool disk field("", "spares","logs")
2301{
2302	typeset pool=$1
2303	typeset disk=${2#/dev/dsk/}
2304	typeset field=${3:-$pool}
2305
2306	state=$(zpool status -v "$pool" 2>/dev/null | \
2307		nawk -v device=$disk -v pool=$pool -v field=$field \
2308		'BEGIN {startconfig=0; startfield=0; }
2309		/config:/ {startconfig=1}
2310		(startconfig==1) && ($1==field) {startfield=1; next;}
2311		(startfield==1) && ($1==device) {print $2; exit;}
2312		(startfield==1) &&
2313		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2314	echo $state
2315}
2316
2317
2318#
2319# print the given directory filesystem type
2320#
2321# $1 directory name
2322#
function get_fstype
{
	typeset dir=$1

	[[ -n $dir ]] || log_fail "Usage: get_fstype <directory>"

	#
	#  $ df -n /
	#  /		  : ufs
	#
	df -n $dir | awk '{print $3}'
}
2337
2338#
2339# Given a disk, label it to VTOC regardless what label was on the disk
2340# $1 disk
2341#
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	# Build a format(1M) command script appropriate for the platform;
	# x86 additionally needs an fdisk partition created first.
	if [[ $arch == "i386" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file

		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	# Feed the scripted answers to format non-interactively.
	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	#
	# wait the format to finish
	#
	sleep 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2385
2386#
2387# check if the system was installed as zfsroot or not
# return: 0 if the root filesystem is zfs, non-zero otherwise
2389#
function is_zfsroot
{
	# Succeeds when df reports the root filesystem type as zfs.
	df -n / | grep zfs > /dev/null 2>&1
}
2395
2396#
2397# get the root filesystem name if it's zfsroot system.
2398#
2399# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""
	# The dataset mounted at "/" with type zfs is the root filesystem.
	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootfs"

	# Sanity-check that the dataset is visible to zfs(1M).
	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2415
2416#
2417# get the rootfs's pool name
2418# return:
2419#       rootpool name
2420#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""
	rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		 /etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootpool"

	if zfs list $rootfs > /dev/null 2>&1; then
		# The pool is the first component of the dataset name.
		rootpool=${rootfs%%/*}
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2438
2439#
2440# Check if the given device is physical device
2441#
function is_physical_device #device
{
	typeset device=${1#/dev/dsk/}
	device=${device#/dev/rdsk/}

	# cXtYdZ / cXdY style names (hex digits) count as physical devices.
	echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
}
2450
2451#
2452# Get the directory path of given device
2453#
# Get the directory path of given device: the parent directory for
# file-backed vdevs, /dev/dsk for physical devices.
function get_device_dir #device
{
	typeset device=$1

	# Call the predicate directly. The original wrapped it in $(...),
	# which executes the function's (empty) stdout as a command and
	# only works via the obscure rule that an empty expansion's exit
	# status is that of the command substitution.
	if ! is_physical_device $device ; then
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		echo $device
	else
		echo "/dev/dsk"
	fi
}
2467
2468#
2469# Get the package name
2470#
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	# Turn path separators into dashes: a/b -> SUNWstc-a-b.
	echo "SUNWstc-${dirpath//\//-}"
}
2477
2478#
2479# Get the word numbers from a string separated by white space
2480#
function get_word_count
{
	# The unquoted expansion collapses whitespace before wc counts.
	printf '%s\n' $1 | wc -w
}
2485
2486#
2487# To verify if the require numbers of disks is given
2488#
function verify_disk_count
{
	typeset -i min=${2:-1}
	typeset -i count
	count=$(get_word_count "$1")

	# Flag the test as untested (not failed) when too few disks exist.
	if ((count < min)); then
		log_untested "A minimum of $min disks is required to run." \
			" You specified $count disk(s)"
	fi
}
2500
function ds_is_volume
{
	# True when the dataset's type property is "volume".
	[[ $(get_prop type $1) = "volume" ]]
}
2507
function ds_is_filesystem
{
	# True when the dataset's type property is "filesystem".
	[[ $(get_prop type $1) = "filesystem" ]]
}
2514
function ds_is_snapshot
{
	# True when the dataset's type property is "snapshot".
	[[ $(get_prop type $1) = "snapshot" ]]
}
2521
2522#
2523# Check if Trusted Extensions are installed and enabled
2524#
function is_te_enabled
{
	# The labeld service is only enabled when Trusted Extensions are
	# active; grep's match (printed, as before) decides the result.
	if svcs -H -o state labeld 2>/dev/null | grep "enabled"; then
		return 0
	fi
	return 1
}
2534
2535# Utility function to determine if a system has multiple cpus.
function is_mp
{
	# More than one psrinfo line means more than one cpu.
	typeset -i ncpu=$(psrinfo | wc -l)
	((ncpu > 1))
}
2540
2541function get_cpu_freq
2542{
2543	psrinfo -v 0 | awk '/processor operates at/ {print $6}'
2544}
2545
2546# Run the given command as the user provided.
2547function user_run
2548{
2549	typeset user=$1
2550	shift
2551
2552	eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
2553	return $?
2554}
2555
2556#
2557# Check if the pool contains the specified vdevs
2558#
2559# $1 pool
2560# $2..n <vdev> ...
2561#
2562# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2563# vdevs is not in the pool, and 2 if pool name is missing.
2564#
2565function vdevs_in_pool
2566{
2567	typeset pool=$1
2568	typeset vdev
2569
2570        if [[ -z $pool ]]; then
2571                log_note "Missing pool name."
2572                return 2
2573        fi
2574
2575	shift
2576
2577	typeset tmpfile=$(mktemp)
2578	zpool list -Hv "$pool" >$tmpfile
2579	for vdev in $@; do
2580		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
2581		[[ $? -ne 0 ]] && return 1
2582	done
2583
2584	rm -f $tmpfile
2585
2586	return 0;
2587}
2588
#
# Print the largest of the given integer arguments.
#
# $1..n integers to compare (at least one required)
#
function get_max
{
	# Note: the old 'typeset -l' lowercases strings in ksh93/bash
	# (it does not mean "long"); plain typeset is what was intended.
	typeset i max=$1
	shift

	for i in "$@"; do
		# Pure arithmetic expansion; no subshell/echo needed.
		max=$((max > i ? max : i))
	done

	echo $max
}
2600
#
# Print the smallest of the given integer arguments.
#
# $1..n integers to compare (at least one required)
#
function get_min
{
	# Note: the old 'typeset -l' lowercases strings in ksh93/bash
	# (it does not mean "long"); plain typeset is what was intended.
	typeset i min=$1
	shift

	for i in "$@"; do
		# Pure arithmetic expansion; no subshell/echo needed.
		min=$((min < i ? min : i))
	done

	echo $min
}
2612
2613#
2614# Generate a random number between 1 and the argument.
2615#
2616function random
2617{
2618        typeset max=$1
2619        echo $(( ($RANDOM % $max) + 1 ))
2620}
2621
2622# Write data that can be compressed into a directory
2623function write_compressible
2624{
2625	typeset dir=$1
2626	typeset megs=$2
2627	typeset nfiles=${3:-1}
2628	typeset bs=${4:-1024k}
2629	typeset fname=${5:-file}
2630
2631	[[ -d $dir ]] || log_fail "No directory: $dir"
2632
2633	log_must eval "fio \
2634	    --name=job \
2635	    --fallocate=0 \
2636	    --minimal \
2637	    --randrepeat=0 \
2638	    --buffer_compress_percentage=66 \
2639	    --buffer_compress_chunk=4096 \
2640	    --directory=$dir \
2641	    --numjobs=$nfiles \
2642	    --rw=write \
2643	    --bs=$bs \
2644	    --filesize=$megs \
2645	    --filename_format='$fname.\$jobnum' >/dev/null"
2646}
2647
# Print the object number (the inode number on ZFS) of the given path.
function get_objnum
{
	typeset target=$1

	[[ -e $target ]] || log_fail "No such file or directory: $target"
	stat -c %i $target
}
2657
2658#
2659# Sync data to the pool
2660#
2661# $1 pool name
2662# $2 boolean to force uberblock (and config including zpool cache file) update
2663#
2664function sync_pool #pool <force>
2665{
2666	typeset pool=${1:-$TESTPOOL}
2667	typeset force=${2:-false}
2668
2669	if [[ $force == true ]]; then
2670		log_must zpool sync -f $pool
2671	else
2672		log_must zpool sync $pool
2673	fi
2674
2675	return 0
2676}
2677
2678#
2679# Prints the current time in seconds since UNIX Epoch.
2680#
2681function current_epoch
2682{
2683	printf '%(%s)T'
2684}
2685
2686#
2687# Get decimal value of global uint32_t variable using mdb.
2688#
2689function mdb_get_uint32
2690{
2691	typeset variable=$1
2692	typeset value
2693
2694	value=$(mdb -k -e "$variable/X | ::eval .=U")
2695	if [[ $? -ne 0 ]]; then
2696		log_fail "Failed to get value of '$variable' from mdb."
2697		return 1
2698	fi
2699
2700	echo $value
2701	return 0
2702}
2703
2704#
2705# Set global uint32_t variable to a decimal value using mdb.
2706#
2707function mdb_set_uint32
2708{
2709	typeset variable=$1
2710	typeset value=$2
2711
2712	mdb -kw -e "$variable/W 0t$value" > /dev/null
2713	if [[ $? -ne 0 ]]; then
2714		echo "Failed to set '$variable' to '$value' in mdb."
2715		return 1
2716	fi
2717
2718	return 0
2719}
2720
2721#
2722# Set global scalar integer variable to a hex value using mdb.
2723# Note: Target should have CTF data loaded.
2724#
2725function mdb_ctf_set_int
2726{
2727	typeset variable=$1
2728	typeset value=$2
2729
2730	mdb -kw -e "$variable/z $value" > /dev/null
2731	if [[ $? -ne 0 ]]; then
2732		echo "Failed to set '$variable' to '$value' in mdb."
2733		return 1
2734	fi
2735
2736	return 0
2737}
2738
2739#
2740# Set a global system tunable (64-bit value)
2741#
2742# $1 tunable name
2743# $2 tunable values
2744#
2745function set_tunable64
2746{
2747	set_tunable_impl "$1" "$2" Z
2748}
2749
2750#
2751# Set a global system tunable (32-bit value)
2752#
2753# $1 tunable name
2754# $2 tunable values
2755#
2756function set_tunable32
2757{
2758	set_tunable_impl "$1" "$2" W
2759}
2760
#
# Platform-specific worker for set_tunable32/64.
#
# $1 tunable name
# $2 tunable value
# $3 mdb write command (W for 32-bit, Z for 64-bit; SunOS only)
# $4 (optional) kernel module the tunable lives in; defaults to zfs
#
# Return 0 on success, 1 on failure or missing arguments.
#
function set_tunable_impl
{
	typeset tunable="$1"
	typeset value="$2"
	typeset mdb_cmd="$3"
	typeset module="${4:-zfs}"

	[[ -z "$tunable" ]] && return 1
	[[ -z "$value" ]] && return 1
	[[ -z "$mdb_cmd" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -w "$zfs_tunables/$tunable" ]] || return 1
		echo -n "$value" > "$zfs_tunables/$tunable"
		return "$?"
		;;
	SunOS)
		# The old '-eq' compared the strings arithmetically (both
		# evaluate to 0), so the guard was always true; a string
		# comparison is what is intended here.
		[[ "$module" == "zfs" ]] || return 1
		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
		return "$?"
		;;
	esac
}
2786
2787#
2788# Get a global system tunable
2789#
2790# $1 tunable name
2791#
2792function get_tunable
2793{
2794	get_tunable_impl "$1"
2795}
2796
#
# Platform-specific worker for get_tunable.
#
# $1 tunable name
# $2 (optional) kernel module the tunable lives in; defaults to zfs
#
# Prints the value on success; returns 1 on failure or missing arguments.
#
function get_tunable_impl
{
	typeset tunable="$1"
	typeset module="${2:-zfs}"
	typeset value

	[[ -z "$tunable" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -f "$zfs_tunables/$tunable" ]] || return 1
		cat $zfs_tunables/$tunable
		return "$?"
		;;
	SunOS)
		# Assign separately from 'typeset' so $? reflects mdb's
		# exit status rather than the declaration's (which could
		# mask a failure and make this check dead).
		value=$(mdb -k -e "$tunable/X | ::eval .=U")
		if [[ $? -ne 0 ]]; then
			log_fail "Failed to get value of '$tunable' from mdb."
			return 1
		fi
		echo $value
		return 0
		;;
	esac

	return 1
}
2824