# vim: filetype=sh
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.

. ${STF_SUITE}/include/logapi.kshlib

ZFS=${ZFS:-/sbin/zfs}
ZPOOL=${ZPOOL:-/sbin/zpool}
os_name=`uname -s`

# Determine if a test has the necessary requirements to run

function test_requires
{
        integer unsupported=0
        unsupported_list=""
        until [[ $# -eq 0 ]];do
                var_name=$1
                cmd=$(eval echo \$${1})
                if [[ -z "$cmd" ]] ; then
                        print $var_name is not set
                        unsupported_list="$var_name $unsupported_list"
                        ((unsupported=unsupported+1))
                fi
                shift
        done
        if (( unsupported > 0 )) ; then
                log_unsupported "$unsupported_list commands are unsupported"
        else
                log_note "All commands are supported"
        fi
}
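
# Example usage (illustrative; ZFS and ZPOOL are the command-path
# variables defined by this suite):
#
#   test_requires ZFS ZPOOL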

# Determine whether a dataset is mounted
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if the dataset is mounted; 1 otherwise

function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			if [[ "$1" == "/"* ]] ; then
				for out in $($ZFS mount | $AWK '{print $2}') ; do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $($ZFS mount | $AWK '{print $1}') ; do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			# a = device, b = "on", c = "mount point", d = flags
			$MOUNT | $GREP $fstype | while read a b c d
			do
				[[ "$1" == "$a" || "$1" == "$c" ]] && return 0
			done
		;;
	esac

	return 1
}
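
# Example (illustrative; $TESTPOOL/$TESTFS is defined by the test
# environment):
#
#   if ismounted $TESTPOOL/$TESTFS; then
#       log_note "$TESTPOOL/$TESTFS is mounted"
#   fi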

# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs

function mounted
{
	ismounted $1 $2
	(( $? == 0 )) && return 0
	return 1
}

# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs

function unmounted
{
	ismounted $1 $2
	(( $? == 1 )) && return 0
	return 1
}

# split line on ","
#
# $1 - line to split

function splitline
{
	$ECHO $1 | $SED "s/,/ /g"
}
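
# Example (illustrative):
#
#   for dev in $(splitline "da0,da1,da2"); do
#       log_note "device: $dev"
#   done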

function default_setup
{
	default_setup_noexit "$@"

	log_pass
}

#
# Given a list of disks, setup storage pools and datasets.
#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3

	if is_global_zone; then
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
		log_must $ZPOOL create -f $TESTPOOL $disklist
	else
		reexport_pool
	fi

	$RM -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
	$MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		$RM -rf $TESTDIR1  || \
			log_unresolved Could not remove $TESTDIR1
		$MKDIR -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		log_must $ZFS create $TESTPOOL/$TESTCTR
		log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
		log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must $ZFS set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			log_must $ZFS create $TESTPOOL/$TESTVOL
		fi
	fi
}
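
# Example (illustrative; $DISKS is supplied by the test environment;
# a setup script would pick one of these forms):
#
#   default_setup "$DISKS"             # pool and filesystem only
#   default_setup "$DISKS" "" "true"   # pool, filesystem and volume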

#
# Given a list of disks, setup a storage pool, file system and
# a container.
#
function default_container_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true"
}

#
# Given a list of disks, setup a storage pool, file system
# and a volume.
#
function default_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "" "true"
}

#
# Given a list of disks, setup a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true" "true"
}

#
# Create a snapshot on a filesystem or volume. By default the snapshot
# is created on a filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."

	log_must $ZFS snapshot $fs_vol@$snap
}

#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone   # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	[[ -z $snap ]] && \
		log_fail "Snapshot name is undefined."
	[[ -z $clone ]] && \
		log_fail "Clone name is undefined."

	log_must $ZFS clone $snap $clone
}
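
# Example (illustrative; uses the suite defaults):
#
#   create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
#   create_clone $TESTPOOL/$TESTFS@$TESTSNAP $TESTPOOL/$TESTCLONE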

function default_mirror_setup
{
	default_mirror_setup_noexit $1 $2 $3

	log_pass
}

#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
#   $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
	readonly func="default_mirror_setup_noexit"
	typeset primary=$1
	typeset secondary=$2

	[[ -z $primary ]] && \
		log_fail "$func: No parameters passed"
	[[ -z $secondary ]] && \
		log_fail "$func: No secondary partition passed"
	[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
	log_must $ZPOOL create -f $TESTPOOL mirror $@
	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}

#
# create a number of mirrors.
# We create a number ($1) of 2-way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted
# @parameters: $1 the number of mirrors to create
#  $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	while (( nmirrors > 0 )); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors
		log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		(( nmirrors = nmirrors - 1 ))
	done
}
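
# Example (illustrative; builds the pools $TESTPOOL2 and $TESTPOOL1,
# each a 2-way mirror over one disk pair):
#
#   setup_mirrors 2 da0 da1 da2 da3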

#
# create a number of raidz pools.
# We create a number ($1) of raidz pools, each using a pair of disks named
# on the command line. These pools are *not* mounted
# @parameters: $1 the number of pools to create
#  $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	while (( nraidzs > 0 )); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs
		log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		(( nraidzs = nraidzs - 1 ))
	done
}

#
# Destroy the configured testpool mirrors.
# The mirrors are of the form ${TESTPOOL}{number}
# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
	default_cleanup_noexit

	log_pass
}

#
# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
# $1 the list of disks
#
function default_raidz_setup
{
	typeset disklist="$*"
	set -A disks $disklist

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
	log_must $ZPOOL create -f $TESTPOOL raidz $1 $2 $3
	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}

#
# Common function used to cleanup storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests don't
# impact the execution of the next set.

function default_cleanup
{
	default_cleanup_noexit

	log_pass
}

function all_pools
{
	cmd="$ZPOOL list -H -o name | $GREP 'testpool'"
	eval $cmd
}

#
# Returns 0 if the system contains any pools that must not be modified by the
# ZFS tests.
#
function other_pools_exist
{
	typeset pool_count=`$ZPOOL list -H | $GREP -v '^testpool' | $WC -l`
	[ "$pool_count" -ne 0 ]
}

function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		for pool in $(all_pools); do
			if safe_to_destroy_pool $pool; then
				destroy_pool $pool
			fi
		done
	else
		typeset fs=""
		for fs in $($ZFS list -H -o name \
		    | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must $ZFS destroy -Rf $fs
		done

		# Clean up here to avoid leaving garbage directories behind.
		for fs in $($ZFS list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must $RM -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems properties
		# to their default values
		#
		for fs in $($ZFS list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must $ZFS set reservation=none $fs
				log_must $ZFS set recordsize=128K $fs
				log_must $ZFS set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must $ZFS set checksum=on $fs
				fi
				log_must $ZFS set compression=off $fs
				log_must $ZFS set atime=on $fs
				log_must $ZFS set devices=off $fs
				log_must $ZFS set exec=on $fs
				log_must $ZFS set setuid=on $fs
				log_must $ZFS set readonly=off $fs
				log_must $ZFS set snapdir=hidden $fs
				log_must $ZFS set aclmode=groupmask $fs
				log_must $ZFS set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must $RM -rf $TESTDIR
}


#
# Common function used to cleanup storage pools, file systems
# and containers.
#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must $RM -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}

#
# Common function used to destroy a snapshot of a file system or volume.
# Defaults to the file system's default snapshot.
#
# $1 snapshot name
#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
	fi

	#
	# The mountpoint reported by 'get_prop' is not the real mountpoint
	# while the snapshot is unmounted, so first check that the snapshot
	# is actually mounted on the current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(( $? != 0 )) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must $ZFS destroy $snap
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must $RM -rf $mtpt
}

#
# Common function used to destroy a clone.
#
# $1 clone name
#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		log_fail "'$clone' does not exist."
	fi

	# For the same reason as in destroy_snapshot
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(( $? != 0 )) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	log_must $ZFS destroy $clone
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must $RM -rf $mtpt
}

# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name

function snapexists
{
	$ZFS list -H -t snapshot "$1" > /dev/null 2>&1
	return $?
}

#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @param:
#	$1 dataset whose property is being set
# 	$2 property to set
#	$3 value to set property to
# @return:
#	0 if the property could be set.
#	non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (( $# < 3 )); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi
	typeset output=
	output=$($ZFS set $2=$3 $1 2>&1)
	typeset rv=$?
	if (( rv != 0 )); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}
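
# Example (illustrative):
#
#   dataset_setprop $TESTPOOL/$TESTFS compression on || \
#       log_fail "cannot set compression on $TESTPOOL/$TESTFS"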

#
# Assign suite defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
#   0 if the dataset has been altered.
#   1 if no pool name was passed in.
#   2 if the dataset could not be found.
#   3 if the dataset could not have its properties set.
#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	for confset in $($ZFS list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP && $WRAPPER != *"crypto"* ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}

#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
	(( $@ )) || log_fail $@
}
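
# Example (illustrative; the expression is evaluated arithmetically):
#
#   assert "$filenum > 0"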

function wipe_partition_table #<whole_disk_name> [<whole_disk_name> ...]
{
	while [[ -n $* ]]; do
		typeset diskname=$1
		[ ! -e $diskname ] && log_fail "ERROR: $diskname doesn't exist"
		if gpart list ${diskname#/dev/} >/dev/null 2>&1; then
			wait_for 5 1 $GPART destroy -F $diskname
		else
			log_note "No GPT partitions detected on $diskname"
		fi
		log_must $GPART create -s gpt $diskname
		shift
	done
}

#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units such as 100mb or 3gb; it is
# converted below to the form that gpart(8) expects.
#
function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	set -A devmap a b c d e f g h
	[[ -z $slicenum || -z $size || -z $disk ]] && \
		log_fail "The slice, size or disk name is unspecified."

	size=`$ECHO $size| sed s/mb/M/`
	size=`$ECHO $size| sed s/m/M/`
	size=`$ECHO $size| sed s/gb/G/`
	size=`$ECHO $size| sed s/g/G/`
	[[ -n $start ]] && start="-b $start"
	log_must $GPART add -t efi $start -s $size -i $slicenum $disk
	return 0
}
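
# Example (illustrative; adds a 2G partition with index 1 at the
# default start offset):
#
#   set_partition 1 "" 2G /dev/da0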

function get_disk_size #<disk>
{
	typeset disk=$1
	diskinfo $disk | awk '{print $3}'
}

function get_available_disk_size #<disk>
{
	typeset disk=$1
	raw_size=`get_disk_size $disk`
	(( available_size = raw_size * 95 / 100 ))
	echo $available_size
}

#
# Get the end cyl of the given slice
# #TODO: fix this to be GPT-compatible if we want to use the SMI WRAPPER.  This
# function is not necessary on FreeBSD
#
function get_endslice #<disk> <slice>
{
	log_fail "get_endslice has not been updated for GPT partitions"
}

#
# Get the first LBA that is beyond the end of the given partition
function get_partition_end #<disk> <partition_index>
{
	typeset disk=$1
	typeset partition_index=$2
	export partition_index
	$GPART show $disk | $AWK \
		'/^[ \t]/ && $3 ~ ENVIRON["partition_index"] {print $1 + $2}'
}


#
# Given a size, disk and total number of partitions, this function formats the
# disk partitions from 1 to the total partition number with the same specified
# size.
#
function partition_disk	#<part_size> <whole_disk_name>	<total_parts>
{
	typeset -i i=1
	typeset part_size=$1
	typeset disk_name=$2
	typeset total_parts=$3
	typeset cyl

	wipe_partition_table $disk_name
	while (( i <= $total_parts )); do
		set_partition $i "" $part_size $disk_name
		(( i = i+1 ))
	done
}

function size_of_file # fname
{
	typeset fname=$1
	sz=`stat -f '%z' $fname`
	[[ -z "$sz" ]] && log_fail "stat($fname) failed"
	$ECHO $sz
	return 0
}

#
# This function keeps writing files into directories until either
# $FILE_WRITE returns an error, or the maximum number of directories
# has each been populated with the maximum number of files.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [blocksz] [num_writes] [data]
#
# Return value: 0 on success
#		non 0 on error
#
# Where :
#	destdir:    is the directory where everything is to be created under
#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
#	filenum:    the maximum number of files per subdirectory
#	blocksz:    number of bytes per block
#	num_writes: number of blocks to write
#	data:	    the data that will be written
#
#	E.g.
#	fill_fs /testdir 20 25 1024 256 0
#
# Note: blocksz * num_writes equals the size of each testfile
#
function fill_fs # destdir dirnum filenum blocksz num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i blocksz=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i retval=0
	typeset -i dn=0 # current dir number
	typeset -i fn=0 # current file number
	while (( retval == 0 )); do
		(( dirnum >= 0 && dn >= dirnum )) && break
		typeset curdir=$destdir/$dn
		log_must $MKDIR -p $curdir
		for (( fn = 0; $fn < $filenum && $retval == 0; fn++ )); do
			log_cmd $FILE_WRITE -o create -f $curdir/$TESTFILE.$fn \
			    -b $blocksz -c $num_writes -d $data
			retval=$?
		done
		(( dn = dn + 1 ))
	done
	return $retval
}
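
# Example (illustrative; fills 5 directories with 10 files each, every
# file written as 64 blocks of 1024 bytes of zeros):
#
#   fill_fs $TESTDIR 5 10 1024 64 0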

#
# Simple function to get the specified property. If unable to
# get the property, log a note and return 1.
#
# Note property is in 'parsable' format (-p)
#
function get_prop # property dataset
{
	typeset prop_val
	typeset prop=$1
	typeset dataset=$2

	prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
	if [[ $? -ne 0 ]]; then
		log_note "Unable to get $prop property for dataset $dataset"
		return 1
	fi

	$ECHO $prop_val
	return 0
}

#
# Simple function to return the lesser of two values.
#
function min
{
	typeset first_arg=$1
	typeset second_arg=$2

	if (( first_arg < second_arg )); then
		$ECHO $first_arg
	else
		$ECHO $second_arg
	fi
	return 0
}

#
# Simple function to get the specified property of a pool. If unable to
# get the property, log a note and return 1.
#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \
			$AWK '{print $3}')
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			"$pool"
			return 1
		fi
	else
		log_note "Pool $pool does not exist."
		return 1
	fi

	$ECHO $prop_val
	return 0
}

# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name

function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	$ZPOOL list -H "$pool" > /dev/null 2>&1
	return $?
}

# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n  dataset name
function datasetexists
{
	if (( $# == 0 )); then
		log_note "No dataset name given."
		return 1
	fi

	while (( $# > 0 )); do
		$ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 || \
			return $?
		shift
	done

	return 0
}

# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n  dataset name
function datasetnonexists
{
	if (( $# == 0 )); then
		log_note "No dataset name given."
		return 1
	fi

	while (( $# > 0 )); do
		$ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 && \
			return 1
		shift
	done

	return 0
}

#
# Given a mountpoint, or a dataset name, determine if it is shared.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	for mtpt in `$SHARE | $AWK '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	typeset stat=$($SVCS -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}

#
# Given a mountpoint, determine if it is not shared.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
	typeset fs=$1

	is_shared $fs
	if (( $? == 0 )); then
		return 1
	fi

	return 0
}

#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
	typeset fs=$1

	is_shared $fs
	if (( $? == 0 )); then
		log_must $ZFS unshare $fs
	fi

	return 0
}

#
# Check the NFS server status and bring it online if necessary.
#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only a real share operation can bring the NFS server
		# online permanently.
		#
		typeset dummy=$TMPDIR/dummy

		if [[ -d $dummy ]]; then
			log_must $RM -rf $dummy
		fi

		log_must $MKDIR $dummy
		log_must $SHARE $dummy

		#
		# Wait for the FMRI's status to settle into its final state.
		# While in transition, an asterisk (*) is appended to the
		# instance's status, and unsharing at that point would revert
		# the status to 'DIS' again.
		#
		# Wait at least 1 second.
		#
		log_must $SLEEP 1
		timeout=10
		while [[ $timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must $SLEEP 1

			(( timeout -= 1 ))
		done

		log_must $UNSHARE $dummy
		log_must $RM -rf $dummy
	fi

	log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
}

#
# Verify whether the calling process is in the global zone
#
# Return 0 if in the global zone, 1 if in a non-global zone
#
function is_global_zone
{
	typeset cur_zone=$($ZONENAME 2>/dev/null)

	# Zones are not supported on FreeBSD.
	if [[ $os_name == "FreeBSD" ]]; then
		return 0
	fi

	if [[ $cur_zone != "global" ]]; then
		return 1
	fi
	return 0
}

#
# Verify whether the test is permitted to run in the
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both" (no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported
#
function verify_runnable # zone limit
{
	typeset limit=$1

	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				break
				;;
			local)  log_unsupported "Test is unable to run from \
					global zone."
				break
				;;
			*)      log_note "Warning: unknown limit $limit - use both."
				;;
		esac
	else
		case $limit in
			local|both)
				break
				;;
			global) log_unsupported "Test is unable to run from \
					local zone."
				break
				;;
			*)      log_note "Warning: unknown limit $limit - use both."
				;;
		esac

		reexport_pool
	fi

	return 0
}

# Return 0 if the pool is created successfully; 1 if no pool name was given.
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list

function create_pool #pool devs_list
{
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	if poolexists $pool ; then
		destroy_pool $pool
	fi

	if is_global_zone ; then
		[[ -d /$pool ]] && $RM -rf /$pool
		log_must $ZPOOL create -f $pool $@
	fi

	return 0
}

# Return 0 if the pool is destroyed successfully; 1 otherwise.
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.

function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			mtpt=$(get_prop mountpoint "$pool")
			log_must $ZPOOL destroy -f $pool

			[[ -d $mtpt ]] && \
				log_must $RM -rf $mtpt
		else
			log_note "Pool $pool does not exist, skipping destroy."
			return 1
		fi
	fi

	return 0
}

#
# Create file vdevs.
# By default this generates sparse vdevs 10GB in size, for performance.
#
function create_vdevs # vdevs
{
	typeset vdsize=10G

	[ -n "$VDEV_SIZE" ] && vdsize=$VDEV_SIZE
	rm -f $@ || return 1
	truncate -s $vdsize $@
}
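
# Example (illustrative; creates two sparse file vdevs and mirrors them):
#
#   log_must create_vdevs $TMPDIR/vdev0 $TMPDIR/vdev1
#   log_must $ZPOOL create -f $TESTPOOL mirror $TMPDIR/vdev0 $TMPDIR/vdev1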

#
# First, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
# and a zvol device to the zone.
#
# $1 zone name
# $2 zone root directory prefix
# $3 zone ip
#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create a pool and 5 containers within it
	#
	[[ -d /$pool_name ]] && $RM -rf /$pool_name
	log_must $ZPOOL create -f $pool_name $DISKS
	while (( i < cntctr )); do
		log_must $ZFS create $pool_name/$prefix_ctr$i
		(( i += 1 ))
	done

	# create a zvol
	log_must $ZFS create -V 1g $pool_name/zone_zvol

	#
	# If the current system supports slog, add a slog device to the pool
	#
	typeset sdevs="$TMPDIR/sdev1 $TMPDIR/sdev2"
	log_must create_vdevs $sdevs
	log_must $ZPOOL add $pool_name log mirror $sdevs

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have its mountpoint set to 'legacy'
	# log_must $ZFS create $pool_name/zfs_filesystem
	# log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem

	[[ -d $zone_root ]] && \
		log_must $RM -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must $MKDIR -p -m 0700 $zone_root/$zone_name

	# Create the zone configuration file and configure the zone
	#
	typeset zone_conf=$TMPDIR/zone_conf.${TESTCASE_ID}
	$ECHO "create" > $zone_conf
	$ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
	$ECHO "set autoboot=true" >> $zone_conf
	i=0
	while (( i < cntctr )); do
		$ECHO "add dataset" >> $zone_conf
		$ECHO "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		$ECHO "end" >> $zone_conf
		(( i += 1 ))
	done

	# add our zvol to the zone
	$ECHO "add device" >> $zone_conf
	$ECHO "set match=/dev/zvol/$pool_name/zone_zvol" >> $zone_conf
	$ECHO "end" >> $zone_conf

	# add a corresponding zvol to the zone
	$ECHO "add device" >> $zone_conf
	$ECHO "set match=/dev/zvol/$pool_name/zone_zvol" >> $zone_conf
	$ECHO "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# $ECHO "add fs" >> $zone_conf
	# $ECHO "set type=zfs" >> $zone_conf
	# $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
	# $ECHO "end" >> $zone_conf

	$ECHO "verify" >> $zone_conf
	$ECHO "commit" >> $zone_conf
	log_must $ZONECFG -z $zone_name -f $zone_conf
	log_must $RM -f $zone_conf

	# Install the zone
	$ZONEADM -z $zone_name install
	if (( $? == 0 )); then
		log_note "SUCCESS: $ZONEADM -z $zone_name install"
	else
		log_fail "FAIL: $ZONEADM -z $zone_name install"
	fi

	# Install sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	$ECHO "system_locale=C" > $sysidcfg
	$ECHO "terminal=dtterm" >> $sysidcfg
	$ECHO "network_interface=primary {" >> $sysidcfg
	$ECHO "hostname=$zone_name" >> $sysidcfg
	$ECHO "}" >> $sysidcfg
	$ECHO "name_service=NONE" >> $sysidcfg
	$ECHO "root_password=mo791xfZ/SFiw" >> $sysidcfg
	$ECHO "security_policy=NONE" >> $sysidcfg
	$ECHO "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must $ZONEADM -z $zone_name boot
}

#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	while (( i < cntctr )); do
		if (( i == 0 )); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must $ZFS mount $TESTPOOL
			fi
		else
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval $ZFS mount \$TESTPOOL$i
			fi
		fi
		(( i += 1 ))
	done
}

#
# Wait for something to return true, checked by the caller.
#
function wait_for_checked # timeout dt <method> [args...]
{
	typeset timeout=$1
	typeset dt=$2
	shift; shift
	typeset -i start=$(date '+%s')
	typeset -i endtime

	log_note "Waiting $timeout seconds (checked every $dt seconds) for: $*"
	((endtime = start + timeout))
	while :; do
		$*
		[ $? -eq 0 ] && return
		curtime=$(date '+%s')
		[ $curtime -gt $endtime ] && return 1
		sleep $dt
	done
	return 0
}

#
# Wait for something to return true.
#
function wait_for # timeout dt <method> [args...]
{
	typeset timeout=$1
	typeset dt=$2
	shift; shift

	wait_for_checked $timeout $dt $* || \
		log_fail "ERROR: Timed out waiting for: $*"
}
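
# Example (illustrative; checks every 2 seconds, for up to 60 seconds,
# until the pool reports as healthy):
#
#   wait_for 60 2 is_pool_healthy $TESTPOOL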

#
# Verify a given disk is online or offline
#
# Return 0 if the pool/disk matches the expected state, 1 otherwise
# stateexpr is a regex like ONLINE or REMOVED|UNAVAIL
#
function check_state # pool disk stateexpr
{
	typeset pool=$1
	typeset disk=${2#/dev/}
	disk=${disk#/dev/}
	disk=${disk#/dev/}
	typeset stateexpr=$3

	$ZPOOL status -v $pool | grep "$disk"  \
	    | egrep -i "$stateexpr" > /dev/null 2>&1

	return $?
}

#
# Wait for a given disk to leave a state
#
function wait_for_state_exit
{
	typeset pool=$1
	typeset disk=$2
	typeset state=$3

	while check_state "$pool" "$disk" "$state"; do
		$SLEEP 1
	done
}

#
# Wait for a given disk to enter a state
#
function wait_for_state_enter
{
	typeset -i timeout=$1
	typeset pool=$2
	typeset disk=$3
	typeset state=$4

	log_note "Waiting up to $timeout seconds for $disk to become $state ..."
	for ((; $timeout > 0; timeout=$timeout-1)); do
		check_state $pool "$disk" "$state"
		[ $? -eq 0 ] && return
		$SLEEP 1
	done
	log_must $ZPOOL status $pool
	log_fail "ERROR: Disk $disk not marked as $state in $pool"
}

#
# Get the mountpoint of a snapshot
#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if [[ $dataset != *@* ]]; then
		log_fail "Invalid snapshot name '$dataset'."
	fi

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	if [[ -z $fs || -z $snap ]]; then
		log_fail "Invalid snapshot name '$dataset'."
	fi

	$ECHO $(get_prop mountpoint $fs)/$(get_snapdir_name)/$snap
}

function pool_maps_intact # pool
{
	typeset pool="$1"

	if ! $ZDB -bcv $pool; then
		return 1
	fi
	return 0
}

function filesys_has_zil # filesystem
{
	typeset filesys="$1"

	if ! $ZDB -ivv $filesys | $GREP "ZIL header"; then
		return 1
	fi
	return 0
}

#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="$TMPDIR/zdbout.${TESTCASE_ID}"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling $ZDB to verify filesystem '$filesys'"
	log_must $ZPOOL export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must $ZPOOL import $search_path $pool

	$ZDB -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: $ZDB -cudi $filesys"
		$CAT $zdbout
		log_fail "$ZDB detected errors with: '$filesys'"
	fi

	log_must $RM -rf $zdbout
}

#
# Given a pool, this function lists all disks in the pool
#
function get_disklist # pool
{
	typeset disklist=""

	disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4 ) {print $1}' | \
		$GREP -v "\-\-\-\-\-" | \
		$EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$" )

	$ECHO $disklist
}

#
# Destroy all existing metadevices and state database
#
function destroy_metas
{
	typeset metad

	for metad in $($METASTAT -p | $AWK '{print $1}'); do
		log_must $METACLEAR -rf $metad
	done

	for metad in $($METADB | $CUT -f6 | $GREP dev | $UNIQ); do
		log_must $METADB -fd $metad
	done
}

# /**
#  This function kills a given list of processes after a time period. We use
#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
#  would be listed as FAIL, which we don't want: we're happy with stress tests
#  running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes ($cpids). " \
		"It could take dozens of minutes, please be patient ..."
	log_must $SLEEP $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	for pid in $cpids; do
		$PS -p $pid > /dev/null 2>&1
		if (( $? == 0 )); then
			log_must $KILL -USR1 $pid
		fi
	done
}

#
# Check whether the current OS supports a specified feature or not
#
# return 0 if the current OS version is in the unsupported list, 1 otherwise
#
# $1 unsupported target OS versions
#
function check_version # <OS version>
{
	typeset unsupported_vers="$@"
	typeset ver
	typeset cur_ver=`$UNAME -r`

	for ver in $unsupported_vers; do
		[[ "$cur_ver" == "$ver" ]] && return 0
	done

	return 1
}

#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 if the pool/disk matches the expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/}
	disk=${disk#/dev/}
	disk=${disk#/dev/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "spares")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}

#
# Verify a given slog disk is in the expected state
#
# Return 0 if the pool/disk matches the expected state, 1 otherwise
#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/}
	disk=${disk#/dev/}
	disk=${disk#/dev/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "logs")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}

#
# Verify a given vdev disk is in the expected state
#
# Return 0 if the pool/disk matches the expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/}
	disk=${disk#/dev/}
	disk=${disk#/dev/}
	typeset state=$3

	if [[ $WRAPPER == *"smi"* ]]; then
		$ECHO $disk | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
		if (( $? == 0 )); then
			disk=${disk}s2
		fi
	fi

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}

#
# Check the output of 'zpool status -v <pool>' to see whether the content
# of <token> contains the specified <keyword>.
#
# Return 0 if it does, 1 otherwise
#
function check_pool_status # pool token keyword
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3

	$ZPOOL status -v "$pool" 2>/dev/null | \
		$NAWK -v token="$token:" '($1==token) {print $0}' | \
		$GREP -i "$keyword" >/dev/null 2>&1

	return $?
}

function vdev_pool_error_count
{
	typeset errs=$1
	if [ -z "$2" ]; then
		test $errs -gt 0; ret=$?
	else
		test $errs -eq $2; ret=$?
	fi
	log_debug "vdev_pool_error_count: errs='$errs' \$2='$2' ret='$ret'"
	return $ret
}

#
# Generate a pool status error file suitable for pool_errors_from_file.
# If the pool is healthy, returns 0.  Otherwise, the caller must handle the
# returned temporary file appropriately.
#
function pool_error_file # <pool>
{
	typeset pool="$1"

	typeset tmpfile=$TMPDIR/pool_status.${TESTCASE_ID}
	$ZPOOL status -x $pool > ${tmpfile}
	echo $tmpfile
}

#
# Evaluates <file>, counting the number of errors.  If a vdev is specified,
# only that vdev's errors are counted.  Returns the total number.  <file>
# will be deleted on exit.
#
function pool_errors_from_file # <file> [vdev]
{
	typeset file=$1
	shift
	typeset checkvdev="$1"

	typeset line
	typeset -i fetchbegin=1
	typeset -i errnum=0
	typeset -i c_read=0
	typeset -i c_write=0
	typeset -i c_cksum=0

	cat ${file} | $EGREP -v "pool:" | while read line; do
		if (( $fetchbegin != 0 )); then
			$ECHO $line | $GREP "NAME" >/dev/null 2>&1
			(( $? == 0 )) && (( fetchbegin = 0 ))
			continue
		fi

		if [[ -n $checkvdev ]]; then
			$ECHO $line | $GREP $checkvdev >/dev/null 2>&1
			(( $? != 0 )) && continue
			c_read=`$ECHO $line | $AWK '{print $3}'`
			c_write=`$ECHO $line | $AWK '{print $4}'`
			c_cksum=`$ECHO $line | $AWK '{print $5}'`
			if [ $c_read != 0 ] || [ $c_write != 0 ] || \
			    [ $c_cksum != 0 ]
			then
				(( errnum = errnum + 1 ))
			fi
			break
		fi

		c_read=`$ECHO $line | $AWK '{print $3}'`
		c_write=`$ECHO $line | $AWK '{print $4}'`
		c_cksum=`$ECHO $line | $AWK '{print $5}'`
		if [ $c_read != 0 ] || [ $c_write != 0 ] || \
		    [ $c_cksum != 0 ]
		then
			(( errnum = errnum + 1 ))
		fi
	done

	rm -f $file
	echo $errnum
}

#
# Returns whether the vdev has the given number of errors.
# If the number is unspecified, any non-zero number returns true.
#
function vdev_has_errors # pool vdev [errors]
{
	typeset pool=$1
	typeset vdev=$2
	typeset tmpfile=$(pool_error_file $pool)
	log_note "Original pool status:"
	cat $tmpfile

	typeset -i errs=$(pool_errors_from_file $tmpfile $vdev)
	vdev_pool_error_count $errs $3
}

#
# Returns whether the pool has the given number of errors.
# If the number is unspecified, any non-zero number returns true.
#
function pool_has_errors # pool [errors]
{
	typeset pool=$1
	typeset tmpfile=$(pool_error_file $pool)
	log_note "Original pool status:"
	cat $tmpfile

	typeset -i errs=$(pool_errors_from_file $tmpfile)
	vdev_pool_error_count $errs $2
}

#
# Returns whether clearing $pool at $vdev (if given) succeeds.
#
function pool_clear_succeeds
{
	typeset pool="$1"
	typeset vdev=$2

	$ZPOOL clear $pool $vdev
	! pool_has_errors $pool
}

#
# Return whether the pool is healthy
#
function is_pool_healthy # pool
{
	typeset pool=$1

	typeset healthy_output="pool '$pool' is healthy"
	typeset real_output=$($ZPOOL status -x $pool)

	if [[ "$real_output" == "$healthy_output" ]]; then
		return 0
	else
		typeset -i ret
		$ZPOOL status -x $pool | $GREP "state:" | \
			$GREP "FAULTED" >/dev/null 2>&1
		ret=$?
		(( $ret == 0 )) && return 1
		typeset l_scan
		typeset errnum
		l_scan=$($ZPOOL status -x $pool | $GREP "scan:")
		l_scan=${l_scan##*"with"}
		errnum=$($ECHO $l_scan | $AWK '{print $1}')
		if [ "$errnum" != "0" ]; then
			return 1
		else
			return 0
		fi
	fi
}

#
# The following five functions are instances of check_pool_status():
#	is_pool_resilvering - check if a resilver is in progress
#	is_pool_resilvered - check if a resilver has completed
#	is_pool_scrubbing - check if a scrub is in progress
#	is_pool_scrubbed - check if a scrub has completed
#	is_pool_scrub_stopped - check if a scrub has been canceled
#
function is_pool_resilvering #pool
{
	check_pool_status "$1" "scan" "resilver in progress"
	return $?
}

function is_pool_resilvered #pool
{
	check_pool_status "$1" "scan" "resilvered"
	return $?
}

function resilver_happened # pool
{
	typeset pool=$1
	is_pool_resilvering "$pool" || is_pool_resilvered "$pool"
}

function is_pool_scrubbing #pool
{
	check_pool_status "$1" "scan" "scrub in progress"
	return $?
}

function is_pool_scrubbed #pool
{
	check_pool_status "$1" "scan" "scrub repaired"
	return $?
}

function is_pool_scrub_stopped #pool
{
	check_pool_status "$1" "scan" "scrub canceled"
	return $?
}

function is_pool_state # pool state
{
	check_pool_status "$1" "state" "$2"
	return $?
}
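
# Example (illustrative; starts a scrub and waits for it to finish,
# polling every 10 seconds for up to 300 seconds):
#
#   log_must $ZPOOL scrub $TESTPOOL
#   wait_for 300 10 is_pool_scrubbed $TESTPOOL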

#
# Erase the partition tables and destroy any zfs labels
#
function cleanup_devices #vdevs
{
	for device in $@; do
		# Labelclear must happen first, otherwise it may interfere
		# with the teardown/setup of GPT labels.
		$ZPOOL labelclear -f $device
		# Only wipe partition tables for arguments that are disks,
		# as opposed to slices (which are valid arguments here).
		if geom disk list | grep -qx "Geom name: ${device#/dev/}"; then
			wipe_partition_table $device
		fi
	done
	return 0
}

#
# Verify the rsh connectivity to each remote host in RHOSTS.
#
# Return 0 if remote host is accessible; otherwise 1.
# $1 remote host name
# $2 username
#
function verify_rsh_connect #rhost, username
{
	typeset rhost=$1
	typeset username=$2
	typeset rsh_cmd="$RSH -n"
	typeset cur_user=

	$GETENT hosts $rhost >/dev/null 2>&1
	if (( $? != 0 )); then
		log_note "$rhost cannot be found in the" \
			"administrative database."
		return 1
	fi

	$PING $rhost 3 >/dev/null 2>&1
	if (( $? != 0 )); then
		log_note "$rhost is not reachable."
		return 1
	fi

	if (( ${#username} != 0 )); then
		rsh_cmd="$rsh_cmd -l $username"
		cur_user="given user \"$username\""
	else
		cur_user="current user \"`$LOGNAME`\""
	fi

	if ! $rsh_cmd $rhost $TRUE; then
		log_note "$RSH to $rhost is not accessible" \
			"with $cur_user."
		return 1
	fi

	return 0
}

#
# Verify the remote host connection via rsh after rebooting
# $1 remote host
#
function verify_remote
{
	rhost=$1

	#
	# The following loop waits for the remote system to reboot.
	# Each iteration waits 150 seconds and there are 5 iterations in
	# total, so the overall timeout is approximately 12.5 minutes.
	#
	typeset -i count=0
	while ! verify_rsh_connect $rhost; do
		sleep 150
		(( count = count + 1 ))
		if (( count > 5 )); then
			return 1
		fi
	done
	return 0
}

#
# Replacement function for /usr/bin/rsh. This function wraps /usr/bin/rsh
# and additionally returns the execution status of the last remote command.
#
# $1 username passed down to the -l option of /usr/bin/rsh
# $2 remote machine hostname
# $3... command string
#

function rsh_status
{
	typeset ruser=$1
	typeset rhost=$2
	typeset -i ret=0
	typeset cmd_str=""
	typeset rsh_str=""

	shift; shift
	cmd_str="$@"

	err_file=$TMPDIR/${rhost}.${TESTCASE_ID}.err
	if (( ${#ruser} == 0 )); then
		rsh_str="$RSH -n"
	else
		rsh_str="$RSH -n -l $ruser"
	fi

	$rsh_str $rhost /usr/local/bin/ksh93 -c "'$cmd_str; \
		print -u 2 \"status=\$?\"'" \
		>/dev/null 2>$err_file
	ret=$?
	if (( $ret != 0 )); then
		$CAT $err_file
		$RM -f $std_file $err_file
		log_fail "$RSH itself failed with exit code $ret..."
	fi

	ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
		$CUT -d= -f2)
	(( $ret != 0 )) && $CAT $err_file >&2

	$RM -f $err_file >/dev/null 2>&1
	return $ret
}

#
# Get the SUNWstc-fs-zfs package installation path in a remote host
# $1 remote host name
#
function get_remote_pkgpath
{
	typeset rhost=$1
	typeset pkgpath=""

	pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
			$CUT -d: -f2")

	$ECHO $pkgpath
}

#/**
# A function to find and locate free disks on a system or from given
# disks as the parameter.  Since the conversion to ATF, this function is
# superfluous; it is assumed that the user will supply an accurate list of
# disks to use.  So we just return the arguments.
#
# $@ given disks to find which are free
#
# @return a string containing the list of available disks
#*/
function find_disks
{
	(( first=0 ))
	for disk in $@; do
		[[ $first == 1 ]] && echo -n " "
		(( first=1 ))
		case $disk in
		/dev/*)	echo -n "$disk" ;;
		*)	echo -n "/dev/$disk" ;;
		esac
	done
}

# A function to set convenience variables for disks.
function set_disks
{
	set -A disk_array $(find_disks $DISKS)
	[[ -z "$DISK_ARRAY_LIMIT" ]] && typeset -i DISK_ARRAY_LIMIT=5

	export DISK=""
	typeset -i i=0
	while (( i < ${#disk_array[*]} && i <= $DISK_ARRAY_LIMIT )); do
		export DISK${i}="${disk_array[$i]}"
		DISKSARRAY="$DISKSARRAY ${disk_array[$i]}"
		(( i = i + 1 ))
	done
	export DISK_ARRAY_NUM=$i
	export DISKSARRAY
	export disk=$DISK0
}

#
# Add specified user to specified group
#
# $1 group name
# $2 user name
#
function add_user #<group_name> <user_name>
{
	typeset gname=$1
	typeset uname=$2

	if (( ${#gname} == 0 || ${#uname} == 0 )); then
		log_fail "group name or user name is not defined."
	fi

	# Check to see if the user exists.
	$ID $uname > /dev/null 2>&1 && return 0

	# Assign 1000 as the base uid
	typeset -i uid=1000
	while true; do
		typeset -i ret
		$USERADD -u $uid -g $gname -d /var/tmp/$uname -m $uname
		ret=$?
		case $ret in
			0) return 0 ;;
			# The uid is not unique
			65) ((uid += 1)) ;;
			*) return 1 ;;
		esac
		if [[ $uid == 65000 ]]; then
			log_fail "No user id available under 65000 for $uname"
		fi
	done

	return 0
}

#
# Delete the specified user.
#
# $1 login name
#
function del_user #<logname>
{
	typeset user=$1

	if (( ${#user} == 0 )); then
		log_fail "login name is necessary."
	fi

	if $ID $user > /dev/null 2>&1; then
		log_must $USERDEL $user
	fi

	return 0
}

#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group #<group_name>
{
	typeset group=$1

	if (( ${#group} == 0 )); then
		log_fail "group name is necessary."
	fi

	# See if the group already exists.
	$GROUPSHOW $group >/dev/null 2>&1
	[[ $? == 0 ]] && return 0

	# Assign 100 as the base gid
	typeset -i gid=100
	while true; do
		$GROUPADD -g $gid $group > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
			0) return 0 ;;
			# The gid is not unique
			65) ((gid += 1)) ;;
			*) return 1 ;;
		esac
		if [[ $gid == 65000 ]]; then
			log_fail "No group id available under 65000 for $group"
		fi
	done
}

#
# Delete the specified group.
#
# $1 group name
#
function del_group #<group_name>
{
	typeset grp=$1
	if (( ${#grp} == 0 )); then
		log_fail "group name is necessary."
	fi

	$GROUPDEL -n $grp > /dev/null 2>&1
	typeset -i ret=$?
	case $ret in
		# Group does not exist, or was deleted successfully.
		0|6|65) return 0 ;;
		# Name already exists as a group name
		9) log_must $GROUPDEL $grp ;;
		*) return 1 ;;
	esac

	return 0
}

#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
#
function safe_to_destroy_pool { # $1 the pool name

	typeset pool=""
	typeset DONT_DESTROY=""

	# We check that by deleting the $1 pool, we're not
	# going to pull the rug out from other pools. Do this
	# by looking at all other pools, ensuring that they
	# aren't built from files or zvols contained in this pool.

	for pool in $($ZPOOL list -H -o name)
	do
		ALTMOUNTPOOL=""

		# this is a list of the top-level directories in each of the files
		# that make up the path to the files the pool is based on
		FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
			$AWK '{print $1}')

		# this is a list of the zvols that make up the pool
		ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "/dev/zvol/$1$" | \
			$AWK '{print $1}')

		# also want to determine if it's a file-based pool using an
		# alternate mountpoint...
		POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
					$GREP / | $AWK '{print $1}' | \
					$AWK -F/ '{print $2}' | $GREP -v "dev")

		for pooldir in $POOL_FILE_DIRS
		do
			OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
					$GREP "${pooldir}$" | $AWK '{print $1}')

			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
		done


		if [ ! -z "$ZVOLPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ZVOLPOOL on $1"
		fi

		if [ ! -z "$FILEPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $FILEPOOL on $1"
		fi

		if [ ! -z "$ALTMOUNTPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
		fi
	done

	if [ -z "${DONT_DESTROY}" ]
	then
		return 0
	else
		log_note "Warning: it is not safe to destroy $1!"
		return 1
	fi
}

#
# Get IP address of hostname
# $1 hostname
#
function getipbyhost
{
	typeset ip
	ip=`$ARP $1 2>/dev/null | $AWK -F\) '{print $1}' \
		| $AWK -F\( '{print $2}'`
	$ECHO $ip
}
2231
2232#
2233# Setup iSCSI initiator to target
2234# $1 target hostname
2235#
2236function iscsi_isetup
2237{
2238	# check svc:/network/iscsi_initiator:default state, try to enable it
2239	# if the state is not ON
2240	typeset ISCSII_FMRI="svc:/network/iscsi_initiator:default"
2241	if [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) ]]; then
2242		log_must $SVCADM enable $ISCSII_FMRI
2243
2244		typeset -i retry=20
2245		while [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) && \
2246			( $retry -ne 0 ) ]]
2247		do
2248			(( retry = retry - 1 ))
2249			$SLEEP 1
2250		done
2251
2252		if [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) ]]; then
2253			log_fail "$ISCSII_FMRI service cannot be enabled!"
2254		fi
2255	fi
2256
2257	log_must $ISCSIADM add discovery-address $(getipbyhost $1)
2258	log_must $ISCSIADM modify discovery --sendtargets enable
2259	log_must $DEVFSADM -i iscsi
2260}
2261
2262#
2263# Check whether iscsi parameter is set as remote
2264#
2265# return 0 if iscsi is set as remote, otherwise 1
2266#
2267function check_iscsi_remote
2268{
2269	if [[ $iscsi == "remote" ]] ; then
2270		return 0
2271	else
2272		return 1
2273	fi
2274}
2275
2276#
2277# Check if a volume is a valid iSCSI target
2278# $1 volume name
2279# return 0 if it succeeds, otherwise 1
2280#
2281function is_iscsi_target
2282{
2283	typeset dataset=$1
2284	typeset target targets
2285
2286	[[ -z $dataset ]] && return 1
2287
2288	targets=$($ISCSITADM list target | $GREP "Target:" | $AWK '{print $2}')
2289	[[ -z $targets ]] && return 1
2290
2291	for target in $targets; do
2292		[[ $dataset == $target ]] && return 0
2293	done
2294
2295	return 1
2296}
2297
2298#
2299# Get the iSCSI name of a target
2300# $1 target name
2301#
2302function iscsi_name
2303{
2304	typeset target=$1
2305	typeset name
2306
2307	[[ -z $target ]] && log_fail "No parameter."
2308
2309	if ! is_iscsi_target $target ; then
2310		log_fail "Not a target."
2311	fi
2312
2313	name=$($ISCSITADM list target $target | $GREP "iSCSI Name:" \
2314		| $AWK '{print $2}')
2315
2316	$ECHO $name
2317}
2318
2319#
2320# check svc:/system/iscsitgt:default state, try to enable it if the state
2321# is not ON
2322#
2323function iscsitgt_setup
2324{
2325	log_must $RM -f $ISCSITGTFILE
2326	if [[ "ON" == $($SVCS -H -o sta $ISCSITGT_FMRI) ]]; then
2327		log_note "iscsitgt is already enabled"
2328		return
2329	fi
2330
2331	log_must $SVCADM enable -t $ISCSITGT_FMRI
2332
2333	typeset -i retry=20
2334	while [[ "ON" != $($SVCS -H -o sta $ISCSITGT_FMRI) && \
2335		( $retry -ne 0 ) ]]
2336	do
2337		$SLEEP 1
2338		(( retry = retry - 1 ))
2339	done
2340
2341	if [[ "ON" != $($SVCS -H -o sta $ISCSITGT_FMRI) ]]; then
2342		log_fail "$ISCSITGT_FMRI service cannot be enabled!"
2343	fi
2344
2345	log_must $TOUCH $ISCSITGTFILE
2346}
2347
2348#
2349# set DISABLED state of svc:/system/iscsitgt:default
2350# which is the most suitable state if $ISCSITGTFILE exists
2351#
2352function iscsitgt_cleanup
2353{
2354	if [[ -e $ISCSITGTFILE ]]; then
2355		log_must $SVCADM disable $ISCSITGT_FMRI
2356		log_must $RM -f $ISCSITGTFILE
2357	fi
2358}
2359
2360#
2361# Close iSCSI initiator to target
2362# $1 target hostname
2363#
2364function iscsi_iclose
2365{
2366	log_must $ISCSIADM modify discovery --sendtargets disable
2367	log_must $ISCSIADM remove discovery-address $(getipbyhost $1)
2368	$DEVFSADM -Cv
2369}
2370
2371#
2372# Get the available ZFS compression options
2373# $1 option type zfs_set|zfs_compress
2374#
2375function get_compress_opts
2376{
2377	typeset COMPRESS_OPTS
2378	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2379			gzip-6 gzip-7 gzip-8 gzip-9"
2380
2381	if [[ $1 == "zfs_compress" ]] ; then
2382		COMPRESS_OPTS="on lzjb"
2383	elif [[ $1 == "zfs_set" ]] ; then
2384		COMPRESS_OPTS="on off lzjb"
2385	fi
2386	typeset valid_opts="$COMPRESS_OPTS"
2387	$ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1
2388	if [[ $? -eq 0 ]]; then
2389		valid_opts="$valid_opts $GZIP_OPTS"
2390	fi
2391	$ECHO "$valid_opts"
2392}
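
# Example (a sketch; $TESTPOOL/$TESTFS is a placeholder dataset): exercise
# every compression value accepted by "zfs set" on this release:
#
#	for opt in $(get_compress_opts zfs_set); do
#		log_must $ZFS set compression=$opt $TESTPOOL/$TESTFS
#	done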
2393
2394#
2395# Check whether the zfs subcommand/option is supported
2396#
2397function check_opt_support #command, option
2398{
2399	typeset command=$1
2400	typeset option=$2
2401
2402	if [[ -z $command ]]; then
2403		return 0
2404	elif [[ -z $option ]]; then
2405		eval "$ZFS 2>&1 | $GREP '$command' > /dev/null 2>&1"
2406	else
2407		eval "$ZFS $command 2>&1 | $GREP -- '$option' | \
2408			$GREP -v -- 'User-defined' > /dev/null 2>&1"
2409	fi
2410	return $?
2411}
2412
2413#
2414# Check whether the zpool subcommand/option is supported
2415#
2416function check_zpool_opt_support #command, option
2417{
2418	typeset command=$1
2419	typeset option=$2
2420
2421	if [[ -z $command ]]; then
2422		return 0
2423	elif [[ -z $option ]]; then
2424		eval "$ZPOOL 2>&1 | $GREP '$command' > /dev/null 2>&1"
2425	else
2426		eval "$ZPOOL $command 2>&1 | $GREP -- '$option' > /dev/null 2>&1"
2427	fi
2428	return $?
2429}
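
# Example (a sketch): skip a test when the "zpool history" subcommand is
# not present in this release:
#
#	check_zpool_opt_support "history" || \
#		log_unsupported "zpool history is not supported"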
2430
2431#
2432# Verify that a zfs operation with the -p option works as expected
2433# $1 operation, value could be create, clone or rename
2434# $2 dataset type, value could be fs or vol
2435# $3 dataset name
2436# $4 new dataset name
2437#
2438function verify_opt_p_ops
2439{
2440	typeset ops=$1
2441	typeset datatype=$2
2442	typeset dataset=$3
2443	typeset newdataset=$4
2444
2445	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2446		log_fail "$datatype is not supported."
2447	fi
2448
2449	# check parameters accordingly
2450	case $ops in
2451		create)
2452			newdataset=$dataset
2453			dataset=""
2454			if [[ $datatype == "vol" ]]; then
2455				ops="create -V $VOLSIZE"
2456			fi
2457			;;
2458		clone)
2459			if [[ -z $newdataset ]]; then
2460				log_fail "newdataset should not be empty" \
2461					"when ops is $ops."
2462			fi
2463			log_must datasetexists $dataset
2464			log_must snapexists $dataset
2465			;;
2466		rename)
2467			if [[ -z $newdataset ]]; then
2468				log_fail "newdataset should not be empty" \
2469					"when ops is $ops."
2470			fi
2471			log_must datasetexists $dataset
2472			log_mustnot snapexists $dataset
2473			;;
2474		*)
2475			log_fail "$ops is not supported."
2476			;;
2477	esac
2478
2479	# make sure the upper level filesystem does not exist
2480	if datasetexists ${newdataset%/*} ; then
2481		log_must $ZFS destroy -rRf ${newdataset%/*}
2482	fi
2483
2484	# without -p option, operation will fail
2485	log_mustnot $ZFS $ops $dataset $newdataset
2486	log_mustnot datasetexists $newdataset ${newdataset%/*}
2487
2488	# with -p option, operation should succeed
2489	log_must $ZFS $ops -p $dataset $newdataset
2490	if ! datasetexists $newdataset ; then
2491		log_fail "-p option does not work for $ops"
2492	fi
2493
2494	# when $ops is create or clone, redoing the operation should still succeed
2495	if [[ $ops != "rename" ]]; then
2496		log_must $ZFS $ops -p $dataset $newdataset
2497	fi
2498
2499	return 0
2500}
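
# Example (a sketch; the dataset names are placeholders):
#
#	verify_opt_p_ops create fs $TESTPOOL/dir1/dir2/fs
#	verify_opt_p_ops rename fs $TESTPOOL/dir1/dir2/fs $TESTPOOL/new1/new2/fs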
2501
2502function get_disk_guid
2503{
2504	typeset diskname=$1
2505	lastcwd=$(pwd)
2506	cd /dev
2507	guid=$($ZDB -l ${diskname} | ${AWK} '/^    guid:/ {print $2}' | head -1)
2508	cd $lastcwd
2509	echo $guid
2510}
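
# Example (a sketch; "da0" is a placeholder disk that backs a pool):
#
#	guid=$(get_disk_guid da0)
#	log_must $ZPOOL offline $TESTPOOL $guid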
2511
2512#
2513# Get cachefile for a pool.
2514# Prints the cache file, if there is one.
2515# Returns 0 for a default zpool.cache, 1 for an explicit one, and 2 for none.
2516#
2517function cachefile_for_pool
2518{
2519	typeset pool=$1
2520
2521	cachefile=$(get_pool_prop cachefile $pool)
2522	[[ $? != 0 ]] && return 1
2523
2524	case "$cachefile" in
2525		none)	ret=2 ;;
2526		"-")
2527			ret=2
2528			for dir in /boot/zfs /etc/zfs; do
2529				if [[ -f "${dir}/zpool.cache" ]]; then
2530					cachefile="${dir}/zpool.cache"
2531					ret=0
2532					break
2533				fi
2534			done
2535			;;
2536		*)	ret=1;
2537	esac
2538	[[ $ret -eq 0 || $ret -eq 1 ]] && print "$cachefile"
2539	return $ret
2540}
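
# Example (a sketch): act on each possible cachefile state of $TESTPOOL:
#
#	cachefile=$(cachefile_for_pool $TESTPOOL)
#	case $? in
#		0) log_note "default cachefile: $cachefile" ;;
#		1) log_note "explicit cachefile: $cachefile" ;;
#		2) log_note "no cachefile in use" ;;
#	esac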
2541
2542#
2543# Assert that the pool is in the appropriate cachefile.
2544#
2545function assert_pool_in_cachefile
2546{
2547	typeset pool=$1
2548
2549	cachefile=$(cachefile_for_pool $pool)
2550	[ $? -ne 0 ] && log_fail "ERROR: Cachefile not created for '$pool'?"
2551	log_must test -e "${cachefile}"
2552	log_must $ZDB -U ${cachefile} -C ${pool}
2553}
2554
2555#
2556# Get the zdb options given the cachefile state of the pool.
2557#
2558function zdb_cachefile_opts
2559{
2560	typeset pool=$1
2561	typeset vdevdir=$2
2562	typeset opts
2563
2564	if poolexists "$pool"; then
2565		cachefile=$(cachefile_for_pool $pool)
2566		typeset -i ret=$?
2567		case $ret in
2568			0)	opts="-C" ;;
2569			1)	opts="-U $cachefile -C" ;;
2570			2)	opts="-eC" ;;
2571			*)	log_fail "Unknown return '$ret'" ;;
2572		esac
2573	else
2574		opts="-eC"
2575		[[ -n "$vdevdir" ]] && opts="$opts -p $vdevdir"
2576	fi
2577	echo "$opts"
2578}
2579
2580#
2581# Get configuration of pool
2582# $1 pool name
2583# $2 config name
2584#
2585function get_config
2586{
2587	typeset pool=$1
2588	typeset config=$2
2589	typeset vdevdir=$3
2590	typeset alt_root
2591	typeset zdb_opts
2592
2593	zdb_opts=$(zdb_cachefile_opts $pool $vdevdir)
2594	value=$($ZDB $zdb_opts $pool | $GREP "$config:" | $AWK -F: '{print $2}')
2595	if [[ -n $value ]] ; then
2596		value=${value#\'}
2597		value=${value%\'}
2598	else
2599		return 1
2600	fi
2601	echo $value
2602
2603	return 0
2604}
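
# Example (a sketch): read the pool_guid of $TESTPOOL from its label:
#
#	guid=$(get_config $TESTPOOL pool_guid) || \
#		log_fail "cannot read pool_guid"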
2605
2606#
2607# Private function. Randomly select one of the items from the arguments.
2608#
2609# $1 count
2610# $2-n string
2611#
2612function _random_get
2613{
2614	typeset cnt=$1
2615	shift
2616
2617	typeset str="$@"
2618	typeset -i ind
2619	((ind = RANDOM % cnt + 1))
2620
2621	typeset ret=$($ECHO "$str" | $CUT -f $ind -d ' ')
2622	$ECHO $ret
2623}
2624
2625#
2626# Randomly select one item from the arguments, plus an extra "NONE" choice
2627#
2628function random_get_with_non
2629{
2630	typeset -i cnt=$#
2631	((cnt += 1))
2632
2633	_random_get "$cnt" "$@"
2634}
2635
2636#
2637# Randomly select one item from the arguments, with no "NONE" choice
2638#
2639function random_get
2640{
2641	_random_get "$#" "$@"
2642}
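
# Example (a sketch; an empty result from random_get_with_non represents
# the extra "NONE" choice):
#
#	typeset val=$(random_get_with_non on off lzjb)
#	[[ -n $val ]] && log_must $ZFS set compression=$val $TESTPOOL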
2643
2644#
2645# Generate a dataset name of at least the specified length
2646# $1, the length of the name
2647# $2, the base string to construct the name
2648#
2649function gen_dataset_name
2650{
2651	typeset -i len=$1
2652	typeset basestr="$2"
2653	typeset -i baselen=${#basestr}
2654	typeset -i iter=0
2655	typeset l_name=""
2656
2657	if (( len % baselen == 0 )); then
2658		(( iter = len / baselen ))
2659	else
2660		(( iter = len / baselen + 1 ))
2661	fi
2662	while (( iter > 0 )); do
2663		l_name="${l_name}$basestr"
2664
2665		(( iter -= 1 ))
2666	done
2667
2668	$ECHO $l_name
2669}
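
# Example (a sketch): an 8-character base repeated 25 times yields a
# 200-character name, useful for name-length limit tests:
#
#	longname=$(gen_dataset_name 200 "abcdefgh")
#	log_must $ZFS create $TESTPOOL/$longname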
2670
2671#
2672# Ensure that a given path has been synced, not just ZIL committed.
2673#
2674# XXX On FreeBSD, the sync(8) command (via $SYNC) calls zfs_sync() which just
2675#     does a zil_commit(), as opposed to a txg_wait_synced().  For things that
2676#     require writing to their final destination (e.g. for intentional
2677#     corruption purposes), zil_commit() is not good enough.
2678#
2679function force_sync_path # path
2680{
2681	typeset path="$1"
2682
2683	log_must $ZPOOL export $TESTPOOL
2684	log_must $ZPOOL import -d $path $TESTPOOL
2685}
2686
2687#
2688# Get cksum tuple of dataset
2689# $1 dataset name
2690#
2691# zdb output is like below
2692# " Dataset pool/fs [ZPL], ID 978, cr_txg 2277, 19.0K, 5 objects,
2693# rootbp [L0 DMU objset] 400L/200P DVA[0]=<0:1880c00:200>
2694# DVA[1]=<0:341880c00:200> fletcher4 lzjb LE contiguous birth=2292 fill=5
2695# cksum=989930ccf:4014fe00c83:da5e388e58b4:1f7332052252ac "
2696#
2697function datasetcksum
2698{
2699	typeset cksum
2700	$SYNC
2701	cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
2702		| $AWK -F= '{print $6}')
2703	$ECHO $cksum
2704}
2705
2706#
2707# Get cksum of file
2708# $1 file path
2709#
2710function checksum
2711{
2712	typeset cksum
2713	cksum=$($CKSUM $1 | $AWK '{print $1}')
2714	$ECHO $cksum
2715}
2716
2717#
2718# Get the given disk/slice state from the specific field of the pool
2719#
2720function get_device_state #pool disk field("", "spares","logs")
2721{
2722	typeset pool=$1
2723	typeset disk=${2#/dev/}	# strip any leading /dev/ prefix
2726	typeset field=${3:-$pool}
2727
2728	state=$($ZPOOL status -v "$pool" 2>/dev/null | \
2729		$NAWK -v device=$disk -v pool=$pool -v field=$field \
2730		'BEGIN {startconfig=0; startfield=0; }
2731		/config:/ {startconfig=1}
2732		(startconfig==1)&&($1==field) {startfield=1; next;}
2733		(startfield==1)&&($1==device) {print $2; exit;}
2734		(startfield==1)&&(NF>=3)&&($(NF-1)=="was")&&($NF==device) {print $2; exit;}
2735		(startfield==1)&&($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2736	print $state
2737}
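
# Example (a sketch; "da1" is a placeholder leaf vdev of $TESTPOOL):
#
#	state=$(get_device_state $TESTPOOL da1)
#	[[ $state == "ONLINE" ]] || log_fail "da1 is $state, expected ONLINE"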
2738
2739
2740#
2741# print the filesystem type of the given directory
2742#
2743# $1 directory name
2744#
2745function get_fstype
2746{
2747	typeset dir=$1
2748
2749	if [[ -z $dir ]]; then
2750		log_fail "Usage: get_fstype <directory>"
2751	fi
2752
2753	$DF -T $dir | $AWK 'NR > 1 {print $2}'	# skip the df header line
2754}
2755
2756#
2757# Given a disk, label it to VTOC regardless of what label was on the disk
2758# $1 disk
2759#
2760function labelvtoc
2761{
2762	typeset disk=$1
2763	if [[ -z $disk ]]; then
2764		log_fail "The disk name is unspecified."
2765	fi
2766	typeset label_file=$TMPDIR/labelvtoc.${TESTCASE_ID}
2767	typeset arch=$($UNAME -p)
2768
2769	if [[ $arch == "i386" ]]; then
2770		$ECHO "label" > $label_file
2771		$ECHO "0" >> $label_file
2772		$ECHO "" >> $label_file
2773		$ECHO "q" >> $label_file
2774		$ECHO "q" >> $label_file
2775
2776		$FDISK -B $disk >/dev/null 2>&1
2777		# wait a while for fdisk to finish
2778		$SLEEP 60
2779	elif [[ $arch == "sparc" ]]; then
2780		$ECHO "label" > $label_file
2781		$ECHO "0" >> $label_file
2782		$ECHO "" >> $label_file
2783		$ECHO "" >> $label_file
2784		$ECHO "" >> $label_file
2785		$ECHO "q" >> $label_file
2786	else
2787		log_fail "unknown arch type"
2788	fi
2789
2790	$FORMAT -e -s -d $disk -f $label_file
2791	typeset -i ret_val=$?
2792	$RM -f $label_file
2793	#
2794	# wait for the format to finish
2795	#
2796	$SLEEP 60
2797	if (( ret_val != 0 )); then
2798		log_fail "unable to label $disk as VTOC."
2799	fi
2800
2801	return 0
2802}
2803
2804#
2805# Detect if the given filesystem property is supported in this release
2806#
2807# 0	Yes, it is supported
2808# !0	No, it is not supported
2809#
2810function fs_prop_exist
2811{
2812	typeset prop=$1
2813
2814	if [[ -z $prop ]]; then
2815		log_fail "Usage: fs_prop_exist <property>"
2816
2817		return 1
2818	fi
2819
2820	#
2821	# If the property is a shortened column name,
2822	# convert it to the standard name
2823	#
2824	case $prop in
2825		avail)		prop=available		;;
2826		refer)		prop=referenced		;;
2827		volblock)	prop=volblocksize	;;
2828		compress)	prop=compression	;;
2829		rdonly)		prop=readonly		;;
2830		recsize)	prop=recordsize		;;
2831		reserv)		prop=reservation	;;
2832		refreserv)	prop=refreservation	;;
2833	esac
2834
2835	#
2836	# The zfs get output looks like the following
2837	#
2838
2839	#
2840	# The following properties are supported:
2841	#
2842	#	PROPERTY       EDIT  INHERIT   VALUES
2843	#
2844	#	available	NO	NO	<size>
2845	#	compressratio	NO	NO	<1.00x or higher if compressed>
2846	#	creation	NO	NO	<date>
2847	#	 ... ...
2848	#	zoned		YES	YES	on | off
2849	#
2850	# Sizes are specified in bytes with standard units such as K, M, G, etc.
2851	#
2852
2853	#
2854	# Extract the property names between the 'PROPERTY' header line
2855	# and the trailing 'Sizes' line
2856	#
2857	$ZFS get 2>&1 | \
2858		$AWK '/PROPERTY/ {start=1; next}
2859			/Sizes/ {start=0}
2860		  	start==1 {print $1}' | \
2861		$GREP -w "$prop" > /dev/null 2>&1
2862
2863	return $?
2864}
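
# Example (a sketch): skip a test when refreservation is not supported:
#
#	fs_prop_exist refreserv || \
#		log_unsupported "refreservation property not supported"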
2865
2866#
2867# Detect if the given pool property is supported in this release
2868#
2869# 0	Yes, it is supported
2870# !0	No, it is not supported
2871#
2872function pool_prop_exist
2873{
2874	typeset prop=$1
2875	if [[ -z $prop ]]; then
2876		log_fail "Usage: pool_prop_exist <property>"
2877
2878		return 1
2879	fi
2880	#
2881	# If the property is a shortened column name,
2882	# convert it to the standard name
2883	#
2884	case $prop in
2885		avail)		prop=available		;;
2886		cap)		prop=capacity		;;
2887		replace)	prop=autoreplace	;;
2888	esac
2889
2890	#
2891	# The zpool get output looks like the following
2892	#
2893
2894	# usage:
2895	#	get <"all" | property[,...]> <pool> ...
2896	#
2897	# the following properties are supported:
2898	#
2899	#	PROPERTY       EDIT  VALUES
2900	#
2901	#	available	NO	<size>
2902	#	capacity	NO	<size>
2903	#	guid		NO	<guid>
2904	#	health		NO	<state>
2905	#	size		NO	<size>
2906	#	used		NO	<size>
2907	#	altroot		YES	<path>
2908	#	autoreplace	YES	on | off
2909	#	bootfs		YES	<filesystem>
2910	#	cachefile       YES	<file> | none
2911	#	delegation      YES	on | off
2912	#	failmode	YES	wait | continue | panic
2913	#	version		YES	<version>
2914
2915	$ZPOOL get 2>&1 | \
2916		$AWK '/PROPERTY/ {start=1; next}
2917			start==1 {print $1}' | \
2918		$GREP -w "$prop" > /dev/null 2>&1
2919
2920	return $?
2921}
2922
2923#
2924# check if the system was installed as zfsroot or not
2925# return: 0 if true, otherwise false
2926#
2927function is_zfsroot
2928{
2929	$DF -T / | $GREP -q zfs
2930}
2931
2932#
2933# get the root filesystem name if it's a zfsroot system.
2934#
2935# return: root filesystem name
2936function get_rootfs
2937{
2938	typeset rootfs=""
2939	rootfs=$($MOUNT | $AWK '$3 == "\/" && $4~/zfs/ {print $1}')
2940	if [[ -z "$rootfs" ]]; then
2941		log_fail "Cannot get rootfs"
2942	fi
2943	$ZFS list $rootfs > /dev/null 2>&1
2944	if (( $? == 0 )); then
2945		$ECHO $rootfs
2946	else
2947		log_fail "This is not a zfsroot system."
2948	fi
2949}
2950
2951#
2952# get the rootfs's pool name
2953# return:
2954#       rootpool name
2955#
2956function get_rootpool
2957{
2958	typeset rootfs=""
2959	typeset rootpool=""
2960	rootfs=$(get_rootfs)
2961	rootpool=`$ECHO $rootfs | $AWK -F\/ '{print $1}'`
2962	echo $rootpool
2963}
2964
2965#
2966# Get a substring from the specified source string
2967#
2968# $1 source string
2969# $2 start position. Count from 1
2970# $3 offset
2971#
2972function get_substr #src_str pos offset
2973{
2974	typeset pos offset
2975
2976	$ECHO $1 | \
2977		$NAWK -v pos=$2 -v offset=$3 '{print substr($0, pos, offset)}'
2978}
2979
2980#
2981# Get the directory path of a given device
2982#
2983function get_device_dir #device
2984{
2985	typeset device=$1
2986
2987	$ECHO "/dev"
2988}
2989
2990#
2991# Get the package name
2992#
2993function get_package_name
2994{
2995	typeset dirpath=${1:-$STC_NAME}
2996
2997	print "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
2998}
2999
3000#
3001# Get the number of words in a white-space-separated string
3002#
3003function get_word_count
3004{
3005	$ECHO $1 | $WC -w
3006}
3007
3008#
3009# Verify that the required number of disks is given
3010#
3011function verify_disk_count
3012{
3013	typeset -i min=${2:-1}
3014
3015	typeset -i count=$(get_word_count "$1")
3016
3017	if (( count < min )); then
3018		atf_skip "A minimum of $min disks is required to run." \
3019			" You specified $count disk(s)"
3020	fi
3021}
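
# Example (a sketch; $DISKS is the space-separated disk list handed to
# the test case):
#
#	verify_disk_count "$DISKS" 2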
3022
3023#
3024# Verify that vfs.zfs.vol.recursive is set, so pools can be created using zvols
3025# as backing stores.
3026#
3027function verify_zvol_recursive
3028{
3029	if [ "`sysctl -n vfs.zfs.vol.recursive`" -ne 1 ]; then
3030		atf_skip "Recursive ZVOLs not enabled"
3031	fi
3032}
3033
3034#
3035# Map a disk/slice suffix (sN) to its BSD partition-letter device path
3036#
3037function bsddevmap
3038{
3039	typeset arg=$1
3040	echo $arg | egrep "s[0-9]$" > /dev/null 2>&1
3041	if [ $? -eq 0 ]
3042	then
3043		n=`echo $arg| wc -c`
3044		set -A map a b c d e f g h i j
3045		s=`echo $arg | cut -c $((n-1))`
3046		arg=${arg%s[0-9]}${map[$s]}
3047	fi
3048	echo $arg
3049}
3050
3051#
3052# Get the name of the snapshot directory.  Traditionally .zfs/snapshot
3053#
3054function get_snapdir_name
3055{
3056	echo ".zfs/snapshot"
3057}
3058
3059#
3060# Unmount all ZFS filesystems except for those that are in the KEEP variable
3061#
3062function unmount_all_safe
3063{
3064	echo $(all_pools) | \
3065		$XARGS -n 1 $ZFS list -H -o name -t all -r | \
3066		$XARGS -n 1 $ZFS unmount
3067}
3068
3069#
3070# Return the highest pool version that this OS can create
3071#
3072function get_zpool_version
3073{
3074	# We assume output from zpool upgrade -v of the form:
3075	#
3076	# This system is currently running ZFS version 2.
3077	# .
3078	# .
3079	typeset ZPOOL_VERSION=$($ZPOOL upgrade -v | $HEAD -1 | \
3080		$AWK '{print $NF}' | $SED -e 's/\.//g')
3081	# Starting with version 5000, the output format changes to:
3082	# This system supports ZFS pool feature flags.
3083	# .
3084	# .
3085	if [[ $ZPOOL_VERSION = "flags" ]]; then
3086		ZPOOL_VERSION=5000
3087	fi
3088	echo $ZPOOL_VERSION
3089}
3090
3091# Ensures that zfsd is running, starting it if necessary.  Every test that
3092# interacts with zfsd must call this at startup.  This is intended primarily
3093# to eliminate interference from outside the test suite.
3094function ensure_zfsd_running
3095{
3096	if ! service zfsd status > /dev/null 2>&1; then
3097		service zfsd start || service zfsd onestart
3098		service zfsd status > /dev/null 2>&1 ||
3099			log_unsupported "Test requires zfsd"
3100	fi
3101}
3102
3103# Temporarily stops ZFSD, because it can interfere with some tests.  If this
3104# function is used, then restart_zfsd _must_ be called in the cleanup routine.
3105function stop_zfsd
3106{
3107	$RM -f $TMPDIR/.zfsd_enabled_during_stf_zfs_tests
3108	if [[ -n "$ZFSD" && -x "$ZFSD" ]]; then
3109		if /etc/rc.d/zfsd status > /dev/null; then
3110			log_note "Stopping zfsd"
3111			$TOUCH $TMPDIR/.zfsd_enabled_during_stf_zfs_tests
3112			/etc/rc.d/zfsd stop || /etc/rc.d/zfsd onestop
3113		fi
3114	fi
3115}
3116
3117# Restarts zfsd after it has been stopped by stop_zfsd.  Intelligently restarts
3118# only if zfsd was running at the time stop_zfsd was called.
3119function restart_zfsd
3120{
3121	if [[ -f $TMPDIR/.zfsd_enabled_during_stf_zfs_tests ]]; then
3122		log_note "Restarting zfsd"
3123		/etc/rc.d/zfsd start || /etc/rc.d/zfsd onestart
3124	fi
3125	$RM -f $TMPDIR/.zfsd_enabled_during_stf_zfs_tests
3126}
3127
3128#
3129# Using the given <vdev>, obtain the value of the property <propname> for
3130# the given <tvd> identified by numeric id.
3131#
3132function get_tvd_prop # vdev tvd propname
3133{
3134	typeset vdev=$1
3135	typeset -i tvd=$2
3136	typeset propname=$3
3137
3138	$ZDB -l $vdev | $AWK -v tvd=$tvd -v prop="${propname}:" '
3139		BEGIN { start = 0; }
3140		/^        id:/ && ($2==tvd) { start = 1; next; }
3141		(start==0) { next; }
3142		/^        [a-z]+/ && ($1==prop) { print $2; exit; }
3143		/^        children/ { exit; }
3144		'
3145}
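
# Example (a sketch; "/dev/da0" is a placeholder member of top-level
# vdev 0):
#
#	ashift=$(get_tvd_prop /dev/da0 0 ashift)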
3146
3147#
3148# Convert a DVA into a physical block address, printed in 512-byte blocks.
3149# This takes the usual printed form, in which offsets are left shifted so
3150# they represent bytes rather than the native sector count.
3151#
3152function dva_to_block_addr # dva
3153{
3154	typeset dva=$1
3155
3156	typeset offcol=$(echo $dva | cut -f2 -d:)
3157	typeset -i offset="0x${offcol}"
3158	# First add 4MB to skip the boot blocks and first two vdev labels,
3159	# then convert to 512 byte blocks (for use with dd).  Note that this
3160	# differs from simply adding 8192 blocks, since the input offset is
3161	# given in bytes and has the actual ashift baked in.
3162	(( offset += 4*1024*1024 ))
3163	(( offset >>= 9 ))
3164	echo "$offset"
3165}
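
# Worked example (a sketch): for DVA <0:1b412600:200> the offset column is
# 0x1b412600 bytes; adding 4MB gives 0x1b812600, and shifting right by 9
# yields 0xdc093 (901267) 512-byte blocks.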
3166
3167#
3168# Convert a RAIDZ DVA into a physical block address.  This has the same
3169# output as dva_to_block_addr (number of blocks from beginning of device), but
3170# is more complicated due to RAIDZ.  ashift is normally always 9, but RAIDZ
3171# uses the actual tvd ashift instead.  Furthermore, the number of vdevs changes
3172# the actual block for each device.
3173#
3174function raidz_dva_to_block_addr # dva ncols ashift
3175{
3176	typeset dva=$1
3177	typeset -i ncols=$2
3178	typeset -i ashift=$3
3179
3180	typeset -i offset=0x$(echo $dva | cut -f2 -d:)
3181	(( offset >>= ashift ))
3182
3183	typeset -i ioff=$(( (offset + ncols - 1) / ncols  ))
3184
3185	# Now add the front 4MB and return.
3186	(( ioff += ( 4194304 >> $ashift ) ))
3187	echo "$ioff"
3188}
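
# Worked example (a sketch): offset 0x30000 on a 4-column raidz1 with
# ashift 9: 0x30000 >> 9 = 384 sectors; (384 + 4 - 1) / 4 = 96 (integer
# division); adding the 4MB front (4194304 >> 9 = 8192) gives 8288.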
3189
3190#
3191# Return the vdevs for the given toplevel vdev number.
3192# Child vdevs will only be included if they are ONLINE.  Output format:
3193#
3194#   <toplevel vdev type> <nchildren> <child1>[:<child2> ...]
3195#
3196# Valid toplevel vdev types are mirror, raidz[1-3], leaf (which can be a
3197# disk or a file).  Note that 'nchildren' can be larger than the number of
3198# returned children; it represents the number of children regardless of how
3199# many are actually online.
3200#
3201function vdevs_for_tvd # pool tvd
3202{
3203	typeset pool=$1
3204	typeset -i tvd=$2
3205
3206	$ZPOOL status $pool | $AWK -v want_tvd=$tvd '
3207		BEGIN {
3208			 start = 0; tvd = -1; lvd = -1;
3209			 type = "UNKNOWN"; disks = ""; disk = "";
3210			 nchildren = 0;
3211		}
3212		/NAME.*STATE/ { start = 1; next; }
3213		(start==0) { next; }
3214
3215		(tvd > want_tvd) { exit; }
3216		END { print type " " nchildren " " disks; }
3217
3218		length(disk) > 0 {
3219			if (length(disks) > 0) { disks = disks " "; }
3220			if (substr(disk, 0, 1) == "/") {
3221				disks = disks disk;
3222			} else {
3223				disks = disks "/dev/" disk;
3224			}
3225			disk = "";
3226		}
3227
3228		/^\t(spares|logs)/ { tvd = want_tvd + 1; next; }
3229		/^\t  (mirror|raidz[1-3])-[0-9]+/ {
3230			tvd += 1;
3231			(tvd == want_tvd) && type = substr($1, 0, 6);
3232			next;
3233		}
3234		/^\t  [\/A-Za-z]+/ {
3235			tvd += 1;
3236			if (tvd == want_tvd) {
3237				(( nchildren += 1 ))
3238				type = "leaf";
3239				($2 == "ONLINE") && disk = $1;
3240			}
3241			next;
3242		}
3243
3244		(tvd < want_tvd) { next; }
3245
3246		/^\t    spare-[0-9]+/ { next; }
3247		/^\t      [\/A-Za-z]+/ {
3248			(( nchildren += 1 ))
3249			($2 == "ONLINE") && disk = $1;
3250			next;
3251		}
3252
3253		/^\t    [\/A-Za-z]+/ {
3254			(( nchildren += 1 ))
3255			($2 == "ONLINE") && disk = $1;
3256			next;
3257		}
3258		'
3259}
3260
3261#
3262# Get a vdev path, ashift & offset for a given pool/dataset and DVA.
3263# If desired, can also select the toplevel vdev child number.
3264#
3265function dva_to_vdev_ashift_off # pool/dataset dva [leaf_vdev_num]
3266{
3267	typeset poollike=$1
3268	typeset dva=$2
3269	typeset -i leaf_vdev_num=$3
3270
3271	# vdevs are normally 0-indexed while arguments are 1-indexed.
3272	(( leaf_vdev_num += 1 ))
3273
3274	# Strip any child datasets or snapshots.
3275	pool=$(echo $poollike | sed -e 's,[/@].*,,g')
3276	tvd=$(echo $dva | cut -d: -f1)
3277
3278	set -- $(vdevs_for_tvd $pool $tvd)
3279	log_debug "vdevs_for_tvd: $* <EOM>"
3280	tvd_type=$1; shift
3281	nchildren=$1; shift
3282
3283	lvd=$(eval echo \$$leaf_vdev_num)
3284	log_debug "type='$tvd_type' children='$nchildren' lvd='$lvd' dva='$dva'"
3285	case $tvd_type in
3286	raidz*)
3287		ashift=$(get_tvd_prop $lvd $tvd ashift)
3288		log_debug "raidz: ashift='${ashift}'"
3289		off=$(raidz_dva_to_block_addr $dva $nchildren $ashift)
3290		;;
3291	*)
3292		ashift=9
3293		off=$(dva_to_block_addr $dva)
3294		;;
3295	esac
3296	echo "${lvd}:${ashift}:${off}"
3297}
3298
3299#
3300# Get the DVA for the specified dataset's given filepath.
3301#
3302function file_dva # dataset filepath [level] [offset] [dva_num]
3303{
3304	typeset dataset=$1
3305	typeset filepath=$2
3306	typeset -i level=$3
3307	typeset -i offset=$4
3308	typeset -i dva_num=$5
3309
3310	typeset -li blksz=0
3311	typeset -li blknum=0
3312	typeset -li startoff
3313	typeset -li inode
3314
3315	eval `$STAT -s "$filepath"`
3316	inode="$st_ino"
3317
3318	# The inner match is for 'DVA[0]=<0:1b412600:200>', in which the
3319	# text surrounding the actual DVA is a fixed size with 8 characters
3320	# before it and 1 after.
3321	$ZDB -P -vvvvv "$dataset/" $inode | \
3322	    $AWK -v level=${level} -v dva_num=${dva_num} '
3323		BEGIN { stage = 0; }
3324		(stage == 0) && ($1=="Object") { stage = 1; next; }
3325
3326		(stage == 1) {
3327			print $3 " " $4;
3328			stage = 2; next;
3329		}
3330
3331		(stage == 2) && /^Indirect blocks/ { stage=3; next; }
3332		(stage < 3) { next; }
3333
3334		match($2, /L[0-9]/) {
3335			if (substr($2, RSTART+1, RLENGTH-1) != level) { next; }
3336		}
3337		match($3, /DVA\[.*>/) {
3338			dva = substr($3, RSTART+8, RLENGTH-9);
3339			if (substr($3, RSTART+4, 1) == dva_num) {
3340				print $1 " " dva;
3341			}
3342		}
3343		' | \
3344	while read line; do
3345		log_debug "params='$blksz/$blknum/$startoff' line='$line'"
3346		if (( blksz == 0 )); then
3347			typeset -i iblksz=$(echo $line | cut -d " " -f1)
3348			typeset -i dblksz=$(echo $line | cut -d " " -f2)
3349
3350			# Calculate the actual desired block starting offset.
3351			if (( level > 0 )); then
3352				typeset -i nbps_per_level
3353				typeset -i indsz
3354				typeset -i i=0
3355
3356				(( nbps_per_level = iblksz / 128 ))
3357				(( blksz = dblksz ))
3358				for (( i = 0; $i < $level; i++ )); do
3359					(( blksz *= nbps_per_level ))
3360				done
3361			else
3362				blksz=$dblksz
3363			fi
3364
3365			(( blknum = offset / blksz ))
3366			(( startoff = blknum * blksz ))
3367			continue
3368		fi
3369
3370		typeset lineoffstr=$(echo $line | cut -d " " -f1)
3371		typeset -i lineoff=$(printf "%d" "0x${lineoffstr}")
3372		typeset dva="$(echo $line | cut -d " " -f2)"
3373		log_debug "str='$lineoffstr' lineoff='$lineoff' dva='$dva'"
3374		if [[ -n "$dva" ]] && (( lineoff == startoff )); then
3375			echo $line | cut -d " " -f2
3376			return 0
3377		fi
3378	done
3379	return 1
3380}
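
# Example (a sketch; the dataset and file are placeholders):
#
#	dva=$(file_dva $TESTPOOL/$TESTFS /$TESTPOOL/$TESTFS/file) || \
#		log_fail "no DVA found for file"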
3381
3382#
3383# Corrupt the given dataset's filepath file.  This will obtain the first
3384# level 0 block's DVA and scribble random bits on it.
3385#
3386function corrupt_file # dataset filepath [leaf_vdev_num]
3387{
3388	typeset dataset=$1
3389	typeset filepath=$2
3390	typeset -i leaf_vdev_num="$3"
3391
3392	dva=$(file_dva $dataset $filepath)
3393	[ $? -ne 0 ] && log_fail "ERROR: Can't find file $filepath on $dataset"
3394
3395	vdoff=$(dva_to_vdev_ashift_off $dataset $dva $leaf_vdev_num)
3396	vdev=$(echo $vdoff | cut -d: -f1)
3397	ashift=$(echo $vdoff | cut -d: -f2)
3398	off=$(echo $vdoff | cut -d: -f3)
3399	blocksize=$(( 1 << $ashift ))
3400
3401	log_note "Corrupting ${dataset}'s $filepath on $vdev at DVA $dva with ashift $ashift"
3402	log_must $DD if=/dev/urandom bs=$blocksize of=$vdev seek=$off count=1 conv=notrunc
3403}
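
# Example (a sketch; the file is a placeholder created beforehand):
#
#	log_must $FILE_WRITE -o create -f /$TESTPOOL/$TESTFS/file -b 8192 -c 16 -d 0
#	corrupt_file $TESTPOOL/$TESTFS /$TESTPOOL/$TESTFS/file
#	log_must $ZPOOL scrub $TESTPOOL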
3404
3405#
3406# Given a number of files, this function will loop, creating that many
3407# files, whose names will start with <basename>.
3409#
3410# The <data> argument is special: it can be "ITER", in which case
3411# the -d argument will be the value of the current iteration.  It
3412# can be 0, in which case it will always be 0.  Otherwise, it will
3413# always be the given value.
3414#
3415# If <snapbase> is specified, a snapshot will be taken using the
3416# argument as the snapshot basename.
3417#
3418function populate_dir # basename num_files write_count blocksz data snapbase
3419{
3420	typeset basename=$1
3421	typeset -i num_files=$2
3422	typeset -i write_count=$3
3423	typeset -i blocksz=$4
3424	typeset -i i
3425	typeset data=$5
3426	typeset snapbase="$6"
3427
3428	log_note "populate_dir: data='$data'"
3429	for (( i = 0; i < num_files; i++ )); do
3430		case "$data" in
3431		0)	d=0	;;
3432		ITER)	d=$i ;;
3433		*)	d=$data	;;
3434		esac
3435
3436		log_must $FILE_WRITE -o create -c $write_count \
3437		    -f ${basename}.$i -b $blocksz -d $d
3438
3439		[ -n "$snapbase" ] && log_must $ZFS snapshot ${snapbase}.${i}
3440	done
3441}
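
# Example (a sketch): create 5 files of 8 x 8K blocks, each filled with its
# iteration number, snapshotting after each file:
#
#	populate_dir /$TESTPOOL/$TESTFS/file 5 8 8192 ITER $TESTPOOL/$TESTFS@snap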
3442
3443# Reap all children registered in $child_pids.
3444function reap_children
3445{
3446	[ -z "$child_pids" ] && return
3447	for wait_pid in $child_pids; do
3448		log_must $KILL $wait_pid
3449	done
3450	child_pids=""
3451}
3452
3453# Busy a path.  Expects to be reaped via reap_children.  Tries to run as
3454# long and slowly as possible.  [num] is taken as a hint; if such a file
3455# already exists a different one will be chosen.
3456function busy_path # <path> [num]
3457{
3458	typeset busypath=$1
3459	typeset -i num=$2
3460
3461	while :; do
3462		busyfile="$busypath/busyfile.${num}"
3463		[ ! -f "$busyfile" ] && break; (( num += 1 ))	# else try the next suffix
3464	done
3465
3466	cmd="$DD if=/dev/urandom of=$busyfile bs=512"
3467	( cd $busypath && $cmd ) &
3468	typeset pid=$!
3469	$SLEEP 1
3470	log_must $PS -p $pid
3471	child_pids="$child_pids $pid"
3472}
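
# Example (a sketch): hold the filesystem busy so unmount must fail, then
# reap the writer:
#
#	busy_path /$TESTPOOL/$TESTFS
#	log_mustnot $ZFS unmount $TESTPOOL/$TESTFS
#	reap_children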
3473