# vim: filetype=sh
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

# $FreeBSD$

#
# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
#
# ident	"@(#)libtest.kshlib	1.15	09/08/06 SMI"
#

. ${STF_SUITE}/include/logapi.kshlib

ZFS=${ZFS:-/sbin/zfs}
ZPOOL=${ZPOOL:-/sbin/zpool}
os_name=`uname -s`

# Determine if a test has the necessary requirements to run

function test_requires
{
        integer unsupported=0
        unsupported_list=""
        until [[ $# -eq 0 ]]; do
                var_name=$1
                cmd=$(eval echo \$${1})
                if [[ -z "$cmd" ]] ; then
                        print $var_name is not set
                        unsupported_list="$var_name $unsupported_list"
                        ((unsupported=unsupported+1))
                fi
                shift
        done
        if (( unsupported > 0 )) ; then
                log_unsupported "$unsupported_list commands are unsupported"
        else
                log_note "All commands are supported"
        fi
}
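
# Example (illustrative): a test that needs both the $ZFS and $ZPOOL
# commands can declare that requirement up front:
#
#	test_requires ZFS ZPOOL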

# Determine whether a dataset is mounted
#
# $1 dataset name
# $2 filesystem type; optional, defaults to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error

function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			if [[ "$1" == "/"* ]] ; then
				for out in $($ZFS mount | $AWK '{print $2}') ; do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $($ZFS mount | $AWK '{print $1}') ; do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			# a = device, b = "on", c = "mount point", d = flags
			$MOUNT | $GREP $fstype | while read a b c d
			do
				[[ "$1" == "$a" || "$1" == "$c" ]] && return 0
			done
		;;
	esac

	return 1
}

# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional, defaults to zfs

function mounted
{
	ismounted $1 $2
	(( $? == 0 )) && return 0
	return 1
}

# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional, defaults to zfs

function unmounted
{
	ismounted $1 $2
	(( $? == 1 )) && return 0
	return 1
}
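
# Example (illustrative; assumes the suite's default dataset exists):
#
#	if mounted $TESTPOOL/$TESTFS; then
#		log_must $ZFS unmount $TESTPOOL/$TESTFS
#	fi
#	log_must unmounted $TESTPOOL/$TESTFS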

# split line on ","
#
# $1 - line to split

function splitline
{
	$ECHO $1 | $SED "s/,/ /g"
}
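
# Example:
#
#	splitline "c0t0d0,c0t1d0"	# prints "c0t0d0 c0t1d0"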

function default_setup
{
	default_setup_noexit "$@"

	log_pass
}

#
# Given a list of disks, set up storage pools and datasets.
#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3

	if is_global_zone; then
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
		log_must $ZPOOL create -f $TESTPOOL $disklist
	else
		reexport_pool
	fi

	$RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	$MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		$RM -rf $TESTDIR1  || \
			log_unresolved Could not remove $TESTDIR1
		$MKDIR -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		log_must $ZFS create $TESTPOOL/$TESTCTR
		log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
		log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must $ZFS set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			log_must $ZFS create $TESTPOOL/$TESTVOL
		fi
	fi
}

#
# Given a list of disks, set up a storage pool, file system and
# a container.
#
function default_container_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true"
}

#
# Given a list of disks, set up a storage pool, file system
# and a volume.
#
function default_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "" "true"
}

#
# Given a list of disks, set up a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true" "true"
}

#
# Create a snapshot on a filesystem or volume. By default, create a snapshot
# on the filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."

	log_must $ZFS snapshot $fs_vol@$snap
}
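
# Example (illustrative; uses the suite's default names):
#
#	create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
#	snapexists $TESTPOOL/$TESTFS@$TESTSNAP || log_fail "snapshot missing"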

#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone   # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	[[ -z $snap ]] && \
		log_fail "Snapshot name is undefined."
	[[ -z $clone ]] && \
		log_fail "Clone name is undefined."

	log_must $ZFS clone $snap $clone
}
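
# Example (illustrative; assumes the snapshot created by create_snapshot):
#
#	create_clone $TESTPOOL/$TESTFS@$TESTSNAP $TESTPOOL/$TESTCLONE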

function default_mirror_setup
{
	default_mirror_setup_noexit $1 $2 $3

	log_pass
}

#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
#   $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
	readonly func="default_mirror_setup_noexit"
	typeset primary=$1
	typeset secondary=$2

	[[ -z $primary ]] && \
		log_fail "$func: No parameters passed"
	[[ -z $secondary ]] && \
		log_fail "$func: No secondary partition passed"
	[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
	log_must $ZPOOL create -f $TESTPOOL mirror $@
	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}

#
# Create a number of mirrors.
# We create a number ($1) of 2-way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted.
# @parameters: $1 the number of mirrors to create
#  $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	while (( nmirrors > 0 )); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors
		log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		(( nmirrors = nmirrors - 1 ))
	done
}

#
# Create a number of raidz pools.
# We create a number ($1) of raidz pools, each using a pair of the disks
# named on the command line. These pools are *not* mounted.
# @parameters: $1 the number of pools to create
#  $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	while (( nraidzs > 0 )); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs
		log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		(( nraidzs = nraidzs - 1 ))
	done
}

#
# Destroy the configured testpool mirrors.
# The mirrors are of the form ${TESTPOOL}{number}.
# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
	default_cleanup_noexit

	log_pass
}

#
# Given a minimum of two disks, set up a storage pool and dataset for the
# raid-z.
# $1 the list of disks
#
function default_raidz_setup
{
	typeset disklist="$*"
	set -A disks $disklist

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
	log_must $ZPOOL create -f $TESTPOOL raidz $disklist
	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}

#
# Common function used to cleanup storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests don't
# impact the execution of the next set.

function default_cleanup
{
	default_cleanup_noexit

	log_pass
}

function all_pools
{
	cmd="$ZPOOL list -H -o name | $GREP 'testpool'"
	eval $cmd
}

#
# Returns 0 if the system contains any pools that must not be modified by the
# ZFS tests.
#
function other_pools_exist
{
	typeset pool_count=`$ZPOOL list -H | $GREP -v '^testpool' | $WC -l`
	[ "$pool_count" -ne 0 ]
}

function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		for pool in $(all_pools); do
			if safe_to_destroy_pool $pool; then
				destroy_pool $pool
			fi
		done
	else
		typeset fs=""
		for fs in $($ZFS list -H -o name \
		    | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must $ZFS destroy -Rf $fs
		done

		# Clean up here to avoid leaving garbage directories behind.
		for fs in $($ZFS list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must $RM -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems'
		# properties to their default values.
		#
		for fs in $($ZFS list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must $ZFS set reservation=none $fs
				log_must $ZFS set recordsize=128K $fs
				log_must $ZFS set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must $ZFS set checksum=on $fs
				fi
				log_must $ZFS set compression=off $fs
				log_must $ZFS set atime=on $fs
				log_must $ZFS set devices=off $fs
				log_must $ZFS set exec=on $fs
				log_must $ZFS set setuid=on $fs
				log_must $ZFS set readonly=off $fs
				log_must $ZFS set snapdir=hidden $fs
				log_must $ZFS set aclmode=groupmask $fs
				log_must $ZFS set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must $RM -rf $TESTDIR
}


#
# Common function used to cleanup storage pools, file systems
# and containers.
#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must $RM -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}

#
# Common function used to clean up a snapshot of a file system or volume.
# Defaults to deleting the file system's snapshot.
#
# $1 snapshot name
#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
	fi

	#
	# The mountpoint reported by 'get_prop' does not match the real
	# mountpoint when the snapshot is unmounted, so first check that
	# the snapshot is actually mounted on the current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(( $? != 0 )) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must $ZFS destroy $snap
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must $RM -rf $mtpt
}

#
# Common function used to clean up a clone.
#
# $1 clone name
#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		log_fail "'$clone' does not exist."
	fi

	# Same reasoning as in destroy_snapshot.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(( $? != 0 )) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	log_must $ZFS destroy $clone
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must $RM -rf $mtpt
}

# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name

function snapexists
{
	$ZFS list -H -t snapshot "$1" > /dev/null 2>&1
	return $?
}

#
# Set a property of a dataset to the value passed in.
# @param:
#	$1 dataset whose property is being set
#	$2 property to set
#	$3 value to set the property to
# @return:
#	0 if the property could be set.
#	non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (( $# < 3 )); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi
	typeset output=
	output=$($ZFS set $2=$3 $1 2>&1)
	typeset rv=$?
	if (( rv != 0 )); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}

#
# Assign suite-defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
#   0 if the dataset has been altered.
#   1 if no pool name was passed in.
#   2 if the dataset could not be found.
#   3 if the dataset could not have its properties set.
#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	for confset in $($ZFS list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP && $WRAPPER != *"crypto"* ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}

#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
	(( $@ )) || log_fail $@
}
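
# Example ($free_blocks is a hypothetical variable used for illustration):
#
#	assert 2 + 2 == 4		# succeeds silently
#	assert $free_blocks > 0		# log_fail if $free_blocks is zero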

function wipe_partition_table #<whole_disk_name> [<whole_disk_name> ...]
{
	while [[ -n $* ]]; do
		typeset diskname=$1
		[ ! -e $diskname ] && log_fail "ERROR: $diskname doesn't exist"
		if gpart list ${diskname#/dev/} >/dev/null 2>&1; then
			wait_for 5 1 $GPART destroy -F $diskname
		else
			log_note "No GPT partitions detected on $diskname"
		fi
		log_must $GPART create -s gpt $diskname
		shift
	done
}

#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements, e.g. 100mb 3gb
#
function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	set -A devmap a b c d e f g h
	[[ -z $slicenum || -z $size || -z $disk ]] && \
		log_fail "The slice, size or disk name is unspecified."

	size=`$ECHO $size | $SED -e s/mb/M/ -e s/m/M/ -e s/gb/G/ -e s/g/G/`
	[[ -n $start ]] && start="-b $start"
	log_must $GPART add -t efi $start -s $size -i $slicenum $disk
	return 0
}

function get_disk_size #<disk>
{
	typeset disk=$1
	diskinfo $disk | $AWK '{print $3}'
}

function get_available_disk_size #<disk>
{
	typeset disk=$1
	raw_size=`get_disk_size $disk`
	(( available_size = raw_size * 95 / 100 ))
	echo $available_size
}
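
# Example (illustrative; /dev/da0 is a placeholder device):
#
#	avail=$(get_available_disk_size /dev/da0)	# 95% of the raw size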

#
# Get the end cyl of the given slice
# #TODO: fix this to be GPT-compatible if we want to use the SMI WRAPPER.  This
# function is not necessary on FreeBSD
#
function get_endslice #<disk> <slice>
{
	log_fail "get_endslice has not been updated for GPT partitions"
}

#
# Get the first LBA that is beyond the end of the given partition
function get_partition_end #<disk> <partition_index>
{
	typeset disk=$1
	typeset partition_index=$2
	export partition_index
	$GPART show $disk | $AWK \
		'/^[ \t]/ && $3 ~ ENVIRON["partition_index"] {print $1 + $2}'
}


#
# Given a size, disk and total number of partitions, this function formats
# the disk partitions from 1 to the total partition number, each with the
# same specified size.
#
function partition_disk	#<part_size> <whole_disk_name> <total_parts>
{
	typeset -i i=1
	typeset part_size=$1
	typeset disk_name=$2
	typeset total_parts=$3
	typeset cyl

	wipe_partition_table $disk_name
	while (( i <= $total_parts )); do
		set_partition $i "" $part_size $disk_name
		(( i = i+1 ))
	done
}
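
# Example (illustrative; /dev/da0 is a placeholder device): create four
# 1GB partitions on a freshly wiped disk:
#
#	partition_disk 1gb /dev/da0 4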

function size_of_file # fname
{
	typeset fname=$1
	sz=`stat -f '%z' $fname`
	[[ -z "$sz" ]] && log_fail "stat($fname) failed"
	$ECHO $sz
	return 0
}

#
# This function continues to write to a filenum number of files into dirnum
# number of directories until either $FILE_WRITE returns an error or the
# maximum number of files per directory has been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [blocksz] [num_writes] [data]
#
# Return value: 0 on success
#		non 0 on error
#
# Where:
#	destdir:    is the directory where everything is to be created under
#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
#	filenum:    the maximum number of files per subdirectory
#	blocksz:    number of bytes per block
#	num_writes: number of blocks to write
#	data:	    the data that will be written
#
#	E.g.
#	fill_fs /testdir 20 25 1024 256 0
#
# Note: blocksz * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum blocksz num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i blocksz=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i retval=0
	typeset -i dn=0 # current dir number
	typeset -i fn=0 # current file number
	while (( retval == 0 )); do
		(( dirnum >= 0 && dn >= dirnum )) && break
		typeset curdir=$destdir/$dn
		log_must $MKDIR -p $curdir
		for (( fn = 0; $fn < $filenum && $retval == 0; fn++ )); do
			log_cmd $FILE_WRITE -o create -f $curdir/$TESTFILE.$fn \
			    -b $blocksz -c $num_writes -d $data
			retval=$?
		done
		(( dn = dn + 1 ))
	done
	return $retval
}
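
# For instance, the E.g. invocation above writes up to 25 files into each of
# up to 20 subdirectories of /testdir, each file 1024 * 256 = 262144 bytes.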

#
# Simple function to get the specified property. If unable to
# get the property, returns 1.
#
# Note: the property value is in 'parsable' format (-p)
#
function get_prop # property dataset
{
	typeset prop_val
	typeset prop=$1
	typeset dataset=$2

	prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
	if [[ $? -ne 0 ]]; then
		log_note "Unable to get $prop property for dataset $dataset"
		return 1
	fi

	$ECHO $prop_val
	return 0
}
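
# Example (illustrative): fetch the parsable recordsize of the default
# filesystem:
#
#	recsize=$(get_prop recordsize $TESTPOOL/$TESTFS)	# e.g. 131072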

#
# Simple function to return the lesser of two values.
#
function min
{
	typeset first_arg=$1
	typeset second_arg=$2

	if (( first_arg < second_arg )); then
		$ECHO $first_arg
	else
		$ECHO $second_arg
	fi
	return 0
}
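
# Example:
#
#	min 512 1024	# prints 512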

#
# Simple function to get the specified property of a pool. If unable to
# get the property, returns 1.
#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \
			$AWK '{print $3}')
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			"$pool"
			return 1
		fi
	else
		log_note "Pool $pool does not exist."
		return 1
	fi

	$ECHO $prop_val
	return 0
}

# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name

function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	$ZPOOL list -H "$pool" > /dev/null 2>&1
	return $?
}

# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n  dataset name
function datasetexists
{
	if (( $# == 0 )); then
		log_note "No dataset name given."
		return 1
	fi

	while (( $# > 0 )); do
		$ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 || \
			return $?
		shift
	done

	return 0
}
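
# Example (illustrative):
#
#	datasetexists $TESTPOOL/$TESTFS $TESTPOOL/$TESTFS@$TESTSNAP || \
#		log_fail "expected datasets are missing"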

# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n  dataset name
function datasetnonexists
{
	if (( $# == 0 )); then
		log_note "No dataset name given."
		return 1
	fi

	while (( $# > 0 )); do
		$ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 && \
			return 1
		shift
	done

	return 0
}

#
# Given a mountpoint, or a dataset name, determine if it is shared.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	for mtpt in `$SHARE | $AWK '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	typeset stat=$($SVCS -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}

#
# Given a mountpoint, determine if it is not shared.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
	typeset fs=$1

	is_shared $fs
	if (( $? == 0 )); then
		return 1
	fi

	return 0
}

#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
	typeset fs=$1

	is_shared $fs
	if (( $? == 0 )); then
		log_must $ZFS unshare $fs
	fi

	return 0
}

#
# Check the NFS server status and bring it online if needed.
#
function setup_nfs_server
{
	# Cannot share a directory in a non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only a real share operation can bring the NFS server
		# online permanently.
		#
		typeset dummy=$TMPDIR/dummy

		if [[ -d $dummy ]]; then
			log_must $RM -rf $dummy
		fi

		log_must $MKDIR $dummy
		log_must $SHARE $dummy

		#
		# Wait for the FMRI to reach its final status. While in
		# transition an asterisk (*) is appended to the instance
		# status, and unsharing at that point would flip the
		# status back to 'DIS' again.
		#
		# Wait at least 1 second.
		#
		log_must $SLEEP 1
		timeout=10
		while [[ $timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must $SLEEP 1

			(( timeout -= 1 ))
		done

		log_must $UNSHARE $dummy
		log_must $RM -rf $dummy
	fi

	log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
}

#
# Verify whether the calling process is in the global zone.
#
# Return 0 if in the global zone, 1 in a non-global zone.
#
function is_global_zone
{
	typeset cur_zone=$($ZONENAME 2>/dev/null)

	# Zones are not supported on FreeBSD.
	if [[ $os_name == "FreeBSD" ]]; then
		return 0
	fi

	if [[ $cur_zone != "global" ]]; then
		return 1
	fi
	return 0
}

#
# Verify whether the test is permitted to run in the global zone,
# a local zone, or both.
#
# $1 zone limit, can be "global", "local", or "both" (no limit)
#
# Return 0 if permitted; otherwise exit via log_unsupported.
#
function verify_runnable # zone limit
{
	typeset limit=$1

	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from \
					global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from \
					local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - use both."
				;;
		esac

		reexport_pool
	fi

	return 0
}

# Return 0 if the pool is created successfully; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list

function create_pool #pool devs_list
{
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	if poolexists $pool ; then
		destroy_pool $pool
	fi

	if is_global_zone ; then
		[[ -d /$pool ]] && $RM -rf /$pool
		log_must $ZPOOL create -f $pool $@
	fi

	return 0
}

# Return 0 if the pool is destroyed successfully; 1 otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy the pool with the given name.

function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			mtpt=$(get_prop mountpoint "$pool")
			log_must $ZPOOL destroy -f $pool

			[[ -d $mtpt ]] && \
				log_must $RM -rf $mtpt
		else
			log_note "Pool $pool does not exist, skipping destroy."
			return 1
		fi
	fi

	return 0
}

#
# Create file vdevs.
# By default this generates sparse vdevs 10GB in size, for performance.
#
function create_vdevs # vdevs
{
	typeset vdsize=10G

	[ -n "$VDEV_SIZE" ] && vdsize=$VDEV_SIZE
	rm -f $@ || return 1
	truncate -s $vdsize $@
}
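
# Example (illustrative; the $TMPDIR file names are placeholders):
#
#	VDEV_SIZE=1G
#	create_vdevs $TMPDIR/vdev0 $TMPDIR/vdev1
#	create_pool $TESTPOOL mirror $TMPDIR/vdev0 $TMPDIR/vdev1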

#
# First, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
# and a zvol device to the zone.
#
# $1 zone name
# $2 zone root directory prefix
# $3 zone ip
#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create a pool and 5 containers within it
	#
	[[ -d /$pool_name ]] && $RM -rf /$pool_name
	log_must $ZPOOL create -f $pool_name $DISKS
	while (( i < cntctr )); do
		log_must $ZFS create $pool_name/$prefix_ctr$i
		(( i += 1 ))
	done

	# Create a zvol
	log_must $ZFS create -V 1g $pool_name/zone_zvol

	#
	# If the current system supports slog, add slog devices to the pool
	#
	typeset sdevs="$TMPDIR/sdev1 $TMPDIR/sdev2"
	log_must create_vdevs $sdevs
	log_must $ZPOOL add $pool_name log mirror $sdevs

	# This isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have its mountpoint set to 'legacy'
	# log_must $ZFS create $pool_name/zfs_filesystem
	# log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem

	[[ -d $zone_root ]] && \
		log_must $RM -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must $MKDIR -p -m 0700 $zone_root/$zone_name

	# Create the zone configuration file and configure the zone
	#
	typeset zone_conf=$TMPDIR/zone_conf.${TESTCASE_ID}
	$ECHO "create" > $zone_conf
	$ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
	$ECHO "set autoboot=true" >> $zone_conf
	i=0
	while (( i < cntctr )); do
		$ECHO "add dataset" >> $zone_conf
		$ECHO "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		$ECHO "end" >> $zone_conf
		(( i += 1 ))
	done

	# Add our zvol to the zone
	$ECHO "add device" >> $zone_conf
	$ECHO "set match=/dev/zvol/$pool_name/zone_zvol" >> $zone_conf
	$ECHO "end" >> $zone_conf

	# Once it's supported, we'll add our filesystem to the zone
	# $ECHO "add fs" >> $zone_conf
	# $ECHO "set type=zfs" >> $zone_conf
	# $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
	# $ECHO "end" >> $zone_conf

	$ECHO "verify" >> $zone_conf
	$ECHO "commit" >> $zone_conf
	log_must $ZONECFG -z $zone_name -f $zone_conf
	log_must $RM -f $zone_conf

	# Install the zone
	$ZONEADM -z $zone_name install
	if (( $? == 0 )); then
		log_note "SUCCESS: $ZONEADM -z $zone_name install"
	else
		log_fail "FAIL: $ZONEADM -z $zone_name install"
	fi

	# Install the sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	$ECHO "system_locale=C" > $sysidcfg
	$ECHO "terminal=dtterm" >> $sysidcfg
	$ECHO "network_interface=primary {" >> $sysidcfg
	$ECHO "hostname=$zone_name" >> $sysidcfg
	$ECHO "}" >> $sysidcfg
	$ECHO "name_service=NONE" >> $sysidcfg
	$ECHO "root_password=mo791xfZ/SFiw" >> $sysidcfg
	$ECHO "security_policy=NONE" >> $sysidcfg
	$ECHO "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must $ZONEADM -z $zone_name boot
}

#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	while (( i < cntctr )); do
		if (( i == 0 )); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must $ZFS mount $TESTPOOL
			fi
		else
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval $ZFS mount \$TESTPOOL$i
			fi
		fi
		(( i += 1 ))
	done
}

#
# Wait for something to return true, checked by the caller.
#
function wait_for_checked # timeout dt <method> [args...]
{
	typeset timeout=$1
	typeset dt=$2
	shift; shift
	typeset -i start=$(date '+%s')
	typeset -i endtime

	log_note "Waiting $timeout seconds (checked every $dt seconds) for: $*"
	((endtime = start + timeout))
	while :; do
		$*
		[ $? -eq 0 ] && return
		curtime=$(date '+%s')
		[ $curtime -gt $endtime ] && return 1
		sleep $dt
	done
}

#
# Wait for something to return true.
#
function wait_for # timeout dt <method> [args...]
{
	typeset timeout=$1
	typeset dt=$2
	shift; shift

	wait_for_checked $timeout $dt $* || \
		log_fail "ERROR: Timed out waiting for: $*"
}
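
# Example (illustrative): poll every 3 seconds, for up to 60 seconds,
# until $TESTPOOL has finished resilvering:
#
#	wait_for 60 3 is_pool_resilvered $TESTPOOL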

#
# Verify a given disk is online or offline
#
# Return 0 if the pool/disk matches the expected state, 1 otherwise.
# stateexpr is a regex like ONLINE or REMOVED|UNAVAIL
#
function check_state # pool disk stateexpr
{
	typeset pool=$1
	typeset disk=${2#/dev/}
	disk=${disk#/dev/}
	disk=${disk#/dev/}
	typeset stateexpr=$3

	$ZPOOL status -v $pool | $GREP "$disk" \
	    | $EGREP -i "$stateexpr" > /dev/null 2>&1

	return $?
}

#
# Wait for a given disk to leave a state
#
function wait_for_state_exit
{
	typeset pool=$1
	typeset disk=$2
	typeset state=$3

	while check_state "$pool" "$disk" "$state"; do
		$SLEEP 1
	done
}

#
# Wait for a given disk to enter a state
#
function wait_for_state_enter
{
	typeset -i timeout=$1
	typeset pool=$2
	typeset disk=$3
	typeset state=$4

	log_note "Waiting up to $timeout seconds for $disk to become $state ..."
	for ((; $timeout > 0; timeout=$timeout-1)); do
		check_state $pool "$disk" "$state"
		[ $? -eq 0 ] && return
		$SLEEP 1
	done
	log_must $ZPOOL status $pool
	log_fail "ERROR: Disk $disk not marked as $state in $pool"
}

#
# Get the mountpoint of a snapshot
#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if [[ $dataset != *@* ]]; then
		log_fail "Invalid snapshot name '$dataset'."
	fi

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	if [[ -z $fs || -z $snap ]]; then
		log_fail "Invalid snapshot name '$dataset'."
	fi

	$ECHO $(get_prop mountpoint $fs)/$(get_snapdir_name)/$snap
}

function pool_maps_intact # pool
{
	typeset pool="$1"

	if ! $ZDB -bcv $pool; then
		return 1
	fi
	return 0
}

function filesys_has_zil # filesystem
{
	typeset filesys="$1"

	if ! $ZDB -ivv $filesys | $GREP "ZIL header"; then
		return 1
	fi
	return 0
}

#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="$TMPDIR/zdbout.${TESTCASE_ID}"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling $ZDB to verify filesystem '$filesys'"
	log_must $ZPOOL export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must $ZPOOL import $search_path $pool

	$ZDB -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: $ZDB -cudi $filesys"
		$CAT $zdbout
		log_fail "$ZDB detected errors with: '$filesys'"
	fi

	log_must $RM -rf $zdbout
}

#
# Given a pool, list all disks in the pool.
#
function get_disklist # pool
{
	typeset disklist=""

	disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR > 4) {print $1}' | \
		$GREP -v "\-\-\-\-\-" | \
		$EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$" )

	$ECHO $disklist
}

#
# Destroy all existing metadevices and state databases
#
function destroy_metas
{
	typeset metad

	for metad in $($METASTAT -p | $AWK '{print $1}'); do
		log_must $METACLEAR -rf $metad
	done

	for metad in $($METADB | $CUT -f6 | $GREP dev | $UNIQ); do
		log_must $METADB -fd $metad
	done
}

# /**
#  This function kills a given list of processes after a time period. We use
#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
#  would be listed as FAIL, which we don't want: we're happy with stress tests
#  running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes ($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must $SLEEP $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	for pid in $cpids; do
		$PS -p $pid > /dev/null 2>&1
		if (( $? == 0 )); then
			log_must $KILL -USR1 $pid
		fi
	done
}

#
# Check whether the current OS version is in a list of unsupported versions.
#
# Return 0 if the current OS version is in the unsupported list, 1 otherwise.
#
# $1 unsupported target OS versions
#
function check_version # <OS version>
{
	typeset unsupported_vers="$@"
	typeset ver
	typeset cur_ver=`$UNAME -r`

	for ver in $unsupported_vers; do
		[[ "$cur_ver" == "$ver" ]] && return 0
	done

	return 1
}

#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 if the pool/disk matches the expected state, 1 otherwise.
#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/}
	disk=${disk#/dev/}
	disk=${disk#/dev/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "spares")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}

#
# Verify a given slog disk is inuse or avail
#
# Return 0 if the pool/disk matches the expected state, 1 otherwise.
#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/}
	disk=${disk#/dev/}
	disk=${disk#/dev/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "logs")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}

#
# Verify a given vdev disk is inuse or avail
#
# Return 0 if the pool/disk matches the expected state, 1 otherwise.
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/}
	disk=${disk#/dev/}
	disk=${disk#/dev/}
	typeset state=$3

	if [[ $WRAPPER == *"smi"* ]]; then
		$ECHO $disk | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
		if (( $? == 0 )); then
			disk=${disk}s2
		fi
	fi

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}

#
# Check the output of 'zpool status -v <pool>' to see if the content of
# <token> contains the specified <keyword>.
#
# Return 0 if it does, 1 otherwise.
#
function check_pool_status # pool token keyword
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3

	$ZPOOL status -v "$pool" 2>/dev/null | \
		$NAWK -v token="$token:" '($1==token) {print $0}' | \
		$GREP -i "$keyword" >/dev/null 2>&1

	return $?
}

function vdev_pool_error_count
{
	typeset errs=$1
	if [ -z "$2" ]; then
		test $errs -gt 0; ret=$?
	else
		test $errs -eq $2; ret=$?
	fi
	log_debug "vdev_pool_error_count: errs='$errs' \$2='$2' ret='$ret'"
	return $ret
}
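
# Example:
#
#	vdev_pool_error_count 3		# returns 0: error count is non-zero
#	vdev_pool_error_count 3 5	# returns 1: expected exactly 5 errors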

#
# Generate a pool status error file suitable for pool_errors_from_file.
# If the pool is healthy, returns 0. Otherwise, the caller must handle the
# returned temporary file appropriately.
#
function pool_error_file # <pool>
{
	typeset pool="$1"

	typeset tmpfile=$TMPDIR/pool_status.${TESTCASE_ID}
	$ZPOOL status -x $pool > ${tmpfile}
	echo $tmpfile
}

#
# Evaluates <file>, counting the number of errors. If a vdev is specified,
# only that vdev's errors are counted. Returns the total number. <file>
# will be deleted on exit.
#
function pool_errors_from_file # <file> [vdev]
{
	typeset file=$1
	typeset checkvdev="$2"

	typeset line
	typeset -i fetchbegin=1
	typeset -i errnum=0
	typeset -i c_read=0
	typeset -i c_write=0
	typeset -i c_cksum=0

	cat ${file} | $EGREP -v "pool:" | while read line; do
		if (( $fetchbegin != 0 )); then
			$ECHO $line | $GREP "NAME" >/dev/null 2>&1
			(( $? == 0 )) && (( fetchbegin = 0 ))
			continue
		fi

		if [[ -n $checkvdev ]]; then
			$ECHO $line | $GREP $checkvdev >/dev/null 2>&1
			(( $? != 0 )) && continue
			c_read=`$ECHO $line | $AWK '{print $3}'`
			c_write=`$ECHO $line | $AWK '{print $4}'`
			c_cksum=`$ECHO $line | $AWK '{print $5}'`
			if [ $c_read != 0 ] || [ $c_write != 0 ] || \
			    [ $c_cksum != 0 ]
			then
				(( errnum = errnum + 1 ))
			fi
			break
		fi

		c_read=`$ECHO $line | $AWK '{print $3}'`
		c_write=`$ECHO $line | $AWK '{print $4}'`
		c_cksum=`$ECHO $line | $AWK '{print $5}'`
		if [ $c_read != 0 ] || [ $c_write != 0 ] || \
		    [ $c_cksum != 0 ]
		then
			(( errnum = errnum + 1 ))
		fi
	done

	rm -f $file
	echo $errnum
}

#
# Returns whether the vdev has the given number of errors.
# If the number is unspecified, any non-zero number returns true.
#
function vdev_has_errors # pool vdev [errors]
{
	typeset pool=$1
	typeset vdev=$2
	typeset tmpfile=$(pool_error_file $pool)
	log_note "Original pool status:"
	cat $tmpfile

	typeset -i errs=$(pool_errors_from_file $tmpfile $vdev)
	vdev_pool_error_count $errs $3
}

#
# Returns whether the pool has the given number of errors.
# If the number is unspecified, any non-zero number returns true.
#
function pool_has_errors # pool [errors]
{
	typeset pool=$1
	typeset tmpfile=$(pool_error_file $pool)
	log_note "Original pool status:"
	cat $tmpfile

	typeset -i errs=$(pool_errors_from_file $tmpfile)
	vdev_pool_error_count $errs $2
}

#
# Returns whether clearing $pool at $vdev (if given) succeeds.
#
function pool_clear_succeeds
{
	typeset pool="$1"
	typeset vdev=$2

	$ZPOOL clear $pool $vdev
	! pool_has_errors $pool
}

#
# Return whether the pool is healthy
#
function is_pool_healthy # pool
{
	typeset pool=$1

	typeset healthy_output="pool '$pool' is healthy"
	typeset real_output=$($ZPOOL status -x $pool)

	if [[ "$real_output" == "$healthy_output" ]]; then
		return 0
	else
		typeset -i ret
		$ZPOOL status -x $pool | $GREP "state:" | \
			$GREP "FAULTED" >/dev/null 2>&1
		ret=$?
		(( $ret == 0 )) && return 1
		typeset l_scan
		typeset errnum
		l_scan=$($ZPOOL status -x $pool | $GREP "scan:")
		l_scan=${l_scan##*"with"}
		errnum=$($ECHO $l_scan | $AWK '{print $1}')
		if [ "$errnum" != "0" ]; then
			return 1
		else
			return 0
		fi
	fi
}

#
# The following five functions are instances of check_pool_status():
#	is_pool_resilvering - check if the pool is resilvering
#	is_pool_resilvered - check if the pool resilver has completed
#	is_pool_scrubbing - check if a pool scrub is in progress
#	is_pool_scrubbed - check if the pool scrub has completed
#	is_pool_scrub_stopped - check if the pool scrub was stopped
#
function is_pool_resilvering #pool
{
	check_pool_status "$1" "scan" "resilver in progress"
	return $?
}

function is_pool_resilvered #pool
{
	check_pool_status "$1" "scan" "resilvered"
	return $?
}

function resilver_happened # pool
{
	typeset pool=$1
	is_pool_resilvering "$pool" || is_pool_resilvered "$pool"
}

function is_pool_scrubbing #pool
{
	check_pool_status "$1" "scan" "scrub in progress"
	return $?
}

function is_pool_scrubbed #pool
{
	check_pool_status "$1" "scan" "scrub repaired"
	return $?
}

function is_pool_scrub_stopped #pool
{
	check_pool_status "$1" "scan" "scrub canceled"
	return $?
}

function is_pool_state # pool state
{
	check_pool_status "$1" "state" "$2"
	return $?
}

#
# Erase the partition tables and destroy any zfs labels
#
function cleanup_devices #vdevs
{
	for device in $@; do
		# Labelclear must happen first, otherwise it may interfere
		# with the teardown/setup of GPT labels.
		$ZPOOL labelclear -f $device
		# Only wipe partition tables for arguments that are disks,
		# as opposed to slices (which are valid arguments here).
		if camcontrol inquiry $device >/dev/null 2>&1; then
			wipe_partition_table $device
		fi
	done
	return 0
}

#
# Verify the rsh connectivity to each remote host in RHOSTS.
#
# Return 0 if the remote host is accessible; otherwise 1.
# $1 remote host name
# $2 username
#
function verify_rsh_connect #rhost, username
{
	typeset rhost=$1
	typeset username=$2
	typeset rsh_cmd="$RSH -n"
	typeset cur_user=

	$GETENT hosts $rhost >/dev/null 2>&1
	if (( $? != 0 )); then
		log_note "$rhost cannot be found in the" \
			"administrative database."
		return 1
	fi

	$PING $rhost 3 >/dev/null 2>&1
	if (( $? != 0 )); then
		log_note "$rhost is not reachable."
		return 1
	fi

	if (( ${#username} != 0 )); then
		rsh_cmd="$rsh_cmd -l $username"
		cur_user="given user \"$username\""
	else
		cur_user="current user \"`$LOGNAME`\""
	fi

	if ! $rsh_cmd $rhost $TRUE; then
		log_note "$RSH to $rhost is not accessible" \
			"with $cur_user."
		return 1
	fi

	return 0
}

#
# Verify the remote host connection via rsh after rebooting
# $1 remote host
#
function verify_remote
{
	rhost=$1

	#
	# The following loop waits for the remote system to reboot.
	# Each iteration waits for 150 seconds. There are 5 iterations
	# in total, so the total timeout is approximately 12.5 minutes.
	#
	typeset -i count=0
	while ! verify_rsh_connect $rhost; do
		sleep 150
		(( count = count + 1 ))
		if (( count > 5 )); then
			return 1
		fi
	done
	return 0
}

#
# Replacement function for /usr/bin/rsh. This function wraps /usr/bin/rsh
# and returns the execution status of the last command.
#
# $1 username passed to the -l option of /usr/bin/rsh
# $2 remote machine hostname
# $3... command string
#

function rsh_status
{
	typeset ruser=$1
	typeset rhost=$2
	typeset -i ret=0
	typeset cmd_str=""
	typeset rsh_str=""

	shift; shift
	cmd_str="$@"

	err_file=$TMPDIR/${rhost}.${TESTCASE_ID}.err
	if (( ${#ruser} == 0 )); then
		rsh_str="$RSH -n"
	else
		rsh_str="$RSH -n -l $ruser"
	fi

	$rsh_str $rhost /usr/local/bin/ksh93 -c "'$cmd_str; \
		print -u 2 \"status=\$?\"'" \
		>/dev/null 2>$err_file
	ret=$?
	if (( $ret != 0 )); then
		$CAT $err_file
		$RM -f $std_file $err_file
		log_fail "$RSH itself failed with exit code $ret..."
	fi

	ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
		$CUT -d= -f2)
	(( $ret != 0 )) && $CAT $err_file >&2

	$RM -f $err_file >/dev/null 2>&1
	return $ret
}

#
# Get the SUNWstc-fs-zfs package installation path on a remote host
# $1 remote host name
#
function get_remote_pkgpath
{
	typeset rhost=$1
	typeset pkgpath=""

	pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
			$CUT -d: -f2")

	$ECHO $pkgpath
}

#/**
# A function to find and locate free disks on a system or from given
# disks as the parameter.  Since the conversion to ATF, this function is
# superfluous; it is assumed that the user will supply an accurate list of
# disks to use.  So we just return the arguments.
#
# $@ given disks to find which are free
#
# @return a string containing the list of available disks
#*/
function find_disks
{
	(( first=0 ))
	for disk in $@; do
		[[ $first == 1 ]] && echo -n " "
		(( first=1 ))
		case $disk in
		/dev/*)	echo -n "$disk" ;;
		*)	echo -n "/dev/$disk" ;;
		esac
	done
}

# A function to set convenience variables for disks.
function set_disks
{
	set -A disk_array $(find_disks $DISKS)
	[[ -z "$DISK_ARRAY_LIMIT" ]] && typeset -i DISK_ARRAY_LIMIT=5

	export DISK=""
	typeset -i i=0
	while (( i < ${#disk_array[*]} && i <= $DISK_ARRAY_LIMIT )); do
		export DISK${i}="${disk_array[$i]}"
		DISKSARRAY="$DISKSARRAY ${disk_array[$i]}"
		(( i = i + 1 ))
	done
	export DISK_ARRAY_NUM=$i
	export DISKSARRAY
	export disk=$DISK0
}
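
# Example (illustrative; assumes DISKS is set by the test harness):
#
#	DISKS="da0 da1 da2"
#	set_disks	# exports DISK0=/dev/da0, DISK1=/dev/da1, DISK2=/dev/da2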

#
# Add the specified user to the specified group
#
# $1 group name
# $2 user name
#
function add_user #<group_name> <user_name>
{
	typeset gname=$1
	typeset uname=$2

	if (( ${#gname} == 0 || ${#uname} == 0 )); then
		log_fail "group name or user name is not defined."
	fi

	# Check to see if the user exists.
	$ID $uname > /dev/null 2>&1 && return 0

	# Assign 1000 as the base uid
	typeset -i uid=1000
	while true; do
		typeset -i ret
		$USERADD -u $uid -g $gname -d /var/tmp/$uname -m $uname
		ret=$?
		case $ret in
			0) return 0 ;;
			# The uid is not unique
			65) ((uid += 1)) ;;
			*) return 1 ;;
		esac
		if [[ $uid == 65000 ]]; then
			log_fail "No user id available under 65000 for $uname"
		fi
	done

	return 0
}

#
# Delete the specified user.
#
# $1 login name
#
function del_user #<logname>
{
	typeset user=$1

	if (( ${#user} == 0 )); then
		log_fail "login name is necessary."
	fi

	if $ID $user > /dev/null 2>&1; then
		log_must $USERDEL $user
	fi

	return 0
}

#
# Select a valid gid and create the specified group.
#
# $1 group name
#
function add_group #<group_name>
{
	typeset group=$1

	if (( ${#group} == 0 )); then
		log_fail "group name is necessary."
	fi

	# See if the group already exists.
	$GROUPSHOW $group >/dev/null 2>&1
	[[ $? == 0 ]] && return 0

	# Assign 100 as the base gid
	typeset -i gid=100
	while true; do
		$GROUPADD -g $gid $group > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
			0) return 0 ;;
			# The gid is not unique
			65) ((gid += 1)) ;;
			*) return 1 ;;
		esac
		if [[ $gid == 65000 ]]; then
			log_fail "No group id available under 65000 for $group"
		fi
	done
}

#
# Delete the specified group.
#
# $1 group name
#
function del_group #<group_name>
{
	typeset grp=$1
	if (( ${#grp} == 0 )); then
		log_fail "group name is necessary."
	fi

	$GROUPDEL -n $grp > /dev/null 2>&1
	typeset -i ret=$?
	case $ret in
		# Group does not exist, or was deleted successfully.
		0|6|65) return 0 ;;
		# Name already exists as a group name
		9) log_must $GROUPDEL $grp ;;
		*) return 1 ;;
	esac

	return 0
}

#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
#
function safe_to_destroy_pool { # $1 the pool name

	typeset pool=""
	typeset DONT_DESTROY=""

	# We check that by deleting the $1 pool, we're not
	# going to pull the rug out from other pools. Do this
	# by looking at all other pools, ensuring that they
	# aren't built from files or zvols contained in this pool.

	for pool in $($ZPOOL list -H -o name)
	do
		ALTMOUNTPOOL=""

		# this is a list of the file-backed vdevs in $pool whose
		# paths lie under /$1/
		FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
			$AWK '{print $1}')

		# this is a list of the zvols that make up the pool
		ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "/dev/zvol/$1$" | \
			$AWK '{print $1}')

		# also want to determine if it's a file-based pool using an
		# alternate mountpoint...
		POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
					$GREP / | $AWK '{print $1}' | \
					$AWK -F/ '{print $2}' | $GREP -v "dev")

		for pooldir in $POOL_FILE_DIRS
		do
			OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
					$GREP "${pooldir}$" | $AWK '{print $1}')

			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
		done

		if [ ! -z "$ZVOLPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ZVOLPOOL on $1"
		fi

		if [ ! -z "$FILEPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $FILEPOOL on $1"
		fi

		if [ ! -z "$ALTMOUNTPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
		fi
	done

	if [ -z "${DONT_DESTROY}" ]
	then
		return 0
	else
		log_note "Warning: it is not safe to destroy $1!"
		return 1
	fi
}

#
# Get the IP address of a hostname
# $1 hostname
#
function getipbyhost
{
	typeset ip
	ip=`$ARP $1 2>/dev/null | $AWK -F\) '{print $1}' \
		| $AWK -F\( '{print $2}'`
	$ECHO $ip
}
2236
2237#
2238# Setup iSCSI initiator to target
2239# $1 target hostname
2240#
2241function iscsi_isetup
2242{
2243	# check svc:/network/iscsi_initiator:default state, try to enable it
2244	# if the state is not ON
2245	typeset ISCSII_FMRI="svc:/network/iscsi_initiator:default"
2246	if [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) ]]; then
2247		log_must $SVCADM enable $ISCSII_FMRI
2248
2249		typeset -i retry=20
2250		while [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) && \
2251			( $retry -ne 0 ) ]]
2252		do
2253			(( retry = retry - 1 ))
2254			$SLEEP 1
2255		done
2256
2257		if [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) ]]; then
2258			log_fail "$ISCSII_FMRI service can not be enabled!"
2259		fi
2260	fi
2261
2262	log_must $ISCSIADM add discovery-address $(getipbyhost $1)
2263	log_must $ISCSIADM modify discovery --sendtargets enable
2264	log_must $DEVFSADM -i iscsi
2265}
2266
2267#
2268# Check whether iscsi parameter is set as remote
2269#
2270# return 0 if iscsi is set as remote, otherwise 1
2271#
2272function check_iscsi_remote
2273{
2274	if [[ $iscsi == "remote" ]] ; then
2275		return 0
2276	else
2277		return 1
2278	fi
2279}
2280
2281#
2282# Check if a volume is a valid iSCSI target
2283# $1 volume name
2284# return 0 on success, otherwise return 1
2285#
2286function is_iscsi_target
2287{
2288	typeset dataset=$1
2289	typeset target targets
2290
2291	[[ -z $dataset ]] && return 1
2292
2293	targets=$($ISCSITADM list target | $GREP "Target:" | $AWK '{print $2}')
2294	[[ -z $targets ]] && return 1
2295
2296	for target in $targets; do
2297		[[ $dataset == $target ]] && return 0
2298	done
2299
2300	return 1
2301}
2302
2303#
2304# Get the iSCSI name of a target
2305# $1 target name
2306#
2307function iscsi_name
2308{
2309	typeset target=$1
2310	typeset name
2311
2312	[[ -z $target ]] && log_fail "No parameter."
2313
2314	if ! is_iscsi_target $target ; then
2315		log_fail "Not a target."
2316	fi
2317
2318	name=$($ISCSITADM list target $target | $GREP "iSCSI Name:" \
2319		| $AWK '{print $2}')
2320
2321	$ECHO $name
2322}
2323
2324#
2325# check svc:/system/iscsitgt:default state, try to enable it if the state
2326# is not ON
2327#
2328function iscsitgt_setup
2329{
2330	log_must $RM -f $ISCSITGTFILE
2331	if [[ "ON" == $($SVCS -H -o sta $ISCSITGT_FMRI) ]]; then
2332		log_note "iscsitgt is already enabled"
2333		return
2334	fi
2335
2336	log_must $SVCADM enable -t $ISCSITGT_FMRI
2337
2338	typeset -i retry=20
2339	while [[ "ON" != $($SVCS -H -o sta $ISCSITGT_FMRI) && \
2340		( $retry -ne 0 ) ]]
2341	do
2342		$SLEEP 1
2343		(( retry = retry - 1 ))
2344	done
2345
2346	if [[ "ON" != $($SVCS -H -o sta $ISCSITGT_FMRI) ]]; then
2347		log_fail "$ISCSITGT_FMRI service can not be enabled!"
2348	fi
2349
2350	log_must $TOUCH $ISCSITGTFILE
2351}
2352
2353#
2354# set DISABLED state of svc:/system/iscsitgt:default
2355# which is the most suitable state if $ISCSITGTFILE exists
2356#
2357function iscsitgt_cleanup
2358{
2359	if [[ -e $ISCSITGTFILE ]]; then
2360		log_must $SVCADM disable $ISCSITGT_FMRI
2361		log_must $RM -f $ISCSITGTFILE
2362	fi
2363}
2364
2365#
2366# Close iSCSI initiator to target
2367# $1 target hostname
2368#
2369function iscsi_iclose
2370{
2371	log_must $ISCSIADM modify discovery --sendtargets disable
2372	log_must $ISCSIADM remove discovery-address $(getipbyhost $1)
2373	$DEVFSADM -Cv
2374}
2375
2376#
2377# Get the available ZFS compression options
2378# $1 option type zfs_set|zfs_compress
2379#
2380function get_compress_opts
2381{
2382	typeset COMPRESS_OPTS
2383	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2384			gzip-6 gzip-7 gzip-8 gzip-9"
2385
2386	if [[ $1 == "zfs_compress" ]] ; then
2387		COMPRESS_OPTS="on lzjb"
2388	elif [[ $1 == "zfs_set" ]] ; then
2389		COMPRESS_OPTS="on off lzjb"
2390	fi
2391	typeset valid_opts="$COMPRESS_OPTS"
2392	$ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1
2393	if [[ $? -eq 0 ]]; then
2394		valid_opts="$valid_opts $GZIP_OPTS"
2395	fi
2396	$ECHO "$valid_opts"
2397}
2398
2399#
2400# Check the subcommand/option is supported
2401#
2402function check_opt_support #command, option
2403{
2404	typeset command=$1
2405	typeset option=$2
2406
2407	if [[ -z $command ]]; then
2408		return 0
2409	elif [[ -z $option ]]; then
2410		eval "$ZFS 2>&1 | $GREP '$command' > /dev/null 2>&1"
2411	else
2412		eval "$ZFS $command 2>&1 | $GREP -- '$option' | \
2413			$GREP -v -- 'User-defined' > /dev/null 2>&1"
2414	fi
2415	return $?
2416}
2417
2418#
2419# Check the zpool subcommand/option is supported
2420#
2421function check_zpool_opt_support #command, option
2422{
2423	typeset command=$1
2424	typeset option=$2
2425
2426	if [[ -z $command ]]; then
2427		return 0
2428	elif [[ -z $option ]]; then
2429		eval "$ZPOOL 2>&1 | $GREP '$command' > /dev/null 2>&1"
2430	else
2431		eval "$ZPOOL $command 2>&1 | $GREP -- '$option' > /dev/null 2>&1"
2432	fi
2433	return $?
2434}
2435
2436#
2437# Verify zfs operation with -p option works as expected
2438# $1 operation, value could be create, clone or rename
2439# $2 dataset type, value could be fs or vol
2440# $3 dataset name
2441# $4 new dataset name
2442#
2443function verify_opt_p_ops
2444{
2445	typeset ops=$1
2446	typeset datatype=$2
2447	typeset dataset=$3
2448	typeset newdataset=$4
2449
2450	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2451		log_fail "$datatype is not supported."
2452	fi
2453
2454	# check parameters accordingly
2455	case $ops in
2456		create)
2457			newdataset=$dataset
2458			dataset=""
2459			if [[ $datatype == "vol" ]]; then
2460				ops="create -V $VOLSIZE"
2461			fi
2462			;;
2463		clone)
2464			if [[ -z $newdataset ]]; then
2465				log_fail "newdataset should not be empty" \
2466					"when ops is $ops."
2467			fi
2468			log_must datasetexists $dataset
2469			log_must snapexists $dataset
2470			;;
2471		rename)
2472			if [[ -z $newdataset ]]; then
2473				log_fail "newdataset should not be empty" \
2474					"when ops is $ops."
2475			fi
2476			log_must datasetexists $dataset
2477			log_mustnot snapexists $dataset
2478			;;
2479		*)
2480			log_fail "$ops is not supported."
2481			;;
2482	esac
2483
2484	# make sure the upper level filesystem does not exist
2485	if datasetexists ${newdataset%/*} ; then
2486		log_must $ZFS destroy -rRf ${newdataset%/*}
2487	fi
2488
2489	# without -p option, operation will fail
2490	log_mustnot $ZFS $ops $dataset $newdataset
2491	log_mustnot datasetexists $newdataset ${newdataset%/*}
2492
2493	# with -p option, operation should succeed
2494	log_must $ZFS $ops -p $dataset $newdataset
2495	if ! datasetexists $newdataset ; then
2496		log_fail "-p option does not work for $ops"
2497	fi
2498
2499	# when $ops is create or clone, redoing the operation should still return zero
2500	if [[ $ops != "rename" ]]; then
2501		log_must $ZFS $ops -p $dataset $newdataset
2502	fi
2503
2504	return 0
2505}
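
# Typical usage, with hypothetical dataset names (a sketch, not a contract):
#	verify_opt_p_ops create fs $TESTPOOL/new/dir/fs
#	verify_opt_p_ops rename fs $TESTPOOL/fs $TESTPOOL/new/dir/fs2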
2506
2507function get_disk_guid
2508{
2509	typeset diskname=$1
2510	lastcwd=$(pwd)
2511	cd /dev
2512	guid=$($ZDB -l ${diskname} | ${AWK} '/^    guid:/ {print $2}' | head -1)
2513	cd $lastcwd
2514	echo $guid
2515}
2516
2517#
2518# Get cachefile for a pool.
2519# Prints the cache file, if there is one.
2520# Returns 0 for a default zpool.cache, 1 for an explicit one, and 2 for none.
2521#
2522function cachefile_for_pool
2523{
2524	typeset pool=$1
2525
2526	cachefile=$(get_pool_prop cachefile $pool)
2527	[[ $? != 0 ]] && return 1
2528
2529	case "$cachefile" in
2530		none)	ret=2 ;;
2531		"-")
2532			ret=2
2533			for dir in /boot/zfs /etc/zfs; do
2534				if [[ -f "${dir}/zpool.cache" ]]; then
2535					cachefile="${dir}/zpool.cache"
2536					ret=0
2537					break
2538				fi
2539			done
2540			;;
2541		*)	ret=1;
2542	esac
2543	[[ $ret -eq 0 || $ret -eq 1 ]] && print "$cachefile"
2544	return $ret
2545}
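
# For illustration, with a hypothetical pool name:
#	cachefile=$(cachefile_for_pool tank)
# prints e.g. /boot/zfs/zpool.cache and returns 0 for a default cachefile,
# prints the explicit path and returns 1 for a custom one, and prints
# nothing and returns 2 when the pool was created with cachefile=none.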
2546
2547#
2548# Assert that the pool is in the appropriate cachefile.
2549#
2550function assert_pool_in_cachefile
2551{
2552	typeset pool=$1
2553
2554	cachefile=$(cachefile_for_pool $pool)
2555	[ $? -ne 0 ] && log_fail "ERROR: Cachefile not created for '$pool'?"
2556	log_must test -e "${cachefile}"
2557	log_must $ZDB -U ${cachefile} -C ${pool}
2558}
2559
2560#
2561# Get the zdb options given the cachefile state of the pool.
2562#
2563function zdb_cachefile_opts
2564{
2565	typeset pool=$1
2566	typeset vdevdir=$2
2567	typeset opts
2568
2569	if poolexists "$pool"; then
2570		cachefile=$(cachefile_for_pool $pool)
2571		typeset -i ret=$?
2572		case $ret in
2573			0)	opts="-C" ;;
2574			1)	opts="-U $cachefile -C" ;;
2575			2)	opts="-eC" ;;
2576			*)	log_fail "Unknown return '$ret'" ;;
2577		esac
2578	else
2579		opts="-eC"
2580		[[ -n "$vdevdir" ]] && opts="$opts -p $vdevdir"
2581	fi
2582	echo "$opts"
2583}
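
# For illustration: for an exported pool backed by vdevs under a
# hypothetical /tmp/vdevs, "zdb_cachefile_opts tank /tmp/vdevs" emits
# "-eC -p /tmp/vdevs"; for an imported pool using the default cachefile
# it emits just "-C".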
2584
2585#
2586# Get configuration of pool
2587# $1 pool name
2588# $2 config name
2589#
2590function get_config
2591{
2592	typeset pool=$1
2593	typeset config=$2
2594	typeset vdevdir=$3
2595	typeset alt_root
2596	typeset zdb_opts
2597
2598	zdb_opts=$(zdb_cachefile_opts $pool $vdevdir)
2599	value=$($ZDB $zdb_opts $pool | $GREP "$config:" | $AWK -F: '{print $2}')
2600	if [[ -n $value ]] ; then
2601		value=${value#\'}
2602		value=${value%\'}
2603	else
2604		return 1
2605	fi
2606	echo $value
2607
2608	return 0
2609}
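
# For illustration, with a hypothetical pool: "get_config tank pool_guid"
# runs $ZDB with the options chosen above and emits the pool_guid value
# from the cached configuration.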
2610
2611#
2612# Private function. Randomly select one of the items given as arguments.
2613#
2614# $1 count
2615# $2-n string
2616#
2617function _random_get
2618{
2619	typeset cnt=$1
2620	shift
2621
2622	typeset str="$@"
2623	typeset -i ind
2624	((ind = RANDOM % cnt + 1))
2625
2626	typeset ret=$($ECHO "$str" | $CUT -f $ind -d ' ')
2627	$ECHO $ret
2628}
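
# For illustration: "_random_get 3 a b c" emits one of "a", "b" or "c",
# chosen via $RANDOM.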
2629
2630#
2631# Randomly select one item from the arguments, or NONE (an empty string)
2632#
2633function random_get_with_non
2634{
2635	typeset -i cnt=$#
2636	((cnt += 1))
2637
2638	_random_get "$cnt" "$@"
2639}
2640
2641#
2642# Randomly select one item from the arguments, never returning NONE
2643#
2644function random_get
2645{
2646	_random_get "$#" "$@"
2647}
2648
2649#
2650# The function will generate a dataset name of at least the specified length
2651# $1, the length of the name
2652# $2, the base string to construct the name
2653#
2654function gen_dataset_name
2655{
2656	typeset -i len=$1
2657	typeset basestr="$2"
2658	typeset -i baselen=${#basestr}
2659	typeset -i iter=0
2660	typeset l_name=""
2661
2662	if (( len % baselen == 0 )); then
2663		(( iter = len / baselen ))
2664	else
2665		(( iter = len / baselen + 1 ))
2666	fi
2667	while (( iter > 0 )); do
2668		l_name="${l_name}$basestr"
2669
2670		(( iter -= 1 ))
2671	done
2672
2673	$ECHO $l_name
2674}
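
# For illustration: "gen_dataset_name 8 ab" emits "abababab" (exactly 8
# characters), while "gen_dataset_name 7 abc" emits "abcabcabc" (9
# characters), since the base string is only ever appended whole.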
2675
2676#
2677# Ensure that a given path has been synced, not just ZIL committed.
2678#
2679# XXX The implementation currently requires calling 'zpool history'.  On
2680#     FreeBSD, the sync(8) command (via $SYNC) calls zfs_sync() which just
2681#     does a zil_commit(), as opposed to a txg_wait_synced().  For things that
2682#     require writing to their final destination (e.g. for intentional
2683#     corruption purposes), zil_commit() is not good enough.
2684#
2685function force_sync_path # path
2686{
2687	typeset path="$1"
2688
2689	zfspath=$($DF $path 2>/dev/null | tail -1 | cut -d" " -f1 | cut -d/ -f1)
2690	[ -z "$zfspath" ] && return 1
2691	log_note "Force syncing ${zfspath} for ${path} ..."
2692	$ZPOOL history $zfspath >/dev/null 2>&1
2693}
2694
2695#
2696# Get cksum tuple of dataset
2697# $1 dataset name
2698#
2699# zdb output is like below
2700# " Dataset pool/fs [ZPL], ID 978, cr_txg 2277, 19.0K, 5 objects,
2701# rootbp [L0 DMU objset] 400L/200P DVA[0]=<0:1880c00:200>
2702# DVA[1]=<0:341880c00:200> fletcher4 lzjb LE contiguous birth=2292 fill=5
2703# cksum=989930ccf:4014fe00c83:da5e388e58b4:1f7332052252ac "
2704#
2705function datasetcksum
2706{
2707	typeset cksum
2708	$SYNC
2709	cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
2710		| $AWK -F= '{print $6}')
2711	$ECHO $cksum
2712}
2713
2714#
2715# Get cksum of file
2716# $1 file path
2717#
2718function checksum
2719{
2720	typeset cksum
2721	cksum=$($CKSUM $1 | $AWK '{print $1}')
2722	$ECHO $cksum
2723}
2724
2725#
2726# Get the given disk/slice state from the specified field of the pool
2727#
2728function get_device_state #pool disk field("", "spares","logs")
2729{
2730	typeset pool=$1
2731	typeset disk=${2#/dev/}
2732	disk=${disk#/dev/}
2733	disk=${disk#/dev/}
2734	typeset field=${3:-$pool}
2735
2736	state=$($ZPOOL status -v "$pool" 2>/dev/null | \
2737		$NAWK -v device=$disk -v pool=$pool -v field=$field \
2738		'BEGIN {startconfig=0; startfield=0; }
2739		/config:/ {startconfig=1}
2740		(startconfig==1)&&($1==field) {startfield=1; next;}
2741		(startfield==1)&&($1==device) {print $2; exit;}
2742		(startfield==1)&&(NF>=3)&&($(NF-1)=="was")&&($NF==device) {print $2; exit;}
2743		(startfield==1)&&($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2744	print $state
2745}
2746
2747
2748#
2749# print the filesystem type of the given directory
2750#
2751# $1 directory name
2752#
2753function get_fstype
2754{
2755	typeset dir=$1
2756
2757	if [[ -z $dir ]]; then
2758		log_fail "Usage: get_fstype <directory>"
2759	fi
2760
2761	$DF -T $dir | $AWK '{print $2}'
2762}
2763
2764#
2765# Given a disk, label it with VTOC regardless of what label was on the disk
2766# $1 disk
2767#
2768function labelvtoc
2769{
2770	typeset disk=$1
2771	if [[ -z $disk ]]; then
2772		log_fail "The disk name is unspecified."
2773	fi
2774	typeset label_file=$TMPDIR/labelvtoc.${TESTCASE_ID}
2775	typeset arch=$($UNAME -p)
2776
2777	if [[ $arch == "i386" ]]; then
2778		 $ECHO "label" > $label_file
2779		 $ECHO "0" >> $label_file
2780		 $ECHO "" >> $label_file
2781		 $ECHO "q" >> $label_file
2782		 $ECHO "q" >> $label_file
2783
2784		 $FDISK -B $disk >/dev/null 2>&1
2785		 # wait a while for fdisk to finish
2786		 $SLEEP 60
2787	elif [[ $arch == "sparc" ]]; then
2788		 $ECHO "label" > $label_file
2789		 $ECHO "0" >> $label_file
2790		 $ECHO "" >> $label_file
2791		 $ECHO "" >> $label_file
2792		 $ECHO "" >> $label_file
2793		 $ECHO "q" >> $label_file
2794	else
2795		log_fail "unknown arch type"
2796	fi
2797
2798	$FORMAT -e -s -d $disk -f $label_file
2799	typeset -i ret_val=$?
2800	$RM -f $label_file
2801	#
2802	# wait for the format to finish
2803	#
2804	$SLEEP 60
2805	if (( ret_val != 0 )); then
2806		log_fail "unable to label $disk as VTOC."
2807	fi
2808
2809	return 0
2810}
2811
2812#
2813# Detect if the given filesystem property is supported in this release
2814#
2815# 0	Yes, it is supported
2816# !0	No, it is not supported
2817#
2818function fs_prop_exist
2819{
2820	typeset prop=$1
2821
2822	if [[ -z $prop ]]; then
2823		log_fail "Usage: fs_prop_exist <property>"
2824
2825		return 1
2826	fi
2827
2828	#
2829	# If the property is a shortened column name,
2830	# convert it to the standard name
2831	#
2832	case $prop in
2833		avail)		prop=available		;;
2834		refer)		prop=referenced		;;
2835		volblock)	prop=volblocksize	;;
2836		compress)	prop=compression	;;
2837		rdonly)		prop=readonly		;;
2838		recsize)	prop=recordsize		;;
2839		reserv)		prop=reservation	;;
2840		refreserv)	prop=refreservation	;;
2841	esac
2842
2843	#
2844	# The zfs get output looks like the following
2845	#
2846
2847	#
2848	# The following properties are supported:
2849	#
2850	#	PROPERTY       EDIT  INHERIT   VALUES
2851	#
2852	#	available	NO	NO	<size>
2853	#	compressratio	NO	NO	<1.00x or higher if compressed>
2854	#	creation	NO	NO	<date>
2855	#	 ... ...
2856	#	zoned		YES	YES	on | off
2857	#
2858	# Sizes are specified in bytes with standard units such as K, M, G, etc.
2859	#
2860
2861	#
2862	# Start to extract property from the first blank line after 'PROPERTY'
2863	# and stop at the next blank line
2864	#
2865	$ZFS get 2>&1 | \
2866		$AWK '/PROPERTY/ {start=1; next}
2867			/Sizes/ {start=0}
2868		  	start==1 {print $1}' | \
2869		$GREP -w "$prop" > /dev/null 2>&1
2870
2871	return $?
2872}
2873
2874#
2875# Detect if the given pool property is supported in this release
2876#
2877# 0	Yes, it is supported
2878# !0	No, it is not supported
2879#
2880function pool_prop_exist
2881{
2882	typeset prop=$1
2883	if [[ -z $prop ]]; then
2884		log_fail "Usage: pool_prop_exist <property>"
2885
2886		return 1
2887	fi
2888	#
2889	# If the property is a shortened column name,
2890	# convert it to the standard name
2891	#
2892	case $prop in
2893		avail)		prop=available		;;
2894		cap)		prop=capacity		;;
2895		replace)	prop=autoreplace	;;
2896	esac
2897
2898	#
2899	# The zpool get output looks like the following
2900	#
2901
2902	# usage:
2903	#	get <"all" | property[,...]> <pool> ...
2904	#
2905	# the following properties are supported:
2906	#
2907	#	PROPERTY       EDIT  VALUES
2908	#
2909	#	available	NO	<size>
2910	#	capacity	NO	<size>
2911	#	guid		NO	<guid>
2912	#	health		NO	<state>
2913	#	size		NO	<size>
2914	#	used		NO	<size>
2915	#	altroot		YES	<path>
2916	#	autoreplace	YES	on | off
2917	#	bootfs		YES	<filesystem>
2918	#	cachefile       YES	<file> | none
2919	#	delegation      YES	on | off
2920	#	failmode	YES	wait | continue | panic
2921	#	version		YES	<version>
2922
2923	$ZPOOL get 2>&1 | \
2924		$AWK '/PROPERTY/ {start=1; next}
2925			start==1 {print $1}' | \
2926		$GREP -w "$prop" > /dev/null 2>&1
2927
2928	return $?
2929}
2930
2931#
2932# check if the system was installed as zfsroot or not
2933# return: 0 if true, non-zero otherwise
2934#
2935function is_zfsroot
2936{
2937	$DF -T / | $GREP -q zfs
2938}
2939
2940#
2941# get the root filesystem name if it's a zfsroot system.
2942#
2943# return: root filesystem name
2944function get_rootfs
2945{
2946	typeset rootfs=""
2947	rootfs=$($MOUNT | $AWK '$3 == "/" && $4~/zfs/ {print $1}')
2948	if [[ -z "$rootfs" ]]; then
2949		log_fail "Cannot get rootfs"
2950	fi
2951	$ZFS list $rootfs > /dev/null 2>&1
2952	if (( $? == 0 )); then
2953		$ECHO $rootfs
2954	else
2955		log_fail "This is not a zfsroot system."
2956	fi
2957}
2958
2959#
2960# get the rootfs's pool name
2961# return:
2962#       rootpool name
2963#
2964function get_rootpool
2965{
2966	typeset rootfs=""
2967	typeset rootpool=""
2968	rootfs=$(get_rootfs)
2969	rootpool=`$ECHO $rootfs | $AWK -F\/ '{print $1}'`
2970	echo $rootpool
2971}
2972
2973#
2974# Get the sub string from specified source string
2975#
2976# $1 source string
2977# $2 start position. Count from 1
2978# $3 offset
2979#
2980function get_substr #src_str pos offset
2981{
2982	typeset pos offset
2983
2984	$ECHO $1 | \
2985		$NAWK -v pos=$2 -v offset=$3 '{print substr($0, pos, offset)}'
2986}
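
# For illustration: "get_substr zfstest 2 3" emits "fst", since awk's
# substr() counts positions starting at 1.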
2987
2988#
2989# Get the directory path of a given device
2990#
2991function get_device_dir #device
2992{
2993	typeset device=$1
2994
2995	$ECHO "/dev"
2996}
2997
2998#
2999# Get the package name
3000#
3001function get_package_name
3002{
3003	typeset dirpath=${1:-$STC_NAME}
3004
3005	print "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
3006}
3007
3008#
3009# Get the number of words in a string separated by white space
3010#
3011function get_word_count
3012{
3013	$ECHO $1 | $WC -w
3014}
3015
3016#
3017# Verify that the required number of disks is given
3018#
3019function verify_disk_count
3020{
3021	typeset -i min=${2:-1}
3022
3023	typeset -i count=$(get_word_count "$1")
3024
3025	if (( count < min )); then
3026		atf_skip "A minimum of $min disks is required to run." \
3027			" You specified $count disk(s)"
3028	fi
3029}
3030
3031#
3032# Verify that vfs.zfs.vol.recursive is set, so pools can be created using zvols
3033# as backing stores.
3034#
3035function verify_zvol_recursive
3036{
3037	if [ "`sysctl -n vfs.zfs.vol.recursive`" -ne 1 ]; then
3038		atf_skip "Recursive ZVOLs not enabled"
3039	fi
3040}
3041
3042#
3043# Map a disk/slice number suffix (sN) to the corresponding BSD device path
3044#
3045function bsddevmap
3046{
3047	typeset arg=$1
3048	echo $arg | egrep "s[0-9]$" > /dev/null 2>&1
3049	if [ $? -eq 0 ]
3050	then
3051		n=`echo $arg| wc -c`
3052		set -A map a b c d e f g h i j
3053		s=`echo $arg | cut -c $((n-1))`
3054		arg=${arg%s[0-9]}${map[$s]}
3055	fi
3056	echo $arg
3057}
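
# For illustration: "bsddevmap da0s2" emits "da0c", since slice 2 maps to
# partition letter "c"; a name without an s[0-9] suffix passes through
# unchanged.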
3058
3059#
3060# Get the name of the snapshot directory.  Traditionally .zfs/snapshot
3061#
3062function get_snapdir_name
3063{
3064	echo ".zfs/snapshot"
3065}
3066
3067#
3068# Unmount all ZFS filesystems except for those that are in the KEEP variable
3069#
3070function unmount_all_safe
3071{
3072	echo $(all_pools) | \
3073		$XARGS -n 1 $ZFS list -H -o name -t all -r | \
3074		$XARGS -n 1 $ZFS unmount
3075}
3076
3077#
3078# Return the highest pool version that this OS can create
3079#
3080function get_zpool_version
3081{
3082	# We assume output from zpool upgrade -v of the form:
3083	#
3084	# This system is currently running ZFS version 2.
3085	# .
3086	# .
3087	typeset ZPOOL_VERSION=$($ZPOOL upgrade -v | $HEAD -1 | \
3088		$AWK '{print $NF}' | $SED -e 's/\.//g')
3089	# Starting with version 5000, the output format changes to:
3090	# This system supports ZFS pool feature flags.
3091	# .
3092	# .
3093	if [[ $ZPOOL_VERSION = "flags" ]]; then
3094		ZPOOL_VERSION=5000
3095	fi
3096	echo $ZPOOL_VERSION
3097}
3098
3099# Ensures that zfsd is running, starting it if necessary.  Every test that
3100# interacts with zfsd must call this at startup.  This is intended primarily
3101# to eliminate interference from outside the test suite.
3102function ensure_zfsd_running
3103{
3104	if ! service zfsd status > /dev/null 2>&1; then
3105		service zfsd start || service zfsd onestart
3106		service zfsd status > /dev/null 2>&1 ||
3107			log_unsupported "Test requires zfsd"
3108	fi
3109}
3110
3111# Temporarily stops ZFSD, because it can interfere with some tests.  If this
3112# function is used, then restart_zfsd _must_ be called in the cleanup routine.
3113function stop_zfsd
3114{
3115	$RM -f $TMPDIR/.zfsd_enabled_during_stf_zfs_tests
3116	if [[ -n "$ZFSD" && -x "$ZFSD" ]]; then
3117		if /etc/rc.d/zfsd status > /dev/null; then
3118			log_note "Stopping zfsd"
3119			$TOUCH $TMPDIR/.zfsd_enabled_during_stf_zfs_tests
3120			/etc/rc.d/zfsd stop || /etc/rc.d/zfsd onestop
3121		fi
3122	fi
3123}
3124
3125# Restarts zfsd after it has been stopped by stop_zfsd.  Restarts zfsd
3126# only if it was running at the time stop_zfsd was called.
3127function restart_zfsd
3128{
3129	if [[ -f $TMPDIR/.zfsd_enabled_during_stf_zfs_tests ]]; then
3130		log_note "Restarting zfsd"
3131		/etc/rc.d/zfsd start || /etc/rc.d/zfsd onestart
3132	fi
3133	$RM -f $TMPDIR/.zfsd_enabled_during_stf_zfs_tests
3134}
3135
3136#
3137# Using the given <vdev>, obtain the value of the property <propname> for
3138# the given <tvd> identified by numeric id.
3139#
3140function get_tvd_prop # vdev tvd propname
3141{
3142	typeset vdev=$1
3143	typeset -i tvd=$2
3144	typeset propname=$3
3145
3146	$ZDB -l $vdev | $AWK -v tvd=$tvd -v prop="${propname}:" '
3147		BEGIN { start = 0; }
3148		/^        id:/ && ($2==tvd) { start = 1; next; }
3149		(start==0) { next; }
3150		/^        [a-z]+/ && ($1==prop) { print $2; exit; }
3151		/^        children/ { exit; }
3152		'
3153}
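
# For illustration, with a hypothetical vdev: "get_tvd_prop /dev/da0 0 ashift"
# reads the label on /dev/da0 and emits the ashift recorded for toplevel
# vdev 0, e.g. 9.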
3154
3155#
3156# Convert a DVA into a physical block address, printed in 512-byte blocks.
3157# This takes the usual printed form, in which offsets are left shifted so
3158# they represent bytes rather than the native sector count.
3159#
3160function dva_to_block_addr # dva
3161{
3162	typeset dva=$1
3163
3164	typeset offcol=$(echo $dva | cut -f2 -d:)
3165	typeset -i offset="0x${offcol}"
3166	# First add 4MB to skip the boot blocks and first two vdev labels,
3167	# then convert to 512 byte blocks (for use with dd).  Note that this
3168	# differs from simply adding 8192 blocks, since the input offset is
3169	# given in bytes and has the actual ashift baked in.
3170	(( offset += 4*1024*1024 ))
3171	(( offset >>= 9 ))
3172	echo "$offset"
3173}
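
# Worked example, with a hypothetical DVA: for "0:1b412600:200" the offset
# column is 0x1b412600 = 457254400 bytes; adding the 4MB pad gives
# 461448704, and shifting right by 9 yields 901267 512-byte blocks.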
3174
3175#
3176# Convert a RAIDZ DVA into a physical block address.  This has the same
3177# output as dva_to_block_addr (number of blocks from beginning of device), but
3178# is more complicated due to RAIDZ.  ashift is normally always 9, but RAIDZ
3179# uses the actual tvd ashift instead.  Furthermore, the number of vdevs changes
3180# the actual block for each device.
3181#
3182function raidz_dva_to_block_addr # dva ncols ashift
3183{
3184	typeset dva=$1
3185	typeset -i ncols=$2
3186	typeset -i ashift=$3
3187
3188	typeset -i offset=0x$(echo $dva | cut -f2 -d:)
3189	(( offset >>= ashift ))
3190
3191	typeset -i ioff=$(( (offset + ncols - 1) / ncols  ))
3192
3193	# Now add the front 4MB and return.
3194	(( ioff += ( 4194304 >> $ashift ) ))
3195	echo "$ioff"
3196}
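
# Worked example, with hypothetical values: for DVA "0:2400:200" on a 3-wide
# raidz with ashift 9, 0x2400 = 9216 bytes >> 9 = 18 sectors; the per-column
# offset is (18 + 3 - 1) / 3 = 6, and adding the 4MB pad (8192 sectors at
# ashift 9) gives 8198.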
3197
3198#
3199# Return the vdevs for the given toplevel vdev number.
3200# Child vdevs will only be included if they are ONLINE.  Output format:
3201#
3202#   <toplevel vdev type> <nchildren> <child1>[:<child2> ...]
3203#
3204# Valid toplevel vdev types are mirror, raidz[1-3], leaf (which can be a
3205# disk or a file).  Note that 'nchildren' can be larger than the number of
3206# returned children; it represents the number of children regardless of how
3207# many are actually online.
3208#
3209function vdevs_for_tvd # pool tvd
3210{
3211	typeset pool=$1
3212	typeset -i tvd=$2
3213
3214	$ZPOOL status $pool | $AWK -v want_tvd=$tvd '
3215		BEGIN {
3216			 start = 0; tvd = -1; lvd = -1;
3217			 type = "UNKNOWN"; disks = ""; disk = "";
3218			 nchildren = 0;
3219		}
3220		/NAME.*STATE/ { start = 1; next; }
3221		(start==0) { next; }
3222
3223		(tvd > want_tvd) { exit; }
3224		END { print type " " nchildren " " disks; }
3225
3226		length(disk) > 0 {
3227			if (length(disks) > 0) { disks = disks " "; }
3228			if (substr(disk, 1, 1) == "/") {
3229				disks = disks disk;
3230			} else {
3231				disks = disks "/dev/" disk;
3232			}
3233			disk = "";
3234		}
3235
3236		/^\t(spares|logs)/ { tvd = want_tvd + 1; next; }
3237		/^\t  (mirror|raidz[1-3])-[0-9]+/ {
3238			tvd += 1;
3239			if (tvd == want_tvd) { type = substr($1, 1, 6); }
3240			next;
3241		}
3242		/^\t  [\/A-Za-z]+/ {
3243			tvd += 1;
3244			if (tvd == want_tvd) {
3245				(( nchildren += 1 ))
3246				type = "leaf";
3247				if ($2 == "ONLINE") { disk = $1; }
3248			}
3249			next;
3250		}
3251
3252		(tvd < want_tvd) { next; }
3253
3254		/^\t    spare-[0-9]+/ { next; }
3255		/^\t      [\/A-Za-z]+/ {
3256			(( nchildren += 1 ))
3257			if ($2 == "ONLINE") { disk = $1; }
3258			next;
3259		}
3260
3261		/^\t    [\/A-Za-z]+/ {
3262			(( nchildren += 1 ))
3263			if ($2 == "ONLINE") { disk = $1; }
3264			next;
3265		}
3266		'
3267}
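
# For illustration, with hypothetical devices: a two-way mirror tvd with one
# child offline might emit "mirror 2 /dev/da1", while a single file-backed
# tvd might emit "leaf 1 /tmp/vdev0".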
3268
3269#
3270# Get a vdev path, ashift & offset for a given pool/dataset and DVA.
3271# If desired, can also select the toplevel vdev child number.
3272#
3273function dva_to_vdev_ashift_off # pool/dataset dva [leaf_vdev_num]
3274{
3275	typeset poollike=$1
3276	typeset dva=$2
3277	typeset -i leaf_vdev_num=$3
3278
3279	# vdevs are normally 0-indexed while arguments are 1-indexed.
3280	(( leaf_vdev_num += 1 ))
3281
3282	# Strip any child datasets or snapshots.
3283	pool=$(echo $poollike | sed -e 's,[/@].*,,g')
3284	tvd=$(echo $dva | cut -d: -f1)
3285
3286	set -- $(vdevs_for_tvd $pool $tvd)
3287	log_debug "vdevs_for_tvd: $* <EOM>"
3288	tvd_type=$1; shift
3289	nchildren=$1; shift
3290
3291	lvd=$(eval echo \$$leaf_vdev_num)
3292	log_debug "type='$tvd_type' children='$nchildren' lvd='$lvd' dva='$dva'"
3293	case $tvd_type in
3294	raidz*)
3295		ashift=$(get_tvd_prop $lvd $tvd ashift)
3296		log_debug "raidz: ashift='${ashift}'"
3297		off=$(raidz_dva_to_block_addr $dva $nchildren $ashift)
3298		;;
3299	*)
3300		ashift=9
3301		off=$(dva_to_block_addr $dva)
3302		;;
3303	esac
3304	echo "${lvd}:${ashift}:${off}"
3305}
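
# For illustration, with hypothetical names:
#	dva_to_vdev_ashift_off tank/fs 0:2400:200
# might emit "/dev/da0:9:8210": the leaf vdev path, the ashift, and the
# 512-byte block offset suitable for dd.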
3306
3307#
3308# Get the DVA for the specified dataset's given filepath.
3309#
3310function file_dva # dataset filepath [level] [offset] [dva_num]
3311{
3312	typeset dataset=$1
3313	typeset filepath=$2
3314	typeset -i level=$3
3315	typeset -i offset=$4
3316	typeset -i dva_num=$5
3317
3318	typeset -li blksz=0
3319	typeset -li blknum=0
3320	typeset -li startoff
3321	typeset -li inode
3322
3323	eval `$STAT -s "$filepath"`
3324	inode="$st_ino"
3325
3326	# The inner match is for 'DVA[0]=<0:1b412600:200>', in which the
3327	# text surrounding the actual DVA is a fixed size with 8 characters
3328	# before it and 1 after.
3329	$ZDB -P -vvvvv $dataset $inode | \
3330	    $AWK -v level=${level} -v dva_num=${dva_num} '
3331		BEGIN { stage = 0; }
3332		(stage == 0) && ($1=="Object") { stage = 1; next; }
3333
3334		(stage == 1) {
3335			print $3 " " $4;
3336			stage = 2; next;
3337		}
3338
3339		(stage == 2) && /^Indirect blocks/ { stage=3; next; }
3340		(stage < 3) { next; }
3341
3342		match($2, /L[0-9]/) {
3343			if (substr($2, RSTART+1, RLENGTH-1) != level) { next; }
3344		}
3345		match($3, /DVA\[.*>/) {
3346			dva = substr($3, RSTART+8, RLENGTH-9);
3347			if (substr($3, RSTART+4, 1) == dva_num) {
3348				print $1 " " dva;
3349			}
3350		}
3351		' | \
3352	while read line; do
3353		log_debug "params='$blksz/$blknum/$startoff' line='$line'"
3354		if (( blksz == 0 )); then
3355			typeset -i iblksz=$(echo $line | cut -d " " -f1)
3356			typeset -i dblksz=$(echo $line | cut -d " " -f2)
3357
3358			# Calculate the actual desired block starting offset.
3359			if (( level > 0 )); then
3360				typeset -i nbps_per_level
3361				typeset -i indsz
3362				typeset -i i=0
3363
3364				(( nbps_per_level = iblksz / 128 ))
3365				(( blksz = dblksz ))
3366				for (( i = 0; $i < $level; i++ )); do
3367					(( blksz *= nbps_per_level ))
3368				done
3369			else
3370				blksz=$dblksz
3371			fi
3372
3373			(( blknum = offset / blksz ))
3374			(( startoff = blknum * blksz ))
3375			continue
3376		fi
3377
3378		typeset lineoffstr=$(echo $line | cut -d " " -f1)
3379		typeset -i lineoff=$(printf "%d" "0x${lineoffstr}")
3380		typeset dva="$(echo $line | cut -d " " -f2)"
3381		log_debug "str='$lineoffstr' lineoff='$lineoff' dva='$dva'"
3382		if [[ -n "$dva" ]] && (( lineoff == startoff )); then
3383			echo $line | cut -d " " -f2
3384			return 0
3385		fi
3386	done
3387	return 1
3388}
3389
3390#
3391# Corrupt the given dataset's filepath file.  This will obtain the first
3392# level 0 block's DVA and scribble random bits on it.
3393#
3394function corrupt_file # dataset filepath [leaf_vdev_num]
3395{
3396	typeset dataset=$1
3397	typeset filepath=$2
3398	typeset -i leaf_vdev_num="$3"
3399
3400	dva=$(file_dva $dataset $filepath)
3401	[ $? -ne 0 ] && log_fail "ERROR: Can't find file $filepath on $dataset"
3402
3403	vdoff=$(dva_to_vdev_ashift_off $dataset $dva $leaf_vdev_num)
3404	vdev=$(echo $vdoff | cut -d: -f1)
3405	ashift=$(echo $vdoff | cut -d: -f2)
3406	off=$(echo $vdoff | cut -d: -f3)
3407	blocksize=$(( 1 << $ashift ))
3408
3409	log_note "Corrupting ${dataset}'s $filepath on $vdev at DVA $dva with ashift $ashift"
3410	log_must $DD if=/dev/urandom bs=$blocksize of=$vdev seek=$off count=1 conv=notrunc
3411}
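
# Typical usage, with hypothetical names: corrupt a file, then expect a
# scrub to report checksum errors:
#	log_must corrupt_file $TESTPOOL /$TESTPOOL/testfile
#	log_must $ZPOOL scrub $TESTPOOL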
3412
3413#
3414# Given a number of files, this function will iterate through
3415# the loop creating the specified number of files, whose names
3416# will start with <basename>.
3417#
3418# The <data> argument is special: it can be "ITER", in which case
3419# the -d argument will be the value of the current iteration.  It
3420# can be 0, in which case it will always be 0.  Otherwise, it will
3421# always be the given value.
3422#
3423# If <snapbase> is specified, a snapshot will be taken using the
3424# argument as the snapshot basename.
3425#
3426function populate_dir # basename num_files write_count blocksz data snapbase
3427{
3428	typeset basename=$1
3429	typeset -i num_files=$2
3430	typeset -i write_count=$3
3431	typeset -i blocksz=$4
3432	typeset -i i
3433	typeset data=$5
3434	typeset snapbase="$6"
3435
3436	log_note "populate_dir: data='$data'"
3437	for (( i = 0; i < num_files; i++ )); do
3438		case "$data" in
3439		0)	d=0	;;
3440		ITER)	d=$i ;;
3441		*)	d=$data	;;
3442		esac
3443
3444		log_must $FILE_WRITE -o create -c $write_count \
3445		    -f ${basename}.$i -b $blocksz -d $d
3446
3447		[ -n "$snapbase" ] && log_must $ZFS snapshot ${snapbase}.${i}
3448	done
3449}
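
# Typical usage, with hypothetical values: create 5 files of 10 blocks of
# 8192 bytes each, write the iteration number as the data byte, and take a
# snapshot after each file:
#	populate_dir $TESTDIR/file 5 10 8192 ITER $TESTPOOL/$TESTFS@snap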
3450
3451# Reap all children registered in $child_pids.
3452function reap_children
3453{
3454	[ -z "$child_pids" ] && return
3455	for wait_pid in $child_pids; do
3456		log_must $KILL $wait_pid
3457	done
3458	child_pids=""
3459}
3460
3461# Busy a path.  Expects to be reaped via reap_children.  Tries to run as
3462# long and slowly as possible.  [num] is taken as a hint; if such a file
3463# already exists a different one will be chosen.
3464function busy_path # <path> [num]
3465{
3466	typeset busypath=$1
3467	typeset -i num=$2
3468
3469	while :; do
3470		busyfile="$busypath/busyfile.${num}"
3471		[ ! -f "$busyfile" ] && break || (( num += 1 ))
3472	done
3473
3474	cmd="$DD if=/dev/urandom of=$busyfile bs=512"
3475	( cd $busypath && $cmd ) &
3476	typeset pid=$!
3477	$SLEEP 1
3478	log_must $PS -p $pid
3479	child_pids="$child_pids $pid"
3480}
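
# Typical usage, with a hypothetical path: "busy_path $TESTDIR 0" keeps a
# writer alive inside $TESTDIR so that e.g. an unmount is expected to fail;
# call reap_children afterwards to release it.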
3481