# vim: filetype=sh
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
#
# ident	"@(#)libtest.kshlib	1.15	09/08/06 SMI"
#

. ${STF_SUITE}/include/logapi.kshlib

ZFS=${ZFS:-/sbin/zfs}
ZPOOL=${ZPOOL:-/sbin/zpool}
os_name=`uname -s`

# Determine if a test has the necessary requirements to run

function test_requires
{
        integer unsupported=0
        unsupported_list=""
        until [[ $# -eq 0 ]]; do
                var_name=$1
                cmd=$(eval echo \$${1})
                if [[ -z "$cmd" ]] ; then
                        print $var_name is not set
                        unsupported_list="$var_name $unsupported_list"
                        ((unsupported=unsupported+1))
                fi
                shift
        done
        if [[ $unsupported -gt 0 ]] ; then
                log_unsupported "$unsupported_list commands are unsupported"
        else
                log_note "All commands are supported"
        fi
}

# Determine whether a dataset is mounted
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
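#
# Example (illustrative; assumes the suite's $TESTPOOL/$TESTFS exists):
#   ismounted $TESTPOOL/$TESTFS && log_note "dataset is mounted"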

function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			if [[ "$1" == "/"* ]] ; then
				for out in $($ZFS mount | $AWK '{print $2}') ; do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $($ZFS mount | $AWK '{print $1}') ; do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			# a = device, b = "on", c = "mount point", d = flags
			$MOUNT | $GREP $fstype | while read a b c d
			do
				[[ "$1" == "$a" || "$1" == "$c" ]] && return 0
			done
		;;
	esac

	return 1
}

# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs

function mounted
{
	ismounted $1 $2
	(( $? == 0 )) && return 0
	return 1
}

# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs

function unmounted
{
	ismounted $1 $2
	(( $? == 1 )) && return 0
	return 1
}

# split line on ","
#
# $1 - line to split
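#
# Example (illustrative): splitline "c0t0d0,c0t1d0" prints "c0t0d0 c0t1d0"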

function splitline
{
	$ECHO $1 | $SED "s/,/ /g"
}

function default_setup
{
	default_setup_noexit "$@"

	log_pass
}

#
# Given a list of disks, set up storage pools and datasets.
#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3

	if is_global_zone; then
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
		log_must $ZPOOL create -f $TESTPOOL $disklist
	else
		reexport_pool
	fi

	$RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	$MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		$RM -rf $TESTDIR1 || \
			log_unresolved Could not remove $TESTDIR1
		$MKDIR -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		log_must $ZFS create $TESTPOOL/$TESTCTR
		log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
		log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must $ZFS set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			log_must $ZFS create $TESTPOOL/$TESTVOL
		fi
	fi
}

#
# Given a list of disks, set up a storage pool, file system and
# a container.
#
function default_container_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true"
}

#
# Given a list of disks, set up a storage pool, file system
# and a volume.
#
function default_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "" "true"
}

#
# Given a list of disks, set up a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true" "true"
}

#
# Create a snapshot on a filesystem or volume. By default the snapshot
# is created on a filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
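# Example (illustrative): create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
#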
function create_snapshot
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."

	log_must $ZFS snapshot $fs_vol@$snap
}

#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
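# Example (illustrative):
#   create_clone $TESTPOOL/$TESTFS@$TESTSNAP $TESTPOOL/$TESTCLONE
#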
function create_clone   # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	[[ -z $snap ]] && \
		log_fail "Snapshot name is undefined."
	[[ -z $clone ]] && \
		log_fail "Clone name is undefined."

	log_must $ZFS clone $snap $clone
}

function default_mirror_setup
{
	default_mirror_setup_noexit $1 $2 $3

	log_pass
}

#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
#   $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
	readonly func="default_mirror_setup_noexit"
	typeset primary=$1
	typeset secondary=$2

	[[ -z $primary ]] && \
		log_fail "$func: No parameters passed"
	[[ -z $secondary ]] && \
		log_fail "$func: No secondary partition passed"
	[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
	log_must $ZPOOL create -f $TESTPOOL mirror $@
	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}

#
# Create a number ($1) of 2-way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted.
# @parameters: $1 the number of mirrors to create
#  $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
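#
# Example (illustrative; $DISK0..$DISK3 are provided by the test environment):
#   setup_mirrors 2 $DISK0 $DISK1 $DISK2 $DISK3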
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	while (( nmirrors > 0 )); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors
		log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		(( nmirrors = nmirrors - 1 ))
	done
}

#
# Create a number ($1) of raidz pools, each using a pair of disks named
# on the command line. These pools are *not* mounted.
# @parameters: $1 the number of pools to create
#  $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	while (( nraidzs > 0 )); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs
		log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		(( nraidzs = nraidzs - 1 ))
	done
}

#
# Destroy the configured testpool mirrors.
# The mirrors are of the form ${TESTPOOL}{number}.
# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
	default_cleanup_noexit

	log_pass
}

#
# Given a minimum of two disks, set up a storage pool and dataset for the
# raid-z.
# $1 the list of disks
#
function default_raidz_setup
{
	typeset disklist="$*"
	set -A disks $disklist

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
	log_must $ZPOOL create -f $TESTPOOL raidz $1 $2 $3
	log_must $ZFS create $TESTPOOL/$TESTFS
	log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}

#
# Common function used to clean up storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests don't
# impact the execution of the next set.

function default_cleanup
{
	default_cleanup_noexit

	log_pass
}

function all_pools
{
	cmd="$ZPOOL list -H -o name | $GREP 'testpool'"
	eval $cmd
}

#
# Returns 0 if the system contains any pools that must not be modified by the
# ZFS tests.
#
function other_pools_exist
{
	typeset pool_count=`$ZPOOL list -H | $GREP -v '^testpool' | $WC -l`
	[ "$pool_count" -ne 0 ]
}

function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		for pool in $(all_pools); do
			if safe_to_destroy_pool $pool; then
				destroy_pool $pool
			fi
		done
	else
		typeset fs=""
		for fs in $($ZFS list -H -o name \
		    | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must $ZFS destroy -Rf $fs
		done

		# Clean up here to avoid leaving garbage directories behind.
		for fs in $($ZFS list -H -o name \
		    ); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must $RM -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file system properties
		# to their default values.
		#
		for fs in $($ZFS list -H -o name \
		    ); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must $ZFS set reservation=none $fs
				log_must $ZFS set recordsize=128K $fs
				log_must $ZFS set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must $ZFS set checksum=on $fs
				fi
				log_must $ZFS set compression=off $fs
				log_must $ZFS set atime=on $fs
				log_must $ZFS set devices=off $fs
				log_must $ZFS set exec=on $fs
				log_must $ZFS set setuid=on $fs
				log_must $ZFS set readonly=off $fs
				log_must $ZFS set snapdir=hidden $fs
				log_must $ZFS set aclmode=groupmask $fs
				log_must $ZFS set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must $RM -rf $TESTDIR
}


#
# Common function used to clean up storage pools, file systems
# and containers.
#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must $RM -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}

#
# Common function used to clean up the snapshot of a file system or volume.
# By default, deletes the file system's snapshot.
#
# $1 snapshot name
#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
	fi

	#
	# The mountpoint reported by 'get_prop' may not match the real
	# mountpoint when the snapshot is unmounted. So first check
	# that this snapshot is actually mounted on the current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(( $? != 0 )) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must $ZFS destroy $snap
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must $RM -rf $mtpt
}

#
# Common function used to clean up a clone.
#
# $1 clone name
#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		log_fail "'$clone' does not exist."
	fi

	# Same reasoning as in destroy_snapshot.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(( $? != 0 )) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	log_must $ZFS destroy $clone
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must $RM -rf $mtpt
}

# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name

function snapexists
{
	$ZFS list -H -t snapshot "$1" > /dev/null 2>&1
	return $?
}

#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @param:
#	$1 dataset whose property is being set
# 	$2 property to set
#	$3 value to set property to
# @return:
#	0 if the property could be set.
#	non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (( $# < 3 )); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi
	typeset output=
	output=$($ZFS set $2=$3 $1 2>&1)
	typeset rv=$?
	if (( rv != 0 )); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}

#
# Assign suite defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
#   0 if the dataset has been altered.
#   1 if no pool name was passed in.
#   2 if the dataset could not be found.
#   3 if the dataset could not have its properties set.
#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	for confset in $($ZFS list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP && $WRAPPER != *"crypto"* ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}

#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
	(( $@ )) || log_fail $@
}

function wipe_partition_table #<whole_disk_name> [<whole_disk_name> ...]
{
	while [[ -n $* ]]; do
		typeset diskname=$1
		[ ! -e $diskname ] && log_fail "ERROR: $diskname doesn't exist"
		if gpart list ${diskname#/dev/} >/dev/null 2>&1; then
			wait_for 5 1 $GPART destroy -F $diskname
		else
			log_note "No GPT partitions detected on $diskname"
		fi
		log_must $GPART create -s gpt $diskname
		shift
	done
}

#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements, e.g. 100mb 3gb
#
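# Example (illustrative; adds a 1gb partition with index 1 on $DISK0):
#   set_partition 1 "" 1gb $DISK0
#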
function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	set -A devmap a b c d e f g h
	[[ -z $slicenum || -z $size || -z $disk ]] && \
		log_fail "The slice, size or disk name is unspecified."

	size=`$ECHO $size| sed s/mb/M/`
	size=`$ECHO $size| sed s/m/M/`
	size=`$ECHO $size| sed s/gb/G/`
	size=`$ECHO $size| sed s/g/G/`
	[[ -n $start ]] && start="-b $start"
	log_must $GPART add -t efi $start -s $size -i $slicenum $disk
	return 0
}

function get_disk_size #<disk>
{
	typeset disk=$1
	diskinfo $disk | awk '{print $3}'
}

function get_available_disk_size #<disk>
{
	typeset disk=$1
	raw_size=`get_disk_size $disk`
	(( available_size = raw_size * 95 / 100 ))
	echo $available_size
}

#
# Get the end cyl of the given slice
# #TODO: fix this to be GPT-compatible if we want to use the SMI WRAPPER.  This
# function is not necessary on FreeBSD
#
function get_endslice #<disk> <slice>
{
	log_fail "get_endslice has not been updated for GPT partitions"
}

#
# Get the first LBA that is beyond the end of the given partition
function get_partition_end #<disk> <partition_index>
{
	typeset disk=$1
	typeset partition_index=$2
	export partition_index
	$GPART show $disk | $AWK \
		'/^[ \t]/ && $3 ~ ENVIRON["partition_index"] {print $1 + $2}'
}


#
# Given a size, disk and total number of partitions, this function formats
# partitions 1 through the total partition count on the disk, each with the
# same specified size.
#
function partition_disk	#<part_size> <whole_disk_name>	<total_parts>
{
	typeset -i i=1
	typeset part_size=$1
	typeset disk_name=$2
	typeset total_parts=$3
	typeset cyl

	wipe_partition_table $disk_name
	while (( i <= $total_parts )); do
		set_partition $i "" $part_size $disk_name
		(( i = i+1 ))
	done
}

function size_of_file # fname
{
	typeset fname=$1
	sz=`stat -f '%z' $fname`
	[[ -z "$sz" ]] && log_fail "stat($fname) failed"
	$ECHO $sz
	return 0
}

#
# This function writes filenum files into each of up to dirnum
# directories until either $FILE_WRITE returns an error or the maximum
# number of files per directory has been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [blocksz] [num_writes] [data]
#
# Return value: 0 on success
#		non 0 on error
#
# Where :
#	destdir:    is the directory where everything is to be created under
#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
#	filenum:    the maximum number of files per subdirectory
#	blocksz:    number of bytes per block
#	num_writes: number of blocks to write
#	data:	    the data that will be written
#
#	E.g.
#	fill_fs /testdir 20 25 1024 256 0
#
# Note: blocksz * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum blocksz num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i blocksz=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i retval=0
	typeset -i dn=0 # current dir number
	typeset -i fn=0 # current file number
	while (( retval == 0 )); do
		(( dirnum >= 0 && dn >= dirnum )) && break
		typeset curdir=$destdir/$dn
		log_must $MKDIR -p $curdir
		for (( fn = 0; $fn < $filenum && $retval == 0; fn++ )); do
			log_cmd $FILE_WRITE -o create -f $curdir/$TESTFILE.$fn \
			    -b $blocksz -c $num_writes -d $data
			retval=$?
		done
		(( dn = dn + 1 ))
	done
	return $retval
}

#
# Simple function to get the specified property. If unable to
# get the property, logs a note and returns 1.
#
# Note property is in 'parsable' format (-p)
#
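# Example (illustrative): typeset used=$(get_prop used $TESTPOOL/$TESTFS)
#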
function get_prop # property dataset
{
	typeset prop_val
	typeset prop=$1
	typeset dataset=$2

	prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
	if [[ $? -ne 0 ]]; then
		log_note "Unable to get $prop property for dataset $dataset"
		return 1
	fi

	$ECHO $prop_val
	return 0
}

#
# Simple function to return the lesser of two values.
#
function min
{
	typeset first_arg=$1
	typeset second_arg=$2

	if (( first_arg < second_arg )); then
		$ECHO $first_arg
	else
		$ECHO $second_arg
	fi
	return 0
}

#
# Simple function to get the specified property of a pool. If unable to
# get the property, logs a note and returns 1.
#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \
			$AWK '{print $3}')
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			"$pool"
			return 1
		fi
	else
		log_note "Pool $pool does not exist."
		return 1
	fi

	$ECHO $prop_val
	return 0
}

# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name

function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	$ZPOOL list -H "$pool" > /dev/null 2>&1
	return $?
}

# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n  dataset name
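#
# Example (illustrative):
#   datasetexists $TESTPOOL/$TESTFS || log_fail "dataset is missing"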
function datasetexists
{
	if (( $# == 0 )); then
		log_note "No dataset name given."
		return 1
	fi

	while (( $# > 0 )); do
		$ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 || \
			return $?
		shift
	done

	return 0
}

# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n  dataset name
function datasetnonexists
{
	if (( $# == 0 )); then
		log_note "No dataset name given."
		return 1
	fi

	while (( $# > 0 )); do
		$ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 && \
			return 1
		shift
	done

	return 0
}

#
# Given a mountpoint, or a dataset name, determine if it is shared.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	for mtpt in `$SHARE | $AWK '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	typeset stat=$($SVCS -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}

#
# Given a mountpoint, determine if it is not shared.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
	typeset fs=$1

	is_shared $fs
	if (( $? == 0 )); then
		return 1
	fi

	return 0
}

#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
	typeset fs=$1

	is_shared $fs
	if (( $? == 0 )); then
		log_must $ZFS unshare $fs
	fi

	return 0
}

#
# Check the NFS server status and bring it online if needed.
#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only a real share operation can bring the NFS server
		# online permanently.
		#
		typeset dummy=$TMPDIR/dummy

		if [[ -d $dummy ]]; then
			log_must $RM -rf $dummy
		fi

		log_must $MKDIR $dummy
		log_must $SHARE $dummy

		#
		# Wait for the FMRI's status to settle. While in
		# transition an asterisk (*) is appended to the instance
		# state, and unsharing at that point would revert the
		# status to 'DIS' again.
		#
		# Wait at least 1 second.
		#
		log_must $SLEEP 1
		timeout=10
		while [[ $timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must $SLEEP 1

			(( timeout -= 1 ))
		done

		log_must $UNSHARE $dummy
		log_must $RM -rf $dummy
	fi

	log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
}

#
# Verify whether the calling process is in the global zone
#
# Return 0 if in the global zone, 1 in a non-global zone
#
function is_global_zone
{
	typeset cur_zone=$($ZONENAME 2>/dev/null)

	# Zones are not supported on FreeBSD.
	if [[ $os_name == "FreeBSD" ]]; then
		return 0
	fi

	if [[ $cur_zone != "global" ]]; then
		return 1
	fi
	return 0
}

#
# Verify whether a test is permitted to run from the
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both" (no limit)
#
# Return 0 if permitted; otherwise exit via log_unsupported
#
function verify_runnable # zone limit
{
	typeset limit=$1

	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				break
				;;
			local)  log_unsupported "Test is unable to run from \
					global zone."
				break
				;;
			*)      log_note "Warning: unknown limit $limit - use both."
				;;
		esac
	else
		case $limit in
			local|both)
				break
				;;
			global) log_unsupported "Test is unable to run from \
					local zone."
				break
				;;
			*)      log_note "Warning: unknown limit $limit - use both."
				;;
		esac

		reexport_pool
	fi

	return 0
}

# Return 0 if the pool is created successfully or already exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list
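#
# Example (illustrative; the disk variables come from the test environment):
#   create_pool $TESTPOOL mirror $DISK0 $DISK1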

function create_pool #pool devs_list
{
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	if poolexists $pool ; then
		destroy_pool $pool
	fi

	if is_global_zone ; then
		[[ -d /$pool ]] && $RM -rf /$pool
		log_must $ZPOOL create -f $pool $@
	fi

	return 0
}

# Return 0 if the pool is destroyed successfully; 1 if no pool name was
# given or the pool does not exist.
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.

function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			mtpt=$(get_prop mountpoint "$pool")
			log_must $ZPOOL destroy -f $pool

			[[ -d $mtpt ]] && \
				log_must $RM -rf $mtpt
		else
			log_note "Pool $pool does not exist, skipping destroy."
			return 1
		fi
	fi

	return 0
}

#
# Create file vdevs.
# By default this generates sparse vdevs 10GB in size, for performance.
#
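# Example (illustrative; the size can be overridden via VDEV_SIZE):
#   log_must create_vdevs $TMPDIR/vdev1 $TMPDIR/vdev2
#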
function create_vdevs # vdevs
{
	typeset vdsize=10G

	[ -n "$VDEV_SIZE" ] && vdsize=$VDEV_SIZE
	rm -f $@ || return 1
	truncate -s $vdsize $@
}

#
# First, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
# and a zvol device to the zone.
#
# $1 zone name
# $2 zone root directory prefix
# $3 zone ip
#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create a pool and 5 containers within it
	#
	[[ -d /$pool_name ]] && $RM -rf /$pool_name
	log_must $ZPOOL create -f $pool_name $DISKS
	while (( i < cntctr )); do
		log_must $ZFS create $pool_name/$prefix_ctr$i
		(( i += 1 ))
	done

	# create a zvol
	log_must $ZFS create -V 1g $pool_name/zone_zvol

	#
	# If the current system supports slog, add slog devices for the pool
	#
	typeset sdevs="$TMPDIR/sdev1 $TMPDIR/sdev2"
	log_must create_vdevs $sdevs
	log_must $ZPOOL add $pool_name log mirror $sdevs

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have its mountpoint set to 'legacy'
	# log_must $ZFS create $pool_name/zfs_filesystem
	# log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem

	[[ -d $zone_root ]] && \
		log_must $RM -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must $MKDIR -p -m 0700 $zone_root/$zone_name

	# Create the zone config file and configure the zone
	#
	typeset zone_conf=$TMPDIR/zone_conf.${TESTCASE_ID}
	$ECHO "create" > $zone_conf
	$ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
	$ECHO "set autoboot=true" >> $zone_conf
	i=0
	while (( i < cntctr )); do
		$ECHO "add dataset" >> $zone_conf
		$ECHO "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		$ECHO "end" >> $zone_conf
		(( i += 1 ))
	done

	# add our zvol to the zone
	$ECHO "add device" >> $zone_conf
	$ECHO "set match=/dev/zvol/$pool_name/zone_zvol" >> $zone_conf
	$ECHO "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# $ECHO "add fs" >> $zone_conf
	# $ECHO "set type=zfs" >> $zone_conf
	# $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
	# $ECHO "end" >> $zone_conf

	$ECHO "verify" >> $zone_conf
	$ECHO "commit" >> $zone_conf
	log_must $ZONECFG -z $zone_name -f $zone_conf
	log_must $RM -f $zone_conf

	# Install the zone
	$ZONEADM -z $zone_name install
	if (( $? == 0 )); then
		log_note "SUCCESS: $ZONEADM -z $zone_name install"
	else
		log_fail "FAIL: $ZONEADM -z $zone_name install"
	fi

	# Install sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	$ECHO "system_locale=C" > $sysidcfg
	$ECHO "terminal=dtterm" >> $sysidcfg
	$ECHO "network_interface=primary {" >> $sysidcfg
	$ECHO "hostname=$zone_name" >> $sysidcfg
	$ECHO "}" >> $sysidcfg
	$ECHO "name_service=NONE" >> $sysidcfg
	$ECHO "root_password=mo791xfZ/SFiw" >> $sysidcfg
	$ECHO "security_policy=NONE" >> $sysidcfg
	$ECHO "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must $ZONEADM -z $zone_name boot
}

#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	while (( i < cntctr )); do
		if (( i == 0 )); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must $ZFS mount $TESTPOOL
			fi
		else
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval $ZFS mount \$TESTPOOL$i
			fi
		fi
		(( i += 1 ))
	done
}

#
# Wait for something to return true, checked by the caller.
#
function wait_for_checked # timeout dt <method> [args...]
{
	typeset timeout=$1
	typeset dt=$2
	shift; shift
	typeset -i start=$(date '+%s')
	typeset -i endtime

	log_note "Waiting $timeout seconds (checked every $dt seconds) for: $*"
	((endtime = start + timeout))
	while :; do
		$*
		[ $? -eq 0 ] && return
		curtime=$(date '+%s')
		[ $curtime -gt $endtime ] && return 1
		sleep $dt
	done
	return 0
}

#
# Wait for something to return true.
#
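# Example (illustrative; polls every 1 second, up to 30 seconds, until
# the pool has finished scrubbing):
#   wait_for 30 1 is_pool_scrubbed $TESTPOOL
#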
function wait_for # timeout dt <method> [args...]
{
	typeset timeout=$1
	typeset dt=$2
	shift; shift

	wait_for_checked $timeout $dt $* || \
		log_fail "ERROR: Timed out waiting for: $*"
}

#
# Verify a given disk is online or offline
#
# Return 0 if the pool/disk matches the expected state, 1 otherwise
# stateexpr is a regex like ONLINE or REMOVED|UNAVAIL
#
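# Example (illustrative): check_state $TESTPOOL $DISK0 "ONLINE"
#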
function check_state # pool disk stateexpr
{
	typeset pool=$1
	typeset disk=${2#/dev/}
	typeset stateexpr=$3

	$ZPOOL status -v $pool | $GREP "$disk" \
	    | $EGREP -i "$stateexpr" > /dev/null 2>&1

	return $?
}

#
# Wait for a given disk to leave a state
#
function wait_for_state_exit
{
	typeset pool=$1
	typeset disk=$2
	typeset state=$3

	while check_state "$pool" "$disk" "$state"; do
		$SLEEP 1
	done
}

#
# Wait for a given disk to enter a state
#
function wait_for_state_enter
{
	typeset -i timeout=$1
	typeset pool=$2
	typeset disk=$3
	typeset state=$4

	log_note "Waiting up to $timeout seconds for $disk to become $state ..."
	for ((; $timeout > 0; timeout=$timeout-1)); do
		check_state $pool "$disk" "$state"
		[ $? -eq 0 ] && return
		$SLEEP 1
	done
	log_must $ZPOOL status $pool
	log_fail "ERROR: Disk $disk not marked as $state in $pool"
}

#
# Get the mountpoint of a snapshot
#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if [[ $dataset != *@* ]]; then
		log_fail "Invalid snapshot name '$dataset'."
	fi

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	if [[ -z $fs || -z $snap ]]; then
		log_fail "Invalid snapshot name '$dataset'."
	fi

	$ECHO $(get_prop mountpoint $fs)/$(get_snapdir_name)/$snap
}

function pool_maps_intact # pool
{
	typeset pool="$1"

	if ! $ZDB -bcv $pool; then
		return 1
	fi
	return 0
}

function filesys_has_zil # filesystem
{
	typeset filesys="$1"

	if ! $ZDB -ivv $filesys | $GREP "ZIL header"; then
		return 1
	fi
	return 0
}

#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="$TMPDIR/zdbout.${TESTCASE_ID}"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling $ZDB to verify filesystem '$filesys'"
	log_must $ZPOOL export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must $ZPOOL import $search_path $pool

	$ZDB -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: $ZDB -cudi $filesys"
		$CAT $zdbout
		log_fail "$ZDB detected errors with: '$filesys'"
	fi

	log_must $RM -rf $zdbout
}

#
# Given a pool, list all disks in the pool
#
function get_disklist # pool
{
	typeset disklist=""

	disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4 ) {print $1}' | \
		$GREP -v "\-\-\-\-\-" | \
		$EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$" )

	$ECHO $disklist
}

#
# Destroy all existing metadevices and state database
#
function destroy_metas
{
	typeset metad

	for metad in $($METASTAT -p | $AWK '{print $1}'); do
		log_must $METACLEAR -rf $metad
	done

	for metad in $($METADB | $CUT -f6 | $GREP dev | $UNIQ); do
		log_must $METADB -fd $metad
	done
}

# /**
#  This function kills a given list of processes after a time period. We use
#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
#  would be listed as FAIL, which we don't want: we're happy with stress tests
#  running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
# */
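#
# Example (illustrative; $pid1/$pid2 are background workers started by a test
# that handle SIGUSR1):
#   stress_timeout 600 $pid1 $pid2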
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes ($cpids). " \
		"This may take dozens of minutes, please be patient ..."
	log_must $SLEEP $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT}s stress timeout."
	typeset pid
	for pid in $cpids; do
		$PS -p $pid > /dev/null 2>&1
		if (( $? == 0 )); then
			log_must $KILL -USR1 $pid
		fi
	done
}

#
# Check whether the current OS supports a specified feature or not
#
# Return 0 if the current OS version is in the unsupported list, 1 otherwise
#
# $1 unsupported target OS versions
#
function check_version # <OS version>
{
	typeset unsupported_vers="$@"
	typeset ver
	typeset cur_ver=`$UNAME -r`

	for ver in $unsupported_vers; do
		[[ "$cur_ver" == "$ver" ]] && return 0
	done

	return 1
}

#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 if the pool/disk matches the expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "spares")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}

#
# Verify a given slog disk is inuse or avail
#
# Return 0 if the pool/disk matches the expected state, 1 otherwise
#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "logs")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}

#
# Verify a given vdev disk is inuse or avail
#
# Return 0 if the pool/disk matches the expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/}
	typeset state=$3

	if [[ $WRAPPER == *"smi"* ]]; then
		$ECHO $disk | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
		if (( $? == 0 )); then
			disk=${disk}s2
		fi
	fi

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}

#
# Check the output of 'zpool status -v <pool>'
# to see whether the content of <token> contains the specified <keyword>.
#
# Return 0 if it contains the keyword, 1 otherwise
#
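# Example (illustrative):
#   check_pool_status $TESTPOOL "errors" "No known data errors"
#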
function check_pool_status # pool token keyword
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3

	$ZPOOL status -v "$pool" 2>/dev/null | \
		$NAWK -v token="$token:" '($1==token) {print $0}' | \
		$GREP -i "$keyword" >/dev/null 2>&1

	return $?
}

function vdev_pool_error_count
{
	typeset errs=$1
	if [ -z "$2" ]; then
		test $errs -gt 0; ret=$?
	else
		test $errs -eq $2; ret=$?
	fi
	log_debug "vdev_pool_error_count: errs='$errs' \$2='$2' ret='$ret'"
	return $ret
}

#
# Generate a pool status error file suitable for pool_errors_from_file.
# The caller must dispose of the returned temporary file appropriately.
#
function pool_error_file # <pool>
{
	typeset pool="$1"

	typeset tmpfile=$TMPDIR/pool_status.${TESTCASE_ID}
	$ZPOOL status -x $pool > ${tmpfile}
	echo $tmpfile
}

#
# Evaluates <file> counting the number of errors.  If vdev specified, only
# that vdev's errors are counted.  Returns the total number.  <file> will be
# deleted on exit.
#
function pool_errors_from_file # <file> [vdev]
{
	typeset file=$1
	shift
	typeset checkvdev="$1"

	typeset line
	typeset -i fetchbegin=1
	typeset -i errnum=0
	typeset -i c_read=0
	typeset -i c_write=0
	typeset -i c_cksum=0

	cat ${file} | $EGREP -v "pool:" | while read line; do
		if (( $fetchbegin != 0 )); then
			$ECHO $line | $GREP "NAME" >/dev/null 2>&1
			(( $? == 0 )) && (( fetchbegin = 0 ))
			continue
		fi

		if [[ -n $checkvdev ]]; then
			$ECHO $line | $GREP $checkvdev >/dev/null 2>&1
			(( $? != 0 )) && continue
			c_read=`$ECHO $line | $AWK '{print $3}'`
			c_write=`$ECHO $line | $AWK '{print $4}'`
			c_cksum=`$ECHO $line | $AWK '{print $5}'`
			if [ $c_read != 0 ] || [ $c_write != 0 ] || \
			    [ $c_cksum != 0 ]
			then
				(( errnum = errnum + 1 ))
			fi
			break
		fi

		c_read=`$ECHO $line | $AWK '{print $3}'`
		c_write=`$ECHO $line | $AWK '{print $4}'`
		c_cksum=`$ECHO $line | $AWK '{print $5}'`
		if [ $c_read != 0 ] || [ $c_write != 0 ] || \
		    [ $c_cksum != 0 ]
		then
			(( errnum = errnum + 1 ))
		fi
	done

	rm -f $file
	echo $errnum
}

#
# Returns whether the vdev has the given number of errors.
# If the number is unspecified, any non-zero number returns true.
#
function vdev_has_errors # pool vdev [errors]
{
	typeset pool=$1
	typeset vdev=$2
	typeset tmpfile=$(pool_error_file $pool)
	log_note "Original pool status:"
	cat $tmpfile

	typeset -i errs=$(pool_errors_from_file $tmpfile $vdev)
	vdev_pool_error_count $errs $3
}

#
# Returns whether the pool has the given number of errors.
# If the number is unspecified, any non-zero number returns true.
#
function pool_has_errors # pool [errors]
{
	typeset pool=$1
	typeset tmpfile=$(pool_error_file $pool)
	log_note "Original pool status:"
	cat $tmpfile

	typeset -i errs=$(pool_errors_from_file $tmpfile)
	vdev_pool_error_count $errs $2
}

#
# Returns whether clearing $pool at $vdev (if given) succeeds.
#
function pool_clear_succeeds
{
	typeset pool="$1"
	typeset vdev=$2

	$ZPOOL clear $pool $vdev
	! pool_has_errors $pool
}

#
# Return whether the pool is healthy
#
function is_pool_healthy # pool
{
	typeset pool=$1

	typeset healthy_output="pool '$pool' is healthy"
	typeset real_output=$($ZPOOL status -x $pool)

	if [[ "$real_output" == "$healthy_output" ]]; then
		return 0
	else
		typeset -i ret
		$ZPOOL status -x $pool | $GREP "state:" | \
			$GREP "FAULTED" >/dev/null 2>&1
		ret=$?
		(( $ret == 0 )) && return 1
		typeset l_scan
		typeset errnum
		l_scan=$($ZPOOL status -x $pool | $GREP "scan:")
		l_scan=${l_scan##*"with"}
		errnum=$($ECHO $l_scan | $AWK '{print $1}')
		if [ "$errnum" != "0" ]; then
			return 1
		else
			return 0
		fi
	fi
}

#
# The following five functions are instances of check_pool_status():
#	is_pool_resilvering - to check if a resilver is in progress
#	is_pool_resilvered - to check if a resilver has completed
#	is_pool_scrubbing - to check if a scrub is in progress
#	is_pool_scrubbed - to check if a scrub has completed
#	is_pool_scrub_stopped - to check if a scrub has been stopped
#
function is_pool_resilvering #pool
{
	check_pool_status "$1" "scan" "resilver in progress"
	return $?
}

function is_pool_resilvered #pool
{
	check_pool_status "$1" "scan" "resilvered"
	return $?
}

function resilver_happened # pool
{
	typeset pool=$1
	is_pool_resilvering "$pool" || is_pool_resilvered "$pool"
}

function is_pool_scrubbing #pool
{
	check_pool_status "$1" "scan" "scrub in progress"
	return $?
}

function is_pool_scrubbed #pool
{
	check_pool_status "$1" "scan" "scrub repaired"
	return $?
}

function is_pool_scrub_stopped #pool
{
	check_pool_status "$1" "scan" "scrub canceled"
	return $?
}

function is_pool_state # pool state
{
	check_pool_status "$1" "state" "$2"
	return $?
}

#
# Erase the partition tables and destroy any zfs labels
#
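# Example (illustrative): cleanup_devices $DISK0 $DISK1
#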
function cleanup_devices #vdevs
{
	for device in $@; do
		# Labelclear must happen first, otherwise it may interfere
		# with the teardown/setup of GPT labels.
		$ZPOOL labelclear -f $device
		# Only wipe partition tables for arguments that are disks,
		# as opposed to slices (which are valid arguments here).
		if geom disk list | grep -qx "Geom name: ${device#/dev/}"; then
			wipe_partition_table $device
		fi
	done
	return 0
}

#
# Verify the rsh connectivity to each remote host in RHOSTS.
#
# Return 0 if remote host is accessible; otherwise 1.
# $1 remote host name
# $2 username
#
function verify_rsh_connect #rhost, username
{
	typeset rhost=$1
	typeset username=$2
	typeset rsh_cmd="$RSH -n"
	typeset cur_user=

	$GETENT hosts $rhost >/dev/null 2>&1
	if (( $? != 0 )); then
		log_note "$rhost cannot be found in the" \
			"administrative database."
		return 1
	fi

	$PING $rhost 3 >/dev/null 2>&1
	if (( $? != 0 )); then
		log_note "$rhost is not reachable."
		return 1
	fi

	if (( ${#username} != 0 )); then
		rsh_cmd="$rsh_cmd -l $username"
		cur_user="given user \"$username\""
	else
		cur_user="current user \"`$LOGNAME`\""
	fi

	if ! $rsh_cmd $rhost $TRUE; then
		log_note "$RSH to $rhost is not accessible" \
			"with $cur_user."
		return 1
	fi

	return 0
}

#
# Verify the remote host connection via rsh after rebooting
# $1 remote host
#
function verify_remote
{
	rhost=$1

	#
	# The following loop waits for the remote system to reboot.
	# Each iteration waits 150 seconds and there are 5 iterations
	# in total, so the timeout for the reboot is approximately
	# 12.5 minutes.
	#
	typeset -i count=0
	while ! verify_rsh_connect $rhost; do
		sleep 150
		(( count = count + 1 ))
		if (( count > 5 )); then
			return 1
		fi
	done
	return 0
}

#
# Replacement function for /usr/bin/rsh. This function wraps
# /usr/bin/rsh and additionally returns the execution status of the
# last command.
#
# $1 username passed to the -l option of /usr/bin/rsh
# $2 remote machine hostname
# $3... command string
#

function rsh_status
{
	typeset ruser=$1
	typeset rhost=$2
	typeset -i ret=0
	typeset cmd_str=""
	typeset rsh_str=""

	shift; shift
	cmd_str="$@"

	err_file=$TMPDIR/${rhost}.${TESTCASE_ID}.err
	if (( ${#ruser} == 0 )); then
		rsh_str="$RSH -n"
	else
		rsh_str="$RSH -n -l $ruser"
	fi

	$rsh_str $rhost /usr/local/bin/ksh93 -c "'$cmd_str; \
		print -u 2 \"status=\$?\"'" \
		>/dev/null 2>$err_file
	ret=$?
	if (( $ret != 0 )); then
		$CAT $err_file
		$RM -f $err_file
		log_fail "$RSH itself failed with exit code $ret..."
	fi

	ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
		$CUT -d= -f2)
	(( $ret != 0 )) && $CAT $err_file >&2

	$RM -f $err_file >/dev/null 2>&1
	return $ret
}

#
# Get the SUNWstc-fs-zfs package installation path in a remote host
# $1 remote host name
#
function get_remote_pkgpath
{
	typeset rhost=$1
	typeset pkgpath=""

	pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
			$CUT -d: -f2")

	$ECHO $pkgpath
}

#/**
# A function to find and locate free disks on a system or from given
# disks as the parameter.  Since the conversion to ATF, this function is
# superfluous; it is assumed that the user will supply an accurate list of
# disks to use.  So we just return the arguments.
#
# $@ given disks to find which are free
#
# @return a string containing the list of available disks
#*/
function find_disks
{
	(( first=0 ))
	for disk in $@; do
		[[ $first == 1 ]] && echo -n " "
		(( first=1 ))
		case $disk in
		/dev/*)	echo -n "$disk" ;;
		*)	echo -n "/dev/$disk" ;;
		esac
	done
}

# A function to set convenience variables for disks.
function set_disks
{
	set -A disk_array $(find_disks $DISKS)
	[[ -z "$DISK_ARRAY_LIMIT" ]] && typeset -i DISK_ARRAY_LIMIT=5

	export DISK=""
	typeset -i i=0
	while (( i < ${#disk_array[*]} && i <= $DISK_ARRAY_LIMIT )); do
		export DISK${i}="${disk_array[$i]}"
		DISKSARRAY="$DISKSARRAY ${disk_array[$i]}"
		(( i = i + 1 ))
	done
	export DISK_ARRAY_NUM=$i
	export DISKSARRAY
	export disk=$DISK0
}

#
# Add specified user to specified group
#
# $1 group name
# $2 user name
#
function add_user #<group_name> <user_name>
{
	typeset gname=$1
	typeset uname=$2

	if (( ${#gname} == 0 || ${#uname} == 0 )); then
		log_fail "group name or user name is not defined."
	fi

	# Check to see if the user exists.
	$ID $uname > /dev/null 2>&1 && return 0

	# Assign 1000 as the base uid
	typeset -i uid=1000
	while true; do
		typeset -i ret
		$USERADD -u $uid -g $gname -d /var/tmp/$uname -m $uname
		ret=$?
		case $ret in
			0) return 0 ;;
			# The uid is not unique
			65) ((uid += 1)) ;;
			*) return 1 ;;
		esac
		if [[ $uid == 65000 ]]; then
			log_fail "No user id available under 65000 for $uname"
		fi
	done

	return 0
}

#
# Delete the specified user.
#
# $1 login name
#
function del_user #<logname>
{
	typeset user=$1

	if (( ${#user} == 0 )); then
		log_fail "login name is necessary."
	fi

	if $ID $user > /dev/null 2>&1; then
		log_must $USERDEL $user
	fi

	return 0
}

#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group #<group_name>
{
	typeset group=$1

	if (( ${#group} == 0 )); then
		log_fail "group name is necessary."
	fi

	# See if the group already exists.
	$GROUPSHOW $group >/dev/null 2>&1
	[[ $? == 0 ]] && return 0

	# Assign 100 as the base gid
	typeset -i gid=100
	while true; do
		$GROUPADD -g $gid $group > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
			0) return 0 ;;
			# The gid is not unique
			65) ((gid += 1)) ;;
			*) return 1 ;;
		esac
		if [[ $gid == 65000 ]]; then
			log_fail "No group id available under 65000 for $group"
		fi
	done
}

#
# Delete the specified group.
#
# $1 group name
#
function del_group #<group_name>
{
	typeset grp=$1
	if (( ${#grp} == 0 )); then
		log_fail "group name is necessary."
	fi

	$GROUPDEL -n $grp > /dev/null 2>&1
	typeset -i ret=$?
	case $ret in
		# Group does not exist, or was deleted successfully.
		0|6|65) return 0 ;;
		# Name already exists as a group name
		9) log_must $GROUPDEL $grp ;;
		*) return 1 ;;
	esac

	return 0
}

#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
#
function safe_to_destroy_pool { # $1 the pool name

	typeset pool=""
	typeset DONT_DESTROY=""

	# We check that by deleting the $1 pool, we're not
	# going to pull the rug out from other pools. Do this
	# by looking at all other pools, ensuring that they
	# aren't built from files or zvols contained in this pool.

	for pool in $($ZPOOL list -H -o name)
	do
		ALTMOUNTPOOL=""

		# this is a list of file vdevs in $pool whose paths
		# lie under the $1 pool
		FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
			$AWK '{print $1}')

		# this is a list of the zvols that make up the pool
		ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "/dev/zvol/$1$" | \
			$AWK '{print $1}')

		# also want to determine if it's a file-based pool using an
		# alternate mountpoint...
		POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
					$GREP / | $AWK '{print $1}' | \
					$AWK -F/ '{print $2}' | $GREP -v "dev")

		for pooldir in $POOL_FILE_DIRS
		do
			OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
					$GREP "${pooldir}$" | $AWK '{print $1}')

			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
		done


		if [ ! -z "$ZVOLPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ZVOLPOOL on $1"
		fi

		if [ ! -z "$FILEPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $FILEPOOL on $1"
		fi

		if [ ! -z "$ALTMOUNTPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
		fi
	done

	if [ -z "${DONT_DESTROY}" ]
	then
		return 0
	else
		log_note "Warning: it is not safe to destroy $1!"
		return 1
	fi
}

#
# Get IP address of hostname
# $1 hostname
#
function getipbyhost
{
	typeset ip
	ip=`$ARP $1 2>/dev/null | $AWK -F\) '{print $1}' \
		| $AWK -F\( '{print $2}'`
	$ECHO $ip
}
2234
2235#
2236# Setup iSCSI initiator to target
2237# $1 target hostname
2238#
2239function iscsi_isetup
2240{
2241	# check svc:/network/iscsi_initiator:default state, try to enable it
2242	# if the state is not ON
2243	typeset ISCSII_FMRI="svc:/network/iscsi_initiator:default"
2244	if [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) ]]; then
2245		log_must $SVCADM enable $ISCSII_FMRI
2246
2247		typeset -i retry=20
2248		while [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) && \
2249			( $retry -ne 0 ) ]]
2250		do
2251			(( retry = retry - 1 ))
2252			$SLEEP 1
2253		done
2254
2255		if [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) ]]; then
2256			log_fail "$ISCSII_FMRI service cannot be enabled!"
2257		fi
2258	fi
2259
2260	log_must $ISCSIADM add discovery-address $(getipbyhost $1)
2261	log_must $ISCSIADM modify discovery --sendtargets enable
2262	log_must $DEVFSADM -i iscsi
2263}
2264
2265#
2266# Check whether iscsi parameter is set as remote
2267#
2268# return 0 if iscsi is set as remote, otherwise 1
2269#
2270function check_iscsi_remote
2271{
2272	if [[ $iscsi == "remote" ]] ; then
2273		return 0
2274	else
2275		return 1
2276	fi
2277}
2278
2279#
2280# Check if a volume is a valid iSCSI target
2281# $1 volume name
2282# return 0 on success, otherwise return 1
2283#
2284function is_iscsi_target
2285{
2286	typeset dataset=$1
2287	typeset target targets
2288
2289	[[ -z $dataset ]] && return 1
2290
2291	targets=$($ISCSITADM list target | $GREP "Target:" | $AWK '{print $2}')
2292	[[ -z $targets ]] && return 1
2293
2294	for target in $targets; do
2295		[[ $dataset == $target ]] && return 0
2296	done
2297
2298	return 1
2299}
2300
2301#
2302# Get the iSCSI name of a target
2303# $1 target name
2304#
2305function iscsi_name
2306{
2307	typeset target=$1
2308	typeset name
2309
2310	[[ -z $target ]] && log_fail "No parameter."
2311
2312	if ! is_iscsi_target $target ; then
2313		log_fail "Not a target."
2314	fi
2315
2316	name=$($ISCSITADM list target $target | $GREP "iSCSI Name:" \
2317		| $AWK '{print $2}')
2318
	# the name is a string, so emit it on stdout; 'return' only
	# accepts integer status codes
2319	$ECHO $name
2320}
2321
2322#
2323# check svc:/system/iscsitgt:default state, try to enable it if the state
2324# is not ON
2325#
2326function iscsitgt_setup
2327{
2328	log_must $RM -f $ISCSITGTFILE
2329	if [[ "ON" == $($SVCS -H -o sta $ISCSITGT_FMRI) ]]; then
2330		log_note "iscsitgt is already enabled"
2331		return
2332	fi
2333
2334	log_must $SVCADM enable -t $ISCSITGT_FMRI
2335
2336	typeset -i retry=20
2337	while [[ "ON" != $($SVCS -H -o sta $ISCSITGT_FMRI) && \
2338		( $retry -ne 0 ) ]]
2339	do
2340		$SLEEP 1
2341		(( retry = retry - 1 ))
2342	done
2343
2344	if [[ "ON" != $($SVCS -H -o sta $ISCSITGT_FMRI) ]]; then
2345		log_fail "$ISCSITGT_FMRI service cannot be enabled!"
2346	fi
2347
2348	log_must $TOUCH $ISCSITGTFILE
2349}
2350
2351#
2352# set DISABLED state of svc:/system/iscsitgt:default
2353# which is the most suitable state if $ISCSITGTFILE exists
2354#
2355function iscsitgt_cleanup
2356{
2357	if [[ -e $ISCSITGTFILE ]]; then
2358		log_must $SVCADM disable $ISCSITGT_FMRI
2359		log_must $RM -f $ISCSITGTFILE
2360	fi
2361}
2362
2363#
2364# Close iSCSI initiator to target
2365# $1 target hostname
2366#
2367function iscsi_iclose
2368{
2369	log_must $ISCSIADM modify discovery --sendtargets disable
2370	log_must $ISCSIADM remove discovery-address $(getipbyhost $1)
2371	$DEVFSADM -Cv
2372}
2373
2374#
2375# Get the available ZFS compression options
2376# $1 option type zfs_set|zfs_compress
2377#
2378function get_compress_opts
2379{
2380	typeset COMPRESS_OPTS
2381	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2382			gzip-6 gzip-7 gzip-8 gzip-9"
2383
2384	if [[ $1 == "zfs_compress" ]] ; then
2385		COMPRESS_OPTS="on lzjb"
2386	elif [[ $1 == "zfs_set" ]] ; then
2387		COMPRESS_OPTS="on off lzjb"
2388	fi
2389	typeset valid_opts="$COMPRESS_OPTS"
2390	$ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1
2391	if [[ $? -eq 0 ]]; then
2392		valid_opts="$valid_opts $GZIP_OPTS"
2393	fi
2394	$ECHO "$valid_opts"
2395}
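
# Example: iterate over every compression value accepted by 'zfs set'
# (dataset name hypothetical):
#
#	for opt in $(get_compress_opts zfs_set); do
#		log_must $ZFS set compression=$opt $TESTPOOL/$TESTFS
#	done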
2396
2397#
2398# Check whether the given zfs subcommand/option is supported
2399#
2400function check_opt_support #command, option
2401{
2402	typeset command=$1
2403	typeset option=$2
2404
2405	if [[ -z $command ]]; then
2406		return 0
2407	elif [[ -z $option ]]; then
2408		eval "$ZFS 2>&1 | $GREP '$command' > /dev/null 2>&1"
2409	else
2410		eval "$ZFS $command 2>&1 | $GREP -- '$option' | \
2411			$GREP -v -- 'User-defined' > /dev/null 2>&1"
2412	fi
2413	return $?
2414}
2415
2416#
2417# Check whether the given zpool subcommand/option is supported
2418#
2419function check_zpool_opt_support #command, option
2420{
2421	typeset command=$1
2422	typeset option=$2
2423
2424	if [[ -z $command ]]; then
2425		return 0
2426	elif [[ -z $option ]]; then
2427		eval "$ZPOOL 2>&1 | $GREP '$command' > /dev/null 2>&1"
2428	else
2429		eval "$ZPOOL $command 2>&1 | $GREP -- '$option' > /dev/null 2>&1"
2430	fi
2431	return $?
2432}
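
# Example: probe the usage text before exercising an option (the
# subcommand and option shown are illustrative):
#
#	check_zpool_opt_support "get" "-H" || log_unsupported "zpool get -H"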
2433
2434#
2435# Verify zfs operation with -p option works as expected
2436# $1 operation, value could be create, clone or rename
2437# $2 dataset type, value could be fs or vol
2438# $3 dataset name
2439# $4 new dataset name
2440#
2441function verify_opt_p_ops
2442{
2443	typeset ops=$1
2444	typeset datatype=$2
2445	typeset dataset=$3
2446	typeset newdataset=$4
2447
2448	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2449		log_fail "$datatype is not supported."
2450	fi
2451
2452	# check parameters accordingly
2453	case $ops in
2454		create)
2455			newdataset=$dataset
2456			dataset=""
2457			if [[ $datatype == "vol" ]]; then
2458				ops="create -V $VOLSIZE"
2459			fi
2460			;;
2461		clone)
2462			if [[ -z $newdataset ]]; then
2463				log_fail "newdataset should not be empty" \
2464					"when ops is $ops."
2465			fi
2466			log_must datasetexists $dataset
2467			log_must snapexists $dataset
2468			;;
2469		rename)
2470			if [[ -z $newdataset ]]; then
2471				log_fail "newdataset should not be empty" \
2472					"when ops is $ops."
2473			fi
2474			log_must datasetexists $dataset
2475			log_mustnot snapexists $dataset
2476			;;
2477		*)
2478			log_fail "$ops is not supported."
2479			;;
2480	esac
2481
2482	# make sure the upper level filesystem does not exist
2483	if datasetexists ${newdataset%/*} ; then
2484		log_must $ZFS destroy -rRf ${newdataset%/*}
2485	fi
2486
2487	# without -p option, operation will fail
2488	log_mustnot $ZFS $ops $dataset $newdataset
2489	log_mustnot datasetexists $newdataset ${newdataset%/*}
2490
2491	# with -p option, operation should succeed
2492	log_must $ZFS $ops -p $dataset $newdataset
2493	if ! datasetexists $newdataset ; then
2494		log_fail "-p option does not work for $ops"
2495	fi
2496
2497	# when $ops is create or clone, redoing the operation should still succeed
2498	if [[ $ops != "rename" ]]; then
2499		log_must $ZFS $ops -p $dataset $newdataset
2500	fi
2501
2502	return 0
2503}
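
# Example invocations (dataset names hypothetical):
#
#	verify_opt_p_ops create fs $TESTPOOL/dir1/$TESTFS
#	verify_opt_p_ops rename fs $TESTPOOL/$TESTFS $TESTPOOL/dir2/$TESTFS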
2504
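#
# Print the guid recorded in the vdev label of the given disk.
# $1 disk name, relative to /dev
#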
2505function get_disk_guid
2506{
2507	typeset diskname=$1
2508	typeset lastcwd=$(pwd)
2509	cd /dev
2510	typeset guid=$($ZDB -l ${diskname} | ${AWK} '/^    guid:/ {print $2}' | $HEAD -1)
2511	cd $lastcwd
2512	echo $guid
2513}
2514
2515#
2516# Get cachefile for a pool.
2517# Prints the cache file, if there is one.
2518# Returns 0 for a default zpool.cache, 1 for an explicit one, and 2 for none.
2519#
2520function cachefile_for_pool
2521{
2522	typeset pool=$1
2523
2524	cachefile=$(get_pool_prop cachefile $pool)
2525	[[ $? != 0 ]] && return 1
2526
2527	case "$cachefile" in
2528		none)	ret=2 ;;
2529		"-")
2530			ret=2
2531			for dir in /boot/zfs /etc/zfs; do
2532				if [[ -f "${dir}/zpool.cache" ]]; then
2533					cachefile="${dir}/zpool.cache"
2534					ret=0
2535					break
2536				fi
2537			done
2538			;;
2539		*)	ret=1 ;;
2540	esac
2541	[[ $ret -eq 0 || $ret -eq 1 ]] && print "$cachefile"
2542	return $ret
2543}
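
# Example: act on the cachefile state (pool name hypothetical):
#
#	cachefile=$(cachefile_for_pool tank)
#	case $? in
#	0|1)	log_note "tank uses cachefile $cachefile" ;;
#	2)	log_note "tank has no cachefile" ;;
#	esac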
2544
2545#
2546# Assert that the pool is in the appropriate cachefile.
2547#
2548function assert_pool_in_cachefile
2549{
2550	typeset pool=$1
2551
2552	cachefile=$(cachefile_for_pool $pool)
	# 0 (default cachefile) and 1 (explicit cachefile) both printed a
	# path; 2 or more means no cachefile is in use at all.
2553	[ $? -gt 1 ] && log_fail "ERROR: Cachefile not created for '$pool'?"
2554	log_must test -e "${cachefile}"
2555	log_must $ZDB -U ${cachefile} -C ${pool}
2556}
2557
2558#
2559# Get the zdb options given the cachefile state of the pool.
2560#
2561function zdb_cachefile_opts
2562{
2563	typeset pool=$1
2564	typeset vdevdir=$2
2565	typeset opts
2566
2567	if poolexists "$pool"; then
2568		cachefile=$(cachefile_for_pool $pool)
2569		typeset -i ret=$?
2570		case $ret in
2571			0)	opts="-C" ;;
2572			1)	opts="-U $cachefile -C" ;;
2573			2)	opts="-eC" ;;
2574			*)	log_fail "Unknown return '$ret'" ;;
2575		esac
2576	else
2577		opts="-eC"
2578		[[ -n "$vdevdir" ]] && opts="$opts -p $vdevdir"
2579	fi
2580	echo "$opts"
2581}
2582
2583#
2584# Get configuration of pool
2585# $1 pool name
2586# $2 config name
2587#
2588function get_config
2589{
2590	typeset pool=$1
2591	typeset config=$2
2592	typeset vdevdir=$3
2593	typeset alt_root
2594	typeset zdb_opts
2595
2596	zdb_opts=$(zdb_cachefile_opts $pool $vdevdir)
2597	value=$($ZDB $zdb_opts $pool | $GREP "$config:" | $AWK -F: '{print $2}')
2598	if [[ -n $value ]] ; then
		# zdb prints values as "name: 'value'"; strip the leading
		# space and the surrounding single quotes
		value=${value# }
2599		value=${value#\'}
2600		value=${value%\'}
2601	else
2602		return 1
2603	fi
2604	echo $value
2605
2606	return 0
2607}
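
# Example: read a top-level config entry (pool name hypothetical):
#
#	typeset guid=$(get_config tank pool_guid)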
2608
2609#
2610# Private function. Randomly select one of the items from the arguments.
2611#
2612# $1 count
2613# $2-n string
2614#
2615function _random_get
2616{
2617	typeset cnt=$1
2618	shift
2619
2620	typeset str="$@"
2621	typeset -i ind
2622	((ind = RANDOM % cnt + 1))
2623
2624	typeset ret=$($ECHO "$str" | $CUT -f $ind -d ' ')
2625	$ECHO $ret
2626}
2627
2628#
2629# Randomly select one item from the arguments, possibly the empty ("NONE") string
2630#
2631function random_get_with_non
2632{
2633	typeset -i cnt=$#
	# one extra slot makes an out-of-range pick possible, which
	# _random_get prints as the empty string ("NONE")
2634	((cnt += 1))
2635
2636	_random_get "$cnt" "$@"
2637}
2638
2639#
2640# Randomly select one of the items from the arguments, never the empty ("NONE") string
2641#
2642function random_get
2643{
2644	_random_get "$#" "$@"
2645}
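
# Examples:
#
#	opt=$(random_get on off lzjb)		# always one of the three
#	opt=$(random_get_with_non on off lzjb)	# may also be the empty string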
2646
2647#
2648# Generate a dataset name of at least the given length by repeating the
# base string; the length is rounded up to a whole number of copies
2649# $1, the minimum length of the name
2650# $2, the base string to construct the name
2651#
2652function gen_dataset_name
2653{
2654	typeset -i len=$1
2655	typeset basestr="$2"
2656	typeset -i baselen=${#basestr}
2657	typeset -i iter=0
2658	typeset l_name=""
2659
2660	if (( len % baselen == 0 )); then
2661		(( iter = len / baselen ))
2662	else
2663		(( iter = len / baselen + 1 ))
2664	fi
2665	while (( iter > 0 )); do
2666		l_name="${l_name}$basestr"
2667
2668		(( iter -= 1 ))
2669	done
2670
2671	$ECHO $l_name
2672}
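
# Example: gen_dataset_name 10 abc prints "abcabcabcabc" -- 10 rounds up
# to 4 copies of "abc", i.e. 12 characters.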
2673
2674#
2675# Ensure that a given path has been synced, not just ZIL committed.
2676#
2677# XXX On FreeBSD, the sync(8) command (via $SYNC) calls zfs_sync() which just
2678#     does a zil_commit(), as opposed to a txg_wait_synced().  For things that
2679#     require writing to their final destination (e.g. for intentional
2680#     corruption purposes), zil_commit() is not good enough.
2681#
2682function force_sync_path # path
2683{
2684	typeset path="$1"
2685
2686	log_must $ZPOOL export $TESTPOOL
2687	log_must $ZPOOL import -d $path $TESTPOOL
2688}
2689
2690#
2691# Get cksum tuple of dataset
2692# $1 dataset name
2693#
2694# zdb output is like below
2695# " Dataset pool/fs [ZPL], ID 978, cr_txg 2277, 19.0K, 5 objects,
2696# rootbp [L0 DMU objset] 400L/200P DVA[0]=<0:1880c00:200>
2697# DVA[1]=<0:341880c00:200> fletcher4 lzjb LE contiguous birth=2292 fill=5
2698# cksum=989930ccf:4014fe00c83:da5e388e58b4:1f7332052252ac "
2699#
2700function datasetcksum
2701{
2702	typeset cksum
2703	$SYNC
2704	cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
2705		| $AWK -F= '{print $6}')
2706	$ECHO $cksum
2707}
2708
2709#
2710# Get cksum of file
2711# $1 file path
2712#
2713function checksum
2714{
2715	typeset cksum
2716	cksum=$($CKSUM $1 | $AWK '{print $1}')
2717	$ECHO $cksum
2718}
2719
2720#
2721# Get the given disk/slice state from the specific field of the pool
2722#
2723function get_device_state #pool disk field("", "spares","logs")
2724{
2725	typeset pool=$1
2726	typeset disk=${2#/dev/}
2729	typeset field=${3:-$pool}
2730
2731	state=$($ZPOOL status -v "$pool" 2>/dev/null | \
2732		$NAWK -v device=$disk -v pool=$pool -v field=$field \
2733		'BEGIN {startconfig=0; startfield=0; }
2734		/config:/ {startconfig=1}
2735		(startconfig==1)&&($1==field) {startfield=1; next;}
2736		(startfield==1)&&($1==device) {print $2; exit;}
2737		(startfield==1)&&(NF>=3)&&($(NF-1)=="was")&&($NF==device) {print $2; exit;}
2738		(startfield==1)&&($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2739	print $state
2740}
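
# Example (device name hypothetical):
#
#	state=$(get_device_state $TESTPOOL da0 "spares")
#	[[ $state == "AVAIL" ]] || log_fail "spare da0 is not available"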
2741
2742
2743#
2744# Print the filesystem type of the given directory
2745#
2746# $1 directory name
2747#
2748function get_fstype
2749{
2750	typeset dir=$1
2751
2752	if [[ -z $dir ]]; then
2753		log_fail "Usage: get_fstype <directory>"
2754	fi
2755
	# skip the header line that df(1) prints
2756	$DF -T $dir | $AWK 'NR > 1 {print $2}'
2757}
2758
2759#
2760# Given a disk, label it to VTOC regardless what label was on the disk
2761# $1 disk
2762#
2763function labelvtoc
2764{
2765	typeset disk=$1
2766	if [[ -z $disk ]]; then
2767		log_fail "The disk name is unspecified."
2768	fi
2769	typeset label_file=$TMPDIR/labelvtoc.${TESTCASE_ID}
2770	typeset arch=$($UNAME -p)
2771
2772	if [[ $arch == "i386" ]]; then
2773		$ECHO "label" > $label_file
2774		$ECHO "0" >> $label_file
2775		$ECHO "" >> $label_file
2776		$ECHO "q" >> $label_file
2777		$ECHO "q" >> $label_file
2778
2779		$FDISK -B $disk >/dev/null 2>&1
2780		# wait a while for fdisk to finish
2781		$SLEEP 60
2782	elif [[ $arch == "sparc" ]]; then
2783		$ECHO "label" > $label_file
2784		$ECHO "0" >> $label_file
2785		$ECHO "" >> $label_file
2786		$ECHO "" >> $label_file
2787		$ECHO "" >> $label_file
2788		$ECHO "q" >> $label_file
2789	else
2790		log_fail "unknown arch type"
2791	fi
2792
2793	$FORMAT -e -s -d $disk -f $label_file
2794	typeset -i ret_val=$?
2795	$RM -f $label_file
2796	#
2797	# wait for format to finish
2798	#
2799	$SLEEP 60
2800	if (( ret_val != 0 )); then
2801		log_fail "unable to label $disk as VTOC."
2802	fi
2803
2804	return 0
2805}
2806
2807#
2808# Detect if the given filesystem property is supported in this release
2809#
2810# 0	Yes, it is supported
2811# !0	No, it is not supported
2812#
2813function fs_prop_exist
2814{
2815	typeset prop=$1
2816
2817	if [[ -z $prop ]]; then
2818		log_fail "Usage: fs_prop_exist <property>"
2819
2820		return 1
2821	fi
2822
2823	#
2824	# If the property is shortened column name,
2825	# convert it to the standard name
2826	#
2827	case $prop in
2828		avail)		prop=available		;;
2829		refer)		prop=referenced		;;
2830		volblock)	prop=volblocksize	;;
2831		compress)	prop=compression	;;
2832		rdonly)		prop=readonly		;;
2833		recsize)	prop=recordsize		;;
2834		reserv)		prop=reservation	;;
2835		refreserv)	prop=refreservation	;;
2836	esac
2837
2838	#
2839	# The zfs get output looks like the following
2840	#
2841
2842	#
2843	# The following properties are supported:
2844	#
2845	#	PROPERTY       EDIT  INHERIT   VALUES
2846	#
2847	#	available	NO	NO	<size>
2848	#	compressratio	NO	NO	<1.00x or higher if compressed>
2849	#	creation	NO	NO	<date>
2850	#	 ... ...
2851	#	zoned		YES	YES	on | off
2852	#
2853	# Sizes are specified in bytes with standard units such as K, M, G, etc.
2854	#
2855
2856	#
2857	# Extract property names from the line after 'PROPERTY' and stop at
2858	# the trailing 'Sizes are specified...' line
2859	#
2860	$ZFS get 2>&1 | \
2861		$AWK '/PROPERTY/ {start=1; next}
2862			/Sizes/ {start=0}
2863		  	start==1 {print $1}' | \
2864		$GREP -w "$prop" > /dev/null 2>&1
2865
2866	return $?
2867}
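
# Example: shortened column names are accepted:
#
#	fs_prop_exist recsize && log_note "recordsize is supported"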
2868
2869#
2870# Detect if the given pool property is supported in this release
2871#
2872# 0	Yes, it is supported
2873# !0	No, it is not supported
2874#
2875function pool_prop_exist
2876{
2877	typeset prop=$1
2878	if [[ -z $prop ]]; then
2879		log_fail "Usage: pool_prop_exist <property>"
2880
2881		return 1
2882	fi
2883	#
2884	# If the property is shortened column name,
2885	# convert it to the standard name
2886	#
2887	case $prop in
2888		avail)		prop=available		;;
2889		cap)		prop=capacity		;;
2890		replace)	prop=autoreplace	;;
2891	esac
2892
2893	#
2894	# The zpool get output looks like the following
2895	#
2896
2897	# usage:
2898	#	get <"all" | property[,...]> <pool> ...
2899	#
2900	# the following properties are supported:
2901	#
2902	#	PROPERTY       EDIT  VALUES
2903	#
2904	#	available	NO	<size>
2905	#	capacity	NO	<size>
2906	#	guid		NO	<guid>
2907	#	health		NO	<state>
2908	#	size		NO	<size>
2909	#	used		NO	<size>
2910	#	altroot		YES	<path>
2911	#	autoreplace	YES	on | off
2912	#	bootfs		YES	<filesystem>
2913	#	cachefile       YES	<file> | none
2914	#	delegation      YES	on | off
2915	#	failmode	YES	wait | continue | panic
2916	#	version		YES	<version>
2917
2918	$ZPOOL get 2>&1 | \
2919		$AWK '/PROPERTY/ {start=1; next}
2920			start==1 {print $1}' | \
2921		$GREP -w "$prop" > /dev/null 2>&1
2922
2923	return $?
2924}
2925
2926#
2927# check if the system was installed as zfsroot or not
2928# return: 0 if true, otherwise false
2929#
2930function is_zfsroot
2931{
2932	$DF -T / | $GREP -q zfs
2933}
2934
2935#
2936# get the root filesystem name if it's a zfsroot system.
2937#
2938# return: root filesystem name
2939function get_rootfs
2940{
2941	typeset rootfs=""
2942	rootfs=$($MOUNT | $AWK '$3 == "/" && $4 ~ /zfs/ {print $1}')
2943	if [[ -z "$rootfs" ]]; then
2944		log_fail "Cannot get rootfs"
2945	fi
2946	$ZFS list $rootfs > /dev/null 2>&1
2947	if (( $? == 0 )); then
2948		$ECHO $rootfs
2949	else
2950		log_fail "This is not a zfsroot system."
2951	fi
2952}
2953
2954#
2955# get the rootfs's pool name
2956# return:
2957#       rootpool name
2958#
2959function get_rootpool
2960{
2961	typeset rootfs=""
2962	typeset rootpool=""
2963	rootfs=$(get_rootfs)
2964	rootpool=$($ECHO $rootfs | $AWK -F/ '{print $1}')
2965	echo $rootpool
2966}
2967
2968#
2969# Get the sub string from specified source string
2970#
2971# $1 source string
2972# $2 start position. Count from 1
2973# $3 offset
2974#
2975function get_substr #src_str pos offset
2976{
2977	typeset pos offset
2978
2979	$ECHO $1 | \
2980		$NAWK -v pos=$2 -v offset=$3 '{print substr($0, pos, offset)}'
2981}
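
# Example: get_substr "abcdef" 2 3 prints "bcd".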
2982
2983#
2984# Get the directory path of given device
2985#
2986function get_device_dir #device
2987{
2988	typeset device=$1
2989
2990	$ECHO "/dev"
2991}
2992
2993#
2994# Get the package name
2995#
2996function get_package_name
2997{
2998	typeset dirpath=${1:-$STC_NAME}
2999
3000	print "SUNWstc-${dirpath}" | $SED -e "s/\//-/g"
3001}
3002
3003#
3004# Get the word numbers from a string separated by white space
3005#
3006function get_word_count
3007{
3008	$ECHO $1 | $WC -w
3009}
3010
3011#
3012# Verify that the required number of disks is given
3013#
3014function verify_disk_count
3015{
3016	typeset -i min=${2:-1}
3017
3018	typeset -i count=$(get_word_count "$1")
3019
3020	if (( count < min )); then
3021		atf_skip "A minimum of $min disks is required to run." \
3022			" You specified $count disk(s)"
3023	fi
3024}
3025
3026#
3027# Verify that vfs.zfs.vol.recursive is set, so pools can be created using zvols
3028# as backing stores.
3029#
3030function verify_zvol_recursive
3031{
3032	if [ "`sysctl -n vfs.zfs.vol.recursive`" -ne 1 ]; then
3033		atf_skip "Recursive ZVOLs not enabled"
3034	fi
3035}
3036
3037#
3038# Map a disk/slice name to its BSD device name (slice number -> partition letter)
3039#
3040function bsddevmap
3041{
3042	typeset arg=$1
3043	echo $arg | egrep "s[0-9]$" > /dev/null 2>&1
3044	if [ $? -eq 0 ]
3045	then
3046		n=`echo $arg| wc -c`
3047		set -A map a b c d e f g h i j
3048		s=`echo $arg | cut -c $((n-1))`
3049		arg=${arg%s[0-9]}${map[$s]}
3050	fi
3051	echo $arg
3052}
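
# Example: bsddevmap da0s2 prints "da0c" (slice 2 maps to partition letter c).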
3053
3054#
3055# Get the name of the snapshot directory.  Traditionally .zfs/snapshot
3056#
3057function get_snapdir_name
3058{
3059	echo ".zfs/snapshot"
3060}
3061
3062#
3063# Unmount all ZFS filesystems except for those that are in the KEEP variable
3064#
3065function unmount_all_safe
3066{
3067	echo $(all_pools) | \
3068		$XARGS -n 1 $ZFS list -H -o name -t all -r | \
3069		$XARGS -n 1 $ZFS unmount
3070}
3071
3072#
3073# Return the highest pool version that this OS can create
3074#
3075function get_zpool_version
3076{
3077	# We assume output from zpool upgrade -v of the form:
3078	#
3079	# This system is currently running ZFS version 2.
3080	# .
3081	# .
3082	typeset ZPOOL_VERSION=$($ZPOOL upgrade -v | $HEAD -1 | \
3083		$AWK '{print $NF}' | $SED -e 's/\.//g')
3084	# Starting with version 5000, the output format changes to:
3085	# This system supports ZFS pool feature flags.
3086	# .
3087	# .
3088	if [[ $ZPOOL_VERSION = "flags" ]]; then
3089		ZPOOL_VERSION=5000
3090	fi
3091	echo $ZPOOL_VERSION
3092}
3093
3094# Ensures that zfsd is running, starting it if necessary.  Every test that
3095# interacts with zfsd must call this at startup.  This is intended primarily
3096# to eliminate interference from outside the test suite.
3097function ensure_zfsd_running
3098{
3099	if ! service zfsd status > /dev/null 2>&1; then
3100		service zfsd start || service zfsd onestart
3101		service zfsd status > /dev/null 2>&1 ||
3102			log_unsupported "Test requires zfsd"
3103	fi
3104}
3105
3106# Temporarily stops ZFSD, because it can interfere with some tests.  If this
3107# function is used, then restart_zfsd _must_ be called in the cleanup routine.
3108function stop_zfsd
3109{
3110	$RM -f $TMPDIR/.zfsd_enabled_during_stf_zfs_tests
3111	if [[ -n "$ZFSD" && -x "$ZFSD" ]]; then
3112		if /etc/rc.d/zfsd status > /dev/null; then
3113			log_note "Stopping zfsd"
3114			$TOUCH $TMPDIR/.zfsd_enabled_during_stf_zfs_tests
3115			/etc/rc.d/zfsd stop || /etc/rc.d/zfsd onestop
3116		fi
3117	fi
3118}
3119
3120# Restarts zfsd after it has been stopped by stop_zfsd.  Intelligently restarts
3121# only if zfsd was running at the time stop_zfsd was called.
3122function restart_zfsd
3123{
3124	if [[ -f $TMPDIR/.zfsd_enabled_during_stf_zfs_tests ]]; then
3125		log_note "Restarting zfsd"
3126		/etc/rc.d/zfsd start || /etc/rc.d/zfsd onestart
3127	fi
3128	$RM -f $TMPDIR/.zfsd_enabled_during_stf_zfs_tests
3129}
3130
3131#
3132# Using the given <vdev>, obtain the value of the property <propname> for
3133# the given <tvd> identified by numeric id.
3134#
3135function get_tvd_prop # vdev tvd propname
3136{
3137	typeset vdev=$1
3138	typeset -i tvd=$2
3139	typeset propname=$3
3140
3141	$ZDB -l $vdev | $AWK -v tvd=$tvd -v prop="${propname}:" '
3142		BEGIN { start = 0; }
3143		/^        id:/ && ($2==tvd) { start = 1; next; }
3144		(start==0) { next; }
3145		/^        [a-z]+/ && ($1==prop) { print $2; exit; }
3146		/^        children/ { exit; }
3147		'
3148}
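
# Example (vdev path hypothetical): read the ashift of top-level vdev 0
# from a disk's label:
#
#	ashift=$(get_tvd_prop /dev/da0 0 ashift)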
3149
3150#
3151# Convert a DVA into a physical block address.  Prints number of blocks.
3152# This takes the usual printed form, in which offsets are left shifted so
3153# they represent bytes rather than the native sector count.
3154#
3155function dva_to_block_addr # dva
3156{
3157	typeset dva=$1
3158
3159	typeset offcol=$(echo $dva | cut -f2 -d:)
3160	typeset -i offset="0x${offcol}"
3161	# First add 4MB to skip the boot blocks and first two vdev labels,
3162	# then convert to 512 byte blocks (for use with dd).  Note that this
3163	# differs from simply adding 8192 blocks, since the input offset is
3164	# given in bytes and has the actual ashift baked in.
3165	(( offset += 4*1024*1024 ))
3166	(( offset >>= 9 ))
3167	echo "$offset"
3168}
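
# Example: for DVA 0:1880c00:200, the byte offset is 0x1880c00; adding
# 4MB gives 0x1c80c00, and shifting right by 9 yields block 58374.
#
#	dva_to_block_addr "0:1880c00:200"	# prints 58374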
3169
3170#
3171# Convert a RAIDZ DVA into a physical block address.  This has the same
3172# output as dva_to_block_addr (number of blocks from beginning of device), but
3173# is more complicated due to RAIDZ.  ashift is normally always 9, but RAIDZ
3174# uses the actual tvd ashift instead.  Furthermore, the number of vdevs changes
3175# the actual block for each device.
3176#
3177function raidz_dva_to_block_addr # dva ncols ashift
3178{
3179	typeset dva=$1
3180	typeset -i ncols=$2
3181	typeset -i ashift=$3
3182
3183	typeset -i offset=0x$(echo $dva | cut -f2 -d:)
3184	(( offset >>= ashift ))
3185
3186	typeset -i ioff=$(( (offset + ncols - 1) / ncols  ))
3187
3188	# Now add the front 4MB and return.
3189	(( ioff += ( 4194304 >> $ashift ) ))
3190	echo "$ioff"
3191}
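
# Example: for the same DVA on a 3-wide raidz with ashift 9, the offset in
# sectors is 0x1880c00 >> 9 = 50182; spread over 3 columns that is
# ceil(50182 / 3) = 16728, plus the 4MB skip (8192 sectors) = 24920.
#
#	raidz_dva_to_block_addr "0:1880c00:200" 3 9	# prints 24920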
3192
3193#
3194# Return the vdevs for the given toplevel vdev number.
3195# Child vdevs will only be included if they are ONLINE.  Output format:
3196#
3197#   <toplevel vdev type> <nchildren> <child1> [<child2> ...]
3198#
3199# Valid toplevel vdev types are mirror, raidz[1-3], leaf (which can be a
3200# disk or a file).  Note that 'nchildren' can be larger than the number of
3201# returned children; it represents the number of children regardless of how
3202# many are actually online.
3203#
3204function vdevs_for_tvd # pool tvd
3205{
3206	typeset pool=$1
3207	typeset -i tvd=$2
3208
3209	$ZPOOL status $pool | $AWK -v want_tvd=$tvd '
3210		BEGIN {
3211			 start = 0; tvd = -1; lvd = -1;
3212			 type = "UNKNOWN"; disks = ""; disk = "";
3213			 nchildren = 0;
3214		}
3215		/NAME.*STATE/ { start = 1; next; }
3216		(start==0) { next; }
3217
3218		(tvd > want_tvd) { exit; }
3219		END { print type " " nchildren " " disks; }
3220
3221		length(disk) > 0 {
3222			if (length(disks) > 0) { disks = disks " "; }
3223			if (substr(disk, 0, 1) == "/") {
3224				disks = disks disk;
3225			} else {
3226				disks = disks "/dev/" disk;
3227			}
3228			disk = "";
3229		}
3230
3231		/^\t(spares|logs)/ { tvd = want_tvd + 1; next; }
3232		/^\t  (mirror|raidz[1-3])-[0-9]+/ {
3233			tvd += 1;
3234			(tvd == want_tvd) && type = substr($1, 0, 6);
3235			next;
3236		}
3237		/^\t  [\/A-Za-z]+/ {
3238			tvd += 1;
3239			if (tvd == want_tvd) {
3240				(( nchildren += 1 ))
3241				type = "leaf";
3242				($2 == "ONLINE") && disk = $1;
3243			}
3244			next;
3245		}
3246
3247		(tvd < want_tvd) { next; }
3248
3249		/^\t    spare-[0-9]+/ { next; }
3250		/^\t      [\/A-Za-z]+/ {
3251			(( nchildren += 1 ))
3252			($2 == "ONLINE") && disk = $1;
3253			next;
3254		}
3255
3256		/^\t    [\/A-Za-z]+/ {
3257			(( nchildren += 1 ))
3258			($2 == "ONLINE") && disk = $1;
3259			next;
3260		}
3261		'
3262}
3263
3264#
3265# Get a vdev path, ashift & offset for a given pool/dataset and DVA.
3266# If desired, can also select the toplevel vdev child number.
3267#
3268function dva_to_vdev_ashift_off # pool/dataset dva [leaf_vdev_num]
3269{
3270	typeset poollike=$1
3271	typeset dva=$2
3272	typeset -i leaf_vdev_num=$3
3273
3274	# vdevs are normally 0-indexed while arguments are 1-indexed.
3275	(( leaf_vdev_num += 1 ))
3276
3277	# Strip any child datasets or snapshots.
3278	pool=$(echo $poollike | sed -e 's,[/@].*,,g')
3279	tvd=$(echo $dva | cut -d: -f1)
3280
3281	set -- $(vdevs_for_tvd $pool $tvd)
3282	log_debug "vdevs_for_tvd: $* <EOM>"
3283	tvd_type=$1; shift
3284	nchildren=$1; shift
3285
3286	lvd=$(eval echo \$$leaf_vdev_num)
3287	log_debug "type='$tvd_type' children='$nchildren' lvd='$lvd' dva='$dva'"
3288	case $tvd_type in
3289	raidz*)
3290		ashift=$(get_tvd_prop $lvd $tvd ashift)
3291		log_debug "raidz: ashift='${ashift}'"
3292		off=$(raidz_dva_to_block_addr $dva $nchildren $ashift)
3293		;;
3294	*)
3295		ashift=9
3296		off=$(dva_to_block_addr $dva)
3297		;;
3298	esac
3299	echo "${lvd}:${ashift}:${off}"
3300}
3301
3302#
3303# Get the DVA for the specified dataset's given filepath.
3304#
3305function file_dva # dataset filepath [level] [offset] [dva_num]
3306{
3307	typeset dataset=$1
3308	typeset filepath=$2
3309	typeset -i level=$3
3310	typeset -i offset=$4
3311	typeset -i dva_num=$5
3312
3313	typeset -li blksz=0
3314	typeset -li blknum=0
3315	typeset -li startoff
3316	typeset -li inode
3317
3318	eval `$STAT -s "$filepath"`
3319	inode="$st_ino"
3320
3321	# The inner match is for 'DVA[0]=<0:1b412600:200>', in which the
3322	# text surrounding the actual DVA is a fixed size with 8 characters
3323	# before it and 1 after.
3324	$ZDB -P -vvvvv "$dataset/" $inode | \
3325	    $AWK -v level=${level} -v dva_num=${dva_num} '
3326		BEGIN { stage = 0; }
3327		(stage == 0) && ($1=="Object") { stage = 1; next; }
3328
3329		(stage == 1) {
3330			print $3 " " $4;
3331			stage = 2; next;
3332		}
3333
3334		(stage == 2) && /^Indirect blocks/ { stage=3; next; }
3335		(stage < 3) { next; }
3336
3337		match($2, /L[0-9]/) {
3338			if (substr($2, RSTART+1, RLENGTH-1) != level) { next; }
3339		}
3340		match($3, /DVA\[.*>/) {
3341			dva = substr($3, RSTART+8, RLENGTH-9);
3342			if (substr($3, RSTART+4, 1) == dva_num) {
3343				print $1 " " dva;
3344			}
3345		}
3346		' | \
3347	while read line; do
3348		log_debug "params='$blksz/$blknum/$startoff' line='$line'"
3349		if (( blksz == 0 )); then
3350			typeset -i iblksz=$(echo $line | cut -d " " -f1)
3351			typeset -i dblksz=$(echo $line | cut -d " " -f2)
3352
3353			# Calculate the actual desired block starting offset.
3354			if (( level > 0 )); then
3355				typeset -i nbps_per_level
3356				typeset -i indsz
3357				typeset -i i=0
3358
3359				(( nbps_per_level = iblksz / 128 ))
3360				(( blksz = dblksz ))
3361				for (( i = 0; $i < $level; i++ )); do
3362					(( blksz *= nbps_per_level ))
3363				done
3364			else
3365				blksz=$dblksz
3366			fi
3367
3368			(( blknum = offset / blksz ))
3369			(( startoff = blknum * blksz ))
3370			continue
3371		fi
3372
3373		typeset lineoffstr=$(echo $line | cut -d " " -f1)
3374		typeset -i lineoff=$(printf "%d" "0x${lineoffstr}")
3375		typeset dva="$(echo $line | cut -d " " -f2)"
3376		log_debug "str='$lineoffstr' lineoff='$lineoff' dva='$dva'"
3377		if [[ -n "$dva" ]] && (( lineoff == startoff )); then
3378			echo $line | cut -d " " -f2
3379			return 0
3380		fi
3381	done
3382	return 1
3383}
3384
3385#
3386# Corrupt the given dataset's filepath file.  This will obtain the first
3387# level 0 block's DVA and scribble random bits on it.
3388#
3389function corrupt_file # dataset filepath [leaf_vdev_num]
3390{
3391	typeset dataset=$1
3392	typeset filepath=$2
3393	typeset -i leaf_vdev_num="$3"
3394
3395	dva=$(file_dva $dataset $filepath)
3396	[ $? -ne 0 ] && log_fail "ERROR: Can't find file $filepath on $dataset"
3397
3398	vdoff=$(dva_to_vdev_ashift_off $dataset $dva $leaf_vdev_num)
3399	vdev=$(echo $vdoff | cut -d: -f1)
3400	ashift=$(echo $vdoff | cut -d: -f2)
3401	off=$(echo $vdoff | cut -d: -f3)
3402	blocksize=$(( 1 << $ashift ))
3403
3404	log_note "Corrupting ${dataset}'s $filepath on $vdev at DVA $dva with ashift $ashift"
3405	log_must $DD if=/dev/urandom bs=$blocksize of=$vdev seek=$off count=1 conv=notrunc
3406}
3407
3408#
3409# Create the given number of files, whose names will start with
3410# <basename>.
3412#
3413# The <data> argument is special: it can be "ITER", in which case
3414# the -d argument will be the value of the current iteration.  It
3415# can be 0, in which case it will always be 0.  Otherwise, it will
3416# always be the given value.
3417#
3418# If <snapbase> is specified, a snapshot will be taken using the
3419# argument as the snapshot basename.
3420#
3421function populate_dir # basename num_files write_count blocksz data snapbase
3422{
3423	typeset basename=$1
3424	typeset -i num_files=$2
3425	typeset -i write_count=$3
3426	typeset -i blocksz=$4
3427	typeset -i i
3428	typeset data=$5
3429	typeset snapbase="$6"
3430
3431	log_note "populate_dir: data='$data'"
3432	for (( i = 0; i < num_files; i++ )); do
3433		case "$data" in
3434		0)	d=0	;;
3435		ITER)	d=$i ;;
3436		*)	d=$data	;;
3437		esac
3438
3439		log_must $FILE_WRITE -o create -c $write_count \
3440		    -f ${basename}.$i -b $blocksz -d $d
3441
3442		[ -n "$snapbase" ] && log_must $ZFS snapshot ${snapbase}.${i}
3443	done
3444}
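
# Example (path hypothetical): create 5 files of 10 x 8K blocks each,
# using the iteration number as the fill pattern:
#
#	populate_dir /$TESTDIR/file 5 10 8192 ITER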
3445
3446# Reap all children registered in $child_pids.
3447function reap_children
3448{
3449	[ -z "$child_pids" ] && return
3450	for wait_pid in $child_pids; do
3451		log_must $KILL $wait_pid
3452	done
3453	child_pids=""
3454}
3455
3456# Busy a path.  Expects to be reaped via reap_children.  Tries to run as
3457# long and slowly as possible.  [num] is taken as a hint; if such a file
3458# already exists a different one will be chosen.
3459function busy_path # <path> [num]
3460{
3461	typeset busypath=$1
3462	typeset -i num=$2
3463
3464	while :; do
3465		busyfile="$busypath/busyfile.${num}"
3466		[ ! -f "$busyfile" ] && break
		# the hinted name is taken; try the next number
		(( num += 1 ))
3467	done
3468
3469	cmd="$DD if=/dev/urandom of=$busyfile bs=512"
3470	( cd $busypath && $cmd ) &
3471	typeset pid=$!
3472	$SLEEP 1
3473	log_must $PS -p $pid
3474	child_pids="$child_pids $pid"
3475}
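
# Example (dataset/path names hypothetical): keep a mountpoint busy so
# that an unmount must fail, then clean up:
#
#	busy_path /$TESTDIR 0
#	log_mustnot $ZFS unmount $TESTPOOL/$TESTFS
#	reap_children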
3476