xref: /illumos-gate/usr/src/test/zfs-tests/include/libtest.shlib (revision 04427e3bf236c18cc532680b957267ee70b1037d)
1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24# Use is subject to license terms.
25# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
26# Copyright 2016 Nexenta Systems, Inc.
27#
28
29. ${STF_TOOLS}/contrib/include/logapi.shlib
30
31# Determine whether a dataset is mounted
32#
33# $1 dataset name
34# $2 filesystem type; optional - defaulted to zfs
35#
36# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
37
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			# $1 may be a mountpoint (leading '/') or a dataset
			# name; compare it against the matching column of
			# 'zfs mount' output ($1 = dataset, $2 = mountpoint).
			if [[ "$1" == "/"* ]] ; then
				for out in $(zfs mount | awk '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $(zfs mount | awk '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			out=$(df -F $fstype $1 2>/dev/null)
			ret=$?
			(($ret != 0)) && return $ret

			# df output has the form "<dir> (<name>): ..." --
			# carve out the mount directory and the device name.
			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	esac

	return 1
}
73
74# Return 0 if a dataset is mounted; 1 otherwise
75#
76# $1 dataset name
77# $2 filesystem type; optional - defaulted to zfs
78
function mounted
{
	# Succeed (0) only when ismounted reports the dataset mounted;
	# every other ismounted status maps to failure (1).
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
85
86# Return 0 if a dataset is unmounted; 1 otherwise
87#
88# $1 dataset name
89# $2 filesystem type; optional - defaulted to zfs
90
function unmounted
{
	# Succeed (0) only for ismounted status 1 (unmounted); status 0
	# (mounted) and 2 (error) both map to failure here.
	ismounted $1 $2
	typeset rv=$?
	[[ $rv -eq 1 ]] && return 0
	return 1
}
97
98# split line on ","
99#
100# $1 - line to split
101
# split line on ","
#
# $1 - line to split
function splitline
{
	# Replace every "," with a space via shell parameter expansion
	# instead of forking echo | sed.
	echo "${1//,/ }"
}
106
function default_setup
{
	# Delegate all the work to the non-exiting variant, then record
	# the test as passed.
	default_setup_noexit "$@"
	log_pass
}
113
114#
115# Given a list of disks, setup storage pools and datasets.
116#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3

	# In the global zone build a fresh pool from the supplied disks;
	# in a local zone the pools are pre-delegated, so just remount them.
	if is_global_zone; then
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		reexport_pool
	fi

	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	# Optionally create an unmountable container ($TESTCTR) holding
	# a second file system mounted at $TESTDIR1.
	if [[ -n $container ]]; then
		rm -rf $TESTDIR1  || \
			log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	# Optionally create a volume; zvols are only usable in the global
	# zone, so a plain file system stands in for it otherwise.
	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
160
161#
162# Given a list of disks, setup a storage pool, file system and
163# a container.
164#
function default_container_setup
{
	# Same as default_setup, additionally requesting a container.
	default_setup "$1" "true"
}
171
172#
173# Given a list of disks, setup a storage pool,file system
174# and a volume.
175#
function default_volume_setup
{
	# Same as default_setup, additionally requesting a volume.
	default_setup "$1" "" "true"
}
182
183#
184# Given a list of disks, setup a storage pool,file system,
185# a container and a volume.
186#
function default_container_volume_setup
{
	# Same as default_setup, requesting both a container and a volume.
	default_setup "$1" "true" "true"
}
193
194#
195# Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
196# filesystem
197#
198# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
199# $2 snapshot name. Default, $TESTSNAP
200#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	# Validate arguments and preconditions before snapshotting.
	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	snapexists $fs_vol@$snap && \
		log_fail "$fs_vol@$snap already exists."
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}
217
218#
219# Create a clone from a snapshot, default clone name is $TESTCLONE.
220#
221# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
222# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
223#
function create_clone   # snapshot clone
{
	typeset src_snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset new_clone=${2:-$TESTPOOL/$TESTCLONE}

	# Both names must be non-empty before attempting the clone.
	if [[ -z $src_snap ]]; then
		log_fail "Snapshot name is undefined."
	fi
	if [[ -z $new_clone ]]; then
		log_fail "Clone name is undefined."
	fi

	log_must zfs clone $src_snap $new_clone
}
236
237#
238# Create a bookmark of the given snapshot.  Defaultly create a bookmark on
239# filesystem.
240#
241# $1 Existing filesystem or volume name. Default, $TESTFS
242# $2 Existing snapshot name. Default, $TESTSNAP
243# $3 bookmark name. Default, $TESTBKMARK
244#
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	# Validate arguments and preconditions before bookmarking.
	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	bkmarkexists $fs_vol#$bkmark && \
		log_fail "$fs_vol#$bkmark already exists."
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."
	snapexists $fs_vol@$snap || \
		log_fail "$fs_vol@$snap must exist."

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
265
function default_mirror_setup
{
	# Build the mirrored pool and dataset, then record success.
	default_mirror_setup_noexit $1 $2 $3
	log_pass
}
272
273#
274# Given a pair of disks, set up a storage pool and dataset for the mirror
275# @parameters: $1 the primary side of the mirror
276#   $2 the secondary side of the mirror
277# @uses: ZPOOL ZFS TESTPOOL TESTFS
278function default_mirror_setup_noexit
279{
280	readonly func="default_mirror_setup_noexit"
281	typeset primary=$1
282	typeset secondary=$2
283
284	[[ -z $primary ]] && \
285		log_fail "$func: No parameters passed"
286	[[ -z $secondary ]] && \
287		log_fail "$func: No secondary partition passed"
288	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
289	log_must zpool create -f $TESTPOOL mirror $@
290	log_must zfs create $TESTPOOL/$TESTFS
291	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
292}
293
294#
295# create a number of mirrors.
296# We create a number($1) of 2 way mirrors using the pairs of disks named
297# on the command line. These mirrors are *not* mounted
298# @parameters: $1 the number of mirrors to create
299#  $... the devices to use to create the mirrors on
300# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i remaining=$1
	shift

	# Consume the device list two at a time, one 2-way mirror pool
	# ($TESTPOOL<n>) per pair.
	while ((remaining > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$remaining ]] && rm -rf /$TESTPOOL$remaining
		log_must zpool create -f $TESTPOOL$remaining mirror $1 $2
		shift 2
		((remaining -= 1))
	done
}
314
315#
316# create a number of raidz pools.
317# We create a number($1) of 2 raidz pools  using the pairs of disks named
318# on the command line. These pools are *not* mounted
319# @parameters: $1 the number of pools to create
320#  $... the devices to use to create the pools on
321# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i remaining=$1
	shift

	# Consume the device list two at a time, one 2-disk raidz pool
	# ($TESTPOOL<n>) per pair.
	while ((remaining > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$remaining ]] && rm -rf /$TESTPOOL$remaining
		log_must zpool create -f $TESTPOOL$remaining raidz $1 $2
		shift 2
		((remaining -= 1))
	done
}
335
336#
337# Destroy the configured testpool mirrors.
338# the mirrors are of the form ${TESTPOOL}{number}
339# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
	# The generic pool cleanup path already handles the numbered
	# mirror pools; reuse it, then record success.
	default_cleanup_noexit
	log_pass
}
346
347#
348# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
349# $1 the list of disks
350#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	# NOTE(review): only the first three disks are placed in the
	# raidz vdev even when more are supplied -- confirm intentional.
	log_must zpool create -f $TESTPOOL raidz $1 $2 $3
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
367
368#
369# Common function used to cleanup storage pools and datasets.
370#
371# Invoked at the start of the test suite to ensure the system
372# is in a known state, and also at the end of each set of
373# sub-tests to ensure errors from one set of tests doesn't
374# impact the execution of the next set.
375
function default_cleanup
{
	# Run the shared cleanup steps, then record success.
	default_cleanup_noexit
	log_pass
}
382
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		# Candidate pools: everything except $NO_POOLS and the
		# pools listed in $KEEP (turned into an egrep pattern).
		exclude=`eval echo \"'(${KEEP})'\"`
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		while [ ! -z ${ALL_POOLS} ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				# Recompute the list each pass: destroying
				# one pool may make others safe to destroy.
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		typeset fs=""
		# In a local zone, destroy everything beneath the
		# delegated $ZONE_POOL/$ZONE_CTR[0-4] containers instead.
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must zfs destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# Only reset checksum when encryption is off
				# or unreadable; encrypted datasets manage
				# their own checksum setting.
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must rm -rf $TESTDIR
}
459
460
461#
462# Common function used to cleanup storage pools, file systems
463# and containers.
464#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	# Unmount the contained file system first, if it is mounted.
	if ismounted $TESTPOOL/$TESTCTR/$TESTFS1; then
		log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	# Destroy the contained file system, then the container itself.
	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must zfs destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}
486
487#
488# Common function used to cleanup snapshot of file system or volume. Default to
489# delete the file system's snapshot
490#
491# $1 snapshot name
492#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# Fix: error message read "does not existed."
	if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
	fi

	#
	# 'get_prop mountpoint' is only meaningful while the snapshot is
	# mounted, so record the mountpoint first (when mounted) and use
	# it to remove the leftover directory after the destroy.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must zfs destroy $snap
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
517
518#
519# Common function used to cleanup clone.
520#
521# $1 clone name
522#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	# Fix: error message read "does not existed."
	if ! datasetexists $clone; then
		log_fail "'$clone' does not exist."
	fi

	# As in destroy_snapshot: the mountpoint property is only
	# trustworthy while the clone is mounted, so capture it now and
	# remove the leftover directory after the destroy.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	log_must zfs destroy $clone
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
543
544#
545# Common function used to cleanup bookmark of file system or volume.  Default
546# to delete the file system's bookmark.
547#
548# $1 bookmark name
549#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	# Fix: the failure message referenced the misspelled variable
	# $bkmarkp (never set) and so printed an empty bookmark name;
	# also corrected "does not existed."
	if ! bkmarkexists $bkmark; then
		log_fail "'$bkmark' does not exist."
	fi

	log_must zfs destroy $bkmark
}
560
561# Return 0 if a snapshot exists; $? otherwise
562#
563# $1 - snapshot name
564
function snapexists
{
	# Listing succeeds exactly when the snapshot exists; the listing
	# status becomes the function's return value.
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
570
571#
572# Return 0 if a bookmark exists; $? otherwise
573#
574# $1 - bookmark name
575#
function bkmarkexists
{
	# Listing succeeds exactly when the bookmark exists; the listing
	# status becomes the function's return value.
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
581
582#
583# Set a property to a certain value on a dataset.
584# Sets a property of the dataset to the value as passed in.
585# @param:
586#	$1 dataset who's property is being set
587#	$2 property to set
588#	$3 value to set property to
589# @return:
590#	0 if the property could be set.
591#	non-zero otherwise.
592# @use: ZFS
593#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi

	# Capture the command output so a failure can be logged in full.
	typeset output
	output=$(zfs set $2=$3 $1 2>&1)
	typeset status=$?
	if ((status != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $status"
		log_note "Output: $output"
		return $status
	fi
	return 0
}
614
615#
616# Assign suite defined dataset properties.
617# This function is used to apply the suite's defined default set of
618# properties to a dataset.
619# @parameters: $1 dataset to use
620# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
621# @returns:
622#   0 if the dataset has been altered.
623#   1 if no pool name was passed in.
624#   2 if the dataset could not be found.
625#   3 if the dataset could not have it's properties set.
626#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	# Confirm the dataset exists.  Iterate over dataset names only
	# ('-H -o name'); the previous bare 'zfs list' walked every
	# whitespace-separated token of the headered output, including
	# header words and mountpoints.
	typeset confset=
	typeset -i found=0
	for confset in $(zfs list -H -o name); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
654
655#
656# Check a numeric assertion
657# @parameter: $@ the assertion to check
658# @output: big loud notice if assertion failed
659# @use: log_fail
660#
function assert
{
	# Evaluate the arguments as an arithmetic expression; a false
	# (zero) result reports the expression itself as the failure.
	if ! (($@)); then
		log_fail "$@"
	fi
}
665
666#
667# Function to format partition size of a disk
668# Given a disk cxtxdx reduces all partitions
669# to 0 size
670#
function zero_partitions #<whole_disk_name>
{
	typeset diskname=$1
	typeset slice

	# Shrink every data slice to zero; slice 2 (the whole-disk
	# overlap slice) is deliberately left alone.
	for slice in 0 1 3 4 5 6 7; do
		set_partition $slice "" 0mb $diskname
	done
}
681
682#
683# Given a slice, size and disk, this function
684# formats the slice to the specified size.
685# Size should be specified with units as per
686# the `format` command requirements eg. 100mb 3gb
687#
function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	[[ -z $slicenum || -z $size || -z $disk ]] && \
	    log_fail "The slice, size or disk name is unspecified."
	typeset format_file=/var/tmp/format_in.$$

	# Script the interactive format(1M) session: enter the partition
	# menu, select the slice, accept the tag/flag defaults (two blank
	# lines), supply start and size, write the label, and quit out of
	# both menus.
	printf '%s\n' "partition" "$slicenum" "" "" "$start" "$size" \
	    "label" "" "q" "q" > $format_file

	format -e -s -d $disk -f $format_file
	typeset ret_val=$?
	rm -f $format_file
	[[ $ret_val -ne 0 ]] && \
	    log_fail "Unable to format $disk slice $slicenum to $size"
	return 0
}
716
717#
718# Get the end cyl of the given slice
719#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	# Normalize to the bare cXtXdX name: strip any /dev path prefix
	# and any trailing slice suffix.
	disk=${disk#/dev/dsk/}
	disk=${disk#/dev/rdsk/}
	disk=${disk%s*}

	# Sectors per cylinder, taken from the VTOC of the whole-disk
	# slice (s2).
	typeset -i ratio=0
	ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		grep "sectors\/cylinder" | \
		awk '{print $2}')

	if ((ratio == 0)); then
		return
	fi

	# Column 6 of 'prtvtoc -h' is the slice's last sector; convert
	# it to an end cylinder using the sectors/cylinder ratio.
	typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		nawk -v token="$slice" '{if ($1==token) print $6}')

	((endcyl = (endcyl + 1) / ratio))
	echo $endcyl
}
747
748
749#
750# Given a size,disk and total slice number,  this function formats the
751# disk slices from 0 to the total slice number with the same specified
752# size.
753#
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	# Wipe existing slices, then lay out equal-sized slices back to
	# back, each starting where the previous one ended.
	zero_partitions $disk_name
	for ((i = 0; i < total_slices; i++)); do
		# Slice 2 is the traditional whole-disk slice; skip it.
		((i == 2)) && continue
		set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
	done
}
773
774#
775# This function continues to write to a filenum number of files into dirnum
776# number of directories until either file_write returns an error or the
777# maximum number of files per directory have been written.
778#
779# Usage:
780# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
781#
782# Return value: 0 on success
783#		non 0 on error
784#
785# Where :
786#	destdir:    is the directory where everything is to be created under
787#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
788#	filenum:    the maximum number of files per subdirectory
789#	bytes:	    number of bytes to write
790#	num_writes: numer of types to write out bytes
791#	data:	    the data that will be writen
792#
793#	E.g.
794#	file_fs /testdir 20 25 1024 256 0
795#
796# Note: bytes * num_writes equals the size of the testfile
797#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1
	typeset -i idirnum=0
	typeset -i fn=0
	typeset -i retval=0

	log_must mkdir -p $destdir/$idirnum
	# Keep writing files until either the directory limit is reached
	# or file_write fails (e.g. the file system fills up); the failing
	# status is passed back to the caller via $retval.
	while (($odirnum > 0)); do
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		# After $filenum files, start a fresh subdirectory.
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			log_must mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
835
836#
837# Simple function to get the specified property. If unable to
838# get the property then exits.
839#
840# Note property is in 'parsable' format (-p)
841#
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset prop_val

	# Fetch the parsable (-p) property value; a lookup failure is
	# reported via log_note and return status 1.
	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
	if (($? != 0)); then
		log_note "Unable to get $prop property for dataset " \
		"$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
858
859#
860# Simple function to get the specified property of pool. If unable to
861# get the property then exits.
862#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
			awk '{print $3}')
		# NOTE(review): this $? reflects the final (awk) stage of
		# the pipeline, not zpool itself, so zpool failures are
		# effectively masked -- confirm whether that is intended.
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			"$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	echo $prop_val
	return 0
}
885
886# Return 0 if a pool exists; $? otherwise
887#
888# $1 - pool name
889
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# 'zpool get' succeeds only for an existing pool; its status
	# becomes the function's return value.
	zpool get name "$pool" > /dev/null 2>&1
}
902
903# Return 0 if all the specified datasets exist; $? otherwise
904#
905# $1-n  dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Fail fast with the first missing dataset's status.
	typeset ds
	for ds in "$@"; do
		zfs get name $ds > /dev/null 2>&1 || \
			return $?
	done

	return 0
}
921
922# return 0 if none of the specified datasets exists, otherwise return 1.
923#
924# $1-n  dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Any dataset that can be listed disproves "none exist".
	typeset ds
	for ds in "$@"; do
		zfs list -H -t filesystem,snapshot,volume $ds > /dev/null 2>&1 \
		    && return 1
	done

	return 0
}
940
941#
942# Given a mountpoint, or a dataset name, determine if it is shared.
943#
944# Returns 0 if shared, 1 otherwise.
945#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A dataset name (no leading '/') is translated to its mountpoint
	# first; datasets with no usable mountpoint (none/legacy/-) can
	# never be shared.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	# Column 2 of share(1M) output is the shared pathname.
	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Not shared; log the NFS server state as a debugging aid, since
	# a disabled server is a common cause.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
978
979#
980# Given a mountpoint, determine if it is not shared.
981#
982# Returns 0 if not shared, 1 otherwise.
983#
function not_shared
{
	typeset fs=$1

	# Invert is_shared: any non-shared status reports success.
	if is_shared $fs; then
		return 1
	fi
	return 0
}
995
996#
997# Helper function to unshare a mountpoint.
998#
function unshare_fs #fs
{
	typeset fs=$1

	# Only unshare when the file system is currently shared; always
	# report success otherwise.
	if is_shared $fs; then
		log_must zfs unshare $fs
	fi

	return 0
}
1010
1011#
1012# Check NFS server status and trigger it online.
1013#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		timeout=10
		# Note: bare 'timeout' is evaluated arithmetically by the
		# -ne operator inside [[ ]], so no '$' is needed here.
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		# The dummy share has served its purpose; remove it.
		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1060
1061#
1062# To verify whether calling process is in global zone
1063#
1064# Return 0 if in global zone, 1 in non-global zone
1065#
function is_global_zone
{
	# zonename(1) prints "global" when run in the global zone; the
	# comparison's status is the function's return value.
	[[ $(zonename 2>/dev/null) == "global" ]]
}
1074
1075#
1076# Verify whether test is permitted to run from
1077# global zone, local zone, or both
1078#
1079# $1 zone limit, could be "global", "local", or "both"(no limit)
1080#
1081# Return 0 if permitted, otherwise exit with log_unsupported
1082#
function verify_runnable # zone limit
{
	typeset limit=$1

	# No limit means the test may run anywhere.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from "\
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from "\
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		# Local-zone tests rely on the delegated pools being
		# mounted; remount them here.
		reexport_pool
	fi

	return 0
}
1117
1118# Return 0 if create successfully or the pool exists; $? otherwise
1119# Note: In local zones, this function should return 0 silently.
1120#
1121# $1 - pool name
1122# $2-n - [keyword] devs_list
1123
function create_pool #pool devs_list
{
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Start from a clean slate: remove any pre-existing pool of the
	# same name first.
	poolexists $pool && destroy_pool $pool

	# Pools can only be created in the global zone; in a local zone
	# this silently succeeds.
	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1146
1147# Return 0 if destroy successfully or the pool exists; $? otherwise
1148# Note: In local zones, this function should return 0 silently.
1149#
1150# $1 - pool name
1151# Destroy pool with the given parameters.
1152
function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			# Capture the mountpoint before the pool vanishes
			# so the stale directory can be removed afterward.
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			must=""
			while [[ $ret -ne 0 ]]; do
				$must zpool destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				# After 7 failed attempts, switch to log_must
				# so a persistent failure aborts the test.
				[[ count++ -ge 7 ]] && must=log_must
				sleep $wait_time
			done

			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1192
1193#
1194# Firstly, create a pool with 5 datasets. Then, create a single zone and
1195# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1196# and a zvol device to the zone.
1197#
1198# $1 zone name
1199# $2 zone root directory prefix
1200# $3 zone ip
1201#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the containers to the zone as a dataset.
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file
	# (answers the interactive sysid questions on first boot)
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo  "terminal=dtterm" >> $sysidcfg
	echo  "network_interface=primary {" >> $sysidcfg
	echo  "hostname=$zone_name" >> $sysidcfg
	echo  "}" >> $sysidcfg
	echo  "name_service=NONE" >> $sysidcfg
	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo  "security_policy=NONE" >> $sysidcfg
	echo  "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1305
1306#
1307# Reexport TESTPOOL & TESTPOOL(1-4)
1308#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	# Rebuild TESTPOOL and TESTPOOL1..4 from ZONE_POOL/ZONE_CTR<i>
	# and remount any container that is not currently mounted.
	while ((i < cntctr)); do
		if ((i == 0)); then
			# The first container is exported as TESTPOOL itself.
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# Remaining containers become TESTPOOL1..TESTPOOL4.
			# eval is required to assign and later expand the
			# computed variable name TESTPOOL$i.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1329
1330#
1331# Verify a given disk is online or offline
1332#
# Return 0 if pool/disk matches expected state, 1 otherwise
1334#
function check_state # pool disk state{online,offline}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# The status line for the disk must mention the expected state
	# (case-insensitively); the pipeline's status is our result.
	zpool status -v $pool | grep "$disk" \
	    | grep -i "$state" > /dev/null 2>&1
}
1346
1347#
1348# Get the mountpoint of snapshot
1349# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1350# as its mountpoint
1351#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	# The name must contain '@' with non-empty filesystem and
	# snapshot components on either side of it.
	if [[ $dataset != *@* || -z $fs || -z $snap ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1369
1370#
1371# Given a pool and file system, this function will verify the file system
1372# using the zdb internal tool. Note that the pool is exported and imported
1373# to ensure it has consistent state.
1374#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	# Any remaining arguments are extra search directories passed
	# to 'zpool import -d'.
	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	# Export/import cycle forces the pool to a consistent on-disk
	# state before zdb examines it.
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	# Verify the dataset with zdb (-cudi: checksum/uberblock/
	# dataset/intent-log checks — see zdb(1M)); capture output for
	# diagnostics on failure.
	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1408
1409#
1410# Given a pool, and this function list all disks in the pool
1411#
1412function get_disklist # pool
1413{
1414	typeset disklist=""
1415
1416	disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1417	    grep -v "\-\-\-\-\-" | \
1418	    egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1419
1420	echo $disklist
1421}
1422
1423# /**
1424#  This function kills a given list of processes after a time period. We use
1425#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
1426#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1427#  would be listed as FAIL, which we don't want : we're happy with stress tests
1428#  running for a certain amount of time, then finishing.
1429#
1430# @param $1 the time in seconds after which we should terminate these processes
1431# @param $2..$n the processes we wish to terminate.
1432# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"
	typeset pid

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	# Only signal processes that are still alive.
	for pid in $cpids; do
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
1452
1453#
1454# Verify a given hotspare disk is inuse or avail
1455#
# Return 0 if pool/disk matches expected state, 1 otherwise
1457#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is intentionally left global, matching the other
	# check_*_state helpers in this library.
	cur_state=$(get_device_state $pool $disk "spares")

	[[ $state == ${cur_state} ]]
}
1471
1472#
1473# Verify a given slog disk is inuse or avail
1474#
# Return 0 if pool/disk matches expected state, 1 otherwise
1476#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is intentionally left global, matching the other
	# check_*_state helpers in this library.
	cur_state=$(get_device_state $pool $disk "logs")

	[[ $state == ${cur_state} ]]
}
1490
1491#
1492# Verify a given vdev disk is inuse or avail
1493#
# Return 0 if pool/disk matches expected state, 1 otherwise
1495#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is intentionally left global, matching the other
	# check_*_state helpers in this library.
	cur_state=$(get_device_state $pool $disk)

	[[ $state == ${cur_state} ]]
}
1509
1510#
1511# Check the output of 'zpool status -v <pool>',
1512# and to see if the content of <token> contain the <keyword> specified.
1513#
# Return 0 if it contains the keyword, 1 otherwise
1515#
function check_pool_status # pool token keyword
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3

	# Pick the "<token>: ..." line out of 'zpool status -v' and see
	# whether it mentions the keyword (case-insensitively).
	zpool status -v "$pool" 2>/dev/null \
	    | nawk -v token="$token:" '($1==token) {print $0}' \
	    | grep -i "$keyword" > /dev/null 2>&1
}
1528
1529#
1530# These 5 following functions are instance of check_pool_status()
1531#	is_pool_resilvering - to check if the pool is resilver in progress
1532#	is_pool_resilvered - to check if the pool is resilver completed
1533#	is_pool_scrubbing - to check if the pool is scrub in progress
1534#	is_pool_scrubbed - to check if the pool is scrub completed
1535#	is_pool_scrub_stopped - to check if the pool is scrub stopped
1536#
function is_pool_resilvering #pool
{
	# Propagate check_pool_status's exit status directly.
	check_pool_status "$1" "scan" "resilver in progress since "
}
1542
function is_pool_resilvered #pool
{
	# Propagate check_pool_status's exit status directly.
	check_pool_status "$1" "scan" "resilvered "
}
1548
function is_pool_scrubbing #pool
{
	# Propagate check_pool_status's exit status directly.
	check_pool_status "$1" "scan" "scrub in progress since "
}
1554
function is_pool_scrubbed #pool
{
	# Propagate check_pool_status's exit status directly.
	check_pool_status "$1" "scan" "scrub repaired"
}
1560
function is_pool_scrub_stopped #pool
{
	# Propagate check_pool_status's exit status directly.
	check_pool_status "$1" "scan" "scrub canceled"
}
1566
1567#
# Use create_pool()/destroy_pool() to clean up the information
# in the given disk to avoid slice overlapping.
1570#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Creating and immediately destroying a throwaway pool rewrites
	# the labels on the given vdevs.
	poolexists $pool && destroy_pool $pool

	create_pool $pool $@
	destroy_pool $pool

	return 0
}
1584
1585#/**
1586# A function to find and locate free disks on a system or from given
1587# disks as the parameter. It works by locating disks that are in use
1588# as swap devices and dump devices, and also disks listed in /etc/vfstab
1589#
1590# $@ given disks to find which are free, default is all disks in
1591# the test system
1592#
1593# @return a string containing the list of available disks
1594#*/
1595function find_disks
1596{
1597	sfi=/tmp/swaplist.$$
1598	dmpi=/tmp/dumpdev.$$
1599	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1600
1601	swap -l > $sfi
1602	dumpadm > $dmpi 2>/dev/null
1603
1604# write an awk script that can process the output of format
1605# to produce a list of disks we know about. Note that we have
1606# to escape "$2" so that the shell doesn't interpret it while
1607# we're creating the awk script.
1608# -------------------
1609	cat > /tmp/find_disks.awk <<EOF
1610#!/bin/nawk -f
1611	BEGIN { FS="."; }
1612
1613	/^Specify disk/{
1614		searchdisks=0;
1615	}
1616
1617	{
1618		if (searchdisks && \$2 !~ "^$"){
1619			split(\$2,arr," ");
1620			print arr[1];
1621		}
1622	}
1623
1624	/^AVAILABLE DISK SELECTIONS:/{
1625		searchdisks=1;
1626	}
1627EOF
1628#---------------------
1629
1630	chmod 755 /tmp/find_disks.awk
1631	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
1632	rm /tmp/find_disks.awk
1633
1634	unused=""
1635	for disk in $disks; do
1636	# Check for mounted
1637		grep "${disk}[sp]" /etc/mnttab >/dev/null
1638		(($? == 0)) && continue
1639	# Check for swap
1640		grep "${disk}[sp]" $sfi >/dev/null
1641		(($? == 0)) && continue
1642	# check for dump device
1643		grep "${disk}[sp]" $dmpi >/dev/null
1644		(($? == 0)) && continue
1645	# check to see if this disk hasn't been explicitly excluded
1646	# by a user-set environment variable
1647		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
1648		(($? == 0)) && continue
1649		unused_candidates="$unused_candidates $disk"
1650	done
1651	rm $sfi
1652	rm $dmpi
1653
1654# now just check to see if those disks do actually exist
1655# by looking for a device pointing to the first slice in
1656# each case. limit the number to max_finddisksnum
1657	count=0
1658	for disk in $unused_candidates; do
1659		if [ -b /dev/dsk/${disk}s0 ]; then
1660		if [ $count -lt $max_finddisksnum ]; then
1661			unused="$unused $disk"
1662			# do not impose limit if $@ is provided
1663			[[ -z $@ ]] && ((count = count + 1))
1664		fi
1665		fi
1666	done
1667
1668# finally, return our disk list
1669	echo $unused
1670}
1671
1672#
1673# Add specified user to specified group
1674#
1675# $1 group name
1676# $2 user name
1677# $3 base of the homedir (optional)
1678#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	# Both the group and the user name are mandatory.
	if [[ -z $gname || -z $uname ]]; then
		log_fail "group name or user name are not defined."
	fi

	log_must useradd -g $gname -d $basedir/$uname -m $uname

	return 0
}
1693
1694#
1695# Delete the specified user.
1696#
1697# $1 login name
1698# $2 base of the homedir (optional)
1699#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	[[ -z $user ]] && log_fail "login name is necessary."

	# Only run userdel when the account actually exists.
	if id $user > /dev/null 2>&1; then
		log_must userdel $user
	fi

	# Remove the home directory regardless.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
1717
1718#
1719# Select valid gid and create specified group.
1720#
1721# $1 group name
1722#
function add_group #<group_name>
{
	typeset group=$1

	[[ -z $group ]] && log_fail "group name is necessary."

	# Probe upward from gid 100 until groupadd accepts the gid.
	typeset -i gid=100
	while :; do
		groupadd -g $gid $group > /dev/null 2>&1
		case $? in
			0) return 0 ;;
			# exit 4: the gid is already in use — try the next one
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
1744
1745#
1746# Delete the specified group.
1747#
1748# $1 group name
1749#
function del_group #<group_name>
{
	typeset grp=$1

	[[ -z $grp ]] && log_fail "group name is necessary."

	# Renaming the group to its own name probes for existence.
	groupmod -n $grp $grp > /dev/null 2>&1
	case $? in
		# exit 6: group does not exist — nothing to delete
		6) return 0 ;;
		# exit 9: the name exists as a group — delete it
		9) log_must groupdel $grp ;;
		*) return 1 ;;
	esac

	return 0
}
1769
1770#
1771# This function will return true if it's safe to destroy the pool passed
1772# as argument 1. It checks for pools based on zvols and files, and also
1773# files contained in a pool that may have a different mountpoint.
1774#
1775function safe_to_destroy_pool { # $1 the pool name
1776
1777	typeset pool=""
1778	typeset DONT_DESTROY=""
1779
1780	# We check that by deleting the $1 pool, we're not
1781	# going to pull the rug out from other pools. Do this
1782	# by looking at all other pools, ensuring that they
1783	# aren't built from files or zvols contained in this pool.
1784
1785	for pool in $(zpool list -H -o name)
1786	do
1787		ALTMOUNTPOOL=""
1788
1789		# this is a list of the top-level directories in each of the
1790		# files that make up the path to the files the pool is based on
1791		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
1792			awk '{print $1}')
1793
1794		# this is a list of the zvols that make up the pool
1795		ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
1796		    | awk '{print $1}')
1797
1798		# also want to determine if it's a file-based pool using an
1799		# alternate mountpoint...
1800		POOL_FILE_DIRS=$(zpool status -v $pool | \
1801					grep / | awk '{print $1}' | \
1802					awk -F/ '{print $2}' | grep -v "dev")
1803
1804		for pooldir in $POOL_FILE_DIRS
1805		do
1806			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
1807					grep "${pooldir}$" | awk '{print $1}')
1808
1809			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
1810		done
1811
1812
1813		if [ ! -z "$ZVOLPOOL" ]
1814		then
1815			DONT_DESTROY="true"
1816			log_note "Pool $pool is built from $ZVOLPOOL on $1"
1817		fi
1818
1819		if [ ! -z "$FILEPOOL" ]
1820		then
1821			DONT_DESTROY="true"
1822			log_note "Pool $pool is built from $FILEPOOL on $1"
1823		fi
1824
1825		if [ ! -z "$ALTMOUNTPOOL" ]
1826		then
1827			DONT_DESTROY="true"
1828			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
1829		fi
1830	done
1831
1832	if [ -z "${DONT_DESTROY}" ]
1833	then
1834		return 0
1835	else
1836		log_note "Warning: it is not safe to destroy $1!"
1837		return 1
1838	fi
1839}
1840
1841#
1842# Get the available ZFS compression options
1843# $1 option type zfs_set|zfs_compress
1844#
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
			gzip-6 gzip-7 gzip-8 gzip-9"

	# Base option set depends on which property list was requested.
	case $1 in
		zfs_compress)	COMPRESS_OPTS="on lzjb" ;;
		zfs_set)	COMPRESS_OPTS="on off lzjb" ;;
	esac

	typeset valid_opts="$COMPRESS_OPTS"
	# Append the gzip levels only when this zfs build knows gzip.
	zfs get 2>&1 | grep gzip >/dev/null 2>&1
	if [[ $? -eq 0 ]]; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
1863
1864#
1865# Verify zfs operation with -p option work as expected
1866# $1 operation, value could be create, clone or rename
1867# $2 dataset type, value could be fs or vol
1868# $3 dataset name
1869# $4 new dataset name
1870#
function verify_opt_p_ops
{
	# $1 ops: create | clone | rename
	# $2 datatype: fs | vol
	# $3 dataset (source), $4 newdataset (target)
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# create takes no source; the target is $3.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			# clone requires an existing snapshot as source.
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			# rename requires an existing non-snapshot source.
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must zfs destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
1934
1935#
1936# Get configuration of pool
1937# $1 pool name
1938# $2 config name
1939#
#
# Get configuration of pool
# $1 pool name
# $2 config name
#
# Prints the config value on stdout; returns 1 if the pool does not exist.
#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# The last column of 'zpool list -H' is the altroot; "-" means
	# the pool is imported normally and its cache is valid for zdb.
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	if [[ -n $value ]] ; then
		# Strip the surrounding single quotes zdb prints around
		# string values.  Note: the quote is escaped (\') so the
		# pattern is well-formed in both ksh and bash — the
		# previous bare quote was a syntax error under bash.
		value=${value#\'}
		value=${value%\'}
	fi
	echo $value

	return 0
}
1965
1966#
1967# Privated function. Random select one of items from arguments.
1968#
1969# $1 count
1970# $2-n string
1971#
function _random_get
{
	typeset cnt=$1
	shift

	typeset items="$@"
	typeset -i pick
	((pick = RANDOM % cnt + 1))

	# Treat the argument string as space-delimited fields and emit
	# the randomly selected one (empty when pick exceeds the count).
	echo "$items" | cut -f $pick -d ' '
}
1984
1985#
# Randomly select one item from the arguments; the selection may also be
# NONE (the empty string)
1987#
#
# Randomly select one item from the arguments, where the selection may
# also be NONE (the empty string).
#
function random_get_with_non
{
	typeset -i cnt=$#

	# Widen the range by one so the pick can land past the last
	# argument, in which case _random_get returns the empty string
	# (the "NONE" outcome).  The previous "((cnt =+ 1))" assigned 1
	# to cnt instead of incrementing it, so only the first argument
	# was ever returned.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
1995
1996#
# Randomly select one item from the arguments; NONE is never selected
1998#
function random_get
{
	typeset -i argc=$#

	# Delegate to the private helper with the exact argument count,
	# so NONE (empty) can never be selected.
	_random_get "$argc" "$@"
}
2003
2004#
2005# Detect if the current system support slog
2006#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	typeset -i ret=0
	# 'zpool create -n' is a dry run: nothing is created, but the
	# configuration — including the separate log vdev — is
	# validated, which tells us whether slog is supported.
	if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
		ret=1
	fi
	rm -r $dir

	return $ret
}
2025
2026#
2027# The function will generate a dataset name with specific length
2028# $1, the length of the name
2029# $2, the base string to construct the name
2030#
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i reps
	typeset name=""

	# Repeat the base string ceil(len / baselen) times so the
	# result is at least $len characters long.
	((reps = (len + baselen - 1) / baselen))
	while ((reps > 0)); do
		name="${name}${basestr}"
		((reps -= 1))
	done

	echo $name
}
2052
2053#
2054# Get cksum tuple of dataset
2055# $1 dataset name
2056#
2057# sample zdb output:
2058# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2059# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2060# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2061# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2062function datasetcksum
2063{
2064	typeset cksum
2065	sync
2066	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2067		| awk -F= '{print $7}')
2068	echo $cksum
2069}
2070
2071#
2072# Get cksum of file
# $1 file path
2074#
function checksum
{
	# The first field of cksum(1) output is the CRC value.
	cksum $1 | awk '{print $1}'
}
2081
2082#
2083# Get the given disk/slice state from the specific field of the pool
2084#
2085function get_device_state #pool disk field("", "spares","logs")
2086{
2087	typeset pool=$1
2088	typeset disk=${2#/dev/dsk/}
2089	typeset field=${3:-$pool}
2090
2091	state=$(zpool status -v "$pool" 2>/dev/null | \
2092		nawk -v device=$disk -v pool=$pool -v field=$field \
2093		'BEGIN {startconfig=0; startfield=0; }
2094		/config:/ {startconfig=1}
2095		(startconfig==1) && ($1==field) {startfield=1; next;}
2096		(startfield==1) && ($1==device) {print $2; exit;}
2097		(startfield==1) &&
2098		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2099	echo $state
2100}
2101
2102
2103#
2104# print the given directory filesystem type
2105#
2106# $1 directory name
2107#
function get_fstype
{
	typeset dir=$1

	[[ -n $dir ]] || log_fail "Usage: get_fstype <directory>"

	#
	# 'df -n' prints "<mountpoint> : <fstype>"; the type is the
	# third whitespace-separated field, e.g.:
	#  $ df -n /
	#  /		  : ufs
	#
	df -n $dir | awk '{print $3}'
}
2122
2123#
2124# Given a disk, label it to VTOC regardless what label was on the disk
2125# $1 disk
2126#
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	# Build an answer file for format(1M)'s interactive label
	# dialog; the sequence differs between x86 and sparc.
	if [[ $arch == "i386" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file

		# x86 needs a Solaris fdisk partition before labeling.
		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	#
	# wait the format to finish
	#
	sleep 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2170
2171#
2172# check if the system was installed as zfsroot or not
# return: 0 if true, non-zero otherwise
2174#
function is_zfsroot
{
	# The root filesystem type line from 'df -n /' mentions "zfs"
	# on a zfsroot system; the pipeline status is the result.
	df -n / | grep zfs > /dev/null 2>&1
}
2180
2181#
2182# get the root filesystem name if it's zfsroot system.
2183#
2184# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	# The dataset mounted on "/" with type zfs is the root fs.
	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootfs"

	# Double-check that the dataset is actually known to zfs.
	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2200
2201#
2202# get the rootfs's pool name
2203# return:
2204#       rootpool name
2205#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	# The dataset mounted on "/" with type zfs is the root fs.
	rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		 /etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootpool"

	if zfs list $rootfs > /dev/null 2>&1; then
		# The pool is the first component of the dataset name.
		rootpool=$(echo $rootfs | awk -F\/ '{print $1}')
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2223
2224#
2225# Check if the given device is physical device
2226#
function is_physical_device #device
{
	typeset device=${1#/dev/dsk/}
	device=${device#/dev/rdsk/}

	# A physical device name looks like cXtYdZ (controller/target/
	# disk); the pipeline's status is the result.
	echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
}
2235
2236#
2237# Get the directory path of given device
2238#
#
# Get the directory path of given device
#
function get_device_dir #device
{
	typeset device=$1

	# Call the predicate directly.  The previous form,
	# "if ! $(is_physical_device $device)", depended on the obscure
	# POSIX rule that an empty command inherits the status of its
	# command substitution, and would have executed any output of
	# the helper as a command.
	if ! is_physical_device $device ; then
		# Non-physical devices (e.g. file vdevs): return the
		# parent directory, leaving "/" itself untouched.
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		echo $device
	else
		# Physical cXtYdZ devices live under /dev/dsk.
		echo "/dev/dsk"
	fi
}
2252
2253#
2254# Get the package name
2255#
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	# Convert every '/' in the path to '-' and add the SUNWstc-
	# prefix.
	echo "SUNWstc-${dirpath//\//-}"
}
2262
2263#
2264# Get the word numbers from a string separated by white space
2265#
function get_word_count
{
	typeset words="$1"

	# wc -w counts whitespace-separated words.
	echo $words | wc -w
}
2270
2271#
# Verify that the required number of disks is given
2273#
function verify_disk_count
{
	typeset -i min=${2:-1}
	typeset -i count=$(get_word_count "$1")

	# Abort the test as untested when too few disks were supplied.
	if ((count < min)); then
		log_untested "A minimum of $min disks is required to run." \
			" You specified $count disk(s)"
	fi
}
2285
function ds_is_volume
{
	# True when the dataset's type property is "volume".
	[[ $(get_prop type $1) = "volume" ]]
}
2292
function ds_is_filesystem
{
	# True when the dataset's type property is "filesystem".
	[[ $(get_prop type $1) = "filesystem" ]]
}
2299
function ds_is_snapshot
{
	# True when the dataset's type property is "snapshot".
	[[ $(get_prop type $1) = "snapshot" ]]
}
2306
2307#
2308# Check if Trusted Extensions are installed and enabled
2309#
function is_te_enabled
{
	# The labeld service state line contains "enabled" when Trusted
	# Extensions are active; grep's status (and output) propagate.
	svcs -H -o state labeld 2>/dev/null | grep "enabled"
}
2319
2320# Utility function to determine if a system has multiple cpus.
function is_mp
{
	# psrinfo prints one line per processor.
	typeset -i ncpu=$(psrinfo | wc -l)

	((ncpu > 1))
}
2325
2326function get_cpu_freq
2327{
2328	psrinfo -v 0 | awk '/processor operates at/ {print $6}'
2329}
2330
2331# Run the given command as the user provided.
2332function user_run
2333{
2334	typeset user=$1
2335	shift
2336
2337	eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
2338	return $?
2339}
2340
2341#
2342# Check if the pool contains the specified vdevs
2343#
2344# $1 pool
2345# $2..n <vdev> ...
2346#
2347# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2348# vdevs is not in the pool, and 2 if pool name is missing.
2349#
#
# Check if the pool contains the specified vdevs
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
# vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev
	typeset tmpfile

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	tmpfile=$(mktemp)
	zpool list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		# Match on the basename so both bare names and full
		# /dev paths are accepted.
		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
		if (($? != 0)); then
			# Clean up the temp file on the failure path too
			# (the old code leaked it here).
			rm -f $tmpfile
			return 1
		fi
	done

	rm -f $tmpfile

	return 0
}
2373
# Print the largest of the integer arguments.
# $1..n integer values
function get_max
{
	# 'typeset -i' declares integers; the old 'typeset -l' actually
	# requested lowercase conversion, not a "long" type.
	typeset -i i max=$1
	shift

	for i in "$@"; do
		# Plain arithmetic comparison; no need for the old
		# $(echo $((...))) subshell per iteration.
		((i > max)) && max=$i
	done

	echo $max
}
2385
# Print the smallest of the integer arguments.
# $1..n integer values
function get_min
{
	# 'typeset -i' declares integers; the old 'typeset -l' actually
	# requested lowercase conversion, not a "long" type.
	typeset -i i min=$1
	shift

	for i in "$@"; do
		# Plain arithmetic comparison; no need for the old
		# $(echo $((...))) subshell per iteration.
		((i < min)) && min=$i
	done

	echo $min
}
2397
2398#
2399# Generate a random number between 1 and the argument.
2400#
2401function random
2402{
2403        typeset max=$1
2404        echo $(( ($RANDOM % $max) + 1 ))
2405}
2406
2407# Write data that can be compressed into a directory
2408function write_compressible
2409{
2410	typeset dir=$1
2411	typeset megs=$2
2412	typeset nfiles=${3:-1}
2413	typeset bs=${4:-1024k}
2414	typeset fname=${5:-file}
2415
2416	[[ -d $dir ]] || log_fail "No directory: $dir"
2417
2418	log_must eval "fio \
2419	    --name=job \
2420	    --fallocate=0 \
2421	    --minimal \
2422	    --randrepeat=0 \
2423	    --buffer_compress_percentage=66 \
2424	    --buffer_compress_chunk=4096 \
2425	    --directory=$dir \
2426	    --numjobs=$nfiles \
2427	    --rw=write \
2428	    --bs=$bs \
2429	    --filesize=$megs \
2430	    --filename_format='$fname.\$jobnum' >/dev/null"
2431}
2432
function get_objnum
{
	typeset pathname=$1

	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"

	# Print the inode (object) number of the path.
	stat -c %i $pathname
}
2442