xref: /illumos-gate/usr/src/test/zfs-tests/include/libtest.shlib (revision fa79a855d371dfcb29461ad6ebaf48a458bf9f14)
1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24# Use is subject to license terms.
25# Copyright (c) 2012, 2017 by Delphix. All rights reserved.
26# Copyright 2016 Nexenta Systems, Inc.
27# Copyright (c) 2017 Datto Inc.
28#
29
30. ${STF_TOOLS}/contrib/include/logapi.shlib
31
32# Determine whether a dataset is mounted
33#
34# $1 dataset name
35# $2 filesystem type; optional - defaulted to zfs
36#
37# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
38
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			# A leading '/' means $1 is a mountpoint, so compare
			# against the mountpoint column of 'zfs mount';
			# otherwise compare against the dataset name column.
			if [[ "$1" == "/"* ]] ; then
				for out in $(zfs mount | awk '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $(zfs mount | awk '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			# df failure (e.g. no such filesystem) propagates the
			# df exit status, giving the "2 on error" behavior.
			out=$(df -F $fstype $1 2>/dev/null)
			ret=$?
			(($ret != 0)) && return $ret

			# df prints "<dir> (<name>): ..."; carve out the
			# mount directory and the device/resource name and
			# accept a match on either.
			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	esac

	return 1
}
74
75# Return 0 if a dataset is mounted; 1 otherwise
76#
77# $1 dataset name
78# $2 filesystem type; optional - defaulted to zfs
79
function mounted
{
	# Delegate to ismounted; any status other than "mounted" (0)
	# collapses to a plain failure.
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
86
87# Return 0 if a dataset is unmounted; 1 otherwise
88#
89# $1 dataset name
90# $2 filesystem type; optional - defaulted to zfs
91
function unmounted
{
	# A dataset counts as unmounted only when ismounted reports
	# status 1; both 0 (mounted) and 2 (error) fail this check.
	typeset -i rc
	ismounted $1 $2
	rc=$?
	[[ $rc -eq 1 ]] && return 0
	return 1
}
98
99# split line on ","
100#
101# $1 - line to split
102
function splitline
{
	# Emit $1 with every "," replaced by a space (word-split on
	# output, matching the historical echo | sed pipeline).
	typeset line=$1
	echo ${line//,/ }
}
107
108function default_setup
109{
110	default_setup_noexit "$@"
111
112	log_pass
113}
114
115#
116# Given a list of disks, setup storage pools and datasets.
117#
function default_setup_noexit
{
	typeset disklist=$1	# disks used to build $TESTPOOL
	typeset container=$2	# non-empty: also create $TESTCTR/$TESTFS1
	typeset volume=$3	# non-empty: also create $TESTVOL

	if is_global_zone; then
		# Start from a clean slate: remove any stale pool and
		# its leftover mountpoint directory first.
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		# Local zones cannot create pools; re-export instead.
		reexport_pool
	fi

	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		rm -rf $TESTDIR1  || \
			log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		# The container is a non-mountable parent for $TESTFS1.
		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			# Zvols are unavailable in a local zone; create a
			# plain filesystem of the same name instead.
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
161
162#
163# Given a list of disks, setup a storage pool, file system and
164# a container.
165#
function default_container_setup
{
	# Standard setup with the container flag enabled.
	default_setup "$1" "true"
}
172
173#
174# Given a list of disks, setup a storage pool,file system
175# and a volume.
176#
function default_volume_setup
{
	# Standard setup with the volume flag enabled (no container).
	default_setup "$1" "" "true"
}
183
184#
185# Given a list of disks, setup a storage pool,file system,
186# a container and a volume.
187#
function default_container_volume_setup
{
	# Standard setup with both the container and volume flags set.
	default_setup "$1" "true" "true"
}
194
195#
196# Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
197# filesystem
198#
199# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
200# $2 snapshot name. Default, $TESTSNAP
201#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	# The snapshot must not pre-exist, and its parent dataset must.
	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}
218
219#
220# Create a clone from a snapshot, default clone name is $TESTCLONE.
221#
222# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
223# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
224#
function create_clone   # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	# Both names must be non-empty before attempting the clone.
	if [[ -z $snap ]]; then
		log_fail "Snapshot name is undefined."
	fi
	if [[ -z $clone ]]; then
		log_fail "Clone name is undefined."
	fi

	log_must zfs clone $snap $clone
}
237
238#
239# Create a bookmark of the given snapshot.  Defaultly create a bookmark on
240# filesystem.
241#
242# $1 Existing filesystem or volume name. Default, $TESTFS
243# $2 Existing snapshot name. Default, $TESTSNAP
244# $3 bookmark name. Default, $TESTBKMARK
245#
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	# The bookmark must not pre-exist; both the dataset and the
	# snapshot it is taken from must already exist.
	if bkmarkexists $fs_vol#$bkmark; then
		log_fail "$fs_vol#$bkmark already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."
	snapexists $fs_vol@$snap || \
		log_fail "$fs_vol@$snap must exist."

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
266
267#
268# Create a temporary clone result of an interrupted resumable 'zfs receive'
269# $1 Destination filesystem name. Must not exist, will be created as the result
270#    of this function along with its %recv temporary clone
271# $2 Source filesystem name. Must not exist, will be created and destroyed
272#
function create_recv_clone
{
	typeset recvfs="$1"
	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
	typeset snap="$sendfs@snap1"
	typeset incr="$sendfs@snap2"
	typeset mountpoint="$TESTDIR/create_recv_clone"
	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"

	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."

	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
	datasetexists $sendfs && log_fail "Send filesystem must not exist."

	# Receive a full stream to create $recvfs, then deliberately
	# truncate an incremental stream (dd keeps only the first 10k)
	# so the resumable receive is interrupted partway through,
	# leaving the %recv temporary clone behind.
	log_must zfs create -o mountpoint="$mountpoint" $sendfs
	log_must zfs snapshot $snap
	log_must eval "zfs send $snap | zfs recv -u $recvfs"
	log_must mkfile 1m "$mountpoint/data"
	log_must zfs snapshot $incr
	log_must eval "zfs send -i $snap $incr | dd bs=10k count=1 > $sendfile"
	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
	log_must zfs destroy -r $sendfs
	log_must rm -f "$sendfile"

	# The interrupted receive must leave the clone inconsistent.
	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
		log_fail "Error creating temporary $recvfs/%recv clone"
	fi
}
301
#
# Set up a mirrored pool plus the default dataset, then log a PASS.
# All arguments (two or more mirror devices) are forwarded to
# default_mirror_setup_noexit, which hands the full list to
# 'zpool create mirror'; forwarding only $1 $2 $3 would silently
# drop any device past the third.
#
function default_mirror_setup
{
	default_mirror_setup_noexit "$@"

	log_pass
}
308
309#
310# Given a pair of disks, set up a storage pool and dataset for the mirror
311# @parameters: $1 the primary side of the mirror
312#   $2 the secondary side of the mirror
313# @uses: ZPOOL ZFS TESTPOOL TESTFS
314function default_mirror_setup_noexit
315{
316	readonly func="default_mirror_setup_noexit"
317	typeset primary=$1
318	typeset secondary=$2
319
320	[[ -z $primary ]] && \
321		log_fail "$func: No parameters passed"
322	[[ -z $secondary ]] && \
323		log_fail "$func: No secondary partition passed"
324	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
325	log_must zpool create -f $TESTPOOL mirror $@
326	log_must zfs create $TESTPOOL/$TESTFS
327	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
328}
329
330#
331# create a number of mirrors.
332# We create a number($1) of 2 way mirrors using the pairs of disks named
333# on the command line. These mirrors are *not* mounted
334# @parameters: $1 the number of mirrors to create
335#  $... the devices to use to create the mirrors on
336# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i count=$1

	shift
	# Build pools $TESTPOOL<count> down to $TESTPOOL1, consuming
	# the device list two disks per two-way mirror.
	while ((count > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$count ]] && rm -rf /$TESTPOOL$count
		log_must zpool create -f $TESTPOOL$count mirror $1 $2
		shift 2
		((count = count - 1))
	done
}
350
351#
352# create a number of raidz pools.
353# We create a number($1) of 2 raidz pools  using the pairs of disks named
354# on the command line. These pools are *not* mounted
355# @parameters: $1 the number of pools to create
356#  $... the devices to use to create the pools on
357# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i count=$1

	shift
	# Build pools $TESTPOOL<count> down to $TESTPOOL1, consuming
	# the device list two disks per raidz vdev.
	while ((count > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$count ]] && rm -rf /$TESTPOOL$count
		log_must zpool create -f $TESTPOOL$count raidz $1 $2
		shift 2
		((count = count - 1))
	done
}
371
372#
373# Destroy the configured testpool mirrors.
374# the mirrors are of the form ${TESTPOOL}{number}
375# @uses: ZPOOL ZFS TESTPOOL
376function destroy_mirrors
377{
378	default_cleanup_noexit
379
380	log_pass
381}
382
383#
384# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
385# $1 the list of disks
386#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	# Build the raidz from the full disk list; the hard-coded
	# "$1 $2 $3" form silently ignored every disk past the third.
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
403
404#
405# Common function used to cleanup storage pools and datasets.
406#
407# Invoked at the start of the test suite to ensure the system
408# is in a known state, and also at the end of each set of
409# sub-tests to ensure errors from one set of tests doesn't
410# impact the execution of the next set.
411
412function default_cleanup
413{
414	default_cleanup_noexit
415
416	log_pass
417}
418
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		# $KEEP lists pools that must survive cleanup; turn it
		# into an egrep pattern and filter those names out.
		exclude=`eval echo \"'(${KEEP})'\"`
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		while [ ! -z ${ALL_POOLS} ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				# Re-read the remaining pools each pass;
				# destroying one pool may make another
				# safe to destroy on the next iteration.
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		# In a local zone pools cannot be destroyed; instead
		# scrub the delegated $ZONE_POOL/$ZONE_CTR datasets.
		typeset fs=""
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
				log_must zfs destroy -Rf $fs
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# Only reset checksum when encryption is
				# absent or off (NOTE(review): presumably
				# because encrypted datasets constrain the
				# checksum property — confirm).
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must rm -rf $TESTDIR
}
495
496
497#
498# Common function used to cleanup storage pools, file systems
499# and containers.
500#
function default_container_cleanup
{
	# In a local zone the pool must be re-exported before any
	# dataset operations will work.
	if ! is_global_zone; then
		reexport_pool
	fi

	# Unmount, then recursively destroy the container's child
	# filesystem and the container itself, if they exist.
	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must zfs destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	# Finish with the common cleanup, which also logs a PASS.
	default_cleanup
}
522
523#
524# Common function used to cleanup snapshot of file system or volume. Default to
525# delete the file system's snapshot
526#
527# $1 snapshot name
528#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not existed."
	fi

	#
	# The value reported by 'get_prop mountpoint' may not reflect
	# the real mountpoint once the snapshot is unmounted, so only
	# record the mountpoint while the snapshot is still mounted.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	log_must zfs destroy $snap
	# Remove the now-stale mountpoint directory, if one was recorded.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
553
554#
555# Common function used to cleanup clone.
556#
557# $1 clone name
558#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		log_fail "'$clone' does not existed."
	fi

	# As in destroy_snapshot: record the mountpoint only while the
	# clone is still mounted, since 'get_prop' may be stale after
	# an unmount.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	log_must zfs destroy $clone
	# Remove the leftover mountpoint directory, if any.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
579
580#
581# Common function used to cleanup bookmark of file system or volume.  Default
582# to delete the file system's bookmark.
583#
584# $1 bookmark name
585#
#
# Destroy the given bookmark; defaults to the file system's bookmark.
# Fails the test when the bookmark does not exist.  (The previous
# error message interpolated the undefined variable $bkmarkp, so the
# offending bookmark name never appeared in the log.)
#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		log_fail "'$bkmark' does not exist."
	fi

	log_must zfs destroy $bkmark
}
596
597# Return 0 if a snapshot exists; $? otherwise
598#
599# $1 - snapshot name
600
function snapexists
{
	# 'zfs list' succeeds exactly when the snapshot exists; its
	# exit status becomes this function's status.
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
606
607#
608# Return 0 if a bookmark exists; $? otherwise
609#
610# $1 - bookmark name
611#
function bkmarkexists
{
	# Propagate the 'zfs list' status: 0 when the bookmark exists.
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
}
617
618#
619# Set a property to a certain value on a dataset.
620# Sets a property of the dataset to the value as passed in.
621# @param:
622#	$1 dataset who's property is being set
623#	$2 property to set
624#	$3 value to set property to
625# @return:
626#	0 if the property could be set.
627#	non-zero otherwise.
628# @use: ZFS
629#
function dataset_setprop
{
	typeset fn=dataset_setprop

	# Require dataset, property and value.
	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi

	# Capture both stdout and stderr so failures can be logged.
	typeset out
	out=$(zfs set $2=$3 $1 2>&1)
	typeset rc=$?
	if ((rc != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rc"
		log_note "Output: $out"
		return $rc
	fi
	return 0
}
650
651#
652# Assign suite defined dataset properties.
653# This function is used to apply the suite's defined default set of
654# properties to a dataset.
655# @parameters: $1 dataset to use
656# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
657# @returns:
658#   0 if the dataset has been altered.
659#   1 if no pool name was passed in.
660#   2 if the dataset could not be found.
661#   3 if the dataset could not have it's properties set.
662#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	# Scan dataset names only (-H -o name); the previous code walked
	# every token of the human-readable 'zfs list' table, including
	# the header, sizes and mountpoints.
	typeset confset=
	typeset -i found=0
	for confset in $(zfs list -H -o name); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	# Dataset not present: report "could not be found".
	[[ $found -eq 0 ]] && return 2
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
690
691#
692# Check a numeric assertion
693# @parameter: $@ the assertion to check
694# @output: big loud notice if assertion failed
695# @use: log_fail
696#
function assert
{
	# Evaluate the arithmetic assertion; abort the test when false.
	if ! (($@)); then
		log_fail "$@"
	fi
}
701
702#
703# Function to format partition size of a disk
704# Given a disk cxtxdx reduces all partitions
705# to 0 size
706#
function zero_partitions #<whole_disk_name>
{
	typeset disk=$1
	typeset slice

	# Slice 2 (the whole-disk slice) is intentionally skipped.
	for slice in 0 1 3 4 5 6 7; do
		set_partition $slice "" 0mb $disk
	done
}
717
718#
719# Given a slice, size and disk, this function
720# formats the slice to the specified size.
721# Size should be specified with units as per
722# the `format` command requirements eg. 100mb 3gb
723#
function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	[[ -z $slicenum || -z $size || -z $disk ]] && \
	    log_fail "The slice, size or disk name is unspecified."
	typeset format_file=/var/tmp/format_in.$$

	# Scripted input for format(1M): select the slice, accept the
	# default id/flag prompts (blank lines), supply start and size,
	# write the label and quit out of both menus.
	printf '%s\n' "partition" "$slicenum" "" "" "$start" "$size" \
	    "label" "" "q" "q" > $format_file

	format -e -s -d $disk -f $format_file
	typeset ret_val=$?
	rm -f $format_file
	[[ $ret_val -ne 0 ]] && \
	    log_fail "Unable to format $disk slice $slicenum to $size"
	return 0
}
752
753#
754# Get the end cyl of the given slice
755#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	# Normalize to the bare device name: strip any /dev/[r]dsk/
	# prefix and a trailing slice suffix.
	disk=${disk#/dev/dsk/}
	disk=${disk#/dev/rdsk/}
	disk=${disk%s*}

	# Sectors per cylinder, taken from the whole-disk slice's VTOC.
	typeset -i ratio=0
	ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		grep "sectors\/cylinder" | \
		awk '{print $2}')

	# No ratio available: emit nothing.
	if ((ratio == 0)); then
		return
	fi

	# Field 6 of 'prtvtoc -h' for the matching slice (its last
	# sector), converted below to a whole-cylinder count.
	typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		nawk -v token="$slice" '{if ($1==token) print $6}')

	((endcyl = (endcyl + 1) / ratio))
	echo $endcyl
}
783
784
785#
786# Given a size,disk and total slice number,  this function formats the
787# disk slices from 0 to the total slice number with the same specified
788# size.
789#
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl
	typeset -i i

	zero_partitions $disk_name
	# Lay out slices 0..total_slices-1 back to back, skipping the
	# whole-disk slice 2; each slice starts where the previous one
	# ended.
	for ((i = 0; i < total_slices; i++)); do
		((i == 2)) && continue
		set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
	done
}
809
810#
811# This function continues to write to a filenum number of files into dirnum
812# number of directories until either file_write returns an error or the
813# maximum number of files per directory have been written.
814#
815# Usage:
816# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
817#
818# Return value: 0 on success
819#		non 0 on error
820#
821# Where :
822#	destdir:    is the directory where everything is to be created under
823#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
824#	filenum:    the maximum number of files per subdirectory
825#	bytes:	    number of bytes to write
826#	num_writes: numer of types to write out bytes
827#	data:	    the data that will be writen
828#
829#	E.g.
830#	file_fs /testdir 20 25 1024 256 0
831#
832# Note: bytes * num_writes equals the size of the testfile
833#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1	# loop flag: cleared to stop writing
	typeset -i idirnum=0	# index of the subdirectory being filled
	typeset -i fn=0		# index of the file within that subdirectory
	typeset -i retval=0

	log_must mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# Stop once the directory limit is reached (a negative
		# dirnum means no limit).
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# Any write error (e.g. out of space) ends the fill; the
		# error status is returned to the caller.
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		# Move on to a fresh subdirectory after filenum files.
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			log_must mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
871
872#
873# Simple function to get the specified property. If unable to
874# get the property then exits.
875#
876# Note property is in 'parsable' format (-p)
877#
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset prop_val

	# -p gives parsable output, -H drops headers; only the value
	# column is wanted.
	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
	if (($? != 0)); then
		log_note "Unable to get $prop property for dataset " \
		"$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
894
895#
896# Simple function to get the specified property of pool. If unable to
897# get the property then exits.
898#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		# Parse the value column from the last line of
		# 'zpool get' output.
		# NOTE(review): the $? below reflects the final pipeline
		# stage (awk), not zpool itself, so a zpool failure can
		# pass unnoticed here.
		prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
			awk '{print $3}')
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			"$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	echo $prop_val
	return 0
}
921
922# Return 0 if a pool exists; $? otherwise
923#
924# $1 - pool name
925
function poolexists
{
	typeset pool=$1

	[[ -n $pool ]] || {
		log_note "No pool name given."
		return 1
	}

	# 'zpool get' succeeds exactly when the pool exists; its exit
	# status becomes this function's status.
	zpool get name "$pool" > /dev/null 2>&1
}
938
939# Return 0 if all the specified datasets exist; $? otherwise
940#
941# $1-n  dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Every named dataset must exist; the first miss returns the
	# 'zfs get' failure status.
	typeset ds
	for ds in "$@"; do
		zfs get name $ds > /dev/null 2>&1 || return $?
	done

	return 0
}
957
958# return 0 if none of the specified datasets exists, otherwise return 1.
959#
960# $1-n  dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Any dataset that does exist makes the whole check fail.
	typeset ds
	for ds in "$@"; do
		zfs list -H -t filesystem,snapshot,volume $ds \
		    > /dev/null 2>&1 && return 1
	done

	return 0
}
976
977#
978# Given a mountpoint, or a dataset name, determine if it is shared.
979#
980# Returns 0 if shared, 1 otherwise.
981#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A dataset name (no leading '/') is translated to its
	# mountpoint first; datasets without a real mountpoint
	# (none/legacy/-) cannot be shared.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	# Field 2 of share(1M) output is the shared pathname.
	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Not shared; note the NFS server state to aid debugging, since
	# a disabled server makes every share check fail.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
1014
1015#
1016# Given a mountpoint, determine if it is not shared.
1017#
1018# Returns 0 if not shared, 1 otherwise.
1019#
function not_shared
{
	typeset fs=$1

	# Invert is_shared: success here means "not shared".
	if is_shared $fs; then
		return 1
	fi

	return 0
}
1031
1032#
1033# Helper function to unshare a mountpoint.
1034#
function unshare_fs #fs
{
	typeset fs=$1

	# Only unshare when currently shared; always report success.
	if is_shared $fs; then
		log_must zfs unshare $fs
	fi

	return 0
}
1046
1047#
1048# Check NFS server status and trigger it online.
1049#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		timeout=10
		# Poll (up to $timeout seconds) until the asterisk
		# transition marker disappears from the service state.
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		# The dummy share served its purpose; tear it down.
		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1096
1097#
1098# To verify whether calling process is in global zone
1099#
1100# Return 0 if in global zone, 1 in non-global zone
1101#
function is_global_zone
{
	# zonename prints "global" in the global zone; anything else —
	# including an empty result when the command is unavailable —
	# means non-global (status 1).
	[[ $(zonename 2>/dev/null) == "global" ]]
}
1110
1111#
1112# Verify whether test is permitted to run from
1113# global zone, local zone, or both
1114#
1115# $1 zone limit, could be "global", "local", or "both"(no limit)
1116#
1117# Return 0 if permitted, otherwise exit with log_unsupported
1118#
function verify_runnable # zone limit
{
	typeset limit=$1

	# No limit given: runnable anywhere.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from "\
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from "\
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		# Local zones additionally need the pool re-exported
		# before tests can use it.
		reexport_pool
	fi

	return 0
}
1153
1154# Return 0 if create successfully or the pool exists; $? otherwise
1155# Note: In local zones, this function should return 0 silently.
1156#
1157# $1 - pool name
1158# $2-n - [keyword] devs_list
1159
function create_pool #pool devs_list
{
	# Only the pool component of a dataset name is used.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Recreate from scratch if the pool already exists.
	if poolexists $pool ; then
		destroy_pool $pool
	fi

	# Pools can only be created in the global zone; in a local
	# zone this silently succeeds.
	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1182
1183# Return 0 if destroy successfully or the pool exists; $? otherwise
1184# Note: In local zones, this function should return 0 silently.
1185#
1186# $1 - pool name
1187# Destroy pool with the given parameters.
1188
function destroy_pool #pool
{
	# Only the pool component of a dataset name is used.
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			# Capture the mountpoint first so the leftover
			# directory can be removed after the destroy.
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			must=""
			while [[ $ret -ne 0 ]]; do
				# After 7 failed tries the destroy runs under
				# log_must, so the next failure aborts.
				$must zpool destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				[[ count++ -ge 7 ]] && must=log_must
				sleep $wait_time
			done

			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1228
1229# Return 0 if created successfully; $? otherwise
1230#
1231# $1 - dataset name
1232# $2-n - dataset options
1233
function create_dataset #dataset dataset_options
{
	typeset dataset=$1

	shift

	# A dataset name is mandatory.
	if [[ -z $dataset ]]; then
		log_note "Missing dataset name."
		return 1
	fi

	# Recreate from scratch if it already exists.
	datasetexists $dataset && destroy_dataset $dataset

	log_must zfs create $@ $dataset

	return 0
}
1253
1254# Return 0 if destroy successfully or the dataset exists; $? otherwise
1255# Note: In local zones, this function should return 0 silently.
1256#
1257# $1 - dataset name
1258
function destroy_dataset #dataset
{
	typeset dataset=$1
	typeset mtpt

	if [[ -z $dataset ]]; then
		log_note "No dataset name given."
		return 1
	fi

	if datasetexists "$dataset" ; then
		# Capture the mountpoint before the destroy so the
		# leftover directory can be removed afterwards.
		mtpt=$(get_prop mountpoint "$dataset")
		log_must zfs destroy -r $dataset
		[[ -d $mtpt ]] && log_must rm -rf $mtpt
	else
		log_note "Dataset does not exist. ($dataset)"
		return 1
	fi

	return 0
}
1280
#
# Firstly, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
# and a zvol device to the zone.
#
# Reads globals: ZONE_CTR (container name prefix), ZONE_POOL (pool name),
# DISKS (vdevs for the pool), MINVDEVSIZE (slog file size).
#
# $1 zone name (default: <hostname>-z)
# $2 zone root directory prefix (default: /zone_root)
# $3 zone ip (default: 10.1.1.10; currently unused below)
#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create the pool and 5 containers within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol

	#
	# If the current system supports slog, add a mirrored slog for the pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have its mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	# Ensure the zone path exists with the 0700 mode zoneadm requires.
	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configuration file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the 5 container datasets to the zone.
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file so first boot needs no interactive sysid.
	# NOTE(review): root_password below is a fixed crypt(3) hash used
	# only for the throwaway test zone.
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo  "terminal=dtterm" >> $sysidcfg
	echo  "network_interface=primary {" >> $sysidcfg
	echo  "hostname=$zone_name" >> $sysidcfg
	echo  "}" >> $sysidcfg
	echo  "name_service=NONE" >> $sysidcfg
	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo  "security_policy=NONE" >> $sysidcfg
	echo  "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1393
#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
# Re-points the TESTPOOL and TESTPOOL1..TESTPOOL4 globals at the
# zone-local container datasets ($ZONE_POOL/$ZONE_CTR<n>) and mounts
# any of them that are not already mounted.
#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# eval is required to assign and dereference the
			# numbered TESTPOOL<i> variables indirectly.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1417
#
# Verify a given disk is online or offline
#
# Return 0 if pool/disk matches the expected state, 1 otherwise
#
function check_state # pool disk state{online,offline}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Find the disk's row in the verbose status output and check it
	# mentions the expected state (case-insensitively).
	zpool status -v $pool | grep "$disk" | grep -i "$state" \
	    > /dev/null 2>&1
}
1434
#
# Print the mountpoint of a snapshot: a snapshot is visible at
# <filesystem mountpoint>/.zfs/snapshot/<snap name>.
#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# A snapshot name must contain '@' between filesystem and snap.
	case $dataset in
	*@*)
		;;
	*)
		log_fail "Error name of snapshot '$dataset'."
		;;
	esac

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	if [[ -z $fs || -z $snap ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1457
#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
# $1 - pool name
# $2 - filesystem to verify (passed to zdb)
# $3..n - optional directories passed to 'zpool import -d'
#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	# Unmount and export first so zdb examines quiescent on-disk state.
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	# Build the "-d <dir>" search-path arguments for the import, if any.
	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	# zdb -cudi: checksum (-c) and dataset (-d) verification plus
	# uberblock (-u) and intent-log (-i) dumps; non-zero means errors.
	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1496
1497#
1498# Given a pool, and this function list all disks in the pool
1499#
1500function get_disklist # pool
1501{
1502	typeset disklist=""
1503
1504	disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1505	    grep -v "\-\-\-\-\-" | \
1506	    egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1507
1508	echo $disklist
1509}
1510
# /**
#  Kill a given list of processes after a fixed time period. Used by the
#  stress tests instead of STF_TIMEOUT so that long-running workloads can
#  be stopped cleanly and still count as PASS instead of hitting the
#  harness timeout and being reported as FAIL.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	for pid in $cpids; do
		# Only signal children that are still alive.
		ps -p $pid > /dev/null 2>&1 && log_must kill -USR1 $pid
	done
}
1540
#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 if pool/disk matches the expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is deliberately not typeset, matching the other
	# check_*_state helpers in this file.
	cur_state=$(get_device_state $pool $disk "spares")

	[[ $state == ${cur_state} ]]
}
1559
#
# Wait until a hotspare transitions to a given state or times out.
#
# $1 pool, $2 disk, $3 expected state, $4 timeout in seconds (default 60)
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	# Strip an optional $DEV_DSKDIR/ prefix.  The original pattern
	# ${2#$/DEV_DSKDIR/} looked for a literal "$/DEV_DSKDIR/" and
	# therefore never stripped anything.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the state matches or time runs out.
	while [[ $i -lt $timeout ]]; do
		if check_hotspare_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1584
#
# Verify a given slog disk is in the expected state
#
# Return 0 if pool/disk matches the expected state, 1 otherwise
#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is deliberately not typeset, matching the other
	# check_*_state helpers in this file.
	cur_state=$(get_device_state $pool $disk "logs")

	[[ $state == ${cur_state} ]]
}
1603
#
# Verify a given vdev disk is in the expected state
#
# Return 0 if pool/disk matches the expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is deliberately not typeset, matching the other
	# check_*_state helpers in this file.
	cur_state=$(get_device_state $pool $disk)

	[[ $state == ${cur_state} ]]
}
1622
#
# Wait until a vdev transitions to a given state or times out.
#
# $1 pool, $2 disk, $3 expected state, $4 timeout in seconds (default 60)
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	# Strip an optional $DEV_DSKDIR/ prefix.  The original pattern
	# ${2#$/DEV_DSKDIR/} looked for a literal "$/DEV_DSKDIR/" and
	# therefore never stripped anything.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the state matches or time runs out.
	while [[ $i -lt $timeout ]]; do
		if check_vdev_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1647
#
# Check the output of 'zpool status -v <pool>',
# and see if the line for <token> contains the <keyword> specified.
#
# $1 pool name
# $2 status token, e.g. "scan" or "remove" (without the trailing colon)
# $3 keyword to search for in that line (case-insensitive)
# $4 optional; when "true", log the matched line
#
# Return 0 if the keyword is found, 1 otherwise
#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}
	typeset scan

	# Extract only the "<token>: ..." line from the status output.
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	echo $scan | grep -i "$keyword" > /dev/null 2>&1

	return $?
}
1670
1671#
#	The following functions are instances of check_pool_status():
1673#	is_pool_resilvering - to check if the pool is resilver in progress
1674#	is_pool_resilvered - to check if the pool is resilver completed
1675#	is_pool_scrubbing - to check if the pool is scrub in progress
1676#	is_pool_scrubbed - to check if the pool is scrub completed
1677#	is_pool_scrub_stopped - to check if the pool is scrub stopped
1678#	is_pool_scrub_paused - to check if the pool has scrub paused
1679#	is_pool_removing - to check if the pool is removing a vdev
1680#	is_pool_removed - to check if the pool is remove completed
1681#
function is_pool_resilvering #pool <verbose>
{
	# True while the 'scan' status line reports an active resilver.
	check_pool_status "$1" "scan" "resilver in progress since " $2
}
1687
function is_pool_resilvered #pool <verbose>
{
	# True once the 'scan' status line reports a completed resilver.
	check_pool_status "$1" "scan" "resilvered " $2
}
1693
function is_pool_scrubbing #pool <verbose>
{
	# True while the 'scan' status line reports an active scrub.
	check_pool_status "$1" "scan" "scrub in progress since " $2
}
1699
function is_pool_scrubbed #pool <verbose>
{
	# True once the 'scan' status line reports a completed scrub.
	check_pool_status "$1" "scan" "scrub repaired" $2
}
1705
function is_pool_scrub_stopped #pool <verbose>
{
	# True when the 'scan' status line reports a cancelled scrub.
	check_pool_status "$1" "scan" "scrub canceled" $2
}
1711
function is_pool_scrub_paused #pool <verbose>
{
	# True while the 'scan' status line reports a paused scrub.
	check_pool_status "$1" "scan" "scrub paused since " $2
}
1717
function is_pool_removing #pool
{
	# True while the 'remove' status line reports a removal in progress.
	check_pool_status "$1" "remove" "in progress since "
}
1723
function is_pool_removed #pool
{
	# True once the 'remove' status line reports a completed removal.
	check_pool_status "$1" "remove" "completed on"
}
1729
#
# Wait (up to $2 seconds, default 30) for the pool's health property to
# become DEGRADED.  Return 0 once degraded, 1 on timeout.
#
function wait_for_degraded
{
	typeset pool=$1
	typeset timeout=${2:-30}
	typeset t0=$SECONDS

	until [[ $(get_pool_prop health $pool) == "DEGRADED" ]]; do
		log_note "$pool is not yet degraded."
		sleep 1
		if ((SECONDS - t0 > $timeout)); then
			log_note "$pool not degraded after $timeout seconds."
			return 1
		fi
	done

	return 0
}
1748
#
# Use create_pool()/destroy_pool() to clear out stale information on the
# given disks, avoiding slice-overlap problems in later tests.
#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Remove any leftover pool of the same name before reuse.
	poolexists $pool && destroy_pool $pool

	create_pool $pool $@
	destroy_pool $pool

	return 0
}
1766
1767#/**
1768# A function to find and locate free disks on a system or from given
1769# disks as the parameter. It works by locating disks that are in use
1770# as swap devices and dump devices, and also disks listed in /etc/vfstab
1771#
1772# $@ given disks to find which are free, default is all disks in
1773# the test system
1774#
1775# @return a string containing the list of available disks
1776#*/
1777function find_disks
1778{
1779	sfi=/tmp/swaplist.$$
1780	dmpi=/tmp/dumpdev.$$
1781	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1782
1783	swap -l > $sfi
1784	dumpadm > $dmpi 2>/dev/null
1785
1786# write an awk script that can process the output of format
1787# to produce a list of disks we know about. Note that we have
1788# to escape "$2" so that the shell doesn't interpret it while
1789# we're creating the awk script.
1790# -------------------
1791	cat > /tmp/find_disks.awk <<EOF
1792#!/bin/nawk -f
1793	BEGIN { FS="."; }
1794
1795	/^Specify disk/{
1796		searchdisks=0;
1797	}
1798
1799	{
1800		if (searchdisks && \$2 !~ "^$"){
1801			split(\$2,arr," ");
1802			print arr[1];
1803		}
1804	}
1805
1806	/^AVAILABLE DISK SELECTIONS:/{
1807		searchdisks=1;
1808	}
1809EOF
1810#---------------------
1811
1812	chmod 755 /tmp/find_disks.awk
1813	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
1814	rm /tmp/find_disks.awk
1815
1816	unused=""
1817	for disk in $disks; do
1818	# Check for mounted
1819		grep "${disk}[sp]" /etc/mnttab >/dev/null
1820		(($? == 0)) && continue
1821	# Check for swap
1822		grep "${disk}[sp]" $sfi >/dev/null
1823		(($? == 0)) && continue
1824	# check for dump device
1825		grep "${disk}[sp]" $dmpi >/dev/null
1826		(($? == 0)) && continue
1827	# check to see if this disk hasn't been explicitly excluded
1828	# by a user-set environment variable
1829		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
1830		(($? == 0)) && continue
1831		unused_candidates="$unused_candidates $disk"
1832	done
1833	rm $sfi
1834	rm $dmpi
1835
1836# now just check to see if those disks do actually exist
1837# by looking for a device pointing to the first slice in
1838# each case. limit the number to max_finddisksnum
1839	count=0
1840	for disk in $unused_candidates; do
1841		if [ -b /dev/dsk/${disk}s0 ]; then
1842		if [ $count -lt $max_finddisksnum ]; then
1843			unused="$unused $disk"
1844			# do not impose limit if $@ is provided
1845			[[ -z $@ ]] && ((count = count + 1))
1846		fi
1847		fi
1848	done
1849
1850# finally, return our disk list
1851	echo $unused
1852}
1853
#
# Add the specified user to the specified group, creating a home
# directory for the account.
#
# $1 group name
# $2 user name
# $3 base of the homedir (optional, default /var/tmp)
#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	if [[ -z $gname || -z $uname ]]; then
		log_fail "group name or user name are not defined."
	fi

	log_must useradd -g $gname -d $basedir/$uname -m $uname
	# Clear the password so the new account is immediately usable.
	log_must passwd -N $uname

	return 0
}
1876
#
# Delete the specified user account and its home directory.
#
# $1 login name
# $2 base of the homedir (optional, default /var/tmp)
#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	[[ -n $user ]] || log_fail "login name is necessary."

	# Only run userdel when the account actually exists.
	if id $user > /dev/null 2>&1; then
		log_must userdel $user
	fi

	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
1900
#
# Create the specified group, picking the first free gid at or above 100.
#
# $1 group name
#
function add_group #<group_name>
{
	typeset group=$1

	[[ -n $group ]] || log_fail "group name is necessary."

	# Assign 100 as the base gid; walk upward on gid collisions.
	typeset -i gid=100
	typeset -i rc
	while : ; do
		groupadd -g $gid $group > /dev/null 2>&1
		rc=$?
		case $rc in
			0) return 0 ;;
			# exit 4: the gid is already taken; try the next one
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
1927
#
# Delete the specified group if it exists.
#
# $1 group name
#
function del_group #<group_name>
{
	typeset grp=$1

	[[ -n $grp ]] || log_fail "group name is necessary."

	# groupmod-to-the-same-name is a cheap existence probe:
	# exit 6 means "no such group"; exit 9 means the name is in use.
	groupmod -n $grp $grp > /dev/null 2>&1
	case $? in
		# Group does not exist.
		6) return 0 ;;
		# Name already exists as a group name
		9) log_must groupdel $grp ;;
		*) return 1 ;;
	esac

	return 0
}
1952
1953#
1954# This function will return true if it's safe to destroy the pool passed
1955# as argument 1. It checks for pools based on zvols and files, and also
1956# files contained in a pool that may have a different mountpoint.
1957#
1958function safe_to_destroy_pool { # $1 the pool name
1959
1960	typeset pool=""
1961	typeset DONT_DESTROY=""
1962
1963	# We check that by deleting the $1 pool, we're not
1964	# going to pull the rug out from other pools. Do this
1965	# by looking at all other pools, ensuring that they
1966	# aren't built from files or zvols contained in this pool.
1967
1968	for pool in $(zpool list -H -o name)
1969	do
1970		ALTMOUNTPOOL=""
1971
1972		# this is a list of the top-level directories in each of the
1973		# files that make up the path to the files the pool is based on
1974		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
1975			awk '{print $1}')
1976
1977		# this is a list of the zvols that make up the pool
1978		ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
1979		    | awk '{print $1}')
1980
1981		# also want to determine if it's a file-based pool using an
1982		# alternate mountpoint...
1983		POOL_FILE_DIRS=$(zpool status -v $pool | \
1984					grep / | awk '{print $1}' | \
1985					awk -F/ '{print $2}' | grep -v "dev")
1986
1987		for pooldir in $POOL_FILE_DIRS
1988		do
1989			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
1990					grep "${pooldir}$" | awk '{print $1}')
1991
1992			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
1993		done
1994
1995
1996		if [ ! -z "$ZVOLPOOL" ]
1997		then
1998			DONT_DESTROY="true"
1999			log_note "Pool $pool is built from $ZVOLPOOL on $1"
2000		fi
2001
2002		if [ ! -z "$FILEPOOL" ]
2003		then
2004			DONT_DESTROY="true"
2005			log_note "Pool $pool is built from $FILEPOOL on $1"
2006		fi
2007
2008		if [ ! -z "$ALTMOUNTPOOL" ]
2009		then
2010			DONT_DESTROY="true"
2011			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2012		fi
2013	done
2014
2015	if [ -z "${DONT_DESTROY}" ]
2016	then
2017		return 0
2018	else
2019		log_note "Warning: it is not safe to destroy $1!"
2020		return 1
2021	fi
2022}
2023
#
# Print the available ZFS compression property values.
# $1 option type: zfs_set | zfs_compress
#
function get_compress_opts
{
	typeset valid_opts
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
			gzip-6 gzip-7 gzip-8 gzip-9"

	case $1 in
	zfs_compress)
		valid_opts="on lzjb"
		;;
	zfs_set)
		valid_opts="on off lzjb"
		;;
	esac

	# Append the gzip levels only when this build advertises gzip.
	if zfs get 2>&1 | grep gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
2046
#
# Verify that a zfs operation with the -p option works as expected: it
# must fail without -p when the parent dataset is missing, succeed with
# -p, and (for create/clone) be idempotent when repeated with -p.
#
# $1 operation, value could be create, clone or rename
# $2 dataset type, value could be fs or vol
# $3 dataset name (source; ignored for create)
# $4 new dataset name
#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# create takes only a target; volumes also need a size
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			# clone's source must be an existing snapshot
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			# rename's source must exist and not be a snapshot
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must zfs destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2117
#
# Get a configuration value of a pool from its cached (or exported)
# config as dumped by zdb.
#
# $1 pool name
# $2 config name (the key before the ':' in zdb output)
#
# Prints the value (surrounding single quotes stripped) on stdout;
# returns 1 when the pool does not exist.
#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root
	typeset value

	if ! poolexists "$pool" ; then
		return 1
	fi
	# An altroot of "-" means the pool is imported normally; otherwise
	# read the configuration with 'zdb -e' (exported/alt-root pool).
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	if [[ -n $value ]] ; then
		# Strip the single quotes zdb prints around string values.
		# Use escaped quotes in the patterns; the original bare
		# quotes (${value#'}) parse only under ksh and are a
		# syntax error in other shells.
		value=${value#\'}
		value=${value%\'}
	fi
	echo $value

	return 0
}
2148
#
# Private helper: randomly select one item from the arguments.
#
# $1 count of selectable items
# $2-n string of space-separated items
#
function _random_get
{
	typeset cnt=$1
	shift

	typeset pool="$@"
	typeset -i pick
	((pick = RANDOM % cnt + 1))

	# Fields are space-delimited; print the chosen one.
	echo "$pool" | cut -f $pick -d ' '
}
2167
#
# Randomly select one item from the arguments, additionally allowing an
# empty ("none") selection.
#
function random_get_with_non
{
	typeset -i cnt=$#

	# Allow one extra slot beyond the argument count; when the random
	# index lands on it, cut returns the empty string, which callers
	# treat as "no value".  The original used ((cnt =+ 1)), which
	# assigned +1 to cnt instead of incrementing it.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2178
#
# Randomly select one item from the arguments (never empty).
#
function random_get
{
	typeset -i nargs=$#
	_random_get "$nargs" "$@"
}
2186
#
# Detect whether this system supports separate log (slog) devices by
# doing a dry-run pool creation with a log vdev.  Return 0 if supported.
#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b
	typeset -i rc=0

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	# 'zpool create -n' only validates the config; nothing is created.
	zpool create -n $pool $vdev log $sdev > /dev/null 2>&1 || rc=1
	rm -r $dir

	return $rc
}
2208
#
# Generate a dataset name of (at least) a given length by repeating a
# base string.
# $1, the desired length of the name
# $2, the base string used to construct the name
#
function gen_dataset_name
{
	typeset -i want=$1
	typeset base="$2"
	typeset -i chunk=${#base}
	typeset -i reps
	typeset result=""

	# Number of repetitions needed to reach at least 'want' characters.
	if ((want % chunk == 0)); then
		((reps = want / chunk))
	else
		((reps = want / chunk + 1))
	fi
	while ((reps > 0)); do
		result="${result}$base"
		((reps -= 1))
	done

	echo $result
}
2235
#
# Print the cksum tuple of a dataset's rootbp, as reported by zdb.
# $1 dataset name
#
# sample zdb output:
# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
	# Flush outstanding writes first so zdb sees current on-disk data,
	# then pull the 7th '='-separated field (the cksum value).
	sync
	zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
		| awk -F= '{print $7}'
}
2253
#
# Print the CRC checksum of a file (first field of cksum(1) output).
# $1 file path
#
function checksum
{
	typeset sum
	sum=$(cksum $1 | awk '{print $1}')
	echo $sum
}
2264
2265#
2266# Get the given disk/slice state from the specific field of the pool
2267#
2268function get_device_state #pool disk field("", "spares","logs")
2269{
2270	typeset pool=$1
2271	typeset disk=${2#/dev/dsk/}
2272	typeset field=${3:-$pool}
2273
2274	state=$(zpool status -v "$pool" 2>/dev/null | \
2275		nawk -v device=$disk -v pool=$pool -v field=$field \
2276		'BEGIN {startconfig=0; startfield=0; }
2277		/config:/ {startconfig=1}
2278		(startconfig==1) && ($1==field) {startfield=1; next;}
2279		(startfield==1) && ($1==device) {print $2; exit;}
2280		(startfield==1) &&
2281		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2282	echo $state
2283}
2284
2285
#
# Print the filesystem type of the given directory.
#
# $1 directory name
#
function get_fstype
{
	typeset dir=$1

	[[ -n $dir ]] || log_fail "Usage: get_fstype <directory>"

	#
	#  $ df -n /
	#  /		  : ufs
	#
	df -n $dir | awk '{print $3}'
}
2305
#
# Given a disk, label it to VTOC regardless what label was on the disk
# $1 disk
#
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	# Script file fed to format(1M)'s interactive label dialog.
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	if [[ $arch == "i386" ]]; then
		# x86: answer format's label prompts (choice "0" appears to
		# select the SMI/VTOC label — confirm against format docs),
		# then quit twice.
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file

		# An fdisk partition table must exist before a VTOC label
		# can be written on x86.
		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		# sparc: no fdisk step; accept defaults and quit.
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	# -e: expert mode, -s: silent, -d: disk, -f: command file
	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	#
	# wait the format to finish
	#
	sleep 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2353
#
# Check whether the system was installed with a ZFS root.
# return: 0 if true, otherwise non-zero
#
function is_zfsroot
{
	df -n / | grep zfs > /dev/null 2>&1
}
2363
#
# Print the root filesystem name on a zfsroot system; log_fail otherwise.
#
# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	# The root filesystem is the mnttab entry mounted at "/" as zfs.
	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootfs"

	# Sanity check: the name must be a real ZFS dataset.
	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2383
#
# Print the pool that backs the root filesystem.
# return:
#       rootpool name
#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		 /etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootpool"

	if zfs list $rootfs > /dev/null 2>&1; then
		# The pool name is everything before the first '/'.
		rootpool=$(echo $rootfs | awk -F\/ '{print $1}')
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2406
#
# Check whether the given name looks like a physical disk device
# (cXtXdX / cXdX style), after stripping any /dev/{dsk,rdsk}/ prefix.
#
function is_physical_device #device
{
	typeset dev=${1#/dev/dsk/}
	dev=${dev#/dev/rdsk/}

	echo $dev | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
}
2418
#
# Print the directory component of the given device path; physical
# devices (cXtXdX names) are assumed to live in /dev/dsk.
#
function get_device_dir #device
{
	typeset device=$1

	# Call the helper directly.  The original wrapped it in $(...),
	# relying on the command substitution's exit status propagating to
	# an otherwise-empty command — it worked, but obscurely.
	if ! is_physical_device $device ; then
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		echo $device
	else
		echo "/dev/dsk"
	fi
}
2435
#
# Print the STC package name for a suite path, e.g. "a/b" -> "SUNWstc-a-b".
# Defaults to $STC_NAME when no argument is given.
#
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	# Convert every '/' to '-' via parameter expansion.
	echo "SUNWstc-${dirpath//\//-}"
}
2445
#
# Print the number of whitespace-separated words in a string.
#
function get_word_count
{
	typeset str=$1
	echo $str | wc -w
}
2453
#
# Verify that at least $2 (default 1) disks are listed in the string $1;
# otherwise the test is marked untested.
#
function verify_disk_count
{
	typeset -i min=${2:-1}
	typeset -i count=$(get_word_count "$1")

	((count >= min)) && return

	log_untested "A minimum of $min disks is required to run." \
		" You specified $count disk(s)"
}
2468
function ds_is_volume
{
	# True when the dataset's type property is "volume".
	[[ $(get_prop type $1) = "volume" ]]
}
2475
function ds_is_filesystem
{
	# True when the dataset's type property is "filesystem".
	[[ $(get_prop type $1) = "filesystem" ]]
}
2482
function ds_is_snapshot
{
	# True when the dataset's type property is "snapshot".
	[[ $(get_prop type $1) = "snapshot" ]]
}
2489
#
# Check if Trusted Extensions are installed and enabled
#
function is_te_enabled
{
	# Note: grep's matched line is passed through to stdout, exactly
	# as in the original implementation; only the status matters to
	# callers using this in a conditional.
	svcs -H -o state labeld 2>/dev/null | grep "enabled"
}
2502
# Utility function to determine if a system has multiple cpus.
function is_mp
{
	typeset -i ncpu=$(psrinfo | wc -l)
	((ncpu > 1))
}
2508
function get_cpu_freq
{
	# Parse the operating frequency from cpu 0's verbose psrinfo output.
	psrinfo -v 0 | awk '/processor operates at/ {print $6}'
}
2513
2514# Run the given command as the user provided.
2515function user_run
2516{
2517	typeset user=$1
2518	shift
2519
2520	eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
2521	return $?
2522}
2523
#
# Check if the pool contains the specified vdevs
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
# vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev
	typeset tmpfile

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	# Capture the verbose config once, then grep it for each vdev's
	# basename.
	tmpfile=$(mktemp)
	zpool list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		if ! grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1; then
			# Remove the temp file on the failure path too;
			# the original leaked it here.
			rm -f $tmpfile
			return 1
		fi
	done

	rm -f $tmpfile

	return 0
}
2556
# Print the largest of the integer arguments.
#
# $1..n integers to compare
function get_max
{
	# The original used 'typeset -l', which is the *lowercase-conversion*
	# flag, not integer; '-i' is the intended integer attribute.
	typeset -i i max=$1
	shift

	for i in "$@"; do
		# Direct arithmetic assignment; no echo/subshell needed.
		((i > max)) && max=$i
	done

	echo $max
}
2568
# Print the smallest of the integer arguments.
#
# $1..n integers to compare
function get_min
{
	# The original used 'typeset -l', which is the *lowercase-conversion*
	# flag, not integer; '-i' is the intended integer attribute.
	typeset -i i min=$1
	shift

	for i in "$@"; do
		# Direct arithmetic assignment; no echo/subshell needed.
		((i < min)) && min=$i
	done

	echo $min
}
2580
2581#
2582# Generate a random number between 1 and the argument.
2583#
2584function random
2585{
2586        typeset max=$1
2587        echo $(( ($RANDOM % $max) + 1 ))
2588}
2589
2590# Write data that can be compressed into a directory
2591function write_compressible
2592{
2593	typeset dir=$1
2594	typeset megs=$2
2595	typeset nfiles=${3:-1}
2596	typeset bs=${4:-1024k}
2597	typeset fname=${5:-file}
2598
2599	[[ -d $dir ]] || log_fail "No directory: $dir"
2600
2601	log_must eval "fio \
2602	    --name=job \
2603	    --fallocate=0 \
2604	    --minimal \
2605	    --randrepeat=0 \
2606	    --buffer_compress_percentage=66 \
2607	    --buffer_compress_chunk=4096 \
2608	    --directory=$dir \
2609	    --numjobs=$nfiles \
2610	    --rw=write \
2611	    --bs=$bs \
2612	    --filesize=$megs \
2613	    --filename_format='$fname.\$jobnum' >/dev/null"
2614}
2615
# Print the object (inode) number of the given path.
#
# $1 path to an existing file or directory; log_fail if it is missing.
function get_objnum
{
	typeset path=$1

	[[ -e $path ]] || log_fail "No such file or directory: $path"
	echo $(stat -c %i $path)
}
2625
2626#
2627# Prints the current time in seconds since UNIX Epoch.
2628#
2629function current_epoch
2630{
2631	printf '%(%s)T'
2632}
2633
2634#
2635# Get decimal value of global uint32_t variable using mdb.
2636#
2637function mdb_get_uint32
2638{
2639	typeset variable=$1
2640	typeset value
2641
2642	value=$(mdb -k -e "$variable/X | ::eval .=U")
2643	if [[ $? -ne 0 ]]; then
2644		log_fail "Failed to get value of '$variable' from mdb."
2645		return 1
2646	fi
2647
2648	echo $value
2649	return 0
2650}
2651
2652#
2653# Set global uint32_t variable to a decimal value using mdb.
2654#
2655function mdb_set_uint32
2656{
2657	typeset variable=$1
2658	typeset value=$2
2659
2660	mdb -kw -e "$variable/W 0t$value" > /dev/null
2661	if [[ $? -ne 0 ]]; then
2662		echo "Failed to set '$variable' to '$value' in mdb."
2663		return 1
2664	fi
2665
2666	return 0
2667}
2668
2669#
2670# Set global scalar integer variable to a hex value using mdb.
2671# Note: Target should have CTF data loaded.
2672#
2673function mdb_ctf_set_int
2674{
2675	typeset variable=$1
2676	typeset value=$2
2677
2678	mdb -kw -e "$variable/z $value" > /dev/null
2679	if [[ $? -ne 0 ]]; then
2680		echo "Failed to set '$variable' to '$value' in mdb."
2681		return 1
2682	fi
2683
2684	return 0
2685}
2686