xref: /illumos-gate/usr/src/test/zfs-tests/include/libtest.shlib (revision b1e2e3fb17324e9ddf43db264a0c64da7756d9e6)
1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24# Use is subject to license terms.
25# Copyright (c) 2012, 2017 by Delphix. All rights reserved.
26# Copyright 2016 Nexenta Systems, Inc.
27# Copyright (c) 2017 Datto Inc.
28#
29
30. ${STF_TOOLS}/contrib/include/logapi.shlib
31
32# Determine whether a dataset is mounted
33#
34# $1 dataset name
35# $2 filesystem type; optional - defaulted to zfs
36#
37# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
38
39function ismounted
40{
41	typeset fstype=$2
42	[[ -z $fstype ]] && fstype=zfs
43	typeset out dir name ret
44
45	case $fstype in
46		zfs)
47			if [[ "$1" == "/"* ]] ; then
48				for out in $(zfs mount | awk '{print $2}'); do
49					[[ $1 == $out ]] && return 0
50				done
51			else
52				for out in $(zfs mount | awk '{print $1}'); do
53					[[ $1 == $out ]] && return 0
54				done
55			fi
56		;;
57		ufs|nfs)
58			out=$(df -F $fstype $1 2>/dev/null)
59			ret=$?
60			(($ret != 0)) && return $ret
61
62			dir=${out%%\(*}
63			dir=${dir%% *}
64			name=${out##*\(}
65			name=${name%%\)*}
66			name=${name%% *}
67
68			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
69		;;
70	esac
71
72	return 1
73}
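#
# Example usage (illustrative; assumes the $TESTPOOL/$TESTFS defaults and
# log helpers provided by the test framework):
#
#	ismounted $TESTPOOL/$TESTFS && log_note "dataset is mounted"
#	ismounted $TESTDIR && log_note "mountpoint is in use"
#	ismounted /export/home ufs || log_note "no ufs mount at /export/home"
#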
74
75# Return 0 if a dataset is mounted; 1 otherwise
76#
77# $1 dataset name
78# $2 filesystem type; optional - defaulted to zfs
79
80function mounted
81{
82	ismounted $1 $2
83	(($? == 0)) && return 0
84	return 1
85}
86
87# Return 0 if a dataset is unmounted; 1 otherwise
88#
89# $1 dataset name
90# $2 filesystem type; optional - defaulted to zfs
91
92function unmounted
93{
94	ismounted $1 $2
95	(($? == 1)) && return 0
96	return 1
97}
98
99# split line on ","
100#
101# $1 - line to split
102
103function splitline
104{
105	echo $1 | sed "s/,/ /g"
106}
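#
# Example (illustrative; the device names are hypothetical):
# 'splitline "c1t0d0,c1t1d0"' echoes "c1t0d0 c1t1d0", suitable for a loop:
#
#	for dev in $(splitline "c1t0d0,c1t1d0"); do
#		log_note $dev
#	done
#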
107
108function default_setup
109{
110	default_setup_noexit "$@"
111
112	log_pass
113}
114
115#
116# Given a list of disks, setup storage pools and datasets.
117#
118function default_setup_noexit
119{
120	typeset disklist=$1
121	typeset container=$2
122	typeset volume=$3
123
124	if is_global_zone; then
125		if poolexists $TESTPOOL ; then
126			destroy_pool $TESTPOOL
127		fi
128		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
129		log_must zpool create -f $TESTPOOL $disklist
130	else
131		reexport_pool
132	fi
133
134	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
135	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
136
137	log_must zfs create $TESTPOOL/$TESTFS
138	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
139
140	if [[ -n $container ]]; then
141		rm -rf $TESTDIR1  || \
142			log_unresolved Could not remove $TESTDIR1
143		mkdir -p $TESTDIR1 || \
144			log_unresolved Could not create $TESTDIR1
145
146		log_must zfs create $TESTPOOL/$TESTCTR
147		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
148		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
149		log_must zfs set mountpoint=$TESTDIR1 \
150		    $TESTPOOL/$TESTCTR/$TESTFS1
151	fi
152
153	if [[ -n $volume ]]; then
154		if is_global_zone ; then
155			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
156		else
157			log_must zfs create $TESTPOOL/$TESTVOL
158		fi
159	fi
160}
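#
# Example usage (illustrative; $DISKS is assumed to be supplied by the
# test framework):
#
#	default_setup_noexit "$DISKS"			# pool and file system
#	default_setup_noexit "$DISKS" "true"		# also create the container
#	default_setup_noexit "$DISKS" "" "true"		# also create the volume
#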
161
162#
163# Given a list of disks, setup a storage pool, file system and
164# a container.
165#
166function default_container_setup
167{
168	typeset disklist=$1
169
170	default_setup "$disklist" "true"
171}
172
173#
174# Given a list of disks, setup a storage pool, file system
175# and a volume.
176#
177function default_volume_setup
178{
179	typeset disklist=$1
180
181	default_setup "$disklist" "" "true"
182}
183
184#
185# Given a list of disks, setup a storage pool, file system,
186# a container and a volume.
187#
188function default_container_volume_setup
189{
190	typeset disklist=$1
191
192	default_setup "$disklist" "true" "true"
193}
194
195#
196# Create a snapshot on a filesystem or volume. By default, create a
197# snapshot on the filesystem.
198#
199# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
200# $2 snapshot name. Default, $TESTSNAP
201#
202function create_snapshot
203{
204	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
205	typeset snap=${2:-$TESTSNAP}
206
207	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
208	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
209
210	if snapexists $fs_vol@$snap; then
211		log_fail "$fs_vol@$snap already exists."
212	fi
213	datasetexists $fs_vol || \
214		log_fail "$fs_vol must exist."
215
216	log_must zfs snapshot $fs_vol@$snap
217}
218
219#
220# Create a clone from a snapshot, default clone name is $TESTCLONE.
221#
222# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
223# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
224#
225function create_clone   # snapshot clone
226{
227	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
228	typeset clone=${2:-$TESTPOOL/$TESTCLONE}
229
230	[[ -z $snap ]] && \
231		log_fail "Snapshot name is undefined."
232	[[ -z $clone ]] && \
233		log_fail "Clone name is undefined."
234
235	log_must zfs clone $snap $clone
236}
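#
# Example usage (illustrative; relies on the $TESTPOOL/$TESTFS defaults
# described above, and "clone2" is a hypothetical name):
#
#	create_snapshot			# creates $TESTPOOL/$TESTFS@$TESTSNAP
#	create_clone			# creates $TESTPOOL/$TESTCLONE
#	create_clone $TESTPOOL/$TESTFS@$TESTSNAP $TESTPOOL/clone2
#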
237
238#
239# Create a bookmark of the given snapshot. By default, create a bookmark
240# on the filesystem.
241#
242# $1 Existing filesystem or volume name. Default, $TESTFS
243# $2 Existing snapshot name. Default, $TESTSNAP
244# $3 bookmark name. Default, $TESTBKMARK
245#
246function create_bookmark
247{
248	typeset fs_vol=${1:-$TESTFS}
249	typeset snap=${2:-$TESTSNAP}
250	typeset bkmark=${3:-$TESTBKMARK}
251
252	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
253	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
254	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
255
256	if bkmarkexists $fs_vol#$bkmark; then
257		log_fail "$fs_vol#$bkmark already exists."
258	fi
259	datasetexists $fs_vol || \
260		log_fail "$fs_vol must exist."
261	snapexists $fs_vol@$snap || \
262		log_fail "$fs_vol@$snap must exist."
263
264	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
265}
266
267#
268# Create the temporary clone resulting from an interrupted resumable 'zfs receive'
269# $1 Destination filesystem name. Must not exist, will be created as the result
270#    of this function along with its %recv temporary clone
271# $2 Source filesystem name. Must not exist, will be created and destroyed
272#
273function create_recv_clone
274{
275	typeset recvfs="$1"
276	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
277	typeset snap="$sendfs@snap1"
278	typeset incr="$sendfs@snap2"
279	typeset mountpoint="$TESTDIR/create_recv_clone"
280	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
281
282	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
283
284	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
285	datasetexists $sendfs && log_fail "Send filesystem must not exist."
286
287	log_must zfs create -o mountpoint="$mountpoint" $sendfs
288	log_must zfs snapshot $snap
289	log_must eval "zfs send $snap | zfs recv -u $recvfs"
290	log_must mkfile 1m "$mountpoint/data"
291	log_must zfs snapshot $incr
292	log_must eval "zfs send -i $snap $incr | dd bs=10k count=1 > $sendfile"
293	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
294	log_must zfs destroy -r $sendfs
295	log_must rm -f "$sendfile"
296
297	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
298		log_fail "Error creating temporary $recvfs/%recv clone"
299	fi
300}
301
302function default_mirror_setup
303{
304	default_mirror_setup_noexit $1 $2 $3
305
306	log_pass
307}
308
309#
310# Given a pair of disks, set up a storage pool and dataset for the mirror
311# @parameters: $1 the primary side of the mirror
312#   $2 the secondary side of the mirror
313# @uses: ZPOOL ZFS TESTPOOL TESTFS
314function default_mirror_setup_noexit
315{
316	readonly func="default_mirror_setup_noexit"
317	typeset primary=$1
318	typeset secondary=$2
319
320	[[ -z $primary ]] && \
321		log_fail "$func: No parameters passed"
322	[[ -z $secondary ]] && \
323		log_fail "$func: No secondary partition passed"
324	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
325	log_must zpool create -f $TESTPOOL mirror $@
326	log_must zfs create $TESTPOOL/$TESTFS
327	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
328}
329
330#
331# Create a number of mirrors.
332# We create a number ($1) of 2-way mirrors using the pairs of disks named
333# on the command line. These mirrors are *not* mounted.
334# @parameters: $1 the number of mirrors to create
335#  $... the devices to use to create the mirrors on
336# @uses: ZPOOL ZFS TESTPOOL
337function setup_mirrors
338{
339	typeset -i nmirrors=$1
340
341	shift
342	while ((nmirrors > 0)); do
343		log_must test -n "$1" -a -n "$2"
344		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
345		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
346		shift 2
347		((nmirrors = nmirrors - 1))
348	done
349}
350
351#
352# Create a number of raidz pools.
353# We create a number ($1) of 2-disk raidz pools using the pairs of disks
354# named on the command line. These pools are *not* mounted.
355# @parameters: $1 the number of pools to create
356#  $... the devices to use to create the pools on
357# @uses: ZPOOL ZFS TESTPOOL
358function setup_raidzs
359{
360	typeset -i nraidzs=$1
361
362	shift
363	while ((nraidzs > 0)); do
364		log_must test -n "$1" -a -n "$2"
365		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
366		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
367		shift 2
368		((nraidzs = nraidzs - 1))
369	done
370}
371
372#
373# Destroy the configured testpool mirrors.
374# the mirrors are of the form ${TESTPOOL}{number}
375# @uses: ZPOOL ZFS TESTPOOL
376function destroy_mirrors
377{
378	default_cleanup_noexit
379
380	log_pass
381}
382
383#
384# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
385# $1 the list of disks
386#
387function default_raidz_setup
388{
389	typeset disklist="$*"
390	disks=(${disklist[*]})
391
392	if [[ ${#disks[*]} -lt 2 ]]; then
393		log_fail "A raid-z requires a minimum of two disks."
394	fi
395
396	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
397	log_must zpool create -f $TESTPOOL raidz $1 $2 $3
398	log_must zfs create $TESTPOOL/$TESTFS
399	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
400
401	log_pass
402}
403
404#
405# Common function used to cleanup storage pools and datasets.
406#
407# Invoked at the start of the test suite to ensure the system
408# is in a known state, and also at the end of each set of
409# sub-tests to ensure errors from one set of tests don't
410# impact the execution of the next set.
411
412function default_cleanup
413{
414	default_cleanup_noexit
415
416	log_pass
417}
418
419function default_cleanup_noexit
420{
421	typeset exclude=""
422	typeset pool=""
423	#
424	# Destroying the pool will also destroy any
425	# filesystems it contains.
426	#
427	if is_global_zone; then
428		zfs unmount -a > /dev/null 2>&1
429		exclude=`eval echo \"'(${KEEP})'\"`
430		ALL_POOLS=$(zpool list -H -o name \
431		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
432		# Here, we loop through the pools we're allowed to
433		# destroy, only destroying them if it's safe to do
434		# so.
435		while [ ! -z "${ALL_POOLS}" ]
436		do
437			for pool in ${ALL_POOLS}
438			do
439				if safe_to_destroy_pool $pool ;
440				then
441					destroy_pool $pool
442				fi
443				ALL_POOLS=$(zpool list -H -o name \
444				    | grep -v "$NO_POOLS" \
445				    | egrep -v "$exclude")
446			done
447		done
448
449		zfs mount -a
450	else
451		typeset fs=""
452		for fs in $(zfs list -H -o name \
453		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
454			datasetexists $fs && \
455				log_must zfs destroy -Rf $fs
456		done
457
458		# Clean up here to avoid leaving garbage directories behind.
459		for fs in $(zfs list -H -o name); do
460			[[ $fs == /$ZONE_POOL ]] && continue
461			[[ -d $fs ]] && log_must rm -rf $fs/*
462		done
463
464		#
465		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
466		# the default value
467		#
468		for fs in $(zfs list -H -o name); do
469			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
470				log_must zfs set reservation=none $fs
471				log_must zfs set recordsize=128K $fs
472				log_must zfs set mountpoint=/$fs $fs
473				typeset enc=""
474				enc=$(get_prop encryption $fs)
475				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
476					[[ "$enc" == "off" ]]; then
477					log_must zfs set checksum=on $fs
478				fi
479				log_must zfs set compression=off $fs
480				log_must zfs set atime=on $fs
481				log_must zfs set devices=off $fs
482				log_must zfs set exec=on $fs
483				log_must zfs set setuid=on $fs
484				log_must zfs set readonly=off $fs
485				log_must zfs set snapdir=hidden $fs
486				log_must zfs set aclmode=groupmask $fs
487				log_must zfs set aclinherit=secure $fs
488			fi
489		done
490	fi
491
492	[[ -d $TESTDIR ]] && \
493		log_must rm -rf $TESTDIR
494}
495
496
497#
498# Common function used to cleanup storage pools, file systems
499# and containers.
500#
501function default_container_cleanup
502{
503	if ! is_global_zone; then
504		reexport_pool
505	fi
506
507	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
508	[[ $? -eq 0 ]] && \
509	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
510
511	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
512	    log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1
513
514	datasetexists $TESTPOOL/$TESTCTR && \
515	    log_must zfs destroy -Rf $TESTPOOL/$TESTCTR
516
517	[[ -e $TESTDIR1 ]] && \
518	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1
519
520	default_cleanup
521}
522
523#
524# Common function used to clean up a snapshot of a file system or volume.
525# By default, delete the file system's snapshot.
526#
527# $1 snapshot name
528#
529function destroy_snapshot
530{
531	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
532
533	if ! snapexists $snap; then
534		log_fail "'$snap' does not exist."
535	fi
536
537	#
538	# The mountpoint value reported by 'get_prop' may not match the real
539	# mountpoint when the snapshot is unmounted, so first check that the
540	# snapshot is actually mounted on the current system.
541	#
542	typeset mtpt=""
543	if ismounted $snap; then
544		mtpt=$(get_prop mountpoint $snap)
545		(($? != 0)) && \
546			log_fail "get_prop mountpoint $snap failed."
547	fi
548
549	log_must zfs destroy $snap
550	[[ $mtpt != "" && -d $mtpt ]] && \
551		log_must rm -rf $mtpt
552}
553
554#
555# Common function used to clean up a clone.
556#
557# $1 clone name
558#
559function destroy_clone
560{
561	typeset clone=${1:-$TESTPOOL/$TESTCLONE}
562
563	if ! datasetexists $clone; then
564		log_fail "'$clone' does not exist."
565	fi
566
567	# Same reasoning as in destroy_snapshot.
568	typeset mtpt=""
569	if ismounted $clone; then
570		mtpt=$(get_prop mountpoint $clone)
571		(($? != 0)) && \
572			log_fail "get_prop mountpoint $clone failed."
573	fi
574
575	log_must zfs destroy $clone
576	[[ $mtpt != "" && -d $mtpt ]] && \
577		log_must rm -rf $mtpt
578}
579
580#
581# Common function used to clean up a bookmark of a file system or volume.
582# By default, delete the file system's bookmark.
583#
584# $1 bookmark name
585#
586function destroy_bookmark
587{
588	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
589
590	if ! bkmarkexists $bkmark; then
591		log_fail "'$bkmark' does not exist."
592	fi
593
594	log_must zfs destroy $bkmark
595}
596
597# Return 0 if a snapshot exists; $? otherwise
598#
599# $1 - snapshot name
600
601function snapexists
602{
603	zfs list -H -t snapshot "$1" > /dev/null 2>&1
604	return $?
605}
606
607#
608# Return 0 if a bookmark exists; $? otherwise
609#
610# $1 - bookmark name
611#
612function bkmarkexists
613{
614	zfs list -H -t bookmark "$1" > /dev/null 2>&1
615	return $?
616}
617
618#
619# Set a property to a certain value on a dataset.
620# Sets a property of the dataset to the value as passed in.
621# @param:
622#	$1 dataset whose property is being set
623#	$2 property to set
624#	$3 value to set property to
625# @return:
626#	0 if the property could be set.
627#	non-zero otherwise.
628# @use: ZFS
629#
630function dataset_setprop
631{
632	typeset fn=dataset_setprop
633
634	if (($# < 3)); then
635		log_note "$fn: Insufficient parameters (need 3, had $#)"
636		return 1
637	fi
638	typeset output=
639	output=$(zfs set $2=$3 $1 2>&1)
640	typeset rv=$?
641	if ((rv != 0)); then
642		log_note "Setting property on $1 failed."
643		log_note "property $2=$3"
644		log_note "Return Code: $rv"
645		log_note "Output: $output"
646		return $rv
647	fi
648	return 0
649}
650
651#
652# Assign suite defined dataset properties.
653# This function is used to apply the suite's defined default set of
654# properties to a dataset.
655# @parameters: $1 dataset to use
656# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
657# @returns:
658#   0 if the dataset has been altered.
659#   1 if no pool name was passed in.
660#   2 if the dataset could not be found.
661#   3 if the dataset could not have its properties set.
662#
663function dataset_set_defaultproperties
664{
665	typeset dataset="$1"
666
667	[[ -z $dataset ]] && return 1
668
669	typeset confset=
670	typeset -i found=0
671	for confset in $(zfs list); do
672		if [[ $dataset = $confset ]]; then
673			found=1
674			break
675		fi
676	done
677	[[ $found -eq 0 ]] && return 2
678	if [[ -n $COMPRESSION_PROP ]]; then
679		dataset_setprop $dataset compression $COMPRESSION_PROP || \
680			return 3
681		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
682	fi
683	if [[ -n $CHECKSUM_PROP ]]; then
684		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
685			return 3
686		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
687	fi
688	return 0
689}
690
691#
692# Check a numeric assertion
693# @parameter: $@ the assertion to check
694# @output: big loud notice if assertion failed
695# @use: log_fail
696#
697function assert
698{
699	(($@)) || log_fail "$@"
700}
701
702#
703# Function to zero out the partition sizes of a disk.
704# Given a disk cxtxdx, reduces all partitions
705# to size 0.
706#
707function zero_partitions #<whole_disk_name>
708{
709	typeset diskname=$1
710	typeset i
711
712	for i in 0 1 3 4 5 6 7
713	do
714		set_partition $i "" 0mb $diskname
715	done
716}
717
718#
719# Given a slice, size and disk, this function
720# formats the slice to the specified size.
721# Size should be specified with units as per
722# the `format` command requirements eg. 100mb 3gb
723#
724function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
725{
726	typeset -i slicenum=$1
727	typeset start=$2
728	typeset size=$3
729	typeset disk=$4
730	[[ -z $slicenum || -z $size || -z $disk ]] && \
731	    log_fail "The slice, size or disk name is unspecified."
732	typeset format_file=/var/tmp/format_in.$$
733
734	echo "partition" >$format_file
735	echo "$slicenum" >> $format_file
736	echo "" >> $format_file
737	echo "" >> $format_file
738	echo "$start" >> $format_file
739	echo "$size" >> $format_file
740	echo "label" >> $format_file
741	echo "" >> $format_file
742	echo "q" >> $format_file
743	echo "q" >> $format_file
744
745	format -e -s -d $disk -f $format_file
746	typeset ret_val=$?
747	rm -f $format_file
748	[[ $ret_val -ne 0 ]] && \
749	    log_fail "Unable to format $disk slice $slicenum to $size"
750	return 0
751}
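#
# Example usage (illustrative; c1t0d0 is a hypothetical disk, and sizes
# follow the `format` conventions noted above):
#
#	set_partition 0 "" 100mb c1t0d0		# slice 0, 100mb, default start
#	set_partition 1 "$cyl" 1gb c1t0d0	# slice 1 starting at $cyl
#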
752
753#
754# Get the end cyl of the given slice
755#
756function get_endslice #<disk> <slice>
757{
758	typeset disk=$1
759	typeset slice=$2
760	if [[ -z $disk || -z $slice ]] ; then
761		log_fail "The disk name or slice number is unspecified."
762	fi
763
764	disk=${disk#/dev/dsk/}
765	disk=${disk#/dev/rdsk/}
766	disk=${disk%s*}
767
768	typeset -i ratio=0
769	ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
770		grep "sectors\/cylinder" | \
771		awk '{print $2}')
772
773	if ((ratio == 0)); then
774		return
775	fi
776
777	typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
778		nawk -v token="$slice" '{if ($1==token) print $6}')
779
780	((endcyl = (endcyl + 1) / ratio))
781	echo $endcyl
782}
783
784
785#
786# Given a size, disk and total slice number, this function formats the
787# disk slices from 0 to the total slice number with the same specified
788# size.
789#
790function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
791{
792	typeset -i i=0
793	typeset slice_size=$1
794	typeset disk_name=$2
795	typeset total_slices=$3
796	typeset cyl
797
798	zero_partitions $disk_name
799	while ((i < $total_slices)); do
800		if ((i == 2)); then
801			((i = i + 1))
802			continue
803		fi
804		set_partition $i "$cyl" $slice_size $disk_name
805		cyl=$(get_endslice $disk_name $i)
806		((i = i+1))
807	done
808}
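#
# Example usage (illustrative; c1t0d0 is a hypothetical disk): label
# slices 0 through 5 at 200mb each; slice 2 is skipped by the loop above:
#
#	partition_disk 200mb c1t0d0 6
#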
809
810#
811# This function keeps writing up to filenum files into each of dirnum
812# directories until either file_write returns an error or the maximum
813# number of directories has been filled.
814#
815# Usage:
816# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
817#
818# Return value: 0 on success
819#		non 0 on error
820#
821# Where :
822#	destdir:    is the directory where everything is to be created under
823#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
824#	filenum:    the maximum number of files per subdirectory
825#	bytes:	    number of bytes to write
826#	num_writes: number of times to write out bytes
827#	data:	    the data that will be written
828#
829#	E.g.
830#	fill_fs /testdir 20 25 1024 256 0
831#
832# Note: bytes * num_writes equals the size of the testfile
833#
834function fill_fs # destdir dirnum filenum bytes num_writes data
835{
836	typeset destdir=${1:-$TESTDIR}
837	typeset -i dirnum=${2:-50}
838	typeset -i filenum=${3:-50}
839	typeset -i bytes=${4:-8192}
840	typeset -i num_writes=${5:-10240}
841	typeset -i data=${6:-0}
842
843	typeset -i odirnum=1
844	typeset -i idirnum=0
845	typeset -i fn=0
846	typeset -i retval=0
847
848	log_must mkdir -p $destdir/$idirnum
849	while (($odirnum > 0)); do
850		if ((dirnum >= 0 && idirnum >= dirnum)); then
851			odirnum=0
852			break
853		fi
854		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
855		    -b $bytes -c $num_writes -d $data
856		retval=$?
857		if (($retval != 0)); then
858			odirnum=0
859			break
860		fi
861		if (($fn >= $filenum)); then
862			fn=0
863			((idirnum = idirnum + 1))
864			log_must mkdir -p $destdir/$idirnum
865		else
866			((fn = fn + 1))
867		fi
868	done
869	return $retval
870}
871
872#
873# Simple function to get the specified property. If unable to
874# get the property, log a note and return 1.
875#
876# Note property is in 'parsable' format (-p)
877#
878function get_prop # property dataset
879{
880	typeset prop_val
881	typeset prop=$1
882	typeset dataset=$2
883
884	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
885	if [[ $? -ne 0 ]]; then
886		log_note "Unable to get $prop property for dataset " \
887		"$dataset"
888		return 1
889	fi
890
891	echo "$prop_val"
892	return 0
893}
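#
# Example usage (illustrative; assumes $TESTPOOL/$TESTFS exists), mirroring
# the pattern used by the cleanup functions above:
#
#	mtpt=$(get_prop mountpoint $TESTPOOL/$TESTFS) ||
#	    log_fail "get_prop mountpoint failed"
#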
894
895#
896# Simple function to get the specified property of a pool. If unable to
897# get the property, log a note and return 1.
898#
899function get_pool_prop # property pool
900{
901	typeset prop_val
902	typeset prop=$1
903	typeset pool=$2
904
905	if poolexists $pool ; then
906		prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
907			awk '{print $3}')
908		if [[ $? -ne 0 ]]; then
909			log_note "Unable to get $prop property for pool " \
910			"$pool"
911			return 1
912		fi
913	else
914		log_note "Pool $pool does not exist."
915		return 1
916	fi
917
918	echo $prop_val
919	return 0
920}
921
922# Return 0 if a pool exists; $? otherwise
923#
924# $1 - pool name
925
926function poolexists
927{
928	typeset pool=$1
929
930	if [[ -z $pool ]]; then
931		log_note "No pool name given."
932		return 1
933	fi
934
935	zpool get name "$pool" > /dev/null 2>&1
936	return $?
937}
938
939# Return 0 if all the specified datasets exist; $? otherwise
940#
941# $1-n  dataset name
942function datasetexists
943{
944	if (($# == 0)); then
945		log_note "No dataset name given."
946		return 1
947	fi
948
949	while (($# > 0)); do
950		zfs get name $1 > /dev/null 2>&1 || \
951			return $?
952		shift
953	done
954
955	return 0
956}
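#
# Example usage (illustrative), guarding a destroy the same way the
# cleanup functions in this file do:
#
#	datasetexists $TESTPOOL/$TESTFS && \
#		log_must zfs destroy -r $TESTPOOL/$TESTFS
#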
957
958# Return 0 if none of the specified datasets exist; otherwise return 1.
959#
960# $1-n  dataset name
961function datasetnonexists
962{
963	if (($# == 0)); then
964		log_note "No dataset name given."
965		return 1
966	fi
967
968	while (($# > 0)); do
969		zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
970		    && return 1
971		shift
972	done
973
974	return 0
975}
976
977#
978# Given a mountpoint, or a dataset name, determine if it is shared.
979#
980# Returns 0 if shared, 1 otherwise.
981#
982function is_shared
983{
984	typeset fs=$1
985	typeset mtpt
986
987	if [[ $fs != "/"* ]] ; then
988		if datasetnonexists "$fs" ; then
989			return 1
990		else
991			mtpt=$(get_prop mountpoint "$fs")
992			case $mtpt in
993				none|legacy|-) return 1
994					;;
995				*)	fs=$mtpt
996					;;
997			esac
998		fi
999	fi
1000
1001	for mtpt in `share | awk '{print $2}'` ; do
1002		if [[ $mtpt == $fs ]] ; then
1003			return 0
1004		fi
1005	done
1006
1007	typeset stat=$(svcs -H -o STA nfs/server:default)
1008	if [[ $stat != "ON" ]]; then
1009		log_note "Current nfs/server status: $stat"
1010	fi
1011
1012	return 1
1013}
1014
1015#
1016# Given a mountpoint, determine if it is not shared.
1017#
1018# Returns 0 if not shared, 1 otherwise.
1019#
1020function not_shared
1021{
1022	typeset fs=$1
1023
1024	is_shared $fs
1025	if (($? == 0)); then
1026		return 1
1027	fi
1028
1029	return 0
1030}
1031
1032#
1033# Helper function to unshare a mountpoint.
1034#
1035function unshare_fs #fs
1036{
1037	typeset fs=$1
1038
1039	is_shared $fs
1040	if (($? == 0)); then
1041		log_must zfs unshare $fs
1042	fi
1043
1044	return 0
1045}
1046
1047#
1048# Check the NFS server status and bring it online if needed.
1049#
1050function setup_nfs_server
1051{
1052	# Cannot share directory in non-global zone.
1053	#
1054	if ! is_global_zone; then
1055		log_note "Cannot trigger NFS server by sharing in LZ."
1056		return
1057	fi
1058
1059	typeset nfs_fmri="svc:/network/nfs/server:default"
1060	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1061		#
1062		# Only a real share operation can bring the NFS server
1063		# online permanently.
1064		#
1065		typeset dummy=/tmp/dummy
1066
1067		if [[ -d $dummy ]]; then
1068			log_must rm -rf $dummy
1069		fi
1070
1071		log_must mkdir $dummy
1072		log_must share $dummy
1073
1074		#
1075		# Wait for the fmri's status to reach its final state. While
1076		# in transition, an asterisk (*) is appended to the instance
1077		# state, and an unshare would revert the status to 'DIS'.
1078		#
1079		# Wait at least 1 second.
1080		#
1081		log_must sleep 1
1082		timeout=10
1083		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1084		do
1085			log_must sleep 1
1086
1087			((timeout -= 1))
1088		done
1089
1090		log_must unshare $dummy
1091		log_must rm -rf $dummy
1092	fi
1093
1094	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1095}
1096
1097#
1098# To verify whether calling process is in global zone
1099#
1100# Return 0 if in global zone, 1 in non-global zone
1101#
1102function is_global_zone
1103{
1104	typeset cur_zone=$(zonename 2>/dev/null)
1105	if [[ $cur_zone != "global" ]]; then
1106		return 1
1107	fi
1108	return 0
1109}
1110
1111#
1112# Verify whether test is permitted to run from
1113# global zone, local zone, or both
1114#
1115# $1 zone limit, could be "global", "local", or "both" (no limit)
1116#
1117# Return 0 if permitted, otherwise exit with log_unsupported
1118#
1119function verify_runnable # zone limit
1120{
1121	typeset limit=$1
1122
1123	[[ -z $limit ]] && return 0
1124
1125	if is_global_zone ; then
1126		case $limit in
1127			global|both)
1128				;;
1129			local)	log_unsupported "Test is unable to run from "\
1130					"global zone."
1131				;;
1132			*)	log_note "Warning: unknown limit $limit - " \
1133					"using both."
1134				;;
1135		esac
1136	else
1137		case $limit in
1138			local|both)
1139				;;
1140			global)	log_unsupported "Test is unable to run from "\
1141					"local zone."
1142				;;
1143			*)	log_note "Warning: unknown limit $limit - " \
1144					"using both."
1145				;;
1146		esac
1147
1148		reexport_pool
1149	fi
1150
1151	return 0
1152}
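#
# Example usage (illustrative): a test script that must run in the global
# zone would begin with:
#
#	verify_runnable "global"
#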
1153
1154# Return 0 if the pool is created successfully; 1 otherwise
1155# Note: In local zones, this function should return 0 silently.
1156#
1157# $1 - pool name
1158# $2-n - [keyword] devs_list
1159
1160function create_pool #pool devs_list
1161{
1162	typeset pool=${1%%/*}
1163
1164	shift
1165
1166	if [[ -z $pool ]]; then
1167		log_note "Missing pool name."
1168		return 1
1169	fi
1170
1171	if poolexists $pool ; then
1172		destroy_pool $pool
1173	fi
1174
1175	if is_global_zone ; then
1176		[[ -d /$pool ]] && rm -rf /$pool
1177		log_must zpool create -f $pool $@
1178	fi
1179
1180	return 0
1181}
1182
1183# Return 0 if the pool is destroyed successfully; 1 otherwise
1184# Note: In local zones, this function should return 0 silently.
1185#
1186# $1 - pool name
1187# Destroy pool with the given parameters.
1188
1189function destroy_pool #pool
1190{
1191	typeset pool=${1%%/*}
1192	typeset mtpt
1193
1194	if [[ -z $pool ]]; then
1195		log_note "No pool name given."
1196		return 1
1197	fi
1198
1199	if is_global_zone ; then
1200		if poolexists "$pool" ; then
1201			mtpt=$(get_prop mountpoint "$pool")
1202
1203			# At times, syseventd activity can cause attempts to
1204			# destroy a pool to fail with EBUSY. We retry a few
1205			# times allowing failures before requiring the destroy
1206			# to succeed.
1207			typeset -i wait_time=10 ret=1 count=0
1208			must=""
1209			while [[ $ret -ne 0 ]]; do
1210				$must zpool destroy -f $pool
1211				ret=$?
1212				[[ $ret -eq 0 ]] && break
1213				log_note "zpool destroy failed with $ret"
1214				[[ count++ -ge 7 ]] && must=log_must
1215				sleep $wait_time
1216			done
1217
1218			[[ -d $mtpt ]] && \
1219				log_must rm -rf $mtpt
1220		else
1221			log_note "Pool does not exist. ($pool)"
1222			return 1
1223		fi
1224	fi
1225
1226	return 0
1227}
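#
# Example usage (illustrative; $DISK1 and $DISK2 are hypothetical devices):
#
#	create_pool $TESTPOOL mirror $DISK1 $DISK2
#	# ... run the test ...
#	destroy_pool $TESTPOOL
#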
1228
1229# Return 0 if created successfully; $? otherwise
1230#
1231# $1 - dataset name
1232# $2-n - dataset options
1233
1234function create_dataset #dataset dataset_options
1235{
1236	typeset dataset=$1
1237
1238	shift
1239
1240	if [[ -z $dataset ]]; then
1241		log_note "Missing dataset name."
1242		return 1
1243	fi
1244
1245	if datasetexists $dataset ; then
1246		destroy_dataset $dataset
1247	fi
1248
1249	log_must zfs create $@ $dataset
1250
1251	return 0
1252}
1253
1254# Return 0 if the dataset is destroyed successfully; 1 otherwise
1255# Note: In local zones, this function should return 0 silently.
1256#
1257# $1 - dataset name
1258
1259function destroy_dataset #dataset
1260{
1261	typeset dataset=$1
1262	typeset mtpt
1263
1264	if [[ -z $dataset ]]; then
1265		log_note "No dataset name given."
1266		return 1
1267	fi
1268
1269	if datasetexists "$dataset" ; then
1270		mtpt=$(get_prop mountpoint "$dataset")
1271		log_must zfs destroy -r $dataset
1272		[[ -d $mtpt ]] && log_must rm -rf $mtpt
1273	else
1274		log_note "Dataset does not exist. ($dataset)"
1275		return 1
1276	fi
1277
1278	return 0
1279}
1280
1281#
1282# First, create a pool with 5 datasets. Then, create a single zone and
1283# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1284# and a zvol device to the zone.
1285#
1286# $1 zone name
1287# $2 zone root directory prefix
1288# $3 zone ip
1289#
1290function zfs_zones_setup #zone_name zone_root zone_ip
1291{
1292	typeset zone_name=${1:-$(hostname)-z}
1293	typeset zone_root=${2:-"/zone_root"}
1294	typeset zone_ip=${3:-"10.1.1.10"}
1295	typeset prefix_ctr=$ZONE_CTR
1296	typeset pool_name=$ZONE_POOL
1297	typeset -i cntctr=5
1298	typeset -i i=0
1299
1300	# Create pool and 5 containers within it
1301	#
1302	[[ -d /$pool_name ]] && rm -rf /$pool_name
1303	log_must zpool create -f $pool_name $DISKS
1304	while ((i < cntctr)); do
1305		log_must zfs create $pool_name/$prefix_ctr$i
1306		((i += 1))
1307	done
1308
1309	# create a zvol
1310	log_must zfs create -V 1g $pool_name/zone_zvol
1311
1312	#
1313	# If the current system supports slog, add a slog device to the pool
1314	#
1315	if verify_slog_support ; then
1316		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
1317		log_must mkfile $MINVDEVSIZE $sdevs
1318		log_must zpool add $pool_name log mirror $sdevs
1319	fi
1320
1321	# this isn't supported just yet.
1322	# Create a filesystem. In order to add this to
1323	# the zone, it must have its mountpoint set to 'legacy'
1324	# log_must zfs create $pool_name/zfs_filesystem
1325	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
1326
1327	[[ -d $zone_root ]] && \
1328		log_must rm -rf $zone_root/$zone_name
1329	[[ ! -d $zone_root ]] && \
1330		log_must mkdir -p -m 0700 $zone_root/$zone_name
1331
1332	# Create the zone config file and configure the zone
1333	#
1334	typeset zone_conf=/tmp/zone_conf.$$
1335	echo "create" > $zone_conf
1336	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
1337	echo "set autoboot=true" >> $zone_conf
1338	i=0
1339	while ((i < cntctr)); do
1340		echo "add dataset" >> $zone_conf
1341		echo "set name=$pool_name/$prefix_ctr$i" >> \
1342			$zone_conf
1343		echo "end" >> $zone_conf
1344		((i += 1))
1345	done
1346
1347	# add our zvol to the zone
1348	echo "add device" >> $zone_conf
1349	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
1350	echo "end" >> $zone_conf
1351
1352	# add a corresponding zvol rdsk to the zone
1353	echo "add device" >> $zone_conf
1354	echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
1355	echo "end" >> $zone_conf
1356
1357	# once it's supported, we'll add our filesystem to the zone
1358	# echo "add fs" >> $zone_conf
1359	# echo "set type=zfs" >> $zone_conf
1360	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
1361	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
1362	# echo "end" >> $zone_conf
1363
1364	echo "verify" >> $zone_conf
1365	echo "commit" >> $zone_conf
1366	log_must zonecfg -z $zone_name -f $zone_conf
1367	log_must rm -f $zone_conf
1368
1369	# Install the zone
1370	zoneadm -z $zone_name install
1371	if (($? == 0)); then
1372		log_note "SUCCESS: zoneadm -z $zone_name install"
1373	else
1374		log_fail "FAIL: zoneadm -z $zone_name install"
1375	fi
1376
1377	# Install sysidcfg file
1378	#
1379	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1380	echo "system_locale=C" > $sysidcfg
1381	echo  "terminal=dtterm" >> $sysidcfg
1382	echo  "network_interface=primary {" >> $sysidcfg
1383	echo  "hostname=$zone_name" >> $sysidcfg
1384	echo  "}" >> $sysidcfg
1385	echo  "name_service=NONE" >> $sysidcfg
1386	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
1387	echo  "security_policy=NONE" >> $sysidcfg
1388	echo  "timezone=US/Eastern" >> $sysidcfg
1389
1390	# Boot this zone
1391	log_must zoneadm -z $zone_name boot
1392}
1393
1394#
1395# Reexport TESTPOOL & TESTPOOL(1-4)
1396#
1397function reexport_pool
1398{
1399	typeset -i cntctr=5
1400	typeset -i i=0
1401
1402	while ((i < cntctr)); do
1403		if ((i == 0)); then
1404			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1405			if ! ismounted $TESTPOOL; then
1406				log_must zfs mount $TESTPOOL
1407			fi
1408		else
1409			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1410			if eval ! ismounted \$TESTPOOL$i; then
1411				log_must eval zfs mount \$TESTPOOL$i
1412			fi
1413		fi
1414		((i += 1))
1415	done
1416}
1417
1418#
1419# Verify a given disk is online or offline
1420#
1421# Return 0 if the pool/disk matches the expected state, 1 otherwise
1422#
1423function check_state # pool disk state{online,offline}
1424{
1425	typeset pool=$1
1426	typeset disk=${2#/dev/dsk/}
1427	typeset state=$3
1428
1429	zpool status -v $pool | grep "$disk"  \
1430	    | grep -i "$state" > /dev/null 2>&1
1431
1432	return $?
1433}
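#
# Example usage (illustrative; c1t0d0 is a hypothetical pool member):
#
#	log_must zpool offline $TESTPOOL c1t0d0
#	log_must check_state $TESTPOOL c1t0d0 "offline"
#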
1434
1435#
1436# Get the mountpoint of a snapshot.
1437# A snapshot uses <mp_filesystem>/.zfs/snapshot/<snap>
1438# as its mountpoint.
1439#
1440function snapshot_mountpoint
1441{
1442	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1443
1444	if [[ $dataset != *@* ]]; then
1445		log_fail "Invalid snapshot name '$dataset'."
1446	fi
1447
1448	typeset fs=${dataset%@*}
1449	typeset snap=${dataset#*@}
1450
1451	if [[ -z $fs || -z $snap ]]; then
1452		log_fail "Invalid snapshot name '$dataset'."
1453	fi
1454
1455	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1456}
1457
1458#
1459# Given a pool and file system, this function will verify the file system
1460# using the zdb internal tool. Note that the pool is exported and imported
1461# to ensure it has consistent state.
1462#
1463function verify_filesys # pool filesystem dir
1464{
1465	typeset pool="$1"
1466	typeset filesys="$2"
1467	typeset zdbout="/tmp/zdbout.$$"
1468
1469	shift
1470	shift
1471	typeset dirs=$@
1472	typeset search_path=""
1473
1474	log_note "Calling zdb to verify filesystem '$filesys'"
1475	zfs unmount -a > /dev/null 2>&1
1476	log_must zpool export $pool
1477
1478	if [[ -n $dirs ]] ; then
1479		for dir in $dirs ; do
1480			search_path="$search_path -d $dir"
1481		done
1482	fi
1483
1484	log_must zpool import $search_path $pool
1485
1486	zdb -cudi $filesys > $zdbout 2>&1
1487	if [[ $? != 0 ]]; then
1488		log_note "Output: zdb -cudi $filesys"
1489		cat $zdbout
1490		log_fail "zdb detected errors with: '$filesys'"
1491	fi
1492
1493	log_must zfs mount -a
1494	log_must rm -rf $zdbout
1495}
1496
1497#
1498# Given a pool, this function lists all disks in the pool
1499#
1500function get_disklist # pool
1501{
1502	typeset disklist=""
1503
1504	disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1505	    grep -v "\-\-\-\-\-" | \
1506	    egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1507
1508	echo $disklist
1509}
1510
1511# /**
1512#  This function kills a given list of processes after a time period. We use
1513#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
1514#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1515#  would be listed as FAIL, which we don't want: we're happy with stress tests
1516#  running for a certain amount of time, then finishing.
1517#
1518# @param $1 the time in seconds after which we should terminate these processes
1519# @param $2..$n the processes we wish to terminate.
1520# */
1521function stress_timeout
1522{
1523	typeset -i TIMEOUT=$1
1524	shift
1525	typeset cpids="$@"
1526
1527	log_note "Waiting for child processes ($cpids). " \
1528		"This could take dozens of minutes, please be patient ..."
1529	log_must sleep $TIMEOUT
1530
1531	log_note "Killing child processes after the ${TIMEOUT}-second stress timeout."
1532	typeset pid
1533	for pid in $cpids; do
1534		ps -p $pid > /dev/null 2>&1
1535		if (($? == 0)); then
1536			log_must kill -USR1 $pid
1537		fi
1538	done
1539}
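#
# Example usage (illustrative; the workload commands are hypothetical):
#
#	some_stress_workload &
#	typeset pids=$!
#	another_stress_workload &
#	pids="$pids $!"
#	stress_timeout 600 $pids
#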
1540
1541#
1542# Verify a given hotspare disk is inuse or avail
1543#
1544# Return 0 if the pool/disk matches the expected state, 1 otherwise
1545#
1546function check_hotspare_state # pool disk state{inuse,avail}
1547{
1548	typeset pool=$1
1549	typeset disk=${2#/dev/dsk/}
1550	typeset state=$3
1551
1552	cur_state=$(get_device_state $pool $disk "spares")
1553
1554	if [[ $state != ${cur_state} ]]; then
1555		return 1
1556	fi
1557	return 0
1558}
1559
1560#
1561# Wait until a hotspare transitions to a given state or times out.
1562#
1563# Return 0 when the pool/disk matches the expected state, 1 on timeout.
1564#
1565function wait_hotspare_state # pool disk state timeout
1566{
1567	typeset pool=$1
1568	typeset disk=${2#/dev/dsk/}
1569	typeset state=$3
1570	typeset timeout=${4:-60}
1571	typeset -i i=0
1572
1573	while [[ $i -lt $timeout ]]; do
1574		if check_hotspare_state $pool $disk $state; then
1575			return 0
1576		fi
1577
1578		i=$((i+1))
1579		sleep 1
1580	done
1581
1582	return 1
1583}
1584
1585#
1586# Verify a given slog disk is online, offline or unavail
1587#
1588# Return 0 if the pool/disk matches the expected state, 1 otherwise
1589#
1590function check_slog_state # pool disk state{online,offline,unavail}
1591{
1592	typeset pool=$1
1593	typeset disk=${2#/dev/dsk/}
1594	typeset state=$3
1595
1596	cur_state=$(get_device_state $pool $disk "logs")
1597
1598	if [[ $state != ${cur_state} ]]; then
1599		return 1
1600	fi
1601	return 0
1602}
1603
1604#
1605# Verify a given vdev disk is online, offline or unavail
1606#
1607# Return 0 if the pool/disk matches the expected state, 1 otherwise
1608#
1609function check_vdev_state # pool disk state{online,offline,unavail}
1610{
1611	typeset pool=$1
1612	typeset disk=${2#/dev/dsk/}
1613	typeset state=$3
1614
1615	cur_state=$(get_device_state $pool $disk)
1616
1617	if [[ $state != ${cur_state} ]]; then
1618		return 1
1619	fi
1620	return 0
1621}
1622
1623#
1624# Wait until a vdev transitions to a given state or times out.
1625#
1626# Return 0 when the pool/disk matches the expected state, 1 on timeout.
1627#
1628function wait_vdev_state # pool disk state timeout
1629{
1630	typeset pool=$1
1631	typeset disk=${2#/dev/dsk/}
1632	typeset state=$3
1633	typeset timeout=${4:-60}
1634	typeset -i i=0
1635
1636	while [[ $i -lt $timeout ]]; do
1637		if check_vdev_state $pool $disk $state; then
1638			return 0
1639		fi
1640
1641		i=$((i+1))
1642		sleep 1
1643	done
1644
1645	return 1
1646}
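#
# Example usage (illustrative; c1t0d0 is a hypothetical pool member; the
# state strings match the uppercase vdev states printed by 'zpool status'):
#
#	log_must zpool online $TESTPOOL c1t0d0
#	log_must wait_vdev_state $TESTPOOL c1t0d0 "ONLINE" 60
#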
1647
1648#
1649# Check the output of 'zpool status -v <pool>' to see if the content
1650# of <token> contains the specified <keyword>.
1651#
1652# Return 0 if it does, 1 otherwise
1653#
1654function check_pool_status # pool token keyword <verbose>
1655{
1656	typeset pool=$1
1657	typeset token=$2
1658	typeset keyword=$3
1659	typeset verbose=${4:-false}
1660
1661	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
1662		($1==token) {print $0}')
1663	if [[ $verbose == true ]]; then
1664		log_note $scan
1665	fi
1666	echo $scan | grep -i "$keyword" > /dev/null 2>&1
1667
1668	return $?
1669}
1670
1671#
1672# The following functions are instances of check_pool_status():
1673#	is_pool_resilvering - check if a resilver is in progress on the pool
1674#	is_pool_resilvered - check if a resilver has completed on the pool
1675#	is_pool_scrubbing - check if a scrub is in progress on the pool
1676#	is_pool_scrubbed - check if a scrub has completed on the pool
1677#	is_pool_scrub_stopped - check if a scrub on the pool was stopped
1678#	is_pool_scrub_paused - check if a scrub on the pool is paused
1679#	is_pool_removing - check if the pool is removing a vdev
1680#	is_pool_removed - check if a device removal has completed on the pool
1681#
1682function is_pool_resilvering #pool <verbose>
1683{
1684	check_pool_status "$1" "scan" "resilver in progress since " $2
1685	return $?
1686}
1687
1688function is_pool_resilvered #pool <verbose>
1689{
1690	check_pool_status "$1" "scan" "resilvered " $2
1691	return $?
1692}
1693
1694function is_pool_scrubbing #pool <verbose>
1695{
1696	check_pool_status "$1" "scan" "scrub in progress since " $2
1697	return $?
1698}
1699
1700function is_pool_scrubbed #pool <verbose>
1701{
1702	check_pool_status "$1" "scan" "scrub repaired" $2
1703	return $?
1704}
1705
1706function is_pool_scrub_stopped #pool <verbose>
1707{
1708	check_pool_status "$1" "scan" "scrub canceled" $2
1709	return $?
1710}
1711
1712function is_pool_scrub_paused #pool <verbose>
1713{
1714	check_pool_status "$1" "scan" "scrub paused since " $2
1715	return $?
1716}
1717
1718function is_pool_removing #pool
1719{
1720	check_pool_status "$1" "remove" "in progress since "
1721	return $?
1722}
1723
1724function is_pool_removed #pool
1725{
1726	check_pool_status "$1" "remove" "completed on"
1727	return $?
1728}
1729
1730function wait_for_degraded
1731{
1732	typeset pool=$1
1733	typeset timeout=${2:-30}
1734	typeset t0=$SECONDS
1735
1736	while :; do
1737		[[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
1738		log_note "$pool is not yet degraded."
1739		sleep 1
1740		if ((SECONDS - t0 > $timeout)); then
1741			log_note "$pool not degraded after $timeout seconds."
1742			return 1
1743		fi
1744	done
1745
1746	return 0
1747}
1748
1749#
1750# Wait for a pool to be scrubbed
1751#
1752# $1 pool name
1753# $2 number of seconds to wait (optional; currently unused)
1754#
1755# Blocks until the pool has been scrubbed; note that it loops forever
1756# if no scrub is ever completed.
1757#
1758function wait_scrubbed
1759{
1760	typeset pool=${1:-$TESTPOOL}
1761	while true ; do
1762		is_pool_scrubbed $pool && break
1763		log_must sleep 1
1764	done
1765}
1766
1767#
1768# Use create_pool()/destroy_pool() to clean up the information on
1769# the given disks to avoid slice overlapping.
1770#
1771function cleanup_devices #vdevs
1772{
1773	typeset pool="foopool$$"
1774
1775	if poolexists $pool ; then
1776		destroy_pool $pool
1777	fi
1778
1779	create_pool $pool $@
1780	destroy_pool $pool
1781
1782	return 0
1783}
1784
1785#/**
1786# A function to find and locate free disks on a system or from given
1787# disks as the parameter. It works by filtering out disks that are in
1788# use as mounted, swap or dump devices
1789#
1790# $@ given disks to find which are free, default is all disks in
1791# the test system
1792#
1793# @return a string containing the list of available disks
1794#*/
1795function find_disks
1796{
1797	sfi=/tmp/swaplist.$$
1798	dmpi=/tmp/dumpdev.$$
1799	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1800
1801	swap -l > $sfi
1802	dumpadm > $dmpi 2>/dev/null
1803
1804# write an awk script that can process the output of format
1805# to produce a list of disks we know about. Note that we have
1806# to escape "$2" so that the shell doesn't interpret it while
1807# we're creating the awk script.
1808# -------------------
1809	cat > /tmp/find_disks.awk <<EOF
1810#!/bin/nawk -f
1811	BEGIN { FS="."; }
1812
1813	/^Specify disk/{
1814		searchdisks=0;
1815	}
1816
1817	{
1818		if (searchdisks && \$2 !~ "^$"){
1819			split(\$2,arr," ");
1820			print arr[1];
1821		}
1822	}
1823
1824	/^AVAILABLE DISK SELECTIONS:/{
1825		searchdisks=1;
1826	}
1827EOF
1828#---------------------
1829
1830	chmod 755 /tmp/find_disks.awk
1831	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
1832	rm /tmp/find_disks.awk
1833
1834	unused=""
1835	for disk in $disks; do
1836	# Check for mounted
1837		grep "${disk}[sp]" /etc/mnttab >/dev/null
1838		(($? == 0)) && continue
1839	# Check for swap
1840		grep "${disk}[sp]" $sfi >/dev/null
1841		(($? == 0)) && continue
1842	# check for dump device
1843		grep "${disk}[sp]" $dmpi >/dev/null
1844		(($? == 0)) && continue
1845	# check to see if this disk hasn't been explicitly excluded
1846	# by a user-set environment variable
1847		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
1848		(($? == 0)) && continue
1849		unused_candidates="$unused_candidates $disk"
1850	done
1851	rm $sfi
1852	rm $dmpi
1853
1854# now just check to see if those disks do actually exist
1855# by looking for a device pointing to the first slice in
1856# each case. limit the number to max_finddisksnum
1857	count=0
1858	for disk in $unused_candidates; do
1859		if [ -b /dev/dsk/${disk}s0 ]; then
1860		if [ $count -lt $max_finddisksnum ]; then
1861			unused="$unused $disk"
1862			# do not impose limit if $@ is provided
1863			[[ -z $@ ]] && ((count = count + 1))
1864		fi
1865		fi
1866	done
1867
1868# finally, return our disk list
1869	echo $unused
1870}
1871
1872#
1873# Add specified user to specified group
1874#
1875# $1 group name
1876# $2 user name
1877# $3 base of the homedir (optional)
1878#
1879function add_user #<group_name> <user_name> <basedir>
1880{
1881	typeset gname=$1
1882	typeset uname=$2
1883	typeset basedir=${3:-"/var/tmp"}
1884
1885	if ((${#gname} == 0 || ${#uname} == 0)); then
1886		log_fail "group name or user name is not defined."
1887	fi
1888
1889	log_must useradd -g $gname -d $basedir/$uname -m $uname
1890	log_must passwd -N $uname
1891
1892	return 0
1893}
1894
1895#
1896# Delete the specified user.
1897#
1898# $1 login name
1899# $2 base of the homedir (optional)
1900#
1901function del_user #<logname> <basedir>
1902{
1903	typeset user=$1
1904	typeset basedir=${2:-"/var/tmp"}
1905
1906	if ((${#user} == 0)); then
1907		log_fail "login name is necessary."
1908	fi
1909
1910	if id $user > /dev/null 2>&1; then
1911		log_must userdel $user
1912	fi
1913
1914	[[ -d $basedir/$user ]] && rm -fr $basedir/$user
1915
1916	return 0
1917}
1918
1919#
1920# Select a valid gid and create the specified group.
1921#
1922# $1 group name
1923#
1924function add_group #<group_name>
1925{
1926	typeset group=$1
1927
1928	if ((${#group} == 0)); then
1929		log_fail "group name is necessary."
1930	fi
1931
1932	# Assign 100 as the base gid
1933	typeset -i gid=100
1934	while true; do
1935		groupadd -g $gid $group > /dev/null 2>&1
1936		typeset -i ret=$?
1937		case $ret in
1938			0) return 0 ;;
1939			# The gid is not unique
1940			4) ((gid += 1)) ;;
1941			*) return 1 ;;
1942		esac
1943	done
1944}
1945
1946#
1947# Delete the specified group.
1948#
1949# $1 group name
1950#
1951function del_group #<group_name>
1952{
1953	typeset grp=$1
1954	if ((${#grp} == 0)); then
1955		log_fail "group name is necessary."
1956	fi
1957
1958	groupmod -n $grp $grp > /dev/null 2>&1
1959	typeset -i ret=$?
1960	case $ret in
1961		# Group does not exist.
1962		6) return 0 ;;
1963		# Name already exists as a group name
1964		9) log_must groupdel $grp ;;
1965		*) return 1 ;;
1966	esac
1967
1968	return 0
1969}
1970
1971#
1972# This function will return true if it's safe to destroy the pool passed
1973# as argument 1. It checks for pools based on zvols and files, and also
1974# files contained in a pool that may have a different mountpoint.
1975#
1976function safe_to_destroy_pool { # $1 the pool name
1977
1978	typeset pool=""
1979	typeset DONT_DESTROY=""
1980
1981	# We check that by deleting the $1 pool, we're not
1982	# going to pull the rug out from under other pools. Do this
1983	# by looking at all other pools, ensuring that they
1984	# aren't built from files or zvols contained in this pool.
1985
1986	for pool in $(zpool list -H -o name)
1987	do
1988		ALTMOUNTPOOL=""
1989
1990		# this is a list of the file vdevs backing this pool whose
1991		# paths lie within the $1 pool
1992		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
1993			awk '{print $1}')
1994
1995		# this is a list of the zvols that make up the pool
1996		ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
1997		    | awk '{print $1}')
1998
1999		# also want to determine if it's a file-based pool using an
2000		# alternate mountpoint...
2001		POOL_FILE_DIRS=$(zpool status -v $pool | \
2002					grep / | awk '{print $1}' | \
2003					awk -F/ '{print $2}' | grep -v "dev")
2004
2005		for pooldir in $POOL_FILE_DIRS
2006		do
2007			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2008					grep "${pooldir}$" | awk '{print $1}')
2009
2010			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2011		done
2012
2013
2014		if [ ! -z "$ZVOLPOOL" ]
2015		then
2016			DONT_DESTROY="true"
2017			log_note "Pool $pool is built from $ZVOLPOOL on $1"
2018		fi
2019
2020		if [ ! -z "$FILEPOOL" ]
2021		then
2022			DONT_DESTROY="true"
2023			log_note "Pool $pool is built from $FILEPOOL on $1"
2024		fi
2025
2026		if [ ! -z "$ALTMOUNTPOOL" ]
2027		then
2028			DONT_DESTROY="true"
2029			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2030		fi
2031	done
2032
2033	if [ -z "${DONT_DESTROY}" ]
2034	then
2035		return 0
2036	else
2037		log_note "Warning: it is not safe to destroy $1!"
2038		return 1
2039	fi
2040}
2041
2042#
2043# Get the available ZFS compression options
2044# $1 option type zfs_set|zfs_compress
2045#
2046function get_compress_opts
2047{
2048	typeset COMPRESS_OPTS
2049	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2050			gzip-6 gzip-7 gzip-8 gzip-9"
2051
2052	if [[ $1 == "zfs_compress" ]] ; then
2053		COMPRESS_OPTS="on lzjb"
2054	elif [[ $1 == "zfs_set" ]] ; then
2055		COMPRESS_OPTS="on off lzjb"
2056	fi
2057	typeset valid_opts="$COMPRESS_OPTS"
2058	zfs get 2>&1 | grep gzip >/dev/null 2>&1
2059	if [[ $? -eq 0 ]]; then
2060		valid_opts="$valid_opts $GZIP_OPTS"
2061	fi
2062	echo "$valid_opts"
2063}
2064
2065#
2066# Verify zfs operation with -p option work as expected
2067# $1 operation, value could be create, clone or rename
2068# $2 dataset type, value could be fs or vol
2069# $3 dataset name
2070# $4 new dataset name
2071#
2072function verify_opt_p_ops
2073{
2074	typeset ops=$1
2075	typeset datatype=$2
2076	typeset dataset=$3
2077	typeset newdataset=$4
2078
2079	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2080		log_fail "$datatype is not supported."
2081	fi
2082
2083	# check parameters accordingly
2084	case $ops in
2085		create)
2086			newdataset=$dataset
2087			dataset=""
2088			if [[ $datatype == "vol" ]]; then
2089				ops="create -V $VOLSIZE"
2090			fi
2091			;;
2092		clone)
2093			if [[ -z $newdataset ]]; then
2094				log_fail "newdataset should not be empty" \
2095					"when ops is $ops."
2096			fi
2097			log_must datasetexists $dataset
2098			log_must snapexists $dataset
2099			;;
2100		rename)
2101			if [[ -z $newdataset ]]; then
2102				log_fail "newdataset should not be empty" \
2103					"when ops is $ops."
2104			fi
2105			log_must datasetexists $dataset
2106			log_mustnot snapexists $dataset
2107			;;
2108		*)
2109			log_fail "$ops is not supported."
2110			;;
2111	esac
2112
2113	# make sure the upper level filesystem does not exist
2114	if datasetexists ${newdataset%/*} ; then
2115		log_must zfs destroy -rRf ${newdataset%/*}
2116	fi
2117
2118	# without -p option, operation will fail
2119	log_mustnot zfs $ops $dataset $newdataset
2120	log_mustnot datasetexists $newdataset ${newdataset%/*}
2121
2122	# with -p option, operation should succeed
2123	log_must zfs $ops -p $dataset $newdataset
2124	if ! datasetexists $newdataset ; then
2125		log_fail "-p option does not work for $ops"
2126	fi
2127
2128	# when $ops is create or clone, redoing the operation should still return zero
2129	if [[ $ops != "rename" ]]; then
2130		log_must zfs $ops -p $dataset $newdataset
2131	fi
2132
2133	return 0
2134}
2135
2136#
2137# Get configuration of pool
2138# $1 pool name
2139# $2 config name
2140#
2141function get_config
2142{
2143	typeset pool=$1
2144	typeset config=$2
2145	typeset alt_root
2146
2147	if ! poolexists "$pool" ; then
2148		return 1
2149	fi
2150	alt_root=$(zpool list -H $pool | awk '{print $NF}')
2151	if [[ $alt_root == "-" ]]; then
2152		value=$(zdb -C $pool | grep "$config:" | awk -F: \
2153		    '{print $2}')
2154	else
2155		value=$(zdb -e $pool | grep "$config:" | awk -F: \
2156		    '{print $2}')
2157	fi
2158	if [[ -n $value ]] ; then
2159		value=${value#\'}
2160		value=${value%\'}
2161	fi
2162	echo $value
2163
2164	return 0
2165}
2166
2167#
2168# Private function. Randomly select one of the items from the arguments.
2169#
2170# $1 count
2171# $2-n string
2172#
2173function _random_get
2174{
2175	typeset cnt=$1
2176	shift
2177
2178	typeset str="$@"
2179	typeset -i ind
2180	((ind = RANDOM % cnt + 1))
2181
2182	typeset ret=$(echo "$str" | cut -f $ind -d ' ')
2183	echo $ret
2184}
2185
2186#
2187# Randomly select one item from the arguments, including the NONE string
2188#
2189function random_get_with_non
2190{
2191	typeset -i cnt=$#
2192	((cnt += 1))
2193
2194	_random_get "$cnt" "$@"
2195}
2196
2197#
2198# Randomly select one item from the arguments, excluding the NONE string
2199#
2200function random_get
2201{
2202	_random_get "$#" "$@"
2203}
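#
# Example usage (illustrative), picking a random compression setting from
# the options reported by get_compress_opts() below:
#
#	comp=$(random_get $(get_compress_opts zfs_set))
#	log_must zfs set compression=$comp $TESTPOOL/$TESTFS
#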
2204
2205#
2206# Detect if the current system supports slog
2207#
2208function verify_slog_support
2209{
2210	typeset dir=/tmp/disk.$$
2211	typeset pool=foo.$$
2212	typeset vdev=$dir/a
2213	typeset sdev=$dir/b
2214
2215	mkdir -p $dir
2216	mkfile $MINVDEVSIZE $vdev $sdev
2217
2218	typeset -i ret=0
2219	if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
2220		ret=1
2221	fi
2222	rm -r $dir
2223
2224	return $ret
2225}
2226
2227#
2228# The function will generate a dataset name with a specific length
2229# $1, the length of the name
2230# $2, the base string to construct the name
2231#
2232function gen_dataset_name
2233{
2234	typeset -i len=$1
2235	typeset basestr="$2"
2236	typeset -i baselen=${#basestr}
2237	typeset -i iter=0
2238	typeset l_name=""
2239
2240	if ((len % baselen == 0)); then
2241		((iter = len / baselen))
2242	else
2243		((iter = len / baselen + 1))
2244	fi
2245	while ((iter > 0)); do
2246		l_name="${l_name}$basestr"
2247
2248		((iter -= 1))
2249	done
2250
2251	echo $l_name
2252}
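
# For example, "gen_dataset_name 10 abc" prints "abcabcabcabc": 10 is not a
# multiple of 3, so the base string is repeated ceil(10/3) = 4 times.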
2253
2254#
2255# Get cksum tuple of dataset
2256# $1 dataset name
2257#
2258# sample zdb output:
2259# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2260# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2261# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2262# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2263function datasetcksum
2264{
2265	typeset cksum
2266	sync
2267	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2268		| awk -F= '{print $7}')
2269	echo $cksum
2270}
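
# Illustrative usage (a sketch, comparing a dataset before and after a
# modification):
#
#	before=$(datasetcksum $TESTPOOL/$TESTFS)
#	log_must mkfile 1m $TESTDIR/f1
#	after=$(datasetcksum $TESTPOOL/$TESTFS)
#	[[ $before != $after ]] || log_fail "cksum did not change"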
2271
2272#
2273# Get cksum of file
2274# $1 file path
2275#
2276function checksum
2277{
2278	typeset cksum
2279	cksum=$(cksum $1 | awk '{print $1}')
2280	echo $cksum
2281}
2282
2283#
2284# Get the given disk/slice state from the specific field of the pool
2285#
2286function get_device_state #pool disk field("", "spares","logs")
2287{
2288	typeset pool=$1
2289	typeset disk=${2#/dev/dsk/}
2290	typeset field=${3:-$pool}
2291
2292	state=$(zpool status -v "$pool" 2>/dev/null | \
2293		nawk -v device=$disk -v pool=$pool -v field=$field \
2294		'BEGIN {startconfig=0; startfield=0; }
2295		/config:/ {startconfig=1}
2296		(startconfig==1) && ($1==field) {startfield=1; next;}
2297		(startfield==1) && ($1==device) {print $2; exit;}
2298		(startfield==1) &&
2299		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2300	echo $state
2301}
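
# Illustrative usage (a sketch; the device names are hypothetical):
#
#	state=$(get_device_state $TESTPOOL c1t1d0)
#	[[ $state == "ONLINE" ]] || log_fail "c1t1d0 is $state"
#	spare_state=$(get_device_state $TESTPOOL c1t2d0 "spares")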
2302
2303
2304#
2305# Print the filesystem type of the given directory
2306#
2307# $1 directory name
2308#
2309function get_fstype
2310{
2311	typeset dir=$1
2312
2313	if [[ -z $dir ]]; then
2314		log_fail "Usage: get_fstype <directory>"
2315	fi
2316
2317	#
2318	#  $ df -n /
2319	#  /		  : ufs
2320	#
2321	df -n $dir | awk '{print $3}'
2322}
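
# Illustrative usage (a sketch):
#
#	[[ $(get_fstype $TESTDIR) == zfs ]] || log_fail "$TESTDIR is not zfs"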
2323
2324#
2325# Given a disk, label it with VTOC regardless of what label was on the disk
2326# $1 disk
2327#
2328function labelvtoc
2329{
2330	typeset disk=$1
2331	if [[ -z $disk ]]; then
2332		log_fail "The disk name is unspecified."
2333	fi
2334	typeset label_file=/var/tmp/labelvtoc.$$
2335	typeset arch=$(uname -p)
2336
2337	if [[ $arch == "i386" ]]; then
2338		echo "label" > $label_file
2339		echo "0" >> $label_file
2340		echo "" >> $label_file
2341		echo "q" >> $label_file
2342		echo "q" >> $label_file
2343
2344		fdisk -B $disk >/dev/null 2>&1
2345		# wait a while for fdisk to finish
2346		sleep 60
2347	elif [[ $arch == "sparc" ]]; then
2348		echo "label" > $label_file
2349		echo "0" >> $label_file
2350		echo "" >> $label_file
2351		echo "" >> $label_file
2352		echo "" >> $label_file
2353		echo "q" >> $label_file
2354	else
2355		log_fail "unknown arch type"
2356	fi
2357
2358	format -e -s -d $disk -f $label_file
2359	typeset -i ret_val=$?
2360	rm -f $label_file
2361	#
2362	# wait the format to finish
2363	# wait for the format to finish
2364	sleep 60
2365	if ((ret_val != 0)); then
2366		log_fail "unable to label $disk as VTOC."
2367	fi
2368
2369	return 0
2370}
2371
2372#
2373# Check whether the system was installed with a ZFS root.
2374# return: 0 if true, otherwise non-zero
2375#
2376function is_zfsroot
2377{
2378	df -n / | grep zfs > /dev/null 2>&1
2379	return $?
2380}
2381
2382#
2383# Get the root filesystem name if it's a zfsroot system.
2384#
2385# return: root filesystem name
2386function get_rootfs
2387{
2388	typeset rootfs=""
2389	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
2390		/etc/mnttab)
2391	if [[ -z "$rootfs" ]]; then
2392		log_fail "Cannot get rootfs"
2393	fi
2394	zfs list $rootfs > /dev/null 2>&1
2395	if (($? == 0)); then
2396		echo $rootfs
2397	else
2398		log_fail "This is not a zfsroot system."
2399	fi
2400}
2401
2402#
2403# get the rootfs's pool name
2404# return:
2405#       rootpool name
2406#
2407function get_rootpool
2408{
2409	typeset rootfs=""
2410	typeset rootpool=""
2411	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
2412		 /etc/mnttab)
2413	if [[ -z "$rootfs" ]]; then
2414		log_fail "Cannot get rootpool"
2415	fi
2416	zfs list $rootfs > /dev/null 2>&1
2417	if (($? == 0)); then
2418		rootpool=$(echo $rootfs | awk -F/ '{print $1}')
2419		echo $rootpool
2420	else
2421		log_fail "This is not a zfsroot system."
2422	fi
2423}
2424
2425#
2426# Check if the given device is a physical device
2427#
2428function is_physical_device #device
2429{
2430	typeset device=${1#/dev/dsk/}
2431	device=${device#/dev/rdsk/}
2432
2433	echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
2434	return $?
2435}
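
# For example, "c0t0d0" and "/dev/dsk/c1t0d0" match the pattern above, while a
# file-backed vdev such as "/var/tmp/vdev.0" does not.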
2436
2437#
2438# Get the directory path of given device
2439#
2440function get_device_dir #device
2441{
2442	typeset device=$1
2443
2444	if ! is_physical_device $device ; then
2445		if [[ $device != "/" ]]; then
2446			device=${device%/*}
2447		fi
2448		echo $device
2449	else
2450		echo "/dev/dsk"
2451	fi
2452}
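
# For example (hypothetical inputs):
#
#	get_device_dir c0t0d0		prints /dev/dsk
#	get_device_dir /var/tmp/vdev.0	prints /var/tmp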
2453
2454#
2455# Get the package name
2456#
2457function get_package_name
2458{
2459	typeset dirpath=${1:-$STC_NAME}
2460
2461	echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
2462}
2463
2464#
2465# Get the number of words in a whitespace-separated string
2466#
2467function get_word_count
2468{
2469	echo $1 | wc -w
2470}
2471
2472#
2473# Verify that the required number of disks is given
2474#
2475function verify_disk_count
2476{
2477	typeset -i min=${2:-1}
2478
2479	typeset -i count=$(get_word_count "$1")
2480
2481	if ((count < min)); then
2482		log_untested "A minimum of $min disks is required to run." \
2483			" You specified $count disk(s)"
2484	fi
2485}
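
# Illustrative usage in a setup script (a sketch; $DISKS comes from the test
# environment):
#
#	verify_disk_count "$DISKS" 2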
2486
2487function ds_is_volume
2488{
2489	typeset type=$(get_prop type $1)
2490	[[ $type = "volume" ]] && return 0
2491	return 1
2492}
2493
2494function ds_is_filesystem
2495{
2496	typeset type=$(get_prop type $1)
2497	[[ $type = "filesystem" ]] && return 0
2498	return 1
2499}
2500
2501function ds_is_snapshot
2502{
2503	typeset type=$(get_prop type $1)
2504	[[ $type = "snapshot" ]] && return 0
2505	return 1
2506}
2507
2508#
2509# Check if Trusted Extensions are installed and enabled
2510#
2511function is_te_enabled
2512{
2513	svcs -H -o state labeld 2>/dev/null | grep "enabled" > /dev/null
2514	if (($? != 0)); then
2515		return 1
2516	else
2517		return 0
2518	fi
2519}
2520
2521# Utility function to determine if a system has multiple cpus.
2522function is_mp
2523{
2524	(($(psrinfo | wc -l) > 1))
2525}
2526
2527function get_cpu_freq
2528{
2529	psrinfo -v 0 | awk '/processor operates at/ {print $6}'
2530}
2531
2532# Run the given command as the user provided.
2533function user_run
2534{
2535	typeset user=$1
2536	shift
2537
2538	eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
2539	return $?
2540}
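
# Illustrative usage (a sketch; the user name is hypothetical):
#
#	log_must user_run user1 "zfs snapshot $TESTPOOL/$TESTFS@snap"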
2541
2542#
2543# Check if the pool contains the specified vdevs
2544#
2545# $1 pool
2546# $2..n <vdev> ...
2547#
2548# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2549# vdevs is not in the pool, and 2 if pool name is missing.
2550#
2551function vdevs_in_pool
2552{
2553	typeset pool=$1
2554	typeset vdev
2555
2556	if [[ -z $pool ]]; then
2557		log_note "Missing pool name."
2558		return 2
2559	fi
2560
2561	shift
2562
2563	typeset tmpfile=$(mktemp)
2564	zpool list -Hv "$pool" >$tmpfile
2565	for vdev in $@; do
2566		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
2567		[[ $? -ne 0 ]] && rm -f $tmpfile && return 1
2568	done
2569
2570	rm -f $tmpfile
2571
2572	return 0
2573}
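
# Illustrative usage (a sketch; $DISK1 and $DISK2 are hypothetical):
#
#	log_must vdevs_in_pool $TESTPOOL $DISK1 $DISK2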
2574
2575function get_max
2576{
2577	typeset -i i max=$1
2578	shift
2579
2580	for i in "$@"; do
2581		max=$((max > i ? max : i))
2582	done
2583
2584	echo $max
2585}
2586
2587function get_min
2588{
2589	typeset -i i min=$1
2590	shift
2591
2592	for i in "$@"; do
2593		min=$((min < i ? min : i))
2594	done
2595
2596	echo $min
2597}
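
# For example, "get_max 17 4 62 9" prints 62 and "get_min 17 4 62 9" prints 4.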
2598
2599#
2600# Generate a random number between 1 and the argument.
2601#
2602function random
2603{
2604	typeset max=$1
2605	echo $(( ($RANDOM % $max) + 1 ))
2606}
2607
2608# Write data that can be compressed into a directory
2609function write_compressible
2610{
2611	typeset dir=$1
2612	typeset megs=$2
2613	typeset nfiles=${3:-1}
2614	typeset bs=${4:-1024k}
2615	typeset fname=${5:-file}
2616
2617	[[ -d $dir ]] || log_fail "No directory: $dir"
2618
2619	log_must eval "fio \
2620	    --name=job \
2621	    --fallocate=0 \
2622	    --minimal \
2623	    --randrepeat=0 \
2624	    --buffer_compress_percentage=66 \
2625	    --buffer_compress_chunk=4096 \
2626	    --directory=$dir \
2627	    --numjobs=$nfiles \
2628	    --rw=write \
2629	    --bs=$bs \
2630	    --filesize=$megs \
2631	    --filename_format='$fname.\$jobnum' >/dev/null"
2632}
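
# Illustrative usage (a sketch; requires fio). This writes two 16m files,
# file.0 and file.1, of roughly 66% compressible data into $TESTDIR:
#
#	write_compressible $TESTDIR 16m 2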
2633
2634function get_objnum
2635{
2636	typeset pathname=$1
2637	typeset objnum
2638
2639	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
2640	objnum=$(stat -c %i $pathname)
2641	echo $objnum
2642}
2643
2644#
2645# Sync data to the pool
2646#
2647# $1 pool name
2648# $2 boolean to force uberblock (and config including zpool cache file) update
2649#
2650function sync_pool #pool <force>
2651{
2652	typeset pool=${1:-$TESTPOOL}
2653	typeset force=${2:-false}
2654
2655	if [[ $force == true ]]; then
2656		log_must zpool sync -f $pool
2657	else
2658		log_must zpool sync $pool
2659	fi
2660
2661	return 0
2662}
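
# Illustrative usage (a sketch):
#
#	sync_pool $TESTPOOL true	# also force an uberblock update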
2663
2664#
2665# Prints the current time in seconds since UNIX Epoch.
2666#
2667function current_epoch
2668{
2669	printf '%(%s)T'
2670}
2671
2672#
2673# Get decimal value of global uint32_t variable using mdb.
2674#
2675function mdb_get_uint32
2676{
2677	typeset variable=$1
2678	typeset value
2679
2680	value=$(mdb -k -e "$variable/X | ::eval .=U")
2681	if [[ $? -ne 0 ]]; then
2682		log_fail "Failed to get value of '$variable' from mdb."
2683		return 1
2684	fi
2685
2686	echo $value
2687	return 0
2688}
2689
2690#
2691# Set global uint32_t variable to a decimal value using mdb.
2692#
2693function mdb_set_uint32
2694{
2695	typeset variable=$1
2696	typeset value=$2
2697
2698	mdb -kw -e "$variable/W 0t$value" > /dev/null
2699	if [[ $? -ne 0 ]]; then
2700		echo "Failed to set '$variable' to '$value' in mdb."
2701		return 1
2702	fi
2703
2704	return 0
2705}
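
# Illustrative usage (a sketch; the variable name is hypothetical and must be
# a global uint32_t in the running kernel):
#
#	saved=$(mdb_get_uint32 "some_uint32_var")
#	log_must mdb_set_uint32 "some_uint32_var" 1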
2706
2707#
2708# Set global scalar integer variable to a hex value using mdb.
2709# Note: Target should have CTF data loaded.
2710#
2711function mdb_ctf_set_int
2712{
2713	typeset variable=$1
2714	typeset value=$2
2715
2716	mdb -kw -e "$variable/z $value" > /dev/null
2717	if [[ $? -ne 0 ]]; then
2718		echo "Failed to set '$variable' to '$value' in mdb."
2719		return 1
2720	fi
2721
2722	return 0
2723}
2724
2725#
2726# Set a global system tunable (64-bit value)
2727#
2728# $1 tunable name
2729# $2 tunable value
2730#
2731function set_tunable64
2732{
2733	set_tunable_impl "$1" "$2" Z
2734}
2735
2736#
2737# Set a global system tunable (32-bit value)
2738#
2739# $1 tunable name
2740# $2 tunable value
2741#
2742function set_tunable32
2743{
2744	set_tunable_impl "$1" "$2" W
2745}
2746
2747function set_tunable_impl
2748{
2749	typeset tunable="$1"
2750	typeset value="$2"
2751	typeset mdb_cmd="$3"
2752	typeset module="${4:-zfs}"
2753
2754	[[ -z "$tunable" ]] && return 1
2755	[[ -z "$value" ]] && return 1
2756	[[ -z "$mdb_cmd" ]] && return 1
2757
2758	case "$(uname)" in
2759	Linux)
2760		typeset zfs_tunables="/sys/module/$module/parameters"
2761		[[ -w "$zfs_tunables/$tunable" ]] || return 1
2762		echo -n "$value" > "$zfs_tunables/$tunable"
2763		return "$?"
2764		;;
2765	SunOS)
2766		[[ "$module" == "zfs" ]] || return 1
2767		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
2768		return "$?"
2769		;;
2770	esac
2771}
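
# Illustrative usage (a sketch; the tunable name is hypothetical and must
# exist as a global variable in the zfs module):
#
#	log_must set_tunable32 zfs_some_tunable 5000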
2772
2773#
2774# Get a global system tunable
2775#
2776# $1 tunable name
2777#
2778function get_tunable
2779{
2780	get_tunable_impl "$1"
2781}
2782
2783function get_tunable_impl
2784{
2785	typeset tunable="$1"
2786	typeset module="${2:-zfs}"
2787
2788	[[ -z "$tunable" ]] && return 1
2789
2790	case "$(uname)" in
2791	Linux)
2792		typeset zfs_tunables="/sys/module/$module/parameters"
2793		[[ -f "$zfs_tunables/$tunable" ]] || return 1
2794		cat $zfs_tunables/$tunable
2795		return "$?"
2796		;;
2797	SunOS)
2798		typeset value=$(mdb -k -e "$tunable/X | ::eval .=U")
2799		if [[ $? -ne 0 ]]; then
2800			log_fail "Failed to get value of '$tunable' from mdb."
2801			return 1
2802		fi
2803		echo $value
2804		return 0
2805		;;
2806	esac
2807
2808	return 1
2809}
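
# Illustrative usage (a sketch, pairing with set_tunable32 above; the tunable
# name is hypothetical):
#
#	saved=$(get_tunable zfs_some_tunable)
#	log_must set_tunable32 zfs_some_tunable 5000
#	# ... test body ...
#	log_must set_tunable32 zfs_some_tunable $saved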
2810