xref: /illumos-gate/usr/src/test/zfs-tests/include/libtest.shlib (revision 4c75c86ed9514c627ddb82a345adecc7c1e43b91)
1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24# Use is subject to license terms.
25# Copyright (c) 2012, 2017 by Delphix. All rights reserved.
26# Copyright (c) 2017 by Tim Chase. All rights reserved.
27# Copyright (c) 2017 by Nexenta Systems, Inc. All rights reserved.
28# Copyright (c) 2017 Datto Inc.
29# Copyright 2020 Joyent, Inc.
30# Copyright 2024 MNX Cloud, Inc.
31#
32
33. ${STF_TOOLS}/contrib/include/logapi.shlib
34. ${STF_SUITE}/include/math.shlib
35. ${STF_SUITE}/include/blkdev.shlib
36
37# Determine if this is a Linux test system
38#
39# Return 0 if platform Linux, 1 if otherwise
40
function is_linux
{
	# The test's exit status is the function's return status:
	# 0 when 'uname -o' reports GNU/Linux, 1 on any other platform.
	[[ $(uname -o) == "GNU/Linux" ]]
}
49
50# Determine whether a dataset is mounted
51#
52# $1 dataset name
53# $2 filesystem type; optional - defaulted to zfs
54#
55# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
56
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			# $1 may be either a mountpoint (leading '/') or a
			# dataset name; match it against the corresponding
			# column of 'zfs mount' output (col 2 = mountpoint,
			# col 1 = dataset).
			if [[ "$1" == "/"* ]] ; then
				for out in $(zfs mount | awk '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $(zfs mount | awk '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			# df exits non-zero when $1 is neither mounted nor a
			# valid argument; that status is propagated (this is
			# the "2 on error" case from the header comment).
			out=$(df -F $fstype $1 2>/dev/null)
			ret=$?
			(($ret != 0)) && return $ret

			# df output is of the form "<dir> (<name>): ...";
			# extract both fields and match $1 against either.
			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	esac

	return 1
}
92
93# Return 0 if a dataset is mounted; 1 otherwise
94#
95# $1 dataset name
96# $2 filesystem type; optional - defaulted to zfs
97
function mounted
{
	# Map ismounted's tri-state result (0/1/2) onto a plain
	# boolean: 0 when mounted, 1 otherwise.
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
104
105# Return 0 if a dataset is unmounted; 1 otherwise
106#
107# $1 dataset name
108# $2 filesystem type; optional - defaulted to zfs
109
function unmounted
{
	typeset -i rc

	ismounted $1 $2
	rc=$?

	# Only an explicit "unmounted" answer (1) counts; both
	# "mounted" (0) and "error" (2) yield failure here.
	if ((rc == 1)); then
		return 0
	fi
	return 1
}
116
117# split line on ","
118#
119# $1 - line to split
120
function splitline
{
	typeset line=$1

	# Turn every comma into a space; the unquoted expansion keeps
	# the original word-splitting behaviour for the echo argument.
	echo $line | sed 's/,/ /g'
}
125
126function default_setup
127{
128	default_setup_noexit "$@"
129
130	log_pass
131}
132
133#
134# Given a list of disks, setup storage pools and datasets.
135#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2	# non-empty: also create $TESTCTR/$TESTFS1
	typeset volume=$3	# non-empty: also create $TESTVOL

	if is_global_zone; then
		# Start from a clean slate: destroy any leftover pool and
		# its stale mountpoint directory before re-creating it.
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		# NOTE(review): in a local zone the pool is not created
		# here; reexport_pool (defined elsewhere) is relied upon
		# to make it visible.
		reexport_pool
	fi

	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		rm -rf $TESTDIR1  || \
			log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
			log_unresolved Could not create $TESTDIR1

		# The container itself is unmountable (canmount=off); only
		# its child file system is mounted, at $TESTDIR1.
		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			# zvols are not available to local zones; create a
			# plain file system under the same name instead.
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
179
180#
181# Given a list of disks, setup a storage pool, file system and
182# a container.
183#
function default_container_setup
{
	typeset disks=$1

	# A non-empty second argument tells default_setup to create the
	# $TESTCTR container as well.
	default_setup "$disks" "true"
}
190
191#
192# Given a list of disks, setup a storage pool,file system
193# and a volume.
194#
function default_volume_setup
{
	typeset disks=$1

	# Empty container argument, non-empty volume argument: create a
	# pool, a file system and a volume, but no container.
	default_setup "$disks" "" "true"
}
201
202#
203# Given a list of disks, setup a storage pool,file system,
204# a container and a volume.
205#
function default_container_volume_setup
{
	typeset disks=$1

	# Both optional arguments are set: create the container and the
	# volume in addition to the pool and file system.
	default_setup "$disks" "true" "true"
}
212
213#
# Create a snapshot on a filesystem or volume. By default, the snapshot
# is taken on the filesystem.
216#
217# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
218# $2 snapshot name. Default, $TESTSNAP
219#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	# These can only trip when the defaults themselves expand empty
	# (i.e. the TEST* variables are unset).
	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	# Refuse to overwrite an existing snapshot, and require that the
	# dataset being snapshotted exists.
	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}
236
237#
238# Create a clone from a snapshot, default clone name is $TESTCLONE.
239#
240# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
241# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
242#
function create_clone   # snapshot clone
{
	typeset snapshot=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset target=${2:-$TESTPOOL/$TESTCLONE}

	# Empty names are only possible when the defaults expand empty.
	if [[ -z $snapshot ]]; then
		log_fail "Snapshot name is undefined."
	fi
	if [[ -z $target ]]; then
		log_fail "Clone name is undefined."
	fi

	log_must zfs clone $snapshot $target
}
255
256#
# Create a bookmark of the given snapshot. By default, the bookmark is
# created on the filesystem.
259#
260# $1 Existing filesystem or volume name. Default, $TESTFS
261# $2 Existing snapshot name. Default, $TESTSNAP
262# $3 bookmark name. Default, $TESTBKMARK
263#
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	# These only trip when the defaults themselves expand empty.
	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	if bkmarkexists $fs_vol#$bkmark; then
		log_fail "$fs_vol#$bkmark already exists."
	fi
	datasetexists $fs_vol || \
		log_fail "$fs_vol must exist."
	# A bookmark can only be created from an existing snapshot.
	snapexists $fs_vol@$snap || \
		log_fail "$fs_vol@$snap must exist."

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
284
285#
286# Create a temporary clone result of an interrupted resumable 'zfs receive'
287# $1 Destination filesystem name. Must not exist, will be created as the result
288#    of this function along with its %recv temporary clone
289# $2 Source filesystem name. Must not exist, will be created and destroyed
290#
function create_recv_clone
{
	typeset recvfs="$1"
	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
	typeset snap="$sendfs@snap1"
	typeset incr="$sendfs@snap2"
	typeset mountpoint="$TESTDIR/create_recv_clone"
	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"

	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."

	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
	datasetexists $sendfs && log_fail "Send filesystem must not exist."

	# Receive a full stream, then feed only the first 10k of an
	# incremental stream into a resumable receive (-s).  The truncated
	# stream guarantees the receive fails partway, leaving the
	# interrupted state behind as the %recv temporary clone.
	log_must zfs create -o mountpoint="$mountpoint" $sendfs
	log_must zfs snapshot $snap
	log_must eval "zfs send $snap | zfs recv -u $recvfs"
	log_must mkfile 1m "$mountpoint/data"
	log_must zfs snapshot $incr
	log_must eval "zfs send -i $snap $incr | dd bs=10k count=1 > $sendfile"
	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
	destroy_dataset "$sendfs" "-r"
	log_must rm -f "$sendfile"

	# A partially-received dataset is flagged 'inconsistent'; verify
	# the temporary clone was actually created.
	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
		log_fail "Error creating temporary $recvfs/%recv clone"
	fi
}
319
320function default_mirror_setup
321{
322	default_mirror_setup_noexit $1 $2 $3
323
324	log_pass
325}
326
327function default_mirror_2way_setup
328{
329	default_mirror_setup_noexit $1 $2
330
331	log_pass
332}
333
334#
335# Given a pair of disks, set up a storage pool and dataset for the mirror
336# @parameters: $1 the primary side of the mirror
337#   $2 the secondary side of the mirror
338# @uses: ZPOOL ZFS TESTPOOL TESTFS
339function default_mirror_setup_noexit
340{
341	readonly func="default_mirror_setup_noexit"
342	typeset primary=$1
343	typeset secondary=$2
344
345	[[ -z $primary ]] && \
346		log_fail "$func: No parameters passed"
347	[[ -z $secondary ]] && \
348		log_fail "$func: No secondary partition passed"
349	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
350	log_must zpool create -f $TESTPOOL mirror $@
351	log_must zfs create $TESTPOOL/$TESTFS
352	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
353}
354
355#
356# create a number of mirrors.
357# We create a number($1) of 2 way mirrors using the pairs of disks named
358# on the command line. These mirrors are *not* mounted
359# @parameters: $1 the number of mirrors to create
360#  $... the devices to use to create the mirrors on
361# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	# Pools are named $TESTPOOL<n> counting down from $1 to 1, each
	# consuming the next two devices from the argument list.
	while ((nmirrors > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		((nmirrors = nmirrors - 1))
	done
}
375
376#
377# create a number of raidz pools.
378# We create a number($1) of 2 raidz pools  using the pairs of disks named
379# on the command line. These pools are *not* mounted
380# @parameters: $1 the number of pools to create
381#  $... the devices to use to create the pools on
382# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	# Pools are named $TESTPOOL<n> counting down from $1 to 1, each
	# a two-disk raidz consuming the next two devices.
	while ((nraidzs > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		((nraidzs = nraidzs - 1))
	done
}
396
397#
398# Destroy the configured testpool mirrors.
399# the mirrors are of the form ${TESTPOOL}{number}
400# @uses: ZPOOL ZFS TESTPOOL
401function destroy_mirrors
402{
403	default_cleanup_noexit
404
405	log_pass
406}
407
408#
409# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
410# $1 the list of disks
411#
function default_raidz_setup
{
	typeset disklist="$*"
	# Split the disk string into an array purely to count entries.
	# NOTE(review): 'disks' is not typeset, so it leaks as a global;
	# appears harmless but worth confirming no caller depends on it.
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
428
429#
430# Common function used to cleanup storage pools and datasets.
431#
432# Invoked at the start of the test suite to ensure the system
433# is in a known state, and also at the end of each set of
434# sub-tests to ensure errors from one set of tests doesn't
435# impact the execution of the next set.
436
437function default_cleanup
438{
439	default_cleanup_noexit
440
441	log_pass
442}
443
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		# Build an egrep pattern from $KEEP naming the pools that
		# must survive cleanup.
		exclude=`eval echo \"'(${KEEP})'\"`
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		while [ ! -z ${ALL_POOLS} ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				# Re-list after each destroy: removing one
				# pool may make others safe to destroy.
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		# Local zone: we cannot destroy pools, so destroy the
		# zone-visible datasets and reset properties instead.
		typeset fs=""
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			destroy_dataset "$fs" "-Rf"
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# Only reset checksum when the dataset is not
				# encrypted (or has no encryption property).
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must rm -rf $TESTDIR
}
519
520
521#
522# Common function used to cleanup storage pools, file systems
523# and containers.
524#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	# Unmount $TESTFS1 first, but only if it is currently mounted.
	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	# Destroy the child before the container, recursively in both
	# cases (and forcibly for the container itself).
	destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
	destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}
543
544#
545# Common function used to cleanup snapshot of file system or volume. Default to
546# delete the file system's snapshot
547#
548# $1 snapshot name
549#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# Fixed message grammar ("does not existed." -> "does not exist.").
	if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
	fi

	#
	# The mountpoint property may not reflect the real mountpoint once
	# the snapshot has been unmounted, so only query it while the
	# snapshot is still mounted.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $snap failed."
	fi

	destroy_dataset $snap
	# Use 'if' rather than a bare '[[ ]] &&' so the function returns 0
	# on success even when there is no mountpoint directory to remove
	# (the old form returned 1 whenever the snapshot was unmounted).
	if [[ $mtpt != "" && -d $mtpt ]]; then
		log_must rm -rf $mtpt
	fi
}
574
575#
576# Common function used to cleanup clone.
577#
578# $1 clone name
579#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	# Fixed message grammar ("does not existed." -> "does not exist.").
	if ! datasetexists $clone; then
		log_fail "'$clone' does not exist."
	fi

	# As in destroy_snapshot: only query the mountpoint while the
	# clone is still mounted, since the property may be stale after
	# an unmount.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	destroy_dataset $clone
	# 'if' instead of '[[ ]] &&' so the function returns 0 on success
	# even when no mountpoint directory needs removing.
	if [[ $mtpt != "" && -d $mtpt ]]; then
		log_must rm -rf $mtpt
	fi
}
600
601#
602# Common function used to cleanup bookmark of file system or volume.  Default
603# to delete the file system's bookmark.
604#
605# $1 bookmark name
606#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	# Bug fix: the old message referenced undefined '$bkmarkp' (a typo
	# for '$bkmark'), so the failure message printed an empty name.
	# Grammar also fixed ("does not existed." -> "does not exist.").
	if ! bkmarkexists $bkmark; then
		log_fail "'$bkmark' does not exist."
	fi

	destroy_dataset $bkmark
}
617
618# Return 0 if a snapshot exists; $? otherwise
619#
620# $1 - snapshot name
621
function snapexists
{
	typeset snap=$1

	# The listing command's exit status is the function's result.
	zfs list -H -t snapshot "$snap" > /dev/null 2>&1
}
627
628#
629# Return 0 if a bookmark exists; $? otherwise
630#
631# $1 - bookmark name
632#
function bkmarkexists
{
	typeset bkmark=$1

	# The listing command's exit status is the function's result.
	zfs list -H -t bookmark "$bkmark" > /dev/null 2>&1
}
638
639#
640# Set a property to a certain value on a dataset.
641# Sets a property of the dataset to the value as passed in.
642# @param:
643#	$1 dataset who's property is being set
644#	$2 property to set
645#	$3 value to set property to
646# @return:
647#	0 if the property could be set.
648#	non-zero otherwise.
649# @use: ZFS
650#
function dataset_setprop
{
	typeset fn=dataset_setprop

	# Guard clause: dataset, property and value are all required.
	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi

	typeset output
	output=$(zfs set $2=$3 $1 2>&1)
	typeset rv=$?

	if ((rv == 0)); then
		return 0
	fi

	# Log full diagnostics before propagating zfs's exit status.
	log_note "Setting property on $1 failed."
	log_note "property $2=$3"
	log_note "Return Code: $rv"
	log_note "Output: $output"
	return $rv
}
671
672#
673# Assign suite defined dataset properties.
674# This function is used to apply the suite's defined default set of
675# properties to a dataset.
676# @parameters: $1 dataset to use
677# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
678# @returns:
679#   0 if the dataset has been altered.
680#   1 if no pool name was passed in.
681#   2 if the dataset could not be found.
682#   3 if the dataset could not have it's properties set.
683#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	# NOTE(review): this iterates over every whitespace-separated word
	# of the full 'zfs list' output (headers and all); it works because
	# dataset names appear as distinct words in that output.
	for confset in $(zfs list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	# Apply the suite defaults only for properties the suite defines.
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
			return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
			return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
711
712#
713# Check a numeric assertion
714# @parameter: $@ the assertion to check
715# @output: big loud notice if assertion failed
716# @use: log_fail
717#
function assert
{
	# Evaluate the arguments as an arithmetic expression; a false
	# (zero) result fails the test with the expression as message.
	if ! (($@)); then
		log_fail "$@"
	fi
}
722
723#
724# Function to format partition size of a disk
725# Given a disk cxtxdx reduces all partitions
726# to 0 size
727#
function zero_partitions #<whole_disk_name>
{
	typeset disk=$1
	typeset slice

	# Slice 2 is the traditional whole-disk slice and is left alone.
	for slice in 0 1 3 4 5 6 7; do
		set_partition $slice "" 0mb $disk
	done
}
738
739#
740# Given a slice, size and disk, this function
741# formats the slice to the specified size.
742# Size should be specified with units as per
743# the `format` command requirements eg. 100mb 3gb
744#
745function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
746{
747	typeset -i slicenum=$1
748	typeset start=$2
749	typeset size=$3
750	typeset disk=$4
751	[[ -z $slicenum || -z $size || -z $disk ]] && \
752	    log_fail "The slice, size or disk name is unspecified."
753	typeset format_file=/var/tmp/format_in.$$
754
755	echo "partition" >$format_file
756	echo "$slicenum" >> $format_file
757	echo "" >> $format_file
758	echo "" >> $format_file
759	echo "$start" >> $format_file
760	echo "$size" >> $format_file
761	echo "label" >> $format_file
762	echo "" >> $format_file
763	echo "q" >> $format_file
764	echo "q" >> $format_file
765
766	format -e -s -d $disk -f $format_file
767	typeset ret_val=$?
768	rm -f $format_file
769	[[ $ret_val -ne 0 ]] && \
770	    log_fail "Unable to format $disk slice $slicenum to $size"
771	return 0
772}
773
774#
775# Get the end cyl of the given slice
776#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	# Normalize the disk argument down to the bare cxtxdx name.
	disk=${disk#/dev/dsk/}
	disk=${disk#/dev/rdsk/}
	disk=${disk%s*}

	# Sectors-per-cylinder ratio from the VTOC header.
	typeset -i ratio=0
	ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
		grep "sectors\/cylinder" | \
		awk '{print $2}')

	# Silently emit nothing if the ratio could not be determined.
	if ((ratio == 0)); then
		return
	fi

	# Column 6 of 'prtvtoc -h' is the slice's last sector; convert
	# from sectors to cylinders using the ratio above.
	typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
		nawk -v token="$slice" '{if ($1==token) print $6}')

	((endcyl = (endcyl + 1) / ratio))
	echo $endcyl
}
804
805
806#
807# Given a size,disk and total slice number,  this function formats the
808# disk slices from 0 to the total slice number with the same specified
809# size.
810#
function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
{
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset -i slice=0
	typeset prev_end

	zero_partitions $disk_name
	while ((slice < total_slices)); do
		# Slice 2 is the whole-disk slice; never reformat it.
		if ((slice != 2)); then
			set_partition $slice "$prev_end" $slice_size \
			    $disk_name
			# Each new slice starts where the previous one ended.
			prev_end=$(get_endslice $disk_name $slice)
		fi
		((slice += 1))
	done
}
830
831#
832# This function continues to write to a filenum number of files into dirnum
833# number of directories until either file_write returns an error or the
834# maximum number of files per directory have been written.
835#
836# Usage:
837# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
838#
839# Return value: 0 on success
840#		non 0 on error
841#
842# Where :
843#	destdir:    is the directory where everything is to be created under
844#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
845#	filenum:    the maximum number of files per subdirectory
846#	bytes:	    number of bytes to write
#	num_writes: number of times to write out bytes
#	data:	    the data that will be written
#
#	E.g.
#	fill_fs /testdir 20 25 1024 256 0
852#
853# Note: bytes * num_writes equals the size of the testfile
854#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset data=${6:-0}

	typeset -i odirnum=1	# outer-loop control flag
	typeset -i idirnum=0	# index of the current subdirectory
	typeset -i fn=0		# index of the current file
	typeset -i retval=0

	mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# A negative dirnum means no limit on subdirectories.
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# Stop on the first write failure and report its status.
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		# Move to a fresh subdirectory once filenum files exist.
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
892
893#
894# Simple function to get the specified property. If unable to
895# get the property then exits.
896#
897# Note property is in 'parsable' format (-p)
898#
function get_prop # property dataset
{
	typeset prop=$1
	typeset dataset=$2
	typeset prop_val

	# -p gives parsable output, -H drops the header line.
	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
	if (($? != 0)); then
		log_note "Unable to get $prop property for dataset " \
		"$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
915
916#
917# Simple function to get the specified property of pool. If unable to
918# get the property then exits.
919#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		# Take the value column of the last output line.
		prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
			awk '{print $3}')
		# NOTE(review): $? here reflects the awk stage of the
		# pipeline rather than zpool, so this error branch is
		# unlikely to ever fire — confirm before relying on it.
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			"$pool"
			return 1
		fi
	else
		log_note "Pool $pool not exists."
		return 1
	fi

	echo $prop_val
	return 0
}
942
943# Return 0 if a pool exists; $? otherwise
944#
945# $1 - pool name
946
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# The query's exit status is the function's return value.
	zpool get name "$pool" > /dev/null 2>&1
}
959
960# Return 0 if all the specified datasets exist; $? otherwise
961#
962# $1-n  dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Fail fast with zfs's own status on the first missing dataset.
	typeset ds
	for ds in "$@"; do
		zfs get name $ds > /dev/null 2>&1 || \
			return $?
	done

	return 0
}
978
979# return 0 if none of the specified datasets exists, otherwise return 1.
980#
981# $1-n  dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	# Any dataset that does exist makes the whole check fail.
	typeset ds
	for ds in "$@"; do
		if zfs list -H -t filesystem,snapshot,volume $ds \
		    > /dev/null 2>&1; then
			return 1
		fi
	done

	return 0
}
997
998#
999# Given a mountpoint, or a dataset name, determine if it is shared.
1000#
1001# Returns 0 if shared, 1 otherwise.
1002#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	# A dataset name is first translated to its mountpoint; datasets
	# that do not exist or are not mountable can never be shared.
	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	# Column 2 of share(1M) output is the shared pathname.
	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	# Not shared; note the NFS server's SMF state as a debugging aid.
	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
1035
1036#
1037# Given a mountpoint, determine if it is not shared.
1038#
1039# Returns 0 if not shared, 1 otherwise.
1040#
function not_shared
{
	typeset fs=$1

	# Logical negation of is_shared.
	if is_shared $fs; then
		return 1
	fi

	return 0
}
1052
1053#
1054# Helper function to unshare a mountpoint.
1055#
function unshare_fs #fs
{
	typeset fs=$1

	# Only unshare when currently shared; always succeed otherwise.
	if is_shared $fs; then
		log_must zfs unshare $fs
	fi

	return 0
}
1067
1068#
1069# Check NFS server status and trigger it online.
1070#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	#
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only really sharing operation can enable NFS server
		# to online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		# Share a throwaway directory purely to drive the service
		# online, then undo the share.
		log_must mkdir $dummy
		log_must share $dummy

		#
		# Waiting for fmri's status to be the final status.
		# Otherwise, in transition, an asterisk (*) is appended for
		# instances, unshare will reverse status to 'DIS' again.
		#
		# Waiting for 1's at least.
		#
		log_must sleep 1
		timeout=10
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
1117
1118#
1119# To verify whether calling process is in global zone
1120#
1121# Return 0 if in global zone, 1 in non-global zone
1122#
function is_global_zone
{
	# Succeeds only when zonename reports "global"; any other zone
	# name — or a failing/absent zonename — yields 1.
	[[ $(zonename 2>/dev/null) == "global" ]]
}
1131
1132#
1133# Verify whether test is permitted to run from
1134# global zone, local zone, or both
1135#
1136# $1 zone limit, could be "global", "local", or "both"(no limit)
1137#
1138# Return 0 if permitted, otherwise exit with log_unsupported
1139#
function verify_runnable # zone limit
{
	typeset limit=$1

	# No limit given means the test may run anywhere.
	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from "\
					"global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from "\
					"local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
					"use both."
				;;
		esac

		# NOTE(review): reexport_pool is defined elsewhere;
		# presumably it refreshes the local zone's view of the
		# pool before the test runs — confirm against its source.
		reexport_pool
	fi

	return 0
}
1174
1175# Return 0 if create successfully or the pool exists; $? otherwise
1176# Note: In local zones, this function should return 0 silently.
1177#
1178# $1 - pool name
1179# $2-n - [keyword] devs_list
1180
function create_pool #pool devs_list
{
	# Strip any dataset components; only the pool name is wanted.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Recreate from scratch if a pool of this name already exists.
	poolexists $pool && destroy_pool $pool

	# Pools can only be created from the global zone.
	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1203
1204# Return 0 if destroy successfully or the pool exists; $? otherwise
1205# Note: In local zones, this function should return 0 silently.
1206#
1207# $1 - pool name
1208# Destroy pool with the given parameters.
1209
function destroy_pool #pool
{
	# Strip any dataset components; only the pool name is wanted.
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			# Record the mountpoint first so its directory can
			# be removed after the pool is gone.
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			must=""
			while [[ $ret -ne 0 ]]; do
				# After 7 tolerated failures, run under
				# log_must so the next failure aborts the
				# test instead of looping forever.
				$must zpool destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				[[ count++ -ge 7 ]] && must=log_must
				sleep $wait_time
			done

			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1249
1250# Return 0 if created successfully; $? otherwise
1251#
1252# $1 - dataset name
1253# $2-n - dataset options
1254
function create_dataset #dataset dataset_options
{
	typeset dataset=$1

	shift

	if [[ -z $dataset ]]; then
		log_note "Missing dataset name."
		return 1
	fi

	# Recreate from scratch if the dataset already exists.
	datasetexists $dataset && destroy_dataset $dataset

	# Remaining arguments are passed straight to 'zfs create'.
	log_must zfs create $@ $dataset

	return 0
}
1274
1275# Return 0 if destroy successfully or the dataset exists; $? otherwise
1276# Note: In local zones, this function should return 0 silently.
1277#
1278# $1 - dataset name
1279# $2 - custom arguments for zfs destroy
1280# Destroy dataset with the given parameters.
1281
function destroy_dataset #dataset #args
{
	typeset ds=$1
	typeset mntpnt
	typeset destroy_args=${2:-""}

	if [[ -z $ds ]]; then
		log_note "No dataset name given."
		return 1
	fi

	# Datasets are only managed from the global zone; local zones
	# succeed silently.
	if ! is_global_zone ; then
		return 0
	fi

	if ! datasetexists "$ds" ; then
		log_note "Dataset does not exist. ($ds)"
		return 1
	fi

	mntpnt=$(get_prop mountpoint "$ds")
	log_must zfs destroy $destroy_args $ds

	# Remove the now-stale mountpoint directory, if one remains.
	[[ -d $mntpnt ]] && log_must rm -rf $mntpnt

	return 0
}
1308
1309#
1310# Firstly, create a pool with 5 datasets. Then, create a single zone and
1311# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1312# and a zvol device to the zone.
1313#
1314# $1 zone name
1315# $2 zone root directory prefix
1316# $3 zone ip
1317#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	# Remove any leftover zone directory, then create a fresh one
	# with the 0700 mode zoneadm requires of a zonepath.
	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the 5 containers to the zone as a dataset.
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file
	# (answers the system-identification questions so first boot
	# is non-interactive)
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo  "terminal=dtterm" >> $sysidcfg
	echo  "network_interface=primary {" >> $sysidcfg
	echo  "hostname=$zone_name" >> $sysidcfg
	echo  "}" >> $sysidcfg
	echo  "name_service=NONE" >> $sysidcfg
	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo  "security_policy=NONE" >> $sysidcfg
	echo  "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1421
1422#
1423# Reexport TESTPOOL & TESTPOOL(1-4)
1424#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	# Container 0 becomes TESTPOOL; containers 1..4 become
	# TESTPOOL1..TESTPOOL4. Mount each if not already mounted.
	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# eval is required to assign and later expand the
			# computed variable name TESTPOOL$i.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1445
1446#
1447# Verify a given disk is online or offline
1448#
1449# Return 0 is pool/disk matches expected state, 1 otherwise
1450#
function check_state # pool disk state{online,offline}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Find the device's line in the status output and check that it
	# mentions the expected state (case-insensitively). The exit
	# status of the pipeline is the function's result.
	zpool status -v $pool | grep "$disk" | \
	    grep -i "$state" > /dev/null 2>&1
}
1462
1463#
1464# Get the mountpoint of snapshot
1465# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1466# as its mountpoint
1467#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# A snapshot name must contain an '@' separator.
	case $dataset in
	*@*)	;;
	*)	log_fail "Error name of snapshot '$dataset'." ;;
	esac

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	# Both halves of <fs>@<snap> must be non-empty.
	if [[ -z $fs || -z $snap ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	# Snapshots appear under <fs mountpoint>/.zfs/snapshot/<snap>.
	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1485
1486#
1487# Given a device and 'ashift' value verify it's correctly set on every label
1488#
1489function verify_ashift # device ashift
1490{
1491	typeset device="$1"
1492	typeset ashift="$2"
1493
1494	zdb -e -lll $device | nawk -v ashift=$ashift '/ashift: / {
1495	    if (ashift != $2)
1496	        exit 1;
1497	    else
1498	        count++;
1499	    } END {
1500	    if (count != 4)
1501	        exit 1;
1502	    else
1503	        exit 0;
1504	    }'
1505
1506	return $?
1507}
1508
1509#
1510# Given a pool and file system, this function will verify the file system
1511# using the zdb internal tool. Note that the pool is exported and imported
1512# to ensure it has consistent state.
1513#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	# Remaining arguments are extra directories to search for the
	# pool's devices on import (passed to zpool import -d).
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	# Export and re-import the pool so zdb sees consistent
	# on-disk state.
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	# Run zdb's consistency checks; any non-zero exit is treated
	# as corruption and fails the test after dumping the output.
	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1547
1548#
1549# Given a pool issue a scrub and verify that no checksum errors are reported.
1550#
function verify_pool
{
	typeset pool=${1:-$TESTPOOL}
	typeset cksum

	log_must zpool scrub $pool
	log_must wait_scrubbed $pool

	# The line following the column-header line (whose 5th field is
	# "CKSUM") carries the pool-wide checksum error count in its
	# last field.
	cksum=$(zpool status $pool | \
	    awk '{if ($5 == "CKSUM"){L=1; next} if (L) {print $NF;L=0}}')
	if [[ $cksum != 0 ]]; then
		# Show the status of the pool under test (previously no
		# pool argument was passed, printing every pool).
		log_must zpool status -v $pool
		log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
	fi
}
1565
1566#
1567# Given a pool, and this function list all disks in the pool
1568#
function get_disklist # pool
{
	typeset devlist=""

	# Skip the four header lines of 'zpool iostat -v', then drop
	# the dashed separator lines and the vdev-type keywords so
	# only leaf device names remain.
	devlist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
	    grep -v "\-\-\-\-\-" | \
	    egrep -v -e "^(mirror|raidz[1-3]|spare|log|cache|special|dedup)$")

	# The unquoted expansion flattens the names onto one line.
	echo $devlist
}
1579
1580# /**
1581#  This function kills a given list of processes after a time period. We use
1582#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
1583#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1584#  would be listed as FAIL, which we don't want : we're happy with stress tests
1585#  running for a certain amount of time, then finishing.
1586#
1587# @param $1 the time in seconds after which we should terminate these processes
1588# @param $2..$n the processes we wish to terminate.
1589# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	# Let the stress workload run for the full interval.
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	for pid in $cpids; do
		# Only signal processes that are still alive.
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
1609
1610#
1611# Verify a given hotspare disk is inuse or avail
1612#
1613# Return 0 is pool/disk matches expected state, 1 otherwise
1614#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is deliberately not typeset, matching the other
	# check_*_state helpers in this file.
	cur_state=$(get_device_state $pool $disk "spares")

	# Success iff the spare reports exactly the expected state.
	[[ $state == ${cur_state} ]]
}
1628
1629#
1630# Wait until a hotspare transitions to a given state or times out.
1631#
1632# Return 0 when  pool/disk matches expected state, 1 on timeout.
1633#
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	# Strip an optional /dev/dsk/ prefix, as the check_*_state
	# helpers do. (The previous "${2#$/DEV_DSKDIR/}" pattern was a
	# typo — a literal "$/DEV_DSKDIR/" — that never matched.)
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the spare reaches the wanted
	# state, giving up after $timeout seconds.
	while [[ $i -lt $timeout ]]; do
		if check_hotspare_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1653
1654#
1655# Verify a given slog disk is inuse or avail
1656#
1657# Return 0 is pool/disk matches expected state, 1 otherwise
1658#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is deliberately not typeset, matching the other
	# check_*_state helpers in this file.
	cur_state=$(get_device_state $pool $disk "logs")

	# Success iff the slog device reports the expected state.
	[[ $state == ${cur_state} ]]
}
1672
1673#
1674# Verify a given vdev disk is inuse or avail
1675#
1676# Return 0 is pool/disk matches expected state, 1 otherwise
1677#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# cur_state is deliberately not typeset, matching the other
	# check_*_state helpers in this file.
	cur_state=$(get_device_state $pool $disk)

	# Success iff the vdev reports the expected state.
	[[ $state == ${cur_state} ]]
}
1691
1692#
1693# Wait until a vdev transitions to a given state or times out.
1694#
1695# Return 0 when  pool/disk matches expected state, 1 on timeout.
1696#
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	# Strip an optional /dev/dsk/ prefix, as the check_*_state
	# helpers do. (The previous "${2#$/DEV_DSKDIR/}" pattern was a
	# typo — a literal "$/DEV_DSKDIR/" — that never matched.)
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the vdev reaches the wanted
	# state, giving up after $timeout seconds.
	while [[ $i -lt $timeout ]]; do
		if check_vdev_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1716
1717#
1718# Check the output of 'zpool status -v <pool>',
1719# and to see if the content of <token> contain the <keyword> specified.
1720#
1721# Return 0 is contain, 1 otherwise
1722#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}

	# Pull the "<token>: ..." line out of the status output.
	# scan is deliberately not typeset (historical behavior).
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	[[ $verbose == true ]] && log_note $scan

	# Success iff the line mentions the keyword, case-insensitive.
	echo $scan | grep -i "$keyword" > /dev/null 2>&1
}
1739
1740#
1741# These 6 following functions are instance of check_pool_status()
1742#	is_pool_resilvering - to check if the pool is resilver in progress
1743#	is_pool_resilvered - to check if the pool is resilver completed
1744#	is_pool_scrubbing - to check if the pool is scrub in progress
1745#	is_pool_scrubbed - to check if the pool is scrub completed
1746#	is_pool_scrub_stopped - to check if the pool is scrub stopped
1747#	is_pool_scrub_paused - to check if the pool has scrub paused
1748#	is_pool_removing - to check if the pool is removing a vdev
1749#	is_pool_removed - to check if the pool is remove completed
1750#
function is_pool_resilvering #pool <verbose>
{
	# Thin wrapper; the helper's exit status is propagated.
	check_pool_status "$1" "scan" "resilver in progress since " $2
}
1756
function is_pool_resilvered #pool <verbose>
{
	# Thin wrapper; the helper's exit status is propagated.
	check_pool_status "$1" "scan" "resilvered " $2
}
1762
function is_pool_scrubbing #pool <verbose>
{
	# Thin wrapper; the helper's exit status is propagated.
	check_pool_status "$1" "scan" "scrub in progress since " $2
}
1768
function is_pool_scrubbed #pool <verbose>
{
	# Thin wrapper; the helper's exit status is propagated.
	check_pool_status "$1" "scan" "scrub repaired" $2
}
1774
function is_pool_scrub_stopped #pool <verbose>
{
	# Thin wrapper; the helper's exit status is propagated.
	check_pool_status "$1" "scan" "scrub canceled" $2
}
1780
function is_pool_scrub_paused #pool <verbose>
{
	# Thin wrapper; the helper's exit status is propagated.
	check_pool_status "$1" "scan" "scrub paused since " $2
}
1786
function is_pool_removing #pool
{
	# Thin wrapper; the helper's exit status is propagated.
	check_pool_status "$1" "remove" "in progress since "
}
1792
function is_pool_removed #pool
{
	# Thin wrapper; the helper's exit status is propagated.
	check_pool_status "$1" "remove" "completed on"
}
1798
function wait_for_degraded
{
	typeset pool=$1
	typeset timeout=${2:-30}
	typeset t0=$SECONDS

	# Poll once a second until the pool health reads DEGRADED,
	# giving up after $timeout seconds.
	until [[ $(get_pool_prop health $pool) == "DEGRADED" ]]; do
		log_note "$pool is not yet degraded."
		sleep 1
		if ((SECONDS - t0 > $timeout)); then
			log_note "$pool not degraded after $timeout seconds."
			return 1
		fi
	done

	return 0
}
1817
1818#
1819# Wait for a pool to be scrubbed
1820#
1821# $1 pool name
1822# $2 number of seconds to wait (optional)
1823#
1824# Returns true when pool has been scrubbed, or false if there's a timeout or if
1825# no scrub was done.
1826#
function wait_scrubbed
{
	typeset pool=${1:-$TESTPOOL}
	typeset -i timeout=${2:-0}	# 0 (default) means wait forever
	typeset -i elapsed=0

	# Poll once per second until the scrub completes. The header
	# above documents an optional timeout in $2, which was
	# previously ignored; honor it, returning failure on expiry
	# while remaining backward compatible (no $2 -> old behavior).
	while true ; do
		is_pool_scrubbed $pool && break
		if ((timeout > 0 && elapsed >= timeout)); then
			return 1
		fi
		log_must sleep 1
		((elapsed += 1))
	done
	return 0
}
1835
1836#
# Use create_pool()/destroy_pool() to clean up the information
# in the given disk to avoid slice overlapping.
1839#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Tear down any leftover pool of the same name first.
	poolexists $pool && destroy_pool $pool

	# Creating and immediately destroying a scratch pool rewrites
	# the labels on the given devices.
	create_pool $pool $@
	destroy_pool $pool

	return 0
}
1853
1854#/**
1855# A function to find and locate free disks on a system or from given
1856# disks as the parameter. It works by locating disks that are in use
1857# as swap devices and dump devices, and also disks listed in /etc/vfstab
1858#
1859# $@ given disks to find which are free, default is all disks in
1860# the test system
1861#
1862# @return a string containing the list of available disks
1863#*/
1864function find_disks
1865{
1866	sfi=/tmp/swaplist.$$
1867	dmpi=/tmp/dumpdev.$$
1868	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1869
1870	swap -l > $sfi
1871	dumpadm > $dmpi 2>/dev/null
1872
1873# write an awk script that can process the output of format
1874# to produce a list of disks we know about. Note that we have
1875# to escape "$2" so that the shell doesn't interpret it while
1876# we're creating the awk script.
1877# -------------------
1878	cat > /tmp/find_disks.awk <<EOF
1879#!/bin/nawk -f
1880	BEGIN { FS="."; }
1881
1882	/^Specify disk/{
1883		searchdisks=0;
1884	}
1885
1886	{
1887		if (searchdisks && \$2 !~ "^$"){
1888			split(\$2,arr," ");
1889			print arr[1];
1890		}
1891	}
1892
1893	/^AVAILABLE DISK SELECTIONS:/{
1894		searchdisks=1;
1895	}
1896EOF
1897#---------------------
1898
1899	chmod 755 /tmp/find_disks.awk
1900	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
1901	rm /tmp/find_disks.awk
1902
1903	unused=""
1904	for disk in $disks; do
1905	# Check for mounted
1906		grep "${disk}[sp]" /etc/mnttab >/dev/null
1907		(($? == 0)) && continue
1908	# Check for swap
1909		grep "${disk}[sp]" $sfi >/dev/null
1910		(($? == 0)) && continue
1911	# check for dump device
1912		grep "${disk}[sp]" $dmpi >/dev/null
1913		(($? == 0)) && continue
1914	# check to see if this disk hasn't been explicitly excluded
1915	# by a user-set environment variable
1916		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
1917		(($? == 0)) && continue
1918		unused_candidates="$unused_candidates $disk"
1919	done
1920	rm $sfi
1921	rm $dmpi
1922
1923# now just check to see if those disks do actually exist
1924# by looking for a device pointing to the first slice in
1925# each case. limit the number to max_finddisksnum
1926	count=0
1927	for disk in $unused_candidates; do
1928		if [ -b /dev/dsk/${disk}s0 ]; then
1929		if [ $count -lt $max_finddisksnum ]; then
1930			unused="$unused $disk"
1931			# do not impose limit if $@ is provided
1932			[[ -z $@ ]] && ((count = count + 1))
1933		fi
1934		fi
1935	done
1936
1937# finally, return our disk list
1938	echo $unused
1939}
1940
1941#
1942# Add specified user to specified group
1943#
1944# $1 group name
1945# $2 user name
1946# $3 base of the homedir (optional)
1947#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	if [[ -z $gname || -z $uname ]]; then
		log_fail "group name or user name are not defined."
	fi

	log_must useradd -g $gname -d $basedir/$uname -m $uname
	# Clear the password so the new account is immediately usable.
	log_must passwd -N $uname

	return 0
}
1963
1964#
1965# Delete the specified user.
1966#
1967# $1 login name
1968# $2 base of the homedir (optional)
1969#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	[[ -z $user ]] && log_fail "login name is necessary."

	# Only remove the account if it actually exists.
	if id $user > /dev/null 2>&1; then
		log_must userdel $user
	fi

	# Always clean up the home directory, even for a missing user.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
1987
1988#
1989# Select valid gid and create specified group.
1990#
1991# $1 group name
1992#
function add_group #<group_name>
{
	typeset group=$1

	[[ -z $group ]] && log_fail "group name is necessary."

	# Assign 100 as the base gid and walk upward until groupadd
	# accepts a free gid; exit code 4 means the gid is taken.
	typeset -i gid=100
	typeset -i rc
	while true; do
		groupadd -g $gid $group > /dev/null 2>&1
		rc=$?
		case $rc in
			0) return 0 ;;
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
2014
2015#
2016# Delete the specified group.
2017#
2018# $1 group name
2019#
function del_group #<group_name>
{
	typeset grp=$1

	[[ -z $grp ]] && log_fail "group name is necessary."

	# Probe for the group's existence with a no-op rename.
	groupmod -n $grp $grp > /dev/null 2>&1
	typeset -i rc=$?
	case $rc in
		# Exit 6: group does not exist -- nothing to delete.
		6) return 0 ;;
		# Exit 9: name exists as a group -- remove it.
		9) log_must groupdel $grp ;;
		*) return 1 ;;
	esac

	return 0
}
2039
2040#
2041# This function will return true if it's safe to destroy the pool passed
2042# as argument 1. It checks for pools based on zvols and files, and also
2043# files contained in a pool that may have a different mountpoint.
2044#
2045function safe_to_destroy_pool { # $1 the pool name
2046
2047	typeset pool=""
2048	typeset DONT_DESTROY=""
2049
2050	# We check that by deleting the $1 pool, we're not
2051	# going to pull the rug out from other pools. Do this
2052	# by looking at all other pools, ensuring that they
2053	# aren't built from files or zvols contained in this pool.
2054
2055	for pool in $(zpool list -H -o name)
2056	do
2057		ALTMOUNTPOOL=""
2058
2059		# this is a list of the top-level directories in each of the
2060		# files that make up the path to the files the pool is based on
2061		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2062			awk '{print $1}')
2063
2064		# this is a list of the zvols that make up the pool
2065		ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
2066		    | awk '{print $1}')
2067
2068		# also want to determine if it's a file-based pool using an
2069		# alternate mountpoint...
2070		POOL_FILE_DIRS=$(zpool status -v $pool | \
2071					grep / | awk '{print $1}' | \
2072					awk -F/ '{print $2}' | grep -v "dev")
2073
2074		for pooldir in $POOL_FILE_DIRS
2075		do
2076			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2077					grep "${pooldir}$" | awk '{print $1}')
2078
2079			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2080		done
2081
2082
2083		if [ ! -z "$ZVOLPOOL" ]
2084		then
2085			DONT_DESTROY="true"
2086			log_note "Pool $pool is built from $ZVOLPOOL on $1"
2087		fi
2088
2089		if [ ! -z "$FILEPOOL" ]
2090		then
2091			DONT_DESTROY="true"
2092			log_note "Pool $pool is built from $FILEPOOL on $1"
2093		fi
2094
2095		if [ ! -z "$ALTMOUNTPOOL" ]
2096		then
2097			DONT_DESTROY="true"
2098			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2099		fi
2100	done
2101
2102	if [ -z "${DONT_DESTROY}" ]
2103	then
2104		return 0
2105	else
2106		log_note "Warning: it is not safe to destroy $1!"
2107		return 1
2108	fi
2109}
2110
2111#
2112# Get the available ZFS compression options
2113# $1 option type zfs_set|zfs_compress
2114#
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
			gzip-6 gzip-7 gzip-8 gzip-9"

	# The zfs_set flavor additionally includes "off".
	case $1 in
	zfs_compress)
		COMPRESS_OPTS="on lzjb"
		;;
	zfs_set)
		COMPRESS_OPTS="on off lzjb"
		;;
	esac

	typeset valid_opts="$COMPRESS_OPTS"
	# Append the gzip variants when this zfs build mentions gzip
	# in its usage output.
	if zfs get 2>&1 | grep gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
2133
2134#
2135# Verify zfs operation with -p option work as expected
2136# $1 operation, value could be create, clone or rename
2137# $2 dataset type, value could be fs or vol
2138# $3 dataset name
2139# $4 new dataset name
2140#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# For create there is no source dataset; the name
			# under test becomes the target.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# clone requires an existing snapshot source.
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# rename requires an existing non-snapshot source.
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	destroy_dataset ${newdataset%/*} "-rRf"

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2202
2203#
2204# Get configuration of pool
2205# $1 pool name
2206# $2 config name
2207#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# The last column of 'zpool list -H' is the altroot; "-" means
	# no alternate root is set.
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		# For altroot pools, -e makes zdb locate the pool by
		# scanning device labels instead of the cache file.
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	if [[ -n $value ]] ; then
		# Strip the surrounding single quotes from the zdb
		# output. NOTE(review): the unbalanced quote inside
		# ${...} appears to rely on ksh93's parsing; this form
		# is not valid under bash -- do not "fix" casually.
		value=${value#'}
		value=${value%'}
	fi
	echo $value

	return 0
}
2233
2234#
# Private function. Randomly select one of the items passed as arguments.
2236#
2237# $1 count
2238# $2-n string
2239#
function _random_get
{
	typeset -i total=$1
	shift

	typeset items="$@"
	typeset -i pick

	# Choose a 1-based field index at random.
	((pick = RANDOM % total + 1))

	# cut extracts the chosen space-separated field.
	echo $(echo "$items" | cut -f $pick -d ' ')
}
2252
2253#
# Randomly select one of the items from the arguments, allowing an extra
# empty "NONE" choice
2255#
function random_get_with_non
{
	typeset -i cnt=$#

	# Count one extra slot beyond the arguments so the selection
	# can land past the list and yield the empty "NONE" choice.
	# (The previous "((cnt =+ 1))" assigned +1 instead of
	# incrementing, always collapsing the choice count to 1.)
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2263
2264#
# Randomly select one of the items from the arguments (no empty "NONE" choice)
2266#
function random_get
{
	# Uniformly pick one of the supplied arguments.
	_random_get "$#" "$@"
}
2271
2272#
2273# Detect if the current system support slog
2274#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b
	typeset -i ret=0

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	# A dry-run (-n) pool creation with a log vdev reveals whether
	# slogs are supported without actually creating the pool.
	zpool create -n $pool $vdev log $sdev > /dev/null 2>&1 || ret=1

	rm -r $dir

	return $ret
}
2293
2294#
2295# The function will generate a dataset name with specific length
2296# $1, the length of the name
2297# $2, the base string to construct the name
2298#
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i reps
	typeset l_name=""

	# Repeat the base string ceil(len / baselen) times so the
	# result is at least $len characters long.
	((reps = (len + baselen - 1) / baselen))

	while ((reps > 0)); do
		l_name="${l_name}${basestr}"
		((reps -= 1))
	done

	echo $l_name
}
2320
2321#
2322# Get cksum tuple of dataset
2323# $1 dataset name
2324#
2325# sample zdb output:
2326# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2327# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2328# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2329# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2330function datasetcksum
2331{
2332	typeset cksum
2333	sync
2334	sync_all_pools
2335	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2336		| awk -F= '{print $7}')
2337	echo $cksum
2338}
2339
2340#
2341# Get cksum of file
# $1 file path
2343#
function checksum
{
	typeset sum

	# First field of cksum(1) output is the CRC value.
	sum=$(cksum $1 | awk '{print $1}')
	echo $sum
}
2350
2351#
2352# Get the given disk/slice state from the specific field of the pool
2353#
2354function get_device_state #pool disk field("", "spares","logs")
2355{
2356	typeset pool=$1
2357	typeset disk=${2#/dev/dsk/}
2358	typeset field=${3:-$pool}
2359
2360	state=$(zpool status -v "$pool" 2>/dev/null | \
2361		nawk -v device=$disk -v pool=$pool -v field=$field \
2362		'BEGIN {startconfig=0; startfield=0; }
2363		/config:/ {startconfig=1}
2364		(startconfig==1) && ($1==field) {startfield=1; next;}
2365		(startfield==1) && ($1==device) {print $2; exit;}
2366		(startfield==1) &&
2367		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2368	echo $state
2369}
2370
2371
2372#
2373# print the given directory filesystem type
2374#
2375# $1 directory name
2376#
function get_fstype
{
	typeset dir=$1

	[[ -z $dir ]] && log_fail "Usage: get_fstype <directory>"

	#
	#  $ df -n /
	#  /		  : ufs
	#
	# The filesystem type is the third whitespace-separated field.
	df -n $dir | awk '{print $3}'
}
2391
2392#
2393# Given a disk, label it to VTOC regardless what label was on the disk
2394# $1 disk
2395#
function labelvtoc
{
	typeset disk=$1
	typeset -i iter=120
	typeset -i ret_val=1

	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	# Build an answer file for format(1M)'s interactive label
	# dialog; the prompts differ between x86 and sparc.
	if [[ $arch == "i386" ]]; then
		# x86 disks need an fdisk partition first.
		log_must fdisk -B ${disk}p0

		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file
	elif [[ $arch == "sparc" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	# Disk update from fdisk -B may be delayed
	# Retry once per second for up to 120 seconds.
	while ((iter > 0)); do
		if format -e -s -d $disk -f $label_file ; then
			iter=0
			ret_val=0
		else
			sleep 1
			((iter -= 1))
		fi
	done
	rm -f $label_file
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2444
2445#
2446# check if the system was installed as zfsroot or not
# return: 0 if true, non-zero otherwise
2448#
function is_zfsroot
{
	# Root is on zfs iff 'df -n /' reports a zfs filesystem; the
	# grep exit status is the function's result.
	df -n / | grep zfs > /dev/null 2>&1
}
2454
2455#
2456# get the root filesystem name if it's zfsroot system.
2457#
2458# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	# Find the zfs dataset mounted at / in the mount table.
	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	[[ -z "$rootfs" ]] && log_fail "Can not get rootfs"

	# Confirm the dataset is actually visible to zfs(1M).
	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2474
2475#
2476# get the rootfs's pool name
2477# return:
2478#       rootpool name
2479#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	# Find the zfs dataset mounted at / in the mount table.
	rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		 /etc/mnttab)
	[[ -z "$rootfs" ]] && log_fail "Can not get rootpool"

	if zfs list $rootfs > /dev/null 2>&1; then
		# The pool name is everything before the first '/'.
		rootpool=$(echo $rootfs | awk -F\/ '{print $1}')
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2497
2498#
2499# Check if the given device is physical device
2500#
function is_physical_device #device
{
	typeset device=${1#/dev/dsk/}
	device=${device#/dev/rdsk/}

	# Physical device names look like c#t#d# (controller, target,
	# disk) with hex digits; anything else (files, slices) fails.
	echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
}
2509
2510#
2511# Get the directory path of given device
2512#
function get_device_dir #device
{
	typeset device=$1

	# Call the helper directly for its exit status. The previous
	# "if ! $(is_physical_device ...)" form executed the (empty)
	# output of the command substitution and depended on the shell
	# propagating the substitution's status -- same behavior, far
	# more obscure.
	if ! is_physical_device $device ; then
		# File-backed vdev: return its directory component.
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		echo $device
	else
		echo "/dev/dsk"
	fi
}
2526
2527#
2528# Get the package name
2529#
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	# Turn the path separators into dashes to form the package name.
	echo "SUNWstc-${dirpath//\//-}"
}
2536
2537#
2538# Get the word numbers from a string separated by white space
2539#
function get_word_count
{
	typeset words="$1"

	# wc -w counts whitespace-separated words on stdin.
	echo $words | wc -w
}
2544
2545#
2546# To verify if the require numbers of disks is given
2547#
function verify_disk_count
{
	typeset -i min=${2:-1}
	typeset -i count=$(get_word_count "$1")

	# Enough disks supplied: nothing to do.
	((count >= min)) && return

	log_untested "A minimum of $min disks is required to run." \
		" You specified $count disk(s)"
}
2559
# Return 0 if the dataset $1 has type "volume", 1 otherwise.
function ds_is_volume
{
	[[ $(get_prop type $1) == "volume" ]]
}
2566
# Return 0 if the dataset $1 has type "filesystem", 1 otherwise.
function ds_is_filesystem
{
	[[ $(get_prop type $1) == "filesystem" ]]
}
2573
# Return 0 if the dataset $1 has type "snapshot", 1 otherwise.
function ds_is_snapshot
{
	[[ $(get_prop type $1) == "snapshot" ]]
}
2580
2581#
2582# Check if Trusted Extensions are installed and enabled
2583#
function is_te_enabled
{
	# The exit status of grep becomes the function's result.  The
	# matching line is deliberately left on stdout, matching the
	# original behavior (no -q).
	svcs -H -o state labeld 2>/dev/null | grep "enabled"
}
2593
2594# Utility function to determine if a system has multiple cpus.
# Utility function to determine if a system has multiple cpus.
function is_mp
{
	typeset -i ncpu
	ncpu=$(psrinfo | wc -l)

	# One psrinfo line per CPU; succeed when there is more than one.
	((ncpu > 1))
}
2599
# Print the operating frequency of CPU 0 as reported by psrinfo -v.
function get_cpu_freq
{
	psrinfo -v 0 | awk '$0 ~ /processor operates at/ {print $6}'
}
2604
2605# Run the given command as the user provided.
2606function user_run
2607{
2608	typeset user=$1
2609	shift
2610
2611	eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
2612	return $?
2613}
2614
2615#
2616# Check if the pool contains the specified vdevs
2617#
2618# $1 pool
2619# $2..n <vdev> ...
2620#
2621# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2622# vdevs is not in the pool, and 2 if pool name is missing.
2623#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev
	typeset tmpfile
	typeset ret=0

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	# Capture the pool's vdev listing once, then check each requested
	# vdev (by its final path component) against it.
	tmpfile=$(mktemp)
	zpool list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		if ! grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1; then
			# Fix: previously returned here directly, leaking
			# the temp file.
			ret=1
			break
		fi
	done

	rm -f $tmpfile

	return $ret
}
2647
# Print the maximum of the integer arguments.
#
# $1..n integer values
function get_max
{
	# NOTE(review): -l is the lowercase attribute, not integer;
	# kept as-is since the arithmetic below coerces to integers
	# anyway — presumably "-li" was intended.
	typeset -l i max=$1
	shift

	for i in "$@"; do
		# Fix: dropped the useless "$(echo ...)" subshell around
		# the arithmetic expansion.
		max=$((max > i ? max : i))
	done

	echo $max
}
2659
# Print the minimum of the integer arguments.
#
# $1..n integer values
function get_min
{
	# NOTE(review): -l is the lowercase attribute, not integer;
	# kept as-is since the arithmetic below coerces to integers
	# anyway — presumably "-li" was intended.
	typeset -l i min=$1
	shift

	for i in "$@"; do
		# Fix: dropped the useless "$(echo ...)" subshell around
		# the arithmetic expansion.
		min=$((min < i ? min : i))
	done

	echo $min
}
2671
2672#
2673# Generate a random number between 1 and the argument.
2674#
# Print a pseudo-random integer in the range 1..$1 inclusive.
function random
{
	typeset max=$1

	echo $((RANDOM % max + 1))
}
2680
2681# Write data that can be compressed into a directory
2682function write_compressible
2683{
2684	typeset dir=$1
2685	typeset megs=$2
2686	typeset nfiles=${3:-1}
2687	typeset bs=${4:-1024k}
2688	typeset fname=${5:-file}
2689
2690	[[ -d $dir ]] || log_fail "No directory: $dir"
2691
2692	log_must eval "fio \
2693	    --name=job \
2694	    --fallocate=0 \
2695	    --minimal \
2696	    --randrepeat=0 \
2697	    --buffer_compress_percentage=66 \
2698	    --buffer_compress_chunk=4096 \
2699	    --directory=$dir \
2700	    --numjobs=$nfiles \
2701	    --rw=write \
2702	    --bs=$bs \
2703	    --filesize=$megs \
2704	    --filename_format='$fname.\$jobnum' >/dev/null"
2705}
2706
# Print the object (inode) number of the given path; fail the test if
# the path does not exist.
function get_objnum
{
	typeset pathname=$1

	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
	stat -c %i $pathname
}
2716
2717#
2718# Sync data to the pool
2719#
2720# $1 pool name
2721# $2 boolean to force uberblock (and config including zpool cache file) update
2722#
function sync_pool #pool <force>
{
	typeset pool=${1:-$TESTPOOL}
	typeset opt=""

	# A "true" second argument forces an uberblock/config update.
	[[ ${2:-false} == true ]] && opt="-f"

	log_must zpool sync $opt $pool

	return 0
}
2736
2737#
2738# Sync all pools
2739#
2740# $1 boolean to force uberblock (and config including zpool cache file) update
2741#
function sync_all_pools #<force>
{
	typeset opt=""

	# A "true" argument forces an uberblock/config update.
	[[ ${1:-false} == true ]] && opt="-f"

	log_must zpool sync $opt

	return 0
}
2754
2755#
2756# Wait for zpool 'freeing' property drops to zero.
2757#
2758# $1 pool name
2759#
function wait_freeing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll once a second until the pool's 'freeing' property is 0.
	until [[ "$(zpool list -Ho freeing $pool)" == "0" ]]; do
		log_must sleep 1
	done
}
2768
2769#
2770# Prints the current time in seconds since UNIX Epoch.
2771#
2772function current_epoch
2773{
2774	printf '%(%s)T'
2775}
2776
2777#
2778# Wait for every device replace operation to complete
2779#
2780# $1 pool name
2781#
function wait_replacing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll until no "replacing-N" vdev appears in the pool status.
	until [[ -z "$(zpool status $pool |
	    awk '/replacing-[0-9]+/ {print $1}')" ]]; do
		log_must sleep 1
	done
}
2791
2792#
2793# Set a global system tunable (64-bit value)
2794#
2795# $1 tunable name
2796# $2 tunable values
2797#
function set_tunable64
{
	typeset name="$1"
	typeset val="$2"

	# 64-bit writes use mdb's 'Z' format character.
	set_tunable_impl "$name" "$val" Z
}
2802
2803#
2804# Set a global system tunable (32-bit value)
2805#
2806# $1 tunable name
2807# $2 tunable values
2808#
function set_tunable32
{
	typeset name="$1"
	typeset val="$2"

	# 32-bit writes use mdb's 'W' format character.
	set_tunable_impl "$name" "$val" W
}
2813
# Platform-specific worker for set_tunable32/64.
#
# $1 tunable name
# $2 value to set
# $3 mdb format character (W = 32-bit, Z = 64-bit; SunOS only)
# $4 module name (default "zfs")
#
# Returns 0 on success, 1 on bad arguments or write failure.
function set_tunable_impl
{
	typeset tunable="$1"
	typeset value="$2"
	typeset mdb_cmd="$3"
	typeset module="${4:-zfs}"

	[[ -z "$tunable" ]] && return 1
	[[ -z "$value" ]] && return 1
	[[ -z "$mdb_cmd" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -w "$zfs_tunables/$tunable" ]] || return 1
		cat >"$zfs_tunables/$tunable" <<<"$value"
		return $?
		;;
	SunOS)
		# Fix: this was '[[ "$module" -eq "zfs" ]]', an arithmetic
		# comparison in which both strings evaluate to 0, so the
		# guard was always true.  Use a string comparison.
		[[ "$module" == "zfs" ]] || return 1
		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
		return $?
		;;
	esac
}
2839
2840#
2841# Get a global system tunable
2842#
2843# $1 tunable name
2844#
function get_tunable
{
	typeset name="$1"

	get_tunable_impl "$name"
}
2849
# Platform-specific worker for get_tunable.
#
# $1 tunable name
# $2 module name (default "zfs"; Linux only)
#
# Prints the tunable's value on success and returns 0; returns 1 on bad
# arguments, a missing tunable, or an unsupported platform.
function get_tunable_impl
{
	typeset tunable="$1"
	typeset module="${2:-zfs}"
	typeset value

	[[ -z "$tunable" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -f "$zfs_tunables/$tunable" ]] || return 1
		cat $zfs_tunables/$tunable
		return $?
		;;
	SunOS)
		# Fix: 'typeset value=$(mdb ...)' always leaves $? as the
		# status of typeset (0), so the error check below never
		# fired.  Declare first, then assign.
		value=$(mdb -k -e "$tunable::print | ::eval .=E")
		if [[ $? -ne 0 ]]; then
			log_fail "Failed to get value of '$tunable' from mdb."
			return 1
		fi
		echo $value
		return 0
		;;
	esac

	return 1
}
2877
# Create a new (non-ZFS) filesystem with the platform's newfs, feeding
# it a confirming "y" where the platform prompts for one.
function new_fs #<args>
{
	typeset os=$(uname)

	if [[ $os == "FreeBSD" ]]; then
		newfs "$@"
	elif [[ $os == "SunOS" ]]; then
		echo y | newfs "$@"
	else
		echo y | newfs -v "$@"
	fi
}
2892
#
# Wait for the specified arcstat to reach non-zero quiescence.
# If echo is 1 echo the value after reaching quiescence, otherwise
# if echo is 0 print the arcstat we are waiting on.
#
function arcstat_quiescence # stat echo
{
	typeset stat=$1
	typeset echo=$2
	typeset do_once=true

	if [[ $echo -eq 0 ]]; then
		echo "Waiting for arcstat $1 quiescence."
	fi

	# Emulated do/while loop: $do_once expands to the "true" command
	# on the first pass, forcing one iteration before stat1/stat2
	# exist; thereafter it is "false" and the loop continues until two
	# samples taken 2 seconds apart agree and are non-zero.
	while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
		typeset stat1=$(get_arcstat $stat)
		sleep 2
		typeset stat2=$(get_arcstat $stat)
		do_once=false
	done

	if [[ $echo -eq 1 ]]; then
		echo $stat2
	fi
}
2919
# Wait for arcstat $1 quiescence, printing only the waiting banner.
function arcstat_quiescence_noecho # stat
{
	arcstat_quiescence "$1" 0
}
2925
# Wait for arcstat $1 quiescence and print the settled value.
function arcstat_quiescence_echo # stat
{
	arcstat_quiescence "$1" 1
}
2931
2932#
2933# Compute SHA256 digest for given file or stdin if no file given.
2934# Note: file path must not contain spaces
2935#
function sha256digest
{
	typeset file=$1

	# Prefer the native digest(1); fall back to GNU sha256sum.  In
	# either case only the hex digest is printed.
	if [[ -x /usr/bin/digest ]]; then
		/usr/bin/digest -a sha256 $file
	elif [[ -x /usr/bin/sha256sum ]]; then
		/usr/bin/sha256sum -b $file | awk '{ print $1 }'
	else
		echo "Cannot calculate SHA256 digest"
		return 1
	fi
	return 0
}
2950
# Print the value of the named ARC statistic ($1) using the platform's
# kstat interface.
function get_arcstat # stat
{
	typeset stat=$1

	if is_linux; then
		typeset arcstats="/proc/spl/kstat/zfs/arcstats"
		[[ -f "$arcstats" ]] || return 1
		grep $stat $arcstats | awk '{print $3}'
	else
		kstat -p zfs::arcstats:$stat | awk '{ print $2 }'
	fi
}
2964