xref: /illumos-gate/usr/src/test/zfs-tests/include/libtest.shlib (revision 6fc89bfc8e69fd45d8778b2f0ad45efc0ded99ed)
1#
2# CDDL HEADER START
3#
4# The contents of this file are subject to the terms of the
5# Common Development and Distribution License (the "License").
6# You may not use this file except in compliance with the License.
7#
8# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9# or http://www.opensolaris.org/os/licensing.
10# See the License for the specific language governing permissions
11# and limitations under the License.
12#
13# When distributing Covered Code, include this CDDL HEADER in each
14# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15# If applicable, add the following below this CDDL HEADER, with the
16# fields enclosed by brackets "[]" replaced with your own identifying
17# information: Portions Copyright [yyyy] [name of copyright owner]
18#
19# CDDL HEADER END
20#
21
22#
23# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24# Use is subject to license terms.
25# Copyright (c) 2012, 2017 by Delphix. All rights reserved.
26# Copyright (c) 2017 by Tim Chase. All rights reserved.
27# Copyright (c) 2017 by Nexenta Systems, Inc. All rights reserved.
28# Copyright (c) 2017 Datto Inc.
29# Copyright 2020 Joyent, Inc.
30# Copyright (c) 2025, Klara, Inc.
31# Copyright 2024 MNX Cloud, Inc.
32#
33
34UNAME=$(uname)
35
36. ${STF_TOOLS}/contrib/include/logapi.shlib
37. ${STF_SUITE}/include/math.shlib
38. ${STF_SUITE}/include/blkdev.shlib
39
# Determine if this is a Linux test system
#
# Return 0 if platform Linux, 1 if otherwise

function is_linux
{
	[[ "$UNAME" == "Linux" ]]
}
48
# Determine if this is an illumos test system
#
# Return 0 if platform illumos, 1 if otherwise
function is_illumos
{
	[[ "$UNAME" == "SunOS" ]]
}
56
# Determine if this is a FreeBSD test system
#
# Return 0 if platform FreeBSD, 1 if otherwise

function is_freebsd
{
	[[ "$UNAME" == "FreeBSD" ]]
}
65
# Determine whether a dataset is mounted
#
# $1 dataset name, or a mountpoint path if it begins with '/'
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error

function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
		zfs)
			# A leading '/' means $1 is a mountpoint, so compare
			# against column 2 (mountpoint) of 'zfs mount';
			# otherwise compare against column 1 (dataset name).
			if [[ "$1" == "/"* ]] ; then
				for out in $(zfs mount | awk '{print $2}'); do
					[[ $1 == $out ]] && return 0
				done
			else
				for out in $(zfs mount | awk '{print $1}'); do
					[[ $1 == $out ]] && return 0
				done
			fi
		;;
		ufs|nfs)
			# On failure, propagate df's exit status (the
			# "2 on error" case documented above).
			out=$(df -F $fstype $1 2>/dev/null)
			ret=$?
			(($ret != 0)) && return $ret

			# df output has the form "<dir> (<name>): ...";
			# extract both the mount directory and the
			# device/resource name so either form of $1 matches.
			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	esac

	return 1
}
108
# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs

function mounted
{
	# Any non-zero ismounted status (unmounted or error) maps to 1.
	if ismounted $1 $2; then
		return 0
	fi
	return 1
}
120
# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs

function unmounted
{
	typeset -i rc

	ismounted $1 $2
	rc=$?

	# Only an explicit "unmounted" answer (status 1) counts as
	# success here; the error status (2) does not.
	((rc == 1)) && return 0
	return 1
}
132
# Print the given line with every "," replaced by a space
#
# $1 - line to split

function splitline
{
	echo $1 | tr ',' ' '
}
141
142function default_setup
143{
144	default_setup_noexit "$@"
145
146	log_pass
147}
148
149#
150# Given a list of disks, setup storage pools and datasets.
151#
152function default_setup_noexit
153{
154	typeset disklist=$1
155	typeset container=$2
156	typeset volume=$3
157
158	if is_global_zone; then
159		if poolexists $TESTPOOL ; then
160			destroy_pool $TESTPOOL
161		fi
162		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
163		log_must zpool create -f $TESTPOOL $disklist
164	else
165		reexport_pool
166	fi
167
168	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
169	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
170
171	log_must zfs create $TESTPOOL/$TESTFS
172	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
173
174	if [[ -n $container ]]; then
175		rm -rf $TESTDIR1  || \
176			log_unresolved Could not remove $TESTDIR1
177		mkdir -p $TESTDIR1 || \
178			log_unresolved Could not create $TESTDIR1
179
180		log_must zfs create $TESTPOOL/$TESTCTR
181		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
182		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
183		log_must zfs set mountpoint=$TESTDIR1 \
184		    $TESTPOOL/$TESTCTR/$TESTFS1
185	fi
186
187	if [[ -n $volume ]]; then
188		if is_global_zone ; then
189			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
190		else
191			log_must zfs create $TESTPOOL/$TESTVOL
192		fi
193	fi
194}
195
196#
197# Given a list of disks, setup a storage pool, file system and
198# a container.
199#
200function default_container_setup
201{
202	typeset disklist=$1
203
204	default_setup "$disklist" "true"
205}
206
207#
208# Given a list of disks, setup a storage pool,file system
209# and a volume.
210#
211function default_volume_setup
212{
213	typeset disklist=$1
214
215	default_setup "$disklist" "" "true"
216}
217
218#
219# Given a list of disks, setup a storage pool,file system,
220# a container and a volume.
221#
222function default_container_volume_setup
223{
224	typeset disklist=$1
225
226	default_setup "$disklist" "true" "true"
227}
228
229#
230# Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
231# filesystem
232#
233# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
234# $2 snapshot name. Default, $TESTSNAP
235#
236function create_snapshot
237{
238	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
239	typeset snap=${2:-$TESTSNAP}
240
241	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
242	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
243
244	if snapexists $fs_vol@$snap; then
245		log_fail "$fs_vol@$snap already exists."
246	fi
247	datasetexists $fs_vol || \
248		log_fail "$fs_vol must exist."
249
250	log_must zfs snapshot $fs_vol@$snap
251}
252
253#
254# Create a clone from a snapshot, default clone name is $TESTCLONE.
255#
256# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
257# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
258#
259function create_clone   # snapshot clone
260{
261	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
262	typeset clone=${2:-$TESTPOOL/$TESTCLONE}
263
264	[[ -z $snap ]] && \
265		log_fail "Snapshot name is undefined."
266	[[ -z $clone ]] && \
267		log_fail "Clone name is undefined."
268
269	log_must zfs clone $snap $clone
270}
271
272#
273# Create a bookmark of the given snapshot.  Defaultly create a bookmark on
274# filesystem.
275#
276# $1 Existing filesystem or volume name. Default, $TESTFS
277# $2 Existing snapshot name. Default, $TESTSNAP
278# $3 bookmark name. Default, $TESTBKMARK
279#
280function create_bookmark
281{
282	typeset fs_vol=${1:-$TESTFS}
283	typeset snap=${2:-$TESTSNAP}
284	typeset bkmark=${3:-$TESTBKMARK}
285
286	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
287	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
288	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
289
290	if bkmarkexists $fs_vol#$bkmark; then
291		log_fail "$fs_vol#$bkmark already exists."
292	fi
293	datasetexists $fs_vol || \
294		log_fail "$fs_vol must exist."
295	snapexists $fs_vol@$snap || \
296		log_fail "$fs_vol@$snap must exist."
297
298	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
299}
300
301#
302# Create a temporary clone result of an interrupted resumable 'zfs receive'
303# $1 Destination filesystem name. Must not exist, will be created as the result
304#    of this function along with its %recv temporary clone
305# $2 Source filesystem name. Must not exist, will be created and destroyed
306#
307function create_recv_clone
308{
309	typeset recvfs="$1"
310	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
311	typeset snap="$sendfs@snap1"
312	typeset incr="$sendfs@snap2"
313	typeset mountpoint="$TESTDIR/create_recv_clone"
314	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
315
316	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
317
318	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
319	datasetexists $sendfs && log_fail "Send filesystem must not exist."
320
321	log_must zfs create -o mountpoint="$mountpoint" $sendfs
322	log_must zfs snapshot $snap
323	log_must eval "zfs send $snap | zfs recv -u $recvfs"
324	log_must mkfile 1m "$mountpoint/data"
325	log_must zfs snapshot $incr
326	log_must eval "zfs send -i $snap $incr | dd bs=10k count=1 > $sendfile"
327	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
328	destroy_dataset "$sendfs" "-r"
329	log_must rm -f "$sendfile"
330
331	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
332		log_fail "Error creating temporary $recvfs/%recv clone"
333	fi
334}
335
336function default_mirror_setup
337{
338	default_mirror_setup_noexit $1 $2 $3
339
340	log_pass
341}
342
343function default_mirror_2way_setup
344{
345	default_mirror_setup_noexit $1 $2
346
347	log_pass
348}
349
350#
351# Given a pair of disks, set up a storage pool and dataset for the mirror
352# @parameters: $1 the primary side of the mirror
353#   $2 the secondary side of the mirror
354# @uses: ZPOOL ZFS TESTPOOL TESTFS
355function default_mirror_setup_noexit
356{
357	readonly func="default_mirror_setup_noexit"
358	typeset primary=$1
359	typeset secondary=$2
360
361	[[ -z $primary ]] && \
362		log_fail "$func: No parameters passed"
363	[[ -z $secondary ]] && \
364		log_fail "$func: No secondary partition passed"
365	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
366	log_must zpool create -f $TESTPOOL mirror $@
367	log_must zfs create $TESTPOOL/$TESTFS
368	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
369}
370
371#
372# create a number of mirrors.
373# We create a number($1) of 2 way mirrors using the pairs of disks named
374# on the command line. These mirrors are *not* mounted
375# @parameters: $1 the number of mirrors to create
376#  $... the devices to use to create the mirrors on
377# @uses: ZPOOL ZFS TESTPOOL
378function setup_mirrors
379{
380	typeset -i nmirrors=$1
381
382	shift
383	while ((nmirrors > 0)); do
384		log_must test -n "$1" -a -n "$2"
385		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
386		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
387		shift 2
388		((nmirrors = nmirrors - 1))
389	done
390}
391
392#
393# create a number of raidz pools.
394# We create a number($1) of 2 raidz pools  using the pairs of disks named
395# on the command line. These pools are *not* mounted
396# @parameters: $1 the number of pools to create
397#  $... the devices to use to create the pools on
398# @uses: ZPOOL ZFS TESTPOOL
399function setup_raidzs
400{
401	typeset -i nraidzs=$1
402
403	shift
404	while ((nraidzs > 0)); do
405		log_must test -n "$1" -a -n "$2"
406		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
407		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
408		shift 2
409		((nraidzs = nraidzs - 1))
410	done
411}
412
413#
414# Destroy the configured testpool mirrors.
415# the mirrors are of the form ${TESTPOOL}{number}
416# @uses: ZPOOL ZFS TESTPOOL
417function destroy_mirrors
418{
419	default_cleanup_noexit
420
421	log_pass
422}
423
424#
425# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
426# $1 the list of disks
427#
428function default_raidz_setup
429{
430	typeset disklist="$*"
431	disks=(${disklist[*]})
432
433	if [[ ${#disks[*]} -lt 2 ]]; then
434		log_fail "A raid-z requires a minimum of two disks."
435	fi
436
437	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
438	log_must zpool create -f $TESTPOOL raidz $disklist
439	log_must zfs create $TESTPOOL/$TESTFS
440	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
441
442	log_pass
443}
444
445#
446# Common function used to cleanup storage pools and datasets.
447#
448# Invoked at the start of the test suite to ensure the system
449# is in a known state, and also at the end of each set of
450# sub-tests to ensure errors from one set of tests doesn't
451# impact the execution of the next set.
452
453function default_cleanup
454{
455	default_cleanup_noexit
456
457	log_pass
458}
459
#
# Cleanup storage pools and datasets without exiting the test.
#
# In the global zone every pool not protected by $KEEP/$NO_POOLS is
# destroyed (when safe_to_destroy_pool allows it). In a local zone the
# $ZONE_POOL/$ZONE_CTR* datasets are destroyed and their properties are
# reset to suite defaults. Finally $TESTDIR is removed.
#
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		# Build an egrep pattern of pool names that must be kept.
		exclude=`eval echo \"'(${KEEP})'\"`
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		#
		# Fix: quote the expansion — with more than one pool the
		# old unquoted '[ ! -z ${ALL_POOLS} ]' passed multiple
		# operands to test, which errored and ended the loop
		# before all pools were destroyed.
		while [ -n "${ALL_POOLS}" ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				# Re-list after each destroy so dependent
				# pools that became destroyable are seen.
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		typeset fs=""
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			destroy_dataset "$fs" "-Rf"
		done

		# Need cleanup here to avoid garbage dir left.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
		# the default value
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs
				typeset enc=""
				enc=$(get_prop encryption $fs)
				# checksum cannot be reset on encrypted
				# datasets, so only do it when encryption is
				# absent or off.
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
					[[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
		log_must rm -rf $TESTDIR
}
535
536
537#
538# Common function used to cleanup storage pools, file systems
539# and containers.
540#
541function default_container_cleanup
542{
543	if ! is_global_zone; then
544		reexport_pool
545	fi
546
547	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
548	[[ $? -eq 0 ]] && \
549	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
550
551	destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
552	destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
553
554	[[ -e $TESTDIR1 ]] && \
555	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1
556
557	default_cleanup
558}
559
560#
561# Common function used to cleanup snapshot of file system or volume. Default to
562# delete the file system's snapshot
563#
564# $1 snapshot name
565#
566function destroy_snapshot
567{
568	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
569
570	if ! snapexists $snap; then
571		log_fail "'$snap' does not existed."
572	fi
573
574	#
575	# For the sake of the value which come from 'get_prop' is not equal
576	# to the really mountpoint when the snapshot is unmounted. So, firstly
577	# check and make sure this snapshot's been mounted in current system.
578	#
579	typeset mtpt=""
580	if ismounted $snap; then
581		mtpt=$(get_prop mountpoint $snap)
582		(($? != 0)) && \
583			log_fail "get_prop mountpoint $snap failed."
584	fi
585
586	destroy_dataset $snap
587	[[ $mtpt != "" && -d $mtpt ]] && \
588		log_must rm -rf $mtpt
589}
590
591#
592# Common function used to cleanup clone.
593#
594# $1 clone name
595#
596function destroy_clone
597{
598	typeset clone=${1:-$TESTPOOL/$TESTCLONE}
599
600	if ! datasetexists $clone; then
601		log_fail "'$clone' does not existed."
602	fi
603
604	# With the same reason in destroy_snapshot
605	typeset mtpt=""
606	if ismounted $clone; then
607		mtpt=$(get_prop mountpoint $clone)
608		(($? != 0)) && \
609			log_fail "get_prop mountpoint $clone failed."
610	fi
611
612	destroy_dataset $clone
613	[[ $mtpt != "" && -d $mtpt ]] && \
614		log_must rm -rf $mtpt
615}
616
617#
618# Common function used to cleanup bookmark of file system or volume.  Default
619# to delete the file system's bookmark.
620#
621# $1 bookmark name
622#
623function destroy_bookmark
624{
625	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
626
627	if ! bkmarkexists $bkmark; then
628		log_fail "'$bkmarkp' does not existed."
629	fi
630
631	destroy_dataset $bkmark
632}
633
# Return 0 if a snapshot exists; non-zero otherwise
#
# $1 - snapshot name

function snapexists
{
	# Listing the snapshot succeeds exactly when it exists.
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
}
643
644#
645# Return 0 if a bookmark exists; $? otherwise
646#
647# $1 - bookmark name
648#
649function bkmarkexists
650{
651	zfs list -H -t bookmark "$1" > /dev/null 2>&1
652	return $?
653}
654
655#
656# Set a property to a certain value on a dataset.
657# Sets a property of the dataset to the value as passed in.
658# @param:
659#	$1 dataset who's property is being set
660#	$2 property to set
661#	$3 value to set property to
662# @return:
663#	0 if the property could be set.
664#	non-zero otherwise.
665# @use: ZFS
666#
667function dataset_setprop
668{
669	typeset fn=dataset_setprop
670
671	if (($# < 3)); then
672		log_note "$fn: Insufficient parameters (need 3, had $#)"
673		return 1
674	fi
675	typeset output=
676	output=$(zfs set $2=$3 $1 2>&1)
677	typeset rv=$?
678	if ((rv != 0)); then
679		log_note "Setting property on $1 failed."
680		log_note "property $2=$3"
681		log_note "Return Code: $rv"
682		log_note "Output: $output"
683		return $rv
684	fi
685	return 0
686}
687
688#
689# Assign suite defined dataset properties.
690# This function is used to apply the suite's defined default set of
691# properties to a dataset.
692# @parameters: $1 dataset to use
693# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
694# @returns:
695#   0 if the dataset has been altered.
696#   1 if no pool name was passed in.
697#   2 if the dataset could not be found.
698#   3 if the dataset could not have it's properties set.
699#
700function dataset_set_defaultproperties
701{
702	typeset dataset="$1"
703
704	[[ -z $dataset ]] && return 1
705
706	typeset confset=
707	typeset -i found=0
708	for confset in $(zfs list); do
709		if [[ $dataset = $confset ]]; then
710			found=1
711			break
712		fi
713	done
714	[[ $found -eq 0 ]] && return 2
715	if [[ -n $COMPRESSION_PROP ]]; then
716		dataset_setprop $dataset compression $COMPRESSION_PROP || \
717			return 3
718		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
719	fi
720	if [[ -n $CHECKSUM_PROP ]]; then
721		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
722			return 3
723		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
724	fi
725	return 0
726}
727
728#
729# Check a numeric assertion
730# @parameter: $@ the assertion to check
731# @output: big loud notice if assertion failed
732# @use: log_fail
733#
734function assert
735{
736	(($@)) || log_fail "$@"
737}
738
739#
740# Function to format partition size of a disk
741# Given a disk cxtxdx reduces all partitions
742# to 0 size
743#
744function zero_partitions #<whole_disk_name>
745{
746	typeset diskname=$1
747	typeset i
748
749	for i in 0 1 3 4 5 6 7
750	do
751		set_partition $i "" 0mb $diskname
752	done
753}
754
755#
756# Given a slice, size and disk, this function
757# formats the slice to the specified size.
758# Size should be specified with units as per
759# the `format` command requirements eg. 100mb 3gb
760#
761function set_partition #<slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
762{
763	typeset -i slicenum=$1
764	typeset start=$2
765	typeset size=$3
766	typeset disk=$4
767	[[ -z $slicenum || -z $size || -z $disk ]] && \
768	    log_fail "The slice, size or disk name is unspecified."
769	typeset format_file=/var/tmp/format_in.$$
770
771	echo "partition" >$format_file
772	echo "$slicenum" >> $format_file
773	echo "" >> $format_file
774	echo "" >> $format_file
775	echo "$start" >> $format_file
776	echo "$size" >> $format_file
777	echo "label" >> $format_file
778	echo "" >> $format_file
779	echo "q" >> $format_file
780	echo "q" >> $format_file
781
782	format -e -s -d $disk -f $format_file
783	typeset ret_val=$?
784	rm -f $format_file
785	[[ $ret_val -ne 0 ]] && \
786	    log_fail "Unable to format $disk slice $slicenum to $size"
787	return 0
788}
789
790#
791# Get the end cyl of the given slice
792#
793function get_endslice #<disk> <slice>
794{
795	typeset disk=$1
796	typeset slice=$2
797	if [[ -z $disk || -z $slice ]] ; then
798		log_fail "The disk name or slice number is unspecified."
799	fi
800
801	disk=${disk#/dev/dsk/}
802	disk=${disk#/dev/rdsk/}
803	disk=${disk%s*}
804
805	typeset -i ratio=0
806	ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
807		grep "sectors\/cylinder" | \
808		awk '{print $2}')
809
810	if ((ratio == 0)); then
811		return
812	fi
813
814	typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
815		nawk -v token="$slice" '{if ($1==token) print $6}')
816
817	((endcyl = (endcyl + 1) / ratio))
818	echo $endcyl
819}
820
821
822#
823# Given a size,disk and total slice number,  this function formats the
824# disk slices from 0 to the total slice number with the same specified
825# size.
826#
827function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
828{
829	typeset -i i=0
830	typeset slice_size=$1
831	typeset disk_name=$2
832	typeset total_slices=$3
833	typeset cyl
834
835	zero_partitions $disk_name
836	while ((i < $total_slices)); do
837		if ((i == 2)); then
838			((i = i + 1))
839			continue
840		fi
841		set_partition $i "$cyl" $slice_size $disk_name
842		cyl=$(get_endslice $disk_name $i)
843		((i = i+1))
844	done
845}
846
847#
848# This function continues to write to a filenum number of files into dirnum
849# number of directories until either file_write returns an error or the
850# maximum number of files per directory have been written.
851#
852# Usage:
853# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
854#
855# Return value: 0 on success
856#		non 0 on error
857#
858# Where :
859#	destdir:    is the directory where everything is to be created under
860#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
861#	filenum:    the maximum number of files per subdirectory
862#	bytes:	    number of bytes to write
#	num_writes: number of times to write out bytes
#	data:	    the data that will be written
#
#	E.g.
#	fill_fs /testdir 20 25 1024 256 0
868#
869# Note: bytes * num_writes equals the size of the testfile
870#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset data=${6:-0}

	# odirnum is the loop-continue flag; idirnum/fn index the current
	# subdirectory and file. retval holds the last file_write status.
	typeset -i odirnum=1
	typeset -i idirnum=0
	typeset -i fn=0
	typeset -i retval=0

	mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		# dirnum of -1 means no directory limit.
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		# Stop on the first write failure (e.g. filesystem full).
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		# Move to the next subdirectory once this one is full.
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
908
909#
910# Simple function to get the specified property. If unable to
911# get the property then exits.
912#
913# Note property is in 'parsable' format (-p)
914#
915function get_prop # property dataset
916{
917	typeset prop_val
918	typeset prop=$1
919	typeset dataset=$2
920
921	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
922	if [[ $? -ne 0 ]]; then
923		log_note "Unable to get $prop property for dataset " \
924		"$dataset"
925		return 1
926	fi
927
928	echo "$prop_val"
929	return 0
930}
931
932#
933# Simple function to get the specified property of pool. If unable to
934# get the property then exits.
935#
936function get_pool_prop # property pool
937{
938	typeset prop_val
939	typeset prop=$1
940	typeset pool=$2
941
942	if poolexists $pool ; then
943		prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
944			awk '{print $3}')
945		if [[ $? -ne 0 ]]; then
946			log_note "Unable to get $prop property for pool " \
947			"$pool"
948			return 1
949		fi
950	else
951		log_note "Pool $pool not exists."
952		return 1
953	fi
954
955	echo $prop_val
956	return 0
957}
958
# Return 0 if the named pool exists; non-zero otherwise
#
# $1 - pool name

function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	# 'zpool get' succeeds only for an existing pool.
	zpool get name "$pool" > /dev/null 2>&1
}
975
# Return 0 if all the specified datasets exist; non-zero otherwise
#
# $1-n  dataset name
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	for ds in "$@"; do
		# 'zfs get' fails for a nonexistent dataset; propagate
		# its status on the first miss.
		zfs get name $ds > /dev/null 2>&1 || \
			return $?
	done

	return 0
}
994
# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n  dataset name
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	typeset ds
	for ds in "$@"; do
		# Any name that lists successfully means a dataset exists.
		zfs list -H -t filesystem,snapshot,volume $ds > /dev/null 2>&1 \
		    && return 1
	done

	return 0
}
1013
1014#
1015# Given a mountpoint, or a dataset name, determine if it is shared.
1016#
1017# Returns 0 if shared, 1 otherwise.
1018#
1019function is_shared
1020{
1021	typeset fs=$1
1022	typeset mtpt
1023
1024	if [[ $fs != "/"* ]] ; then
1025		if datasetnonexists "$fs" ; then
1026			return 1
1027		else
1028			mtpt=$(get_prop mountpoint "$fs")
1029			case $mtpt in
1030				none|legacy|-) return 1
1031					;;
1032				*)	fs=$mtpt
1033					;;
1034			esac
1035		fi
1036	fi
1037
1038	for mtpt in `share | awk '{print $2}'` ; do
1039		if [[ $mtpt == $fs ]] ; then
1040			return 0
1041		fi
1042	done
1043
1044	typeset stat=$(svcs -H -o STA nfs/server:default)
1045	if [[ $stat != "ON" ]]; then
1046		log_note "Current nfs/server status: $stat"
1047	fi
1048
1049	return 1
1050}
1051
1052#
1053# Given a mountpoint, determine if it is not shared.
1054#
1055# Returns 0 if not shared, 1 otherwise.
1056#
1057function not_shared
1058{
1059	typeset fs=$1
1060
1061	is_shared $fs
1062	if (($? == 0)); then
1063		return 1
1064	fi
1065
1066	return 0
1067}
1068
1069#
1070# Helper function to unshare a mountpoint.
1071#
1072function unshare_fs #fs
1073{
1074	typeset fs=$1
1075
1076	is_shared $fs
1077	if (($? == 0)); then
1078		log_must zfs unshare $fs
1079	fi
1080
1081	return 0
1082}
1083
1084#
1085# Check NFS server status and trigger it online.
1086#
1087function setup_nfs_server
1088{
1089	# Cannot share directory in non-global zone.
1090	#
1091	if ! is_global_zone; then
1092		log_note "Cannot trigger NFS server by sharing in LZ."
1093		return
1094	fi
1095
1096	typeset nfs_fmri="svc:/network/nfs/server:default"
1097	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1098		#
1099		# Only really sharing operation can enable NFS server
1100		# to online permanently.
1101		#
1102		typeset dummy=/tmp/dummy
1103
1104		if [[ -d $dummy ]]; then
1105			log_must rm -rf $dummy
1106		fi
1107
1108		log_must mkdir $dummy
1109		log_must share $dummy
1110
1111		#
1112		# Waiting for fmri's status to be the final status.
1113		# Otherwise, in transition, an asterisk (*) is appended for
1114		# instances, unshare will reverse status to 'DIS' again.
1115		#
1116		# Waiting for 1's at least.
1117		#
1118		log_must sleep 1
1119		timeout=10
1120		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1121		do
1122			log_must sleep 1
1123
1124			((timeout -= 1))
1125		done
1126
1127		log_must unshare $dummy
1128		log_must rm -rf $dummy
1129	fi
1130
1131	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1132}
1133
1134#
1135# To verify whether calling process is in global zone
1136#
1137# Return 0 if in global zone, 1 in non-global zone
1138#
1139function is_global_zone
1140{
1141	typeset cur_zone=$(zonename 2>/dev/null)
1142	if [[ $cur_zone != "global" ]]; then
1143		return 1
1144	fi
1145	return 0
1146}
1147
1148#
1149# Verify whether test is permitted to run from
1150# global zone, local zone, or both
1151#
1152# $1 zone limit, could be "global", "local", or "both"(no limit)
1153#
1154# Return 0 if permitted, otherwise exit with log_unsupported
1155#
1156function verify_runnable # zone limit
1157{
1158	typeset limit=$1
1159
1160	[[ -z $limit ]] && return 0
1161
1162	if is_global_zone ; then
1163		case $limit in
1164			global|both)
1165				;;
1166			local)	log_unsupported "Test is unable to run from "\
1167					"global zone."
1168				;;
1169			*)	log_note "Warning: unknown limit $limit - " \
1170					"use both."
1171				;;
1172		esac
1173	else
1174		case $limit in
1175			local|both)
1176				;;
1177			global)	log_unsupported "Test is unable to run from "\
1178					"local zone."
1179				;;
1180			*)	log_note "Warning: unknown limit $limit - " \
1181					"use both."
1182				;;
1183		esac
1184
1185		reexport_pool
1186	fi
1187
1188	return 0
1189}
1190
# Return 0 if create successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list

function create_pool #pool devs_list
{
	# Strip any dataset component so a "pool/fs" argument still
	# yields the pool name.
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	# Recreate from scratch if the pool already exists.
	if poolexists $pool ; then
		destroy_pool $pool
	fi

	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
1219
# Return 0 if destroy successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.

function destroy_pool #pool
{
	# Strip any dataset component so a "pool/fs" argument still
	# yields the pool name.
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			# Capture the mountpoint before destroying so the
			# leftover directory can be removed afterwards.
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			must=""
			while [[ $ret -ne 0 ]]; do
				# After 7 failed attempts, escalate: run the
				# destroy under log_must so the test fails.
				$must zpool destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				[[ count++ -ge 7 ]] && must=log_must
				sleep $wait_time
			done

			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
1265
# Return 0 if created successfully; $? otherwise
#
# $1 - dataset name
# $2-n - dataset options

function create_dataset #dataset dataset_options
{
	typeset dataset=$1

	shift

	if [[ -z $dataset ]]; then
		log_note "Missing dataset name."
		return 1
	fi

	# Recreate from scratch if the dataset already exists.
	if datasetexists $dataset ; then
		destroy_dataset $dataset
	fi

	log_must zfs create $@ $dataset

	return 0
}
1290
# Destroy the dataset with the given parameters.
# Return 0 if the dataset is destroyed successfully; non-zero otherwise.
# Note: In local zones, this function returns 0 silently.
#
# $1 - dataset name
# $2 - custom arguments for zfs destroy
1297
function destroy_dataset #dataset #args
{
	typeset dataset=$1
	typeset mtpt
	typeset args=${2:-""}

	if [[ -z $dataset ]]; then
		log_note "No dataset name given."
		return 1
	fi

	# Only meaningful in the global zone; local zones succeed quietly.
	if ! is_global_zone ; then
		return 0
	fi

	if ! datasetexists "$dataset" ; then
		log_note "Dataset does not exist. ($dataset)"
		return 1
	fi

	mtpt=$(get_prop mountpoint "$dataset")
	log_must zfs destroy $args $dataset

	# Remove the now-orphaned mountpoint directory, if one remains.
	[[ -d $mtpt ]] && \
		log_must rm -rf $mtpt

	return 0
}
1324
1325#
1326# Firstly, create a pool with 5 datasets. Then, create a single zone and
1327# export the 5 datasets to it. In addition, we also add a ZFS filesystem
1328# and a zvol device to the zone.
1329#
1330# $1 zone name
1331# $2 zone root directory prefix
1332# $3 zone ip
1333#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 container within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol

	#
	# If current system support slog, add slog device for pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have it's mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	# Reset the zone's directory under the (possibly pre-existing)
	# zone root; zoneadm requires mode 0700 on the zonepath.
	[[ -d $zone_root ]] && \
		log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
		log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configure file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	# Delegate each of the 5 containers to the zone as a dataset.
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
			$zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo  "terminal=dtterm" >> $sysidcfg
	echo  "network_interface=primary {" >> $sysidcfg
	echo  "hostname=$zone_name" >> $sysidcfg
	echo  "}" >> $sysidcfg
	echo  "name_service=NONE" >> $sysidcfg
	echo  "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo  "security_policy=NONE" >> $sysidcfg
	echo  "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
1437
1438#
1439# Reexport TESTPOOL & TESTPOOL(1-4)
1440#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	while ((i < cntctr)); do
		if ((i == 0)); then
			# The first container becomes TESTPOOL itself.
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			# The remaining containers become TESTPOOL1..4;
			# eval is required to build and later dereference
			# the numbered variable names dynamically.
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
1461
1462#
1463# Verify a given disk is online or offline
1464#
1465# Return 0 is pool/disk matches expected state, 1 otherwise
1466#
function check_state # pool disk state{online,offline}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# The disk is in the expected state when its line in the
	# verbose pool status carries the state keyword.
	zpool status -v $pool | grep "$disk"  \
	    | grep -i "$state" > /dev/null 2>&1
}
1478
1479#
1480# Get the mountpoint of snapshot
1481# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1482# as its mountpoint
1483#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	# Require the <filesystem>@<snapname> form up front.
	if [[ $dataset != *@* ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	# Both halves must be non-empty for a valid snapshot name.
	if [[ -z $fs || -z $snap ]]; then
		log_fail "Error name of snapshot '$dataset'."
	fi

	# Snapshots are exposed under <fs mountpoint>/.zfs/snapshot/<snap>.
	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
1501
1502#
1503# Given a device and 'ashift' value verify it's correctly set on every label
1504#
1505function verify_ashift # device ashift
1506{
1507	typeset device="$1"
1508	typeset ashift="$2"
1509
1510	zdb -e -lll $device | nawk -v ashift=$ashift '/ashift: / {
1511	    if (ashift != $2)
1512	        exit 1;
1513	    else
1514	        count++;
1515	    } END {
1516	    if (count != 4)
1517	        exit 1;
1518	    else
1519	        exit 0;
1520	    }'
1521
1522	return $?
1523}
1524
1525#
1526# Given a pool and file system, this function will verify the file system
1527# using the zdb internal tool. Note that the pool is exported and imported
1528# to ensure it has consistent state.
1529#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	# Any remaining arguments are extra vdev directories to search
	# during the re-import.
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	# Export/import cycle guarantees zdb sees a consistent on-disk
	# state rather than in-flight data.
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
1563
1564#
1565# Given a pool issue a scrub and verify that no checksum errors are reported.
1566#
function verify_pool
{
	typeset pool=${1:-$TESTPOOL}

	# Scrub the entire pool and wait for the scrub to complete.
	log_must zpool scrub $pool
	log_must wait_scrubbed $pool

	# Extract the pool-level CKSUM counter: the value printed on
	# the line immediately following the CKSUM column header.
	cksum=$(zpool status $pool | \
	    awk '{if ($5 == "CKSUM"){L=1; next} if (L) {print $NF;L=0}}')
	if [[ $cksum != 0 ]]; then
		log_must zpool status -v
		log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
	fi
}
1581
1582#
1583# Given a pool, and this function list all disks in the pool
1584#
1585function get_disklist # pool
1586{
1587	typeset disklist=""
1588
1589	disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
1590	    grep -v "\-\-\-\-\-" | \
1591	    egrep -v -e "^(mirror|raidz[1-3]|spare|log|cache|special|dedup)$")
1592
1593	echo $disklist
1594}
1595
1596# /**
1597#  This function kills a given list of processes after a time period. We use
1598#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
1599#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1600#  would be listed as FAIL, which we don't want : we're happy with stress tests
1601#  running for a certain amount of time, then finishing.
1602#
1603# @param $1 the time in seconds after which we should terminate these processes
1604# @param $2..$n the processes we wish to terminate.
1605# */
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"
	typeset pid

	log_note "Waiting for child processes($cpids). " \
		"It could last dozens of minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	for pid in $cpids; do
		# Only signal processes that are still running.
		if ps -p $pid > /dev/null 2>&1; then
			log_must kill -USR1 $pid
		fi
	done
}
1625
1626#
1627# Verify a given hotspare disk is inuse or avail
1628#
1629# Return 0 is pool/disk matches expected state, 1 otherwise
1630#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Compare against the state reported in the "spares" section of
	# the pool status.
	cur_state=$(get_device_state $pool $disk "spares")

	[[ $state == ${cur_state} ]]
}
1644
1645#
1646# Wait until a hotspare transitions to a given state or times out.
1647#
1648# Return 0 when  pool/disk matches expected state, 1 on timeout.
1649#
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	# Strip a leading "$DEV_DSKDIR/" prefix. The previous pattern
	# '$/DEV_DSKDIR/' misplaced the '$' and could never match.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the spare reaches the desired state
	# or the timeout (in seconds) expires.
	while [[ $i -lt $timeout ]]; do
		if check_hotspare_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1669
1670#
1671# Verify a given slog disk is inuse or avail
1672#
1673# Return 0 is pool/disk matches expected state, 1 otherwise
1674#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Compare against the state reported in the "logs" section of
	# the pool status.
	cur_state=$(get_device_state $pool $disk "logs")

	[[ $state == ${cur_state} ]]
}
1688
1689#
1690# Verify a given vdev disk is inuse or avail
1691#
1692# Return 0 is pool/disk matches expected state, 1 otherwise
1693#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	# Compare against the state reported in the main config section.
	cur_state=$(get_device_state $pool $disk)

	[[ $state == ${cur_state} ]]
}
1707
1708#
1709# Wait until a vdev transitions to a given state or times out.
1710#
1711# Return 0 when  pool/disk matches expected state, 1 on timeout.
1712#
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	# Strip a leading "$DEV_DSKDIR/" prefix. The previous pattern
	# '$/DEV_DSKDIR/' misplaced the '$' and could never match.
	typeset disk=${2#$DEV_DSKDIR/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	# Poll once per second until the vdev reaches the desired state
	# or the timeout (in seconds) expires.
	while [[ $i -lt $timeout ]]; do
		if check_vdev_state $pool $disk $state; then
			return 0
		fi

		i=$((i+1))
		sleep 1
	done

	return 1
}
1732
1733#
1734# Check the output of 'zpool status -v <pool>',
1735# and to see if the content of <token> contain the <keyword> specified.
1736#
1737# Return 0 is contain, 1 otherwise
1738#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}

	# Pull the single status line whose first field is "<token>:"
	# (e.g. "scan:" or "remove:").
	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	# Case-insensitive search for the keyword within that line.
	echo $scan | grep -i "$keyword" > /dev/null 2>&1

	return $?
}
1755
1756#
1757# These 6 following functions are instance of check_pool_status()
1758#	is_pool_resilvering - to check if the pool is resilver in progress
1759#	is_pool_resilvered - to check if the pool is resilver completed
1760#	is_pool_scrubbing - to check if the pool is scrub in progress
1761#	is_pool_scrubbed - to check if the pool is scrub completed
1762#	is_pool_scrub_stopped - to check if the pool is scrub stopped
1763#	is_pool_scrub_paused - to check if the pool has scrub paused
1764#	is_pool_removing - to check if the pool is removing a vdev
1765#	is_pool_removed - to check if the pool is remove completed
1766#
function is_pool_resilvering #pool <verbose>
{
	# True while the scan line reports an active resilver.
	check_pool_status "$1" "scan" "resilver in progress since " $2
}
1772
function is_pool_resilvered #pool <verbose>
{
	# True once the scan line reports a completed resilver.
	check_pool_status "$1" "scan" "resilvered " $2
}
1778
function is_pool_scrubbing #pool <verbose>
{
	# True while the scan line reports an active scrub.
	check_pool_status "$1" "scan" "scrub in progress since " $2
}
1784
function is_pool_scrubbed #pool <verbose>
{
	# True once the scan line reports a completed scrub.
	check_pool_status "$1" "scan" "scrub repaired" $2
}
1790
function is_pool_scrub_stopped #pool <verbose>
{
	# True once the scan line reports a canceled scrub.
	check_pool_status "$1" "scan" "scrub canceled" $2
}
1796
function is_pool_scrub_paused #pool <verbose>
{
	# True while the scan line reports a paused scrub.
	check_pool_status "$1" "scan" "scrub paused since " $2
}
1802
function is_pool_removing #pool
{
	# True while the remove line reports an active device removal.
	check_pool_status "$1" "remove" "in progress since "
}
1808
function is_pool_removed #pool
{
	# True once the remove line reports a completed device removal.
	check_pool_status "$1" "remove" "completed on"
}
1814
function wait_for_degraded
{
	typeset pool=$1
	typeset timeout=${2:-30}
	typeset t0=$SECONDS

	# Poll the pool health once per second until it reports
	# DEGRADED, giving up after $timeout seconds.
	while [[ $(get_pool_prop health $pool) != "DEGRADED" ]]; do
		log_note "$pool is not yet degraded."
		sleep 1
		if ((SECONDS - t0 > $timeout)); then
			log_note "$pool not degraded after $timeout seconds."
			return 1
		fi
	done

	return 0
}
1833
1834#
1835# Wait for a pool to be scrubbed
1836#
1837# $1 pool name
1838# $2 number of seconds to wait (optional)
1839#
1840# Returns true when pool has been scrubbed, or false if there's a timeout or if
1841# no scrub was done.
1842#
function wait_scrubbed
{
	typeset pool=${1:-$TESTPOOL}
	# Honor the documented optional timeout ($2, seconds); the
	# previous implementation ignored it and could spin forever.
	typeset -i timeout=${2:-300}
	typeset -i elapsed=0

	while ! is_pool_scrubbed $pool ; do
		if ((elapsed >= timeout)); then
			return 1
		fi
		log_must sleep 1
		((elapsed += 1))
	done

	return 0
}
1851
1852#
# Use create_pool()/destroy_pool() to clean up the information on
# the given disks to avoid slice overlapping.
1855#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	# Remove any leftover scratch pool from a previous run.
	poolexists $pool && destroy_pool $pool

	# Creating and destroying a throwaway pool relabels the devices,
	# wiping any stale slice information.
	create_pool $pool $@
	destroy_pool $pool

	return 0
}
1869
1870#/**
1871# A function to find and locate free disks on a system or from given
1872# disks as the parameter. It works by locating disks that are in use
1873# as swap devices and dump devices, and also disks listed in /etc/vfstab
1874#
1875# $@ given disks to find which are free, default is all disks in
1876# the test system
1877#
1878# @return a string containing the list of available disks
1879#*/
1880function find_disks
1881{
1882	sfi=/tmp/swaplist.$$
1883	dmpi=/tmp/dumpdev.$$
1884	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1885
1886	swap -l > $sfi
1887	dumpadm > $dmpi 2>/dev/null
1888
1889# write an awk script that can process the output of format
1890# to produce a list of disks we know about. Note that we have
1891# to escape "$2" so that the shell doesn't interpret it while
1892# we're creating the awk script.
1893# -------------------
1894	cat > /tmp/find_disks.awk <<EOF
1895#!/bin/nawk -f
1896	BEGIN { FS="."; }
1897
1898	/^Specify disk/{
1899		searchdisks=0;
1900	}
1901
1902	{
1903		if (searchdisks && \$2 !~ "^$"){
1904			split(\$2,arr," ");
1905			print arr[1];
1906		}
1907	}
1908
1909	/^AVAILABLE DISK SELECTIONS:/{
1910		searchdisks=1;
1911	}
1912EOF
1913#---------------------
1914
1915	chmod 755 /tmp/find_disks.awk
1916	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
1917	rm /tmp/find_disks.awk
1918
1919	unused=""
1920	for disk in $disks; do
1921	# Check for mounted
1922		grep "${disk}[sp]" /etc/mnttab >/dev/null
1923		(($? == 0)) && continue
1924	# Check for swap
1925		grep "${disk}[sp]" $sfi >/dev/null
1926		(($? == 0)) && continue
1927	# check for dump device
1928		grep "${disk}[sp]" $dmpi >/dev/null
1929		(($? == 0)) && continue
1930	# check to see if this disk hasn't been explicitly excluded
1931	# by a user-set environment variable
1932		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
1933		(($? == 0)) && continue
1934		unused_candidates="$unused_candidates $disk"
1935	done
1936	rm $sfi
1937	rm $dmpi
1938
1939# now just check to see if those disks do actually exist
1940# by looking for a device pointing to the first slice in
1941# each case. limit the number to max_finddisksnum
1942	count=0
1943	for disk in $unused_candidates; do
1944		if [ -b /dev/dsk/${disk}s0 ]; then
1945		if [ $count -lt $max_finddisksnum ]; then
1946			unused="$unused $disk"
1947			# do not impose limit if $@ is provided
1948			[[ -z $@ ]] && ((count = count + 1))
1949		fi
1950		fi
1951	done
1952
1953# finally, return our disk list
1954	echo $unused
1955}
1956
1957#
1958# Add specified user to specified group
1959#
1960# $1 group name
1961# $2 user name
1962# $3 base of the homedir (optional)
1963#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	if [[ -z $gname || -z $uname ]]; then
		log_fail "group name or user name are not defined."
	fi

	log_must useradd -g $gname -d $basedir/$uname -m $uname
	# Mark the account as passwordless (no authentication required).
	log_must passwd -N $uname

	return 0
}
1979
1980#
1981# Delete the specified user.
1982#
1983# $1 login name
1984# $2 base of the homedir (optional)
1985#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	if [[ -z $user ]]; then
		log_fail "login name is necessary."
	fi

	# Only remove the account when it actually exists.
	id $user > /dev/null 2>&1 && log_must userdel $user

	# Clean up the home directory regardless.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
2003
2004#
2005# Select valid gid and create specified group.
2006#
2007# $1 group name
2008#
function add_group #<group_name>
{
	typeset group=$1

	if [[ -z $group ]]; then
		log_fail "group name is necessary."
	fi

	# Probe gids upward from 100 until groupadd accepts one.
	typeset -i gid=100
	while true; do
		groupadd -g $gid $group > /dev/null 2>&1
		case $? in
			0) return 0 ;;
			# Exit 4: the gid is already taken - try the next.
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
2030
2031#
2032# Delete the specified group.
2033#
2034# $1 group name
2035#
function del_group #<group_name>
{
	typeset grp=$1

	if [[ -z $grp ]]; then
		log_fail "group name is necessary."
	fi

	# Renaming the group to its own name is a cheap existence probe.
	groupmod -n $grp $grp > /dev/null 2>&1
	case $? in
		# Exit 6: group does not exist - nothing to delete.
		6) return 0 ;;
		# Exit 9: the name exists as a group, so remove it.
		9) log_must groupdel $grp ;;
		*) return 1 ;;
	esac

	return 0
}
2055
2056#
2057# This function will return true if it's safe to destroy the pool passed
2058# as argument 1. It checks for pools based on zvols and files, and also
2059# files contained in a pool that may have a different mountpoint.
2060#
2061function safe_to_destroy_pool { # $1 the pool name
2062
2063	typeset pool=""
2064	typeset DONT_DESTROY=""
2065
2066	# We check that by deleting the $1 pool, we're not
2067	# going to pull the rug out from other pools. Do this
2068	# by looking at all other pools, ensuring that they
2069	# aren't built from files or zvols contained in this pool.
2070
2071	for pool in $(zpool list -H -o name)
2072	do
2073		ALTMOUNTPOOL=""
2074
2075		# this is a list of the top-level directories in each of the
2076		# files that make up the path to the files the pool is based on
2077		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2078			awk '{print $1}')
2079
2080		# this is a list of the zvols that make up the pool
2081		ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
2082		    | awk '{print $1}')
2083
2084		# also want to determine if it's a file-based pool using an
2085		# alternate mountpoint...
2086		POOL_FILE_DIRS=$(zpool status -v $pool | \
2087					grep / | awk '{print $1}' | \
2088					awk -F/ '{print $2}' | grep -v "dev")
2089
2090		for pooldir in $POOL_FILE_DIRS
2091		do
2092			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2093					grep "${pooldir}$" | awk '{print $1}')
2094
2095			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2096		done
2097
2098
2099		if [ ! -z "$ZVOLPOOL" ]
2100		then
2101			DONT_DESTROY="true"
2102			log_note "Pool $pool is built from $ZVOLPOOL on $1"
2103		fi
2104
2105		if [ ! -z "$FILEPOOL" ]
2106		then
2107			DONT_DESTROY="true"
2108			log_note "Pool $pool is built from $FILEPOOL on $1"
2109		fi
2110
2111		if [ ! -z "$ALTMOUNTPOOL" ]
2112		then
2113			DONT_DESTROY="true"
2114			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2115		fi
2116	done
2117
2118	if [ -z "${DONT_DESTROY}" ]
2119	then
2120		return 0
2121	else
2122		log_note "Warning: it is not safe to destroy $1!"
2123		return 1
2124	fi
2125}
2126
2127#
2128# Get the available ZFS compression options
2129# $1 option type zfs_set|zfs_compress
2130#
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
			gzip-6 gzip-7 gzip-8 gzip-9"

	# Base option set depends on the requested flavor.
	case $1 in
		zfs_compress) COMPRESS_OPTS="on lzjb" ;;
		zfs_set) COMPRESS_OPTS="on off lzjb" ;;
	esac

	typeset valid_opts="$COMPRESS_OPTS"
	# Append the gzip variants only when this build advertises them.
	if zfs get 2>&1 | grep gzip >/dev/null 2>&1; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
2149
2150#
2151# Verify zfs operation with -p option work as expected
2152# $1 operation, value could be create, clone or rename
2153# $2 dataset type, value could be fs or vol
2154# $3 dataset name
2155# $4 new dataset name
2156#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			# 'create' takes no source dataset; the target is
			# the name passed in $3.
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# Cloning requires an existing snapshot source.
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
					"when ops is $ops."
			fi
			# Renaming requires a non-snapshot source.
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	destroy_dataset ${newdataset%/*} "-rRf"

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redo the operation still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
2218
2219#
2220# Get configuration of pool
2221# $1 pool name
2222# $2 config name
2223#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	# The last column of 'zpool list -H' is the altroot; "-" means
	# no altroot is set, in which case the cached config is usable.
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	# NOTE(review): the two expansions below strip a leading and
	# trailing quote character from the zdb output; the unpaired
	# quote appears to be deliberate ksh pattern syntax - confirm
	# before reformatting, and do not insert anything between them.
	if [[ -n $value ]] ; then
		value=${value#'}
		value=${value%'}
	fi
	echo $value

	return 0
}
2249
2250#
# Private function. Randomly select one of the items from the arguments.
2252#
2253# $1 count
2254# $2-n string
2255#
function _random_get
{
	typeset cnt=$1
	shift

	typeset str="$@"
	typeset -i ind

	# Pick a random field index in the range [1, cnt].
	((ind = RANDOM % cnt + 1))

	# Fields are separated by single spaces; emit the chosen one.
	echo "$str" | cut -f $ind -d ' '
}
2268
2269#
# Randomly select one item from the arguments, which include the NONE string
2271#
function random_get_with_non
{
	typeset -i cnt=$#

	# Count one extra slot so the selection can land past the given
	# items, yielding an empty ("NONE") result. The previous code
	# used '((cnt =+ 1))', which assigned +1 instead of incrementing,
	# so cnt was always 1 regardless of the argument count.
	((cnt += 1))

	_random_get "$cnt" "$@"
}
2279
2280#
2281# Random select one of item from arguments which doesn't include NONE string
2282#
function random_get
{
	# Pass the exact argument count so one of "$@" is always chosen.
	_random_get "$#" "$@"
}
2287
2288#
2289# Detect if the current system support slog
2290#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b
	typeset -i ret=0

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	# A dry-run ('-n') pool creation with a log vdev succeeds only
	# when this system supports separate log devices.
	zpool create -n $pool $vdev log $sdev > /dev/null 2>&1 || ret=1

	rm -r $dir

	return $ret
}
2309
2310#
2311# The function will generate a dataset name with specific length
2312# $1, the length of the name
2313# $2, the base string to construct the name
2314#
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i iter
	typeset l_name=""

	# Ceiling division: enough copies of the base string to reach
	# (or just exceed) the requested length.
	((iter = (len + baselen - 1) / baselen))

	while ((iter > 0)); do
		l_name="${l_name}$basestr"
		((iter -= 1))
	done

	echo $l_name
}
2336
2337#
2338# Get cksum tuple of dataset
2339# $1 dataset name
2340#
2341# sample zdb output:
2342# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2343# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2344# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2345# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2346function datasetcksum
2347{
2348	typeset cksum
2349	sync
2350	sync_all_pools
2351	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
2352		| awk -F= '{print $7}')
2353	echo $cksum
2354}
2355
2356#
2357# Get cksum of file
# $1 file path
2359#
function checksum
{
	# The first field of cksum(1) output is the CRC value.
	cksum $1 | awk '{print $1}'
}
2366
2367#
2368# Get the given disk/slice state from the specific field of the pool
2369#
2370function get_device_state #pool disk field("", "spares","logs")
2371{
2372	typeset pool=$1
2373	typeset disk=${2#/dev/dsk/}
2374	typeset field=${3:-$pool}
2375
2376	state=$(zpool status -v "$pool" 2>/dev/null | \
2377		nawk -v device=$disk -v pool=$pool -v field=$field \
2378		'BEGIN {startconfig=0; startfield=0; }
2379		/config:/ {startconfig=1}
2380		(startconfig==1) && ($1==field) {startfield=1; next;}
2381		(startfield==1) && ($1==device) {print $2; exit;}
2382		(startfield==1) &&
2383		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2384	echo $state
2385}
2386
2387
2388#
2389# print the given directory filesystem type
2390#
2391# $1 directory name
2392#
function get_fstype
{
	typeset dir=$1

	[[ -n $dir ]] || log_fail "Usage: get_fstype <directory>"

	#
	# 'df -n' reports "<mountpoint> : <fstype>"; the filesystem
	# type is the third whitespace-separated field, e.g.:
	#  $ df -n /
	#  /		  : ufs
	#
	df -n $dir | awk '{print $3}'
}
2407
2408#
2409# Given a disk, label it to VTOC regardless what label was on the disk
2410# $1 disk
2411#
function labelvtoc
{
	typeset disk=$1
	typeset -i iter=120
	typeset -i ret_val=1

	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	# Build a scripted 'format' command file appropriate for the
	# architecture; x86 additionally needs an fdisk partition first.
	if [[ $arch == "i386" ]]; then
		log_must fdisk -B ${disk}p0

		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file
	elif [[ $arch == "sparc" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	# Disk update from fdisk -B may be delayed
	# Retry for up to 120 seconds; success exits the loop by
	# zeroing the counter.
	while ((iter > 0)); do
		if format -e -s -d $disk -f $label_file ; then
			iter=0
			ret_val=0
		else
			sleep 1
			((iter -= 1))
		fi
	done
	rm -f $label_file
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}
2460
2461#
# Check if the system was installed with a ZFS root (zfsroot)
# return: 0 if true, otherwise false
2464#
function is_zfsroot
{
	# Root is on ZFS iff 'df -n /' reports a zfs filesystem type.
	df -n / | grep zfs > /dev/null 2>&1
}
2470
2471#
2472# get the root filesystem name if it's zfsroot system.
2473#
2474# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	# The dataset mounted at "/" with type "zfs" is the root fs.
	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		/etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootfs"

	# Confirm the name really is a ZFS dataset before reporting it.
	if zfs list $rootfs > /dev/null 2>&1; then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
2490
2491#
2492# get the rootfs's pool name
2493# return:
2494#       rootpool name
2495#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	# The dataset mounted at "/" with type "zfs" is the root fs.
	rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		 /etc/mnttab)
	[[ -n "$rootfs" ]] || log_fail "Can not get rootpool"

	if zfs list $rootfs > /dev/null 2>&1; then
		# The pool is the first '/'-separated component of the
		# dataset name.
		rootpool=${rootfs%%/*}
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
2513
2514#
2515# Check if the given device is physical device
2516#
function is_physical_device #device
{
	typeset device=${1#/dev/dsk/}
	device=${device#/dev/rdsk/}

	# Solaris physical disks are named c#t#d# (hex digits allowed);
	# anything else (slices, files, loopbacks) does not match.
	echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
}
2525
2526#
2527# Get the directory path of given device
2528#
function get_device_dir #device
{
	typeset device=$1

	if is_physical_device $device ; then
		echo "/dev/dsk"
	else
		# For file-backed vdevs, report the containing directory
		# (the path itself when it is the root directory).
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		echo $device
	fi
}
2542
2543#
2544# Get the package name
2545#
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	# Turn a path such as a/b/c into the package name SUNWstc-a-b-c.
	echo "SUNWstc-${dirpath//\//-}"
}
2552
2553#
2554# Get the word numbers from a string separated by white space
2555#
function get_word_count
{
	# Count whitespace-separated words in the argument string.
	wc -w <<< "$1"
}
2560
2561#
# Verify that the required number of disks is given
2563#
function verify_disk_count
{
	# $1 - whitespace-separated disk list, $2 - minimum count (default 1)
	typeset -i needed=${2:-1}
	typeset -i have=$(get_word_count "$1")

	if ((have < needed)); then
		log_untested "A minimum of $needed disks is required to run." \
			" You specified $have disk(s)"
	fi
}
2575
function ds_is_volume
{
	# True if dataset $1 has type "volume".
	[[ "$(get_prop type $1)" == "volume" ]]
}
2582
function ds_is_filesystem
{
	# True if dataset $1 has type "filesystem".
	[[ "$(get_prop type $1)" == "filesystem" ]]
}
2589
function ds_is_snapshot
{
	# True if dataset $1 has type "snapshot".
	[[ "$(get_prop type $1)" == "snapshot" ]]
}
2596
2597#
2598# Check if Trusted Extensions are installed and enabled
2599#
function is_te_enabled
{
	# The pipeline's status (grep's) is the function's result.  As in
	# the original, a matching state line is echoed to stdout.
	svcs -H -o state labeld 2>/dev/null | grep "enabled"
}
2609
2610# Utility function to determine if a system has multiple cpus.
function is_mp
{
	# More than one psrinfo line means more than one cpu.
	[[ $(psrinfo | wc -l) -gt 1 ]]
}
2615
2616function get_cpu_freq
2617{
2618	psrinfo -v 0 | awk '/processor operates at/ {print $6}'
2619}
2620
2621# Run the given command as the user provided.
2622function user_run
2623{
2624	typeset user=$1
2625	shift
2626
2627	eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
2628	return $?
2629}
2630
2631#
2632# Check if the pool contains the specified vdevs
2633#
2634# $1 pool
2635# $2..n <vdev> ...
2636#
2637# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2638# vdevs is not in the pool, and 2 if pool name is missing.
2639#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	# Snapshot the pool's vdev listing once, then check each requested
	# vdev against it (basename only; zpool prints short names).
	typeset tmpfile=$(mktemp)
	zpool list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		if ! grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1; then
			# Bug fix: the early return used to skip the
			# cleanup below, leaking a temp file per mismatch.
			rm -f $tmpfile
			return 1
		fi
	done

	rm -f $tmpfile

	return 0
}
2663
# Print the numerically largest of the given integers.
function get_max
{
	# Bug fix: "typeset -l" is the lowercase-conversion flag in
	# ksh/bash, not "long integer"; use -i for arithmetic locals.
	typeset -i i max=$1
	shift

	for i in "$@"; do
		# Pure arithmetic comparison; no echo/subshell per element.
		if ((i > max)); then
			max=$i
		fi
	done

	echo $max
}
2675
# Print the numerically smallest of the given integers.
function get_min
{
	# Bug fix: "typeset -l" is the lowercase-conversion flag in
	# ksh/bash, not "long integer"; use -i for arithmetic locals.
	typeset -i i min=$1
	shift

	for i in "$@"; do
		# Pure arithmetic comparison; no echo/subshell per element.
		if ((i < min)); then
			min=$i
		fi
	done

	echo $min
}
2687
2688#
2689# Generate a random number between 1 and the argument.
2690#
function random
{
	typeset -i ceil=$1

	# Uniform-ish pick in [1, ceil] from the shell's RANDOM.
	echo $((RANDOM % ceil + 1))
}
2696
2697# Write data that can be compressed into a directory
2698function write_compressible
2699{
2700	typeset dir=$1
2701	typeset megs=$2
2702	typeset nfiles=${3:-1}
2703	typeset bs=${4:-1024k}
2704	typeset fname=${5:-file}
2705
2706	[[ -d $dir ]] || log_fail "No directory: $dir"
2707
2708	log_must eval "fio \
2709	    --name=job \
2710	    --fallocate=0 \
2711	    --minimal \
2712	    --randrepeat=0 \
2713	    --buffer_compress_percentage=66 \
2714	    --buffer_compress_chunk=4096 \
2715	    --directory=$dir \
2716	    --numjobs=$nfiles \
2717	    --rw=write \
2718	    --bs=$bs \
2719	    --filesize=$megs \
2720	    --filename_format='$fname.\$jobnum' >/dev/null"
2721}
2722
function get_objnum
{
	# Print the inode number of the given path (on ZFS this is the
	# object number); fails the test if the path does not exist.
	typeset target=$1

	[[ -e $target ]] || log_fail "No such file or directory: $target"
	stat -c %i $target
}
2732
2733#
2734# Sync data to the pool
2735#
2736# $1 pool name
2737# $2 boolean to force uberblock (and config including zpool cache file) update
2738#
function sync_pool #pool <force>
{
	typeset pool=${1:-$TESTPOOL}
	typeset force=${2:-false}
	typeset flags=""

	# "-f" forces an uberblock (and config/cachefile) update.
	[[ $force == true ]] && flags="-f"
	log_must zpool sync $flags $pool

	return 0
}
2752
2753#
2754# Sync all pools
2755#
2756# $1 boolean to force uberblock (and config including zpool cache file) update
2757#
function sync_all_pools #<force>
{
	typeset force=${1:-false}
	typeset flags=""

	# "-f" forces an uberblock (and config/cachefile) update.
	[[ $force == true ]] && flags="-f"
	log_must zpool sync $flags

	return 0
}
2770
2771#
2772# Wait for zpool 'freeing' property drops to zero.
2773#
2774# $1 pool name
2775#
function wait_freeing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Poll once a second until the pool reports nothing left to free.
	until [[ "$(zpool list -Ho freeing $pool)" == "0" ]]; do
		log_must sleep 1
	done
}
2784
2785#
2786# Prints the current time in seconds since UNIX Epoch.
2787#
2788function current_epoch
2789{
2790	printf '%(%s)T'
2791}
2792
2793#
2794# Wait for every device replace operation to complete
2795#
2796# $1 pool name
2797#
function wait_replacing #pool
{
	typeset pool=${1:-$TESTPOOL}

	# Loop while any "replacing-N" vdev is still present in the
	# status output, checking once a second.
	while [[ -n "$(zpool status $pool |
	    awk '/replacing-[0-9]+/ {print $1}')" ]]; do
		log_must sleep 1
	done
}
2807
2808#
2809# Set a global system tunable (64-bit value)
2810#
2811# $1 tunable name
2812# $2 tunable values
2813#
2814function set_tunable64
2815{
2816	set_tunable_impl "$1" "$2" Z
2817}
2818
2819#
2820# Set a global system tunable (32-bit value)
2821#
2822# $1 tunable name
2823# $2 tunable values
2824#
2825function set_tunable32
2826{
2827	set_tunable_impl "$1" "$2" W
2828}
2829
# Set a tunable in the given kernel module (default "zfs").
# $1 - tunable name, $2 - value, $3 - mdb write format char (illumos),
# $4 - module name (Linux only; illumos supports just "zfs").
# Returns 0 on success, 1 on bad arguments or failure to set.
function set_tunable_impl
{
	typeset tunable="$1"
	typeset value="$2"
	typeset mdb_cmd="$3"
	typeset module="${4:-zfs}"

	[[ -z "$tunable" ]] && return 1
	[[ -z "$value" ]] && return 1
	[[ -z "$mdb_cmd" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -w "$zfs_tunables/$tunable" ]] || return 1
		cat >"$zfs_tunables/$tunable" <<<"$value"
		return $?
		;;
	SunOS)
		# Bug fix: this was '[[ "$module" -eq "zfs" ]]', an
		# arithmetic compare that evaluates both strings to 0 and
		# therefore always succeeds; use a string comparison.
		[[ "$module" == "zfs" ]] || return 1
		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
		return $?
		;;
	esac
}
2855
2856#
2857# Get a global system tunable
2858#
2859# $1 tunable name
2860#
2861function get_tunable
2862{
2863	get_tunable_impl "$1"
2864}
2865
# Print the value of a tunable from the given kernel module (default
# "zfs").  Returns 0 and prints the value on success; returns 1 on bad
# arguments, unknown platform, or a missing Linux sysfs entry; fails
# the test if mdb cannot read the value on illumos.
function get_tunable_impl
{
	typeset tunable="$1"
	typeset module="${2:-zfs}"

	[[ -z "$tunable" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -f "$zfs_tunables/$tunable" ]] || return 1
		cat $zfs_tunables/$tunable
		return $?
		;;
	SunOS)
		# Bug fix: 'typeset value=$(mdb ...)' leaves $? as
		# typeset's status (always 0), masking mdb failures;
		# declare first, then assign and test the command itself.
		typeset value
		if ! value=$(mdb -k -e "$tunable::print | ::eval .=E"); then
			log_fail "Failed to get value of '$tunable' from mdb."
			return 1
		fi
		echo $value
		return 0
		;;
	esac

	return 1
}
2893
function new_fs #<args>
{
	# Build a new non-ZFS filesystem with platform-appropriate
	# newfs invocation; SunOS and others need "y" piped in to
	# confirm, and non-SunOS/FreeBSD platforms take -v.
	typeset os=$(uname)

	if [[ $os == FreeBSD ]]; then
		newfs "$@"
	elif [[ $os == SunOS ]]; then
		echo y | newfs "$@"
	else
		echo y | newfs -v "$@"
	fi
}
2908
2909#
2910# Wait for the specified arcstat to reach non-zero quiescence.
2911# If echo is 1 echo the value after reaching quiescence, otherwise
2912# if echo is 0 print the arcstat we are waiting on.
2913#
function arcstat_quiescence # stat echo
{
	# $1 - arcstat name; $2 - 1 to echo the settled value,
	#      0 to announce what we are waiting on.
	typeset stat=$1
	typeset verbose=$2
	typeset first_pass=true
	typeset stat1 stat2

	if [[ $verbose -eq 0 ]]; then
		echo "Waiting for arcstat $1 quiescence."
	fi

	# Sample twice, 2s apart, until two consecutive non-zero
	# samples agree.
	while $first_pass || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
		stat1=$(kstat arcstats.$stat)
		sleep 2
		stat2=$(kstat arcstats.$stat)
		first_pass=false
	done

	if [[ $verbose -eq 1 ]]; then
		echo $stat2
	fi
}
2935
function arcstat_quiescence_noecho # stat
{
	# Wait for the arcstat to settle without printing its value.
	arcstat_quiescence $1 0
}
2941
function arcstat_quiescence_echo # stat
{
	# Wait for the arcstat to settle and print the settled value.
	arcstat_quiescence $1 1
}
2947
2948#
2949# Compute SHA256 digest for given file or stdin if no file given.
2950# Note: file path must not contain spaces
2951#
function sha256digest
{
	typeset file=$1

	# Prefer the illumos digest(1) tool; fall back to GNU sha256sum,
	# trimming its trailing " *filename" field.
	if [ -x /usr/bin/digest ]; then
		/usr/bin/digest -a sha256 $file
		return 0
	fi
	if [ -x /usr/bin/sha256sum ]; then
		/usr/bin/sha256sum -b $file | awk '{ print $1 }'
		return 0
	fi
	echo "Cannot calculate SHA256 digest"
	return 1
}
2966
2967. ${STF_SUITE}/include/kstat.shlib
2968