xref: /freebsd/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib (revision 113e60742ef6ba5c069aa737ee57ba3c2f88b248)
1# SPDX-License-Identifier: CDDL-1.0
2#
3# CDDL HEADER START
4#
5# The contents of this file are subject to the terms of the
6# Common Development and Distribution License (the "License").
7# You may not use this file except in compliance with the License.
8#
9# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10# or https://opensource.org/licenses/CDDL-1.0.
11# See the License for the specific language governing permissions
12# and limitations under the License.
13#
14# When distributing Covered Code, include this CDDL HEADER in each
15# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16# If applicable, add the following below this CDDL HEADER, with the
17# fields enclosed by brackets "[]" replaced with your own identifying
18# information: Portions Copyright [yyyy] [name of copyright owner]
19#
20# CDDL HEADER END
21#
22
23#
24# Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
25# Copyright (c) 2012, 2020, Delphix. All rights reserved.
26# Copyright (c) 2017, Tim Chase. All rights reserved.
27# Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
28# Copyright (c) 2017, Lawrence Livermore National Security LLC.
29# Copyright (c) 2017, Datto Inc. All rights reserved.
30# Copyright (c) 2017, Open-E Inc. All rights reserved.
31# Copyright (c) 2021, The FreeBSD Foundation.
32# Copyright (c) 2025, Klara, Inc.
33# Use is subject to license terms.
34#
35
36. ${STF_SUITE}/include/tunables.cfg
37
38. ${STF_TOOLS}/include/logapi.shlib
39. ${STF_SUITE}/include/math.shlib
40. ${STF_SUITE}/include/blkdev.shlib
41
42
43# On AlmaLinux 9 we will see $PWD = '.' instead of the full path.  This causes
44# some tests to fail.  Fix it up here.
45if [ "$PWD" = "." ] ; then
46	PWD="$(readlink -f $PWD)"
47fi
48
49#
50# Apply constrained path when available.  This is required since the
51# PATH may have been modified by sudo's secure_path behavior.
52#
53if [ -n "$STF_PATH" ]; then
54	export PATH="$STF_PATH"
55fi
56
57#
58# Generic dot version comparison function
59#
60# Returns success when version $1 is greater than or equal to $2.
61#
62function compare_version_gte
63{
64	[ "$(printf "$1\n$2" | sort -V | tail -n1)" = "$1" ]
65}
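
# Example (illustrative) of how the comparison behaves for two literal
# version strings:
#   compare_version_gte "2.3.1" "2.2"	# succeeds: 2.3.1 >= 2.2
#   compare_version_gte "2.2" "2.3.1"	# fails:    2.2   <  2.3.1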
66
67# Helper function used by linux_version() and freebsd_version()
68# $1, if provided, should be a MAJOR, MAJOR.MINOR or MAJOR.MINOR.PATCH
69# version number
70function kernel_version
71{
72	typeset ver="$1"
73
74	[ -z "$ver" ] && case "$UNAME" in
75	Linux)
76		# Linux version numbers are X.Y.Z followed by optional
77		# vendor/distro specific stuff
78		#   RHEL7:       3.10.0-1160.108.1.el7.x86_64
79		#   Fedora 37:   6.5.12-100.fc37.x86_64
80		#   Debian 12.6: 6.1.0-22-amd64
81		ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
82		;;
83	FreeBSD)
84		# FreeBSD version numbers are X.Y-BRANCH-pZ. Depending on
85		# branch, -pZ may not be present, but this is typically only
86		# on pre-release or true .0 releases, so can be assumed 0
87		# if not present.
88		# eg:
89		#   13.2-RELEASE-p4
90		#   14.1-RELEASE
91		#   15.0-CURRENT
92		ver=$(uname -r | \
93		    grep -Eo "[0-9]+\.[0-9]+(-[A-Z0-9]+-p[0-9]+)?" | \
94		    sed -E "s/-[^-]+-p/./")
95		;;
96	*)
97		# Unknown system
98		log_fail "Don't know how to get kernel version for '$UNAME'"
99		;;
100	esac
101
102	typeset version major minor _
103	IFS='.' read -r version major minor _ <<<"$ver"
104
105	[ -z "$version" ] && version=0
106	[ -z "$major" ] && major=0
107	[ -z "$minor" ] && minor=0
108
109	echo $((version * 100000 + major * 1000 + minor))
110}
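
# Example (illustrative): kernel_version encodes "6.1.22" as
# 6 * 100000 + 1 * 1000 + 22 = 601022, so the encoded values can be
# compared numerically:
#   if [ $(kernel_version) -ge $(kernel_version "5.10") ]; then
#       log_note "running on a 5.10 or newer kernel"
#   fi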
111
112# Linux kernel version comparison function
113#
114# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
115#
116# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
117function linux_version {
118	kernel_version "$1"
119}
120
121# FreeBSD version comparison function
122#
123# $1 FreeBSD version ("13.2", "14.0") or blank for installed FreeBSD version
124#
125# Used for comparison: if [ $(freebsd_version) -ge $(freebsd_version "13.2") ]
126function freebsd_version {
127	kernel_version "$1"
128}
129
130# Determine if this is a Linux test system
131#
132# Return 0 if the platform is Linux, 1 otherwise
133
134function is_linux
135{
136	[ "$UNAME" = "Linux" ]
137}
138
139# Determine if this is an illumos test system
140#
141# Return 0 if the platform is illumos, 1 otherwise
142function is_illumos
143{
144	[ "$UNAME" = "illumos" ]
145}
146
147# Determine if this is a FreeBSD test system
148#
149# Return 0 if the platform is FreeBSD, 1 otherwise
150
151function is_freebsd
152{
153	[ "$UNAME" = "FreeBSD" ]
154}
155
156# Determine if this is a 32-bit system
157#
158# Return 0 if the platform is 32-bit, 1 otherwise
159
160function is_32bit
161{
162	[ $(getconf LONG_BIT) = "32" ]
163}
164
165# Determine if kmemleak is enabled
166#
167# Return 0 if kmemleak is enabled, 1 otherwise
168
169function is_kmemleak
170{
171	is_linux && [ -e /sys/kernel/debug/kmemleak ]
172}
173
174# Determine whether a dataset is mounted
175#
176# $1 dataset name
177# $2 filesystem type; optional - defaulted to zfs
178#
179# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
180
181function ismounted
182{
183	typeset fstype=$2
184	[[ -z $fstype ]] && fstype=zfs
185	typeset out dir name
186
187	case $fstype in
188		zfs)
189			if [[ "$1" == "/"* ]] ; then
190				! zfs mount | awk -v fs="$1" '$2 == fs {exit 1}'
191			else
192				! zfs mount | awk -v ds="$1" '$1 == ds {exit 1}'
193			fi
194		;;
195		ufs|nfs)
196			if is_freebsd; then
197				mount -pt $fstype | while read dev dir _t _flags; do
198					[[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
199				done
200			else
201				out=$(df -F $fstype $1 2>/dev/null) || return
202
203				dir=${out%%\(*}
204				dir=${dir%% *}
205				name=${out##*\(}
206				name=${name%%\)*}
207				name=${name%% *}
208
209				[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
210			fi
211		;;
212		ext*)
213			df -t $fstype $1 > /dev/null 2>&1
214		;;
215		zvol)
216			if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
217				link=$(readlink -f $ZVOL_DEVDIR/$1)
218				[[ -n "$link" ]] && \
219					mount | grep -q "^$link" && \
220						return 0
221			fi
222		;;
223		*)
224			false
225		;;
226	esac
227}
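
# Example (illustrative): ismounted accepts either a dataset name or a
# mountpoint, plus an optional filesystem type:
#   ismounted $TESTPOOL/$TESTFS && log_note "$TESTPOOL/$TESTFS is mounted"
#   ismounted $TESTDIR zfs || log_note "nothing mounted on $TESTDIR"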
228
229# Return 0 if a dataset is mounted; 1 otherwise
230#
231# $1 dataset name
232# $2 filesystem type; optional - defaulted to zfs
233
234function mounted
235{
236	ismounted $1 $2
237}
238
239# Return 0 if a dataset is unmounted; 1 otherwise
240#
241# $1 dataset name
242# $2 filesystem type; optional - defaulted to zfs
243
244function unmounted
245{
246	! ismounted $1 $2
247}
248
249function default_setup
250{
251	default_setup_noexit "$@"
252
253	log_pass
254}
255
256function default_setup_no_mountpoint
257{
258	default_setup_noexit "$1" "$2" "$3" "yes"
259
260	log_pass
261}
262
263#
264# Given a list of disks, set up storage pools and datasets.
265#
266function default_setup_noexit
267{
268	typeset disklist=$1
269	typeset container=$2
270	typeset volume=$3
271	typeset no_mountpoint=$4
272	log_note begin default_setup_noexit
273
274	if is_global_zone; then
275		if poolexists $TESTPOOL ; then
276			destroy_pool $TESTPOOL
277		fi
278		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
279		log_must zpool create -f $TESTPOOL $disklist
280	else
281		reexport_pool
282	fi
283
284	rm -rf $TESTDIR  || log_unresolved Could not remove $TESTDIR
285	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
286
287	log_must zfs create $TESTPOOL/$TESTFS
288	if [[ -z $no_mountpoint ]]; then
289		log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
290	fi
291
292	if [[ -n $container ]]; then
293		rm -rf $TESTDIR1  || \
294			log_unresolved Could not remove $TESTDIR1
295		mkdir -p $TESTDIR1 || \
296			log_unresolved Could not create $TESTDIR1
297
298		log_must zfs create $TESTPOOL/$TESTCTR
299		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
300		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
301		if [[ -z $no_mountpoint ]]; then
302			log_must zfs set mountpoint=$TESTDIR1 \
303			    $TESTPOOL/$TESTCTR/$TESTFS1
304		fi
305	fi
306
307	if [[ -n $volume ]]; then
308		if is_global_zone ; then
309			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
310			block_device_wait
311		else
312			log_must zfs create $TESTPOOL/$TESTVOL
313		fi
314	fi
315}
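
# Example (illustrative): a typical setup.ksh uses these helpers to build the
# default pool and filesystem from the disks under test:
#   DISK=${DISKS%% *}
#   default_setup $DISK			# pool + filesystem, then log_pass
#   default_setup_noexit "$DISKS" "true"	# pool, filesystem and container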
316
317#
318# Given a list of disks, set up a storage pool, a file system and
319# a container.
320#
321function default_container_setup
322{
323	typeset disklist=$1
324
325	default_setup "$disklist" "true"
326}
327
328#
329# Given a list of disks, set up a storage pool, a file system
330# and a volume.
331#
332function default_volume_setup
333{
334	typeset disklist=$1
335
336	default_setup "$disklist" "" "true"
337}
338
339#
340# Given a list of disks, set up a storage pool, a file system,
341# a container and a volume.
342#
343function default_container_volume_setup
344{
345	typeset disklist=$1
346
347	default_setup "$disklist" "true" "true"
348}
349
350#
351# Create a snapshot on a filesystem or volume. By default, create a snapshot on
352# the filesystem.
353#
354# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
355# $2 snapshot name. Default, $TESTSNAP
356#
357function create_snapshot
358{
359	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
360	typeset snap=${2:-$TESTSNAP}
361
362	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
363	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
364
365	if snapexists $fs_vol@$snap; then
366		log_fail "$fs_vol@$snap already exists."
367	fi
368	datasetexists $fs_vol || \
369		log_fail "$fs_vol must exist."
370
371	log_must zfs snapshot $fs_vol@$snap
372}
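
# Example (illustrative; the snapshot name "presnap" is arbitrary):
#   create_snapshot $TESTPOOL/$TESTFS presnap
#   ... modify the filesystem ...
#   log_must zfs rollback $TESTPOOL/$TESTFS@presnap
#   destroy_snapshot $TESTPOOL/$TESTFS@presnap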
373
374#
375# Create a clone from a snapshot, default clone name is $TESTCLONE.
376#
377# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
378# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
379#
380function create_clone   # snapshot clone
381{
382	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
383	typeset clone=${2:-$TESTPOOL/$TESTCLONE}
384
385	[[ -z $snap ]] && \
386		log_fail "Snapshot name is undefined."
387	[[ -z $clone ]] && \
388		log_fail "Clone name is undefined."
389
390	log_must zfs clone $snap $clone
391}
392
393#
394# Create a bookmark of the given snapshot.  By default, create a bookmark on
395# the filesystem.
396#
397# $1 Existing filesystem or volume name. Default, $TESTFS
398# $2 Existing snapshot name. Default, $TESTSNAP
399# $3 bookmark name. Default, $TESTBKMARK
400#
401function create_bookmark
402{
403	typeset fs_vol=${1:-$TESTFS}
404	typeset snap=${2:-$TESTSNAP}
405	typeset bkmark=${3:-$TESTBKMARK}
406
407	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
408	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
409	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
410
411	if bkmarkexists $fs_vol#$bkmark; then
412		log_fail "$fs_vol#$bkmark already exists."
413	fi
414	datasetexists $fs_vol || \
415		log_fail "$fs_vol must exist."
416	snapexists $fs_vol@$snap || \
417		log_fail "$fs_vol@$snap must exist."
418
419	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
420}
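
# Example (illustrative): create a bookmark and keep it around after the
# original snapshot is gone, e.g. as the source of a later incremental send:
#   create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
#   create_bookmark $TESTPOOL/$TESTFS $TESTSNAP $TESTBKMARK
#   destroy_snapshot $TESTPOOL/$TESTFS@$TESTSNAP
#   bkmarkexists $TESTPOOL/$TESTFS#$TESTBKMARK && log_note "bookmark present"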
421
422#
423# Create a temporary clone result of an interrupted resumable 'zfs receive'
424# $1 Destination filesystem name. Must not exist, will be created as the result
425#    of this function along with its %recv temporary clone
426# $2 Source filesystem name. Must not exist, will be created and destroyed
427#
428function create_recv_clone
429{
430	typeset recvfs="$1"
431	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
432	typeset snap="$sendfs@snap1"
433	typeset incr="$sendfs@snap2"
434	typeset mountpoint="$TESTDIR/create_recv_clone"
435	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
436
437	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
438
439	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
440	datasetexists $sendfs && log_fail "Send filesystem must not exist."
441
442	log_must zfs create -o compression=off -o mountpoint="$mountpoint" $sendfs
443	log_must zfs snapshot $snap
444	log_must eval "zfs send $snap | zfs recv -u $recvfs"
445	log_must mkfile 1m "$mountpoint/data"
446	log_must zfs snapshot $incr
447	log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
448	    iflag=fullblock > $sendfile"
449	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
450	destroy_dataset "$sendfs" "-r"
451	log_must rm -f "$sendfile"
452
453	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
454		log_fail "Error creating temporary $recvfs/%recv clone"
455	fi
456}
457
458function default_mirror_setup
459{
460	default_mirror_setup_noexit $1 $2 $3
461
462	log_pass
463}
464
465#
466# Given a pair of disks, set up a storage pool and dataset for the mirror
467# @parameters: $1 the primary side of the mirror
468#   $2 the secondary side of the mirror
469# @uses: ZPOOL ZFS TESTPOOL TESTFS
470function default_mirror_setup_noexit
471{
472	readonly func="default_mirror_setup_noexit"
473	typeset primary=$1
474	typeset secondary=$2
475
476	[[ -z $primary ]] && \
477		log_fail "$func: No parameters passed"
478	[[ -z $secondary ]] && \
479		log_fail "$func: No secondary partition passed"
480	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
481	log_must zpool create -f $TESTPOOL mirror $@
482	log_must zfs create $TESTPOOL/$TESTFS
483	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
484}
485
486#
487# Destroy the configured testpool mirrors.
488# The mirrors are of the form ${TESTPOOL}{number}.
489# @uses: ZPOOL ZFS TESTPOOL
490function destroy_mirrors
491{
492	default_cleanup_noexit
493
494	log_pass
495}
496
497function default_raidz_setup
498{
499	default_raidz_setup_noexit "$*"
500
501	log_pass
502}
503
504#
505# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
506# $1 the list of disks
507#
508function default_raidz_setup_noexit
509{
510	typeset disklist="$*"
511	disks=(${disklist[*]})
512
513	if [[ ${#disks[*]} -lt 2 ]]; then
514		log_fail "A raid-z requires a minimum of two disks."
515	fi
516
517	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
518	log_must zpool create -f $TESTPOOL raidz $disklist
519	log_must zfs create $TESTPOOL/$TESTFS
520	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
521}
522
523#
524# Common function used to clean up storage pools and datasets.
525#
526# Invoked at the start of the test suite to ensure the system
527# is in a known state, and also at the end of each set of
528# sub-tests to ensure errors from one set of tests don't
529# impact the execution of the next set.
530
531function default_cleanup
532{
533	default_cleanup_noexit
534
535	log_pass
536}
537
538#
539# Utility function used to list all available pool names.
540#
541# NOTE: $KEEP is a variable containing pool names, separated by a newline
542# character, that must be excluded from the returned list.
543#
544function get_all_pools
545{
546	zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
547}
548
549function default_cleanup_noexit
550{
551	typeset pool=""
552	#
553	# Destroying the pool will also destroy any
554	# filesystems it contains.
555	#
556	if is_global_zone; then
557		zfs unmount -a > /dev/null 2>&1
558		ALL_POOLS=$(get_all_pools)
559		# Here, we loop through the pools we're allowed to
560		# destroy, only destroying them if it's safe to do
561		# so.
562		while [ -n "${ALL_POOLS}" ]
563		do
564			for pool in ${ALL_POOLS}
565			do
566				if safe_to_destroy_pool $pool ;
567				then
568					destroy_pool $pool
569				fi
570			done
571			ALL_POOLS=$(get_all_pools)
572		done
573
574		zfs mount -a
575	else
576		typeset fs=""
577		for fs in $(zfs list -H -o name \
578		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
579			destroy_dataset "$fs" "-Rf"
580		done
581
582		# Clean up here to avoid leaving garbage directories behind.
583		for fs in $(zfs list -H -o name); do
584			[[ $fs == /$ZONE_POOL ]] && continue
585			[[ -d $fs ]] && log_must rm -rf $fs/*
586		done
587
588		#
589		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
590		# the default value
591		#
592		for fs in $(zfs list -H -o name); do
593			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
594				log_must zfs set reservation=none $fs
595				log_must zfs set recordsize=128K $fs
596				log_must zfs set mountpoint=/$fs $fs
597				typeset enc=$(get_prop encryption $fs)
598				if [ -z "$enc" ] || [ "$enc" = "off" ]; then
599					log_must zfs set checksum=on $fs
600				fi
601				log_must zfs set compression=off $fs
602				log_must zfs set atime=on $fs
603				log_must zfs set devices=off $fs
604				log_must zfs set exec=on $fs
605				log_must zfs set setuid=on $fs
606				log_must zfs set readonly=off $fs
607				log_must zfs set snapdir=hidden $fs
608				log_must zfs set aclmode=groupmask $fs
609				log_must zfs set aclinherit=secure $fs
610			fi
611		done
612	fi
613
614	[[ -d $TESTDIR ]] && \
615		log_must rm -rf $TESTDIR
616
617	disk1=${DISKS%% *}
618	if is_mpath_device $disk1; then
619		delete_partitions
620	fi
621
622	rm -f $TEST_BASE_DIR/{err,out}
623}
624
625
626#
627# Common function used to clean up storage pools, file systems
628# and containers.
629#
630function default_container_cleanup
631{
632	if ! is_global_zone; then
633		reexport_pool
634	fi
635
636	ismounted $TESTPOOL/$TESTCTR/$TESTFS1 &&
637	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
638
639	destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
640	destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
641
642	[[ -e $TESTDIR1 ]] && \
643	    log_must rm -rf $TESTDIR1
644
645	default_cleanup
646}
647
648#
649# Common function used to clean up a snapshot of a file system or volume.
650# Defaults to deleting the file system's snapshot.
651#
652# $1 snapshot name
653#
654function destroy_snapshot
655{
656	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
657
658	if ! snapexists $snap; then
659		log_fail "'$snap' does not exist."
660	fi
661
662	#
663	# The value returned by 'get_prop' may not match the real mountpoint
664	# when the snapshot is unmounted, so first check that the snapshot
665	# is actually mounted on the current system.
666	#
667	typeset mtpt=""
668	if ismounted $snap; then
669		mtpt=$(get_prop mountpoint $snap)
670	fi
671
672	destroy_dataset "$snap"
673	[[ $mtpt != "" && -d $mtpt ]] && \
674		log_must rm -rf $mtpt
675}
676
677#
678# Common function used to clean up a clone.
679#
680# $1 clone name
681#
682function destroy_clone
683{
684	typeset clone=${1:-$TESTPOOL/$TESTCLONE}
685
686	if ! datasetexists $clone; then
687		log_fail "'$clone' does not exist."
688	fi
689
690	# For the same reason as in destroy_snapshot
691	typeset mtpt=""
692	if ismounted $clone; then
693		mtpt=$(get_prop mountpoint $clone)
694	fi
695
696	destroy_dataset "$clone"
697	[[ $mtpt != "" && -d $mtpt ]] && \
698		log_must rm -rf $mtpt
699}
700
701#
702# Common function used to clean up a bookmark of a file system or volume.
703# Defaults to deleting the file system's bookmark.
704#
705# $1 bookmark name
706#
707function destroy_bookmark
708{
709	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
710
711	if ! bkmarkexists $bkmark; then
712		log_fail "'$bkmark' does not exist."
713	fi
714
715	destroy_dataset "$bkmark"
716}
717
718# Return 0 if a snapshot exists; $? otherwise
719#
720# $1 - snapshot name
721
722function snapexists
723{
724	zfs list -H -t snapshot "$1" > /dev/null 2>&1
725}
726
727#
728# Return 0 if a bookmark exists; $? otherwise
729#
730# $1 - bookmark name
731#
732function bkmarkexists
733{
734	zfs list -H -t bookmark "$1" > /dev/null 2>&1
735}
736
737#
738# Return 0 if a hold exists; $? otherwise
739#
740# $1 - hold tag
741# $2 - snapshot name
742#
743function holdexists
744{
745	! zfs holds "$2" | awk -v t="$1" '$2 ~ t { exit 1 }'
746}
747
748#
749# Set a property to a certain value on a dataset.
750# Sets a property of the dataset to the value as passed in.
751# @param:
752#	$1 dataset whose property is being set
753#	$2 property to set
754#	$3 value to set property to
755# @return:
756#	0 if the property could be set.
757#	non-zero otherwise.
758# @use: ZFS
759#
760function dataset_setprop
761{
762	typeset fn=dataset_setprop
763
764	if (($# < 3)); then
765		log_note "$fn: Insufficient parameters (need 3, had $#)"
766		return 1
767	fi
768	typeset output=
769	output=$(zfs set $2=$3 $1 2>&1)
770	typeset rv=$?
771	if ((rv != 0)); then
772		log_note "Setting property on $1 failed."
773		log_note "property $2=$3"
774		log_note "Return Code: $rv"
775		log_note "Output: $output"
776		return $rv
777	fi
778	return 0
779}
780
781#
782# Check a numeric assertion
783# @parameter: $@ the assertion to check
784# @output: big loud notice if assertion failed
785# @use: log_fail
786#
787function assert
788{
789	(($@)) || log_fail "$@"
790}
791
792#
793# Function to format partition size of a disk
794# Given a disk cxtxdx reduces all partitions
795# to 0 size
796#
797function zero_partitions #<whole_disk_name>
798{
799	typeset diskname=$1
800	typeset i
801
802	if is_freebsd; then
803		gpart destroy -F $diskname
804	elif is_linux; then
805		DSK=$DEV_DSKDIR/$diskname
806		DSK=$(echo $DSK | sed -e "s|//|/|g")
807		log_must parted $DSK -s -- mklabel gpt
808		blockdev --rereadpt $DSK 2>/dev/null
809		block_device_wait
810	else
811		for i in 0 1 3 4 5 6 7
812		do
813			log_must set_partition $i "" 0mb $diskname
814		done
815	fi
816
817	return 0
818}
819
820#
821# Given a slice, size and disk, this function
822# formats the slice to the specified size.
823# Size should be specified with units as per
824# the `format` command requirements, e.g. 100mb 3gb
825#
826# NOTE: This entire interface is problematic for the Linux parted utility
827# which requires the end of the partition to be specified.  It would be
828# best to retire this interface and replace it with something more flexible.
829# At the moment a best effort is made.
830#
831# arguments: <slice_num> <slice_start> <size_plus_units>  <whole_disk_name>
832function set_partition
833{
834	typeset -i slicenum=$1
835	typeset start=$2
836	typeset size=$3
837	typeset disk=${4#$DEV_DSKDIR/}
838	disk=${disk#$DEV_RDSKDIR/}
839
840	case "$UNAME" in
841	Linux)
842		if [[ -z $size || -z $disk ]]; then
843			log_fail "The size or disk name is unspecified."
844		fi
845		disk=$DEV_DSKDIR/$disk
846		typeset size_mb=${size%%[mMgG]}
847
848		size_mb=${size_mb%%[mMgG][bB]}
849		if [[ ${size:1:1} == 'g' ]]; then
850			((size_mb = size_mb * 1024))
851		fi
852
853		# Create GPT partition table when setting slice 0 or
854		# when the device doesn't already contain a GPT label.
855		parted $disk -s -- print 1 >/dev/null
856		typeset ret_val=$?
857		if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
858			if ! parted $disk -s -- mklabel gpt; then
859				log_note "Failed to create GPT partition table on $disk"
860				return 1
861			fi
862		fi
863
864		# When no start is given align on the first cylinder.
865		if [[ -z "$start" ]]; then
866			start=1
867		fi
868
869		# Determine the cylinder size for the device and using
870		# that calculate the end offset in cylinders.
871		typeset -i cly_size_kb=0
872		cly_size_kb=$(parted -m $disk -s -- unit cyl print |
873			awk -F '[:k.]' 'NR == 3 {print $4}')
874		((end = (size_mb * 1024 / cly_size_kb) + start))
875
876		parted $disk -s -- \
877		    mkpart part$slicenum ${start}cyl ${end}cyl
878		typeset ret_val=$?
879		if [[ $ret_val -ne 0 ]]; then
880			log_note "Failed to create partition $slicenum on $disk"
881			return 1
882		fi
883
884		blockdev --rereadpt $disk 2>/dev/null
885		block_device_wait $disk
886		;;
887	FreeBSD)
888		if [[ -z $size || -z $disk ]]; then
889			log_fail "The size or disk name is unspecified."
890		fi
891		disk=$DEV_DSKDIR/$disk
892
893		if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
894			gpart destroy -F $disk >/dev/null 2>&1
895			if ! gpart create -s GPT $disk; then
896				log_note "Failed to create GPT partition table on $disk"
897				return 1
898			fi
899		fi
900
901		typeset index=$((slicenum + 1))
902
903		if [[ -n $start ]]; then
904			start="-b $start"
905		fi
906		if ! gpart add -t freebsd-zfs $start -s $size -i $index $disk; then
908			log_note "Failed to create partition $slicenum on $disk"
909			return 1
910		fi
911
912		block_device_wait $disk
913		;;
914	*)
915		if [[ -z $slicenum || -z $size || -z $disk ]]; then
916			log_fail "The slice, size or disk name is unspecified."
917		fi
918
919		typeset format_file="$TEST_BASE_DIR"/format_in.$$
920
921		echo "partition" >$format_file
922		echo "$slicenum" >> $format_file
923		echo "" >> $format_file
924		echo "" >> $format_file
925		echo "$start" >> $format_file
926		echo "$size" >> $format_file
927		echo "label" >> $format_file
928		echo "" >> $format_file
929		echo "q" >> $format_file
930		echo "q" >> $format_file
931
932		format -e -s -d $disk -f $format_file
933		typeset ret_val=$?
934		rm -f $format_file
935		;;
936	esac
937
938	if [[ $ret_val -ne 0 ]]; then
939		log_note "Unable to format $disk slice $slicenum to $size"
940		return 1
941	fi
942	return 0
943}
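
# Example (illustrative; assumes $DISK names a whole disk that may be wiped):
#   log_must set_partition 0 "" 100mb $DISK
#   log_must set_partition 1 "$(get_endslice $DISK 0)" 100mb $DISK
#   log_must zero_partitions $DISK	# reset the disk when done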
944
945#
946# Delete all partitions on all disks - this is specifically for the use of multipath
947# devices which currently can only be used in the test suite as raw/un-partitioned
948# devices (i.e. a zpool cannot be created on a whole mpath device that has partitions)
949#
950function delete_partitions
951{
952	typeset disk
953
954	if [[ -z $DISKSARRAY ]]; then
955		DISKSARRAY=$DISKS
956	fi
957
958	if is_linux; then
959		typeset -i part
960		for disk in $DISKSARRAY; do
961			for (( part = 1; part < MAX_PARTITIONS; part++ )); do
962				typeset partition=${disk}${SLICE_PREFIX}${part}
963				parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
964				if lsblk | grep -qF ${partition}; then
965					log_fail "Partition ${partition} not deleted"
966				else
967					log_note "Partition ${partition} deleted"
968				fi
969			done
970		done
971	elif is_freebsd; then
972		for disk in $DISKSARRAY; do
973			if gpart destroy -F $disk; then
974				log_note "Partitions for ${disk} deleted"
975			else
976				log_fail "Partitions for ${disk} not deleted"
977			fi
978		done
979	fi
980}
981
982#
983# Get the end cyl of the given slice
984#
985function get_endslice #<disk> <slice>
986{
987	typeset disk=$1
988	typeset slice=$2
989	if [[ -z $disk || -z $slice ]] ; then
990		log_fail "The disk name or slice number is unspecified."
991	fi
992
993	case "$UNAME" in
994	Linux)
995		endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
996			awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}')
997		((endcyl = (endcyl + 1)))
998		;;
999	FreeBSD)
1000		disk=${disk#/dev/zvol/}
1001		disk=${disk%p*}
1002		slice=$((slice + 1))
1003		endcyl=$(gpart show $disk | \
1004			awk -v slice=$slice '$3 == slice { print $1 + $2 }')
1005		;;
1006	*)
1007		disk=${disk#/dev/dsk/}
1008		disk=${disk#/dev/rdsk/}
1009		disk=${disk%s*}
1010
1011		typeset -i ratio=0
1012		ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
1013		    awk '/sectors\/cylinder/ {print $2}')
1014
1015		if ((ratio == 0)); then
1016			return
1017		fi
1018
1019		typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
1020		    awk -v token="$slice" '$1 == token {print $6}')
1021
1022		((endcyl = (endcyl + 1) / ratio))
1023		;;
1024	esac
1025
1026	echo $endcyl
1027}
1028
1029
1030#
1031# Given a size, disk and total slice number, this function formats the
1032# disk slices from 0 to the total slice number with the same specified
1033# size.
1034#
1035function partition_disk	#<slice_size> <whole_disk_name>	<total_slices>
1036{
1037	typeset -i i=0
1038	typeset slice_size=$1
1039	typeset disk_name=$2
1040	typeset total_slices=$3
1041	typeset cyl
1042
1043	zero_partitions $disk_name
1044	while ((i < $total_slices)); do
1045		if ! is_linux; then
1046			if ((i == 2)); then
1047				((i = i + 1))
1048				continue
1049			fi
1050		fi
1051		log_must set_partition $i "$cyl" $slice_size $disk_name
1052		cyl=$(get_endslice $disk_name $i)
1053		((i = i+1))
1054	done
1055}
1056
1057#
1058# This function continues to write to a filenum number of files into dirnum
1059# number of directories until either file_write returns an error or the
1060# maximum number of files per directory have been written.
1061#
1062# Usage:
1063# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1064#
1065# Return value: 0 on success
1066#		non 0 on error
1067#
1068# Where :
1069#	destdir:    is the directory where everything is to be created under
1070#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
1071#	filenum:    the maximum number of files per subdirectory
1072#	bytes:	    number of bytes to write
1073#	num_writes: number of times to write out bytes
1074#	data:	    the data that will be written
1075#
1076#	E.g.
1077#	fill_fs /testdir 20 25 1024 256 0
1078#
1079# Note: bytes * num_writes equals the size of the testfile
1080#
1081function fill_fs # destdir dirnum filenum bytes num_writes data
1082{
1083	typeset destdir=${1:-$TESTDIR}
1084	typeset -i dirnum=${2:-50}
1085	typeset -i filenum=${3:-50}
1086	typeset -i bytes=${4:-8192}
1087	typeset -i num_writes=${5:-10240}
1088	typeset data=${6:-"R"}
1089
1090	mkdir -p $destdir/{1..$dirnum}
1091	for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
1092		file_write -o create -f $f -b $bytes -c $num_writes -d $data \
1093		|| return
1094	done
1095}
1096
1097# Get the specified dataset property in parsable format or fail
1098function get_prop # property dataset
1099{
1100	typeset prop=$1
1101	typeset dataset=$2
1102
1103	zfs get -Hpo value "$prop" "$dataset" || log_fail "zfs get $prop $dataset"
1104}
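
# Example (illustrative): properties are returned in parsable (-p) form, so
# numeric values can be compared directly:
#   typeset used=$(get_prop used $TESTPOOL/$TESTFS)
#   typeset quota=$(get_prop quota $TESTPOOL/$TESTFS)
#   [[ $quota -eq 0 || $used -le $quota ]] || log_fail "used exceeds quota"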
1105
1106# Get the specified pool property in parsable format or fail
1107function get_pool_prop # property pool
1108{
1109	typeset prop=$1
1110	typeset pool=$2
1111
1112	zpool get -Hpo value "$prop" "$pool" || log_fail "zpool get $prop $pool"
1113}
1114
1115# Get the specified vdev property in parsable format or fail
1116function get_vdev_prop
1117{
1118	typeset prop="$1"
1119	typeset pool="$2"
1120	typeset vdev="$3"
1121
1122	zpool get -Hpo value "$prop" "$pool" "$vdev" || log_fail "zpool get $prop $pool $vdev"
1123}
1124
1125# Return 0 if a pool exists; $? otherwise
1126#
1127# $1 - pool name
1128
1129function poolexists
1130{
1131	typeset pool=$1
1132
1133	if [[ -z $pool ]]; then
1134		log_note "No pool name given."
1135		return 1
1136	fi
1137
1138	zpool get name "$pool" > /dev/null 2>&1
1139}
1140
1141# Return 0 if all the specified datasets exist; $? otherwise
1142#
1143# $1-n  dataset name
1144function datasetexists
1145{
1146	if (($# == 0)); then
1147		log_note "No dataset name given."
1148		return 1
1149	fi
1150
1151	zfs get name "$@" > /dev/null 2>&1
1152}
1153
1154# Return 0 if none of the specified datasets exists, otherwise return 1.
1155#
1156# $1-n  dataset name
1157function datasetnonexists
1158{
1159	if (($# == 0)); then
1160		log_note "No dataset name given."
1161		return 1
1162	fi
1163
1164	while (($# > 0)); do
1165		zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
1166		    && return 1
1167		shift
1168	done
1169
1170	return 0
1171}
1172
1173# FreeBSD breaks exports(5) at whitespace and doesn't process escapes
1174# Solaris just breaks
1175#
1176# cf. https://github.com/openzfs/zfs/pull/13165#issuecomment-1059845807
1177#
1178# Linux can have spaces (which are \OOO-escaped),
1179# but can't have backslashes because they're parsed recursively
1180function shares_can_have_whitespace
1181{
1182	is_linux
1183}
1184
1185function is_shared_freebsd
1186{
1187	typeset fs=$1
1188
1189	pgrep -q mountd && showmount -E | grep -qx "$fs"
1190}
1191
1192function is_shared_illumos
1193{
1194	typeset fs=$1
1195	typeset mtpt
1196
1197	for mtpt in `share | awk '{print $2}'` ; do
1198		if [[ $mtpt == $fs ]] ; then
1199			return 0
1200		fi
1201	done
1202
1203	typeset stat=$(svcs -H -o STA nfs/server:default)
1204	if [[ $stat != "ON" ]]; then
1205		log_note "Current nfs/server status: $stat"
1206	fi
1207
1208	return 1
1209}
1210
1211function is_shared_linux
1212{
1213	typeset fs=$1
1214	! exportfs -s | awk -v fs="${fs//\\/\\\\}" '/^\// && $1 == fs {exit 1}'
1215}
1216
1217#
1218# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1219#
1220# Returns 0 if shared, 1 otherwise.
1221#
1222function is_shared
1223{
1224	typeset fs=$1
1225	typeset mtpt
1226
1227	if [[ $fs != "/"* ]] ; then
1228		if datasetnonexists "$fs" ; then
1229			return 1
1230		else
1231			mtpt=$(get_prop mountpoint "$fs")
1232			case "$mtpt" in
1233				none|legacy|-) return 1
1234					;;
1235				*)	fs=$mtpt
1236					;;
1237			esac
1238		fi
1239	fi
1240
1241	case "$UNAME" in
1242	FreeBSD)	is_shared_freebsd "$fs"	;;
1243	Linux)		is_shared_linux "$fs"	;;
1244	*)		is_shared_illumos "$fs"	;;
1245	esac
1246}
1247
1248function is_exported_illumos
1249{
1250	typeset fs=$1
1251	typeset mtpt _
1252
1253	while read -r mtpt _; do
1254		[ "$mtpt" = "$fs" ] && return
1255	done < /etc/dfs/sharetab
1256
1257	return 1
1258}
1259
1260function is_exported_freebsd
1261{
1262	typeset fs=$1
1263	typeset mtpt _
1264
1265	while read -r mtpt _; do
1266		[ "$mtpt" = "$fs" ] && return
1267	done < /etc/zfs/exports
1268
1269	return 1
1270}
1271
1272function is_exported_linux
1273{
1274	typeset fs=$1
1275	typeset mtpt _
1276
1277	while read -r mtpt _; do
1278		[ "$(printf "$mtpt")" = "$fs" ] && return
1279	done < /etc/exports.d/zfs.exports
1280
1281	return 1
1282}
1283
1284#
1285# Given a mountpoint, or a dataset name, determine if it is exported via
1286# the os-specific NFS exports file.
1287#
1288# Returns 0 if exported, 1 otherwise.
1289#
1290function is_exported
1291{
1292	typeset fs=$1
1293	typeset mtpt
1294
1295	if [[ $fs != "/"* ]] ; then
1296		if datasetnonexists "$fs" ; then
1297			return 1
1298		else
1299			mtpt=$(get_prop mountpoint "$fs")
1300			case $mtpt in
1301				none|legacy|-) return 1
1302					;;
1303				*)	fs=$mtpt
1304					;;
1305			esac
1306		fi
1307	fi
1308
1309	case "$UNAME" in
1310	FreeBSD)	is_exported_freebsd "$fs"	;;
1311	Linux)		is_exported_linux "$fs"	;;
1312	*)		is_exported_illumos "$fs"	;;
1313	esac
1314}
1315
1316#
1317# Given a dataset name determine if it is shared via SMB.
1318#
1319# Returns 0 if shared, 1 otherwise.
1320#
1321function is_shared_smb
1322{
1323	typeset fs=$1
1324
1325	datasetexists "$fs" || return
1326
1327	if is_linux; then
1328		net usershare list | grep -xFq "${fs//[-\/]/_}"
1329	else
1330		log_note "SMB on $UNAME currently unsupported by the test framework"
1331		return 1
1332	fi
1333}
1334
1335#
1336# Given a mountpoint, determine if it is not shared via NFS.
1337#
1338# Returns 0 if not shared, 1 otherwise.
1339#
1340function not_shared
1341{
1342	! is_shared $1
1343}
1344
1345#
1346# Given a dataset determine if it is not shared via SMB.
1347#
1348# Returns 0 if not shared, 1 otherwise.
1349#
1350function not_shared_smb
1351{
1352	! is_shared_smb $1
1353}
1354
1355#
1356# Helper function to unshare a mountpoint.
1357#
1358function unshare_fs #fs
1359{
1360	typeset fs=$1
1361
1362	if is_shared $fs || is_shared_smb $fs; then
1363		log_must zfs unshare $fs
1364	fi
1365}
1366
1367#
1368# Helper function to share a NFS mountpoint.
1369#
1370function share_nfs #fs
1371{
1372	typeset fs=$1
1373
1374	is_shared "$fs" && return
1375
1376	case "$UNAME" in
1377	Linux)
1378		log_must exportfs "*:$fs"
1379		;;
1380	FreeBSD)
1381		typeset mountd
1382		read -r mountd < /var/run/mountd.pid
1383		log_must eval "printf '%s\t\n' \"$fs\" >> /etc/zfs/exports"
1384		log_must kill -s HUP "$mountd"
1385		;;
1386	*)
1387		log_must share -F nfs "$fs"
1388		;;
1389	esac
1390
1391	return 0
1392}
1393
1394#
1395# Helper function to unshare a NFS mountpoint.
1396#
1397function unshare_nfs #fs
1398{
1399	typeset fs=$1
1400
1401	! is_shared "$fs" && return
1402
1403	case "$UNAME" in
1404	Linux)
1405		log_must exportfs -u "*:$fs"
1406		;;
1407	FreeBSD)
1408		typeset mountd
1409		read -r mountd < /var/run/mountd.pid
1410		awk -v fs="${fs//\\/\\\\}" '$1 != fs' /etc/zfs/exports > /etc/zfs/exports.$$
1411		log_must mv /etc/zfs/exports.$$ /etc/zfs/exports
1412		log_must kill -s HUP "$mountd"
1413		;;
1414	*)
1415		log_must unshare -F nfs $fs
1416		;;
1417	esac
1418
1419	return 0
1420}
1421
1422#
1423# Helper function to show NFS shares.
1424#
1425function showshares_nfs
1426{
1427	case "$UNAME" in
1428	Linux)
1429		exportfs -v
1430		;;
1431	FreeBSD)
1432		showmount
1433		;;
1434	*)
1435		share -F nfs
1436		;;
1437	esac
1438}
1439
1440function check_nfs
1441{
1442	case "$UNAME" in
1443	Linux)
1444		exportfs -s
1445		;;
1446	FreeBSD)
1447		showmount -e
1448		;;
1449	*)
1450		log_unsupported "Unknown platform"
1451		;;
1452	esac || log_unsupported "The NFS utilities are not installed"
1453}
1454
1455#
1456# Check the NFS server status and bring it online if needed.
1457#
1458function setup_nfs_server
1459{
1460	# Cannot share directory in non-global zone.
1461	#
1462	if ! is_global_zone; then
1463		log_note "Cannot trigger NFS server by sharing in LZ."
1464		return
1465	fi
1466
1467	if is_linux; then
1468		#
1469		# Re-synchronize /var/lib/nfs/etab with /etc/exports and
1470		# /etc/exports.d/* to provide a clean test environment.
1471		#
1472		log_must exportfs -r
1473
1474		log_note "NFS server must be started prior to running ZTS."
1475		return
1476	elif is_freebsd; then
1477		log_must kill -s HUP $(</var/run/mountd.pid)
1478
1479		log_note "NFS server must be started prior to running ZTS."
1480		return
1481	fi
1482
1483	typeset nfs_fmri="svc:/network/nfs/server:default"
1484	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1485		#
1486		# Only a real sharing operation can bring the NFS server
1487		# online permanently.
1488		#
1489		typeset dummy=/tmp/dummy
1490
1491		if [[ -d $dummy ]]; then
1492			log_must rm -rf $dummy
1493		fi
1494
1495		log_must mkdir $dummy
1496		log_must share $dummy
1497
1498		#
1499		# Wait for the FMRI's status to reach its final state.  While in
1500		# transition an asterisk (*) is appended to the instance state,
1501		# and unsharing would revert the status to 'DIS' again.
1502		#
1503		# Wait at least 1 second.
1504		#
1505		log_must sleep 1
1506		timeout=10
1507		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1508		do
1509			log_must sleep 1
1510
1511			((timeout -= 1))
1512		done
1513
1514		log_must unshare $dummy
1515		log_must rm -rf $dummy
1516	fi
1517
1518	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1519}
1520
1521#
1522# Verify whether the calling process is in the global zone
1523#
1524# Return 0 if in the global zone, 1 if in a non-global zone
1525#
1526function is_global_zone
1527{
1528	if is_linux || is_freebsd; then
1529		return 0
1530	else
1531		typeset cur_zone=$(zonename 2>/dev/null)
1532		[ "$cur_zone" = "global" ]
1533	fi
1534}
1535
1536#
1537# Verify whether test is permitted to run from
1538# global zone, local zone, or both
1539#
1540# $1 zone limit, could be "global", "local", or "both"(no limit)
1541#
1542# Return 0 if permitted, otherwise exit with log_unsupported
1543#
1544function verify_runnable # zone limit
1545{
1546	typeset limit=$1
1547
1548	[[ -z $limit ]] && return 0
1549
1550	if is_global_zone ; then
1551		case $limit in
1552			global|both)
1553				;;
1554			local)	log_unsupported "Test is unable to run from "\
1555					"global zone."
1556				;;
1557			*)	log_note "Warning: unknown limit $limit - " \
1558					"use both."
1559				;;
1560		esac
1561	else
1562		case $limit in
1563			local|both)
1564				;;
1565			global)	log_unsupported "Test is unable to run from "\
1566					"local zone."
1567				;;
1568			*)	log_note "Warning: unknown limit $limit - " \
1569					"use both."
1570				;;
1571		esac
1572
1573		reexport_pool
1574	fi
1575
1576	return 0
1577}
1578
1579# Return 0 if created successfully or the pool already exists; $? otherwise
1580# Note: In local zones, this function should return 0 silently.
1581#
1582# $1 - pool name
1583# $2-n - [keyword] devs_list
1584
1585function create_pool #pool devs_list
1586{
1587	typeset pool=${1%%/*}
1588
1589	shift
1590
1591	if [[ -z $pool ]]; then
1592		log_note "Missing pool name."
1593		return 1
1594	fi
1595
1596	if poolexists $pool ; then
1597		destroy_pool $pool
1598	fi
1599
1600	if is_global_zone ; then
1601		[[ -d /$pool ]] && rm -rf /$pool
1602		log_must zpool create -f $pool $@
1603	fi
1604
1605	return 0
1606}
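
# Example (illustrative; $DISK1/$DISK2 stand for two disks from $DISKS):
#   create_pool $TESTPOOL mirror $DISK1 $DISK2
#   ... run the test ...
#   destroy_pool $TESTPOOL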
1607
1608# Return 0 if destroyed successfully; $? otherwise
1609# Note: In local zones, this function should return 0 silently.
1610#
1611# $1 - pool name
1612# Destroy pool with the given parameters.
1613
1614function destroy_pool #pool
1615{
1616	typeset pool=${1%%/*}
1617	typeset mtpt
1618
1619	if [[ -z $pool ]]; then
1620		log_note "No pool name given."
1621		return 1
1622	fi
1623
1624	if is_global_zone ; then
1625		if poolexists "$pool" ; then
1626			mtpt=$(get_prop mountpoint "$pool")
1627
1628			# At times, syseventd/udev activity can cause attempts
1629			# to destroy a pool to fail with EBUSY. We retry a few
1630			# times allowing failures before requiring the destroy
1631			# to succeed.
1632			log_must_busy zpool destroy -f $pool
1633
1634			[[ -d $mtpt ]] && \
1635				log_must rm -rf $mtpt
1636		else
1637			log_note "Pool does not exist. ($pool)"
1638			return 1
1639		fi
1640	fi
1641
1642	return 0
1643}
1644
1645# Return 0 if created successfully; $? otherwise
1646#
1647# $1 - dataset name
1648# $2-n - dataset options
1649
1650function create_dataset #dataset dataset_options
1651{
1652	typeset dataset=$1
1653
1654	shift
1655
1656	if [[ -z $dataset ]]; then
1657		log_note "Missing dataset name."
1658		return 1
1659	fi
1660
1661	if datasetexists $dataset ; then
1662		destroy_dataset $dataset
1663	fi
1664
1665	log_must zfs create $@ $dataset
1666
1667	return 0
1668}
1669
1670# Return 0 if destroyed successfully; $? otherwise
1671# Note: In local zones, this function should return 0 silently.
1672#
1673# $1 - dataset name
1674# $2 - custom arguments for zfs destroy
1675# Destroy dataset with the given parameters.
1676
1677function destroy_dataset # dataset [args]
1678{
1679	typeset dataset=$1
1680	typeset mtpt
1681	typeset args=${2:-""}
1682
1683	if [[ -z $dataset ]]; then
1684		log_note "No dataset name given."
1685		return 1
1686	fi
1687
1688	if is_global_zone ; then
1689		if datasetexists "$dataset" ; then
1690			mtpt=$(get_prop mountpoint "$dataset")
1691			log_must_busy zfs destroy $args $dataset
1692
1693			[ -d $mtpt ] && log_must rm -rf $mtpt
1694		else
1695			log_note "Dataset does not exist. ($dataset)"
1696			return 1
1697		fi
1698	fi
1699
1700	return 0
1701}
1702
1703#
1704# Reexport TESTPOOL & TESTPOOL(1-4)
1705#
1706function reexport_pool
1707{
1708	typeset -i cntctr=5
1709	typeset -i i=0
1710
1711	while ((i < cntctr)); do
1712		if ((i == 0)); then
1713			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1714			if ! ismounted $TESTPOOL; then
1715				log_must zfs mount $TESTPOOL
1716			fi
1717		else
1718			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1719			if eval ! ismounted \$TESTPOOL$i; then
1720				log_must eval zfs mount \$TESTPOOL$i
1721			fi
1722		fi
1723		((i += 1))
1724	done
1725}
1726
1727#
1728# Verify a given disk or pool state
1729#
1730# Return 0 if pool/disk matches the expected state, 1 otherwise
1731#
1732function check_state # pool disk state{online,offline,degraded}
1733{
1734	typeset pool=$1
1735	typeset disk=${2#$DEV_DSKDIR/}
1736	typeset state=$3
1737
1738	[[ -z $pool ]] || [[ -z $state ]] \
1739	    && log_fail "Arguments invalid or missing"
1740
1741	if [[ -z $disk ]]; then
1742		# Check pool state only
1743		zpool get -H -o value health $pool | grep -qi "$state"
1744	else
1745		zpool status -v $pool | grep "$disk" | grep -qi "$state"
1746	fi
1747}
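
# Example (illustrative; $DISK is a vdev belonging to the pool): check the
# overall pool health, or the state of a single vdev:
#   check_state $TESTPOOL "" "ONLINE" || log_fail "$TESTPOOL is not healthy"
#   log_must zpool offline $TESTPOOL $DISK
#   check_state $TESTPOOL $DISK "offline" || log_fail "$DISK is not offline"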
1748
1749#
1750# Get the mountpoint of a snapshot.  A snapshot uses
1751# <mp_filesystem>/.zfs/snapshot/<snap>
1752# as its mountpoint.
1753#
1754function snapshot_mountpoint
1755{
1756	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1757
1758	if [[ $dataset != *@* ]]; then
1759		log_fail "Invalid snapshot name '$dataset'."
1760	fi
1761
1762	typeset fs=${dataset%@*}
1763	typeset snap=${dataset#*@}
1764
1765	if [[ -z $fs || -z $snap ]]; then
1766		log_fail "Invalid snapshot name '$dataset'."
1767	fi
1768
1769	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1770}
1771
1772#
1773# Given a device and 'ashift' value verify it's correctly set on every label
1774#
1775function verify_ashift # device ashift
1776{
1777	typeset device="$1"
1778	typeset ashift="$2"
1779
1780	zdb -e -lll $device | awk -v ashift=$ashift '
1781	    /ashift: / {
1782	        if (ashift != $2)
1783	            exit 1;
1784	        else
1785	            count++;
1786	    }
1787	    END {
1788	        exit (count != 4);
1789	    }'
1790}
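
# Example (illustrative): after creating a pool with -o ashift=12, every
# label on the vdev should record that ashift:
#   log_must zpool create -o ashift=12 $TESTPOOL $DISK
#   log_must verify_ashift $DISK 12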
1791
1792#
1793# Given a pool and file system, this function will verify the file system
1794# using the zdb internal tool. Note that the pool is exported and imported
1795# to ensure it has consistent state.
1796#
1797function verify_filesys # pool filesystem dir
1798{
1799	typeset pool="$1"
1800	typeset filesys="$2"
1801	typeset zdbout="/tmp/zdbout.$$"
1802
1803	shift
1804	shift
1805	typeset dirs=$@
1806	typeset search_path=""
1807
1808	log_note "Calling zdb to verify filesystem '$filesys'"
1809	zfs unmount -a > /dev/null 2>&1
1810	log_must zpool export $pool
1811
1812	if [[ -n $dirs ]] ; then
1813		for dir in $dirs ; do
1814			search_path="$search_path -d $dir"
1815		done
1816	fi
1817
1818	log_must zpool import $search_path $pool
1819
1820	if ! zdb -cudi $filesys > $zdbout 2>&1; then
1821		log_note "Output: zdb -cudi $filesys"
1822		cat $zdbout
1823		rm -f $zdbout
1824		log_fail "zdb detected errors with: '$filesys'"
1825	fi
1826
1827	log_must zfs mount -a
1828	log_must rm -rf $zdbout
1829}
1830
1831#
1832# Given a pool issue a scrub and verify that no checksum errors are reported.
1833#
1834function verify_pool
1835{
1836	typeset pool=${1:-$TESTPOOL}
1837
1838	log_must zpool scrub $pool
1839	log_must wait_scrubbed $pool
1840
1841	typeset -i cksum=$(zpool status $pool | awk '
1842	    !NF { isvdev = 0 }
1843	    isvdev { errors += $NF }
1844	    /CKSUM$/ { isvdev = 1 }
1845	    END { print errors }
1846	')
1847	if [[ $cksum != 0 ]]; then
1848		log_must zpool status -v
1849		log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
1850	fi
1851}
1852
1853#
1854# Given a pool, list all disks in the pool
1855#
1856function get_disklist # pool
1857{
1858	echo $(zpool iostat -v $1 | awk '(NR > 4) {print $1}' | \
1859	    grep -vEe '^-----' -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$")
1860}
1861
1862#
1863# Given a pool, list all disks in the pool with their full
1864# path (like "/dev/sda" instead of "sda").
1865#
1866function get_disklist_fullpath # pool
1867{
1868	get_disklist "-P $1"
1869}
1870
1871
1872
1873# /**
1874#  This function kills a given list of processes after a time period. We use
1875#  this in the stress tests instead of STF_TIMEOUT so that we can have processes
1876#  run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1877#  would be listed as FAIL, which we don't want: we're happy with stress tests
1878#  running for a certain amount of time, then finishing.
1879#
1880# @param $1 the time in seconds after which we should terminate these processes
1881# @param $2..$n the processes we wish to terminate.
1882# */
1883function stress_timeout
1884{
1885	typeset -i TIMEOUT=$1
1886	shift
1887	typeset cpids="$@"
1888
1889	log_note "Waiting for child processes($cpids). " \
1890		"It could last dozens of minutes, please be patient ..."
1891	log_must sleep $TIMEOUT
1892
1893	log_note "Killing child processes after ${TIMEOUT} stress timeout."
1894	typeset pid
1895	for pid in $cpids; do
1896		ps -p $pid > /dev/null 2>&1 &&
1897			log_must kill -USR1 $pid
1898	done
1899}
1900
1901#
1902# Verify a given hotspare disk is inuse or avail
1903#
1904# Return 0 if pool/disk matches the expected state, 1 otherwise
1905#
1906function check_hotspare_state # pool disk state{inuse,avail}
1907{
1908	typeset pool=$1
1909	typeset disk=${2#$DEV_DSKDIR/}
1910	typeset state=$3
1911
1912	cur_state=$(get_device_state $pool $disk "spares")
1913
1914	[ $state = $cur_state ]
1915}
1916
1917#
1918# Wait until a hotspare transitions to a given state or times out.
1919#
1920# Return 0 when pool/disk matches the expected state, 1 on timeout.
1921#
1922function wait_hotspare_state # pool disk state timeout
1923{
1924	typeset pool=$1
1925	typeset disk=${2#*$DEV_DSKDIR/}
1926	typeset state=$3
1927	typeset timeout=${4:-60}
1928	typeset -i i=0
1929
1930	while [[ $i -lt $timeout ]]; do
1931		if check_hotspare_state $pool $disk $state; then
1932			return 0
1933		fi
1934
1935		i=$((i+1))
1936		sleep 1
1937	done
1938
1939	return 1
1940}
1941
1942#
1943# Verify a given vdev disk is inuse or avail
1944#
1945# Return 0 if pool/disk matches the expected state, 1 otherwise
1946#
1947function check_vdev_state # pool disk state{online,offline,unavail,removed}
1948{
1949	typeset pool=$1
1950	typeset disk=${2#*$DEV_DSKDIR/}
1951	typeset state=$3
1952
1953	cur_state=$(get_device_state $pool $disk)
1954
1955	[ $state = $cur_state ]
1956}
1957
1958#
1959# Wait until a vdev transitions to a given state or times out.
1960#
1961# Return 0 when pool/disk matches the expected state, 1 on timeout.
1962#
1963function wait_vdev_state # pool disk state timeout
1964{
1965	typeset pool=$1
1966	typeset disk=${2#*$DEV_DSKDIR/}
1967	typeset state=$3
1968	typeset timeout=${4:-60}
1969	typeset -i i=0
1970
1971	while [[ $i -lt $timeout ]]; do
1972		if check_vdev_state $pool $disk $state; then
1973			return 0
1974		fi
1975
1976		i=$((i+1))
1977		sleep 1
1978	done
1979
1980	return 1
1981}
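
# Example (illustrative; $DISK1/$DISK2 are vdevs of the pool): wait up to 60
# seconds for a replacement disk to come online before failing the test:
#   log_must zpool replace $TESTPOOL $DISK1 $DISK2
#   log_must wait_vdev_state $TESTPOOL $DISK2 "ONLINE" 60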
1982
1983#
1984# Wait for vdev 'sit_out' property to be cleared.
1985#
1986# $1 pool name
1987# $2 vdev name
1988# $3 timeout
1989#
1990function wait_sit_out #pool vdev timeout
1991{
1992	typeset pool=${1:-$TESTPOOL}
1993	typeset vdev="$2"
1994	typeset timeout=${3:-300}
1995	for (( timer = 0; timer < $timeout; timer++ )); do
1996		if [ "$(get_vdev_prop sit_out "$pool" "$vdev")" = "off" ]; then
1997			return 0
1998		fi
1999		sleep 1;
2000	done
2001
2002	return 1
2003}
2004
2005#
2006# Check the output of 'zpool status -v <pool>',
2007# to see if the content of <token> contains the specified <keyword>.
2008#
2009# Return 0 if it does, 1 otherwise
2010#
2011function check_pool_status # pool token keyword <verbose>
2012{
2013	typeset pool=$1
2014	typeset token=$2
2015	typeset keyword=$3
2016	typeset verbose=${4:-false}
2017
2018	scan=$(zpool status -v "$pool" 2>/dev/null | awk -v token="$token:" '$1==token')
2019	if [[ $verbose == true ]]; then
2020		log_note $scan
2021	fi
2022	echo $scan | grep -qi "$keyword"
2023}
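
# Example (illustrative): the token is the label of a 'zpool status' line, so
# "scan" inspects the "scan:" line and "errors" the "errors:" line:
#   check_pool_status $TESTPOOL "errors" "No known data errors"
#   check_pool_status $TESTPOOL "scan" "scrub repaired" true	# verbose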
2024
2025#
2026# The following functions are instance of check_pool_status()
2027#	is_pool_resilvering - to check if the pool resilver is in progress
2028#	is_pool_resilvered - to check if the pool resilver is completed
2029#	is_pool_scrubbing - to check if the pool scrub is in progress
2030#	is_pool_scrubbed - to check if the pool scrub is completed
2031#	is_pool_scrub_stopped - to check if the pool scrub is stopped
2032#	is_pool_scrub_paused - to check if the pool scrub has paused
2033#	is_pool_removing - to check if the pool is removing a vdev
2034#	is_pool_removed - to check if the device removal is completed
2035#	is_pool_discarding - to check if the pool checkpoint is being discarded
2036#	is_pool_replacing - to check if the pool is performing a replacement
2037#
2038function is_pool_resilvering #pool <verbose>
2039{
2040	check_pool_status "$1" "scan" \
2041	    "resilver[ ()0-9A-Za-z:_-]* in progress since" $2
2042}
2043
2044function is_pool_resilvered #pool <verbose>
2045{
2046	check_pool_status "$1" "scan" "resilvered " $2
2047}
2048
2049function is_pool_scrubbing #pool <verbose>
2050{
2051	check_pool_status "$1" "scan" "scrub in progress since " $2
2052}
2053
2054function is_pool_error_scrubbing #pool <verbose>
2055{
2056	check_pool_status "$1" "scrub" "error scrub in progress since " $2
2057	return $?
2058}
2059
2060function is_pool_scrubbed #pool <verbose>
2061{
2062	check_pool_status "$1" "scan" "scrub repaired" $2
2063}
2064
2065function is_pool_scrub_stopped #pool <verbose>
2066{
2067	check_pool_status "$1" "scan" "scrub canceled" $2
2068}
2069
2070function is_pool_error_scrub_stopped #pool <verbose>
2071{
2072	check_pool_status "$1" "scrub" "error scrub canceled on " $2
2073	return $?
2074}
2075
2076function is_pool_scrub_paused #pool <verbose>
2077{
2078	check_pool_status "$1" "scan" "scrub paused since " $2
2079}
2080
2081function is_pool_error_scrub_paused #pool <verbose>
2082{
2083	check_pool_status "$1" "scrub" "error scrub paused since " $2
2084	return $?
2085}
2086
2087function is_pool_removing #pool
2088{
2089	check_pool_status "$1" "remove" "in progress since "
2090}
2091
2092function is_pool_removed #pool
2093{
2094	check_pool_status "$1" "remove" "completed on"
2095}
2096
2097function is_pool_discarding #pool
2098{
2099	check_pool_status "$1" "checkpoint" "discarding"
2100}
2101function is_pool_replacing #pool
2102{
2103	zpool status "$1" | grep -qE 'replacing-[0-9]+'
2104}
2105
2106function wait_for_degraded
2107{
2108	typeset pool=$1
2109	typeset timeout=${2:-30}
2110	typeset t0=$SECONDS
2111
2112	while :; do
2113		[[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
2114		log_note "$pool is not yet degraded."
2115		sleep 1
2116		if ((SECONDS - t0 > $timeout)); then
2117			log_note "$pool not degraded after $timeout seconds."
2118			return 1
2119		fi
2120	done
2121
2122	return 0
2123}
2124
2125#
2126# Use create_pool()/destroy_pool() to clean up the information on
2127# the given disks to avoid slice overlapping.
2128#
2129function cleanup_devices #vdevs
2130{
2131	typeset pool="foopool$$"
2132
2133	for vdev in $@; do
2134		zero_partitions $vdev
2135	done
2136
2137	poolexists $pool && destroy_pool $pool
2138	create_pool $pool $@
2139	destroy_pool $pool
2140
2141	return 0
2142}
2143
2144#/**
2145# A function to find free disks on a system, or among the disks given as
2146# parameters. It works by excluding disks that are in use as swap or dump
2147# devices, as well as disks that are already mounted (per /etc/mnttab).
2148#
2149# $@ given disks to find which are free, default is all disks in
2150# the test system
2151#
2152# @return a string containing the list of available disks
2153#*/
2154function find_disks
2155{
2156	# Trust provided list, no attempt is made to locate unused devices.
2157	if is_linux || is_freebsd; then
2158		echo "$@"
2159		return
2160	fi
2161
2162
2163	sfi=/tmp/swaplist.$$
2164	dmpi=/tmp/dumpdev.$$
2165	max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2166
2167	swap -l > $sfi
2168	dumpadm > $dmpi 2>/dev/null
2169
2170	disks=${@:-$(echo "" | format -e 2>/dev/null | awk '
2171BEGIN { FS="."; }
2172
2173/^Specify disk/{
2174	searchdisks=0;
2175}
2176
2177{
2178	if (searchdisks && $2 !~ "^$"){
2179		split($2,arr," ");
2180		print arr[1];
2181	}
2182}
2183
2184/^AVAILABLE DISK SELECTIONS:/{
2185	searchdisks=1;
2186}
2187')}
2188
2189	unused=""
2190	for disk in $disks; do
2191	# Check for mounted
2192		grep -q "${disk}[sp]" /etc/mnttab && continue
2193	# Check for swap
2194		grep -q "${disk}[sp]" $sfi && continue
2195	# check for dump device
2196		grep -q "${disk}[sp]" $dmpi && continue
2197	# check to see if this disk hasn't been explicitly excluded
2198	# by a user-set environment variable
2199		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep -q "${disk}" && continue
2200		unused_candidates="$unused_candidates $disk"
2201	done
2202	rm $sfi $dmpi
2203
2204# now just check to see if those disks do actually exist
2205# by looking for a device pointing to the first slice in
2206# each case. limit the number to max_finddisksnum
2207	count=0
2208	for disk in $unused_candidates; do
2209		if is_disk_device $DEV_DSKDIR/${disk}s0 && \
2210		    [ $count -lt $max_finddisksnum ]; then
2211			unused="$unused $disk"
2212			# do not impose limit if $@ is provided
2213			[[ -z $@ ]] && ((count = count + 1))
2214		fi
2215	done
2216
2217# finally, return our disk list
2218	echo $unused
2219}
2220
2221function add_user_freebsd #<group_name> <user_name> <basedir>
2222{
2223	typeset group=$1
2224	typeset user=$2
2225	typeset basedir=$3
2226
2227	# Check to see if the user exists.
2228	if id $user > /dev/null 2>&1; then
2229		return 0
2230	fi
2231
2232	# Assign 1000 as the base uid
2233	typeset -i uid=1000
2234	while true; do
2235		pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
2236		case $? in
2237			0) break ;;
2238			# The uid is not unique
2239			65) ((uid += 1)) ;;
2240			*) return 1 ;;
2241		esac
2242		if [[ $uid == 65000 ]]; then
2243			log_fail "No user id available under 65000 for $user"
2244		fi
2245	done
2246
2247	# Silence MOTD
2248	touch $basedir/$user/.hushlogin
2249
2250	return 0
2251}
2252
2253#
2254# Delete the specified user.
2255#
2256# $1 login name
2257#
2258function del_user_freebsd #<logname>
2259{
2260	typeset user=$1
2261
2262	if id $user > /dev/null 2>&1; then
2263		log_must pw userdel $user
2264	fi
2265
2266	return 0
2267}
2268
2269#
2270# Select valid gid and create specified group.
2271#
2272# $1 group name
2273#
2274function add_group_freebsd #<group_name>
2275{
2276	typeset group=$1
2277
2278	# See if the group already exists.
2279	if pw groupshow $group >/dev/null 2>&1; then
2280		return 0
2281	fi
2282
2283	# Assign 1000 as the base gid
2284	typeset -i gid=1000
2285	while true; do
2286		pw groupadd -g $gid -n $group > /dev/null 2>&1
2287		case $? in
2288			0) return 0 ;;
2289			# The gid is not unique
2290			65) ((gid += 1)) ;;
2291			*) return 1 ;;
2292		esac
2293		if [[ $gid == 65000 ]]; then
2294			log_fail "No group id available under 65000 for $group"
2295		fi
2296	done
2297}
2298
2299#
2300# Delete the specified group.
2301#
2302# $1 group name
2303#
2304function del_group_freebsd #<group_name>
2305{
2306	typeset group=$1
2307
2308	pw groupdel -n $group > /dev/null 2>&1
2309	case $? in
2310		# Group does not exist, or was deleted successfully.
2311		0|6|65) return 0 ;;
2312		# Name already exists as a group name
2313		9) log_must pw groupdel $group ;;
2314		*) return 1 ;;
2315	esac
2316
2317	return 0
2318}
2319
2320function add_user_illumos #<group_name> <user_name> <basedir>
2321{
2322	typeset group=$1
2323	typeset user=$2
2324	typeset basedir=$3
2325
2326	log_must useradd -g $group -d $basedir/$user -m $user
2327
2328	return 0
2329}
2330
2331function del_user_illumos #<user_name>
2332{
2333	typeset user=$1
2334
2335	if id $user > /dev/null 2>&1; then
2336		log_must_retry "currently used" 6 userdel $user
2337	fi
2338
2339	return 0
2340}
2341
2342function add_group_illumos #<group_name>
2343{
2344	typeset group=$1
2345
2346	typeset -i gid=100
2347	while true; do
2348		groupadd -g $gid $group > /dev/null 2>&1
2349		case $? in
2350			0) return 0 ;;
2351			# The gid is not unique
2352			4) ((gid += 1)) ;;
2353			*) return 1 ;;
2354		esac
2355	done
2356}
2357
2358function del_group_illumos #<group_name>
2359{
2360	typeset group=$1
2361
2362	groupmod -n $group $group > /dev/null 2>&1
2363	case $? in
2364		# Group does not exist.
2365		6) return 0 ;;
2366		# Name already exists as a group name
2367		9) log_must groupdel $group ;;
2368		*) return 1 ;;
2369	esac
2370}
2371
2372function add_user_linux #<group_name> <user_name> <basedir>
2373{
2374	typeset group=$1
2375	typeset user=$2
2376	typeset basedir=$3
2377
2378	log_must useradd -g $group -d $basedir/$user -m $user
2379
2380	# Add new users to the group that owns the command line utilities.
2381	# This allows the utilities to be run out of the original user's home
2382	# directory as long as it is permissioned to be group readable.
2383	cmd_group=$(stat --format="%G" $(command -v zfs))
2384	log_must usermod -a -G $cmd_group $user
2385
2386	return 0
2387}
2388
2389function del_user_linux #<user_name>
2390{
2391	typeset user=$1
2392
2393	if id $user > /dev/null 2>&1; then
2394		log_must_retry "currently used" 6 userdel $user
2395	fi
2396}
2397
2398function add_group_linux #<group_name>
2399{
2400	typeset group=$1
2401
2402	# Let groupadd assign the gid automatically; no base gid is selected
2403	# here because many Linux distributions reserve 1000 and under.
2404	while true; do
2405		groupadd $group > /dev/null 2>&1
2406		case $? in
2407			0) return 0 ;;
2408			*) return 1 ;;
2409		esac
2410	done
2411}
2412
2413function del_group_linux #<group_name>
2414{
2415	typeset group=$1
2416
2417	getent group $group > /dev/null 2>&1
2418	case $? in
2419		# Group does not exist.
2420		2) return 0 ;;
2421		# Name already exists as a group name
2422		0) log_must groupdel $group ;;
2423		*) return 1 ;;
2424	esac
2425
2426	return 0
2427}
2428
2429#
2430# Add specified user to specified group
2431#
2432# $1 group name
2433# $2 user name
2434# $3 base of the homedir (optional)
2435#
2436function add_user #<group_name> <user_name> <basedir>
2437{
2438	typeset group=$1
2439	typeset user=$2
2440	typeset basedir=${3:-"$TEST_BASE_DIR"}
2441
2442	if ((${#group} == 0 || ${#user} == 0)); then
2443		log_fail "group name or user name is not defined."
2444	fi
2445
2446	case "$UNAME" in
2447	FreeBSD)
2448		add_user_freebsd "$group" "$user" "$basedir"
2449		;;
2450	Linux)
2451		add_user_linux "$group" "$user" "$basedir"
2452		;;
2453	*)
2454		add_user_illumos "$group" "$user" "$basedir"
2455		;;
2456	esac
2457
2458	return 0
2459}
2460
2461#
2462# Delete the specified user.
2463#
2464# $1 login name
2465# $2 base of the homedir (optional)
2466#
2467function del_user #<logname> <basedir>
2468{
2469	typeset user=$1
2470	typeset basedir=${2:-"$TEST_BASE_DIR"}
2471
2472	if ((${#user} == 0)); then
2473		log_fail "login name is necessary."
2474	fi
2475
2476	case "$UNAME" in
2477	FreeBSD)
2478		del_user_freebsd "$user"
2479		;;
2480	Linux)
2481		del_user_linux "$user"
2482		;;
2483	*)
2484		del_user_illumos "$user"
2485		;;
2486	esac
2487
2488	[[ -d $basedir/$user ]] && rm -fr $basedir/$user
2489
2490	return 0
2491}
2492
2493#
2494# Select valid gid and create specified group.
2495#
2496# $1 group name
2497#
2498function add_group #<group_name>
2499{
2500	typeset group=$1
2501
2502	if ((${#group} == 0)); then
2503		log_fail "group name is necessary."
2504	fi
2505
2506	case "$UNAME" in
2507	FreeBSD)
2508		add_group_freebsd "$group"
2509		;;
2510	Linux)
2511		add_group_linux "$group"
2512		;;
2513	*)
2514		add_group_illumos "$group"
2515		;;
2516	esac
2517
2518	return 0
2519}
2520
2521#
2522# Delete the specified group.
2523#
2524# $1 group name
2525#
2526function del_group #<group_name>
2527{
2528	typeset group=$1
2529
2530	if ((${#group} == 0)); then
2531		log_fail "group name is necessary."
2532	fi
2533
2534	case "$UNAME" in
2535	FreeBSD)
2536		del_group_freebsd "$group"
2537		;;
2538	Linux)
2539		del_group_linux "$group"
2540		;;
2541	*)
2542		del_group_illumos "$group"
2543		;;
2544	esac
2545
2546	return 0
2547}
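#
# Usage sketch for the user/group helpers above (illustrative only; the
# names "testgrp" and "testusr" are made up for this example and are not
# fixtures defined by this library):
#
#	log_must add_group testgrp
#	log_must add_user testgrp testusr
#	... run commands as testusr, e.g. via user_run ...
#	log_must del_user testusr
#	log_must del_group testgrp
#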
2548
2549#
2550# This function will return true if it's safe to destroy the pool passed
2551# as argument 1. It checks for pools based on zvols and files, and also
2552# files contained in a pool that may have a different mountpoint.
2553#
2554function safe_to_destroy_pool { # $1 the pool name
2555
2556	typeset pool=""
2557	typeset DONT_DESTROY=""
2558
2559	# We check that by deleting the $1 pool, we're not
2560	# going to pull the rug out from other pools. Do this
2561	# by looking at all other pools, ensuring that they
2562	# aren't built from files or zvols contained in this pool.
2563
2564	for pool in $(zpool list -H -o name)
2565	do
2566		ALTMOUNTPOOL=""
2567
2568		# this is a list of the file vdevs of this pool whose paths
2569		# contain the name of the pool being checked ($1)
2570		FILEPOOL=$(zpool status -v $pool | awk -v pool="/$1/" '$0 ~ pool {print $1}')
2571
2572		# this is a list of the zvols that make up the pool
2573		ZVOLPOOL=$(zpool status -v $pool | awk -v zvols="$ZVOL_DEVDIR/$1$" '$0 ~ zvols {print $1}')
2574
2575		# also want to determine if it's a file-based pool using an
2576		# alternate mountpoint...
2577		POOL_FILE_DIRS=$(zpool status -v $pool | \
2578					awk '/\// {print $1}' | \
2579					awk -F/ '!/dev/ {print $2}')
2580
2581		for pooldir in $POOL_FILE_DIRS
2582		do
2583			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2584					awk -v pd="${pooldir}$" '$0 ~ pd {print $1}')
2585
2586			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2587		done
2588
2589
2590		if [ ! -z "$ZVOLPOOL" ]
2591		then
2592			DONT_DESTROY="true"
2593			log_note "Pool $pool is built from $ZVOLPOOL on $1"
2594		fi
2595
2596		if [ ! -z "$FILEPOOL" ]
2597		then
2598			DONT_DESTROY="true"
2599			log_note "Pool $pool is built from $FILEPOOL on $1"
2600		fi
2601
2602		if [ ! -z "$ALTMOUNTPOOL" ]
2603		then
2604			DONT_DESTROY="true"
2605			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2606		fi
2607	done
2608
2609	if [ -z "${DONT_DESTROY}" ]
2610	then
2611		return 0
2612	else
2613		log_note "Warning: it is not safe to destroy $1!"
2614		return 1
2615	fi
2616}
2617
2618#
2619# Verify zfs operation with -p option work as expected
2620# $1 operation, value could be create, clone or rename
2621# $2 dataset type, value could be fs or vol
2622# $3 dataset name
2623# $4 new dataset name
2624#
2625function verify_opt_p_ops
2626{
2627	typeset ops=$1
2628	typeset datatype=$2
2629	typeset dataset=$3
2630	typeset newdataset=$4
2631
2632	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2633		log_fail "$datatype is not supported."
2634	fi
2635
2636	# check parameters accordingly
2637	case $ops in
2638		create)
2639			newdataset=$dataset
2640			dataset=""
2641			if [[ $datatype == "vol" ]]; then
2642				ops="create -V $VOLSIZE"
2643			fi
2644			;;
2645		clone)
2646			if [[ -z $newdataset ]]; then
2647				log_fail "newdataset should not be empty" \
2648					"when ops is $ops."
2649			fi
2650			log_must datasetexists $dataset
2651			log_must snapexists $dataset
2652			;;
2653		rename)
2654			if [[ -z $newdataset ]]; then
2655				log_fail "newdataset should not be empty" \
2656					"when ops is $ops."
2657			fi
2658			log_must datasetexists $dataset
2659			;;
2660		*)
2661			log_fail "$ops is not supported."
2662			;;
2663	esac
2664
2665	# make sure the upper level filesystem does not exist
2666	destroy_dataset "${newdataset%/*}" "-rRf"
2667
2668	# without -p option, operation will fail
2669	log_mustnot zfs $ops $dataset $newdataset
2670	log_mustnot datasetexists $newdataset ${newdataset%/*}
2671
2672	# with -p option, operation should succeed
2673	log_must zfs $ops -p $dataset $newdataset
2674	block_device_wait
2675
2676	if ! datasetexists $newdataset ; then
2677		log_fail "-p option does not work for $ops"
2678	fi
2679
2680	# when $ops is create or clone, redoing the operation should still succeed
2681	if [[ $ops != "rename" ]]; then
2682		log_must zfs $ops -p $dataset $newdataset
2683	fi
2684
2685	return 0
2686}
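#
# Usage sketch for verify_opt_p_ops() (illustrative; the dataset names below
# are assumptions, not fixtures created by this library):
#
#	verify_opt_p_ops create fs $TESTPOOL/dir1/dir2/fs
#	verify_opt_p_ops rename fs $TESTPOOL/fs $TESTPOOL/newdir/fs
#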
2687
2688#
2689# Get configuration of pool
2690# $1 pool name
2691# $2 config name
2692#
2693function get_config
2694{
2695	typeset pool=$1
2696	typeset config=$2
2697
2698	if ! poolexists "$pool" ; then
2699		return 1
2700	fi
2701	if [ "$(get_pool_prop cachefile "$pool")" = "none" ]; then
2702		zdb -e $pool
2703	else
2704		zdb -C $pool
2705	fi | awk -F: -v cfg="$config:" '$0 ~ cfg {sub(/^'\''/, "", $2); sub(/'\''$/, "", $2); print $2}'
2706}
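#
# Usage sketch for get_config() (illustrative; "pool_guid" is one of the
# config keys that appears in 'zdb -C' output):
#
#	typeset guid=$(get_config $TESTPOOL pool_guid)
#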
2707
2708#
2709# Private function. Randomly select one of the items passed as arguments.
2710#
2711# $1 count
2712# $2-n string
2713#
2714function _random_get
2715{
2716	typeset cnt=$1
2717	shift
2718
2719	typeset str="$@"
2720	typeset -i ind
2721	((ind = RANDOM % cnt + 1))
2722
2723	echo "$str" | cut -f $ind -d ' '
2724}
2725
2726#
2727# Randomly select one of the items from the arguments, including a NONE (empty) choice
2728#
2729function random_get_with_non
2730{
2731	typeset -i cnt=$#
2732	((cnt += 1))
2733
2734	_random_get "$cnt" "$@"
2735}
2736
2737#
2738# Random select one of item from arguments which doesn't include NONE string
2739#
2740function random_get
2741{
2742	_random_get "$#" "$@"
2743}
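#
# Usage sketch for the random selection helpers (illustrative values):
#
#	typeset value=$(random_get on off)		# always "on" or "off"
#	typeset value=$(random_get_with_non on off)	# may also be empty (NONE)
#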
2744
2745#
2746# The function will generate a dataset name with specific length
2747# $1, the length of the name
2748# $2, the base string to construct the name
2749#
2750function gen_dataset_name
2751{
2752	typeset -i len=$1
2753	typeset basestr="$2"
2754	typeset -i baselen=${#basestr}
2755	typeset -i iter=0
2756	typeset l_name=""
2757
2758	if ((len % baselen == 0)); then
2759		((iter = len / baselen))
2760	else
2761		((iter = len / baselen + 1))
2762	fi
2763	while ((iter > 0)); do
2764		l_name="${l_name}$basestr"
2765
2766		((iter -= 1))
2767	done
2768
2769	echo $l_name
2770}
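#
# Worked example for gen_dataset_name(): the base string is repeated until at
# least the requested length is covered, so a request for 10 characters from
# the base "abc" repeats it ceil(10/3) = 4 times and yields "abcabcabcabc":
#
#	typeset name=$(gen_dataset_name 10 abc)
#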
2771
2772#
2773# Get cksum tuple of dataset
2774# $1 dataset name
2775#
2776# sample zdb output:
2777# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2778# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2779# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2780# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2781function datasetcksum
2782{
2783	typeset cksum
2784	sync
2785	sync_all_pools
2786	zdb -vvv $1 | awk -F= -v ds="^Dataset $1 "'\\[' '$0 ~ ds && /cksum/ {print $7}'
2787}
2788
2789#
2790# Get the given disk/slice state from the specific field of the pool
2791#
2792function get_device_state #pool disk field("", "spares","logs")
2793{
2794	typeset pool=$1
2795	typeset disk=${2#$DEV_DSKDIR/}
2796	typeset field=${3:-$pool}
2797
2798	zpool status -v "$pool" 2>/dev/null | \
2799		awk -v device=$disk -v pool=$pool -v field=$field \
2800		'BEGIN {startconfig=0; startfield=0; }
2801		/config:/ {startconfig=1}
2802		(startconfig==1) && ($1==field) {startfield=1; next;}
2803		(startfield==1) && ($1==device) {print $2; exit;}
2804		(startfield==1) &&
2805		($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}'
2806}
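#
# Usage sketch for get_device_state() (illustrative; $DISK1 is assumed to be
# a disk belonging to the pool):
#
#	[[ $(get_device_state $TESTPOOL $DISK1) == "ONLINE" ]] || \
#	    log_fail "$DISK1 is not online"
#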
2807
2808#
2809# get the root filesystem name if it's zfsroot system.
2810#
2811# return: root filesystem name
2812function get_rootfs
2813{
2814	typeset rootfs=""
2815
2816	if is_freebsd; then
2817		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
2818	elif ! is_linux; then
2819		rootfs=$(awk '$2 == "/" && $3 == "zfs" {print $1}' \
2820			/etc/mnttab)
2821	fi
2822	if [[ -z "$rootfs" ]]; then
2823		log_fail "Can not get rootfs"
2824	fi
2825	if datasetexists $rootfs; then
2826		echo $rootfs
2827	else
2828		log_fail "This is not a zfsroot system."
2829	fi
2830}
2831
2832#
2833# get the rootfs's pool name
2834# return:
2835#       rootpool name
2836#
2837function get_rootpool
2838{
2839	typeset rootfs=$(get_rootfs)
2840	echo ${rootfs%%/*}
2841}
2842
2843#
2844# To verify that the required number of disks is given
2845#
2846function verify_disk_count
2847{
2848	typeset -i min=${2:-1}
2849
2850	typeset -i count=$(echo "$1" | wc -w)
2851
2852	if ((count < min)); then
2853		log_untested "A minimum of $min disks is required to run." \
2854			" You specified $count disk(s)"
2855	fi
2856}
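#
# Usage sketch for verify_disk_count(); $DISKS is the space-separated list of
# disks provided to the test suite (the minimum shown is illustrative):
#
#	verify_disk_count "$DISKS" 3
#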
2857
2858function ds_is_volume
2859{
2860	typeset type=$(get_prop type $1)
2861	[ $type = "volume" ]
2862}
2863
2864function ds_is_filesystem
2865{
2866	typeset type=$(get_prop type $1)
2867	[ $type = "filesystem" ]
2868}
2869
2870#
2871# Check if Trusted Extensions are installed and enabled
2872#
2873function is_te_enabled
2874{
2875	svcs -H -o state labeld 2>/dev/null | grep -q "enabled"
2876}
2877
2878# Return the number of CPUs (cross-platform)
2879function get_num_cpus
2880{
2881	if is_linux ; then
2882		grep -c '^processor' /proc/cpuinfo
2883	elif is_freebsd; then
2884		sysctl -n kern.smp.cpus
2885	else
2886		psrinfo | wc -l
2887	fi
2888}
2889
2890# Utility function to determine if a system has multiple cpus.
2891function is_mp
2892{
2893	[[ $(get_num_cpus) -gt 1 ]]
2894}
2895
2896function get_cpu_freq
2897{
2898	if is_linux; then
2899		lscpu | awk '/CPU MHz/ { print $3 }'
2900	elif is_freebsd; then
2901		sysctl -n hw.clockrate
2902	else
2903		psrinfo -v 0 | awk '/processor operates at/ {print $6}'
2904	fi
2905}
2906
2907# Run the given command as the user provided.
2908function user_run
2909{
2910	typeset user=$1
2911	shift
2912
2913	log_note "user: $user"
2914	log_note "cmd: $*"
2915
2916	if ! sudo -Eu $user test -x $PATH ; then
2917		log_note "-------------------------------------------------"
2918		log_note "Warning: $user doesn't have permissions on $PATH"
2919		log_note ""
2920		log_note "This usually happens when you're running ZTS locally"
2921		log_note "from inside the ZFS source dir, and are attempting to"
2922		log_note "run a test that calls user_run.  The ephemeral user"
2923		log_note "($user) that ZTS is creating does not have permission"
2924		log_note "to traverse to $PATH, or the binaries in $PATH are"
2925		log_note "not the right permissions."
2926		log_note ""
2927		log_note "To get around this, copy your ZFS source directory"
2928		log_note "to a world-accessible location (like /tmp), and "
2929		log_note "change the permissions on your ZFS source dir "
2930		log_note "to allow access."
2931		log_note ""
2932		log_note "Also, verify that /dev/zfs is RW for others:"
2933		log_note ""
2934		log_note "    sudo chmod o+rw /dev/zfs"
2935		log_note "-------------------------------------------------"
2936	fi
2937
2938	typeset out=$TEST_BASE_DIR/out
2939	typeset err=$TEST_BASE_DIR/err
2940
2941	sudo -Eu $user \
2942	    env PATH="$PATH" ZTS_LOG_SUPPRESS_TIMESTAMP=1 \
2943	    ksh <<<"$*" >$out 2>$err
2944	typeset res=$?
2945	log_note "out: $(<$out)"
2946	log_note "err: $(<$err)"
2947	return $res
2948}
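#
# Usage sketch for user_run() (illustrative; the user "zfstestuser" would
# first have to be created with add_user):
#
#	log_must user_run zfstestuser zfs snapshot $TESTPOOL/$TESTFS@snap
#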
2949
2950#
2951# Check if the pool contains the specified vdevs
2952#
2953# $1 pool
2954# $2..n <vdev> ...
2955#
2956# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2957# vdevs is not in the pool, and 2 if pool name is missing.
2958#
2959function vdevs_in_pool
2960{
2961	typeset pool=$1
2962	typeset vdev
2963
2964	if [[ -z $pool ]]; then
2965		log_note "Missing pool name."
2966		return 2
2967	fi
2968
2969	shift
2970
2971	# We could use 'zpool list' to only get the vdevs of the pool but we
2972	# can't reference a mirror/raidz vdev using its ID (e.g. mirror-0),
2973	# therefore we use the 'zpool status' output.
2974	typeset tmpfile=$(mktemp)
2975	zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
2976	for vdev in "$@"; do
2977		grep -wq ${vdev##*/} $tmpfile || { rm -f $tmpfile; return 1; }
2978	done
2979
2980	rm -f $tmpfile
2981	return 0
2982}
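#
# Usage sketch for vdevs_in_pool() (illustrative; "mirror-0" and $DISK1 are
# assumed to be part of the pool's configuration):
#
#	log_must vdevs_in_pool $TESTPOOL mirror-0 $DISK1
#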
2983
2984function get_max
2985{
2986	typeset -l i max=$1
2987	shift
2988
2989	for i in "$@"; do
2990		max=$((max > i ? max : i))
2991	done
2992
2993	echo $max
2994}
2995
2996# Write data that can be compressed into a directory
2997function write_compressible
2998{
2999	typeset dir=$1
3000	typeset megs=$2
3001	typeset nfiles=${3:-1}
3002	typeset bs=${4:-1024k}
3003	typeset fname=${5:-file}
3004
3005	[[ -d $dir ]] || log_fail "No directory: $dir"
3006
3007	# Under Linux fio is not currently used since its behavior can
3008	# differ significantly across versions.  This includes missing
3009	# command line options and cases where the --buffer_compress_*
3010	# options fail to behave as expected.
3011	if is_linux; then
3012		typeset file_bytes=$(to_bytes $megs)
3013		typeset bs_bytes=4096
3014		typeset blocks=$(($file_bytes / $bs_bytes))
3015
3016		for (( i = 0; i < $nfiles; i++ )); do
3017			truncate -s $file_bytes $dir/$fname.$i
3018
3019			# Write every third block to get 66% compression.
3020			for (( j = 0; j < $blocks; j += 3 )); do
3021				dd if=/dev/urandom of=$dir/$fname.$i \
3022				    seek=$j bs=$bs_bytes count=1 \
3023				    conv=notrunc >/dev/null 2>&1
3024			done
3025		done
3026	else
3027		command -v fio > /dev/null || log_unsupported "fio missing"
3028		log_must eval fio \
3029		    --name=job \
3030		    --fallocate=0 \
3031		    --minimal \
3032		    --randrepeat=0 \
3033		    --buffer_compress_percentage=66 \
3034		    --buffer_compress_chunk=4096 \
3035		    --directory="$dir" \
3036		    --numjobs="$nfiles" \
3037		    --nrfiles="$nfiles" \
3038		    --rw=write \
3039		    --bs="$bs" \
3040		    --filesize="$megs" \
3041		    "--filename_format='$fname.\$jobnum' >/dev/null"
3042	fi
3043}
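#
# Usage sketch for write_compressible() (illustrative sizes; this writes two
# ~64 MiB files named bigfile.0 and bigfile.1 that are about 66% compressible):
#
#	write_compressible $TESTDIR 64m 2 1024k bigfile
#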
3044
3045function get_objnum
3046{
3047	typeset pathname=$1
3048	typeset objnum
3049
3050	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
3051	if is_freebsd; then
3052		objnum=$(stat -f "%i" $pathname)
3053	else
3054		objnum=$(stat -c %i $pathname)
3055	fi
3056	echo $objnum
3057}
3058
3059#
3060# Sync data to the pool
3061#
3062# $1 pool name
3063# $2 boolean to force uberblock (and config including zpool cache file) update
3064#
3065function sync_pool #pool <force>
3066{
3067	typeset pool=${1:-$TESTPOOL}
3068	typeset force=${2:-false}
3069
3070	if [[ $force == true ]]; then
3071		log_must zpool sync -f $pool
3072	else
3073		log_must zpool sync $pool
3074	fi
3075
3076	return 0
3077}
3078
3079#
3080# Sync all pools
3081#
3082# $1 boolean to force uberblock (and config including zpool cache file) update
3083#
3084function sync_all_pools #<force>
3085{
3086	typeset force=${1:-false}
3087
3088	if [[ $force == true ]]; then
3089		log_must zpool sync -f
3090	else
3091		log_must zpool sync
3092	fi
3093
3094	return 0
3095}
3096
3097#
3098# Wait for zpool 'freeing' property drops to zero.
3099#
3100# $1 pool name
3101#
3102function wait_freeing #pool
3103{
3104	typeset pool=${1:-$TESTPOOL}
3105	while true; do
3106		[[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3107		log_must sleep 1
3108	done
3109}
3110
3111#
3112# Wait for every device replace operation to complete
3113#
3114# $1 pool name
3115# $2 timeout
3116#
3117function wait_replacing #pool timeout
3118{
3119	typeset timeout=${2:-300}
3120	typeset pool=${1:-$TESTPOOL}
3121	for (( timer = 0; timer < $timeout; timer++ )); do
3122		is_pool_replacing $pool || break;
3123		sleep 1;
3124	done
3125}
3126
3127# Wait for a pool to be scrubbed
3128#
3129# $1 pool name
3130# $2 timeout
3131#
3132function wait_scrubbed #pool timeout
3133{
3134	typeset timeout=${2:-300}
3135	typeset pool=${1:-$TESTPOOL}
3136	for (( timer = 0; timer < $timeout; timer++ )); do
3137		is_pool_scrubbed $pool && break;
3138		sleep 1;
3139	done
3140}
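#
# Usage sketch for the wait helpers above (the 60 second timeout is
# illustrative):
#
#	log_must zpool scrub $TESTPOOL
#	wait_scrubbed $TESTPOOL 60
#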
3141
3142# Backup the zed.rc in our test directory so that we can edit it for our test.
3143#
3144# Returns: Backup file name.  You will need to pass this to zed_rc_restore().
3145function zed_rc_backup
3146{
3147	zedrc_backup="$(mktemp)"
3148	cp $ZEDLET_DIR/zed.rc $zedrc_backup
3149	echo $zedrc_backup
3150}
3151
3152function zed_rc_restore
3153{
3154	mv $1 $ZEDLET_DIR/zed.rc
3155}
3156
3157#
3158# Setup custom environment for the ZED.
3159#
3160# $@ Optional list of zedlets to run under zed.
3161function zed_setup
3162{
3163	if ! is_linux; then
3164		log_unsupported "No zed on $UNAME"
3165	fi
3166
3167	if [[ ! -d $ZEDLET_DIR ]]; then
3168		log_must mkdir $ZEDLET_DIR
3169	fi
3170
3171	if [[ ! -e $VDEVID_CONF ]]; then
3172		log_must touch $VDEVID_CONF
3173	fi
3174
3175	if [[ -e $VDEVID_CONF_ETC ]]; then
3176		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
3177	fi
3178	EXTRA_ZEDLETS=$@
3179
3180	# Create a symlink for /etc/zfs/vdev_id.conf file.
3181	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
3182
3183	# Setup minimal ZED configuration.  Individual test cases should
3184	# add additional ZEDLETs as needed for their specific test.
3185	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
3186	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
3187
3188	# Scripts must only be user writable.
3189	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
3190		saved_umask=$(umask)
3191		log_must umask 0022
3192		for i in $EXTRA_ZEDLETS ; do
3193			log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
3194		done
3195		log_must umask $saved_umask
3196	fi
3197
3198	# Customize the zed.rc file to enable the full debug log.
3199	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
3200	echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
3201
3202}
3203
3204#
3205# Cleanup custom ZED environment.
3206#
3207# $@ Optional list of zedlets to remove from our test zed.d directory.
3208function zed_cleanup
3209{
3210	if ! is_linux; then
3211		return
3212	fi
3213
3214	for extra_zedlet; do
3215		log_must rm -f ${ZEDLET_DIR}/$extra_zedlet
3216	done
3217	log_must rm -fd ${ZEDLET_DIR}/zed.rc ${ZEDLET_DIR}/zed-functions.sh ${ZEDLET_DIR}/all-syslog.sh ${ZEDLET_DIR}/all-debug.sh ${ZEDLET_DIR}/state \
3218	                $ZED_LOG $ZED_DEBUG_LOG $VDEVID_CONF_ETC $VDEVID_CONF \
3219	                $ZEDLET_DIR
3220}
3221
3222#
3223# Check if ZED is currently running; if so, returns PIDs
3224#
3225function zed_check
3226{
3227	if ! is_linux; then
3228		return
3229	fi
3230	zedpids="$(pgrep -x zed)"
3231	zedpids2="$(pgrep -x lt-zed)"
3232	echo ${zedpids} ${zedpids2}
3233}
3234
3235#
3236# Check if ZED is currently running, if not start ZED.
3237#
3238function zed_start
3239{
3240	if ! is_linux; then
3241		return
3242	fi
3243
3244	# ZEDLET_DIR=$TEST_BASE_DIR/zed
3245	if [[ ! -d $ZEDLET_DIR ]]; then
3246		log_must mkdir $ZEDLET_DIR
3247	fi
3248
3249	# Verify the ZED is not already running.
3250	zedpids=$(zed_check)
3251	if [ -n "$zedpids" ]; then
3252		# We never, ever, really want it to just keep going if zed
3253		# is already running - usually this implies our test cases
3254		# will break very strangely because whatever we wanted to
3255		# configure zed for won't be listening to our changes in the
3256		# tmpdir
3257		log_fail "ZED already running - ${zedpids}"
3258	else
3259		log_note "Starting ZED"
3260		# run ZED in the background and redirect foreground logging
3261		# output to $ZED_LOG.
3262		log_must truncate -s 0 $ZED_DEBUG_LOG
3263		log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \
3264		    "-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &"
3265	fi
3266
3267	return 0
3268}
3269
3270#
3271# Kill ZED process
3272#
3273function zed_stop
3274{
3275	if ! is_linux; then
3276		return
3277	fi
3278
3279	log_note "Stopping ZED"
3280	while true; do
3281		zedpids=$(zed_check)
3282		[ ! -n "$zedpids" ] && break
3283
3284		log_must kill $zedpids
3285		sleep 1
3286	done
3287	return 0
3288}
3289
3290#
3291# Drain all zevents
3292#
3293function zed_events_drain
3294{
3295	while [ $(zpool events -H | wc -l) -ne 0 ]; do
3296		sleep 1
3297		zpool events -c >/dev/null
3298	done
3299}
3300
3301# Set a variable in zed.rc to something, un-commenting it in the process.
3302#
3303# $1 variable
3304# $2 value
3305function zed_rc_set
3306{
3307	var="$1"
3308	val="$2"
3309	# Remove the line
3310	cmd="'/$var/d'"
3311	eval sed -i $cmd $ZEDLET_DIR/zed.rc
3312
3313	# Add it at the end
3314	echo "$var=$val" >> $ZEDLET_DIR/zed.rc
3315}
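#
# Usage sketch for the zed.rc helpers (illustrative; ZED_NOTIFY_INTERVAL_SECS
# is one of the settings shipped in zed.rc, and the value shown is arbitrary):
#
#	typeset zedrc_bak=$(zed_rc_backup)
#	zed_rc_set ZED_NOTIFY_INTERVAL_SECS 5
#	... run test ...
#	zed_rc_restore $zedrc_bak
#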
3316
3317
3318#
3319# Check if the provided device is actively being used as a swap device.
3320#
3321function is_swap_inuse
3322{
3323	typeset device=$1
3324
3325	if [[ -z $device ]] ; then
3326		log_note "No device specified."
3327		return 1
3328	fi
3329
3330	case "$UNAME" in
3331	Linux)
3332		swapon -s | grep -wq $(readlink -f $device)
3333		;;
3334	FreeBSD)
3335		swapctl -l | grep -wq $device
3336		;;
3337	*)
3338		swap -l | grep -wq $device
3339		;;
3340	esac
3341}
3342
3343#
3344# Setup a swap device using the provided device.
3345#
3346function swap_setup
3347{
3348	typeset swapdev=$1
3349
3350	case "$UNAME" in
3351	Linux)
3352		log_must eval "mkswap $swapdev > /dev/null 2>&1"
3353		log_must swapon $swapdev
3354		;;
3355	FreeBSD)
3356		log_must swapctl -a $swapdev
3357		;;
3358	*)
3359		log_must swap -a $swapdev
3360		;;
3361	esac
3362
3363	return 0
3364}
3365
3366#
3367# Cleanup a swap device on the provided device.
3368#
3369function swap_cleanup
3370{
3371	typeset swapdev=$1
3372
3373	if is_swap_inuse $swapdev; then
3374		if is_linux; then
3375			log_must swapoff $swapdev
3376		elif is_freebsd; then
3377			log_must swapoff $swapdev
3378		else
3379			log_must swap -d $swapdev
3380		fi
3381	fi
3382
3383	return 0
3384}
3385
3386#
3387# Set a global system tunable (64-bit value)
3388#
3389# $1 tunable name (use a NAME defined in tunables.cfg)
3390# $2 tunable values
3391#
3392function set_tunable64
3393{
3394	set_tunable_impl "$1" "$2" Z
3395}
3396
3397#
3398# Set a global system tunable (32-bit value)
3399#
3400# $1 tunable name (use a NAME defined in tunables.cfg)
3401# $2 tunable values
3402#
3403function set_tunable32
3404{
3405	set_tunable_impl "$1" "$2" W
3406}
3407
3408function set_tunable_impl
3409{
3410	typeset name="$1"
3411	typeset value="$2"
3412	typeset mdb_cmd="$3"
3413
3414	eval "typeset tunable=\$$name"
3415	case "$tunable" in
3416	UNSUPPORTED)
3417		log_unsupported "Tunable '$name' is unsupported on $UNAME"
3418		;;
3419	"")
3420		log_fail "Tunable '$name' must be added to tunables.cfg"
3421		;;
3422	*)
3423		;;
3424	esac
3425
3426	[[ -z "$value" ]] && return 1
3427	[[ -z "$mdb_cmd" ]] && return 1
3428
3429	case "$UNAME" in
3430	Linux)
3431		typeset zfs_tunables="/sys/module/zfs/parameters"
3432		echo "$value" >"$zfs_tunables/$tunable"
3433		;;
3434	FreeBSD)
3435		sysctl vfs.zfs.$tunable=$value
3436		;;
3437	SunOS)
3438		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
3439		;;
3440	esac
3441}
3442
3443function save_tunable
3444{
3445	if tunable_exists $1 ; then
3446		[[ ! -d $TEST_BASE_DIR ]] && return 1
3447		[[ -e $TEST_BASE_DIR/tunable-$1 ]] && return 2
3448		echo "$(get_tunable """$1""")" > "$TEST_BASE_DIR"/tunable-"$1"
3449	fi
3450}
3451
3452function restore_tunable
3453{
3454	if tunable_exists $1 ; then
3455		[[ ! -e $TEST_BASE_DIR/tunable-$1 ]] && return 1
3456		val="$(cat $TEST_BASE_DIR/tunable-"""$1""")"
3457		set_tunable64 "$1" "$val"
3458		rm $TEST_BASE_DIR/tunable-$1
3459	fi
3460}
3461
3462#
3463# Get a global system tunable
3464#
3465# $1 tunable name (use a NAME defined in tunables.cfg)
3466#
3467function get_tunable
3468{
3469	get_tunable_impl "$1"
3470}
3471
3472function get_tunable_impl
3473{
3474	typeset name="$1"
3475	typeset module="${2:-zfs}"
3476	typeset check_only="$3"
3477
3478	eval "typeset tunable=\$$name"
3479	case "$tunable" in
3480	UNSUPPORTED)
3481		if [ -z "$check_only" ] ; then
3482			log_unsupported "Tunable '$name' is unsupported on $UNAME"
3483		else
3484			return 1
3485		fi
3486		;;
3487	"")
3488		if [ -z "$check_only" ] ; then
3489			log_fail "Tunable '$name' must be added to tunables.cfg"
3490		else
3491			return 1
3492		fi
3493		;;
3494	*)
3495		;;
3496	esac
3497
3498	case "$UNAME" in
3499	Linux)
3500		typeset zfs_tunables="/sys/module/$module/parameters"
3501		cat $zfs_tunables/$tunable
3502		;;
3503	FreeBSD)
3504		sysctl -n vfs.zfs.$tunable
3505		;;
3506	SunOS)
3507		[[ "$module" == "zfs" ]] || return 1
3508		;;
3509	esac
3510}
3511
3512# Does a tunable exist?
3513#
3514# $1: Tunable name
3515function tunable_exists
3516{
3517	get_tunable_impl $1 "zfs" 1
3518}
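#
# Usage sketch for the tunable helpers (TXG_TIMEOUT is one of the NAMEs
# defined in tunables.cfg; the value shown is illustrative):
#
#	log_must save_tunable TXG_TIMEOUT
#	log_must set_tunable32 TXG_TIMEOUT 1
#	... run test ...
#	log_must restore_tunable TXG_TIMEOUT
#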
3519
3520#
3521# Compute xxh128sum for given file or stdin if no file given.
3522# Note: file path must not contain spaces
3523#
3524function xxh128digest
3525{
3526	xxh128sum $1 | awk '{print $1}'
3527}
3528
3529#
3530# Compare the xxhash128 digest of two files.
3531#
3532function cmp_xxh128 {
3533	typeset file1=$1
3534	typeset file2=$2
3535
3536	typeset sum1=$(xxh128digest $file1)
3537	typeset sum2=$(xxh128digest $file2)
3538	test "$sum1" = "$sum2"
3539}
3540
3541function new_fs #<args>
3542{
3543	case "$UNAME" in
3544	FreeBSD)
3545		newfs "$@"
3546		;;
3547	*)
3548		echo y | newfs -v "$@"
3549		;;
3550	esac
3551}
3552
3553function stat_size #<path>
3554{
3555	typeset path=$1
3556
3557	case "$UNAME" in
3558	FreeBSD)
3559		stat -f %z "$path"
3560		;;
3561	*)
3562		stat -c %s "$path"
3563		;;
3564	esac
3565}
3566
3567function stat_mtime #<path>
3568{
3569	typeset path=$1
3570
3571	case "$UNAME" in
3572	FreeBSD)
3573		stat -f %m "$path"
3574		;;
3575	*)
3576		stat -c %Y "$path"
3577		;;
3578	esac
3579}
3580
3581function stat_ctime #<path>
3582{
3583	typeset path=$1
3584
3585	case "$UNAME" in
3586	FreeBSD)
3587		stat -f %c "$path"
3588		;;
3589	*)
3590		stat -c %Z "$path"
3591		;;
3592	esac
3593}
3594
3595function stat_crtime #<path>
3596{
3597	typeset path=$1
3598
3599	case "$UNAME" in
3600	FreeBSD)
3601		stat -f %B "$path"
3602		;;
3603	*)
3604		stat -c %W "$path"
3605		;;
3606	esac
3607}
3608
3609function stat_generation #<path>
3610{
3611	typeset path=$1
3612
3613	case "$UNAME" in
3614	Linux)
3615		getversion "${path}"
3616		;;
3617	*)
3618		stat -f %v "${path}"
3619		;;
3620	esac
3621}
3622
3623# Run a command as if it was being run in a TTY.
3624#
3625# Usage:
3626#
3627#    faketty command
3628#
3629function faketty
3630{
3631    if is_freebsd; then
3632        script -q /dev/null env "$@"
3633    else
3634        script --return --quiet -c "$*" /dev/null
3635    fi
3636}
3637
3638#
3639# Produce a random permutation of the integers in a given range (inclusive).
3640#
3641function range_shuffle # begin end
3642{
3643	typeset -i begin=$1
3644	typeset -i end=$2
3645
3646	seq ${begin} ${end} | sort -R
3647}
3648
3649#
3650# Cross-platform xattr helpers
3651#
3652
3653function get_xattr # name path
3654{
3655	typeset name=$1
3656	typeset path=$2
3657
3658	case "$UNAME" in
3659	FreeBSD)
3660		getextattr -qq user "${name}" "${path}"
3661		;;
3662	*)
3663		attr -qg "${name}" "${path}"
3664		;;
3665	esac
3666}
3667
3668function set_xattr # name value path
3669{
3670	typeset name=$1
3671	typeset value=$2
3672	typeset path=$3
3673
3674	case "$UNAME" in
3675	FreeBSD)
3676		setextattr user "${name}" "${value}" "${path}"
3677		;;
3678	*)
3679		attr -qs "${name}" -V "${value}" "${path}"
3680		;;
3681	esac
3682}
3683
3684function set_xattr_stdin # name path
3685{
3686	typeset name=$1
3687	typeset path=$2
3688
3689	case "$UNAME" in
3690	FreeBSD)
3691		setextattr -i user "${name}" "${path}"
3692		;;
3693	*)
3694		attr -qs "${name}" "${path}"
3695		;;
3696	esac
3697}
3698
3699function rm_xattr # name path
3700{
3701	typeset name=$1
3702	typeset path=$2
3703
3704	case "$UNAME" in
3705	FreeBSD)
3706		rmextattr -q user "${name}" "${path}"
3707		;;
3708	*)
3709		attr -qr "${name}" "${path}"
3710		;;
3711	esac
3712}
3713
3714function ls_xattr # path
3715{
3716	typeset path=$1
3717
3718	case "$UNAME" in
3719	FreeBSD)
3720		lsextattr -qq user "${path}"
3721		;;
3722	*)
3723		attr -ql "${path}"
3724		;;
3725	esac
3726}
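#
# Usage sketch for the xattr helpers (the attribute name/value and file path
# are illustrative):
#
#	log_must set_xattr testattr testvalue $TESTDIR/file
#	[[ $(get_xattr testattr $TESTDIR/file) == "testvalue" ]] || \
#	    log_fail "unexpected xattr value"
#	log_must rm_xattr testattr $TESTDIR/file
#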
3727
3728function punch_hole # offset length file
3729{
3730	typeset offset=$1
3731	typeset length=$2
3732	typeset file=$3
3733
3734	case "$UNAME" in
3735	FreeBSD)
3736		truncate -d -o $offset -l $length "$file"
3737		;;
3738	Linux)
3739		fallocate --punch-hole --offset $offset --length $length "$file"
3740		;;
3741	*)
3742		false
3743		;;
3744	esac
3745}
3746
3747function zero_range # offset length file
3748{
3749	typeset offset=$1
3750	typeset length=$2
3751	typeset file=$3
3752
3753	case "$UNAME" in
3754	Linux)
3755		fallocate --zero-range --offset $offset --length $length "$file"
3756		;;
3757	*)
3758		false
3759		;;
3760	esac
3761}
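#
# Usage sketch for punch_hole()/zero_range() (illustrative offsets and file;
# note zero_range is only implemented on Linux):
#
#	log_must punch_hole 0 1048576 $TESTDIR/file
#	log_must zero_range 1048576 1048576 $TESTDIR/file
#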
3762
3763#
3764# Wait for the specified arcstat to reach non-zero quiescence.
3765# If echo is 1 echo the value after reaching quiescence, otherwise
3766# if echo is 0 print the arcstat we are waiting on.
3767#
3768function arcstat_quiescence # stat echo
3769{
3770	typeset stat=$1
3771	typeset echo=$2
3772	typeset do_once=true
3773
3774	if [[ $echo -eq 0 ]]; then
3775		echo "Waiting for arcstat $1 quiescence."
3776	fi
3777
3778	while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
3779		typeset stat1=$(kstat arcstats.$stat)
3780		sleep 0.5
3781		typeset stat2=$(kstat arcstats.$stat)
3782		do_once=false
3783	done
3784
3785	if [[ $echo -eq 1 ]]; then
3786		echo $stat2
3787	fi
3788}
3789
3790function arcstat_quiescence_noecho # stat
3791{
3792	typeset stat=$1
3793	arcstat_quiescence $stat 0
3794}
3795
3796function arcstat_quiescence_echo # stat
3797{
3798	typeset stat=$1
3799	arcstat_quiescence $stat 1
3800}
3801
3802#
3803# Given an array of pids, wait until all processes
3804# have completed and check their return status.
3805#
3806function wait_for_children #children
3807{
3808	rv=0
3809	children=("$@")
3810	for child in "${children[@]}"
3811	do
3812		child_exit=0
3813		wait ${child} || child_exit=$?
3814		if [ $child_exit -ne 0 ]; then
3815			echo "child ${child} failed with ${child_exit}"
3816			rv=1
3817		fi
3818	done
3819	return $rv
3820}
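#
# Usage sketch for wait_for_children() (the background workload is
# illustrative):
#
#	typeset pids=""
#	for i in 1 2 3; do
#		dd if=/dev/urandom of=$TESTDIR/file.$i bs=1024k count=8 &
#		pids="$pids $!"
#	done
#	log_must wait_for_children $pids
#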
3821
3822#
3823# Compare two directory trees recursively in a manner similar to diff(1), but
3824# using rsync. If there are any discrepancies, a summary of the differences is
3825# output and a non-zero error is returned.
3826#
3827# If you're comparing a directory after a ZIL replay, you should set
3828# LIBTEST_DIFF_ZIL_REPLAY=1 or use replay_directory_diff which will cause
3829# directory_diff to ignore mtime changes (the ZIL replay won't fix up mtime
3830# information).
3831#
3832function directory_diff # dir_a dir_b
3833{
3834	dir_a="$1"
3835	dir_b="$2"
3836	zil_replay="${LIBTEST_DIFF_ZIL_REPLAY:-0}"
3837
3838	# If one of the directories doesn't exist, return 2. This is to match the
3839	# semantics of diff.
3840	if ! [ -d "$dir_a" -a -d "$dir_b" ]; then
3841		return 2
3842	fi
3843
3844	# Run rsync with --dry-run --itemize-changes to get something akin to diff
3845	# output, but rsync is far more thorough in detecting differences (diff
3846	# doesn't compare file metadata, and cannot handle special files).
3847	#
3848	# Also make sure to filter out non-user.* xattrs when comparing. On
3849	# SELinux-enabled systems the copied tree will probably have different
3850	# SELinux labels.
3851	args=("-nicaAHX" '--filter=-x! user.*' "--delete")
3852
3853	# NOTE: Quite a few rsync builds do not support --crtimes which would be
3854	# necessary to verify that creation times are being maintained properly.
3855	# Unfortunately because of this we cannot use it unconditionally but we can
3856	# check if this rsync build supports it and use it then. This check is
3857	# based on the same check in the rsync test suite (testsuite/crtimes.test).
3858	#
3859	# We check ctimes even with zil_replay=1 because the ZIL does store
3860	# creation times and we should make sure they match (if the creation times
3861	# do not match there is a "c" entry in one of the columns).
3862	if rsync --version | grep -q "[, ] crtimes"; then
3863		args+=("--crtimes")
3864	else
3865		log_note "This rsync package does not support --crtimes (-N)."
3866	fi
3867
3868	# If we are testing a ZIL replay, we need to ignore timestamp changes.
3869	# Unfortunately --no-times doesn't do what we want -- it will still tell
3870	# you if the timestamps don't match but rsync will set the timestamps to
3871	# the current time (leading to an itemised change entry). It's simpler to
3872	# just filter out those lines.
3873	if [ "$zil_replay" -eq 0 ]; then
3874		filter=("cat")
3875	else
3876		# Different rsync versions have different numbers of columns. So just
3877		# require that aside from the first two, all other columns must be
3878		# blank (literal ".") or a timestamp field ("[tT]").
3879		filter=("grep" "-v" '^\..[.Tt]\+ ')
3880	fi
3881
3882	diff="$(rsync "${args[@]}" "$dir_a/" "$dir_b/" | "${filter[@]}")"
3883	rv=0
3884	if [ -n "$diff" ]; then
3885		echo "$diff"
3886		rv=1
3887	fi
3888	return $rv
3889}
3890
3891#
3892# Compare two directory trees recursively, without checking whether the mtimes
3893# match (creation times will be checked if the available rsync binary supports
3894# it). This is necessary for ZIL replay checks (because the ZIL does not
3895# contain mtimes and thus after a ZIL replay, mtimes won't match).
3896#
3897# This is shorthand for LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff <...>.
3898#
3899function replay_directory_diff # dir_a dir_b
3900{
3901	LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff "$@"
3902}
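#
# Usage sketch for the directory comparison helpers (illustrative paths):
#
#	log_must directory_diff $TESTDIR/src $TESTDIR/copy
#	log_must replay_directory_diff $TESTDIR/before $TESTDIR/after
#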
3903
3904#
3905# Put coredumps into $1/core.{basename}
3906#
3907# Output must be saved and passed to pop_coredump_pattern on cleanup
3908#
3909function push_coredump_pattern # dir
3910{
3911	ulimit -c unlimited
3912	case "$UNAME" in
3913	Linux)
3914		cat /proc/sys/kernel/core_pattern /proc/sys/kernel/core_uses_pid
3915		echo "$1/core.%e" >/proc/sys/kernel/core_pattern &&
3916		    echo 0 >/proc/sys/kernel/core_uses_pid
3917		;;
3918	FreeBSD)
3919		sysctl -n kern.corefile
3920		sysctl kern.corefile="$1/core.%N" >/dev/null
3921		;;
3922	*)
3923		# Nothing to output - set only for this shell
3924		coreadm -p "$1/core.%f"
3925		;;
3926	esac
3927}
3928
3929#
3930# Put coredumps back into the default location
3931#
3932function pop_coredump_pattern
3933{
3934	[ -s "$1" ] || return 0
3935	case "$UNAME" in
3936	Linux)
3937		typeset pat pid
3938		{ read -r pat; read -r pid; } < "$1"
3939		echo "$pat" >/proc/sys/kernel/core_pattern &&
3940		    echo "$pid" >/proc/sys/kernel/core_uses_pid
3941		;;
3942	FreeBSD)
3943		sysctl kern.corefile="$(<"$1")" >/dev/null
3944		;;
3945	esac
3946}
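#
# Usage sketch for the coredump pattern helpers; the saved output must be
# kept in a file and handed back on cleanup (the file name is illustrative):
#
#	push_coredump_pattern $TEST_BASE_DIR > $TEST_BASE_DIR/coresave
#	... run test that may dump core ...
#	pop_coredump_pattern $TEST_BASE_DIR/coresave
#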
3947
3948. ${STF_SUITE}/include/kstat.shlib
3949