xref: /titanic_51/usr/src/lib/brand/shared/zone/uninstall.ksh (revision 80e2ca8596e3435bc3b76f3c597833ea0a87f85e)
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
#

#
# get script name (bname)
#
bname=`basename $0`

#
# common shell script functions
#
. /usr/lib/brand/shared/common.ksh

#
# error messages
#
m_usage=$(gettext "Usage: %s: [-hFn]")

m_1_zfs_promote=$(gettext "promoting '%s'.")
m_1_zfs_destroy=$(gettext "destroying '%s'.")
m_2_zfs_rename=$(gettext "renaming '%s' to '%s'.")
m_3_zfs_set=$(gettext "setting property %s='%s' for '%s'.")
m_rm_r=$(gettext "recursively deleting '%s'.")
m_rm=$(gettext "deleting '%s'.")

w_no_ds=$(gettext "Warning: no zonepath dataset found.")

f_usage_err=$(gettext "Error: invalid usage")
f_abort=$(gettext "Error: internal error detected, aborting.")
f_1_zfs_promote=$(gettext "Error: promoting ZFS dataset '%s'.")
f_2_zfs_rename=$(gettext "Error: renaming ZFS dataset '%s' to '%s'.")
f_3_zfs_set=$(gettext "Error: setting ZFS property %s='%s' for '%s'.")
f_1_zfs_destroy=$(gettext "Error: destroying ZFS dataset '%s'.")
f_2_zfs_get=$(gettext "Error: reading ZFS dataset property '%s' from '%s'.")
f_user_snap=$(gettext "Error: user snapshot(s) detected.")
f_stray_snap=$(gettext "Error: uncloned snapshot(s) detected.")
f_stray_clone=$(gettext "Error: cloned zone datasets found outside of zone.")
f_rm_snap=$(gettext "Error: please delete snapshot(s) and retry uninstall.")
f_rm_clone=$(gettext "Error: please delete clone(s) and retry uninstall.")
f_iu_clone=$(gettext "Error: cloned zone dataset(s) in use.")
f_dis_clone=$(gettext "Error: please stop using clone(s) and retry uninstall.")

#
# functions
#
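# print_array: print each element of the named array on its own line,
# prefixed with a tab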
print_array()
{
	typeset -n pa_array=$1

	(( pa_i = 0 ))
	while (( $pa_i < ${#pa_array[@]} )); do
		printf "\t${pa_array[$pa_i]}\n"
		(( pa_i = $pa_i + 1 ))
	done
}

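# print the usage message and exit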
usage()
{
	printf "$m_usage\n" "$bname"
	exit $ZONE_SUBPROC_USAGE
}

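# report invalid usage on stderr, then print the usage message and exit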
usage_err()
{
	printf "$f_usage_err\n" >&2
	usage >&2
}

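#
# rm_zonepath: remove the files and directories we know about under
# $zonepath, leaving any other user data in place, and finally try to
# remove $zonepath itself (which only succeeds if it is now empty).
#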
rm_zonepath()
{
	# cleanup stuff we know about and leave any user data alone

	[[ -z "$opt_n" ]] && [[ -n "$opt_v" ]] &&
		printf "$m_rm\n" "$zonepath/SUNWattached.xml"
	$nop /bin/rm -f "$zonepath/SUNWattached.xml"

	[[ -z "$opt_n" ]] && [[ -n "$opt_v" ]] &&
		printf "$m_rm_r\n" "$zonepath/lu"
	$nop /bin/rm -rf "$zonepath/lu"

	[[ -z "$opt_n" ]] && [[ -n "$opt_v" ]] &&
		printf "$m_rm_r\n" "$zonepath/dev"
	$nop /bin/rm -rf "$zonepath/dev"

	[[ -z "$opt_n" ]] && [[ -n "$opt_v" ]] &&
		printf "$m_rm_r\n" "$zonepath/root"
	$nop /bin/rm -rf "$zonepath/root"

	[[ -z "$opt_n" ]] && [[ -n "$opt_v" ]] &&
		printf "$m_rm\n" "$zonepath"
	$nop /bin/rmdir "$zonepath" 2>/dev/null
}

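#
# zfs_destroy: destroy the named dataset and all of its descendants.  If
# the dataset is a clone, also attempt to destroy its origin snapshot,
# ignoring any failure there since the origin may be shared with other
# clones.  Failure to destroy the dataset itself is fatal.
#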
zfs_destroy()
{
	zd_fs1="$1"

	# first figure out if the target fs has an origin snapshot
	zd_origin=`/sbin/zfs get -H -o value origin "$zd_fs1"`
	if [[ $? != 0 ]]; then
		printf "$f_2_zfs_get\n" origin "$zd_fs1" >&2
		exit $ZONE_SUBPROC_FATAL
	fi

	[[ -z "$opt_n" ]] && [[ -n "$opt_v" ]] &&
		printf "$m_1_zfs_destroy\n" "$zd_fs1"

	#
	# note that we specify the '-r' flag so that we destroy any
	# descendants (filesystems and snapshots) of the specified
	# filesystem.
	#
	$nop /sbin/zfs destroy -r "$zd_fs1"
	if [[ $? != 0 ]]; then
		printf "$f_1_zfs_destroy\n" "$zd_fs1" >&2
		exit $ZONE_SUBPROC_FATAL
	fi

	[[ "$zd_origin" == "-" ]] && return

	[[ -z "$opt_n" ]] && [[ -n "$opt_v" ]] &&
		printf "$m_1_zfs_destroy\n" "$zd_origin"

	$nop /sbin/zfs destroy "$zd_origin" 2>/dev/null
	#
	# we ignore errors while trying to destroy the origin since
	# the origin could have been used as the source for other
	# clones
	#
}

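# zfs_promote: promote the named clone; any failure is fatal to the uninstall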
zfs_promote()
{
	zp_fs1="$1"

	[[ -z "$opt_n" ]] &&
		printf "$m_1_zfs_promote\n" "$zp_fs1"

	$nop /sbin/zfs promote "$zp_fs1"
	if [[ $? != 0 ]]; then
		printf "$f_1_zfs_promote\n" "$zp_fs1" >&2
		exit $ZONE_SUBPROC_FATAL
	fi
}

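# zfs_rename: rename a dataset or snapshot; returns non-zero on failure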
zfs_rename()
{
	zr_fs1="$1"
	zr_fs2="$2"

	[[ -z "$opt_n" ]] &&
		printf "$m_2_zfs_rename\n" "$zr_fs1" "$zr_fs2"

	$nop /sbin/zfs rename "$zr_fs1" "$zr_fs2"
	if [[ $? != 0 ]]; then
		printf "$f_2_zfs_rename\n" "$zr_fs1" "$zr_fs2" >&2
		return 1
	fi
	return 0
}

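# zfs_set: set a single ZFS property on a dataset; returns non-zero on failure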
zfs_set()
{
	zs_prop=$1
	zs_value=$2
	zs_fs1=$3

	[[ -z "$opt_n" ]] && [[ -n "$opt_v" ]] &&
		printf "$m_3_zfs_set\n" "$zs_prop" "$zs_value" "$zs_fs1"

	$nop /sbin/zfs set "$zs_prop"="$zs_value" "$zs_fs1"
	if [[ $? != 0 ]]; then
		printf "$f_3_zfs_set\n" "$zs_prop" "$zs_value" "$zs_fs1" >&2
		return 1
	fi
	return 0
}

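#
# zfs_set_array: set a property to the given value on every dataset named
# in the array passed by reference; returns non-zero on the first failure
# unless the fourth (ignore errors) argument is set.
#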
zfs_set_array()
{
	zsa_prop=$1
	zsa_value=$2
	typeset -n zsa_array=$3
	zsa_ignore_errors=$4

	(( zsa_i = 0 ))
	while (( $zsa_i < ${#zsa_array[@]} )); do
		zfs_set "$zsa_prop" "$zsa_value" "${zsa_array[$zsa_i]}"
		[[ $? != 0 ]] && [[ -z "$zsa_ignore_errors" ]] &&
			return 1
		(( zsa_i = $zsa_i + 1 ))
	done
	return 0
}


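# counters used by snap_rename() to generate new, non-conflicting snapshot names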
(( snap_rename_zbe_i = 1 ))
(( snap_rename_snap_i = 1 ))
snap_rename_init()
{
	(( snap_rename_zbe_i = 1 ))
	(( snap_rename_snap_i = 1 ))
}

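#
# snap_rename: given the names of two variables (a filesystem and a snapshot
# name), rewrite the snapshot name to a new one based on the rename counters:
# "zbe-N" snapshots become "zbe-<counter>" and "..._snapN" snapshots get the
# next "_snap" counter.  Any other snapshot must have been created by the
# user, so we report it and abort.
#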
snap_rename()
{
	eval sr_fs=\${$1}
	eval sr_snap=\${$2}

	if [[ "$sr_snap" == ~(Elr)(zbe-[0-9][0-9]*) ]]; then
		sr_snap="zbe-$snap_rename_zbe_i"
		(( snap_rename_zbe_i = $snap_rename_zbe_i + 1 ))
	elif [[ "$sr_snap" == ~(Er)(_snap[0-9]*) ]]; then
		sr_snap=${sr_snap##~(Er)([0-9]*)}
		sr_snap="${sr_snap}${snap_rename_snap_i}"
		(( snap_rename_snap_i = $snap_rename_snap_i + 1 ))
	else
		printf "$f_user_snap\n" >&2
		printf "\t$sr_fs@$sr_snap\n" >&2
		printf "$f_rm_snap\n" >&2
		exit $ZONE_SUBPROC_FATAL
	fi

	eval $2="$sr_snap"
}

# find the dataset associated with $zonepath
uninstall_get_zonepath_ds()
{
	ZONEPATH_DS=`/sbin/zfs list -t filesystem -o name,mountpoint | \
	    /bin/nawk -v zonepath=$zonepath '{
		if ($2 == zonepath)
			print $1
	}'`

	if [ -z "$ZONEPATH_DS" ]; then
		# there is no $zonepath dataset
		rm_zonepath
		exit $ZONE_SUBPROC_OK
	fi
}

# find the dataset associated with $ZONEPATH_DS/ROOT
uninstall_get_zonepath_root_ds()
{
	ZONEPATH_RDS=`/sbin/zfs list -H -t filesystem -o name \
		$ZONEPATH_DS/ROOT 2>/dev/null`

	if [ -z "$ZONEPATH_RDS" ]; then
		# there is no $ZONEPATH_DS/ROOT dataset
		c=`/sbin/zfs list -H -t filesystem -r $ZONEPATH_DS | wc -l`
		if [ $c = 1 ]; then
			# $zonepath dataset has no descendants
			zfs_destroy "$ZONEPATH_DS"
		fi
		rm_zonepath
		exit $ZONE_SUBPROC_OK
	fi
}

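#
# destroy_zone_dataset: destroy a single zone BE dataset.  If the dataset
# has snapshots that are the source of clones (other zone BEs, or other
# zones cloned from this one), the oldest clone of the youngest snapshot is
# promoted first so that the dataset and its remaining snapshots can be
# destroyed.
#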
destroy_zone_dataset()
{
	fs=$1

	pool=${fs%%/*}

	# Fastpath.  If there are no snapshots of $fs then just delete it.
	c=`/sbin/zfs list -H -t snapshot -o name -r $fs | grep "^$fs@" |
	    LC_ALL=C LANG=C wc -l`
	if (( $c == 0 )) ; then
		zfs_destroy "$fs"
		return
	fi

	#
	# This zone BE has snapshots.  This can happen if a zone has
	# multiple BEs (in which case we have snapshots named "zbe-XXX"),
	# if this zone has been used as the source for a clone of
	# another zone (in which case we have snapshots named
	# "XXX_snap"), or if an administrator has been doing manual
	# snapshotting.
	#
	# To be able to destroy this dataset (which we'll call the
	# origin) we need to get rid of all its snapshots.  The "easiest"
	# way to do this is to:
	#
	# - delete any uncloned origin snapshots
	# - find the oldest clone of the youngest origin snapshot (which
	#   we'll call the oldest clone)
	# - check if there are any snapshot naming conflicts between
	#   the origin and the oldest clone.
	# - if so, find any clones of those conflicting origin snapshots
	# - make sure that those clones are not zoned and in use.
	# - if any of those clones are zoned, unzone them.
	# - rename origin snapshots to eliminate naming conflicts
	# - for any clones that we unzoned, rezone them.
	# - promote the oldest clone
	# - destroy the origin and all its descendants
	#

	#
	# Get a list of all the cloned datasets within the zpool
	# containing the origin filesystem.  Filter out any filesystems
	# that are descendants of the origin because we are planning to
	# destroy them anyway.
	#
	unset clones clones_origin
	(( clones_c = 0 ))
	pool=${fs%%/*}
	LANG=C LC_ALL=C /sbin/zfs list -H -t filesystem -s creation \
	    -o name,origin -r "$pool" |
	    while IFS="	" read name origin; do

		# skip non-clone filesystems
		[[ "$origin" == "-" ]] &&
			continue

		# skip descendants of the origin we plan to destroy
		[[ "$name" == ~()(${fs}/*) ]] &&
			continue

		# record this clone and its origin
		clones[$clones_c]="$name"
		clones_origin[$clones_c]="$origin"
		(( clones_c = $clones_c + 1 ))
	done

	#
	# Now do a sanity check.  Search for clones of child datasets
	# of the dataset we want to destroy that are not themselves
	# children of the dataset we're going to destroy.  This should
	# really never happen unless the global zone admin has cloned a
	# snapshot of a zone filesystem to a location outside of that
	# zone.  bad admin...
	#
	unset stray_clones
	(( stray_clones_c = 0 ))
	(( j = 0 ))
	while (( $j < $clones_c )); do
		# is the clone origin a descendant of $fs?
		if [[ "${clones_origin[$j]}" != ~()(${fs}/*) ]]; then
			# we don't care.
			(( j = $j + 1 ))
			continue
		fi
		stray_clones[$stray_clones_c]=${clones[$j]}
		(( stray_clones_c = $stray_clones_c + 1 ))
		(( j = $j + 1 ))
	done
	if (( stray_clones_c > 0 )); then
		#
		# sigh.  the admin has done something strange.
		# tell them to clean it up and retry.
		#
		printf "$f_stray_clone\n" >&2
		print_array stray_clones >&2
		printf "$f_rm_clone\n" >&2
		exit $ZONE_SUBPROC_FATAL
	fi

	# Find all the snapshots of the origin filesystem.
	unset s_origin
	(( s_origin_c = 0 ))
	/sbin/zfs list -H -t snapshot -s creation -o name -r $fs |
	    grep "^$fs@" | while read name; do
		s_origin[$s_origin_c]=$name
		(( s_origin_c = $s_origin_c + 1 ))
	done

	#
	# Now go through the origin snapshots and find those which don't
	# have clones.  We're going to explicitly delete these snapshots
	# before we do the promotion.
	#
	unset s_delete
	(( s_delete_c = 0 ))
	(( j = 0 ))
	while (( $j < $s_origin_c )); do
		(( k = 0 ))
		while (( $k < $clones_c )); do
			# if we have a match then break out of this loop
			[[ "${s_origin[$j]}" == "${clones_origin[$k]}" ]] &&
				break
			(( k = $k + 1 ))
		done
		if (( $k != $clones_c )); then
			# this snapshot has a clone, move on to the next one
			(( j = $j + 1 ))
			continue
		fi

		# snapshot has no clones so add it to our delete list
		s_delete[$s_delete_c]=${s_origin[$j]}
		(( s_delete_c = $s_delete_c + 1 ))
		# remove it from the origin snapshot list
		(( k = $j + 1 ))
		while (( $k < $s_origin_c )); do
			s_origin[(( $k - 1 ))]=${s_origin[$k]}
			(( k = $k + 1 ))
		done
		(( s_origin_c = $s_origin_c - 1 ))
	done

	#
	# Fastpath.  If there are no remaining snapshots then just
	# delete the origin filesystem (and all its descendants) and
	# move on to the next zone BE.
	#
	if (( $s_origin_c == 0 )); then
		zfs_destroy "$fs"
		return
	fi

	# find the youngest snapshot of $fs
	s_youngest=${s_origin[(( $s_origin_c - 1 ))]}

	# Find the oldest clone of the youngest snapshot of $fs
	unset s_clone
	(( j = $clones_c - 1 ))
	while (( $j >= 0 )); do
		if [[ "$s_youngest" == "${clones_origin[$j]}" ]]; then
			s_clone=${clones[$j]}
			break
		fi
		(( j = $j - 1 ))
	done
	if [[ -z "$s_clone" ]]; then
		# uh oh.  something has gone wrong.  bail.
		printf "$f_stray_snap\n" >&2
		printf "\t$s_youngest\n" >&2
		printf "$f_rm_snap\n" >&2
		exit $ZONE_SUBPROC_FATAL
	fi

	# create an array of clone snapshot names
	unset s_clone_s
	(( s_clone_s_c = 0 ))
	/sbin/zfs list -H -t snapshot -s creation -o name -r $s_clone |
	    grep "^$s_clone@" | while read name; do
		s_clone_s[$s_clone_s_c]=${name##*@}
		(( s_clone_s_c = $s_clone_s_c + 1 ))
	done

	# create arrays of possible origin snapshot renames
	unset s_origin_snap
	unset s_rename
	(( j = 0 ))
	while (( $j < $s_origin_c )); do
		s_origin_snap[$j]=${s_origin[$j]##*@}
		s_rename[$j]=${s_origin[$j]##*@}
		(( j = $j + 1 ))
	done

	#
	# Search for snapshot name collisions between the origin and
	# oldest clone.  If we find one, generate a new name for the
	# origin snapshot and re-do the collision check.
	#
	snap_rename_init
	(( j = 0 ))
	while (( $j < $s_origin_c )); do
		(( k = 0 ))
		while (( $k < $s_clone_s_c )); do

			# if there's no naming conflict continue
			if [[ "${s_rename[$j]}" != "${s_clone_s[$k]}" ]]; then
				(( k = $k + 1 ))
				continue
			fi

			#
			# The origin snapshot conflicts with a clone
			# snapshot.  Choose a new name and then restart
			# the check against the clone snapshot names.
			#
			snap_rename fs "s_rename[$j]"
			(( k = 0 ))
			continue;
		done

		# if we didn't rename this snapshot then continue
		if [[ "${s_rename[$j]}" == "${s_origin_snap[$j]}" ]]; then
			(( j = $j + 1 ))
			continue
		fi

		#
		# We need to rename this origin snapshot because it
		# conflicts with a clone snapshot name.  So above we
		# chose a name that didn't conflict with any other clone
		# snapshot names.  But we also have to avoid naming
		# conflicts with any other origin snapshot names.  So
		# check for that now.
		#
		(( k = 0 ))
		while (( $k < $s_origin_c )); do

			# don't compare against ourself
			if (( $j == $k )); then
				(( k = $k + 1 ))
				continue
			fi

			# if there's no naming conflict continue
			if [[ "${s_rename[$j]}" != "${s_rename[$k]}" ]]; then
				(( k = $k + 1 ))
				continue
			fi

			#
			# The new origin snapshot name conflicts with
			# another origin snapshot name.  Choose a new
			# name and then go back to check the new name
			# for uniqueness against all the clone snapshot
			# names.
			#
			snap_rename fs "s_rename[$j]"
			continue 2;
		done

		#
		# A new unique name has been chosen.  Move on to the
		# next origin snapshot.
		#
		(( j = $j + 1 ))
		snap_rename_init
	done

	#
	# So now we know what snapshots need to be renamed before the
	# promotion.  But there's an additional problem.  If any of the
	# filesystems cloned from these snapshots have the "zoned"
	# attribute set (which is highly likely) or if they are in use
	# (and can't be unmounted and re-mounted) then the snapshot
	# rename will fail.  So now we'll search for all the clones of
	# snapshots we plan to rename and look for ones that are zoned.
	#
	# We'll ignore any snapshot clones that may be in use but are
	# not zoned.  If these clones are in use, the rename will fail
	# and we'll abort; there's not much else we can do about it.
	# But if they are not in use the snapshot rename will unmount
	# and remount the clone.  This is ok because when the zoned
	# attribute is off, we know that the clone was originally
	# mounted from the global zone.  (So unmounting and remounting
	# it from the global zone is ok.)
	#
	# But we'll abort this whole operation if we find any clones
	# that are zoned and in use.  (This can happen if another
	# zone has been cloned from this one and is now booted.)  The
	# reason we do this is that those zoned filesystems could
	# have originally been mounted from within the zone.  So if we
	# cleared the zoned attribute and did the rename, we'd be
	# remounting the filesystem from the global zone.  This would
	# result in the zone losing the ability to unmount the
	# filesystem, which would be bad.
	#
	unset zoned_clones zoned_iu_clones
	(( zoned_clones_c = 0 ))
	(( zoned_iu_clones_c = 0 ))
	(( j = 0 ))
	# walk through all the clones
	while (( $j < $clones_c )); do
		# walk through all the origin snapshots
		(( k = 0 ))
		while (( $k < $s_origin_c )); do
			#
			# check if this clone originated from a snapshot that
			# we need to rename.
			#
			[[ "${clones_origin[$j]}" == "${s_origin[$k]}" ]] &&
			    [[ "${s_origin_snap[$k]}" != "${s_rename[$k]}" ]] &&
				break
			(( k = $k + 1 ))
			continue
		done
		if (( $k == $s_origin_c )); then
			# This isn't a clone of a snapshot we want to rename.
			(( j = $j + 1 ))
			continue;
		fi

		# get the zoned attr for this clone.
		zoned=`LC_ALL=C LANG=C \
		    /sbin/zfs get -H -o value zoned ${clones[$j]}`
		if [[ "$zoned" != on ]]; then
			# This clone isn't zoned so ignore it.
			(( j = $j + 1 ))
			continue
		fi

		# remember this clone so we can muck with its zoned attr.
		zoned_clones[$zoned_clones_c]=${clones[$j]}
		(( zoned_clones_c = $zoned_clones_c + 1 ))

		# check if it's in use
		mounted=`LC_ALL=C LANG=C \
		    /sbin/zfs get -H -o value mounted ${clones[$j]}`
		if [[ "$mounted" != yes ]]; then
			# Good news.  This clone isn't in use.
			(( j = $j + 1 ))
			continue
		fi

		# Sigh.  This clone is in use so we're destined to fail.
		zoned_iu_clones[$zoned_iu_clones_c]=${clones[$j]}
		(( zoned_iu_clones_c = $zoned_iu_clones_c + 1 ))

		# keep looking for errors so we can report them all at once.
		(( j = $j + 1 ))
	done
	if (( zoned_iu_clones_c > 0 )); then
		#
		# Tell the admin
		#
		printf "$f_iu_clone\n" >&2
		print_array zoned_iu_clones >&2
		printf "$f_dis_clone\n" >&2
		exit $ZONE_SUBPROC_FATAL
	fi

	#
	# Ok.  So we're finally done with planning and we can do some
	# damage.  We're going to:
	# - destroy unused snapshots
	# - unzone clones which originate from snapshots we need to rename
	# - rename conflicting snapshots
	# - rezone any clones which we unzoned
	# - promote the oldest clone of the youngest snapshot
	# - finally destroy the origin filesystem.
	#

	# delete any unused snapshots
	(( j = 0 ))
	while (( $j < $s_delete_c )); do
		zfs_destroy "${s_delete[$j]}"
		(( j = $j + 1 ))
	done

	# unzone clones
	zfs_set_array zoned off zoned_clones ||
		zfs_set_array zoned on zoned_clones 1

	# rename conflicting snapshots
	(( j = 0 ))
	while (( $j < $s_origin_c )); do
		if [[ "${s_origin_snap[$j]}" != "${s_rename[$j]}" ]]; then
			zfs_rename "${s_origin[$j]}" "$fs@${s_rename[$j]}"
			if [[ $? != 0 ]]; then
				# re-zone the clones before aborting
				zfs_set_array zoned on zoned_clones 1
				exit $ZONE_SUBPROC_FATAL
			fi
		fi
		(( j = $j + 1 ))
	done

	# re-zone the clones
	zfs_set_array zoned on zoned_clones 1

	# promote the oldest clone of the youngest snapshot
	zfs_promote "$s_clone"

	# destroy the origin filesystem and its descendants
	zfs_destroy "$fs"
}

#
# This function expects an array named fs_all to exist which is initialized
# with the zone's ZFS datasets that should be destroyed.  fs_all_c is the
# number of elements in the array.  ZONEPATH_RDS is the zonepath/root
# dataset and ZONEPATH_DS is the zonepath dataset.
#
destroy_zone_datasets()
{
	# Destroy the zone BE datasets one by one.
	(( i = 0 ))
	while (( $i < $fs_all_c )); do
		fs=${fs_all[$i]}

		destroy_zone_dataset "$fs"
		(( i = $i + 1 ))
	done

	#
	# Check if there are any other datasets left.  There may be datasets
	# associated with other GZ BEs, so we need to leave things alone in
	# that case.
	#
	c=`/sbin/zfs list -H -t filesystem -r $ZONEPATH_RDS | wc -l`
	if [ $c = 1 ]; then
		zfs_destroy "$ZONEPATH_RDS"
	fi
	c=`/sbin/zfs list -H -t filesystem -r $ZONEPATH_DS | wc -l`
	if [ $c = 1 ]; then
		zfs_destroy "$ZONEPATH_DS"
	fi

	rm_zonepath
}
