#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
#

unset LD_LIBRARY_PATH
PATH=/usr/bin:/usr/sbin
export PATH

. /usr/lib/brand/shared/common.ksh

# Use the ipkg-brand ZFS property for denoting the zone root's active dataset.
PROP_ACTIVE="org.opensolaris.libbe:active"
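# The active zone boot environment carries this property with the value "on",
# e.g. (illustrative dataset name):
#	/usr/sbin/zfs get -H -o value $PROP_ACTIVE rpool/zones/s10z/ROOT/zbe-0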

f_sanity_detail=$(gettext  "Missing %s at %s")
f_sanity_sparse=$(gettext  "Is this a sparse zone image?  The image must be whole-root.")
f_sanity_vers=$(gettext  "The image release version must be 10 (got %s); the zone is not usable on this system.")
f_not_s10_image=$(gettext  "%s doesn't look like a Solaris 10 image.")
f_sanity_nopatch=$(gettext "Unable to determine the image's patch level.")
f_sanity_downrev=$(gettext "The image patch level is downrev for running in a solaris10 branded zone.\n(patchlist %s)")
f_need_newer_emul=$(gettext "The image requires a newer version of the solaris10 brand emulation.")
f_zfs_create=$(gettext "Unable to create the zone's ZFS dataset.")
f_no_ds=$(gettext "No zonepath dataset; the zonepath must be a ZFS dataset.")
f_multiple_ds=$(gettext "Multiple active datasets.")
f_no_active_ds=$(gettext "No active dataset; the zone's ZFS root dataset must be configured as\n\ta zone boot environment.")
f_zfs_unmount=$(gettext "Unable to unmount the zone's root ZFS dataset (%s).\nIs there a global zone process inside the zone root?\nThe current zone boot environment will remain mounted.\n")
f_zfs_mount=$(gettext "Unable to mount the zone's ZFS dataset.")
incompat_options=$(gettext "mutually exclusive options.\n%s")

sanity_ok=$(gettext     "  Sanity Check: Passed.  Looks like a Solaris 10 image.")
sanity_fail=$(gettext   "  Sanity Check: FAILED (see log for details).")

e_badboot=$(gettext "Zone boot failed")
e_nosingleuser=$(gettext "ERROR: zone did not finish booting to single-user.")
e_unconfig=$(gettext "sys-unconfig failed")
v_unconfig=$(gettext "Performing zone sys-unconfig")

sanity_check()
{
	typeset dir="$1"
	res=0

	#
	# Check for some required directories and make sure this isn't a
	# sparse zone image.
	#
	checks="etc etc/svc var var/svc"
	for x in $checks; do
		if [[ ! -e $dir/$x ]]; then
			log "$f_sanity_detail" "$x" "$dir"
			res=1
		fi
	done
	# Files from SUNWcsr and SUNWcsu that are in sparse inherit-pkg-dirs.
	checks="lib/svc sbin/zonename usr/bin/chmod"
	for x in $checks; do
		if [[ ! -e $dir/$x ]]; then
			log "$f_sanity_detail" "$x" "$dir"
			log "$f_sanity_sparse"
			res=1
		fi
	done

	#
	# Check the image release to be sure it's S10.
	#
	image_vers="unknown"
	if [[ -f $dir/var/sadm/system/admin/INST_RELEASE ]]; then
		image_vers=$(nawk -F= '{if ($1 == "VERSION") print $2}' \
		    $dir/var/sadm/system/admin/INST_RELEASE)
	fi

	if [[ "$image_vers" != "10" ]]; then
		log "$f_sanity_vers" "$image_vers"
		res=1
	fi

	#
	# Make sure we have the minimal KU patch we support.  These are the
	# KUs for S10u8.
	#
	if [[ $(uname -p) == "i386" ]]; then
		req_patch="141445-09"
	else
		req_patch="141444-09"
	fi

	for i in $dir/var/sadm/pkg/SUNWcakr*
	do
		if [[ ! -d $i || ! -f $i/pkginfo ]]; then
			log "$f_sanity_nopatch"
			res=1
		fi
	done

	#
	# Check the core kernel pkg for the required KU patch.
	#
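	# pkginfo records all applied patches on a single PATCHLIST line of
	# space-separated patch IDs, e.g. (illustrative):
	#	PATCHLIST=141444-09 <other patch IDs>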
	found=0
	for i in $dir/var/sadm/pkg/SUNWcakr*/pkginfo
	do
		patches=$(nawk -F= '{if ($1 == "PATCHLIST") print $2}' $i)
		for patch in $patches
		do
			if [[ $patch == $req_patch ]]; then
				found=1
				break
			fi
		done

		if (( $found == 1 )); then
			break
		fi
	done

	if (( $found != 1 )); then
		log "$f_sanity_downrev" "$patches"
		res=1
	fi

	#
	# Check the S10 image for a required version of the emulation.
	#
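	# The version file holds the required emulation version as an integer
	# (lines beginning with '#' are comments); an image without the file
	# is treated as version 0.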
	VERS_FILE=/usr/lib/brand/solaris10/version
	s10vers_needs=0
	if [[ -f $dir/$VERS_FILE ]]; then
		s10vers_needs=$(/usr/bin/egrep -v "^#" $dir/$VERS_FILE)
	fi

	# Now get the current emulation version.
	emul_vers=$(/usr/bin/egrep -v "^#" $VERS_FILE)

	# Verify that the emulation can run this version of S10.
	if (( $s10vers_needs > $emul_vers )); then
		log "$f_need_newer_emul"
		res=1
	fi

	if (( $res != 0 )); then
		log "$sanity_fail"
		fatal "$install_fail" "$ZONENAME"
	fi

	vlog "$sanity_ok"
}
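
# Example usage (illustrative; the brand's install and attach scripts invoke
# this against the root of the unpacked image):
#	sanity_check $ZONEROOT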

# Find the active dataset under the zonepath dataset to mount on zonepath/root.
# $1 ZONEPATH_DS
get_active_ds() {
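	# List the children of $1/ROOT along with the active property; the
	# nawk script prints each dataset directly under ROOT whose property
	# is "on" and exits 1 if more than one such dataset is found.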
	ACTIVE_DS=`/usr/sbin/zfs list -H -r -t filesystem \
	    -o name,$PROP_ACTIVE $1/ROOT | \
	    /usr/bin/nawk ' {
		if ($1 ~ /ROOT\/[^\/]+$/ && $2 == "on") {
			print $1
			if (found == 1)
				exit 1
			found = 1
		}
	    }'`

	if [ $? -ne 0 ]; then
		fail_fatal "$f_multiple_ds"
	fi

	if [ -z "$ACTIVE_DS" ]; then
		fail_fatal "$f_no_active_ds"
	fi
}
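
# Typical invocation (illustrative, mirroring mount_active_ds below; on
# success ACTIVE_DS holds the dataset to mount):
#	get_zonepath_ds $zonepath
#	get_active_ds $ZONEPATH_DS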

#
# Make sure the active dataset is mounted for the zone.  There are several
# cases to consider:
# 1) First boot of the zone, nothing is mounted
# 2) Zone is halting, active dataset remains the same.
# 3) Zone is halting, there is a new active dataset to mount.
#
mount_active_ds() {
	mount -p | cut -d' ' -f3 | egrep -s "^$zonepath/root$"
	if (( $? == 0 )); then
		# Umount current dataset on the root (it might be an old BE).
		/usr/sbin/umount $zonepath/root
		if (( $? != 0 )); then
			# The umount failed; leave the old BE mounted.  If
			# there are zone processes (i.e. zsched) in the fs,
			# then we're umounting because we failed validation
			# during boot; otherwise, warn that a global zone
			# process is preventing the umount.
			nproc=`pgrep -z $zonename | wc -l`
			if (( $nproc == 0 )); then
				printf "$f_zfs_unmount" "$zonepath/root"
			fi
			return
		fi
	fi

	# Mount the active dataset on the root.
	get_zonepath_ds $zonepath
	get_active_ds $ZONEPATH_DS

	/usr/sbin/mount -F zfs $ACTIVE_DS $zonepath/root || \
	    fail_fatal "$f_zfs_mount"
}
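
# mount_active_ds takes no arguments; it relies on $zonepath and $zonename
# having been set by the calling brand hook.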

#
# Set up ZFS dataset hierarchy for the zone root dataset.
#
create_active_ds() {
	# Find the zone's current dataset.  This should have been created by
	# zoneadm (or the attach hook).
	get_zonepath_ds $zonepath

	#
	# We need to tolerate errors while creating the datasets and making the
	# mountpoint, since these could already exist from an attach scenario.
	#

	/usr/sbin/zfs list -H -o name $ZONEPATH_DS/ROOT >/dev/null 2>&1
	if (( $? != 0 )); then
		/usr/sbin/zfs create -o mountpoint=legacy -o zoned=on \
		    $ZONEPATH_DS/ROOT
		if (( $? != 0 )); then
			fail_fatal "$f_zfs_create"
		fi
	else
		/usr/sbin/zfs set mountpoint=legacy $ZONEPATH_DS/ROOT \
		    >/dev/null 2>&1
		/usr/sbin/zfs set zoned=on $ZONEPATH_DS/ROOT \
		    >/dev/null 2>&1
	fi

	BENAME=zbe-0
	/usr/sbin/zfs list -H -o name $ZONEPATH_DS/ROOT/$BENAME >/dev/null 2>&1
	if (( $? != 0 )); then
		/usr/sbin/zfs create -o $PROP_ACTIVE=on -o canmount=noauto \
		    $ZONEPATH_DS/ROOT/$BENAME >/dev/null 2>&1
		if (( $? != 0 )); then
			fail_fatal "$f_zfs_create"
		fi
	else
		/usr/sbin/zfs set $PROP_ACTIVE=on $ZONEPATH_DS/ROOT/$BENAME \
		    >/dev/null 2>&1
		/usr/sbin/zfs set canmount=noauto $ZONEPATH_DS/ROOT/$BENAME \
		    >/dev/null 2>&1
		/usr/sbin/zfs inherit mountpoint $ZONEPATH_DS/ROOT/$BENAME \
		    >/dev/null 2>&1
		/usr/sbin/zfs inherit zoned $ZONEPATH_DS/ROOT/$BENAME \
		    >/dev/null 2>&1
	fi

	if [ ! -d $ZONEROOT ]; then
		/usr/bin/mkdir -m 0755 -p $ZONEROOT || \
		    fail_fatal "$f_mkdir" "$ZONEROOT"
	fi
	/usr/bin/chmod 700 $ZONEPATH || fail_fatal "$f_chmod" "$ZONEPATH"

	/usr/sbin/mount -F zfs $ZONEPATH_DS/ROOT/$BENAME $ZONEROOT || \
		fail_fatal "$f_zfs_mount"
}
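
# The resulting hierarchy, for an illustrative zonepath dataset of
# rpool/zones/s10z, is:
#	rpool/zones/s10z/ROOT		mountpoint=legacy, zoned=on
#	rpool/zones/s10z/ROOT/zbe-0	org.opensolaris.libbe:active=on,
#					canmount=noauto, mounted on $ZONEROOT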

#
# Before booting the zone we may need to create a few mount points, just in
# case they don't exist for some reason.
#
# Whenever we reach into the zone while running in the global zone we
# need to validate that none of the interim directories are symlinks
# that could cause us to inadvertently modify the global zone.
#
mk_zone_dirs() {
	vlog "$v_mkdirs"
	if [[ ! -f $ZONEROOT/tmp && ! -d $ZONEROOT/tmp ]]; then
		mkdir -m 1777 -p $ZONEROOT/tmp || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/var/run && ! -d $ZONEROOT/var/run ]]; then
		mkdir -m 1755 -p $ZONEROOT/var/run || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/var/tmp && ! -d $ZONEROOT/var/tmp ]]; then
		mkdir -m 1777 -p $ZONEROOT/var/tmp || exit $EXIT_CODE
	fi
	if [[ ! -h $ZONEROOT/etc && ! -f $ZONEROOT/etc/mnttab ]]; then
		/usr/bin/touch $ZONEROOT/etc/mnttab || exit $EXIT_CODE
		/usr/bin/chmod 444 $ZONEROOT/etc/mnttab || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/proc && ! -d $ZONEROOT/proc ]]; then
		mkdir -m 755 -p $ZONEROOT/proc || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/dev && ! -d $ZONEROOT/dev ]]; then
		mkdir -m 755 -p $ZONEROOT/dev || exit $EXIT_CODE
	fi
	if [[ ! -h $ZONEROOT/etc && ! -h $ZONEROOT/etc/svc && \
	    ! -d $ZONEROOT/etc/svc ]]; then
		mkdir -m 755 -p $ZONEROOT/etc/svc/volatile || exit $EXIT_CODE
	fi
}
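
# Note that the -h tests above deliberately skip a symlinked $ZONEROOT/etc
# (or etc/svc), so the touch, chmod, and mkdir calls cannot be redirected
# outside the zone root, per the symlink caveat in the block comment above.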

#
# We're sys-unconfig-ing the zone.  This will normally halt the zone, however
# there are problems with sys-unconfig and it can hang when the zone is booted
# to milestone=none.  Sys-unconfig also sometimes hangs while halting the zone.
# Thus, we take some care to work around these sys-unconfig limitations.
#
# On entry we expect the zone to be booted.  We use sys-unconfig -R to make it
# think it's working on an alternate root and let the caller halt the zone.
#
sysunconfig_zone() {
	/usr/sbin/zlogin -S $ZONENAME /usr/sbin/sys-unconfig -R /./ \
	    >/dev/null 2>&1
	if (( $? != 0 )); then
		error "$e_unconfig"
		return 1
	fi

	return 0
}
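
# Illustrative call sequence (hypothetical; the actual callers are the brand's
# p2v/attach scripts, which wait for the zone to reach single-user before
# unconfiguring it):
#	zoneadm -z $ZONENAME boot -- -s
#	sysunconfig_zone
#	zoneadm -z $ZONENAME halt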