#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
#

unset LD_LIBRARY_PATH
PATH=/usr/bin:/usr/sbin
export PATH

. /usr/lib/brand/shared/common.ksh

# Use the ipkg-brand ZFS property for denoting the zone root's active dataset.
PROP_ACTIVE="org.opensolaris.libbe:active"
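#
# As a hypothetical illustration (pool/zone/BE names are examples only), the
# active boot environment is the dataset on which this property is set to
# "on", e.g.:
#
#	zfs get -H -o name,value org.opensolaris.libbe:active \
#	    rpool/zones/myzone/ROOT/zbe-0
#	rpool/zones/myzone/ROOT/zbe-0	on
#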

# Values for service tags.
STCLIENT=/usr/bin/stclient
ST_PRODUCT_NAME="Solaris 10 Containers"
ST_PRODUCT_REV="1.0"
ST_PRODUCT_UUID="urn:uuid:2f459121-dec7-11de-9af7-080020a9ed93"

w_sanity_detail=$(gettext "       WARNING: Skipping image sanity checks.")
f_sanity_detail=$(gettext  "Missing %s at %s")
f_sanity_sparse=$(gettext  "Is this a sparse zone image?  The image must be whole-root.")
f_sanity_vers=$(gettext  "The image release version must be 10 (got %s), the zone is not usable on this system.")
f_not_s10_image=$(gettext  "%s doesn't look like a Solaris 10 image.")
f_sanity_nopatch=$(gettext "Unable to determine the image's patch level.")
f_sanity_downrev=$(gettext "The image patch level is downrev for running in a solaris10 branded zone.\n(patchlist %s)")
f_need_newer_emul=$(gettext "The image requires a newer version of the solaris10 brand emulation.")
f_zfs_create=$(gettext "Unable to create the zone's ZFS dataset.")
f_no_ds=$(gettext "No zonepath dataset; the zonepath must be a ZFS dataset.")
f_multiple_ds=$(gettext "Multiple active datasets.")
f_no_active_ds=$(gettext "No active dataset; the zone's ZFS root dataset must be configured as\n\ta zone boot environment.")
f_zfs_unmount=$(gettext "Unable to unmount the zone's root ZFS dataset (%s).\nIs there a global zone process inside the zone root?\nThe current zone boot environment will remain mounted.\n")
f_zfs_mount=$(gettext "Unable to mount the zone's ZFS dataset.")
incompat_options=$(gettext "mutually exclusive options.\n%s")

sanity_ok=$(gettext     "  Sanity Check: Passed.  Looks like a Solaris 10 image.")
sanity_fail=$(gettext   "  Sanity Check: FAILED (see log for details).")

e_badboot=$(gettext "Zone boot failed")
e_nosingleuser=$(gettext "ERROR: zone did not finish booting to single-user.")
e_unconfig=$(gettext "sys-unconfig failed")
v_unconfig=$(gettext "Performing zone sys-unconfig")

v_no_tags=$(gettext "Service tags facility not present.")
e_bad_uuid=$(gettext "Failed to get zone UUID")
v_addtag=$(gettext "Adding service tag: %s")
v_deltag=$(gettext "Removing service tag: %s")
e_addtag_fail=$(gettext "Adding service tag failed (error: %s)")

sanity_check()
{
	typeset dir="$1"
	res=0

	#
	# Check for some required directories and make sure this isn't a
	# sparse zone image.
	#
	checks="etc etc/svc var var/svc"
	for x in $checks; do
		if [[ ! -e $dir/$x ]]; then
			log "$f_sanity_detail" "$x" "$dir"
			res=1
		fi
	done
	# Files from SUNWcsr and SUNWcsu that are in sparse inherit-pkg-dirs.
	checks="lib/svc sbin/zonename usr/bin/chmod"
	for x in $checks; do
		if [[ ! -e $dir/$x ]]; then
			log "$f_sanity_detail" "$x" "$dir"
			log "$f_sanity_sparse"
			res=1
		fi
	done

	if (( $res != 0 )); then
		log "$sanity_fail"
		fatal "$install_fail" "$ZONENAME"
	fi

	if [[ "$SANITY_SKIP" == 1 ]]; then
		log "$w_sanity_detail"
		return
	fi

	#
	# Check the image release to be sure it's S10.
	#
	image_vers="unknown"
	if [[ -f $dir/var/sadm/system/admin/INST_RELEASE ]]; then
		image_vers=$(nawk -F= '{if ($1 == "VERSION") print $2}' \
		    $dir/var/sadm/system/admin/INST_RELEASE)
	fi

	if [[ "$image_vers" != "10" ]]; then
		log "$f_sanity_vers" "$image_vers"
		res=1
	fi

	#
	# Make sure we have the minimal KU patch we support.  These are the
	# KUs for S10u8.
	#
	if [[ $(uname -p) == "i386" ]]; then
		req_patch="141445-09"
	else
		req_patch="141444-09"
	fi

	for i in $dir/var/sadm/pkg/SUNWcakr*
	do
		if [[ ! -d $i || ! -f $i/pkginfo ]]; then
			log "$f_sanity_nopatch"
			res=1
		fi
	done

	#
	# Check the core kernel pkg for the required KU patch.
	#
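	#
	# As a hypothetical illustration (the patch IDs other than the
	# required KU are examples only), a SUNWcakr pkginfo PATCHLIST
	# entry that satisfies the x86 requirement above might read:
	#
	#	PATCHLIST=119255-70 141445-09 142910-17
	#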
	found=0
	for i in $dir/var/sadm/pkg/SUNWcakr*/pkginfo
	do
		patches=$(nawk -F= '{if ($1 == "PATCHLIST") print $2}' $i)
		for patch in $patches
		do
			if [[ $patch == $req_patch ]]; then
				found=1
				break
			fi
		done

		if (( $found == 1 )); then
			break
		fi
	done

	if (( $found != 1 )); then
		log "$f_sanity_downrev" "$patches"
		res=1
	fi

	#
	# Check the S10 image for a required version of the emulation.
	#
	VERS_FILE=/usr/lib/brand/solaris10/version
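	#
	# The checks below assume the version file contains '#' comment
	# lines plus a single integer emulation version (for example, a
	# file whose only non-comment line is "1").
	#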
	s10vers_needs=0
	if [[ -f $dir/$VERS_FILE ]]; then
		s10vers_needs=$(/usr/bin/egrep -v "^#" $dir/$VERS_FILE)
	fi

	# Now get the current emulation version.
	emul_vers=$(/usr/bin/egrep -v "^#" $VERS_FILE)

	# Verify that the emulation can run this version of S10.
	if (( $s10vers_needs > $emul_vers )); then
		log "$f_need_newer_emul"
		res=1
	fi

	if (( $res != 0 )); then
		log "$sanity_fail"
		fatal "$install_fail" "$ZONENAME"
	fi

	vlog "$sanity_ok"
}

# Find the active dataset under the zonepath dataset to mount on zonepath/root.
# $1 ZONEPATH_DS
get_active_ds() {
	ACTIVE_DS=`/usr/sbin/zfs list -H -r -t filesystem \
	    -o name,$PROP_ACTIVE $1/ROOT | \
	    /usr/bin/nawk ' {
		if ($1 ~ /ROOT\/[^\/]+$/ && $2 == "on") {
			print $1
			if (found == 1)
				exit 1
			found = 1
		}
	    }'`

	if [ $? -ne 0 ]; then
		fail_fatal "$f_multiple_ds"
	fi

	if [ -z "$ACTIVE_DS" ]; then
		fail_fatal "$f_no_active_ds"
	fi
}
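
#
# Hypothetical example (dataset names are illustrative only): if the
# zonepath dataset is rpool/zones/myzone and its BEs look like
#
#	rpool/zones/myzone/ROOT/zbe-0	off
#	rpool/zones/myzone/ROOT/zbe-1	on
#
# (name and org.opensolaris.libbe:active value), then
# "get_active_ds rpool/zones/myzone" sets
# ACTIVE_DS=rpool/zones/myzone/ROOT/zbe-1.
#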

#
# Make sure the active dataset is mounted for the zone.  There are several
# cases to consider:
# 1) First boot of the zone, nothing is mounted
# 2) Zone is halting, active dataset remains the same.
# 3) Zone is halting, there is a new active dataset to mount.
#
mount_active_ds() {
	mount -p | cut -d' ' -f3 | egrep -s "^$zonepath/root$"
	if (( $? == 0 )); then
		# Umount current dataset on the root (it might be an old BE).
		/usr/sbin/umount $zonepath/root
		if (( $? != 0 )); then
			# The umount failed; leave the old BE mounted.  If
			# there are zone processes (e.g. zsched) in the fs,
			# we're umounting because validation failed during
			# boot; otherwise, warn that a global zone process
			# is preventing the umount.
			nproc=`pgrep -z $zonename | wc -l`
			if (( $nproc == 0 )); then
				printf "$f_zfs_unmount" "$zonepath/root"
			fi
			return
		fi
	fi

	# Mount active dataset on the root.
	get_zonepath_ds $zonepath
	get_active_ds $ZONEPATH_DS

	/usr/sbin/mount -F zfs $ACTIVE_DS $zonepath/root || \
	    fail_fatal "$f_zfs_mount"
}

#
# Set up ZFS dataset hierarchy for the zone root dataset.
#
create_active_ds() {
	# Find the zone's current dataset.  This should have been created by
	# zoneadm (or the attach hook).
	get_zonepath_ds $zonepath

	#
	# We need to tolerate errors while creating the datasets and making the
	# mountpoint, since these could already exist from an attach scenario.
	#

	/usr/sbin/zfs list -H -o name $ZONEPATH_DS/ROOT >/dev/null 2>&1
	if (( $? != 0 )); then
		/usr/sbin/zfs create -o mountpoint=legacy -o zoned=on \
		    $ZONEPATH_DS/ROOT
		if (( $? != 0 )); then
			fail_fatal "$f_zfs_create"
		fi
	else
		/usr/sbin/zfs set mountpoint=legacy $ZONEPATH_DS/ROOT \
		    >/dev/null 2>&1
		/usr/sbin/zfs set zoned=on $ZONEPATH_DS/ROOT \
		    >/dev/null 2>&1
	fi

	BENAME=zbe-0
	/usr/sbin/zfs list -H -o name $ZONEPATH_DS/ROOT/$BENAME >/dev/null 2>&1
	if (( $? != 0 )); then
		/usr/sbin/zfs create -o $PROP_ACTIVE=on -o canmount=noauto \
		    $ZONEPATH_DS/ROOT/$BENAME >/dev/null 2>&1
		if (( $? != 0 )); then
			fail_fatal "$f_zfs_create"
		fi
	else
		/usr/sbin/zfs set $PROP_ACTIVE=on $ZONEPATH_DS/ROOT/$BENAME \
		    >/dev/null 2>&1
		/usr/sbin/zfs set canmount=noauto $ZONEPATH_DS/ROOT/$BENAME \
		    >/dev/null 2>&1
		/usr/sbin/zfs inherit mountpoint $ZONEPATH_DS/ROOT/$BENAME \
		    >/dev/null 2>&1
		/usr/sbin/zfs inherit zoned $ZONEPATH_DS/ROOT/$BENAME \
		    >/dev/null 2>&1
	fi

	if [ ! -d $ZONEROOT ]; then
		/usr/bin/mkdir -m 0755 -p $ZONEROOT || \
		    fail_fatal "$f_mkdir" "$ZONEROOT"
	fi
	/usr/bin/chmod 700 $ZONEPATH || fail_fatal "$f_chmod" "$ZONEPATH"

	/usr/sbin/mount -F zfs $ZONEPATH_DS/ROOT/$BENAME $ZONEROOT || \
		fail_fatal "$f_zfs_mount"
}
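
#
# As a hypothetical illustration (pool and zone names are examples only), a
# zonepath of /zones/myzone backed by rpool/zones/myzone ends up with:
#
#	rpool/zones/myzone/ROOT		mountpoint=legacy, zoned=on
#	rpool/zones/myzone/ROOT/zbe-0	org.opensolaris.libbe:active=on,
#					canmount=noauto
#
# with zbe-0 mounted on /zones/myzone/root.
#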

#
# Before booting the zone we may need to create a few mount points, just in
# case they don't exist for some reason.
#
# Whenever we reach into the zone while running in the global zone we
# need to validate that none of the interim directories are symlinks
# that could cause us to inadvertently modify the global zone.
#
mk_zone_dirs() {
	vlog "$v_mkdirs"
	if [[ ! -f $ZONEROOT/tmp && ! -d $ZONEROOT/tmp ]]; then
		mkdir -m 1777 -p $ZONEROOT/tmp || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/var/run && ! -d $ZONEROOT/var/run ]]; then
		mkdir -m 1755 -p $ZONEROOT/var/run || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/var/tmp && ! -d $ZONEROOT/var/tmp ]]; then
		mkdir -m 1777 -p $ZONEROOT/var/tmp || exit $EXIT_CODE
	fi
	if [[ ! -h $ZONEROOT/etc && ! -f $ZONEROOT/etc/mnttab ]]; then
		/usr/bin/touch $ZONEROOT/etc/mnttab || exit $EXIT_CODE
		/usr/bin/chmod 444 $ZONEROOT/etc/mnttab || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/proc && ! -d $ZONEROOT/proc ]]; then
		mkdir -m 755 -p $ZONEROOT/proc || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/dev && ! -d $ZONEROOT/dev ]]; then
		mkdir -m 755 -p $ZONEROOT/dev || exit $EXIT_CODE
	fi
	if [[ ! -h $ZONEROOT/etc && ! -h $ZONEROOT/etc/svc && \
	    ! -d $ZONEROOT/etc/svc ]]; then
		mkdir -m 755 -p $ZONEROOT/etc/svc/volatile || exit $EXIT_CODE
	fi
}

#
# We're sys-unconfig-ing the zone.  This would normally halt the zone;
# however, sys-unconfig has problems and can hang when the zone is booted
# to milestone=none.  Sys-unconfig also sometimes hangs while halting the
# zone.  Thus, we take some care to work around these sys-unconfig
# limitations.
#
# On entry we expect the zone to be booted.  We use sys-unconfig -R to make
# it think it's working on an alternate root and let the caller halt the
# zone.
#
sysunconfig_zone() {
	/usr/sbin/zlogin -S $ZONENAME /usr/sbin/sys-unconfig -R /./ \
	    >/dev/null 2>&1
	if (( $? != 0 )); then
		error "$e_unconfig"
		return 1
	fi

	return 0
}
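
#
# A hypothetical caller sequence (these commands are illustrative, not the
# exact hook code) might look like:
#
#	zoneadm -z $ZONENAME boot -- -m milestone=none
#	sysunconfig_zone || return 1
#	zoneadm -z $ZONENAME halt
#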

#
# Get zone's uuid for service tag.
#
get_inst_uuid()
{
	typeset ZONENAME="$1"

	ZONEUUID=`zoneadm -z $ZONENAME list -p | nawk -F: '{print $5}'`
	[[ $? -ne 0 || -z $ZONEUUID ]] && return 1

	INSTANCE_UUID="urn:st:${ZONEUUID}"
	return 0
}
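
#
# Hypothetical example (values are illustrative only): "zoneadm -z myzone
# list -p" prints colon-delimited fields such as
#
#	3:myzone:running:/zones/myzone:c5e1a2b4-...:solaris10:excl
#
# and the fifth field (the zone's UUID) yields
# INSTANCE_UUID="urn:st:c5e1a2b4-...".
#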

#
# Add a service tag for a given zone.  We use two UUIDs-- the first,
# the Product UUID, comes from the Sun swoRDFish ontology.  The second
# is the UUID of the zone itself, which forms the instance UUID.
#
add_svc_tag()
{
	typeset ZONENAME="$1"
	typeset SOURCE="$2"

	if [ ! -x $STCLIENT ]; then
		vlog "$v_no_tags"
		return 0
	fi

	get_inst_uuid "$ZONENAME" || { error "$e_bad_uuid"; return 1; }

	vlog "$v_addtag" "$INSTANCE_UUID"
	$STCLIENT -a \
	    -p "$ST_PRODUCT_NAME" \
	    -e "$ST_PRODUCT_REV" \
	    -t "$ST_PRODUCT_UUID" \
	    -i "$INSTANCE_UUID" \
	    -P "none" \
	    -m "Sun" \
	    -A `uname -p` \
	    -z "$ZONENAME" \
	    -S "$SOURCE" >/dev/null 2>&1

	err=$?

	# 226 means "duplicate record," which we can ignore.
	if [[ $err -ne 0 && $err -ne 226 ]]; then
		error "$e_addtag_fail" "$err"
		return 1
	fi
	return 0
}
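
#
# Hypothetical usage from a brand hook (the source argument is an example
# only):
#
#	add_svc_tag "$ZONENAME" "install"
#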

#
# Remove a service tag for a given zone.
#
del_svc_tag()
{
	typeset ZONENAME="$1"

	if [ ! -x $STCLIENT ]; then
		vlog "$v_no_tags"
		return 0
	fi

	get_inst_uuid "$ZONENAME" || { error "$e_bad_uuid"; return 1; }

	vlog "$v_deltag" "$INSTANCE_UUID"
	$STCLIENT -d -i "$INSTANCE_UUID" >/dev/null 2>&1
	return 0
}

429