xref: /illumos-gate/usr/src/lib/brand/solaris10/zone/common.ksh (revision 5422785d352a2bb398daceab3d1898a8aa64d006)
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
#

unset LD_LIBRARY_PATH
PATH=/usr/bin:/usr/sbin
export PATH

. /usr/lib/brand/shared/common.ksh

# Values for service tags.
STCLIENT=/usr/bin/stclient
ST_PRODUCT_NAME="Solaris 10 Containers"
ST_PRODUCT_REV="1.0"
ST_PRODUCT_UUID="urn:uuid:2f459121-dec7-11de-9af7-080020a9ed93"

w_sanity_detail=$(gettext "       WARNING: Skipping image sanity checks.")
f_sanity_detail=$(gettext  "Missing %s at %s")
f_sanity_sparse=$(gettext  "Is this a sparse zone image?  The image must be whole-root.")
f_sanity_vers=$(gettext  "The image release version must be 10 (got %s); the zone is not usable on this system.")
f_not_s10_image=$(gettext  "%s doesn't look like a Solaris 10 image.")
f_sanity_nopatch=$(gettext "Unable to determine the image's patch level.")
f_sanity_downrev=$(gettext "The image patch level is downrev for running in a solaris10 branded zone.\n(patchlist %s)")
f_need_newer_emul=$(gettext "The image requires a newer version of the solaris10 brand emulation.")
f_zfs_create=$(gettext "Unable to create the zone's ZFS dataset.")
f_no_ds=$(gettext "No zonepath dataset; the zonepath must be a ZFS dataset.")
f_multiple_ds=$(gettext "Multiple active datasets.")
f_no_active_ds=$(gettext "No active dataset; the zone's ZFS root dataset must be configured as\n\ta zone boot environment.")
f_zfs_unmount=$(gettext "Unable to unmount the zone's root ZFS dataset (%s).\nIs there a global zone process inside the zone root?\nThe current zone boot environment will remain mounted.\n")
f_zfs_mount=$(gettext "Unable to mount the zone's ZFS dataset.")
incompat_options=$(gettext "mutually exclusive options.\n%s")

sanity_ok=$(gettext     "  Sanity Check: Passed.  Looks like a Solaris 10 image.")
sanity_fail=$(gettext   "  Sanity Check: FAILED (see log for details).")

e_badboot=$(gettext "Zone boot failed")
e_nosingleuser=$(gettext "ERROR: zone did not finish booting to single-user.")
e_unconfig=$(gettext "sys-unconfig failed")
v_unconfig=$(gettext "Performing zone sys-unconfig")

v_no_tags=$(gettext "Service tags facility not present.")
e_bad_uuid=$(gettext "Failed to get zone UUID")
v_addtag=$(gettext "Adding service tag: %s")
v_deltag=$(gettext "Removing service tag: %s")
e_addtag_fail=$(gettext "Adding service tag failed (error: %s)")

sanity_check()
{
	typeset dir="$1"
	res=0

	#
	# Check for some required directories and make sure this isn't a
	# sparse zone image.
	#
	checks="etc etc/svc var var/svc"
	for x in $checks; do
		if [[ ! -e $dir/$x ]]; then
			log "$f_sanity_detail" "$x" "$dir"
			res=1
		fi
	done
	# Files from SUNWcsr and SUNWcsu that are in sparse inherit-pkg-dirs.
	checks="lib/svc sbin/zonename usr/bin/chmod"
	for x in $checks; do
		if [[ ! -e $dir/$x ]]; then
			log "$f_sanity_detail" "$x" "$dir"
			log "$f_sanity_sparse"
			res=1
		fi
	done

	if (( $res != 0 )); then
		log "$sanity_fail"
		fatal "$install_fail" "$ZONENAME"
	fi

	if [[ "$SANITY_SKIP" == 1 ]]; then
		log "$w_sanity_detail"
		return
	fi

	#
	# Check the image release to be sure it's S10.
	#
	image_vers="unknown"
	if [[ -f $dir/var/sadm/system/admin/INST_RELEASE ]]; then
		image_vers=$(nawk -F= '{if ($1 == "VERSION") print $2}' \
		    $dir/var/sadm/system/admin/INST_RELEASE)
	fi
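	#
	# Illustrative (assumption, not from the original script): a typical
	# Solaris 10 INST_RELEASE file contains
	#
	#	OS=Solaris
	#	VERSION=10
	#	REV=0
	#
	# so the nawk above extracts "10".
	#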

	if [[ "$image_vers" != "10" ]]; then
		log "$f_sanity_vers" "$image_vers"
		res=1
	fi

	#
	# Make sure we have the minimal KU patch we support.  These are the
	# KUs for S10u8.
	#
	if [[ $(uname -p) == "i386" ]]; then
		req_patch="141445-09"
	else
		req_patch="141444-09"
	fi

	for i in $dir/var/sadm/pkg/SUNWcakr*
	do
		if [[ ! -d $i || ! -f $i/pkginfo ]]; then
			log "$f_sanity_nopatch"
			res=1
		fi
	done

	#
	# Check the core kernel pkg for the required KU patch.
	#
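	#
	# Illustrative (assumption): pkginfo records the applied patches on
	# a single space-separated PATCHLIST line, e.g.
	#
	#	PATCHLIST=141444-09 142900-02
	#
	# The loop below matches each listed patch ID against $req_patch.
	#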
	found=0
	for i in $dir/var/sadm/pkg/SUNWcakr*/pkginfo
	do
		patches=$(nawk -F= '{if ($1 == "PATCHLIST") print $2}' $i)
		for patch in $patches
		do
			if [[ $patch == $req_patch ]]; then
				found=1
				break
			fi
		done

		if (( $found == 1 )); then
			break
		fi
	done

	if (( $found != 1 )); then
		log "$f_sanity_downrev" "$patches"
		res=1
	fi

	#
	# Check the S10 image for a required version of the emulation.
	#
	VERS_FILE=/usr/lib/brand/solaris10/version
	s10vers_needs=0
	if [[ -f $dir/$VERS_FILE ]]; then
		s10vers_needs=$(/usr/bin/egrep -v "^#" $dir/$VERS_FILE)
	fi

	# Now get the current emulation version.
	emul_vers=$(/usr/bin/egrep -v "^#" $VERS_FILE)
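	#
	# Illustrative (assumption): the version file holds comment lines
	# plus a single integer, e.g.
	#
	#	# Emulation version for this release of the solaris10 brand
	#	4
	#
	# so stripping "^#" lines leaves just the number for the arithmetic
	# comparison below.
	#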

	# Verify that the emulation can run this version of S10.
	if (( $s10vers_needs > $emul_vers )); then
		log "$f_need_newer_emul"
		res=1
	fi

	if (( $res != 0 )); then
		log "$sanity_fail"
		fatal "$install_fail" "$ZONENAME"
	fi

	vlog "$sanity_ok"
}
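
# Illustrative usage (hypothetical caller; the actual install/attach scripts
# are not shown here):
#
#	sanity_check $ZONEROOT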

# Find the active dataset under the zonepath dataset to mount on
# zonepath/root.
# $1 ZONEPATH_DS
get_active_ds() {
	ACTIVE_DS=$1/ROOT/zbe-0
}
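# Illustrative (hypothetical dataset names): for a ZONEPATH_DS of
# rpool/zones/myzone, this yields ACTIVE_DS=rpool/zones/myzone/ROOT/zbe-0.
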
#
# Make sure the active dataset is mounted for the zone.
#
mount_active_ds() {
	get_zonepath_ds $zonepath
	get_active_ds $ZONEPATH_DS

	# If already mounted then we're done.
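	# Illustrative (assumption): "zfs get -H" emits one tab-separated
	# line of name, property, value, and source, e.g.
	#
	#	rpool/zones/myzone/ROOT/zbe-0	mounted	yes	-
	#
	# so "cut -f3" below extracts the value column.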
	mnted=$(zfs get -H mounted $ACTIVE_DS | cut -f3)
	[[ $mnted = "yes" ]] && return

	mount -F zfs $ACTIVE_DS $zonepath/root || fail_fatal "$f_zfs_mount"
}

#
# Set up ZFS dataset hierarchy for the zone root dataset.
#
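#
# Illustrative layout (hypothetical pool and zonepath names): for a zonepath
# of /zones/myzone backed by the dataset rpool/zones/myzone, this function
# produces
#
#	rpool/zones/myzone/ROOT		(mountpoint=legacy, zoned=on)
#	rpool/zones/myzone/ROOT/zbe-0	(canmount=noauto)
#
# and mounts zbe-0 on /zones/myzone/root ($ZONEROOT).
#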
create_active_ds() {
	# Find the zone's current dataset.  This should have been created by
	# zoneadm (or the attach hook).
	get_zonepath_ds $zonepath

	#
	# We need to tolerate errors while creating the datasets and making the
	# mountpoint, since these could already exist from an attach scenario.
	#

	/usr/sbin/zfs list -H -o name $ZONEPATH_DS/ROOT >/dev/null 2>&1
	if (( $? != 0 )); then
		/usr/sbin/zfs create -o mountpoint=legacy -o zoned=on \
		    $ZONEPATH_DS/ROOT
		if (( $? != 0 )); then
			fail_fatal "$f_zfs_create"
		fi
	else
		/usr/sbin/zfs set mountpoint=legacy $ZONEPATH_DS/ROOT \
		    >/dev/null 2>&1
		/usr/sbin/zfs set zoned=on $ZONEPATH_DS/ROOT \
		    >/dev/null 2>&1
	fi

	get_active_ds $ZONEPATH_DS
	zfs list -H -o name $ACTIVE_DS >/dev/null 2>&1
	if (( $? != 0 )); then
		zfs create -o canmount=noauto $ACTIVE_DS
		(( $? != 0 )) && fail_fatal "$f_zfs_create"
	else
		zfs set canmount=noauto $ACTIVE_DS >/dev/null 2>&1
		zfs inherit mountpoint $ACTIVE_DS >/dev/null 2>&1
		zfs inherit zoned $ACTIVE_DS >/dev/null 2>&1
	fi

	if [[ ! -d $ZONEROOT ]]; then
		/usr/bin/mkdir -m 0755 -p $ZONEROOT || \
		    fail_fatal "$f_mkdir" "$ZONEROOT"
	fi
	/usr/bin/chmod 700 $ZONEPATH || fail_fatal "$f_chmod" "$ZONEPATH"

	mount -F zfs $ACTIVE_DS $ZONEROOT || fail_fatal "$f_zfs_mount"
}

#
# Before booting the zone we may need to create a few mount points, just in
# case they don't exist for some reason.
#
# Whenever we reach into the zone while running in the global zone we
# need to validate that none of the interim directories are symlinks
# that could cause us to inadvertently modify the global zone.
#
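# Illustrative hazard (assumption, not from the original comment): if
# $ZONEROOT/etc were a symlink pointing back into the global zone, the
# touch and chmod of etc/mnttab below would modify global zone files;
# hence the "-h" (symlink) checks in mk_zone_dirs.
#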
mk_zone_dirs() {
	vlog "$v_mkdirs"
	if [[ ! -f $ZONEROOT/tmp && ! -d $ZONEROOT/tmp ]]; then
		mkdir -m 1777 -p $ZONEROOT/tmp || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/var/run && ! -d $ZONEROOT/var/run ]]; then
		mkdir -m 1755 -p $ZONEROOT/var/run || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/var/tmp && ! -d $ZONEROOT/var/tmp ]]; then
		mkdir -m 1777 -p $ZONEROOT/var/tmp || exit $EXIT_CODE
	fi
	if [[ ! -h $ZONEROOT/etc && ! -f $ZONEROOT/etc/mnttab ]]; then
		/usr/bin/touch $ZONEROOT/etc/mnttab || exit $EXIT_CODE
		/usr/bin/chmod 444 $ZONEROOT/etc/mnttab || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/proc && ! -d $ZONEROOT/proc ]]; then
		mkdir -m 755 -p $ZONEROOT/proc || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/dev && ! -d $ZONEROOT/dev ]]; then
		mkdir -m 755 -p $ZONEROOT/dev || exit $EXIT_CODE
	fi
	if [[ ! -h $ZONEROOT/etc && ! -h $ZONEROOT/etc/svc && \
	    ! -d $ZONEROOT/etc/svc ]]; then
		mkdir -m 755 -p $ZONEROOT/etc/svc/volatile || exit $EXIT_CODE
	fi
}

#
# We're sys-unconfig-ing the zone.  This will normally halt the zone, however
# there are problems with sys-unconfig and it can hang when the zone is booted
# to milestone=none.  Sys-unconfig also sometimes hangs halting the zone.
# Thus, we take some care to work around these sys-unconfig limitations.
#
# On entry we expect the zone to be booted.  We use sys-unconfig -R to make it
# think it's working on an alternate root and let the caller halt the zone.
#
sysunconfig_zone() {
	/usr/sbin/zlogin -S $ZONENAME /usr/sbin/sys-unconfig -R /./ \
	    >/dev/null 2>&1
	if (( $? != 0 )); then
		error "$e_unconfig"
		return 1
	fi

	return 0
}

#
# Get zone's uuid for service tag.
#
get_inst_uuid()
{
	typeset ZONENAME="$1"

	ZONEUUID=$(zoneadm -z $ZONENAME list -p | nawk -F: '{print $5}')
	[[ $? -ne 0 || -z $ZONEUUID ]] && return 1

	INSTANCE_UUID="urn:st:${ZONEUUID}"
	return 0
}

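# Illustrative (assumption): "zoneadm -z <zone> list -p" prints one
# colon-delimited line of the form
#
#	zoneid:zonename:state:zonepath:uuid:brand:ip-type
#
# so the nawk above selects field 5, the zone's UUID.
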
#
# Add a service tag for a given zone.  We use two UUIDs: the first,
# the Product UUID, comes from the Sun swoRDFish ontology.  The second
# is the UUID of the zone itself, which forms the instance UUID.
#
add_svc_tag()
{
	typeset ZONENAME="$1"
	typeset SOURCE="$2"

	if [[ ! -x $STCLIENT ]]; then
		vlog "$v_no_tags"
		return 0
	fi

	get_inst_uuid "$ZONENAME" || { error "$e_bad_uuid"; return 1; }

	vlog "$v_addtag" "$INSTANCE_UUID"
	$STCLIENT -a \
	    -p "$ST_PRODUCT_NAME" \
	    -e "$ST_PRODUCT_REV" \
	    -t "$ST_PRODUCT_UUID" \
	    -i "$INSTANCE_UUID" \
	    -P "none" \
	    -m "Sun" \
	    -A $(uname -p) \
	    -z "$ZONENAME" \
	    -S "$SOURCE" >/dev/null 2>&1

	err=$?

	# 226 means "duplicate record," which we can ignore.
	if [[ $err -ne 0 && $err -ne 226 ]]; then
		error "$e_addtag_fail" "$err"
		return 1
	fi
	return 0
}

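# Illustrative usage (hypothetical caller and SOURCE value):
#
#	add_svc_tag "$ZONENAME" "install"
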
#
# Remove a service tag for a given zone.
#
del_svc_tag()
{
	typeset ZONENAME="$1"

	if [[ ! -x $STCLIENT ]]; then
		vlog "$v_no_tags"
		return 0
	fi

	get_inst_uuid "$ZONENAME" || { error "$e_bad_uuid"; return 1; }

	vlog "$v_deltag" "$INSTANCE_UUID"
	$STCLIENT -d -i "$INSTANCE_UUID" >/dev/null 2>&1
	return 0
}
380