xref: /titanic_41/usr/src/uts/common/os/zone.c (revision 6023a540d24df8406f1bd221f66d71e19332fafd)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Zones
29  *
30  *   A zone is a named collection of processes, namespace constraints,
31  *   and other system resources which comprise a secure and manageable
32  *   application containment facility.
33  *
34  *   Zones (represented by the reference counted zone_t) are tracked in
35  *   the kernel in the zonehash.  Elsewhere in the kernel, Zone IDs
36  *   (zoneid_t) are used to track zone association.  Zone IDs are
37  *   dynamically generated when the zone is created; if a persistent
38  *   identifier is needed (core files, accounting logs, audit trail,
39  *   etc.), the zone name should be used.
40  *
41  *
42  *   Global Zone:
43  *
44  *   The global zone (zoneid 0) is automatically associated with all
45  *   system resources that have not been bound to a user-created zone.
46  *   This means that even systems where zones are not in active use
47  *   have a global zone, and all processes, mounts, etc. are
48  *   associated with that zone.  The global zone is generally
49  *   unconstrained in terms of privileges and access, though the usual
50  *   credential and privilege based restrictions apply.
51  *
52  *
53  *   Zone States:
54  *
55  *   The states a zone may be in and the transitions between them are as
56  *   follows:
57  *
58  *   ZONE_IS_UNINITIALIZED: primordial state for a zone. The partially
59  *   initialized zone is added to the list of active zones on the system but
60  *   isn't accessible.
61  *
62  *   ZONE_IS_INITIALIZED: Initialization is complete except that the ZSD
63  *   constructor callbacks have not yet run.  It is not possible to enter the
64  *   zone, but attributes can be retrieved.
65  *
66  *   ZONE_IS_READY: zsched (the kernel dummy process for a zone) is
67  *   ready.  The zone is made visible after the ZSD constructor callbacks are
68  *   executed.  A zone remains in this state until it transitions into
69  *   the ZONE_IS_BOOTING state as a result of a call to zone_boot().
70  *
71  *   ZONE_IS_BOOTING: in this short-lived state, zsched attempts to start
72  *   init.  Should that fail, the zone proceeds to the ZONE_IS_SHUTTING_DOWN
73  *   state.
74  *
75  *   ZONE_IS_RUNNING: The zone is open for business: zsched has
76  *   successfully started init.   A zone remains in this state until
77  *   zone_shutdown() is called.
78  *
79  *   ZONE_IS_SHUTTING_DOWN: zone_shutdown() has been called, the system is
80  *   killing all processes running in the zone. The zone remains
81  *   in this state until there are no more user processes running in the zone.
82  *   zone_create(), zone_enter(), and zone_destroy() on this zone will fail.
83  *   Since zone_shutdown() is restartable, it may be called successfully
84  *   multiple times for the same zone_t.  Setting of the zone's state to
85  *   ZONE_IS_SHUTTING_DOWN is synchronized with mounts, so VOP_MOUNT() may check
86  *   the zone's status without worrying about it being a moving target.
87  *
88  *   ZONE_IS_EMPTY: zone_shutdown() has been called, and there
89  *   are no more user processes in the zone.  The zone remains in this
90  *   state until there are no more kernel threads associated with the
91  *   zone.  zone_create(), zone_enter(), and zone_destroy() on this zone will
92  *   fail.
93  *
94  *   ZONE_IS_DOWN: All kernel threads doing work on behalf of the zone
95  *   have exited.  zone_shutdown() returns.  Henceforth it is not possible to
96  *   join the zone or create kernel threads therein.
97  *
98  *   ZONE_IS_DYING: zone_destroy() has been called on the zone; zone
99  *   remains in this state until zsched exits.  Calls to zone_find_by_*()
100  *   return NULL from now on.
101  *
102  *   ZONE_IS_DEAD: zsched has exited (zone_ntasks == 0).  There are no
103  *   processes or threads doing work on behalf of the zone.  The zone is
104  *   removed from the list of active zones.  zone_destroy() returns, and
105  *   the zone can be recreated.
106  *
107  *   ZONE_IS_FREE (internal state): zone_ref goes to 0, ZSD destructor
108  *   callbacks are executed, and all memory associated with the zone is
109  *   freed.
110  *
111  *   Threads can wait for the zone to enter a requested state by using
112  *   zone_status_wait() or zone_status_timedwait() with the desired
113  *   state passed in as an argument.  Zone state transitions are
114  *   uni-directional; it is not possible to move back to an earlier state.
115  *
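 *   For example, a thread that must not proceed until a zone has finished
 *   shutting down can simply block with (illustrative sketch):
 *
 *	zone_status_wait(zone, ZONE_IS_DEAD);
 *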
116  *
117  *   Zone-Specific Data:
118  *
119  *   Subsystems needing to maintain zone-specific data can store that
120  *   data using the ZSD mechanism.  This provides a zone-specific data
121  *   store, similar to thread-specific data (see pthread_getspecific(3C)
122  *   or the TSD code in uts/common/disp/thread.c).  Also, ZSD can be used
123  *   to register callbacks to be invoked when a zone is created, shut
124  *   down, or destroyed.  This can be used to initialize zone-specific
125  *   data for new zones and to clean up when zones go away.
126  *
127  *
128  *   Data Structures:
129  *
130  *   The per-zone structure (zone_t) is reference counted, and freed
131  *   when all references are released.  zone_hold and zone_rele can be
132  *   used to adjust the reference count.  In addition, reference counts
133  *   associated with the cred_t structure are tracked separately using
134  *   zone_cred_hold and zone_cred_rele.
135  *
136  *   Pointers to active zone_t's are stored in two hash tables; one
137  *   for searching by id, the other for searching by name.  Lookups
138  *   can be performed on either basis, using zone_find_by_id and
139  *   zone_find_by_name.  Both return zone_t pointers with the zone
140  *   held, so zone_rele should be called when the pointer is no longer
141  *   needed.  Zones can also be searched by path; zone_find_by_path
142  *   returns the zone with which a path name is associated (global
143  *   zone if the path is not within some other zone's file system
144  *   hierarchy).  This currently requires iterating through each zone,
145  *   so it is slower than an id or name search via a hash table.
146  *
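 *   A typical lookup by a consumer looks like (illustrative sketch):
 *
 *	zone_t *zp;
 *
 *	if ((zp = zone_find_by_id(zoneid)) != NULL) {
 *		... use zp ...
 *		zone_rele(zp);
 *	}
 *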
147  *
148  *   Locking:
149  *
150  *   zonehash_lock: This is a top-level global lock used to protect the
151  *       zone hash tables and lists.  Zones cannot be created or destroyed
152  *       while this lock is held.
153  *   zone_status_lock: This is a global lock protecting zone state.
154  *       Zones cannot change state while this lock is held.  It also
155  *       protects the list of kernel threads associated with a zone.
156  *   zone_lock: This is a per-zone lock used to protect several fields of
157  *       the zone_t (see <sys/zone.h> for details).  In addition, holding
158  *       this lock means that the zone cannot go away.
159  *   zone_nlwps_lock: This is a per-zone lock used to protect the fields
160  *	 related to the zone.max-lwps rctl.
161  *   zone_mem_lock: This is a per-zone lock used to protect the fields
162  *	 related to the zone.max-locked-memory and zone.max-swap rctls.
163  *   zsd_key_lock: This is a global lock protecting the key state for ZSD.
164  *   zone_deathrow_lock: This is a global lock protecting the "deathrow"
165  *       list (a list of zones in the ZONE_IS_DEAD state).
166  *
167  *   Ordering requirements:
168  *       pool_lock --> cpu_lock --> zonehash_lock --> zone_status_lock -->
169  *       	zone_lock --> zsd_key_lock --> pidlock --> p_lock
170  *
171  *   When taking zone_mem_lock or zone_nlwps_lock, the lock ordering is:
172  *	zonehash_lock --> a_lock --> pidlock --> p_lock --> zone_mem_lock
173  *	zonehash_lock --> a_lock --> pidlock --> p_lock --> zone_nlwps_lock
174  *
175  *   Blocking memory allocations are permitted while holding any of the
176  *   zone locks.
177  *
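 *   For example, a thread that needs both zonehash_lock and a zone's
 *   zone_lock must acquire them in that order (illustrative sketch; this is
 *   the pattern used by zone_key_create() below):
 *
 *	mutex_enter(&zonehash_lock);
 *	mutex_enter(&zone->zone_lock);
 *	...
 *	mutex_exit(&zone->zone_lock);
 *	mutex_exit(&zonehash_lock);
 *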
178  *
179  *   System Call Interface:
180  *
181  *   The zone subsystem can be managed and queried from user level with
182  *   the following system calls (all subcodes of the primary "zone"
183  *   system call):
184  *   - zone_create: creates a zone with selected attributes (name,
185  *     root path, privileges, resource controls, ZFS datasets)
186  *   - zone_enter: allows the current process to enter a zone
187  *   - zone_getattr: reports attributes of a zone
188  *   - zone_setattr: sets attributes of a zone
189  *   - zone_boot: sets 'init' running for the zone
190  *   - zone_list: lists all zones active in the system
191  *   - zone_lookup: looks up zone id based on name
192  *   - zone_shutdown: initiates shutdown process (see states above)
193  *   - zone_destroy: completes shutdown process (see states above)
194  *
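 *   From user level these subcodes are normally reached through the libc
 *   wrappers declared in <zone.h>; for example (illustrative, assuming the
 *   usual libc interfaces):
 *
 *	char name[ZONENAME_MAX];
 *
 *	if (getzonenamebyid(getzoneid(), name, sizeof (name)) != -1)
 *		(void) printf("running in zone %s\n", name);
 *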
195  */
196 
197 #include <sys/priv_impl.h>
198 #include <sys/cred.h>
199 #include <c2/audit.h>
200 #include <sys/debug.h>
201 #include <sys/file.h>
202 #include <sys/kmem.h>
203 #include <sys/kstat.h>
204 #include <sys/mutex.h>
205 #include <sys/note.h>
206 #include <sys/pathname.h>
207 #include <sys/proc.h>
208 #include <sys/project.h>
209 #include <sys/sysevent.h>
210 #include <sys/task.h>
211 #include <sys/systm.h>
212 #include <sys/types.h>
213 #include <sys/utsname.h>
214 #include <sys/vnode.h>
215 #include <sys/vfs.h>
216 #include <sys/systeminfo.h>
217 #include <sys/policy.h>
218 #include <sys/cred_impl.h>
219 #include <sys/contract_impl.h>
220 #include <sys/contract/process_impl.h>
221 #include <sys/class.h>
222 #include <sys/pool.h>
223 #include <sys/pool_pset.h>
224 #include <sys/pset.h>
225 #include <sys/sysmacros.h>
226 #include <sys/callb.h>
227 #include <sys/vmparam.h>
228 #include <sys/corectl.h>
229 #include <sys/ipc_impl.h>
230 
231 #include <sys/door.h>
232 #include <sys/cpuvar.h>
233 #include <sys/sdt.h>
234 
235 #include <sys/uadmin.h>
236 #include <sys/session.h>
237 #include <sys/cmn_err.h>
238 #include <sys/modhash.h>
239 #include <sys/sunddi.h>
240 #include <sys/nvpair.h>
241 #include <sys/rctl.h>
242 #include <sys/fss.h>
243 #include <sys/brand.h>
244 #include <sys/zone.h>
245 #include <net/if.h>
246 #include <sys/cpucaps.h>
247 #include <vm/seg.h>
248 
249 /*
250  * cv used to signal that all references to the zone have been released.  This
251  * needs to be global since there may be multiple waiters, and the first to
252  * wake up will free the zone_t, hence we cannot use zone->zone_cv.
253  */
254 static kcondvar_t zone_destroy_cv;
255 /*
256  * Lock used to serialize access to zone_cv.  This could have been per-zone,
257  * but then we'd need another lock for zone_destroy_cv, and why bother?
258  */
259 static kmutex_t zone_status_lock;
260 
261 /*
262  * ZSD-related global variables.
263  */
264 static kmutex_t zsd_key_lock;	/* protects the following two */
265 /*
266  * The next caller of zone_key_create() will be assigned a key of ++zsd_keyval.
267  */
268 static zone_key_t zsd_keyval = 0;
269 /*
270  * Global list of registered keys.  We use this when a new zone is created.
271  */
272 static list_t zsd_registered_keys;
273 
274 int zone_hash_size = 256;
275 static mod_hash_t *zonehashbyname, *zonehashbyid, *zonehashbylabel;
276 static kmutex_t zonehash_lock;
277 static uint_t zonecount;
278 static id_space_t *zoneid_space;
279 
280 /*
281  * The global zone (aka zone0) is the all-seeing, all-knowing zone in which the
282  * kernel proper runs, and which manages all other zones.
283  *
284  * Although not declared as static, the variable "zone0" should not be used
285  * except by code that needs to reference the global zone early on in boot,
286  * before it is fully initialized.  All other consumers should use
287  * 'global_zone'.
288  */
289 zone_t zone0;
290 zone_t *global_zone = NULL;	/* Set when the global zone is initialized */
291 
292 /*
293  * List of active zones, protected by zonehash_lock.
294  */
295 static list_t zone_active;
296 
297 /*
298  * List of destroyed zones that still have outstanding cred references.
299  * Used for debugging.  Uses a separate lock to avoid lock ordering
300  * problems in zone_free.
301  */
302 static list_t zone_deathrow;
303 static kmutex_t zone_deathrow_lock;
304 
305 /* number of zones is limited by virtual interface limit in IP */
306 uint_t maxzones = 8192;
307 
308 /* Event channel used to send zone state change notifications */
309 evchan_t *zone_event_chan;
310 
311 /*
312  * This table holds the mapping from kernel zone states to
313  * states visible in the state notification API.
314  * The idea is that we only expose "obvious" states and
315  * do not expose states which are just implementation details.
316  */
317 const char  *zone_status_table[] = {
318 	ZONE_EVENT_UNINITIALIZED,	/* uninitialized */
319 	ZONE_EVENT_INITIALIZED,		/* initialized */
320 	ZONE_EVENT_READY,		/* ready */
321 	ZONE_EVENT_READY,		/* booting */
322 	ZONE_EVENT_RUNNING,		/* running */
323 	ZONE_EVENT_SHUTTING_DOWN,	/* shutting_down */
324 	ZONE_EVENT_SHUTTING_DOWN,	/* empty */
325 	ZONE_EVENT_SHUTTING_DOWN,	/* down */
326 	ZONE_EVENT_SHUTTING_DOWN,	/* dying */
327 	ZONE_EVENT_UNINITIALIZED,	/* dead */
328 };
329 
330 /*
331  * These aren't static so lint doesn't complain.
332  */
333 rctl_hndl_t rc_zone_cpu_shares;
334 rctl_hndl_t rc_zone_locked_mem;
335 rctl_hndl_t rc_zone_max_swap;
336 rctl_hndl_t rc_zone_cpu_cap;
337 rctl_hndl_t rc_zone_nlwps;
338 rctl_hndl_t rc_zone_shmmax;
339 rctl_hndl_t rc_zone_shmmni;
340 rctl_hndl_t rc_zone_semmni;
341 rctl_hndl_t rc_zone_msgmni;
342 /*
343  * Synchronization primitives used to coordinate mounts and zone
344  * creation/destruction.
345  */
346 static int mounts_in_progress;
347 static kcondvar_t mount_cv;
348 static kmutex_t mount_lock;
349 
350 const char * const zone_default_initname = "/sbin/init";
351 static char * const zone_prefix = "/zone/";
352 static int zone_shutdown(zoneid_t zoneid);
353 static int zone_add_datalink(zoneid_t, char *);
354 static int zone_remove_datalink(zoneid_t, char *);
355 static int zone_check_datalink(zoneid_t *, char *);
356 static int zone_list_datalink(zoneid_t, int *, char *);
357 
358 typedef boolean_t zsd_applyfn_t(kmutex_t *, boolean_t, zone_t *, zone_key_t);
359 
360 static void zsd_apply_all_zones(zsd_applyfn_t *, zone_key_t);
361 static void zsd_apply_all_keys(zsd_applyfn_t *, zone_t *);
362 static boolean_t zsd_apply_create(kmutex_t *, boolean_t, zone_t *, zone_key_t);
363 static boolean_t zsd_apply_shutdown(kmutex_t *, boolean_t, zone_t *,
364     zone_key_t);
365 static boolean_t zsd_apply_destroy(kmutex_t *, boolean_t, zone_t *, zone_key_t);
366 static boolean_t zsd_wait_for_creator(zone_t *, struct zsd_entry *,
367     kmutex_t *);
368 static boolean_t zsd_wait_for_inprogress(zone_t *, struct zsd_entry *,
369     kmutex_t *);
370 
371 /*
372  * Bump this number when you alter the zone syscall interfaces; we need to
373  * support previous API versions in libc to support patching, and libc calls
374  * into the kernel to determine this number.
375  *
376  * Version 1 of the API is the version originally shipped with Solaris 10
377  * Version 2 alters the zone_create system call in order to support more
378  *     arguments by moving the args into a structure; and to do better
379  *     error reporting when zone_create() fails.
380  * Version 3 alters the zone_create system call in order to support the
381  *     import of ZFS datasets to zones.
382  * Version 4 alters the zone_create system call in order to support
383  *     Trusted Extensions.
384  * Version 5 alters the zone_boot system call, and converts its old
385  *     bootargs parameter to be set by the zone_setattr API instead.
386  * Version 6 adds the flag argument to zone_create.
387  */
388 static const int ZONE_SYSCALL_API_VERSION = 6;
389 
390 /*
391  * Certain filesystems (such as NFS and autofs) need to know which zone
392  * the mount is being placed in.  Because of this, we need to be able to
393  * ensure that a zone isn't in the process of being created such that
394  * nfs_mount() thinks it is in the global zone, while by the time it
395  * gets added to the list of mounted zones, it ends up on zoneA's mount
396  * list.
397  *
398  * The following functions: block_mounts()/resume_mounts() and
399  * mount_in_progress()/mount_completed() are used by zones and the VFS
400  * layer (respectively) to synchronize zone creation and new mounts.
401  *
402  * The semantics are like a reader-reader lock: there may be multiple
403  * mounts in progress at the same time, or multiple zone creations (if
404  * they weren't already serialized by zonehash_lock), but not both at
405  * once.
406  *
407  * We use cv's so the user can ctrl-C out of the operation if it's
408  * taking too long.
409  *
410  * The semantics are such that there is an unfair bias towards the
411  * "current" operation.  This means that zone creations may starve if
412  * there is a rapid succession of new mounts coming in to the system, or
413  * there is a remote possibility that zones will be created at such a
414  * rate that new mounts will not be able to proceed.
415  */
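/*
 * Illustrative sketch (not the literal callers) of how the two sides use
 * these primitives.  The VFS layer brackets each new mount:
 *
 *	mount_in_progress();
 *	error = VFS_MOUNT(vfsp, mvp, uap, cr);
 *	mount_completed();
 *
 * while zone_create() blocks mounts around zone creation:
 *
 *	if (block_mounts() == 0)
 *		return (set_errno(EINTR));
 *	... create and insert the zone ...
 *	resume_mounts();
 */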
416 /*
417  * Prevent new mounts from progressing to the point of calling
418  * VFS_MOUNT().  If there are already mounts in this "region", wait for
419  * them to complete.
420  */
421 static int
422 block_mounts(void)
423 {
424 	int retval = 0;
425 
426 	/*
427 	 * Since it may block for a long time, block_mounts() shouldn't be
428 	 * called with zonehash_lock held.
429 	 */
430 	ASSERT(MUTEX_NOT_HELD(&zonehash_lock));
431 	mutex_enter(&mount_lock);
432 	while (mounts_in_progress > 0) {
433 		if (cv_wait_sig(&mount_cv, &mount_lock) == 0)
434 			goto signaled;
435 	}
436 	/*
437 	 * A negative value of mounts_in_progress indicates that mounts
438 	 * have been blocked by (-mounts_in_progress) different callers.
439 	 */
440 	mounts_in_progress--;
441 	retval = 1;
442 signaled:
443 	mutex_exit(&mount_lock);
444 	return (retval);
445 }
446 
447 /*
448  * The VFS layer may progress with new mounts as far as we're concerned.
449  * Allow them to progress if we were the last obstacle.
450  */
451 static void
452 resume_mounts(void)
453 {
454 	mutex_enter(&mount_lock);
455 	if (++mounts_in_progress == 0)
456 		cv_broadcast(&mount_cv);
457 	mutex_exit(&mount_lock);
458 }
459 
460 /*
461  * The VFS layer is busy with a mount; zones should wait until all
462  * mounts are completed to progress.
463  */
464 void
465 mount_in_progress(void)
466 {
467 	mutex_enter(&mount_lock);
468 	while (mounts_in_progress < 0)
469 		cv_wait(&mount_cv, &mount_lock);
470 	mounts_in_progress++;
471 	mutex_exit(&mount_lock);
472 }
473 
474 /*
475  * VFS is done with one mount; wake up any waiting block_mounts()
476  * callers if this is the last mount.
477  */
478 void
479 mount_completed(void)
480 {
481 	mutex_enter(&mount_lock);
482 	if (--mounts_in_progress == 0)
483 		cv_broadcast(&mount_cv);
484 	mutex_exit(&mount_lock);
485 }
486 
487 /*
488  * ZSD routines.
489  *
490  * Zone Specific Data (ZSD) is modeled after Thread Specific Data as
491  * defined by the pthread_key_create() and related interfaces.
492  *
493  * Kernel subsystems may register one or more data items and/or
494  * callbacks to be executed when a zone is created, shutdown, or
495  * destroyed.
496  *
497  * Unlike the thread counterpart, destructor callbacks will be executed
498  * even if the data pointer is NULL and/or there are no constructor
499  * callbacks, so it is the responsibility of such callbacks to check for
500  * NULL data values if necessary.
501  *
502  * The locking strategy and overall picture is as follows:
503  *
504  * When someone calls zone_key_create(), a template ZSD entry is added to the
505  * global list "zsd_registered_keys", protected by zsd_key_lock.  The global
506  * list is updated first (under zsd_key_lock) to make sure that newly
507  * created zones use the most recent list of keys.  Then, under
508  * zonehash_lock, we walk the existing zones, mark each of them
509  * ZSD_CREATE_NEEDED, and add a copy of the ZSD entry to the per-zone
510  * zone_zsd list (protected by zone_lock).  Similar locking is used in
511  * zone_key_delete().
512  *
513  * The actual create, shutdown, and destroy callbacks are invoked without
514  * holding any lock.  The zsd_flags are used to track progress, ensuring
515  * that by the time zone_key_create (and zone_create) or zone_key_delete
516  * (and zone_destroy) returns, all the necessary callbacks have been
517  * completed.
518  *
519  * When new zones are created constructor callbacks for all registered ZSD
520  * entries will be called. That also uses the above two phases of marking
521  * what needs to be done, and then running the callbacks without holding
522  * any locks.
523  *
524  * The framework does not provide any locking around zone_getspecific() and
525  * zone_setspecific() apart from that needed for internal consistency, so
526  * callers interested in atomic "test-and-set" semantics will need to provide
527  * their own locking.
528  */
529 
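/*
 * Illustrative sketch (hypothetical "foo" subsystem; not part of this file)
 * of how a kernel subsystem typically consumes the ZSD interface:
 *
 *	static zone_key_t foo_zone_key;
 *
 *	static void *
 *	foo_zone_init(zoneid_t zoneid)
 *	{
 *		return (kmem_zalloc(sizeof (foo_zone_data_t), KM_SLEEP));
 *	}
 *
 *	static void
 *	foo_zone_fini(zoneid_t zoneid, void *data)
 *	{
 *		kmem_free(data, sizeof (foo_zone_data_t));
 *	}
 *
 *	void
 *	foo_init(void)
 *	{
 *		zone_key_create(&foo_zone_key, foo_zone_init, NULL,
 *		    foo_zone_fini);
 *	}
 *
 * Per-zone data is then retrieved with:
 *
 *	foo_zone_data_t *fzd = zone_getspecific(foo_zone_key, curproc->p_zone);
 */
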
530 /*
531  * Helper function to find the zsd_entry associated with the key in the
532  * given list.
533  */
534 static struct zsd_entry *
535 zsd_find(list_t *l, zone_key_t key)
536 {
537 	struct zsd_entry *zsd;
538 
539 	for (zsd = list_head(l); zsd != NULL; zsd = list_next(l, zsd)) {
540 		if (zsd->zsd_key == key) {
541 			return (zsd);
542 		}
543 	}
544 	return (NULL);
545 }
546 
547 /*
548  * Helper function to find the zsd_entry associated with the key in the
549  * given list. Move it to the front of the list.
550  */
551 static struct zsd_entry *
552 zsd_find_mru(list_t *l, zone_key_t key)
553 {
554 	struct zsd_entry *zsd;
555 
556 	for (zsd = list_head(l); zsd != NULL; zsd = list_next(l, zsd)) {
557 		if (zsd->zsd_key == key) {
558 			/*
559 			 * Move to head of list to keep list in MRU order.
560 			 */
561 			if (zsd != list_head(l)) {
562 				list_remove(l, zsd);
563 				list_insert_head(l, zsd);
564 			}
565 			return (zsd);
566 		}
567 	}
568 	return (NULL);
569 }
570 
571 void
572 zone_key_create(zone_key_t *keyp, void *(*create)(zoneid_t),
573     void (*shutdown)(zoneid_t, void *), void (*destroy)(zoneid_t, void *))
574 {
575 	struct zsd_entry *zsdp;
576 	struct zsd_entry *t;
577 	struct zone *zone;
578 	zone_key_t  key;
579 
580 	zsdp = kmem_zalloc(sizeof (*zsdp), KM_SLEEP);
581 	zsdp->zsd_data = NULL;
582 	zsdp->zsd_create = create;
583 	zsdp->zsd_shutdown = shutdown;
584 	zsdp->zsd_destroy = destroy;
585 
586 	/*
587 	 * Insert in global list of callbacks. Makes future zone creations
588 	 * see it.
589 	 */
590 	mutex_enter(&zsd_key_lock);
591 	*keyp = key = zsdp->zsd_key = ++zsd_keyval;
592 	ASSERT(zsd_keyval != 0);
593 	list_insert_tail(&zsd_registered_keys, zsdp);
594 	mutex_exit(&zsd_key_lock);
595 
596 	/*
597 	 * Insert for all existing zones and mark them as needing
598 	 * a create callback.
599 	 */
600 	mutex_enter(&zonehash_lock);	/* stop the world */
601 	for (zone = list_head(&zone_active); zone != NULL;
602 	    zone = list_next(&zone_active, zone)) {
603 		zone_status_t status;
604 
605 		mutex_enter(&zone->zone_lock);
606 
607 		/* Skip zones that are on the way down or not yet up */
608 		status = zone_status_get(zone);
609 		if (status >= ZONE_IS_DOWN ||
610 		    status == ZONE_IS_UNINITIALIZED) {
611 			mutex_exit(&zone->zone_lock);
612 			continue;
613 		}
614 
615 		t = zsd_find_mru(&zone->zone_zsd, key);
616 		if (t != NULL) {
617 			/*
618 			 * zone_zsd_configure() already inserted it after
619 			 * we dropped zsd_key_lock above.
620 			 */
621 			mutex_exit(&zone->zone_lock);
622 			continue;
623 		}
624 		t = kmem_zalloc(sizeof (*t), KM_SLEEP);
625 		t->zsd_key = key;
626 		t->zsd_create = create;
627 		t->zsd_shutdown = shutdown;
628 		t->zsd_destroy = destroy;
629 		if (create != NULL) {
630 			t->zsd_flags = ZSD_CREATE_NEEDED;
631 			DTRACE_PROBE2(zsd__create__needed,
632 			    zone_t *, zone, zone_key_t, key);
633 		}
634 		list_insert_tail(&zone->zone_zsd, t);
635 		mutex_exit(&zone->zone_lock);
636 	}
637 	mutex_exit(&zonehash_lock);
638 
639 	if (create != NULL) {
640 		/* Now call the create callback for this key */
641 		zsd_apply_all_zones(zsd_apply_create, key);
642 	}
643 }
644 
645 /*
646  * Function called when a module is being unloaded, or otherwise wishes
647  * to unregister its ZSD key and callbacks.
648  *
649  * Remove from the global list and determine the functions that need to
650  * be called under a global lock. Then call the functions without
651  * holding any locks. Finally free up the zone_zsd entries. (The apply
652  * functions need to access the zone_zsd entries to find zsd_data etc.)
653  */
654 int
655 zone_key_delete(zone_key_t key)
656 {
657 	struct zsd_entry *zsdp = NULL;
658 	zone_t *zone;
659 
660 	mutex_enter(&zsd_key_lock);
661 	zsdp = zsd_find_mru(&zsd_registered_keys, key);
662 	if (zsdp == NULL) {
663 		mutex_exit(&zsd_key_lock);
664 		return (-1);
665 	}
666 	list_remove(&zsd_registered_keys, zsdp);
667 	mutex_exit(&zsd_key_lock);
668 
669 	mutex_enter(&zonehash_lock);
670 	for (zone = list_head(&zone_active); zone != NULL;
671 	    zone = list_next(&zone_active, zone)) {
672 		struct zsd_entry *del;
673 
674 		mutex_enter(&zone->zone_lock);
675 		del = zsd_find_mru(&zone->zone_zsd, key);
676 		if (del == NULL) {
677 			/*
678 			 * Somebody else got here first, e.g. the zone going
679 			 * away.
680 			 */
681 			mutex_exit(&zone->zone_lock);
682 			continue;
683 		}
684 		ASSERT(del->zsd_shutdown == zsdp->zsd_shutdown);
685 		ASSERT(del->zsd_destroy == zsdp->zsd_destroy);
686 		if (del->zsd_shutdown != NULL &&
687 		    (del->zsd_flags & ZSD_SHUTDOWN_ALL) == 0) {
688 			del->zsd_flags |= ZSD_SHUTDOWN_NEEDED;
689 			DTRACE_PROBE2(zsd__shutdown__needed,
690 			    zone_t *, zone, zone_key_t, key);
691 		}
692 		if (del->zsd_destroy != NULL &&
693 		    (del->zsd_flags & ZSD_DESTROY_ALL) == 0) {
694 			del->zsd_flags |= ZSD_DESTROY_NEEDED;
695 			DTRACE_PROBE2(zsd__destroy__needed,
696 			    zone_t *, zone, zone_key_t, key);
697 		}
698 		mutex_exit(&zone->zone_lock);
699 	}
700 	mutex_exit(&zonehash_lock);
701 	kmem_free(zsdp, sizeof (*zsdp));
702 
703 	/* Now call the shutdown and destroy callback for this key */
704 	zsd_apply_all_zones(zsd_apply_shutdown, key);
705 	zsd_apply_all_zones(zsd_apply_destroy, key);
706 
707 	/* Now we can free up the zsdp structures in each zone */
708 	mutex_enter(&zonehash_lock);
709 	for (zone = list_head(&zone_active); zone != NULL;
710 	    zone = list_next(&zone_active, zone)) {
711 		struct zsd_entry *del;
712 
713 		mutex_enter(&zone->zone_lock);
714 		del = zsd_find(&zone->zone_zsd, key);
715 		if (del != NULL) {
716 			list_remove(&zone->zone_zsd, del);
717 			ASSERT(!(del->zsd_flags & ZSD_ALL_INPROGRESS));
718 			kmem_free(del, sizeof (*del));
719 		}
720 		mutex_exit(&zone->zone_lock);
721 	}
722 	mutex_exit(&zonehash_lock);
723 
724 	return (0);
725 }
726 
727 /*
728  * ZSD counterpart of pthread_setspecific().
729  *
730  * Since all zsd callbacks, including those with no create function,
731  * have an entry in zone_zsd, if the key is registered it is part of
732  * the zone_zsd list.
733  * Return an error if the key wasn't registered.
734  */
735 int
736 zone_setspecific(zone_key_t key, zone_t *zone, const void *data)
737 {
738 	struct zsd_entry *t;
739 
740 	mutex_enter(&zone->zone_lock);
741 	t = zsd_find_mru(&zone->zone_zsd, key);
742 	if (t != NULL) {
743 		/*
744 		 * Replace old value with new
745 		 */
746 		t->zsd_data = (void *)data;
747 		mutex_exit(&zone->zone_lock);
748 		return (0);
749 	}
750 	mutex_exit(&zone->zone_lock);
751 	return (-1);
752 }
753 
754 /*
755  * ZSD counterpart of pthread_getspecific().
756  */
757 void *
758 zone_getspecific(zone_key_t key, zone_t *zone)
759 {
760 	struct zsd_entry *t;
761 	void *data;
762 
763 	mutex_enter(&zone->zone_lock);
764 	t = zsd_find_mru(&zone->zone_zsd, key);
765 	data = (t == NULL ? NULL : t->zsd_data);
766 	mutex_exit(&zone->zone_lock);
767 	return (data);
768 }
769 
770 /*
771  * Function used to initialize a zone's list of ZSD callbacks and data
772  * when the zone is being created.  The callbacks are initialized from
773  * the template list (zsd_registered_keys). The constructor callback is
774  * executed later (once the zone exists and with locks dropped).
775  */
776 static void
777 zone_zsd_configure(zone_t *zone)
778 {
779 	struct zsd_entry *zsdp;
780 	struct zsd_entry *t;
781 
782 	ASSERT(MUTEX_HELD(&zonehash_lock));
783 	ASSERT(list_head(&zone->zone_zsd) == NULL);
784 	mutex_enter(&zone->zone_lock);
785 	mutex_enter(&zsd_key_lock);
786 	for (zsdp = list_head(&zsd_registered_keys); zsdp != NULL;
787 	    zsdp = list_next(&zsd_registered_keys, zsdp)) {
788 		/*
789 		 * Since this zone is ZONE_IS_UNINITIALIZED, zone_key_create
790 		 * should not have added anything to it.
791 		 */
792 		ASSERT(zsd_find(&zone->zone_zsd, zsdp->zsd_key) == NULL);
793 
794 		t = kmem_zalloc(sizeof (*t), KM_SLEEP);
795 		t->zsd_key = zsdp->zsd_key;
796 		t->zsd_create = zsdp->zsd_create;
797 		t->zsd_shutdown = zsdp->zsd_shutdown;
798 		t->zsd_destroy = zsdp->zsd_destroy;
799 		if (zsdp->zsd_create != NULL) {
800 			t->zsd_flags = ZSD_CREATE_NEEDED;
801 			DTRACE_PROBE2(zsd__create__needed,
802 			    zone_t *, zone, zone_key_t, zsdp->zsd_key);
803 		}
804 		list_insert_tail(&zone->zone_zsd, t);
805 	}
806 	mutex_exit(&zsd_key_lock);
807 	mutex_exit(&zone->zone_lock);
808 }
809 
810 enum zsd_callback_type { ZSD_CREATE, ZSD_SHUTDOWN, ZSD_DESTROY };
811 
812 /*
813  * Helper function to execute shutdown or destructor callbacks.
814  */
815 static void
816 zone_zsd_callbacks(zone_t *zone, enum zsd_callback_type ct)
817 {
818 	struct zsd_entry *t;
819 
820 	ASSERT(ct == ZSD_SHUTDOWN || ct == ZSD_DESTROY);
821 	ASSERT(ct != ZSD_SHUTDOWN || zone_status_get(zone) >= ZONE_IS_EMPTY);
822 	ASSERT(ct != ZSD_DESTROY || zone_status_get(zone) >= ZONE_IS_DOWN);
823 
824 	/*
825 	 * Run the callback solely based on what is registered for the zone
826 	 * in zone_zsd. The global list can change independently of this
827 	 * as keys are registered and unregistered and we don't register new
828 	 * callbacks for a zone that is in the process of going away.
829 	 */
830 	mutex_enter(&zone->zone_lock);
831 	for (t = list_head(&zone->zone_zsd); t != NULL;
832 	    t = list_next(&zone->zone_zsd, t)) {
833 		zone_key_t key = t->zsd_key;
834 
835 		/* Skip if no callbacks registered */
836 
837 		if (ct == ZSD_SHUTDOWN) {
838 			if (t->zsd_shutdown != NULL &&
839 			    (t->zsd_flags & ZSD_SHUTDOWN_ALL) == 0) {
840 				t->zsd_flags |= ZSD_SHUTDOWN_NEEDED;
841 				DTRACE_PROBE2(zsd__shutdown__needed,
842 				    zone_t *, zone, zone_key_t, key);
843 			}
844 		} else {
845 			if (t->zsd_destroy != NULL &&
846 			    (t->zsd_flags & ZSD_DESTROY_ALL) == 0) {
847 				t->zsd_flags |= ZSD_DESTROY_NEEDED;
848 				DTRACE_PROBE2(zsd__destroy__needed,
849 				    zone_t *, zone, zone_key_t, key);
850 			}
851 		}
852 	}
853 	mutex_exit(&zone->zone_lock);
854 
855 	/* Now call the shutdown and destroy callback for this key */
856 	zsd_apply_all_keys(zsd_apply_shutdown, zone);
857 	zsd_apply_all_keys(zsd_apply_destroy, zone);
858 
859 }
860 
861 /*
862  * Called when the zone is going away; free ZSD-related memory, and
863  * destroy the zone_zsd list.
864  */
865 static void
866 zone_free_zsd(zone_t *zone)
867 {
868 	struct zsd_entry *t, *next;
869 
870 	/*
871 	 * Free all the zsd_entry's we had on this zone.
872 	 */
873 	mutex_enter(&zone->zone_lock);
874 	for (t = list_head(&zone->zone_zsd); t != NULL; t = next) {
875 		next = list_next(&zone->zone_zsd, t);
876 		list_remove(&zone->zone_zsd, t);
877 		ASSERT(!(t->zsd_flags & ZSD_ALL_INPROGRESS));
878 		kmem_free(t, sizeof (*t));
879 	}
880 	list_destroy(&zone->zone_zsd);
881 	mutex_exit(&zone->zone_lock);
882 
883 }
884 
885 /*
886  * Apply a function to all zones for particular key value.
887  *
888  * The applyfn has to drop zonehash_lock if it does some work, and
889  * then reacquire it before it returns.
890  * When the lock is dropped we don't follow list_next even
891  * if it is possible to do so without any hazards. This is
892  * because we want the design to allow for the list of zones
893  * to change in any arbitrary way during the time the
894  * lock was dropped.
895  *
896  * It is safe to restart the loop at list_head since the applyfn
897  * changes the zsd_flags as it does work, so a subsequent
898  * pass through will have no effect in applyfn, hence the loop will terminate
899  * in at worst O(N^2).
900  */
901 static void
902 zsd_apply_all_zones(zsd_applyfn_t *applyfn, zone_key_t key)
903 {
904 	zone_t *zone;
905 
906 	mutex_enter(&zonehash_lock);
907 	zone = list_head(&zone_active);
908 	while (zone != NULL) {
909 		if ((applyfn)(&zonehash_lock, B_FALSE, zone, key)) {
910 			/* Lock dropped - restart at head */
911 			zone = list_head(&zone_active);
912 		} else {
913 			zone = list_next(&zone_active, zone);
914 		}
915 	}
916 	mutex_exit(&zonehash_lock);
917 }
918 
919 /*
920  * Apply a function to all keys for a particular zone.
921  *
922  * The applyfn has to drop zonehash_lock if it does some work, and
923  * then reacquire it before it returns.
924  * When the lock is dropped we don't follow list_next even
925  * if it is possible to do so without any hazards. This is
926  * because we want the design to allow for the list of zsd callbacks
927  * to change in any arbitrary way during the time the
928  * lock was dropped.
929  *
930  * It is safe to restart the loop at list_head since the applyfn
931  * changes the zsd_flags as it does work, so a subsequent
932  * pass through will have no effect in applyfn, hence the loop will terminate
933  * in at worst O(N^2).
934  */
935 static void
936 zsd_apply_all_keys(zsd_applyfn_t *applyfn, zone_t *zone)
937 {
938 	struct zsd_entry *t;
939 
940 	mutex_enter(&zone->zone_lock);
941 	t = list_head(&zone->zone_zsd);
942 	while (t != NULL) {
943 		if ((applyfn)(NULL, B_TRUE, zone, t->zsd_key)) {
944 			/* Lock dropped - restart at head */
945 			t = list_head(&zone->zone_zsd);
946 		} else {
947 			t = list_next(&zone->zone_zsd, t);
948 		}
949 	}
950 	mutex_exit(&zone->zone_lock);
951 }
952 
953 /*
954  * Call the create function for the zone and key if CREATE_NEEDED
955  * is set.
956  * If some other thread gets here first and sets CREATE_INPROGRESS, then
957  * we wait for that thread to complete so that we can ensure that
958  * all the callbacks are done when we've looped over all zones/keys.
959  *
960  * When we call the create function, we drop the global lock held by the
961  * caller, and return true to tell the caller it needs to re-evaluate the
962  * state.
963  * If the caller holds zone_lock then zone_lock_held is set, and zone_lock
964  * remains held on exit.
965  */
966 static boolean_t
967 zsd_apply_create(kmutex_t *lockp, boolean_t zone_lock_held,
968     zone_t *zone, zone_key_t key)
969 {
970 	void *result;
971 	struct zsd_entry *t;
972 	boolean_t dropped;
973 
974 	if (lockp != NULL) {
975 		ASSERT(MUTEX_HELD(lockp));
976 	}
977 	if (zone_lock_held) {
978 		ASSERT(MUTEX_HELD(&zone->zone_lock));
979 	} else {
980 		mutex_enter(&zone->zone_lock);
981 	}
982 
983 	t = zsd_find(&zone->zone_zsd, key);
984 	if (t == NULL) {
985 		/*
986 		 * Somebody else got here first, e.g. the zone going
987 		 * away.
988 		 */
989 		if (!zone_lock_held)
990 			mutex_exit(&zone->zone_lock);
991 		return (B_FALSE);
992 	}
993 	dropped = B_FALSE;
994 	if (zsd_wait_for_inprogress(zone, t, lockp))
995 		dropped = B_TRUE;
996 
997 	if (t->zsd_flags & ZSD_CREATE_NEEDED) {
998 		t->zsd_flags &= ~ZSD_CREATE_NEEDED;
999 		t->zsd_flags |= ZSD_CREATE_INPROGRESS;
1000 		DTRACE_PROBE2(zsd__create__inprogress,
1001 		    zone_t *, zone, zone_key_t, key);
1002 		mutex_exit(&zone->zone_lock);
1003 		if (lockp != NULL)
1004 			mutex_exit(lockp);
1005 
1006 		dropped = B_TRUE;
1007 		ASSERT(t->zsd_create != NULL);
1008 		DTRACE_PROBE2(zsd__create__start,
1009 		    zone_t *, zone, zone_key_t, key);
1010 
1011 		result = (*t->zsd_create)(zone->zone_id);
1012 
1013 		DTRACE_PROBE2(zsd__create__end,
1014 		    zone_t *, zone, void *, result);
1015 
1016 		ASSERT(result != NULL);
1017 		if (lockp != NULL)
1018 			mutex_enter(lockp);
1019 		mutex_enter(&zone->zone_lock);
1020 		t->zsd_data = result;
1021 		t->zsd_flags &= ~ZSD_CREATE_INPROGRESS;
1022 		t->zsd_flags |= ZSD_CREATE_COMPLETED;
1023 		cv_broadcast(&t->zsd_cv);
1024 		DTRACE_PROBE2(zsd__create__completed,
1025 		    zone_t *, zone, zone_key_t, key);
1026 	}
1027 	if (!zone_lock_held)
1028 		mutex_exit(&zone->zone_lock);
1029 	return (dropped);
1030 }
1031 
1032 /*
1033  * Call the shutdown function for the zone and key if SHUTDOWN_NEEDED
1034  * is set.
1035  * If some other thread gets here first and sets *_INPROGRESS, then
1036  * we wait for that thread to complete so that we can ensure that
1037  * all the callbacks are done when we've looped over all zones/keys.
1038  *
1039  * When we call the shutdown function, we drop the global lock held by the
1040  * caller, and return true to tell the caller it needs to re-evaluate the
1041  * state.
1042  * If the caller holds zone_lock then zone_lock_held is set, and zone_lock
1043  * remains held on exit.
1044  */
1045 static boolean_t
1046 zsd_apply_shutdown(kmutex_t *lockp, boolean_t zone_lock_held,
1047     zone_t *zone, zone_key_t key)
1048 {
1049 	struct zsd_entry *t;
1050 	void *data;
1051 	boolean_t dropped;
1052 
1053 	if (lockp != NULL) {
1054 		ASSERT(MUTEX_HELD(lockp));
1055 	}
1056 	if (zone_lock_held) {
1057 		ASSERT(MUTEX_HELD(&zone->zone_lock));
1058 	} else {
1059 		mutex_enter(&zone->zone_lock);
1060 	}
1061 
1062 	t = zsd_find(&zone->zone_zsd, key);
1063 	if (t == NULL) {
1064 		/*
1065 		 * Somebody else got here first, e.g. the zone going
1066 		 * away.
1067 		 */
1068 		if (!zone_lock_held)
1069 			mutex_exit(&zone->zone_lock);
1070 		return (B_FALSE);
1071 	}
1072 	dropped = B_FALSE;
1073 	if (zsd_wait_for_creator(zone, t, lockp))
1074 		dropped = B_TRUE;
1075 
1076 	if (zsd_wait_for_inprogress(zone, t, lockp))
1077 		dropped = B_TRUE;
1078 
1079 	if (t->zsd_flags & ZSD_SHUTDOWN_NEEDED) {
1080 		t->zsd_flags &= ~ZSD_SHUTDOWN_NEEDED;
1081 		t->zsd_flags |= ZSD_SHUTDOWN_INPROGRESS;
1082 		DTRACE_PROBE2(zsd__shutdown__inprogress,
1083 		    zone_t *, zone, zone_key_t, key);
1084 		mutex_exit(&zone->zone_lock);
1085 		if (lockp != NULL)
1086 			mutex_exit(lockp);
1087 		dropped = B_TRUE;
1088 
1089 		ASSERT(t->zsd_shutdown != NULL);
1090 		data = t->zsd_data;
1091 
1092 		DTRACE_PROBE2(zsd__shutdown__start,
1093 		    zone_t *, zone, zone_key_t, key);
1094 
1095 		(t->zsd_shutdown)(zone->zone_id, data);
1096 		DTRACE_PROBE2(zsd__shutdown__end,
1097 		    zone_t *, zone, zone_key_t, key);
1098 
1099 		if (lockp != NULL)
1100 			mutex_enter(lockp);
1101 		mutex_enter(&zone->zone_lock);
1102 		t->zsd_flags &= ~ZSD_SHUTDOWN_INPROGRESS;
1103 		t->zsd_flags |= ZSD_SHUTDOWN_COMPLETED;
1104 		cv_broadcast(&t->zsd_cv);
1105 		DTRACE_PROBE2(zsd__shutdown__completed,
1106 		    zone_t *, zone, zone_key_t, key);
1107 	}
1108 	if (!zone_lock_held)
1109 		mutex_exit(&zone->zone_lock);
1110 	return (dropped);
1111 }
1112 
1113 /*
1114  * Call the destroy function for the zone and key if DESTROY_NEEDED
1115  * is set.
1116  * If some other thread gets here first and sets *_INPROGRESS, then
1117  * we wait for that thread to complete so that we can ensure that
1118  * all the callbacks are done when we've looped over all zones/keys.
1119  *
1120  * When we call the destroy function, we drop the global lock held by the
1121  * caller, and return true to tell the caller it needs to re-evaluate the
1122  * state.
1123  * If the caller holds zone_lock then zone_lock_held is set, and zone_lock
1124  * remains held on exit.
1125  */
1126 static boolean_t
1127 zsd_apply_destroy(kmutex_t *lockp, boolean_t zone_lock_held,
1128     zone_t *zone, zone_key_t key)
1129 {
1130 	struct zsd_entry *t;
1131 	void *data;
1132 	boolean_t dropped;
1133 
1134 	if (lockp != NULL) {
1135 		ASSERT(MUTEX_HELD(lockp));
1136 	}
1137 	if (zone_lock_held) {
1138 		ASSERT(MUTEX_HELD(&zone->zone_lock));
1139 	} else {
1140 		mutex_enter(&zone->zone_lock);
1141 	}
1142 
1143 	t = zsd_find(&zone->zone_zsd, key);
1144 	if (t == NULL) {
1145 		/*
1146 		 * Somebody else got here first, e.g. the zone going
1147 		 * away.
1148 		 */
1149 		if (!zone_lock_held)
1150 			mutex_exit(&zone->zone_lock);
1151 		return (B_FALSE);
1152 	}
1153 	dropped = B_FALSE;
1154 	if (zsd_wait_for_creator(zone, t, lockp))
1155 		dropped = B_TRUE;
1156 
1157 	if (zsd_wait_for_inprogress(zone, t, lockp))
1158 		dropped = B_TRUE;
1159 
1160 	if (t->zsd_flags & ZSD_DESTROY_NEEDED) {
1161 		t->zsd_flags &= ~ZSD_DESTROY_NEEDED;
1162 		t->zsd_flags |= ZSD_DESTROY_INPROGRESS;
1163 		DTRACE_PROBE2(zsd__destroy__inprogress,
1164 		    zone_t *, zone, zone_key_t, key);
1165 		mutex_exit(&zone->zone_lock);
1166 		if (lockp != NULL)
1167 			mutex_exit(lockp);
1168 		dropped = B_TRUE;
1169 
1170 		ASSERT(t->zsd_destroy != NULL);
1171 		data = t->zsd_data;
1172 		DTRACE_PROBE2(zsd__destroy__start,
1173 		    zone_t *, zone, zone_key_t, key);
1174 
1175 		(t->zsd_destroy)(zone->zone_id, data);
1176 		DTRACE_PROBE2(zsd__destroy__end,
1177 		    zone_t *, zone, zone_key_t, key);
1178 
1179 		if (lockp != NULL)
1180 			mutex_enter(lockp);
1181 		mutex_enter(&zone->zone_lock);
1182 		t->zsd_data = NULL;
1183 		t->zsd_flags &= ~ZSD_DESTROY_INPROGRESS;
1184 		t->zsd_flags |= ZSD_DESTROY_COMPLETED;
1185 		cv_broadcast(&t->zsd_cv);
1186 		DTRACE_PROBE2(zsd__destroy__completed,
1187 		    zone_t *, zone, zone_key_t, key);
1188 	}
1189 	if (!zone_lock_held)
1190 		mutex_exit(&zone->zone_lock);
1191 	return (dropped);
1192 }
1193 
1194 /*
1195  * Wait for any CREATE_NEEDED flag to be cleared.
1196  * Returns true if lockp was temporarily dropped while waiting.
1197  */
1198 static boolean_t
1199 zsd_wait_for_creator(zone_t *zone, struct zsd_entry *t, kmutex_t *lockp)
1200 {
1201 	boolean_t dropped = B_FALSE;
1202 
1203 	while (t->zsd_flags & ZSD_CREATE_NEEDED) {
1204 		DTRACE_PROBE2(zsd__wait__for__creator,
1205 		    zone_t *, zone, struct zsd_entry *, t);
1206 		if (lockp != NULL) {
1207 			dropped = B_TRUE;
1208 			mutex_exit(lockp);
1209 		}
1210 		cv_wait(&t->zsd_cv, &zone->zone_lock);
1211 		if (lockp != NULL) {
1212 			/* First drop zone_lock to preserve order */
1213 			mutex_exit(&zone->zone_lock);
1214 			mutex_enter(lockp);
1215 			mutex_enter(&zone->zone_lock);
1216 		}
1217 	}
1218 	return (dropped);
1219 }
1220 
1221 /*
1222  * Wait for any INPROGRESS flag to be cleared.
1223  * Returns true if lockp was temporarily dropped while waiting.
1224  */
1225 static boolean_t
1226 zsd_wait_for_inprogress(zone_t *zone, struct zsd_entry *t, kmutex_t *lockp)
1227 {
1228 	boolean_t dropped = B_FALSE;
1229 
1230 	while (t->zsd_flags & ZSD_ALL_INPROGRESS) {
1231 		DTRACE_PROBE2(zsd__wait__for__inprogress,
1232 		    zone_t *, zone, struct zsd_entry *, t);
1233 		if (lockp != NULL) {
1234 			dropped = B_TRUE;
1235 			mutex_exit(lockp);
1236 		}
1237 		cv_wait(&t->zsd_cv, &zone->zone_lock);
1238 		if (lockp != NULL) {
1239 			/* First drop zone_lock to preserve order */
1240 			mutex_exit(&zone->zone_lock);
1241 			mutex_enter(lockp);
1242 			mutex_enter(&zone->zone_lock);
1243 		}
1244 	}
1245 	return (dropped);
1246 }
1247 
1248 /*
1249  * Frees memory associated with the zone dataset list.
1250  */
1251 static void
1252 zone_free_datasets(zone_t *zone)
1253 {
1254 	zone_dataset_t *t, *next;
1255 
1256 	for (t = list_head(&zone->zone_datasets); t != NULL; t = next) {
1257 		next = list_next(&zone->zone_datasets, t);
1258 		list_remove(&zone->zone_datasets, t);
1259 		kmem_free(t->zd_dataset, strlen(t->zd_dataset) + 1);
1260 		kmem_free(t, sizeof (*t));
1261 	}
1262 	list_destroy(&zone->zone_datasets);
1263 }
1264 
1265 /*
1266  * zone.cpu-shares resource control support.
1267  */
1268 /*ARGSUSED*/
1269 static rctl_qty_t
1270 zone_cpu_shares_usage(rctl_t *rctl, struct proc *p)
1271 {
1272 	ASSERT(MUTEX_HELD(&p->p_lock));
1273 	return (p->p_zone->zone_shares);
1274 }
1275 
1276 /*ARGSUSED*/
1277 static int
1278 zone_cpu_shares_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1279     rctl_qty_t nv)
1280 {
1281 	ASSERT(MUTEX_HELD(&p->p_lock));
1282 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1283 	if (e->rcep_p.zone == NULL)
1284 		return (0);
1285 
1286 	e->rcep_p.zone->zone_shares = nv;
1287 	return (0);
1288 }
1289 
1290 static rctl_ops_t zone_cpu_shares_ops = {
1291 	rcop_no_action,
1292 	zone_cpu_shares_usage,
1293 	zone_cpu_shares_set,
1294 	rcop_no_test
1295 };
1296 
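/*
 * These rcop vectors are wired up when zone_init() registers the zone
 * rctls.  A simplified sketch of that registration (global flags elided;
 * the exact values shown here are illustrative):
 *
 *	rc_zone_cpu_shares = rctl_register("zone.cpu-shares",
 *	    RCENTITY_ZONE, RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
 *	    FSS_MAXSHARES, FSS_MAXSHARES, &zone_cpu_shares_ops);
 */
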
1297 /*
1298  * zone.cpu-cap resource control support.
1299  */
1300 /*ARGSUSED*/
1301 static rctl_qty_t
1302 zone_cpu_cap_get(rctl_t *rctl, struct proc *p)
1303 {
1304 	ASSERT(MUTEX_HELD(&p->p_lock));
1305 	return (cpucaps_zone_get(p->p_zone));
1306 }
1307 
1308 /*ARGSUSED*/
1309 static int
1310 zone_cpu_cap_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1311     rctl_qty_t nv)
1312 {
1313 	zone_t *zone = e->rcep_p.zone;
1314 
1315 	ASSERT(MUTEX_HELD(&p->p_lock));
1316 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1317 
1318 	if (zone == NULL)
1319 		return (0);
1320 
1321 	/*
1322 	 * set cap to the new value.
1323 	 */
1324 	return (cpucaps_zone_set(zone, nv));
1325 }
1326 
1327 static rctl_ops_t zone_cpu_cap_ops = {
1328 	rcop_no_action,
1329 	zone_cpu_cap_get,
1330 	zone_cpu_cap_set,
1331 	rcop_no_test
1332 };
1333 
1334 /*ARGSUSED*/
1335 static rctl_qty_t
1336 zone_lwps_usage(rctl_t *r, proc_t *p)
1337 {
1338 	rctl_qty_t nlwps;
1339 	zone_t *zone = p->p_zone;
1340 
1341 	ASSERT(MUTEX_HELD(&p->p_lock));
1342 
1343 	mutex_enter(&zone->zone_nlwps_lock);
1344 	nlwps = zone->zone_nlwps;
1345 	mutex_exit(&zone->zone_nlwps_lock);
1346 
1347 	return (nlwps);
1348 }
1349 
1350 /*ARGSUSED*/
1351 static int
1352 zone_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
1353     rctl_qty_t incr, uint_t flags)
1354 {
1355 	rctl_qty_t nlwps;
1356 
1357 	ASSERT(MUTEX_HELD(&p->p_lock));
1358 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1359 	if (e->rcep_p.zone == NULL)
1360 		return (0);
1361 	ASSERT(MUTEX_HELD(&(e->rcep_p.zone->zone_nlwps_lock)));
1362 	nlwps = e->rcep_p.zone->zone_nlwps;
1363 
1364 	if (nlwps + incr > rcntl->rcv_value)
1365 		return (1);
1366 
1367 	return (0);
1368 }
1369 
1370 /*ARGSUSED*/
1371 static int
1372 zone_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e, rctl_qty_t nv)
1373 {
1374 	ASSERT(MUTEX_HELD(&p->p_lock));
1375 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1376 	if (e->rcep_p.zone == NULL)
1377 		return (0);
1378 	e->rcep_p.zone->zone_nlwps_ctl = nv;
1379 	return (0);
1380 }
1381 
1382 static rctl_ops_t zone_lwps_ops = {
1383 	rcop_no_action,
1384 	zone_lwps_usage,
1385 	zone_lwps_set,
1386 	zone_lwps_test,
1387 };
1388 
1389 /*ARGSUSED*/
1390 static int
1391 zone_shmmax_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1392     rctl_qty_t incr, uint_t flags)
1393 {
1394 	rctl_qty_t v;
1395 	ASSERT(MUTEX_HELD(&p->p_lock));
1396 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1397 	v = e->rcep_p.zone->zone_shmmax + incr;
1398 	if (v > rval->rcv_value)
1399 		return (1);
1400 	return (0);
1401 }
1402 
1403 static rctl_ops_t zone_shmmax_ops = {
1404 	rcop_no_action,
1405 	rcop_no_usage,
1406 	rcop_no_set,
1407 	zone_shmmax_test
1408 };
1409 
1410 /*ARGSUSED*/
1411 static int
1412 zone_shmmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1413     rctl_qty_t incr, uint_t flags)
1414 {
1415 	rctl_qty_t v;
1416 	ASSERT(MUTEX_HELD(&p->p_lock));
1417 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1418 	v = e->rcep_p.zone->zone_ipc.ipcq_shmmni + incr;
1419 	if (v > rval->rcv_value)
1420 		return (1);
1421 	return (0);
1422 }
1423 
1424 static rctl_ops_t zone_shmmni_ops = {
1425 	rcop_no_action,
1426 	rcop_no_usage,
1427 	rcop_no_set,
1428 	zone_shmmni_test
1429 };
1430 
1431 /*ARGSUSED*/
1432 static int
1433 zone_semmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1434     rctl_qty_t incr, uint_t flags)
1435 {
1436 	rctl_qty_t v;
1437 	ASSERT(MUTEX_HELD(&p->p_lock));
1438 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1439 	v = e->rcep_p.zone->zone_ipc.ipcq_semmni + incr;
1440 	if (v > rval->rcv_value)
1441 		return (1);
1442 	return (0);
1443 }
1444 
1445 static rctl_ops_t zone_semmni_ops = {
1446 	rcop_no_action,
1447 	rcop_no_usage,
1448 	rcop_no_set,
1449 	zone_semmni_test
1450 };
1451 
1452 /*ARGSUSED*/
1453 static int
1454 zone_msgmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1455     rctl_qty_t incr, uint_t flags)
1456 {
1457 	rctl_qty_t v;
1458 	ASSERT(MUTEX_HELD(&p->p_lock));
1459 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1460 	v = e->rcep_p.zone->zone_ipc.ipcq_msgmni + incr;
1461 	if (v > rval->rcv_value)
1462 		return (1);
1463 	return (0);
1464 }
1465 
1466 static rctl_ops_t zone_msgmni_ops = {
1467 	rcop_no_action,
1468 	rcop_no_usage,
1469 	rcop_no_set,
1470 	zone_msgmni_test
1471 };
1472 
1473 /*ARGSUSED*/
1474 static rctl_qty_t
1475 zone_locked_mem_usage(rctl_t *rctl, struct proc *p)
1476 {
1477 	rctl_qty_t q;
1478 	ASSERT(MUTEX_HELD(&p->p_lock));
1479 	mutex_enter(&p->p_zone->zone_mem_lock);
1480 	q = p->p_zone->zone_locked_mem;
1481 	mutex_exit(&p->p_zone->zone_mem_lock);
1482 	return (q);
1483 }
1484 
1485 /*ARGSUSED*/
1486 static int
1487 zone_locked_mem_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
1488     rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
1489 {
1490 	rctl_qty_t q;
1491 	zone_t *z;
1492 
1493 	z = e->rcep_p.zone;
1494 	ASSERT(MUTEX_HELD(&p->p_lock));
1495 	ASSERT(MUTEX_HELD(&z->zone_mem_lock));
1496 	q = z->zone_locked_mem;
1497 	if (q + incr > rcntl->rcv_value)
1498 		return (1);
1499 	return (0);
1500 }
1501 
1502 /*ARGSUSED*/
1503 static int
1504 zone_locked_mem_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1505     rctl_qty_t nv)
1506 {
1507 	ASSERT(MUTEX_HELD(&p->p_lock));
1508 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1509 	if (e->rcep_p.zone == NULL)
1510 		return (0);
1511 	e->rcep_p.zone->zone_locked_mem_ctl = nv;
1512 	return (0);
1513 }
1514 
1515 static rctl_ops_t zone_locked_mem_ops = {
1516 	rcop_no_action,
1517 	zone_locked_mem_usage,
1518 	zone_locked_mem_set,
1519 	zone_locked_mem_test
1520 };
1521 
1522 /*ARGSUSED*/
1523 static rctl_qty_t
1524 zone_max_swap_usage(rctl_t *rctl, struct proc *p)
1525 {
1526 	rctl_qty_t q;
1527 	zone_t *z = p->p_zone;
1528 
1529 	ASSERT(MUTEX_HELD(&p->p_lock));
1530 	mutex_enter(&z->zone_mem_lock);
1531 	q = z->zone_max_swap;
1532 	mutex_exit(&z->zone_mem_lock);
1533 	return (q);
1534 }
1535 
1536 /*ARGSUSED*/
1537 static int
1538 zone_max_swap_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
1539     rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
1540 {
1541 	rctl_qty_t q;
1542 	zone_t *z;
1543 
1544 	z = e->rcep_p.zone;
1545 	ASSERT(MUTEX_HELD(&p->p_lock));
1546 	ASSERT(MUTEX_HELD(&z->zone_mem_lock));
1547 	q = z->zone_max_swap;
1548 	if (q + incr > rcntl->rcv_value)
1549 		return (1);
1550 	return (0);
1551 }
1552 
1553 /*ARGSUSED*/
1554 static int
1555 zone_max_swap_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1556     rctl_qty_t nv)
1557 {
1558 	ASSERT(MUTEX_HELD(&p->p_lock));
1559 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1560 	if (e->rcep_p.zone == NULL)
1561 		return (0);
1562 	e->rcep_p.zone->zone_max_swap_ctl = nv;
1563 	return (0);
1564 }
1565 
1566 static rctl_ops_t zone_max_swap_ops = {
1567 	rcop_no_action,
1568 	zone_max_swap_usage,
1569 	zone_max_swap_set,
1570 	zone_max_swap_test
1571 };
1572 
1573 /*
1574  * Helper function to assign a unique ID to the zone.
1575  */
1576 static void
1577 zone_uniqid(zone_t *zone)
1578 {
1579 	static uint64_t uniqid = 0;
1580 
1581 	ASSERT(MUTEX_HELD(&zonehash_lock));
1582 	zone->zone_uniqid = uniqid++;
1583 }
1584 
1585 /*
1586  * Returns a held pointer to the "kcred" for the specified zone.
1587  */
1588 struct cred *
1589 zone_get_kcred(zoneid_t zoneid)
1590 {
1591 	zone_t *zone;
1592 	cred_t *cr;
1593 
1594 	if ((zone = zone_find_by_id(zoneid)) == NULL)
1595 		return (NULL);
1596 	cr = zone->zone_kcred;
1597 	crhold(cr);
1598 	zone_rele(zone);
1599 	return (cr);
1600 }
1601 
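/*
 * Illustrative sketch of how a caller uses the above: the returned cred is
 * held, so it must be released with crfree() when no longer needed.
 *
 *	cred_t *cr;
 *
 *	if ((cr = zone_get_kcred(zoneid)) != NULL) {
 *		... use cr ...
 *		crfree(cr);
 *	}
 */
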
1602 static int
1603 zone_lockedmem_kstat_update(kstat_t *ksp, int rw)
1604 {
1605 	zone_t *zone = ksp->ks_private;
1606 	zone_kstat_t *zk = ksp->ks_data;
1607 
1608 	if (rw == KSTAT_WRITE)
1609 		return (EACCES);
1610 
1611 	zk->zk_usage.value.ui64 = zone->zone_locked_mem;
1612 	zk->zk_value.value.ui64 = zone->zone_locked_mem_ctl;
1613 	return (0);
1614 }
1615 
1616 static int
1617 zone_swapresv_kstat_update(kstat_t *ksp, int rw)
1618 {
1619 	zone_t *zone = ksp->ks_private;
1620 	zone_kstat_t *zk = ksp->ks_data;
1621 
1622 	if (rw == KSTAT_WRITE)
1623 		return (EACCES);
1624 
1625 	zk->zk_usage.value.ui64 = zone->zone_max_swap;
1626 	zk->zk_value.value.ui64 = zone->zone_max_swap_ctl;
1627 	return (0);
1628 }
1629 
1630 static void
1631 zone_kstat_create(zone_t *zone)
1632 {
1633 	kstat_t *ksp;
1634 	zone_kstat_t *zk;
1635 
1636 	ksp = rctl_kstat_create_zone(zone, "lockedmem", KSTAT_TYPE_NAMED,
1637 	    sizeof (zone_kstat_t) / sizeof (kstat_named_t),
1638 	    KSTAT_FLAG_VIRTUAL);
1639 
1640 	if (ksp == NULL)
1641 		return;
1642 
1643 	zk = ksp->ks_data = kmem_alloc(sizeof (zone_kstat_t), KM_SLEEP);
1644 	ksp->ks_data_size += strlen(zone->zone_name) + 1;
1645 	kstat_named_init(&zk->zk_zonename, "zonename", KSTAT_DATA_STRING);
1646 	kstat_named_setstr(&zk->zk_zonename, zone->zone_name);
1647 	kstat_named_init(&zk->zk_usage, "usage", KSTAT_DATA_UINT64);
1648 	kstat_named_init(&zk->zk_value, "value", KSTAT_DATA_UINT64);
1649 	ksp->ks_update = zone_lockedmem_kstat_update;
1650 	ksp->ks_private = zone;
1651 	kstat_install(ksp);
1652 
1653 	zone->zone_lockedmem_kstat = ksp;
1654 
1655 	ksp = rctl_kstat_create_zone(zone, "swapresv", KSTAT_TYPE_NAMED,
1656 	    sizeof (zone_kstat_t) / sizeof (kstat_named_t),
1657 	    KSTAT_FLAG_VIRTUAL);
1658 
1659 	if (ksp == NULL)
1660 		return;
1661 
1662 	zk = ksp->ks_data = kmem_alloc(sizeof (zone_kstat_t), KM_SLEEP);
1663 	ksp->ks_data_size += strlen(zone->zone_name) + 1;
1664 	kstat_named_init(&zk->zk_zonename, "zonename", KSTAT_DATA_STRING);
1665 	kstat_named_setstr(&zk->zk_zonename, zone->zone_name);
1666 	kstat_named_init(&zk->zk_usage, "usage", KSTAT_DATA_UINT64);
1667 	kstat_named_init(&zk->zk_value, "value", KSTAT_DATA_UINT64);
1668 	ksp->ks_update = zone_swapresv_kstat_update;
1669 	ksp->ks_private = zone;
1670 	kstat_install(ksp);
1671 
1672 	zone->zone_swapresv_kstat = ksp;
1673 }
1674 
1675 static void
1676 zone_kstat_delete(zone_t *zone)
1677 {
1678 	void *data;
1679 
1680 	if (zone->zone_lockedmem_kstat != NULL) {
1681 		data = zone->zone_lockedmem_kstat->ks_data;
1682 		kstat_delete(zone->zone_lockedmem_kstat);
1683 		kmem_free(data, sizeof (zone_kstat_t));
1684 	}
1685 	if (zone->zone_swapresv_kstat != NULL) {
1686 		data = zone->zone_swapresv_kstat->ks_data;
1687 		kstat_delete(zone->zone_swapresv_kstat);
1688 		kmem_free(data, sizeof (zone_kstat_t));
1689 	}
1690 }
1691 
1692 /*
1693  * Called very early on in boot to initialize the ZSD list so that
1694  * zone_key_create() can be called before zone_init().  It also initializes
1695  * portions of zone0 which may be used before zone_init() is called.  The
1696  * variable "global_zone" will be set when zone0 is fully initialized by
1697  * zone_init().
1698  */
1699 void
1700 zone_zsd_init(void)
1701 {
1702 	mutex_init(&zonehash_lock, NULL, MUTEX_DEFAULT, NULL);
1703 	mutex_init(&zsd_key_lock, NULL, MUTEX_DEFAULT, NULL);
1704 	list_create(&zsd_registered_keys, sizeof (struct zsd_entry),
1705 	    offsetof(struct zsd_entry, zsd_linkage));
1706 	list_create(&zone_active, sizeof (zone_t),
1707 	    offsetof(zone_t, zone_linkage));
1708 	list_create(&zone_deathrow, sizeof (zone_t),
1709 	    offsetof(zone_t, zone_linkage));
1710 
1711 	mutex_init(&zone0.zone_lock, NULL, MUTEX_DEFAULT, NULL);
1712 	mutex_init(&zone0.zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
1713 	mutex_init(&zone0.zone_mem_lock, NULL, MUTEX_DEFAULT, NULL);
1714 	zone0.zone_shares = 1;
1715 	zone0.zone_nlwps = 0;
1716 	zone0.zone_nlwps_ctl = INT_MAX;
1717 	zone0.zone_locked_mem = 0;
1718 	zone0.zone_locked_mem_ctl = UINT64_MAX;
1719 	ASSERT(zone0.zone_max_swap == 0);
1720 	zone0.zone_max_swap_ctl = UINT64_MAX;
1721 	zone0.zone_shmmax = 0;
1722 	zone0.zone_ipc.ipcq_shmmni = 0;
1723 	zone0.zone_ipc.ipcq_semmni = 0;
1724 	zone0.zone_ipc.ipcq_msgmni = 0;
1725 	zone0.zone_name = GLOBAL_ZONENAME;
1726 	zone0.zone_nodename = utsname.nodename;
1727 	zone0.zone_domain = srpc_domain;
1728 	zone0.zone_hostid = HW_INVALID_HOSTID;
1729 	zone0.zone_ref = 1;
1730 	zone0.zone_id = GLOBAL_ZONEID;
1731 	zone0.zone_status = ZONE_IS_RUNNING;
1732 	zone0.zone_rootpath = "/";
1733 	zone0.zone_rootpathlen = 2;
1734 	zone0.zone_psetid = ZONE_PS_INVAL;
1735 	zone0.zone_ncpus = 0;
1736 	zone0.zone_ncpus_online = 0;
1737 	zone0.zone_proc_initpid = 1;
1738 	zone0.zone_initname = initname;
1739 	zone0.zone_lockedmem_kstat = NULL;
1740 	zone0.zone_swapresv_kstat = NULL;
1741 	list_create(&zone0.zone_zsd, sizeof (struct zsd_entry),
1742 	    offsetof(struct zsd_entry, zsd_linkage));
1743 	list_insert_head(&zone_active, &zone0);
1744 
1745 	/*
1746 	 * The root filesystem is not mounted yet, so zone_rootvp cannot be set
1747 	 * to anything meaningful.  It is assigned to be 'rootdir' in
1748 	 * vfs_mountroot().
1749 	 */
1750 	zone0.zone_rootvp = NULL;
1751 	zone0.zone_vfslist = NULL;
1752 	zone0.zone_bootargs = initargs;
1753 	zone0.zone_privset = kmem_alloc(sizeof (priv_set_t), KM_SLEEP);
1754 	/*
1755 	 * The global zone has all privileges
1756 	 */
1757 	priv_fillset(zone0.zone_privset);
1758 	/*
1759 	 * Add p0 to the global zone
1760 	 */
1761 	zone0.zone_zsched = &p0;
1762 	p0.p_zone = &zone0;
1763 }
1764 
1765 /*
1766  * Compute a hash value based on the contents of the label and the DOI.  The
1767  * hash algorithm is somewhat arbitrary, but is based on the observation that
1768  * humans will likely pick labels that differ by amounts that work out to be
1769  * multiples of the number of hash chains, and thus stirring in some primes
1770  * should help.
1771  */
1772 static uint_t
1773 hash_bylabel(void *hdata, mod_hash_key_t key)
1774 {
1775 	const ts_label_t *lab = (ts_label_t *)key;
1776 	const uint32_t *up, *ue;
1777 	uint_t hash;
1778 	int i;
1779 
1780 	_NOTE(ARGUNUSED(hdata));
1781 
1782 	hash = lab->tsl_doi + (lab->tsl_doi << 1);
1783 	/* we depend on alignment of label, but not representation */
1784 	up = (const uint32_t *)&lab->tsl_label;
1785 	ue = up + sizeof (lab->tsl_label) / sizeof (*up);
1786 	i = 1;
1787 	while (up < ue) {
1788 		/* using 2^n + 1, 1 <= n <= 16 as source of many primes */
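		/* i.e., hash += *up * ((1 << ((i % 16) + 1)) + 1) */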
1789 		hash += *up + (*up << ((i % 16) + 1));
1790 		up++;
1791 		i++;
1792 	}
1793 	return (hash);
1794 }
1795 
1796 /*
1797  * All that mod_hash cares about here is zero (equal) versus non-zero (not
1798  * equal).  This may need to be changed if less than / greater than is ever
1799  * needed.
1800  */
1801 static int
1802 hash_labelkey_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
1803 {
1804 	ts_label_t *lab1 = (ts_label_t *)key1;
1805 	ts_label_t *lab2 = (ts_label_t *)key2;
1806 
1807 	return (label_equal(lab1, lab2) ? 0 : 1);
1808 }
1809 
1810 /*
1811  * Called by main() to initialize the zones framework.
1812  */
1813 void
1814 zone_init(void)
1815 {
1816 	rctl_dict_entry_t *rde;
1817 	rctl_val_t *dval;
1818 	rctl_set_t *set;
1819 	rctl_alloc_gp_t *gp;
1820 	rctl_entity_p_t e;
1821 	int res;
1822 
1823 	ASSERT(curproc == &p0);
1824 
1825 	/*
1826 	 * Create ID space for zone IDs.  ID 0 is reserved for the
1827 	 * global zone.
1828 	 */
1829 	zoneid_space = id_space_create("zoneid_space", 1, MAX_ZONEID);
1830 
1831 	/*
1832 	 * Initialize generic zone resource controls, if any.
1833 	 */
1834 	rc_zone_cpu_shares = rctl_register("zone.cpu-shares",
1835 	    RCENTITY_ZONE, RCTL_GLOBAL_SIGNAL_NEVER | RCTL_GLOBAL_DENY_NEVER |
1836 	    RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER,
1837 	    FSS_MAXSHARES, FSS_MAXSHARES, &zone_cpu_shares_ops);
1838 
1839 	rc_zone_cpu_cap = rctl_register("zone.cpu-cap",
1840 	    RCENTITY_ZONE, RCTL_GLOBAL_SIGNAL_NEVER | RCTL_GLOBAL_DENY_ALWAYS |
1841 	    RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER |
1842 	    RCTL_GLOBAL_INFINITE,
1843 	    MAXCAP, MAXCAP, &zone_cpu_cap_ops);
1844 
1845 	rc_zone_nlwps = rctl_register("zone.max-lwps", RCENTITY_ZONE,
1846 	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
1847 	    INT_MAX, INT_MAX, &zone_lwps_ops);
1848 	/*
1849 	 * System V IPC resource controls
1850 	 */
1851 	rc_zone_msgmni = rctl_register("zone.max-msg-ids",
1852 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
1853 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_msgmni_ops);
1854 
1855 	rc_zone_semmni = rctl_register("zone.max-sem-ids",
1856 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
1857 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_semmni_ops);
1858 
1859 	rc_zone_shmmni = rctl_register("zone.max-shm-ids",
1860 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
1861 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_shmmni_ops);
1862 
1863 	rc_zone_shmmax = rctl_register("zone.max-shm-memory",
1864 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
1865 	    RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX, &zone_shmmax_ops);
1866 
1867 	/*
1868 	 * Create a rctl_val with PRIVILEGED, NOACTION, value = 1.  Then attach
1869 	 * this at the head of the rctl_dict_entry for ``zone.cpu-shares''.
1870 	 */
1871 	dval = kmem_cache_alloc(rctl_val_cache, KM_SLEEP);
1872 	bzero(dval, sizeof (rctl_val_t));
1873 	dval->rcv_value = 1;
1874 	dval->rcv_privilege = RCPRIV_PRIVILEGED;
1875 	dval->rcv_flagaction = RCTL_LOCAL_NOACTION;
1876 	dval->rcv_action_recip_pid = -1;
1877 
1878 	rde = rctl_dict_lookup("zone.cpu-shares");
1879 	(void) rctl_val_list_insert(&rde->rcd_default_value, dval);
1880 
1881 	rc_zone_locked_mem = rctl_register("zone.max-locked-memory",
1882 	    RCENTITY_ZONE, RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES |
1883 	    RCTL_GLOBAL_DENY_ALWAYS, UINT64_MAX, UINT64_MAX,
1884 	    &zone_locked_mem_ops);
1885 
1886 	rc_zone_max_swap = rctl_register("zone.max-swap",
1887 	    RCENTITY_ZONE, RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES |
1888 	    RCTL_GLOBAL_DENY_ALWAYS, UINT64_MAX, UINT64_MAX,
1889 	    &zone_max_swap_ops);
1890 
1891 	/*
1892 	 * Initialize the ``global zone''.
1893 	 */
1894 	set = rctl_set_create();
1895 	gp = rctl_set_init_prealloc(RCENTITY_ZONE);
1896 	mutex_enter(&p0.p_lock);
1897 	e.rcep_p.zone = &zone0;
1898 	e.rcep_t = RCENTITY_ZONE;
1899 	zone0.zone_rctls = rctl_set_init(RCENTITY_ZONE, &p0, &e, set,
1900 	    gp);
1901 
1902 	zone0.zone_nlwps = p0.p_lwpcnt;
1903 	zone0.zone_ntasks = 1;
1904 	mutex_exit(&p0.p_lock);
1905 	zone0.zone_restart_init = B_TRUE;
1906 	zone0.zone_brand = &native_brand;
1907 	rctl_prealloc_destroy(gp);
1908 	/*
1909 	 * pool_default hasn't been initialized yet, so we let pool_init()
1910 	 * take care of making sure the global zone is in the default pool.
1911 	 */
1912 
1913 	/*
1914 	 * Initialize global zone kstats
1915 	 */
1916 	zone_kstat_create(&zone0);
1917 
1918 	/*
1919 	 * Initialize zone label.
1920 	 * mlp are initialized when tnzonecfg is loaded.
1921 	 * MLPs are initialized when tnzonecfg is loaded.
1922 	zone0.zone_slabel = l_admin_low;
1923 	rw_init(&zone0.zone_mlps.mlpl_rwlock, NULL, RW_DEFAULT, NULL);
1924 	label_hold(l_admin_low);
1925 
1926 	mutex_enter(&zonehash_lock);
1927 	zone_uniqid(&zone0);
1928 	ASSERT(zone0.zone_uniqid == GLOBAL_ZONEUNIQID);
1929 
1930 	zonehashbyid = mod_hash_create_idhash("zone_by_id", zone_hash_size,
1931 	    mod_hash_null_valdtor);
1932 	zonehashbyname = mod_hash_create_strhash("zone_by_name",
1933 	    zone_hash_size, mod_hash_null_valdtor);
1934 	/*
1935 	 * maintain zonehashbylabel only for labeled systems
1936 	 */
1937 	if (is_system_labeled())
1938 		zonehashbylabel = mod_hash_create_extended("zone_by_label",
1939 		    zone_hash_size, mod_hash_null_keydtor,
1940 		    mod_hash_null_valdtor, hash_bylabel, NULL,
1941 		    hash_labelkey_cmp, KM_SLEEP);
1942 	zonecount = 1;
1943 
1944 	(void) mod_hash_insert(zonehashbyid, (mod_hash_key_t)GLOBAL_ZONEID,
1945 	    (mod_hash_val_t)&zone0);
1946 	(void) mod_hash_insert(zonehashbyname, (mod_hash_key_t)zone0.zone_name,
1947 	    (mod_hash_val_t)&zone0);
1948 	if (is_system_labeled()) {
1949 		zone0.zone_flags |= ZF_HASHED_LABEL;
1950 		(void) mod_hash_insert(zonehashbylabel,
1951 		    (mod_hash_key_t)zone0.zone_slabel, (mod_hash_val_t)&zone0);
1952 	}
1953 	mutex_exit(&zonehash_lock);
1954 
1955 	/*
1956 	 * We avoid setting zone_kcred until now, since kcred is initialized
1957 	 * sometime after zone_zsd_init() and before zone_init().
1958 	 */
1959 	zone0.zone_kcred = kcred;
1960 	/*
1961 	 * The global zone is fully initialized (except for zone_rootvp which
1962 	 * will be set when the root filesystem is mounted).
1963 	 */
1964 	global_zone = &zone0;
1965 
1966 	/*
1967  * Set up an event channel for sending zone status change notifications
1968 	 */
1969 	res = sysevent_evc_bind(ZONE_EVENT_CHANNEL, &zone_event_chan,
1970 	    EVCH_CREAT);
1971 
1972 	if (res)
1973 		panic("Sysevent_evc_bind failed during zone setup.\n");
1974 
1975 }
1976 
1977 static void
1978 zone_free(zone_t *zone)
1979 {
1980 	ASSERT(zone != global_zone);
1981 	ASSERT(zone->zone_ntasks == 0);
1982 	ASSERT(zone->zone_nlwps == 0);
1983 	ASSERT(zone->zone_cred_ref == 0);
1984 	ASSERT(zone->zone_kcred == NULL);
1985 	ASSERT(zone_status_get(zone) == ZONE_IS_DEAD ||
1986 	    zone_status_get(zone) == ZONE_IS_UNINITIALIZED);
1987 
1988 	/*
1989 	 * Remove any zone caps.
1990 	 */
1991 	cpucaps_zone_remove(zone);
1992 
1993 	ASSERT(zone->zone_cpucap == NULL);
1994 
1995 	/* remove from deathrow list */
1996 	if (zone_status_get(zone) == ZONE_IS_DEAD) {
1997 		ASSERT(zone->zone_ref == 0);
1998 		mutex_enter(&zone_deathrow_lock);
1999 		list_remove(&zone_deathrow, zone);
2000 		mutex_exit(&zone_deathrow_lock);
2001 	}
2002 
2003 	zone_free_zsd(zone);
2004 	zone_free_datasets(zone);
2005 
2006 	if (zone->zone_rootvp != NULL)
2007 		VN_RELE(zone->zone_rootvp);
2008 	if (zone->zone_rootpath)
2009 		kmem_free(zone->zone_rootpath, zone->zone_rootpathlen);
2010 	if (zone->zone_name != NULL)
2011 		kmem_free(zone->zone_name, ZONENAME_MAX);
2012 	if (zone->zone_slabel != NULL)
2013 		label_rele(zone->zone_slabel);
2014 	if (zone->zone_nodename != NULL)
2015 		kmem_free(zone->zone_nodename, _SYS_NMLN);
2016 	if (zone->zone_domain != NULL)
2017 		kmem_free(zone->zone_domain, _SYS_NMLN);
2018 	if (zone->zone_privset != NULL)
2019 		kmem_free(zone->zone_privset, sizeof (priv_set_t));
2020 	if (zone->zone_rctls != NULL)
2021 		rctl_set_free(zone->zone_rctls);
2022 	if (zone->zone_bootargs != NULL)
2023 		kmem_free(zone->zone_bootargs, strlen(zone->zone_bootargs) + 1);
2024 	if (zone->zone_initname != NULL)
2025 		kmem_free(zone->zone_initname, strlen(zone->zone_initname) + 1);
2026 	id_free(zoneid_space, zone->zone_id);
2027 	mutex_destroy(&zone->zone_lock);
2028 	cv_destroy(&zone->zone_cv);
2029 	rw_destroy(&zone->zone_mlps.mlpl_rwlock);
2030 	kmem_free(zone, sizeof (zone_t));
2031 }
2032 
2033 /*
2034  * Convenience function for setting zone status.  See the block comment at
2035  * the top of this file for information about zone status values.
2036  */
2040 static void
2041 zone_status_set(zone_t *zone, zone_status_t status)
2042 {
2043 	nvlist_t *nvl = NULL;
2044 
2045 	ASSERT(MUTEX_HELD(&zone_status_lock));
2046 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE &&
2047 	    status >= zone_status_get(zone));
2048 
2049 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) ||
2050 	    nvlist_add_string(nvl, ZONE_CB_NAME, zone->zone_name) ||
2051 	    nvlist_add_string(nvl, ZONE_CB_NEWSTATE,
2052 	    zone_status_table[status]) ||
2053 	    nvlist_add_string(nvl, ZONE_CB_OLDSTATE,
2054 	    zone_status_table[zone->zone_status]) ||
2055 	    nvlist_add_int32(nvl, ZONE_CB_ZONEID, zone->zone_id) ||
2056 	    nvlist_add_uint64(nvl, ZONE_CB_TIMESTAMP, (uint64_t)gethrtime()) ||
2057 	    sysevent_evc_publish(zone_event_chan, ZONE_EVENT_STATUS_CLASS,
2058 	    ZONE_EVENT_STATUS_SUBCLASS, "sun.com", "kernel", nvl, EVCH_SLEEP)) {
2059 #ifdef DEBUG
2060 		(void) printf(
2061 		    "Failed to allocate and send zone state change event.\n");
2062 #endif
2063 	}
2064 	nvlist_free(nvl);
2065 
2066 	zone->zone_status = status;
2067 
2068 	cv_broadcast(&zone->zone_cv);
2069 }
2070 
2071 /*
2072  * Public function to retrieve the zone status.  The zone status may
2073  * change after it is retrieved.
2074  */
2075 zone_status_t
2076 zone_status_get(zone_t *zone)
2077 {
2078 	return (zone->zone_status);
2079 }
2080 
2081 static int
2082 zone_set_bootargs(zone_t *zone, const char *zone_bootargs)
2083 {
2084 	char *bootargs = kmem_zalloc(BOOTARGS_MAX, KM_SLEEP);
2085 	int err = 0;
2086 
2087 	ASSERT(zone != global_zone);
2088 	if ((err = copyinstr(zone_bootargs, bootargs, BOOTARGS_MAX, NULL)) != 0)
2089 		goto done;	/* EFAULT or ENAMETOOLONG */
2090 
2091 	if (zone->zone_bootargs != NULL)
2092 		kmem_free(zone->zone_bootargs, strlen(zone->zone_bootargs) + 1);
2093 
2094 	zone->zone_bootargs = kmem_alloc(strlen(bootargs) + 1, KM_SLEEP);
2095 	(void) strcpy(zone->zone_bootargs, bootargs);
2096 
2097 done:
2098 	kmem_free(bootargs, BOOTARGS_MAX);
2099 	return (err);
2100 }
2101 
2102 static int
2103 zone_set_brand(zone_t *zone, const char *brand)
2104 {
2105 	struct brand_attr *attrp;
2106 	brand_t *bp;
2107 
2108 	attrp = kmem_alloc(sizeof (struct brand_attr), KM_SLEEP);
2109 	if (copyin(brand, attrp, sizeof (struct brand_attr)) != 0) {
2110 		kmem_free(attrp, sizeof (struct brand_attr));
2111 		return (EFAULT);
2112 	}
2113 
2114 	bp = brand_register_zone(attrp);
2115 	kmem_free(attrp, sizeof (struct brand_attr));
2116 	if (bp == NULL)
2117 		return (EINVAL);
2118 
2119 	/*
2120 	 * This is the only place where a zone can change its brand.
2121 	 * We already need to hold zone_status_lock to check the zone
2122 	 * status, so we'll just use that lock to serialize zone
2123 	 * branding requests as well.
2124 	 */
2125 	mutex_enter(&zone_status_lock);
2126 
2127 	/* Re-branding is not allowed; the zone must not have begun booting */
2128 	if ((ZONE_IS_BRANDED(zone)) ||
2129 	    (zone_status_get(zone) >= ZONE_IS_BOOTING)) {
2130 		mutex_exit(&zone_status_lock);
2131 		brand_unregister_zone(bp);
2132 		return (EINVAL);
2133 	}
2134 
2135 	if (is_system_labeled() &&
2136 	    strncmp(attrp->ba_brandname, NATIVE_BRAND_NAME, MAXNAMELEN) != 0) {
2137 		mutex_exit(&zone_status_lock);
2138 		brand_unregister_zone(bp);
2139 		return (EPERM);
2140 	}
2141 
2142 	/* set up the brand specific data */
2143 	zone->zone_brand = bp;
2144 	ZBROP(zone)->b_init_brand_data(zone);
2145 
2146 	mutex_exit(&zone_status_lock);
2147 	return (0);
2148 }
2149 
2150 static int
2151 zone_set_initname(zone_t *zone, const char *zone_initname)
2152 {
2153 	char initname[INITNAME_SZ];
2154 	size_t len;
2155 	int err = 0;
2156 
2157 	ASSERT(zone != global_zone);
2158 	if ((err = copyinstr(zone_initname, initname, INITNAME_SZ, &len)) != 0)
2159 		return (err);	/* EFAULT or ENAMETOOLONG */
2160 
2161 	if (zone->zone_initname != NULL)
2162 		kmem_free(zone->zone_initname, strlen(zone->zone_initname) + 1);
2163 
2164 	zone->zone_initname = kmem_alloc(strlen(initname) + 1, KM_SLEEP);
2165 	(void) strcpy(zone->zone_initname, initname);
2166 	return (0);
2167 }
2168 
2169 static int
2170 zone_set_phys_mcap(zone_t *zone, const uint64_t *zone_mcap)
2171 {
2172 	uint64_t mcap;
2173 	int err = 0;
2174 
2175 	if ((err = copyin(zone_mcap, &mcap, sizeof (uint64_t))) == 0)
2176 		zone->zone_phys_mcap = mcap;
2177 
2178 	return (err);
2179 }
2180 
2181 static int
2182 zone_set_sched_class(zone_t *zone, const char *new_class)
2183 {
2184 	char sched_class[PC_CLNMSZ];
2185 	id_t classid;
2186 	int err;
2187 
2188 	ASSERT(zone != global_zone);
2189 	if ((err = copyinstr(new_class, sched_class, PC_CLNMSZ, NULL)) != 0)
2190 		return (err);	/* EFAULT or ENAMETOOLONG */
2191 
2192 	if (getcid(sched_class, &classid) != 0 || classid == syscid)
2193 		return (set_errno(EINVAL));
2194 	zone->zone_defaultcid = classid;
2195 	ASSERT(zone->zone_defaultcid > 0 &&
2196 	    zone->zone_defaultcid < loaded_classes);
2197 
2198 	return (0);
2199 }
2200 
2201 /*
2202  * Block indefinitely waiting for (zone_status >= status)
2203  */
2204 void
2205 zone_status_wait(zone_t *zone, zone_status_t status)
2206 {
2207 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2208 
2209 	mutex_enter(&zone_status_lock);
2210 	while (zone->zone_status < status) {
2211 		cv_wait(&zone->zone_cv, &zone_status_lock);
2212 	}
2213 	mutex_exit(&zone_status_lock);
2214 }
2215 
2216 /*
2217  * Private CPR-safe version of zone_status_wait().
2218  */
2219 static void
2220 zone_status_wait_cpr(zone_t *zone, zone_status_t status, char *str)
2221 {
2222 	callb_cpr_t cprinfo;
2223 
2224 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2225 
2226 	CALLB_CPR_INIT(&cprinfo, &zone_status_lock, callb_generic_cpr,
2227 	    str);
2228 	mutex_enter(&zone_status_lock);
2229 	while (zone->zone_status < status) {
2230 		CALLB_CPR_SAFE_BEGIN(&cprinfo);
2231 		cv_wait(&zone->zone_cv, &zone_status_lock);
2232 		CALLB_CPR_SAFE_END(&cprinfo, &zone_status_lock);
2233 	}
2234 	/*
2235 	 * zone_status_lock is implicitly released by the following.
2236 	 */
2237 	CALLB_CPR_EXIT(&cprinfo);
2238 }
2239 
2240 /*
2241  * Block until zone enters requested state or signal is received.  Return (0)
2242  * if signaled, non-zero otherwise.
2243  */
2244 int
2245 zone_status_wait_sig(zone_t *zone, zone_status_t status)
2246 {
2247 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2248 
2249 	mutex_enter(&zone_status_lock);
2250 	while (zone->zone_status < status) {
2251 		if (!cv_wait_sig(&zone->zone_cv, &zone_status_lock)) {
2252 			mutex_exit(&zone_status_lock);
2253 			return (0);
2254 		}
2255 	}
2256 	mutex_exit(&zone_status_lock);
2257 	return (1);
2258 }
2259 
2260 /*
2261  * Block until the zone enters the requested state or the timeout expires,
2262  * whichever happens first.  Return (-1) if operation timed out, time remaining
2263  * otherwise.
2264  */
2265 clock_t
2266 zone_status_timedwait(zone_t *zone, clock_t tim, zone_status_t status)
2267 {
2268 	clock_t timeleft = 0;
2269 
2270 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2271 
2272 	mutex_enter(&zone_status_lock);
2273 	while (zone->zone_status < status && timeleft != -1) {
2274 		timeleft = cv_timedwait(&zone->zone_cv, &zone_status_lock, tim);
2275 	}
2276 	mutex_exit(&zone_status_lock);
2277 	return (timeleft);
2278 }
2279 
2280 /*
2281  * Block until the zone enters the requested state, the current process is
2282  * signaled, or the timeout expires, whichever happens first.  Return (-1) if
2283  * operation timed out, 0 if signaled, time remaining otherwise.
2284  */
2285 clock_t
2286 zone_status_timedwait_sig(zone_t *zone, clock_t tim, zone_status_t status)
2287 {
2288 	clock_t timeleft = tim - lbolt;
2289 
2290 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2291 
2292 	mutex_enter(&zone_status_lock);
2293 	while (zone->zone_status < status) {
2294 		timeleft = cv_timedwait_sig(&zone->zone_cv, &zone_status_lock,
2295 		    tim);
2296 		if (timeleft <= 0)
2297 			break;
2298 	}
2299 	mutex_exit(&zone_status_lock);
2300 	return (timeleft);
2301 }
2302 
2303 /*
2304  * Zones have two reference counts: one for references from credential
2305  * structures (zone_cred_ref), and one (zone_ref) for everything else.
2306  * This is so we can allow a zone to be rebooted while there are still
2307  * outstanding cred references, since certain drivers cache dblks (which
2308  * implicitly results in cached creds).  We wait for zone_ref to drop to
2309  * 0 (actually 1), but not zone_cred_ref.  The zone structure itself is
2310  * later freed when the zone_cred_ref drops to 0, though nothing other
2311  * than the zone id and privilege set should be accessed once the zone
2312  * is "dead".
2313  *
2314  * A debugging flag, zone_wait_for_cred, can be set to a non-zero value
2315  * to force halt/reboot to block waiting for the zone_cred_ref to drop
2316  * to 0.  This can be useful to flush out other sources of cached creds
2317  * that may be less innocuous than the driver case.
2318  */
2319 
2320 int zone_wait_for_cred = 0;
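
/*
 * An illustrative sketch (not taken from any particular caller) of the usual
 * hold/release discipline for the non-cred reference count; 'zid' here is a
 * hypothetical caller-supplied zoneid_t:
 *
 *	zone_t *zp;
 *
 *	if ((zp = zone_find_by_id(zid)) != NULL) {
 *		... use zp; the zone cannot be freed while held ...
 *		zone_rele(zp);
 *	}
 *
 * zone_cred_hold() and zone_cred_rele() play the analogous role for the
 * credential reference count described above.
 */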
2321 
2322 static void
2323 zone_hold_locked(zone_t *z)
2324 {
2325 	ASSERT(MUTEX_HELD(&z->zone_lock));
2326 	z->zone_ref++;
2327 	ASSERT(z->zone_ref != 0);
2328 }
2329 
2330 void
2331 zone_hold(zone_t *z)
2332 {
2333 	mutex_enter(&z->zone_lock);
2334 	zone_hold_locked(z);
2335 	mutex_exit(&z->zone_lock);
2336 }
2337 
2338 /*
2339  * If the non-cred ref count drops to 1 and either the cred ref count
2340  * is 0 or we aren't waiting for cred references, the zone is ready to
2341  * be destroyed.
2342  */
2343 #define	ZONE_IS_UNREF(zone)	((zone)->zone_ref == 1 && \
2344 	    (!zone_wait_for_cred || (zone)->zone_cred_ref == 0))
2345 
2346 void
2347 zone_rele(zone_t *z)
2348 {
2349 	boolean_t wakeup;
2350 
2351 	mutex_enter(&z->zone_lock);
2352 	ASSERT(z->zone_ref != 0);
2353 	z->zone_ref--;
2354 	if (z->zone_ref == 0 && z->zone_cred_ref == 0) {
2355 		/* no more refs, free the structure */
2356 		mutex_exit(&z->zone_lock);
2357 		zone_free(z);
2358 		return;
2359 	}
2360 	/* signal zone_destroy so the zone can finish halting */
2361 	wakeup = (ZONE_IS_UNREF(z) && zone_status_get(z) >= ZONE_IS_DEAD);
2362 	mutex_exit(&z->zone_lock);
2363 
2364 	if (wakeup) {
2365 		/*
2366 		 * Grabbing zonehash_lock here effectively synchronizes with
2367 		 * zone_destroy() to avoid missed signals.
2368 		 */
2369 		mutex_enter(&zonehash_lock);
2370 		cv_broadcast(&zone_destroy_cv);
2371 		mutex_exit(&zonehash_lock);
2372 	}
2373 }
2374 
2375 void
2376 zone_cred_hold(zone_t *z)
2377 {
2378 	mutex_enter(&z->zone_lock);
2379 	z->zone_cred_ref++;
2380 	ASSERT(z->zone_cred_ref != 0);
2381 	mutex_exit(&z->zone_lock);
2382 }
2383 
2384 void
2385 zone_cred_rele(zone_t *z)
2386 {
2387 	boolean_t wakeup;
2388 
2389 	mutex_enter(&z->zone_lock);
2390 	ASSERT(z->zone_cred_ref != 0);
2391 	z->zone_cred_ref--;
2392 	if (z->zone_ref == 0 && z->zone_cred_ref == 0) {
2393 		/* no more refs, free the structure */
2394 		mutex_exit(&z->zone_lock);
2395 		zone_free(z);
2396 		return;
2397 	}
2398 	/*
2399 	 * If zone_destroy is waiting for the cred references to drain
2400 	 * out, and they have, signal it.
2401 	 */
2402 	wakeup = (zone_wait_for_cred && ZONE_IS_UNREF(z) &&
2403 	    zone_status_get(z) >= ZONE_IS_DEAD);
2404 	mutex_exit(&z->zone_lock);
2405 
2406 	if (wakeup) {
2407 		/*
2408 		 * Grabbing zonehash_lock here effectively synchronizes with
2409 		 * zone_destroy() to avoid missed signals.
2410 		 */
2411 		mutex_enter(&zonehash_lock);
2412 		cv_broadcast(&zone_destroy_cv);
2413 		mutex_exit(&zonehash_lock);
2414 	}
2415 }
2416 
2417 void
2418 zone_task_hold(zone_t *z)
2419 {
2420 	mutex_enter(&z->zone_lock);
2421 	z->zone_ntasks++;
2422 	ASSERT(z->zone_ntasks != 0);
2423 	mutex_exit(&z->zone_lock);
2424 }
2425 
2426 void
2427 zone_task_rele(zone_t *zone)
2428 {
2429 	uint_t refcnt;
2430 
2431 	mutex_enter(&zone->zone_lock);
2432 	ASSERT(zone->zone_ntasks != 0);
2433 	refcnt = --zone->zone_ntasks;
2434 	if (refcnt > 1)	{	/* Common case */
2435 		mutex_exit(&zone->zone_lock);
2436 		return;
2437 	}
2438 	zone_hold_locked(zone);	/* so we can use the zone_t later */
2439 	mutex_exit(&zone->zone_lock);
2440 	if (refcnt == 1) {
2441 		/*
2442 		 * See if the zone is shutting down.
2443 		 */
2444 		mutex_enter(&zone_status_lock);
2445 		if (zone_status_get(zone) != ZONE_IS_SHUTTING_DOWN) {
2446 			goto out;
2447 		}
2448 
2449 		/*
2450 		 * Make sure the ntasks didn't change since we
2451 		 * dropped zone_lock.
2452 		 */
2453 		mutex_enter(&zone->zone_lock);
2454 		if (refcnt != zone->zone_ntasks) {
2455 			mutex_exit(&zone->zone_lock);
2456 			goto out;
2457 		}
2458 		mutex_exit(&zone->zone_lock);
2459 
2460 		/*
2461 		 * No more user processes in the zone.  The zone is empty.
2462 		 */
2463 		zone_status_set(zone, ZONE_IS_EMPTY);
2464 		goto out;
2465 	}
2466 
2467 	ASSERT(refcnt == 0);
2468 	/*
2469 	 * zsched has exited; the zone is dead.
2470 	 */
2471 	zone->zone_zsched = NULL;		/* paranoia */
2472 	mutex_enter(&zone_status_lock);
2473 	zone_status_set(zone, ZONE_IS_DEAD);
2474 out:
2475 	mutex_exit(&zone_status_lock);
2476 	zone_rele(zone);
2477 }
2478 
2479 zoneid_t
2480 getzoneid(void)
2481 {
2482 	return (curproc->p_zone->zone_id);
2483 }
2484 
2485 /*
2486  * Internal versions of zone_find_by_*().  These don't zone_hold() or
2487  * check the validity of a zone's state.
2488  */
2489 static zone_t *
2490 zone_find_all_by_id(zoneid_t zoneid)
2491 {
2492 	mod_hash_val_t hv;
2493 	zone_t *zone = NULL;
2494 
2495 	ASSERT(MUTEX_HELD(&zonehash_lock));
2496 
2497 	if (mod_hash_find(zonehashbyid,
2498 	    (mod_hash_key_t)(uintptr_t)zoneid, &hv) == 0)
2499 		zone = (zone_t *)hv;
2500 	return (zone);
2501 }
2502 
2503 static zone_t *
2504 zone_find_all_by_label(const ts_label_t *label)
2505 {
2506 	mod_hash_val_t hv;
2507 	zone_t *zone = NULL;
2508 
2509 	ASSERT(MUTEX_HELD(&zonehash_lock));
2510 
2511 	/*
2512 	 * zonehashbylabel is not maintained for unlabeled systems
2513 	 */
2514 	if (!is_system_labeled())
2515 		return (NULL);
2516 	if (mod_hash_find(zonehashbylabel, (mod_hash_key_t)label, &hv) == 0)
2517 		zone = (zone_t *)hv;
2518 	return (zone);
2519 }
2520 
2521 static zone_t *
2522 zone_find_all_by_name(char *name)
2523 {
2524 	mod_hash_val_t hv;
2525 	zone_t *zone = NULL;
2526 
2527 	ASSERT(MUTEX_HELD(&zonehash_lock));
2528 
2529 	if (mod_hash_find(zonehashbyname, (mod_hash_key_t)name, &hv) == 0)
2530 		zone = (zone_t *)hv;
2531 	return (zone);
2532 }
2533 
2534 /*
2535  * Public interface for looking up a zone by zoneid.  Only returns the zone if
2536  * it is fully initialized, and has not yet begun the zone_destroy() sequence.
2537  * Caller must call zone_rele() once it is done with the zone.
2538  *
2539  * The zone may begin the zone_destroy() sequence immediately after this
2540  * function returns, but may be safely used until zone_rele() is called.
2541  */
2542 zone_t *
2543 zone_find_by_id(zoneid_t zoneid)
2544 {
2545 	zone_t *zone;
2546 	zone_status_t status;
2547 
2548 	mutex_enter(&zonehash_lock);
2549 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
2550 		mutex_exit(&zonehash_lock);
2551 		return (NULL);
2552 	}
2553 	status = zone_status_get(zone);
2554 	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
2555 		/*
2556 		 * For all practical purposes the zone doesn't exist.
2557 		 */
2558 		mutex_exit(&zonehash_lock);
2559 		return (NULL);
2560 	}
2561 	zone_hold(zone);
2562 	mutex_exit(&zonehash_lock);
2563 	return (zone);
2564 }
2565 
2566 /*
2567  * Similar to zone_find_by_id, but using zone label as the key.
2568  */
2569 zone_t *
2570 zone_find_by_label(const ts_label_t *label)
2571 {
2572 	zone_t *zone;
2573 	zone_status_t status;
2574 
2575 	mutex_enter(&zonehash_lock);
2576 	if ((zone = zone_find_all_by_label(label)) == NULL) {
2577 		mutex_exit(&zonehash_lock);
2578 		return (NULL);
2579 	}
2580 
2581 	status = zone_status_get(zone);
2582 	if (status > ZONE_IS_DOWN) {
2583 		/*
2584 		 * For all practical purposes the zone doesn't exist.
2585 		 */
2586 		mutex_exit(&zonehash_lock);
2587 		return (NULL);
2588 	}
2589 	zone_hold(zone);
2590 	mutex_exit(&zonehash_lock);
2591 	return (zone);
2592 }
2593 
2594 /*
2595  * Similar to zone_find_by_id, but using zone name as the key.
2596  */
2597 zone_t *
2598 zone_find_by_name(char *name)
2599 {
2600 	zone_t *zone;
2601 	zone_status_t status;
2602 
2603 	mutex_enter(&zonehash_lock);
2604 	if ((zone = zone_find_all_by_name(name)) == NULL) {
2605 		mutex_exit(&zonehash_lock);
2606 		return (NULL);
2607 	}
2608 	status = zone_status_get(zone);
2609 	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
2610 		/*
2611 		 * For all practical purposes the zone doesn't exist.
2612 		 */
2613 		mutex_exit(&zonehash_lock);
2614 		return (NULL);
2615 	}
2616 	zone_hold(zone);
2617 	mutex_exit(&zonehash_lock);
2618 	return (zone);
2619 }
2620 
2621 /*
2622  * Similar to zone_find_by_id(), using the path as a key.  For instance,
2623  * if there is a zone "foo" rooted at /foo/root, and the path argument
2624  * is "/foo/root/proc", it will return the held zone_t corresponding to
2625  * zone "foo".
2626  *
2627  * zone_find_by_path() always returns a non-NULL value, since at the
2628  * very least every path will be contained in the global zone.
2629  *
2630  * As with the other zone_find_by_*() functions, the caller is
2631  * responsible for zone_rele()ing the return value of this function.
2632  */
2633 zone_t *
2634 zone_find_by_path(const char *path)
2635 {
2636 	zone_t *zone;
2637 	zone_t *zret = NULL;
2638 	zone_status_t status;
2639 
2640 	if (path == NULL) {
2641 		/*
2642 		 * Call from rootconf().
2643 		 */
2644 		zone_hold(global_zone);
2645 		return (global_zone);
2646 	}
2647 	ASSERT(*path == '/');
2648 	mutex_enter(&zonehash_lock);
2649 	for (zone = list_head(&zone_active); zone != NULL;
2650 	    zone = list_next(&zone_active, zone)) {
2651 		if (ZONE_PATH_VISIBLE(path, zone))
2652 			zret = zone;
2653 	}
2654 	ASSERT(zret != NULL);
2655 	status = zone_status_get(zret);
2656 	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
2657 		/*
2658 		 * Zone practically doesn't exist.
2659 		 */
2660 		zret = global_zone;
2661 	}
2662 	zone_hold(zret);
2663 	mutex_exit(&zonehash_lock);
2664 	return (zret);
2665 }
2666 
2667 /*
2668  * Get the number of cpus visible to this zone.  The system-wide global
2669  * 'ncpus' is returned if pools are disabled, the caller is in the
2670  * global zone, or a NULL zone argument is passed in.
2671  */
2672 int
2673 zone_ncpus_get(zone_t *zone)
2674 {
2675 	int myncpus = zone == NULL ? 0 : zone->zone_ncpus;
2676 
2677 	return (myncpus != 0 ? myncpus : ncpus);
2678 }
2679 
2680 /*
2681  * Get the number of online cpus visible to this zone.  The system-wide
2682  * global 'ncpus_online' is returned if pools are disabled, the caller
2683  * is in the global zone, or a NULL zone argument is passed in.
2684  */
2685 int
2686 zone_ncpus_online_get(zone_t *zone)
2687 {
2688 	int myncpus_online = zone == NULL ? 0 : zone->zone_ncpus_online;
2689 
2690 	return (myncpus_online != 0 ? myncpus_online : ncpus_online);
2691 }
2692 
2693 /*
2694  * Return the pool to which the zone is currently bound.
2695  */
2696 pool_t *
2697 zone_pool_get(zone_t *zone)
2698 {
2699 	ASSERT(pool_lock_held());
2700 
2701 	return (zone->zone_pool);
2702 }
2703 
2704 /*
2705  * Set the zone's pool pointer and update the zone's visibility to match
2706  * the resources in the new pool.
2707  */
2708 void
2709 zone_pool_set(zone_t *zone, pool_t *pool)
2710 {
2711 	ASSERT(pool_lock_held());
2712 	ASSERT(MUTEX_HELD(&cpu_lock));
2713 
2714 	zone->zone_pool = pool;
2715 	zone_pset_set(zone, pool->pool_pset->pset_id);
2716 }
2717 
2718 /*
2719  * Return the cached value of the id of the processor set to which the
2720  * zone is currently bound.  The value will be ZONE_PS_INVAL if the pools
2721  * facility is disabled.
2722  */
2723 psetid_t
2724 zone_pset_get(zone_t *zone)
2725 {
2726 	ASSERT(MUTEX_HELD(&cpu_lock));
2727 
2728 	return (zone->zone_psetid);
2729 }
2730 
2731 /*
2732  * Set the cached value of the id of the processor set to which the zone
2733  * is currently bound.  Also update the zone's visibility to match the
2734  * resources in the new processor set.
2735  */
2736 void
2737 zone_pset_set(zone_t *zone, psetid_t newpsetid)
2738 {
2739 	psetid_t oldpsetid;
2740 
2741 	ASSERT(MUTEX_HELD(&cpu_lock));
2742 	oldpsetid = zone_pset_get(zone);
2743 
2744 	if (oldpsetid == newpsetid)
2745 		return;
2746 	/*
2747 	 * Global zone sees all.
2748 	 */
2749 	if (zone != global_zone) {
2750 		zone->zone_psetid = newpsetid;
2751 		if (newpsetid != ZONE_PS_INVAL)
2752 			pool_pset_visibility_add(newpsetid, zone);
2753 		if (oldpsetid != ZONE_PS_INVAL)
2754 			pool_pset_visibility_remove(oldpsetid, zone);
2755 	}
2756 	/*
2757 	 * Disabling pools, so we should start using the global values
2758 	 * for ncpus and ncpus_online.
2759 	 */
2760 	if (newpsetid == ZONE_PS_INVAL) {
2761 		zone->zone_ncpus = 0;
2762 		zone->zone_ncpus_online = 0;
2763 	}
2764 }
2765 
2766 /*
2767  * Walk the list of active zones and issue the provided callback for
2768  * each of them.
2769  *
2770  * Caller must not be holding any locks that may be acquired under
2771  * zonehash_lock.  See comment at the beginning of the file for a list of
2772  * common locks and their interactions with zones.
2773  */
2774 int
2775 zone_walk(int (*cb)(zone_t *, void *), void *data)
2776 {
2777 	zone_t *zone;
2778 	int ret = 0;
2779 	zone_status_t status;
2780 
2781 	mutex_enter(&zonehash_lock);
2782 	for (zone = list_head(&zone_active); zone != NULL;
2783 	    zone = list_next(&zone_active, zone)) {
2784 		/*
2785 		 * Skip zones that shouldn't be externally visible.
2786 		 */
2787 		status = zone_status_get(zone);
2788 		if (status < ZONE_IS_READY || status > ZONE_IS_DOWN)
2789 			continue;
2790 		/*
2791 		 * Bail immediately if any callback invocation returns a
2792 		 * non-zero value.
2793 		 */
2794 		ret = (*cb)(zone, data);
2795 		if (ret != 0)
2796 			break;
2797 	}
2798 	mutex_exit(&zonehash_lock);
2799 	return (ret);
2800 }
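
/*
 * A minimal sketch of a zone_walk() callback; the names below are
 * hypothetical and for illustration only:
 *
 *	static int
 *	count_zones_cb(zone_t *zp, void *arg)
 *	{
 *		(*(uint_t *)arg)++;
 *		return (0);	(returning non-zero would stop the walk)
 *	}
 *
 *	uint_t nzones = 0;
 *	(void) zone_walk(count_zones_cb, &nzones);
 */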
2801 
2802 static int
2803 zone_set_root(zone_t *zone, const char *upath)
2804 {
2805 	vnode_t *vp;
2806 	int trycount;
2807 	int error = 0;
2808 	char *path;
2809 	struct pathname upn, pn;
2810 	size_t pathlen;
2811 
2812 	if ((error = pn_get((char *)upath, UIO_USERSPACE, &upn)) != 0)
2813 		return (error);
2814 
2815 	pn_alloc(&pn);
2816 
2817 	/* prevent infinite loop */
2818 	trycount = 10;
2819 	for (;;) {
2820 		if (--trycount <= 0) {
2821 			error = ESTALE;
2822 			goto out;
2823 		}
2824 
2825 		if ((error = lookuppn(&upn, &pn, FOLLOW, NULLVPP, &vp)) == 0) {
2826 			/*
2827 			 * VOP_ACCESS() may cover 'vp' with a new
2828 			 * filesystem, if 'vp' is an autoFS vnode.
2829 			 * Get the new 'vp' if so.
2830 			 */
2831 			if ((error =
2832 			    VOP_ACCESS(vp, VEXEC, 0, CRED(), NULL)) == 0 &&
2833 			    (!vn_ismntpt(vp) ||
2834 			    (error = traverse(&vp)) == 0)) {
2835 				pathlen = pn.pn_pathlen + 2;
2836 				path = kmem_alloc(pathlen, KM_SLEEP);
2837 				(void) strncpy(path, pn.pn_path,
2838 				    pn.pn_pathlen + 1);
2839 				path[pathlen - 2] = '/';
2840 				path[pathlen - 1] = '\0';
2841 				pn_free(&pn);
2842 				pn_free(&upn);
2843 
2844 				/* Success! */
2845 				break;
2846 			}
2847 			VN_RELE(vp);
2848 		}
2849 		if (error != ESTALE)
2850 			goto out;
2851 	}
2852 
2853 	ASSERT(error == 0);
2854 	zone->zone_rootvp = vp;		/* we hold a reference to vp */
2855 	zone->zone_rootpath = path;
2856 	zone->zone_rootpathlen = pathlen;
2857 	if (pathlen > 5 && strcmp(path + pathlen - 5, "/lu/") == 0)
2858 		zone->zone_flags |= ZF_IS_SCRATCH;
2859 	return (0);
2860 
2861 out:
2862 	pn_free(&pn);
2863 	pn_free(&upn);
2864 	return (error);
2865 }
2866 
2867 #define	isalnum(c)	(((c) >= '0' && (c) <= '9') || \
2868 			((c) >= 'a' && (c) <= 'z') || \
2869 			((c) >= 'A' && (c) <= 'Z'))
2870 
2871 static int
2872 zone_set_name(zone_t *zone, const char *uname)
2873 {
2874 	char *kname = kmem_zalloc(ZONENAME_MAX, KM_SLEEP);
2875 	size_t len;
2876 	int i, err;
2877 
2878 	if ((err = copyinstr(uname, kname, ZONENAME_MAX, &len)) != 0) {
2879 		kmem_free(kname, ZONENAME_MAX);
2880 		return (err);	/* EFAULT or ENAMETOOLONG */
2881 	}
2882 
2883 	/* must be less than ZONENAME_MAX */
2884 	if (len == ZONENAME_MAX && kname[ZONENAME_MAX - 1] != '\0') {
2885 		kmem_free(kname, ZONENAME_MAX);
2886 		return (EINVAL);
2887 	}
2888 
2889 	/*
2890 	 * Name must start with an alphanumeric and must contain only
2891 	 * alphanumerics, '-', '_' and '.'.
2892 	 */
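	/*
	 * For example, "web01" and "db_zone.2" are accepted, while "-zone"
	 * and "my zone" are rejected.
	 */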
2893 	if (!isalnum(kname[0])) {
2894 		kmem_free(kname, ZONENAME_MAX);
2895 		return (EINVAL);
2896 	}
2897 	for (i = 1; i < len - 1; i++) {
2898 		if (!isalnum(kname[i]) && kname[i] != '-' && kname[i] != '_' &&
2899 		    kname[i] != '.') {
2900 			kmem_free(kname, ZONENAME_MAX);
2901 			return (EINVAL);
2902 		}
2903 	}
2904 
2905 	zone->zone_name = kname;
2906 	return (0);
2907 }
2908 
2909 /*
2910  * Gets the 32-bit hostid of the specified zone as an unsigned int.  If 'zonep'
2911  * is NULL or it points to a zone with no hostid emulation, then the machine's
2912  * hostid (i.e., the global zone's hostid) is returned.  This function returns
2913  * zero if neither the zone nor the host machine (global zone) has a hostid.  It
2914  * returns HW_INVALID_HOSTID if the function attempts to return the machine's
2915  * hostid and the machine's hostid is invalid.
2916  */
2917 uint32_t
2918 zone_get_hostid(zone_t *zonep)
2919 {
2920 	unsigned long machine_hostid;
2921 
2922 	if (zonep == NULL || zonep->zone_hostid == HW_INVALID_HOSTID) {
2923 		if (ddi_strtoul(hw_serial, NULL, 10, &machine_hostid) != 0)
2924 			return (HW_INVALID_HOSTID);
2925 		return ((uint32_t)machine_hostid);
2926 	}
2927 	return (zonep->zone_hostid);
2928 }
2929 
2930 /*
2931  * Similar to thread_create(), but makes sure the thread is in the appropriate
2932  * zone's zsched process (curproc->p_zone->zone_zsched) before returning.
2933  */
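/*
 * An illustrative sketch of a hypothetical caller (NULL/0 request the
 * default stack, as with thread_create()):
 *
 *	kthread_t *t;
 *
 *	t = zthread_create(NULL, 0, my_zone_worker, arg, 0, minclsyspri);
 *
 * Threads created this way must terminate via zthread_exit(), not
 * thread_exit().
 */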
2934 /*ARGSUSED*/
2935 kthread_t *
2936 zthread_create(
2937     caddr_t stk,
2938     size_t stksize,
2939     void (*proc)(),
2940     void *arg,
2941     size_t len,
2942     pri_t pri)
2943 {
2944 	kthread_t *t;
2945 	zone_t *zone = curproc->p_zone;
2946 	proc_t *pp = zone->zone_zsched;
2947 
2948 	zone_hold(zone);	/* Reference to be dropped when thread exits */
2949 
2950 	/*
2951 	 * No-one should be trying to create threads if the zone is shutting
2952 	 * down and there aren't any kernel threads around.  See comment
2953 	 * in zthread_exit().
2954 	 */
2955 	ASSERT(!(zone->zone_kthreads == NULL &&
2956 	    zone_status_get(zone) >= ZONE_IS_EMPTY));
2957 	/*
2958 	 * Create a thread, but don't let it run until we've finished setting
2959 	 * things up.
2960 	 */
2961 	t = thread_create(stk, stksize, proc, arg, len, pp, TS_STOPPED, pri);
2962 	ASSERT(t->t_forw == NULL);
2963 	mutex_enter(&zone_status_lock);
2964 	if (zone->zone_kthreads == NULL) {
2965 		t->t_forw = t->t_back = t;
2966 	} else {
2967 		kthread_t *tx = zone->zone_kthreads;
2968 
2969 		t->t_forw = tx;
2970 		t->t_back = tx->t_back;
2971 		tx->t_back->t_forw = t;
2972 		tx->t_back = t;
2973 	}
2974 	zone->zone_kthreads = t;
2975 	mutex_exit(&zone_status_lock);
2976 
2977 	mutex_enter(&pp->p_lock);
2978 	t->t_proc_flag |= TP_ZTHREAD;
2979 	project_rele(t->t_proj);
2980 	t->t_proj = project_hold(pp->p_task->tk_proj);
2981 
2982 	/*
2983 	 * Setup complete, let it run.
2984 	 */
2985 	thread_lock(t);
2986 	t->t_schedflag |= TS_ALLSTART;
2987 	setrun_locked(t);
2988 	thread_unlock(t);
2989 
2990 	mutex_exit(&pp->p_lock);
2991 
2992 	return (t);
2993 }
2994 
2995 /*
2996  * Similar to thread_exit().  Must be called by threads created via
2997  * zthread_create().
2998  */
2999 void
3000 zthread_exit(void)
3001 {
3002 	kthread_t *t = curthread;
3003 	proc_t *pp = curproc;
3004 	zone_t *zone = pp->p_zone;
3005 
3006 	mutex_enter(&zone_status_lock);
3007 
3008 	/*
3009 	 * Reparent to p0
3010 	 */
3011 	kpreempt_disable();
3012 	mutex_enter(&pp->p_lock);
3013 	t->t_proc_flag &= ~TP_ZTHREAD;
3014 	t->t_procp = &p0;
3015 	hat_thread_exit(t);
3016 	mutex_exit(&pp->p_lock);
3017 	kpreempt_enable();
3018 
3019 	if (t->t_back == t) {
3020 		ASSERT(t->t_forw == t);
3021 		/*
3022 		 * If the zone is empty, once the thread count
3023 		 * goes to zero no further kernel threads can be
3024 		 * created.  This is because if the creator is a process
3025 		 * in the zone, then it must have exited before the zone
3026 		 * state could be set to ZONE_IS_EMPTY.
3027 		 * Otherwise, if the creator is a kernel thread in the
3028 		 * zone, the thread count is non-zero.
3029 		 *
3030 		 * This really means that non-zone kernel threads should
3031 		 * not create zone kernel threads.
3032 		 */
3033 		zone->zone_kthreads = NULL;
3034 		if (zone_status_get(zone) == ZONE_IS_EMPTY) {
3035 			zone_status_set(zone, ZONE_IS_DOWN);
3036 			/*
3037 			 * Remove any CPU caps on this zone.
3038 			 */
3039 			cpucaps_zone_remove(zone);
3040 		}
3041 	} else {
3042 		t->t_forw->t_back = t->t_back;
3043 		t->t_back->t_forw = t->t_forw;
3044 		if (zone->zone_kthreads == t)
3045 			zone->zone_kthreads = t->t_forw;
3046 	}
3047 	mutex_exit(&zone_status_lock);
3048 	zone_rele(zone);
3049 	thread_exit();
3050 	/* NOTREACHED */
3051 }
3052 
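/*
 * Point *vpp (typically a process's current or root directory pointer) at
 * the supplied vnode, holding the new vnode, releasing the old one, and
 * notifying the audit subsystem of the change.
 */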
3053 static void
3054 zone_chdir(vnode_t *vp, vnode_t **vpp, proc_t *pp)
3055 {
3056 	vnode_t *oldvp;
3057 
3058 	/* we're going to hold a reference here to the directory */
3059 	VN_HOLD(vp);
3060 
3061 	if (audit_active)	/* update abs cwd/root path see c2audit.c */
3062 		audit_chdirec(vp, vpp);
3063 
3064 	mutex_enter(&pp->p_lock);
3065 	oldvp = *vpp;
3066 	*vpp = vp;
3067 	mutex_exit(&pp->p_lock);
3068 	if (oldvp != NULL)
3069 		VN_RELE(oldvp);
3070 }
3071 
3072 /*
3073  * Convert an rctl value represented by an nvlist_t into an rctl_val_t.
3074  */
3075 static int
3076 nvlist2rctlval(nvlist_t *nvl, rctl_val_t *rv)
3077 {
3078 	nvpair_t *nvp = NULL;
3079 	boolean_t priv_set = B_FALSE;
3080 	boolean_t limit_set = B_FALSE;
3081 	boolean_t action_set = B_FALSE;
3082 
3083 	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
3084 		const char *name;
3085 		uint64_t ui64;
3086 
3087 		name = nvpair_name(nvp);
3088 		if (nvpair_type(nvp) != DATA_TYPE_UINT64)
3089 			return (EINVAL);
3090 		(void) nvpair_value_uint64(nvp, &ui64);
3091 		if (strcmp(name, "privilege") == 0) {
3092 			/*
3093 			 * Currently only privileged values are allowed, but
3094 			 * this may change in the future.
3095 			 */
3096 			if (ui64 != RCPRIV_PRIVILEGED)
3097 				return (EINVAL);
3098 			rv->rcv_privilege = ui64;
3099 			priv_set = B_TRUE;
3100 		} else if (strcmp(name, "limit") == 0) {
3101 			rv->rcv_value = ui64;
3102 			limit_set = B_TRUE;
3103 		} else if (strcmp(name, "action") == 0) {
3104 			if (ui64 != RCTL_LOCAL_NOACTION &&
3105 			    ui64 != RCTL_LOCAL_DENY)
3106 				return (EINVAL);
3107 			rv->rcv_flagaction = ui64;
3108 			action_set = B_TRUE;
3109 		} else {
3110 			return (EINVAL);
3111 		}
3112 	}
3113 
3114 	if (!(priv_set && limit_set && action_set))
3115 		return (EINVAL);
3116 	rv->rcv_action_signal = 0;
3117 	rv->rcv_action_recipient = NULL;
3118 	rv->rcv_action_recip_pid = -1;
3119 	rv->rcv_firing_time = 0;
3120 
3121 	return (0);
3122 }
3123 
3124 /*
3125  * Non-global zone version of start_init.
3126  */
3127 void
3128 zone_start_init(void)
3129 {
3130 	proc_t *p = ttoproc(curthread);
3131 	zone_t *z = p->p_zone;
3132 
3133 	ASSERT(!INGLOBALZONE(curproc));
3134 
3135 	/*
3136 	 * For all purposes (ZONE_ATTR_INITPID and restart_init),
3137 	 * storing just the pid of init is sufficient.
3138 	 */
3139 	z->zone_proc_initpid = p->p_pid;
3140 
3141 	/*
3142 	 * We maintain zone_boot_err so that we can return the cause of the
3143 	 * failure back to the caller of the zone_boot syscall.
3144 	 */
3145 	p->p_zone->zone_boot_err = start_init_common();
3146 
3147 	/*
3148 	 * We will prevent booting zones from becoming running zones if the
3149 	 * global zone is shutting down.
3150 	 */
3151 	mutex_enter(&zone_status_lock);
3152 	if (z->zone_boot_err != 0 || zone_status_get(global_zone) >=
3153 	    ZONE_IS_SHUTTING_DOWN) {
3154 		/*
3155 		 * Make sure we are still in the booting state-- we could have
3156 		 * raced and already be shutting down, or even further along.
3157 		 */
3158 		if (zone_status_get(z) == ZONE_IS_BOOTING) {
3159 			zone_status_set(z, ZONE_IS_SHUTTING_DOWN);
3160 		}
3161 		mutex_exit(&zone_status_lock);
3162 		/* It's gone bad, dispose of the process */
3163 		if (proc_exit(CLD_EXITED, z->zone_boot_err) != 0) {
3164 			mutex_enter(&p->p_lock);
3165 			ASSERT(p->p_flag & SEXITLWPS);
3166 			lwp_exit();
3167 		}
3168 	} else {
3169 		if (zone_status_get(z) == ZONE_IS_BOOTING)
3170 			zone_status_set(z, ZONE_IS_RUNNING);
3171 		mutex_exit(&zone_status_lock);
3172 		/* cause the process to return to userland. */
3173 		lwp_rtt();
3174 	}
3175 }
3176 
3177 struct zsched_arg {
3178 	zone_t *zone;
3179 	nvlist_t *nvlist;
3180 };
3181 
3182 /*
3183  * Per-zone "sched" workalike.  The similarity to "sched" doesn't have
3184  * anything to do with scheduling, but rather with the fact that
3185  * per-zone kernel threads are parented to zsched, just like regular
3186  * kernel threads are parented to sched (p0).
3187  *
3188  * zsched is also responsible for launching init for the zone.
3189  */
3190 static void
3191 zsched(void *arg)
3192 {
3193 	struct zsched_arg *za = arg;
3194 	proc_t *pp = curproc;
3195 	proc_t *initp = proc_init;
3196 	zone_t *zone = za->zone;
3197 	cred_t *cr, *oldcred;
3198 	rctl_set_t *set;
3199 	rctl_alloc_gp_t *gp;
3200 	contract_t *ct = NULL;
3201 	task_t *tk, *oldtk;
3202 	rctl_entity_p_t e;
3203 	kproject_t *pj;
3204 
3205 	nvlist_t *nvl = za->nvlist;
3206 	nvpair_t *nvp = NULL;
3207 
3208 	bcopy("zsched", PTOU(pp)->u_psargs, sizeof ("zsched"));
3209 	bcopy("zsched", PTOU(pp)->u_comm, sizeof ("zsched"));
3210 	PTOU(pp)->u_argc = 0;
3211 	PTOU(pp)->u_argv = NULL;
3212 	PTOU(pp)->u_envp = NULL;
3213 	closeall(P_FINFO(pp));
3214 
3215 	/*
3216 	 * We are this zone's "zsched" process.  As the zone isn't generally
3217 	 * visible yet we don't need to grab any locks before initializing its
3218 	 * zone_zsched pointer.
3219 	 */
3220 	zone_hold(zone);  /* this hold is released by zone_destroy() */
3221 	zone->zone_zsched = pp;
3222 	mutex_enter(&pp->p_lock);
3223 	pp->p_zone = zone;
3224 	mutex_exit(&pp->p_lock);
3225 
3226 	/*
3227 	 * Disassociate process from its 'parent'; parent ourselves to init
3228 	 * (pid 1) and change other values as needed.
3229 	 */
3230 	sess_create();
3231 
3232 	mutex_enter(&pidlock);
3233 	proc_detach(pp);
3234 	pp->p_ppid = 1;
3235 	pp->p_flag |= SZONETOP;
3236 	pp->p_ancpid = 1;
3237 	pp->p_parent = initp;
3238 	pp->p_psibling = NULL;
3239 	if (initp->p_child)
3240 		initp->p_child->p_psibling = pp;
3241 	pp->p_sibling = initp->p_child;
3242 	initp->p_child = pp;
3243 
3244 	/* Decrement what newproc() incremented. */
3245 	upcount_dec(crgetruid(CRED()), GLOBAL_ZONEID);
3246 	/*
3247 	 * Our credentials are about to become kcred-like, so we don't care
3248 	 * about the caller's ruid.
3249 	 */
3250 	upcount_inc(crgetruid(kcred), zone->zone_id);
3251 	mutex_exit(&pidlock);
3252 
3253 	/*
3254 	 * getting out of global zone, so decrement lwp counts
3255 	 */
3256 	pj = pp->p_task->tk_proj;
3257 	mutex_enter(&global_zone->zone_nlwps_lock);
3258 	pj->kpj_nlwps -= pp->p_lwpcnt;
3259 	global_zone->zone_nlwps -= pp->p_lwpcnt;
3260 	mutex_exit(&global_zone->zone_nlwps_lock);
3261 
3262 	/*
3263 	 * Decrement locked memory counts on old zone and project.
3264 	 */
3265 	mutex_enter(&global_zone->zone_mem_lock);
3266 	global_zone->zone_locked_mem -= pp->p_locked_mem;
3267 	pj->kpj_data.kpd_locked_mem -= pp->p_locked_mem;
3268 	mutex_exit(&global_zone->zone_mem_lock);
3269 
3270 	/*
3271 	 * Create and join a new task in project '0' of this zone.
3272 	 *
3273 	 * We don't need to call holdlwps() since we know we're the only lwp in
3274 	 * this process.
3275 	 *
3276 	 * task_join() returns with p_lock held.
3277 	 */
3278 	tk = task_create(0, zone);
3279 	mutex_enter(&cpu_lock);
3280 	oldtk = task_join(tk, 0);
3281 
3282 	pj = pp->p_task->tk_proj;
3283 
3284 	mutex_enter(&zone->zone_mem_lock);
3285 	zone->zone_locked_mem += pp->p_locked_mem;
3286 	pj->kpj_data.kpd_locked_mem += pp->p_locked_mem;
3287 	mutex_exit(&zone->zone_mem_lock);
3288 
3289 	/*
3290 	 * Add the lwp counts to zsched's zone, and increment the project's
3291 	 * task count due to the task created by task_create() above.
3292 	 */
3293 
3294 	mutex_enter(&zone->zone_nlwps_lock);
3295 	pj->kpj_nlwps += pp->p_lwpcnt;
3296 	pj->kpj_ntasks += 1;
3297 	zone->zone_nlwps += pp->p_lwpcnt;
3298 	mutex_exit(&zone->zone_nlwps_lock);
3299 
3300 	mutex_exit(&curproc->p_lock);
3301 	mutex_exit(&cpu_lock);
3302 	task_rele(oldtk);
3303 
3304 	/*
3305 	 * The process was created by a process in the global zone, hence the
3306 	 * credentials are wrong.  We might as well have kcred-ish credentials.
3307 	 */
3308 	cr = zone->zone_kcred;
3309 	crhold(cr);
3310 	mutex_enter(&pp->p_crlock);
3311 	oldcred = pp->p_cred;
3312 	pp->p_cred = cr;
3313 	mutex_exit(&pp->p_crlock);
3314 	crfree(oldcred);
3315 
3316 	/*
3317 	 * Hold credentials again (for thread)
3318 	 */
3319 	crhold(cr);
3320 
3321 	/*
3322 	 * p_lwpcnt can't change since this is a kernel process.
3323 	 */
3324 	crset(pp, cr);
3325 
3326 	/*
3327 	 * Chroot
3328 	 */
3329 	zone_chdir(zone->zone_rootvp, &PTOU(pp)->u_cdir, pp);
3330 	zone_chdir(zone->zone_rootvp, &PTOU(pp)->u_rdir, pp);
3331 
3332 	/*
3333 	 * Initialize zone's rctl set.
3334 	 */
3335 	set = rctl_set_create();
3336 	gp = rctl_set_init_prealloc(RCENTITY_ZONE);
3337 	mutex_enter(&pp->p_lock);
3338 	e.rcep_p.zone = zone;
3339 	e.rcep_t = RCENTITY_ZONE;
3340 	zone->zone_rctls = rctl_set_init(RCENTITY_ZONE, pp, &e, set, gp);
3341 	mutex_exit(&pp->p_lock);
3342 	rctl_prealloc_destroy(gp);
3343 
3344 	/*
3345 	 * Apply the rctls passed in to zone_create().  This is basically a list
3346 	 * assignment: all of the old values are removed and the new ones
3347 	 * inserted.  That is, if an empty list is passed in, all values are
3348 	 * removed.
3349 	 */
3350 	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
3351 		rctl_dict_entry_t *rde;
3352 		rctl_hndl_t hndl;
3353 		char *name;
3354 		nvlist_t **nvlarray;
3355 		uint_t i, nelem;
3356 		int error;	/* For ASSERT()s */
3357 
3358 		name = nvpair_name(nvp);
3359 		hndl = rctl_hndl_lookup(name);
3360 		ASSERT(hndl != -1);
3361 		rde = rctl_dict_lookup_hndl(hndl);
3362 		ASSERT(rde != NULL);
3363 
3364 		for (; /* ever */; ) {
3365 			rctl_val_t oval;
3366 
3367 			mutex_enter(&pp->p_lock);
3368 			error = rctl_local_get(hndl, NULL, &oval, pp);
3369 			mutex_exit(&pp->p_lock);
3370 			ASSERT(error == 0);	/* Can't fail for RCTL_FIRST */
3371 			ASSERT(oval.rcv_privilege != RCPRIV_BASIC);
3372 			if (oval.rcv_privilege == RCPRIV_SYSTEM)
3373 				break;
3374 			mutex_enter(&pp->p_lock);
3375 			error = rctl_local_delete(hndl, &oval, pp);
3376 			mutex_exit(&pp->p_lock);
3377 			ASSERT(error == 0);
3378 		}
3379 		error = nvpair_value_nvlist_array(nvp, &nvlarray, &nelem);
3380 		ASSERT(error == 0);
3381 		for (i = 0; i < nelem; i++) {
3382 			rctl_val_t *nvalp;
3383 
3384 			nvalp = kmem_cache_alloc(rctl_val_cache, KM_SLEEP);
3385 			error = nvlist2rctlval(nvlarray[i], nvalp);
3386 			ASSERT(error == 0);
3387 			/*
3388 			 * rctl_local_insert can fail if the value being
3389 			 * inserted is a duplicate; this is OK.
3390 			 */
3391 			mutex_enter(&pp->p_lock);
3392 			if (rctl_local_insert(hndl, nvalp, pp) != 0)
3393 				kmem_cache_free(rctl_val_cache, nvalp);
3394 			mutex_exit(&pp->p_lock);
3395 		}
3396 	}
3397 	/*
3398 	 * Tell the world that we're done setting up.
3399 	 *
3400 	 * At this point we want to set the zone status to ZONE_IS_INITIALIZED
3401 	 * and atomically set the zone's processor set visibility.  Once
3402 	 * we drop pool_lock() this zone will automatically get updated
3403 	 * to reflect any future changes to the pools configuration.
3404 	 *
3405 	 * Note that after we drop the locks below (zonehash_lock in
3406 	 * particular) other operations such as a zone_getattr call can
3407 	 * now proceed and observe the zone. That is the reason for doing a
3408 	 * state transition to the INITIALIZED state.
3409 	 */
3410 	pool_lock();
3411 	mutex_enter(&cpu_lock);
3412 	mutex_enter(&zonehash_lock);
3413 	zone_uniqid(zone);
3414 	zone_zsd_configure(zone);
3415 	if (pool_state == POOL_ENABLED)
3416 		zone_pset_set(zone, pool_default->pool_pset->pset_id);
3417 	mutex_enter(&zone_status_lock);
3418 	ASSERT(zone_status_get(zone) == ZONE_IS_UNINITIALIZED);
3419 	zone_status_set(zone, ZONE_IS_INITIALIZED);
3420 	mutex_exit(&zone_status_lock);
3421 	mutex_exit(&zonehash_lock);
3422 	mutex_exit(&cpu_lock);
3423 	pool_unlock();
3424 
3425 	/* Now call the create callback for this key */
3426 	zsd_apply_all_keys(zsd_apply_create, zone);
3427 
3428 	/* The callbacks are complete. Mark ZONE_IS_READY */
3429 	mutex_enter(&zone_status_lock);
3430 	ASSERT(zone_status_get(zone) == ZONE_IS_INITIALIZED);
3431 	zone_status_set(zone, ZONE_IS_READY);
3432 	mutex_exit(&zone_status_lock);
3433 
3434 	/*
3435 	 * Once we see the zone transition to the ZONE_IS_BOOTING state,
3436 	 * we launch init, and set the state to running.
3437 	 */
3438 	zone_status_wait_cpr(zone, ZONE_IS_BOOTING, "zsched");
3439 
3440 	if (zone_status_get(zone) == ZONE_IS_BOOTING) {
3441 		id_t cid;
3442 
3443 		/*
3444 		 * Ok, this is a little complicated.  We need to grab the
3445 		 * zone's pool's scheduling class ID; note that by now, we
3446 		 * are already bound to a pool if we need to be (zoneadmd
3447 		 * will have done that to us while we're in the READY
3448 		 * state).  *But* the scheduling class for the zone's 'init'
3449 		 * must be explicitly passed to newproc, which doesn't
3450 		 * respect pool bindings.
3451 		 *
3452 		 * We hold the pool_lock across the call to newproc() to
3453 		 * close the obvious race: the pool's scheduling class
3454 		 * could change before we manage to create the LWP with
3455 		 * classid 'cid'.
3456 		 */
3457 		pool_lock();
3458 		if (zone->zone_defaultcid > 0)
3459 			cid = zone->zone_defaultcid;
3460 		else
3461 			cid = pool_get_class(zone->zone_pool);
3462 		if (cid == -1)
3463 			cid = defaultcid;
3464 
3465 		/*
3466 		 * If this fails, zone_boot will ultimately fail.  The
3467 		 * state of the zone will be set to SHUTTING_DOWN, and userland
3468 		 * will have to tear down the zone and either fail or try again.
3469 		 */
3470 		if ((zone->zone_boot_err = newproc(zone_start_init, NULL, cid,
3471 		    minclsyspri - 1, &ct)) != 0) {
3472 			mutex_enter(&zone_status_lock);
3473 			zone_status_set(zone, ZONE_IS_SHUTTING_DOWN);
3474 			mutex_exit(&zone_status_lock);
3475 		}
3476 		pool_unlock();
3477 	}
3478 
3479 	/*
3480 	 * Wait for zone_destroy() to be called.  This is what we spend
3481 	 * most of our life doing.
3482 	 */
3483 	zone_status_wait_cpr(zone, ZONE_IS_DYING, "zsched");
3484 
3485 	if (ct)
3486 		/*
3487 		 * At this point the process contract should be empty.
3488 		 * (Though if it isn't, it's not the end of the world.)
3489 		 */
3490 		VERIFY(contract_abandon(ct, curproc, B_TRUE) == 0);
3491 
3492 	/*
3493 	 * Allow kcred to be freed when all referring processes
3494 	 * (including this one) go away.  We can't just do this in
3495 	 * zone_free because we need to wait for the zone_cred_ref to
3496 	 * drop to 0 before calling zone_free, and the existence of
3497 	 * zone_kcred will prevent that.  Thus, we call crfree here to
3498 	 * balance the crdup in zone_create.  The crhold calls earlier
3499 	 * in zsched will be dropped when the thread and process exit.
3500 	 */
3501 	crfree(zone->zone_kcred);
3502 	zone->zone_kcred = NULL;
3503 
3504 	exit(CLD_EXITED, 0);
3505 }
3506 
3507 /*
3508  * Helper function to determine if there are any submounts of the
3509  * provided path.  Used to make sure the zone doesn't "inherit" any
3510  * mounts from before it is created.
3511  */
3512 static uint_t
3513 zone_mount_count(const char *rootpath)
3514 {
3515 	vfs_t *vfsp;
3516 	uint_t count = 0;
3517 	size_t rootpathlen = strlen(rootpath);
3518 
3519 	/*
3520 	 * Holding zonehash_lock prevents race conditions with
3521 	 * vfs_list_add()/vfs_list_remove() since we serialize with
3522 	 * zone_find_by_path().
3523 	 */
3524 	ASSERT(MUTEX_HELD(&zonehash_lock));
3525 	/*
3526 	 * The rootpath must end with a '/'
3527 	 */
3528 	ASSERT(rootpath[rootpathlen - 1] == '/');
3529 
3530 	/*
3531 	 * This intentionally does not count the rootpath itself if that
3532 	 * happens to be a mount point.
3533 	 */
3534 	vfs_list_read_lock();
3535 	vfsp = rootvfs;
3536 	do {
3537 		if (strncmp(rootpath, refstr_value(vfsp->vfs_mntpt),
3538 		    rootpathlen) == 0)
3539 			count++;
3540 		vfsp = vfsp->vfs_next;
3541 	} while (vfsp != rootvfs);
3542 	vfs_list_unlock();
3543 	return (count);
3544 }
3545 
3546 /*
3547  * Helper function to make sure that a zone created on 'rootpath'
3548  * wouldn't end up containing other zones' rootpaths.
3549  */
3550 static boolean_t
3551 zone_is_nested(const char *rootpath)
3552 {
3553 	zone_t *zone;
3554 	size_t rootpathlen = strlen(rootpath);
3555 	size_t len;
3556 
3557 	ASSERT(MUTEX_HELD(&zonehash_lock));
3558 
3559 	/*
3560 	 * zone_set_root() appended '/' and '\0' at the end of rootpath
3561 	 */
3562 	if ((rootpathlen <= 3) && (rootpath[0] == '/') &&
3563 	    (rootpath[1] == '/') && (rootpath[2] == '\0'))
3564 		return (B_TRUE);
3565 
3566 	for (zone = list_head(&zone_active); zone != NULL;
3567 	    zone = list_next(&zone_active, zone)) {
3568 		if (zone == global_zone)
3569 			continue;
3570 		len = strlen(zone->zone_rootpath);
3571 		if (strncmp(rootpath, zone->zone_rootpath,
3572 		    MIN(rootpathlen, len)) == 0)
3573 			return (B_TRUE);
3574 	}
3575 	return (B_FALSE);
3576 }
3577 
3578 static int
3579 zone_set_privset(zone_t *zone, const priv_set_t *zone_privs,
3580     size_t zone_privssz)
3581 {
3582 	priv_set_t *privs;
3583 
3584 	if (zone_privssz < sizeof (priv_set_t))
3585 		return (ENOMEM);
3586 	privs = kmem_alloc(sizeof (priv_set_t), KM_SLEEP);
3587 	if (copyin(zone_privs, privs, sizeof (priv_set_t))) {
3588 		kmem_free(privs, sizeof (priv_set_t));
3589 		return (EFAULT);
3590 	}
3591 
3592 	zone->zone_privset = privs;
3593 	return (0);
3594 }
3595 
3596 /*
3597  * We make creative use of nvlists to pass in rctls from userland.  The list is
3598  * a list of the following structures:
3599  *
3600  * (name = rctl_name, value = nvpair_list_array)
3601  *
3602  * Where each element of the nvpair_list_array is of the form:
3603  *
3604  * [(name = "privilege", value = RCPRIV_PRIVILEGED),
3605  * 	(name = "limit", value = uint64_t),
3606  * 	(name = "action", value = (RCTL_LOCAL_NOACTION || RCTL_LOCAL_DENY))]
3607  */
3608 static int
3609 parse_rctls(caddr_t ubuf, size_t buflen, nvlist_t **nvlp)
3610 {
3611 	nvpair_t *nvp = NULL;
3612 	nvlist_t *nvl = NULL;
3613 	char *kbuf;
3614 	int error;
3615 	rctl_val_t rv;
3616 
3617 	*nvlp = NULL;
3618 
3619 	if (buflen == 0)
3620 		return (0);
3621 
3622 	if ((kbuf = kmem_alloc(buflen, KM_NOSLEEP)) == NULL)
3623 		return (ENOMEM);
3624 	if (copyin(ubuf, kbuf, buflen)) {
3625 		error = EFAULT;
3626 		goto out;
3627 	}
3628 	if (nvlist_unpack(kbuf, buflen, &nvl, KM_SLEEP) != 0) {
3629 		/*
3630 		 * nvl may have been allocated/free'd, but the value set to
3631 		 * non-NULL, so we reset it here.
3632 		 */
3633 		nvl = NULL;
3634 		error = EINVAL;
3635 		goto out;
3636 	}
3637 	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
3638 		rctl_dict_entry_t *rde;
3639 		rctl_hndl_t hndl;
3640 		nvlist_t **nvlarray;
3641 		uint_t i, nelem;
3642 		char *name;
3643 
3644 		error = EINVAL;
3645 		name = nvpair_name(nvp);
3646 		if (strncmp(nvpair_name(nvp), "zone.", sizeof ("zone.") - 1)
3647 		    != 0 || nvpair_type(nvp) != DATA_TYPE_NVLIST_ARRAY) {
3648 			goto out;
3649 		}
3650 		if ((hndl = rctl_hndl_lookup(name)) == -1) {
3651 			goto out;
3652 		}
3653 		rde = rctl_dict_lookup_hndl(hndl);
3654 		error = nvpair_value_nvlist_array(nvp, &nvlarray, &nelem);
3655 		ASSERT(error == 0);
3656 		for (i = 0; i < nelem; i++) {
3657 			if (error = nvlist2rctlval(nvlarray[i], &rv))
3658 				goto out;
3659 		}
3660 		if (rctl_invalid_value(rde, &rv)) {
3661 			error = EINVAL;
3662 			goto out;
3663 		}
3664 	}
3665 	error = 0;
3666 	*nvlp = nvl;
3667 out:
3668 	kmem_free(kbuf, buflen);
3669 	if (error && nvl != NULL)
3670 		nvlist_free(nvl);
3671 	return (error);
3672 }
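/*
 * Editorial sketch, not part of zone.c: one way a userland producer might
 * build and pack an rctl buffer in the nvlist layout documented above
 * parse_rctls().  It assumes libnvpair and the RCPRIV_ and RCTL_LOCAL_
 * constants from <sys/rctl.h>; the resource name "zone.max-lwps" and the
 * limit value are illustrative only.
 */
#include <libnvpair.h>
#include <sys/rctl.h>

static int
pack_zone_rctls(char **bufp, size_t *lenp)
{
	nvlist_t *nvl = NULL, *val = NULL;
	int err;

	if ((err = nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0)) != 0 ||
	    (err = nvlist_alloc(&val, NV_UNIQUE_NAME, 0)) != 0)
		goto out;

	/* One (privilege, limit, action) triple; all three are uint64s. */
	if ((err = nvlist_add_uint64(val, "privilege",
	    RCPRIV_PRIVILEGED)) != 0 ||
	    (err = nvlist_add_uint64(val, "limit", 2000)) != 0 ||
	    (err = nvlist_add_uint64(val, "action", RCTL_LOCAL_DENY)) != 0)
		goto out;

	/* (name = rctl_name, value = nvpair_list_array), as described above. */
	if ((err = nvlist_add_nvlist_array(nvl, "zone.max-lwps", &val, 1)) != 0)
		goto out;

	/* Pack into a contiguous buffer; libnvpair allocates it for us. */
	*bufp = NULL;
	*lenp = 0;
	err = nvlist_pack(nvl, bufp, lenp, NV_ENCODE_NATIVE, 0);
out:
	nvlist_free(val);
	nvlist_free(nvl);
	return (err);
}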
3673 
3674 int
3675 zone_create_error(int er_error, int er_ext, int *er_out) {
3676 	if (er_out != NULL) {
3677 		if (copyout(&er_ext, er_out, sizeof (int))) {
3678 			return (set_errno(EFAULT));
3679 		}
3680 	}
3681 	return (set_errno(er_error));
3682 }
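/*
 * Editorial sketch, not part of zone.c: the "extended error" channel used
 * by zone_create_error() is a single int copied out to a caller-supplied
 * pointer alongside the ordinary errno.  A hypothetical caller that passed
 * &xerr to the zone-creation interface could refine the error it got back
 * using the ZE_ codes from <sys/zone.h>, roughly like this.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/zone.h>

static void
explain_zone_create_failure(int xerr)
{
	if (errno == ENOTSUP && xerr == ZE_CHROOTED)
		(void) fprintf(stderr, "cannot create a zone from a chroot\n");
	else if (errno == EBUSY && xerr == ZE_AREMOUNTS)
		(void) fprintf(stderr, "zone root has active submounts\n");
	else if (xerr == ZE_LABELINUSE)
		(void) fprintf(stderr, "another zone already uses this label\n");
	else
		(void) fprintf(stderr, "zone creation failed: %s\n",
		    strerror(errno));
}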
3683 
3684 static int
3685 zone_set_label(zone_t *zone, const bslabel_t *lab, uint32_t doi)
3686 {
3687 	ts_label_t *tsl;
3688 	bslabel_t blab;
3689 
3690 	/* Get label from user */
3691 	if (copyin(lab, &blab, sizeof (blab)) != 0)
3692 		return (EFAULT);
3693 	tsl = labelalloc(&blab, doi, KM_NOSLEEP);
3694 	if (tsl == NULL)
3695 		return (ENOMEM);
3696 
3697 	zone->zone_slabel = tsl;
3698 	return (0);
3699 }
3700 
3701 /*
3702  * Parses a comma-separated list of ZFS datasets into a per-zone dictionary.
3703  */
3704 static int
3705 parse_zfs(zone_t *zone, caddr_t ubuf, size_t buflen)
3706 {
3707 	char *kbuf;
3708 	char *dataset, *next;
3709 	zone_dataset_t *zd;
3710 	size_t len;
3711 
3712 	if (ubuf == NULL || buflen == 0)
3713 		return (0);
3714 
3715 	if ((kbuf = kmem_alloc(buflen, KM_NOSLEEP)) == NULL)
3716 		return (ENOMEM);
3717 
3718 	if (copyin(ubuf, kbuf, buflen) != 0) {
3719 		kmem_free(kbuf, buflen);
3720 		return (EFAULT);
3721 	}
3722 
3723 	dataset = next = kbuf;
3724 	for (;;) {
3725 		zd = kmem_alloc(sizeof (zone_dataset_t), KM_SLEEP);
3726 
3727 		next = strchr(dataset, ',');
3728 
3729 		if (next == NULL)
3730 			len = strlen(dataset);
3731 		else
3732 			len = next - dataset;
3733 
3734 		zd->zd_dataset = kmem_alloc(len + 1, KM_SLEEP);
3735 		bcopy(dataset, zd->zd_dataset, len);
3736 		zd->zd_dataset[len] = '\0';
3737 
3738 		list_insert_head(&zone->zone_datasets, zd);
3739 
3740 		if (next == NULL)
3741 			break;
3742 
3743 		dataset = next + 1;
3744 	}
3745 
3746 	kmem_free(kbuf, buflen);
3747 	return (0);
3748 }
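/*
 * Editorial sketch, not part of zone.c: the buffer consumed by parse_zfs()
 * is just a NUL-terminated, comma-separated list of ZFS dataset names, and
 * buflen must cover the terminating NUL.  The dataset names below are
 * illustrative only; in practice the list comes from the zone's
 * configuration.
 */
#include <stdio.h>
#include <string.h>

static size_t
build_zfs_dataset_buf(char *buf, size_t bufsz)
{
	/* Two delegated datasets, separated by a comma. */
	(void) snprintf(buf, bufsz, "%s,%s",
	    "tank/zones/myzone", "tank/shared/build");
	return (strlen(buf) + 1);	/* length handed to the kernel */
}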
3749 
3750 /*
3751  * System call to create/initialize a new zone named 'zone_name', rooted
3752  * at 'zone_root', with a zone-wide privilege limit set of 'zone_privs',
3753  * and initialized with the zone-wide rctls described in 'rctlbuf', and
3754  * with labeling set by 'match', 'doi', and 'label'.
3755  *
3756  * If extended error is non-null, we may use it to return more detailed
3757  * error information.
3758  */
3759 static zoneid_t
3760 zone_create(const char *zone_name, const char *zone_root,
3761     const priv_set_t *zone_privs, size_t zone_privssz,
3762     caddr_t rctlbuf, size_t rctlbufsz,
3763     caddr_t zfsbuf, size_t zfsbufsz, int *extended_error,
3764     int match, uint32_t doi, const bslabel_t *label,
3765     int flags)
3766 {
3767 	struct zsched_arg zarg;
3768 	nvlist_t *rctls = NULL;
3769 	proc_t *pp = curproc;
3770 	zone_t *zone, *ztmp;
3771 	zoneid_t zoneid;
3772 	int error;
3773 	int error2 = 0;
3774 	char *str;
3775 	cred_t *zkcr;
3776 	boolean_t insert_label_hash;
3777 
3778 	if (secpolicy_zone_config(CRED()) != 0)
3779 		return (set_errno(EPERM));
3780 
3781 	/* can't boot zone from within chroot environment */
3782 	if (PTOU(pp)->u_rdir != NULL && PTOU(pp)->u_rdir != rootdir)
3783 		return (zone_create_error(ENOTSUP, ZE_CHROOTED,
3784 		    extended_error));
3785 
3786 	zone = kmem_zalloc(sizeof (zone_t), KM_SLEEP);
3787 	zoneid = zone->zone_id = id_alloc(zoneid_space);
3788 	zone->zone_status = ZONE_IS_UNINITIALIZED;
3789 	zone->zone_pool = pool_default;
3790 	zone->zone_pool_mod = gethrtime();
3791 	zone->zone_psetid = ZONE_PS_INVAL;
3792 	zone->zone_ncpus = 0;
3793 	zone->zone_ncpus_online = 0;
3794 	zone->zone_restart_init = B_TRUE;
3795 	zone->zone_brand = &native_brand;
3796 	zone->zone_initname = NULL;
3797 	mutex_init(&zone->zone_lock, NULL, MUTEX_DEFAULT, NULL);
3798 	mutex_init(&zone->zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
3799 	mutex_init(&zone->zone_mem_lock, NULL, MUTEX_DEFAULT, NULL);
3800 	cv_init(&zone->zone_cv, NULL, CV_DEFAULT, NULL);
3801 	list_create(&zone->zone_zsd, sizeof (struct zsd_entry),
3802 	    offsetof(struct zsd_entry, zsd_linkage));
3803 	list_create(&zone->zone_datasets, sizeof (zone_dataset_t),
3804 	    offsetof(zone_dataset_t, zd_linkage));
3805 	rw_init(&zone->zone_mlps.mlpl_rwlock, NULL, RW_DEFAULT, NULL);
3806 
3807 	if (flags & ZCF_NET_EXCL) {
3808 		zone->zone_flags |= ZF_NET_EXCL;
3809 	}
3810 
3811 	if ((error = zone_set_name(zone, zone_name)) != 0) {
3812 		zone_free(zone);
3813 		return (zone_create_error(error, 0, extended_error));
3814 	}
3815 
3816 	if ((error = zone_set_root(zone, zone_root)) != 0) {
3817 		zone_free(zone);
3818 		return (zone_create_error(error, 0, extended_error));
3819 	}
3820 	if ((error = zone_set_privset(zone, zone_privs, zone_privssz)) != 0) {
3821 		zone_free(zone);
3822 		return (zone_create_error(error, 0, extended_error));
3823 	}
3824 
3825 	/* initialize node name to be the same as zone name */
3826 	zone->zone_nodename = kmem_alloc(_SYS_NMLN, KM_SLEEP);
3827 	(void) strncpy(zone->zone_nodename, zone->zone_name, _SYS_NMLN);
3828 	zone->zone_nodename[_SYS_NMLN - 1] = '\0';
3829 
3830 	zone->zone_domain = kmem_alloc(_SYS_NMLN, KM_SLEEP);
3831 	zone->zone_domain[0] = '\0';
3832 	zone->zone_hostid = HW_INVALID_HOSTID;
3833 	zone->zone_shares = 1;
3834 	zone->zone_shmmax = 0;
3835 	zone->zone_ipc.ipcq_shmmni = 0;
3836 	zone->zone_ipc.ipcq_semmni = 0;
3837 	zone->zone_ipc.ipcq_msgmni = 0;
3838 	zone->zone_bootargs = NULL;
3839 	zone->zone_initname =
3840 	    kmem_alloc(strlen(zone_default_initname) + 1, KM_SLEEP);
3841 	(void) strcpy(zone->zone_initname, zone_default_initname);
3842 	zone->zone_nlwps = 0;
3843 	zone->zone_nlwps_ctl = INT_MAX;
3844 	zone->zone_locked_mem = 0;
3845 	zone->zone_locked_mem_ctl = UINT64_MAX;
3846 	zone->zone_max_swap = 0;
3847 	zone->zone_max_swap_ctl = UINT64_MAX;
3848 	zone0.zone_lockedmem_kstat = NULL;
3849 	zone0.zone_swapresv_kstat = NULL;
3850 
3851 	/*
3852 	 * Zsched initializes the rctls.
3853 	 */
3854 	zone->zone_rctls = NULL;
3855 
3856 	if ((error = parse_rctls(rctlbuf, rctlbufsz, &rctls)) != 0) {
3857 		zone_free(zone);
3858 		return (zone_create_error(error, 0, extended_error));
3859 	}
3860 
3861 	if ((error = parse_zfs(zone, zfsbuf, zfsbufsz)) != 0) {
3862 		zone_free(zone);
3863 		return (set_errno(error));
3864 	}
3865 
3866 	/*
3867 	 * Read in the trusted system parameters:
3868 	 * match flag and sensitivity label.
3869 	 */
3870 	zone->zone_match = match;
3871 	if (is_system_labeled() && !(zone->zone_flags & ZF_IS_SCRATCH)) {
3872 		/* Fail if requested to set doi to anything but system's doi */
3873 		if (doi != 0 && doi != default_doi) {
3874 			zone_free(zone);
3875 			return (set_errno(EINVAL));
3876 		}
3877 		/* Always apply system's doi to the zone */
3878 		error = zone_set_label(zone, label, default_doi);
3879 		if (error != 0) {
3880 			zone_free(zone);
3881 			return (set_errno(error));
3882 		}
3883 		insert_label_hash = B_TRUE;
3884 	} else {
3885 		/* all zones get an admin_low label if system is not labeled */
3886 		zone->zone_slabel = l_admin_low;
3887 		label_hold(l_admin_low);
3888 		insert_label_hash = B_FALSE;
3889 	}
3890 
3891 	/*
3892 	 * Stop all lwps since that's what normally happens as part of fork().
3893 	 * This needs to happen before we grab any locks to avoid deadlock
3894 	 * (another lwp in the process could be waiting for the held lock).
3895 	 */
3896 	if (curthread != pp->p_agenttp && !holdlwps(SHOLDFORK)) {
3897 		zone_free(zone);
3898 		if (rctls)
3899 			nvlist_free(rctls);
3900 		return (zone_create_error(error, 0, extended_error));
3901 	}
3902 
3903 	if (block_mounts() == 0) {
3904 		mutex_enter(&pp->p_lock);
3905 		if (curthread != pp->p_agenttp)
3906 			continuelwps(pp);
3907 		mutex_exit(&pp->p_lock);
3908 		zone_free(zone);
3909 		if (rctls)
3910 			nvlist_free(rctls);
3911 		return (zone_create_error(error, 0, extended_error));
3912 	}
3913 
3914 	/*
3915 	 * Set up credential for kernel access.  After this, any errors
3916 	 * should go through the dance in errout rather than calling
3917 	 * zone_free directly.
3918 	 */
3919 	zone->zone_kcred = crdup(kcred);
3920 	crsetzone(zone->zone_kcred, zone);
3921 	priv_intersect(zone->zone_privset, &CR_PPRIV(zone->zone_kcred));
3922 	priv_intersect(zone->zone_privset, &CR_EPRIV(zone->zone_kcred));
3923 	priv_intersect(zone->zone_privset, &CR_IPRIV(zone->zone_kcred));
3924 	priv_intersect(zone->zone_privset, &CR_LPRIV(zone->zone_kcred));
3925 
3926 	mutex_enter(&zonehash_lock);
3927 	/*
3928 	 * Make sure zone doesn't already exist.
3929 	 *
3930 	 * If the system and zone are labeled,
3931 	 * make sure no other zone exists that has the same label.
3932 	 */
3933 	if ((ztmp = zone_find_all_by_name(zone->zone_name)) != NULL ||
3934 	    (insert_label_hash &&
3935 	    (ztmp = zone_find_all_by_label(zone->zone_slabel)) != NULL)) {
3936 		zone_status_t status;
3937 
3938 		status = zone_status_get(ztmp);
3939 		if (status == ZONE_IS_READY || status == ZONE_IS_RUNNING)
3940 			error = EEXIST;
3941 		else
3942 			error = EBUSY;
3943 
3944 		if (insert_label_hash)
3945 			error2 = ZE_LABELINUSE;
3946 
3947 		goto errout;
3948 	}
3949 
3950 	/*
3951 	 * Don't allow zone creations which would cause one zone's rootpath to
3952 	 * be accessible from that of another (non-global) zone.
3953 	 */
3954 	if (zone_is_nested(zone->zone_rootpath)) {
3955 		error = EBUSY;
3956 		goto errout;
3957 	}
3958 
3959 	ASSERT(zonecount != 0);		/* check for leaks */
3960 	if (zonecount + 1 > maxzones) {
3961 		error = ENOMEM;
3962 		goto errout;
3963 	}
3964 
3965 	if (zone_mount_count(zone->zone_rootpath) != 0) {
3966 		error = EBUSY;
3967 		error2 = ZE_AREMOUNTS;
3968 		goto errout;
3969 	}
3970 
3971 	/*
3972 	 * Zone is still incomplete, but we need to drop all locks while
3973 	 * zsched() initializes this zone's kernel process.  We
3974 	 * optimistically add the zone to the hashtable and associated
3975 	 * lists so a parallel zone_create() doesn't try to create the
3976 	 * same zone.
3977 	 */
3978 	zonecount++;
3979 	(void) mod_hash_insert(zonehashbyid,
3980 	    (mod_hash_key_t)(uintptr_t)zone->zone_id,
3981 	    (mod_hash_val_t)(uintptr_t)zone);
3982 	str = kmem_alloc(strlen(zone->zone_name) + 1, KM_SLEEP);
3983 	(void) strcpy(str, zone->zone_name);
3984 	(void) mod_hash_insert(zonehashbyname, (mod_hash_key_t)str,
3985 	    (mod_hash_val_t)(uintptr_t)zone);
3986 	if (insert_label_hash) {
3987 		(void) mod_hash_insert(zonehashbylabel,
3988 		    (mod_hash_key_t)zone->zone_slabel, (mod_hash_val_t)zone);
3989 		zone->zone_flags |= ZF_HASHED_LABEL;
3990 	}
3991 
3992 	/*
3993 	 * Insert into active list.  At this point there are no 'hold's
3994 	 * on the zone, but everyone else knows not to use it, so we can
3995 	 * continue to use it.  zsched() will do a zone_hold() if the
3996 	 * newproc() is successful.
3997 	 */
3998 	list_insert_tail(&zone_active, zone);
3999 	mutex_exit(&zonehash_lock);
4000 
4001 	zarg.zone = zone;
4002 	zarg.nvlist = rctls;
4003 	/*
4004 	 * The process, task, and project rctls are probably wrong;
4005 	 * we need an interface to get the default values of all rctls,
4006 	 * and initialize zsched appropriately.  I'm not sure that that
4007 	 * makes much of a difference, though.
4008 	 */
4009 	if (error = newproc(zsched, (void *)&zarg, syscid, minclsyspri, NULL)) {
4010 		/*
4011 		 * We need to undo all globally visible state.
4012 		 */
4013 		mutex_enter(&zonehash_lock);
4014 		list_remove(&zone_active, zone);
4015 		if (zone->zone_flags & ZF_HASHED_LABEL) {
4016 			ASSERT(zone->zone_slabel != NULL);
4017 			(void) mod_hash_destroy(zonehashbylabel,
4018 			    (mod_hash_key_t)zone->zone_slabel);
4019 		}
4020 		(void) mod_hash_destroy(zonehashbyname,
4021 		    (mod_hash_key_t)(uintptr_t)zone->zone_name);
4022 		(void) mod_hash_destroy(zonehashbyid,
4023 		    (mod_hash_key_t)(uintptr_t)zone->zone_id);
4024 		ASSERT(zonecount > 1);
4025 		zonecount--;
4026 		goto errout;
4027 	}
4028 
4029 	/*
4030 	 * Zone creation can't fail from now on.
4031 	 */
4032 
4033 	/*
4034 	 * Create zone kstats
4035 	 */
4036 	zone_kstat_create(zone);
4037 
4038 	/*
4039 	 * Let the other lwps continue.
4040 	 */
4041 	mutex_enter(&pp->p_lock);
4042 	if (curthread != pp->p_agenttp)
4043 		continuelwps(pp);
4044 	mutex_exit(&pp->p_lock);
4045 
4046 	/*
4047 	 * Wait for zsched to finish initializing the zone.
4048 	 */
4049 	zone_status_wait(zone, ZONE_IS_READY);
4050 	/*
4051 	 * The zone is fully visible, so we can let mounts progress.
4052 	 */
4053 	resume_mounts();
4054 	if (rctls)
4055 		nvlist_free(rctls);
4056 
4057 	return (zoneid);
4058 
4059 errout:
4060 	mutex_exit(&zonehash_lock);
4061 	/*
4062 	 * Let the other lwps continue.
4063 	 */
4064 	mutex_enter(&pp->p_lock);
4065 	if (curthread != pp->p_agenttp)
4066 		continuelwps(pp);
4067 	mutex_exit(&pp->p_lock);
4068 
4069 	resume_mounts();
4070 	if (rctls)
4071 		nvlist_free(rctls);
4072 	/*
4073 	 * There is currently one reference to the zone, a cred_ref from
4074 	 * zone_kcred.  To free the zone, we call crfree, which will call
4075 	 * zone_cred_rele, which will call zone_free.
4076 	 */
4077 	ASSERT(zone->zone_cred_ref == 1);	/* for zone_kcred */
4078 	ASSERT(zone->zone_kcred->cr_ref == 1);
4079 	ASSERT(zone->zone_ref == 0);
4080 	zkcr = zone->zone_kcred;
4081 	zone->zone_kcred = NULL;
4082 	crfree(zkcr);				/* triggers call to zone_free */
4083 	return (zone_create_error(error, error2, extended_error));
4084 }
4085 
4086 /*
4087  * Cause the zone to boot.  This is pretty simple, since we let zoneadmd do
4088  * the heavy lifting.  The program launched at the "top" of the zone is
4089  * the zone's configured initname (see zone_set_initname()); if none was
4090  * set, the system default stored at zone_default_initname is used.
4091  */
4092 static int
4093 zone_boot(zoneid_t zoneid)
4094 {
4095 	int err;
4096 	zone_t *zone;
4097 
4098 	if (secpolicy_zone_config(CRED()) != 0)
4099 		return (set_errno(EPERM));
4100 	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
4101 		return (set_errno(EINVAL));
4102 
4103 	mutex_enter(&zonehash_lock);
4104 	/*
4105 	 * Look for zone under hash lock to prevent races with calls to
4106 	 * zone_shutdown, zone_destroy, etc.
4107 	 */
4108 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
4109 		mutex_exit(&zonehash_lock);
4110 		return (set_errno(EINVAL));
4111 	}
4112 
4113 	mutex_enter(&zone_status_lock);
4114 	if (zone_status_get(zone) != ZONE_IS_READY) {
4115 		mutex_exit(&zone_status_lock);
4116 		mutex_exit(&zonehash_lock);
4117 		return (set_errno(EINVAL));
4118 	}
4119 	zone_status_set(zone, ZONE_IS_BOOTING);
4120 	mutex_exit(&zone_status_lock);
4121 
4122 	zone_hold(zone);	/* so we can use the zone_t later */
4123 	mutex_exit(&zonehash_lock);
4124 
4125 	if (zone_status_wait_sig(zone, ZONE_IS_RUNNING) == 0) {
4126 		zone_rele(zone);
4127 		return (set_errno(EINTR));
4128 	}
4129 
4130 	/*
4131 	 * Boot (starting init) might have failed, in which case the zone
4132 	 * will go to the SHUTTING_DOWN state; an appropriate errno will
4133 	 * be placed in zone->zone_boot_err, and so we return that.
4134 	 */
4135 	err = zone->zone_boot_err;
4136 	zone_rele(zone);
4137 	return (err ? set_errno(err) : 0);
4138 }
4139 
4140 /*
4141  * Kills all user processes in the zone, waiting for them all to exit
4142  * before returning.
4143  */
4144 static int
4145 zone_empty(zone_t *zone)
4146 {
4147 	int waitstatus;
4148 
4149 	/*
4150 	 * We need to drop zonehash_lock before killing all
4151 	 * processes, otherwise we'll deadlock with zone_find_*
4152 	 * which can be called from the exit path.
4153 	 */
4154 	ASSERT(MUTEX_NOT_HELD(&zonehash_lock));
4155 	while ((waitstatus = zone_status_timedwait_sig(zone, lbolt + hz,
4156 	    ZONE_IS_EMPTY)) == -1) {
4157 		killall(zone->zone_id);
4158 	}
4159 	/*
4160 	 * return EINTR if we were signaled
4161 	 */
4162 	if (waitstatus == 0)
4163 		return (EINTR);
4164 	return (0);
4165 }
4166 
4167 /*
4168  * This function implements the policy for zone visibility.
4169  *
4170  * In standard Solaris, a non-global zone can only see itself.
4171  *
4172  * In Trusted Extensions, a labeled zone can lookup any zone whose label
4173  * it dominates. For this test, the label of the global zone is treated as
4174  * admin_high so it is special-cased instead of being checked for dominance.
4175  *
4176  * Returns true if zone attributes are viewable, false otherwise.
4177  */
4178 static boolean_t
4179 zone_list_access(zone_t *zone)
4180 {
4181 
4182 	if (curproc->p_zone == global_zone ||
4183 	    curproc->p_zone == zone) {
4184 		return (B_TRUE);
4185 	} else if (is_system_labeled() && !(zone->zone_flags & ZF_IS_SCRATCH)) {
4186 		bslabel_t *curproc_label;
4187 		bslabel_t *zone_label;
4188 
4189 		curproc_label = label2bslabel(curproc->p_zone->zone_slabel);
4190 		zone_label = label2bslabel(zone->zone_slabel);
4191 
4192 		if (zone->zone_id != GLOBAL_ZONEID &&
4193 		    bldominates(curproc_label, zone_label)) {
4194 			return (B_TRUE);
4195 		} else {
4196 			return (B_FALSE);
4197 		}
4198 	} else {
4199 		return (B_FALSE);
4200 	}
4201 }
4202 
4203 /*
4204  * Systemcall to start the zone's halt sequence.  By the time this
4205  * function successfully returns, all user processes and kernel threads
4206  * executing in it will have exited, ZSD shutdown callbacks executed,
4207  * and the zone status set to ZONE_IS_DOWN.
4208  *
4209  * It is possible that the call will interrupt itself if the caller is the
4210  * parent of any process running in the zone, and doesn't have SIGCHLD blocked.
4211  */
4212 static int
4213 zone_shutdown(zoneid_t zoneid)
4214 {
4215 	int error;
4216 	zone_t *zone;
4217 	zone_status_t status;
4218 
4219 	if (secpolicy_zone_config(CRED()) != 0)
4220 		return (set_errno(EPERM));
4221 	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
4222 		return (set_errno(EINVAL));
4223 
4224 	/*
4225 	 * Block mounts so that VFS_MOUNT() can get an accurate view of
4226 	 * the zone's status with regard to ZONE_IS_SHUTTING_DOWN.
4227 	 *
4228 	 * e.g. NFS can fail the mount if it determines that the zone
4229 	 * has already begun the shutdown sequence.
4230 	 */
4231 	if (block_mounts() == 0)
4232 		return (set_errno(EINTR));
4233 	mutex_enter(&zonehash_lock);
4234 	/*
4235 	 * Look for zone under hash lock to prevent races with other
4236 	 * calls to zone_shutdown and zone_destroy.
4237 	 */
4238 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
4239 		mutex_exit(&zonehash_lock);
4240 		resume_mounts();
4241 		return (set_errno(EINVAL));
4242 	}
4243 	mutex_enter(&zone_status_lock);
4244 	status = zone_status_get(zone);
4245 	/*
4246 	 * Fail if the zone isn't fully initialized yet.
4247 	 */
4248 	if (status < ZONE_IS_READY) {
4249 		mutex_exit(&zone_status_lock);
4250 		mutex_exit(&zonehash_lock);
4251 		resume_mounts();
4252 		return (set_errno(EINVAL));
4253 	}
4254 	/*
4255 	 * If conditions required for zone_shutdown() to return have been met,
4256 	 * return success.
4257 	 */
4258 	if (status >= ZONE_IS_DOWN) {
4259 		mutex_exit(&zone_status_lock);
4260 		mutex_exit(&zonehash_lock);
4261 		resume_mounts();
4262 		return (0);
4263 	}
4264 	/*
4265 	 * If zone_shutdown() hasn't been called before, go through the motions.
4266 	 * If it has, there's nothing to do but wait for the kernel threads to
4267 	 * drain.
4268 	 */
4269 	if (status < ZONE_IS_EMPTY) {
4270 		uint_t ntasks;
4271 
4272 		mutex_enter(&zone->zone_lock);
4273 		if ((ntasks = zone->zone_ntasks) != 1) {
4274 			/*
4275 			 * There's still stuff running.
4276 			 */
4277 			zone_status_set(zone, ZONE_IS_SHUTTING_DOWN);
4278 		}
4279 		mutex_exit(&zone->zone_lock);
4280 		if (ntasks == 1) {
4281 			/*
4282 			 * The only way to create another task is through
4283 			 * zone_enter(), which will block until we drop
4284 			 * zonehash_lock.  The zone is empty.
4285 			 */
4286 			if (zone->zone_kthreads == NULL) {
4287 				/*
4288 				 * Skip ahead to ZONE_IS_DOWN
4289 				 */
4290 				zone_status_set(zone, ZONE_IS_DOWN);
4291 			} else {
4292 				zone_status_set(zone, ZONE_IS_EMPTY);
4293 			}
4294 		}
4295 	}
4296 	zone_hold(zone);	/* so we can use the zone_t later */
4297 	mutex_exit(&zone_status_lock);
4298 	mutex_exit(&zonehash_lock);
4299 	resume_mounts();
4300 
4301 	if (error = zone_empty(zone)) {
4302 		zone_rele(zone);
4303 		return (set_errno(error));
4304 	}
4305 	/*
4306 	 * After the zone status goes to ZONE_IS_DOWN this zone will no
4307 	 * longer be notified of changes to the pools configuration, so
4308 	 * in order to not end up with a stale pool pointer, we point
4309 	 * ourselves at the default pool and remove all resource
4310 	 * visibility.  This is especially important as the zone_t may
4311 	 * languish on the deathrow for a very long time waiting for
4312 	 * cred's to drain out.
4313 	 *
4314 	 * This rebinding of the zone can happen multiple times
4315 	 * (presumably due to interrupted or parallel systemcalls)
4316 	 * without any adverse effects.
4317 	 */
4318 	if (pool_lock_intr() != 0) {
4319 		zone_rele(zone);
4320 		return (set_errno(EINTR));
4321 	}
4322 	if (pool_state == POOL_ENABLED) {
4323 		mutex_enter(&cpu_lock);
4324 		zone_pool_set(zone, pool_default);
4325 		/*
4326 		 * The zone no longer needs to be able to see any cpus.
4327 		 */
4328 		zone_pset_set(zone, ZONE_PS_INVAL);
4329 		mutex_exit(&cpu_lock);
4330 	}
4331 	pool_unlock();
4332 
4333 	/*
4334 	 * ZSD shutdown callbacks can be executed multiple times, hence
4335 	 * it is safe to not be holding any locks across this call.
4336 	 */
4337 	zone_zsd_callbacks(zone, ZSD_SHUTDOWN);
4338 
4339 	mutex_enter(&zone_status_lock);
4340 	if (zone->zone_kthreads == NULL && zone_status_get(zone) < ZONE_IS_DOWN)
4341 		zone_status_set(zone, ZONE_IS_DOWN);
4342 	mutex_exit(&zone_status_lock);
4343 
4344 	/*
4345 	 * Wait for kernel threads to drain.
4346 	 */
4347 	if (!zone_status_wait_sig(zone, ZONE_IS_DOWN)) {
4348 		zone_rele(zone);
4349 		return (set_errno(EINTR));
4350 	}
4351 
4352 	/*
4353 	 * The zone can become down/destroyable even if the above wait
4354 	 * returns EINTR, so any code added here may never execute.
4355 	 * (i.e. don't add code here)
4356 	 */
4357 
4358 	zone_rele(zone);
4359 	return (0);
4360 }
4361 
4362 /*
4363  * Systemcall entry point to finalize the zone halt process.  The caller
4364  * must have already successfully called zone_shutdown().
4365  *
4366  * Upon successful completion, the zone will have been fully destroyed:
4367  * zsched will have exited, destructor callbacks executed, and the zone
4368  * removed from the list of active zones.
4369  */
4370 static int
4371 zone_destroy(zoneid_t zoneid)
4372 {
4373 	uint64_t uniqid;
4374 	zone_t *zone;
4375 	zone_status_t status;
4376 
4377 	if (secpolicy_zone_config(CRED()) != 0)
4378 		return (set_errno(EPERM));
4379 	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
4380 		return (set_errno(EINVAL));
4381 
4382 	mutex_enter(&zonehash_lock);
4383 	/*
4384 	 * Look for zone under hash lock to prevent races with other
4385 	 * calls to zone_destroy.
4386 	 */
4387 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
4388 		mutex_exit(&zonehash_lock);
4389 		return (set_errno(EINVAL));
4390 	}
4391 
4392 	if (zone_mount_count(zone->zone_rootpath) != 0) {
4393 		mutex_exit(&zonehash_lock);
4394 		return (set_errno(EBUSY));
4395 	}
4396 	mutex_enter(&zone_status_lock);
4397 	status = zone_status_get(zone);
4398 	if (status < ZONE_IS_DOWN) {
4399 		mutex_exit(&zone_status_lock);
4400 		mutex_exit(&zonehash_lock);
4401 		return (set_errno(EBUSY));
4402 	} else if (status == ZONE_IS_DOWN) {
4403 		zone_status_set(zone, ZONE_IS_DYING); /* Tell zsched to exit */
4404 	}
4405 	mutex_exit(&zone_status_lock);
4406 	zone_hold(zone);
4407 	mutex_exit(&zonehash_lock);
4408 
4409 	/*
4410 	 * wait for zsched to exit
4411 	 */
4412 	zone_status_wait(zone, ZONE_IS_DEAD);
4413 	zone_zsd_callbacks(zone, ZSD_DESTROY);
4414 	zone->zone_netstack = NULL;
4415 	uniqid = zone->zone_uniqid;
4416 	zone_rele(zone);
4417 	zone = NULL;	/* potentially free'd */
4418 
4419 	mutex_enter(&zonehash_lock);
4420 	for (; /* ever */; ) {
4421 		boolean_t unref;
4422 
4423 		if ((zone = zone_find_all_by_id(zoneid)) == NULL ||
4424 		    zone->zone_uniqid != uniqid) {
4425 			/*
4426 			 * The zone has gone away.  Necessary conditions
4427 			 * are met, so we return success.
4428 			 */
4429 			mutex_exit(&zonehash_lock);
4430 			return (0);
4431 		}
4432 		mutex_enter(&zone->zone_lock);
4433 		unref = ZONE_IS_UNREF(zone);
4434 		mutex_exit(&zone->zone_lock);
4435 		if (unref) {
4436 			/*
4437 			 * There is only one reference to the zone -- that
4438 			 * added when the zone was added to the hashtables --
4439 			 * and things will remain this way until we drop
4440 			 * zonehash_lock... we can go ahead and cleanup the
4441 			 * zone.
4442 			 */
4443 			break;
4444 		}
4445 
4446 		if (cv_wait_sig(&zone_destroy_cv, &zonehash_lock) == 0) {
4447 			/* Signaled */
4448 			mutex_exit(&zonehash_lock);
4449 			return (set_errno(EINTR));
4450 		}
4451 
4452 	}
4453 
4454 	/*
4455 	 * Remove CPU cap for this zone now since we're not going to
4456 	 * fail below this point.
4457 	 */
4458 	cpucaps_zone_remove(zone);
4459 
4460 	/* Get rid of the zone's kstats */
4461 	zone_kstat_delete(zone);
4462 
4463 	/* free brand specific data */
4464 	if (ZONE_IS_BRANDED(zone))
4465 		ZBROP(zone)->b_free_brand_data(zone);
4466 
4467 	/* Say goodbye to brand framework. */
4468 	brand_unregister_zone(zone->zone_brand);
4469 
4470 	/*
4471 	 * It is now safe to let the zone be recreated; remove it from the
4472 	 * lists.  The memory will not be freed until the last cred
4473 	 * reference goes away.
4474 	 */
4475 	ASSERT(zonecount > 1);	/* must be > 1; can't destroy global zone */
4476 	zonecount--;
4477 	/* remove from active list and hash tables */
4478 	list_remove(&zone_active, zone);
4479 	(void) mod_hash_destroy(zonehashbyname,
4480 	    (mod_hash_key_t)zone->zone_name);
4481 	(void) mod_hash_destroy(zonehashbyid,
4482 	    (mod_hash_key_t)(uintptr_t)zone->zone_id);
4483 	if (zone->zone_flags & ZF_HASHED_LABEL)
4484 		(void) mod_hash_destroy(zonehashbylabel,
4485 		    (mod_hash_key_t)zone->zone_slabel);
4486 	mutex_exit(&zonehash_lock);
4487 
4488 	/*
4489 	 * Release the root vnode; we're not using it anymore.  Nor should any
4490 	 * other thread that might access it exist.
4491 	 */
4492 	if (zone->zone_rootvp != NULL) {
4493 		VN_RELE(zone->zone_rootvp);
4494 		zone->zone_rootvp = NULL;
4495 	}
4496 
4497 	/* add to deathrow list */
4498 	mutex_enter(&zone_deathrow_lock);
4499 	list_insert_tail(&zone_deathrow, zone);
4500 	mutex_exit(&zone_deathrow_lock);
4501 
4502 	/*
4503 	 * Drop last reference (which was added by zsched()), this will
4504 	 * free the zone unless there are outstanding cred references.
4505 	 */
4506 	zone_rele(zone);
4507 	return (0);
4508 }
4509 
4510 /*
4511  * Systemcall entry point for zone_getattr(2).
4512  */
4513 static ssize_t
4514 zone_getattr(zoneid_t zoneid, int attr, void *buf, size_t bufsize)
4515 {
4516 	size_t size;
4517 	int error = 0, err;
4518 	zone_t *zone;
4519 	char *zonepath;
4520 	char *outstr;
4521 	zone_status_t zone_status;
4522 	pid_t initpid;
4523 	boolean_t global = (curzone == global_zone);
4524 	boolean_t inzone = (curzone->zone_id == zoneid);
4525 	ushort_t flags;
4526 
4527 	mutex_enter(&zonehash_lock);
4528 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
4529 		mutex_exit(&zonehash_lock);
4530 		return (set_errno(EINVAL));
4531 	}
4532 	zone_status = zone_status_get(zone);
4533 	if (zone_status < ZONE_IS_INITIALIZED) {
4534 		mutex_exit(&zonehash_lock);
4535 		return (set_errno(EINVAL));
4536 	}
4537 	zone_hold(zone);
4538 	mutex_exit(&zonehash_lock);
4539 
4540 	/*
4541 	 * If not in the global zone, don't show information about other zones,
4542 	 * unless the system is labeled and the local zone's label dominates
4543 	 * the other zone.
4544 	 */
4545 	if (!zone_list_access(zone)) {
4546 		zone_rele(zone);
4547 		return (set_errno(EINVAL));
4548 	}
4549 
4550 	switch (attr) {
4551 	case ZONE_ATTR_ROOT:
4552 		if (global) {
4553 			/*
4554 			 * Copy the path to trim the trailing "/" (except for
4555 			 * the global zone).
4556 			 */
4557 			if (zone != global_zone)
4558 				size = zone->zone_rootpathlen - 1;
4559 			else
4560 				size = zone->zone_rootpathlen;
4561 			zonepath = kmem_alloc(size, KM_SLEEP);
4562 			bcopy(zone->zone_rootpath, zonepath, size);
4563 			zonepath[size - 1] = '\0';
4564 		} else {
4565 			if (inzone || !is_system_labeled()) {
4566 				/*
4567 				 * Caller is not in the global zone.
4568 				 * if the query is on the current zone
4569 				 * or the system is not labeled,
4570 				 * just return faked-up path for current zone.
4571 				 */
4572 				zonepath = "/";
4573 				size = 2;
4574 			} else {
4575 				/*
4576 				 * Return related path for current zone.
4577 				 */
4578 				int prefix_len = strlen(zone_prefix);
4579 				int zname_len = strlen(zone->zone_name);
4580 
4581 				size = prefix_len + zname_len + 1;
4582 				zonepath = kmem_alloc(size, KM_SLEEP);
4583 				bcopy(zone_prefix, zonepath, prefix_len);
4584 				bcopy(zone->zone_name, zonepath +
4585 				    prefix_len, zname_len);
4586 				zonepath[size - 1] = '\0';
4587 			}
4588 		}
4589 		if (bufsize > size)
4590 			bufsize = size;
4591 		if (buf != NULL) {
4592 			err = copyoutstr(zonepath, buf, bufsize, NULL);
4593 			if (err != 0 && err != ENAMETOOLONG)
4594 				error = EFAULT;
4595 		}
4596 		if (global || (is_system_labeled() && !inzone))
4597 			kmem_free(zonepath, size);
4598 		break;
4599 
4600 	case ZONE_ATTR_NAME:
4601 		size = strlen(zone->zone_name) + 1;
4602 		if (bufsize > size)
4603 			bufsize = size;
4604 		if (buf != NULL) {
4605 			err = copyoutstr(zone->zone_name, buf, bufsize, NULL);
4606 			if (err != 0 && err != ENAMETOOLONG)
4607 				error = EFAULT;
4608 		}
4609 		break;
4610 
4611 	case ZONE_ATTR_STATUS:
4612 		/*
4613 		 * Since we're not holding zonehash_lock, the zone status
4614 		 * may be anything; leave it up to userland to sort it out.
4615 		 */
4616 		size = sizeof (zone_status);
4617 		if (bufsize > size)
4618 			bufsize = size;
4619 		zone_status = zone_status_get(zone);
4620 		if (buf != NULL &&
4621 		    copyout(&zone_status, buf, bufsize) != 0)
4622 			error = EFAULT;
4623 		break;
4624 	case ZONE_ATTR_FLAGS:
4625 		size = sizeof (zone->zone_flags);
4626 		if (bufsize > size)
4627 			bufsize = size;
4628 		flags = zone->zone_flags;
4629 		if (buf != NULL &&
4630 		    copyout(&flags, buf, bufsize) != 0)
4631 			error = EFAULT;
4632 		break;
4633 	case ZONE_ATTR_PRIVSET:
4634 		size = sizeof (priv_set_t);
4635 		if (bufsize > size)
4636 			bufsize = size;
4637 		if (buf != NULL &&
4638 		    copyout(zone->zone_privset, buf, bufsize) != 0)
4639 			error = EFAULT;
4640 		break;
4641 	case ZONE_ATTR_UNIQID:
4642 		size = sizeof (zone->zone_uniqid);
4643 		if (bufsize > size)
4644 			bufsize = size;
4645 		if (buf != NULL &&
4646 		    copyout(&zone->zone_uniqid, buf, bufsize) != 0)
4647 			error = EFAULT;
4648 		break;
4649 	case ZONE_ATTR_POOLID:
4650 		{
4651 			pool_t *pool;
4652 			poolid_t poolid;
4653 
4654 			if (pool_lock_intr() != 0) {
4655 				error = EINTR;
4656 				break;
4657 			}
4658 			pool = zone_pool_get(zone);
4659 			poolid = pool->pool_id;
4660 			pool_unlock();
4661 			size = sizeof (poolid);
4662 			if (bufsize > size)
4663 				bufsize = size;
4664 			if (buf != NULL && copyout(&poolid, buf, size) != 0)
4665 				error = EFAULT;
4666 		}
4667 		break;
4668 	case ZONE_ATTR_SLBL:
4669 		size = sizeof (bslabel_t);
4670 		if (bufsize > size)
4671 			bufsize = size;
4672 		if (zone->zone_slabel == NULL)
4673 			error = EINVAL;
4674 		else if (buf != NULL &&
4675 		    copyout(label2bslabel(zone->zone_slabel), buf,
4676 		    bufsize) != 0)
4677 			error = EFAULT;
4678 		break;
4679 	case ZONE_ATTR_INITPID:
4680 		size = sizeof (initpid);
4681 		if (bufsize > size)
4682 			bufsize = size;
4683 		initpid = zone->zone_proc_initpid;
4684 		if (initpid == -1) {
4685 			error = ESRCH;
4686 			break;
4687 		}
4688 		if (buf != NULL &&
4689 		    copyout(&initpid, buf, bufsize) != 0)
4690 			error = EFAULT;
4691 		break;
4692 	case ZONE_ATTR_BRAND:
4693 		size = strlen(zone->zone_brand->b_name) + 1;
4694 
4695 		if (bufsize > size)
4696 			bufsize = size;
4697 		if (buf != NULL) {
4698 			err = copyoutstr(zone->zone_brand->b_name, buf,
4699 			    bufsize, NULL);
4700 			if (err != 0 && err != ENAMETOOLONG)
4701 				error = EFAULT;
4702 		}
4703 		break;
4704 	case ZONE_ATTR_INITNAME:
4705 		size = strlen(zone->zone_initname) + 1;
4706 		if (bufsize > size)
4707 			bufsize = size;
4708 		if (buf != NULL) {
4709 			err = copyoutstr(zone->zone_initname, buf, bufsize,
4710 			    NULL);
4711 			if (err != 0 && err != ENAMETOOLONG)
4712 				error = EFAULT;
4713 		}
4714 		break;
4715 	case ZONE_ATTR_BOOTARGS:
4716 		if (zone->zone_bootargs == NULL)
4717 			outstr = "";
4718 		else
4719 			outstr = zone->zone_bootargs;
4720 		size = strlen(outstr) + 1;
4721 		if (bufsize > size)
4722 			bufsize = size;
4723 		if (buf != NULL) {
4724 			err = copyoutstr(outstr, buf, bufsize, NULL);
4725 			if (err != 0 && err != ENAMETOOLONG)
4726 				error = EFAULT;
4727 		}
4728 		break;
4729 	case ZONE_ATTR_PHYS_MCAP:
4730 		size = sizeof (zone->zone_phys_mcap);
4731 		if (bufsize > size)
4732 			bufsize = size;
4733 		if (buf != NULL &&
4734 		    copyout(&zone->zone_phys_mcap, buf, bufsize) != 0)
4735 			error = EFAULT;
4736 		break;
4737 	case ZONE_ATTR_SCHED_CLASS:
4738 		mutex_enter(&class_lock);
4739 
4740 		if (zone->zone_defaultcid >= loaded_classes)
4741 			outstr = "";
4742 		else
4743 			outstr = sclass[zone->zone_defaultcid].cl_name;
4744 		size = strlen(outstr) + 1;
4745 		if (bufsize > size)
4746 			bufsize = size;
4747 		if (buf != NULL) {
4748 			err = copyoutstr(outstr, buf, bufsize, NULL);
4749 			if (err != 0 && err != ENAMETOOLONG)
4750 				error = EFAULT;
4751 		}
4752 
4753 		mutex_exit(&class_lock);
4754 		break;
4755 	case ZONE_ATTR_HOSTID:
4756 		if (zone->zone_hostid != HW_INVALID_HOSTID &&
4757 		    bufsize == sizeof (zone->zone_hostid)) {
4758 			size = sizeof (zone->zone_hostid);
4759 			if (buf != NULL && copyout(&zone->zone_hostid, buf,
4760 			    bufsize) != 0)
4761 				error = EFAULT;
4762 		} else {
4763 			error = EINVAL;
4764 		}
4765 		break;
4766 	default:
4767 		if ((attr >= ZONE_ATTR_BRAND_ATTRS) && ZONE_IS_BRANDED(zone)) {
4768 			size = bufsize;
4769 			error = ZBROP(zone)->b_getattr(zone, attr, buf, &size);
4770 		} else {
4771 			error = EINVAL;
4772 		}
4773 	}
4774 	zone_rele(zone);
4775 
4776 	if (error)
4777 		return (set_errno(error));
4778 	return ((ssize_t)size);
4779 }
4780 
4781 /*
4782  * Systemcall entry point for zone_setattr(2).
4783  */
4784 /*ARGSUSED*/
4785 static int
4786 zone_setattr(zoneid_t zoneid, int attr, void *buf, size_t bufsize)
4787 {
4788 	zone_t *zone;
4789 	zone_status_t zone_status;
4790 	int err;
4791 
4792 	if (secpolicy_zone_config(CRED()) != 0)
4793 		return (set_errno(EPERM));
4794 
4795 	/*
4796 	 * Only the ZONE_ATTR_PHYS_MCAP attribute can be set on the
4797 	 * global zone.
4798 	 */
4799 	if (zoneid == GLOBAL_ZONEID && attr != ZONE_ATTR_PHYS_MCAP) {
4800 		return (set_errno(EINVAL));
4801 	}
4802 
4803 	mutex_enter(&zonehash_lock);
4804 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
4805 		mutex_exit(&zonehash_lock);
4806 		return (set_errno(EINVAL));
4807 	}
4808 	zone_hold(zone);
4809 	mutex_exit(&zonehash_lock);
4810 
4811 	/*
4812 	 * At present most attributes can only be set on non-running,
4813 	 * non-global zones.
4814 	 */
4815 	zone_status = zone_status_get(zone);
4816 	if (attr != ZONE_ATTR_PHYS_MCAP && zone_status > ZONE_IS_READY)
4817 		{ err = EINVAL; goto done; }
4818 
4819 	switch (attr) {
4820 	case ZONE_ATTR_INITNAME:
4821 		err = zone_set_initname(zone, (const char *)buf);
4822 		break;
4823 	case ZONE_ATTR_BOOTARGS:
4824 		err = zone_set_bootargs(zone, (const char *)buf);
4825 		break;
4826 	case ZONE_ATTR_BRAND:
4827 		err = zone_set_brand(zone, (const char *)buf);
4828 		break;
4829 	case ZONE_ATTR_PHYS_MCAP:
4830 		err = zone_set_phys_mcap(zone, (const uint64_t *)buf);
4831 		break;
4832 	case ZONE_ATTR_SCHED_CLASS:
4833 		err = zone_set_sched_class(zone, (const char *)buf);
4834 		break;
4835 	case ZONE_ATTR_HOSTID:
4836 		if (bufsize == sizeof (zone->zone_hostid)) {
4837 			if (copyin(buf, &zone->zone_hostid, bufsize) == 0)
4838 				err = 0;
4839 			else
4840 				err = EFAULT;
4841 		} else {
4842 			err = EINVAL;
4843 		}
4844 		break;
4845 	default:
4846 		if ((attr >= ZONE_ATTR_BRAND_ATTRS) && ZONE_IS_BRANDED(zone))
4847 			err = ZBROP(zone)->b_setattr(zone, attr, buf, bufsize);
4848 		else
4849 			err = EINVAL;
4850 	}
4851 
4852 done:
4853 	zone_rele(zone);
4854 	return (err != 0 ? set_errno(err) : 0);
4855 }
4856 
4857 /*
4858  * Return zero if the process has at least one vnode mapped in to its
4859  * address space which shouldn't be allowed to change zones.
4860  *
4861  * Also return zero if the process has any shared mappings which reserve
4862  * swap.  This is because the counting for zone.max-swap does not allow swap
4863  * reservation to be shared between zones.  zone swap reservation is counted
4864  * on zone->zone_max_swap.
4865  */
4866 static int
4867 as_can_change_zones(void)
4868 {
4869 	proc_t *pp = curproc;
4870 	struct seg *seg;
4871 	struct as *as = pp->p_as;
4872 	vnode_t *vp;
4873 	int allow = 1;
4874 
4875 	ASSERT(pp->p_as != &kas);
4876 	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
4877 	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
4878 
4879 		/*
4880 		 * Cannot enter zone with shared anon memory which
4881 		 * reserves swap.  See comment above.
4882 		 */
4883 		if (seg_can_change_zones(seg) == B_FALSE) {
4884 			allow = 0;
4885 			break;
4886 		}
4887 		/*
4888 		 * if we can't get a backing vnode for this segment then skip
4889 		 * it.
4890 		 */
4891 		vp = NULL;
4892 		if (SEGOP_GETVP(seg, seg->s_base, &vp) != 0 || vp == NULL)
4893 			continue;
4894 		if (!vn_can_change_zones(vp)) { /* bail on first match */
4895 			allow = 0;
4896 			break;
4897 		}
4898 	}
4899 	AS_LOCK_EXIT(as, &as->a_lock);
4900 	return (allow);
4901 }
4902 
4903 /*
4904  * Count swap reserved by curproc's address space
4905  */
4906 static size_t
4907 as_swresv(void)
4908 {
4909 	proc_t *pp = curproc;
4910 	struct seg *seg;
4911 	struct as *as = pp->p_as;
4912 	size_t swap = 0;
4913 
4914 	ASSERT(pp->p_as != &kas);
4915 	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
4916 	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg))
4917 		swap += seg_swresv(seg);
4918 
4919 	return (swap);
4920 }
4921 
4922 /*
4923  * Systemcall entry point for zone_enter().
4924  *
4925  * The current process is injected into said zone.  In the process
4926  * it will change its project membership, privileges, rootdir/cwd,
4927  * zone-wide rctls, and pool association to match those of the zone.
4928  *
4929  * The first zone_enter() called while the zone is in the ZONE_IS_READY
4930  * state will transition it to ZONE_IS_RUNNING.  Processes may only
4931  * enter a zone that is "ready" or "running".
4932  */
4933 static int
4934 zone_enter(zoneid_t zoneid)
4935 {
4936 	zone_t *zone;
4937 	vnode_t *vp;
4938 	proc_t *pp = curproc;
4939 	contract_t *ct;
4940 	cont_process_t *ctp;
4941 	task_t *tk, *oldtk;
4942 	kproject_t *zone_proj0;
4943 	cred_t *cr, *newcr;
4944 	pool_t *oldpool, *newpool;
4945 	sess_t *sp;
4946 	uid_t uid;
4947 	zone_status_t status;
4948 	int err = 0;
4949 	rctl_entity_p_t e;
4950 	size_t swap;
4951 	kthread_id_t t;
4952 
4953 	if (secpolicy_zone_config(CRED()) != 0)
4954 		return (set_errno(EPERM));
4955 	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
4956 		return (set_errno(EINVAL));
4957 
4958 	/*
4959 	 * Stop all lwps so we don't need to hold a lock to look at
4960 	 * curproc->p_zone.  This needs to happen before we grab any
4961 	 * locks to avoid deadlock (another lwp in the process could
4962 	 * be waiting for the held lock).
4963 	 */
4964 	if (curthread != pp->p_agenttp && !holdlwps(SHOLDFORK))
4965 		return (set_errno(EINTR));
4966 
4967 	/*
4968 	 * Make sure we're not changing zones with files open or mapped in
4969 	 * to our address space which shouldn't be changing zones.
4970 	 */
4971 	if (!files_can_change_zones()) {
4972 		err = EBADF;
4973 		goto out;
4974 	}
4975 	if (!as_can_change_zones()) {
4976 		err = EFAULT;
4977 		goto out;
4978 	}
4979 
4980 	mutex_enter(&zonehash_lock);
4981 	if (pp->p_zone != global_zone) {
4982 		mutex_exit(&zonehash_lock);
4983 		err = EINVAL;
4984 		goto out;
4985 	}
4986 
4987 	zone = zone_find_all_by_id(zoneid);
4988 	if (zone == NULL) {
4989 		mutex_exit(&zonehash_lock);
4990 		err = EINVAL;
4991 		goto out;
4992 	}
4993 
4994 	/*
4995 	 * To prevent processes in a zone from holding contracts on
4996 	 * extrazonal resources, and to avoid process contract
4997 	 * memberships which span zones, contract holders and processes
4998 	 * which aren't the sole members of their encapsulating process
4999 	 * contracts are not allowed to zone_enter.
5000 	 */
5001 	ctp = pp->p_ct_process;
5002 	ct = &ctp->conp_contract;
5003 	mutex_enter(&ct->ct_lock);
5004 	mutex_enter(&pp->p_lock);
5005 	if ((avl_numnodes(&pp->p_ct_held) != 0) || (ctp->conp_nmembers != 1)) {
5006 		mutex_exit(&pp->p_lock);
5007 		mutex_exit(&ct->ct_lock);
5008 		mutex_exit(&zonehash_lock);
5009 		err = EINVAL;
5010 		goto out;
5011 	}
5012 
5013 	/*
5014 	 * Moreover, we don't allow processes whose encapsulating
5015 	 * process contracts have inherited extrazonal contracts.
5016 	 * While it would be easier to eliminate all process contracts
5017 	 * with inherited contracts, we need to be able to give a
5018 	 * restarted init (or other zone-penetrating process) its
5019 	 * predecessor's contracts.
5020 	 */
5021 	if (ctp->conp_ninherited != 0) {
5022 		contract_t *next;
5023 		for (next = list_head(&ctp->conp_inherited); next;
5024 		    next = list_next(&ctp->conp_inherited, next)) {
5025 			if (contract_getzuniqid(next) != zone->zone_uniqid) {
5026 				mutex_exit(&pp->p_lock);
5027 				mutex_exit(&ct->ct_lock);
5028 				mutex_exit(&zonehash_lock);
5029 				err = EINVAL;
5030 				goto out;
5031 			}
5032 		}
5033 	}
5034 
5035 	mutex_exit(&pp->p_lock);
5036 	mutex_exit(&ct->ct_lock);
5037 
5038 	status = zone_status_get(zone);
5039 	if (status < ZONE_IS_READY || status >= ZONE_IS_SHUTTING_DOWN) {
5040 		/*
5041 		 * Can't join
5042 		 */
5043 		mutex_exit(&zonehash_lock);
5044 		err = EINVAL;
5045 		goto out;
5046 	}
5047 
5048 	/*
5049 	 * Make sure new priv set is within the permitted set for caller
5050 	 */
5051 	if (!priv_issubset(zone->zone_privset, &CR_OPPRIV(CRED()))) {
5052 		mutex_exit(&zonehash_lock);
5053 		err = EPERM;
5054 		goto out;
5055 	}
5056 	/*
5057 	 * We want to momentarily drop zonehash_lock while we optimistically
5058 	 * bind curproc to the pool it should be running in.  This is safe
5059 	 * since the zone can't disappear (we have a hold on it).
5060 	 */
5061 	zone_hold(zone);
5062 	mutex_exit(&zonehash_lock);
5063 
5064 	/*
5065 	 * Grab pool_lock to keep the pools configuration from changing
5066 	 * and to stop ourselves from getting rebound to another pool
5067 	 * until we join the zone.
5068 	 */
5069 	if (pool_lock_intr() != 0) {
5070 		zone_rele(zone);
5071 		err = EINTR;
5072 		goto out;
5073 	}
5074 	ASSERT(secpolicy_pool(CRED()) == 0);
5075 	/*
5076 	 * Bind ourselves to the pool currently associated with the zone.
5077 	 */
5078 	oldpool = curproc->p_pool;
5079 	newpool = zone_pool_get(zone);
5080 	if (pool_state == POOL_ENABLED && newpool != oldpool &&
5081 	    (err = pool_do_bind(newpool, P_PID, P_MYID,
5082 	    POOL_BIND_ALL)) != 0) {
5083 		pool_unlock();
5084 		zone_rele(zone);
5085 		goto out;
5086 	}
5087 
5088 	/*
5089 	 * Grab cpu_lock now; we'll need it later when we call
5090 	 * task_join().
5091 	 */
5092 	mutex_enter(&cpu_lock);
5093 	mutex_enter(&zonehash_lock);
5094 	/*
5095 	 * Make sure the zone hasn't moved on since we dropped zonehash_lock.
5096 	 */
5097 	if (zone_status_get(zone) >= ZONE_IS_SHUTTING_DOWN) {
5098 		/*
5099 		 * Can't join anymore.
5100 		 */
5101 		mutex_exit(&zonehash_lock);
5102 		mutex_exit(&cpu_lock);
5103 		if (pool_state == POOL_ENABLED &&
5104 		    newpool != oldpool)
5105 			(void) pool_do_bind(oldpool, P_PID, P_MYID,
5106 			    POOL_BIND_ALL);
5107 		pool_unlock();
5108 		zone_rele(zone);
5109 		err = EINVAL;
5110 		goto out;
5111 	}
5112 
5113 	/*
5114 	 * a_lock must be held while transferring locked memory and swap
5115 	 * reservation from the global zone to the non-global zone because
5116 	 * asynchronous faults on the process's address space can lock
5117 	 * memory and reserve swap via MCL_FUTURE and MAP_NORESERVE
5118 	 * segments respectively.
5119 	 */
5120 	AS_LOCK_ENTER(pp->p_as, &pp->p_as->a_lock, RW_WRITER);
5121 	swap = as_swresv();
5122 	mutex_enter(&pp->p_lock);
5123 	zone_proj0 = zone->zone_zsched->p_task->tk_proj;
5124 	/* verify that we do not exceed any task or lwp limits */
5125 	mutex_enter(&zone->zone_nlwps_lock);
5126 	/* add new lwps to zone and zone's proj0 */
5127 	zone_proj0->kpj_nlwps += pp->p_lwpcnt;
5128 	zone->zone_nlwps += pp->p_lwpcnt;
5129 	/* add 1 task to zone's proj0 */
5130 	zone_proj0->kpj_ntasks += 1;
5131 	mutex_exit(&zone->zone_nlwps_lock);
5132 
5133 	mutex_enter(&zone->zone_mem_lock);
5134 	zone->zone_locked_mem += pp->p_locked_mem;
5135 	zone_proj0->kpj_data.kpd_locked_mem += pp->p_locked_mem;
5136 	zone->zone_max_swap += swap;
5137 	mutex_exit(&zone->zone_mem_lock);
5138 
5139 	mutex_enter(&(zone_proj0->kpj_data.kpd_crypto_lock));
5140 	zone_proj0->kpj_data.kpd_crypto_mem += pp->p_crypto_mem;
5141 	mutex_exit(&(zone_proj0->kpj_data.kpd_crypto_lock));
5142 
5143 	/* remove lwps from proc's old zone and old project */
5144 	mutex_enter(&pp->p_zone->zone_nlwps_lock);
5145 	pp->p_zone->zone_nlwps -= pp->p_lwpcnt;
5146 	pp->p_task->tk_proj->kpj_nlwps -= pp->p_lwpcnt;
5147 	mutex_exit(&pp->p_zone->zone_nlwps_lock);
5148 
5149 	mutex_enter(&pp->p_zone->zone_mem_lock);
5150 	pp->p_zone->zone_locked_mem -= pp->p_locked_mem;
5151 	pp->p_task->tk_proj->kpj_data.kpd_locked_mem -= pp->p_locked_mem;
5152 	pp->p_zone->zone_max_swap -= swap;
5153 	mutex_exit(&pp->p_zone->zone_mem_lock);
5154 
5155 	mutex_enter(&(pp->p_task->tk_proj->kpj_data.kpd_crypto_lock));
5156 	pp->p_task->tk_proj->kpj_data.kpd_crypto_mem -= pp->p_crypto_mem;
5157 	mutex_exit(&(pp->p_task->tk_proj->kpj_data.kpd_crypto_lock));
5158 
5159 	mutex_exit(&pp->p_lock);
5160 	AS_LOCK_EXIT(pp->p_as, &pp->p_as->a_lock);
5161 
5162 	/*
5163 	 * Joining the zone cannot fail from now on.
5164 	 *
5165 	 * This means that a lot of the following code can be commonized and
5166 	 * shared with zsched().
5167 	 */
5168 
5169 	/*
5170 	 * If the process contract fmri was inherited, we need to
5171 	 * flag this so that any contract status will not leak
5172 	 * extra zone information, svc_fmri in this case
5173 	 */
5174 	if (ctp->conp_svc_ctid != ct->ct_id) {
5175 		mutex_enter(&ct->ct_lock);
5176 		ctp->conp_svc_zone_enter = ct->ct_id;
5177 		mutex_exit(&ct->ct_lock);
5178 	}
5179 
5180 	/*
5181 	 * Reset the encapsulating process contract's zone.
5182 	 */
5183 	ASSERT(ct->ct_mzuniqid == GLOBAL_ZONEUNIQID);
5184 	contract_setzuniqid(ct, zone->zone_uniqid);
5185 
5186 	/*
5187 	 * Create a new task and associate the process with the project keyed
5188 	 * by (projid,zoneid).
5189 	 *
5190 	 * We might as well be in project 0; the global zone's projid doesn't
5191 	 * make much sense in a zone anyhow.
5192 	 *
5193 	 * This also increments zone_ntasks, and returns with p_lock held.
5194 	 */
5195 	tk = task_create(0, zone);
5196 	oldtk = task_join(tk, 0);
5197 	mutex_exit(&cpu_lock);
5198 
5199 	pp->p_flag |= SZONETOP;
5200 	pp->p_zone = zone;
5201 
5202 	/*
5203 	 * call RCTLOP_SET functions on this proc
5204 	 */
5205 	e.rcep_p.zone = zone;
5206 	e.rcep_t = RCENTITY_ZONE;
5207 	(void) rctl_set_dup(NULL, NULL, pp, &e, zone->zone_rctls, NULL,
5208 	    RCD_CALLBACK);
5209 	mutex_exit(&pp->p_lock);
5210 
5211 	/*
5212 	 * We don't need to hold any of zsched's locks here; not only do we know
5213 	 * the process and zone aren't going away, we know its session isn't
5214 	 * changing either.
5215 	 *
5216 	 * By joining zsched's session here, we mimic the behavior in the
5217 	 * global zone of init's sid being the pid of sched.  We extend this
5218 	 * to all zlogin-like zone_enter()'ing processes as well.
5219 	 */
5220 	mutex_enter(&pidlock);
5221 	sp = zone->zone_zsched->p_sessp;
5222 	sess_hold(zone->zone_zsched);
5223 	mutex_enter(&pp->p_lock);
5224 	pgexit(pp);
5225 	sess_rele(pp->p_sessp, B_TRUE);
5226 	pp->p_sessp = sp;
5227 	pgjoin(pp, zone->zone_zsched->p_pidp);
5228 
5229 	/*
5230 	 * If any threads are scheduled to be placed on zone wait queue they
5231 	 * should abandon the idea since the wait queue is changing.
5232 	 * We need to be holding pidlock & p_lock to do this.
5233 	 */
5234 	if ((t = pp->p_tlist) != NULL) {
5235 		do {
5236 			thread_lock(t);
5237 			/*
5238 			 * Kick this thread so that it doesn't sit
5239 			 * on the wrong wait queue.
5240 			 */
5241 			if (ISWAITING(t))
5242 				setrun_locked(t);
5243 
5244 			if (t->t_schedflag & TS_ANYWAITQ)
5245 				t->t_schedflag &= ~ TS_ANYWAITQ;
5246 
5247 			thread_unlock(t);
5248 		} while ((t = t->t_forw) != pp->p_tlist);
5249 	}
5250 
5251 	/*
5252 	 * If there is a default scheduling class for the zone and it is not
5253 	 * the class we are currently in, change all of the threads in the
5254 	 * process to the new class.  We need to be holding pidlock & p_lock
5255 	 * when we call parmsset so this is a good place to do it.
5256 	 */
5257 	if (zone->zone_defaultcid > 0 &&
5258 	    zone->zone_defaultcid != curthread->t_cid) {
5259 		pcparms_t pcparms;
5260 
5261 		pcparms.pc_cid = zone->zone_defaultcid;
5262 		pcparms.pc_clparms[0] = 0;
5263 
5264 		/*
5265 		 * If setting the class fails, we still want to enter the zone.
5266 		 */
5267 		if ((t = pp->p_tlist) != NULL) {
5268 			do {
5269 				(void) parmsset(&pcparms, t);
5270 			} while ((t = t->t_forw) != pp->p_tlist);
5271 		}
5272 	}
5273 
5274 	mutex_exit(&pp->p_lock);
5275 	mutex_exit(&pidlock);
5276 
5277 	mutex_exit(&zonehash_lock);
5278 	/*
5279 	 * We're firmly in the zone; let pools progress.
5280 	 */
5281 	pool_unlock();
5282 	task_rele(oldtk);
5283 	/*
5284 	 * We don't need to retain a hold on the zone since we already
5285 	 * incremented zone_ntasks, so the zone isn't going anywhere.
5286 	 */
5287 	zone_rele(zone);
5288 
5289 	/*
5290 	 * Chroot
5291 	 */
5292 	vp = zone->zone_rootvp;
5293 	zone_chdir(vp, &PTOU(pp)->u_cdir, pp);
5294 	zone_chdir(vp, &PTOU(pp)->u_rdir, pp);
5295 
5296 	/*
5297 	 * Change process credentials
5298 	 */
5299 	newcr = cralloc();
5300 	mutex_enter(&pp->p_crlock);
5301 	cr = pp->p_cred;
5302 	crcopy_to(cr, newcr);
5303 	crsetzone(newcr, zone);
5304 	pp->p_cred = newcr;
5305 
5306 	/*
5307 	 * Restrict all process privilege sets to zone limit
5308 	 */
5309 	priv_intersect(zone->zone_privset, &CR_PPRIV(newcr));
5310 	priv_intersect(zone->zone_privset, &CR_EPRIV(newcr));
5311 	priv_intersect(zone->zone_privset, &CR_IPRIV(newcr));
5312 	priv_intersect(zone->zone_privset, &CR_LPRIV(newcr));
5313 	mutex_exit(&pp->p_crlock);
5314 	crset(pp, newcr);
5315 
5316 	/*
5317 	 * Adjust upcount to reflect zone entry.
5318 	 */
5319 	uid = crgetruid(newcr);
5320 	mutex_enter(&pidlock);
5321 	upcount_dec(uid, GLOBAL_ZONEID);
5322 	upcount_inc(uid, zoneid);
5323 	mutex_exit(&pidlock);
5324 
5325 	/*
5326 	 * Set up core file path and content.
5327 	 */
5328 	set_core_defaults();
5329 
5330 out:
5331 	/*
5332 	 * Let the other lwps continue.
5333 	 */
5334 	mutex_enter(&pp->p_lock);
5335 	if (curthread != pp->p_agenttp)
5336 		continuelwps(pp);
5337 	mutex_exit(&pp->p_lock);
5338 
5339 	return (err != 0 ? set_errno(err) : 0);
5340 }
5341 
5342 /*
5343  * Systemcall entry point for zone_list(2).
5344  *
5345  * Processes running in a (non-global) zone only see themselves.
5346  * On labeled systems, they see all zones whose label they dominate.
5347  */
5348 static int
5349 zone_list(zoneid_t *zoneidlist, uint_t *numzones)
5350 {
5351 	zoneid_t *zoneids;
5352 	zone_t *zone, *myzone;
5353 	uint_t user_nzones, real_nzones;
5354 	uint_t domi_nzones;
5355 	int error;
5356 
5357 	if (copyin(numzones, &user_nzones, sizeof (uint_t)) != 0)
5358 		return (set_errno(EFAULT));
5359 
5360 	myzone = curproc->p_zone;
5361 	if (myzone != global_zone) {
5362 		bslabel_t *mybslab;
5363 
5364 		if (!is_system_labeled()) {
5365 			/* just return current zone */
5366 			real_nzones = domi_nzones = 1;
5367 			zoneids = kmem_alloc(sizeof (zoneid_t), KM_SLEEP);
5368 			zoneids[0] = myzone->zone_id;
5369 		} else {
5370 			/* return all zones that are dominated */
5371 			mutex_enter(&zonehash_lock);
5372 			real_nzones = zonecount;
5373 			domi_nzones = 0;
5374 			if (real_nzones > 0) {
5375 				zoneids = kmem_alloc(real_nzones *
5376 				    sizeof (zoneid_t), KM_SLEEP);
5377 				mybslab = label2bslabel(myzone->zone_slabel);
5378 				for (zone = list_head(&zone_active);
5379 				    zone != NULL;
5380 				    zone = list_next(&zone_active, zone)) {
5381 					if (zone->zone_id == GLOBAL_ZONEID)
5382 						continue;
5383 					if (zone != myzone &&
5384 					    (zone->zone_flags & ZF_IS_SCRATCH))
5385 						continue;
5386 					/*
5387 					 * Note that a label always dominates
5388 					 * itself, so myzone is always included
5389 					 * in the list.
5390 					 */
5391 					if (bldominates(mybslab,
5392 					    label2bslabel(zone->zone_slabel))) {
5393 						zoneids[domi_nzones++] =
5394 						    zone->zone_id;
5395 					}
5396 				}
5397 			}
5398 			mutex_exit(&zonehash_lock);
5399 		}
5400 	} else {
5401 		mutex_enter(&zonehash_lock);
5402 		real_nzones = zonecount;
5403 		domi_nzones = 0;
5404 		if (real_nzones > 0) {
5405 			zoneids = kmem_alloc(real_nzones * sizeof (zoneid_t),
5406 			    KM_SLEEP);
5407 			for (zone = list_head(&zone_active); zone != NULL;
5408 			    zone = list_next(&zone_active, zone))
5409 				zoneids[domi_nzones++] = zone->zone_id;
5410 			ASSERT(domi_nzones == real_nzones);
5411 		}
5412 		mutex_exit(&zonehash_lock);
5413 	}
5414 
5415 	/*
5416 	 * If the caller allocated space for fewer entries than we found, then
5417 	 * return only up to that limit.  Either way, report exactly how many
5418 	 * we found.
5419 	 */
5420 	if (domi_nzones < user_nzones)
5421 		user_nzones = domi_nzones;
5422 	error = 0;
5423 	if (copyout(&domi_nzones, numzones, sizeof (uint_t)) != 0) {
5424 		error = EFAULT;
5425 	} else if (zoneidlist != NULL && user_nzones != 0) {
5426 		if (copyout(zoneids, zoneidlist,
5427 		    user_nzones * sizeof (zoneid_t)) != 0)
5428 			error = EFAULT;
5429 	}
5430 
5431 	if (real_nzones > 0)
5432 		kmem_free(zoneids, real_nzones * sizeof (zoneid_t));
5433 
5434 	if (error != 0)
5435 		return (set_errno(error));
5436 	else
5437 		return (0);
5438 }
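/*
 * Usage sketch: the copyin/copyout protocol above supports a count-then-copy
 * pattern from userland.  This sketch assumes a userland wrapper with the
 * same signature as this entry point (libc provides one as a private
 * interface); it is illustrative, not a committed interface.
 *
 *	#include <sys/types.h>
 *	#include <sys/zone.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	static void
 *	print_visible_zone_ids(void)
 *	{
 *		uint_t asked = 0, got, n, i;
 *		zoneid_t *ids;
 *
 *		if (zone_list(NULL, &asked) != 0 || asked == 0)
 *			return;
 *		ids = malloc(asked * sizeof (zoneid_t));
 *		if (ids == NULL)
 *			return;
 *		got = asked;
 *		if (zone_list(ids, &got) == 0) {
 *			n = (got < asked) ? got : asked;
 *			for (i = 0; i < n; i++)
 *				(void) printf("%d\n", (int)ids[i]);
 *		}
 *		free(ids);
 *	}
 *
 * Note that the kernel reports the total number of zones found even when the
 * caller's buffer is smaller, so the loop is bounded by the smaller count.
 */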
5439 
5440 /*
5441  * System call entry point for zone_lookup(2).
5442  *
5443  * Non-global zones are only able to see themselves and (on labeled systems)
5444  * the zones they dominate.
5445  */
5446 static zoneid_t
5447 zone_lookup(const char *zone_name)
5448 {
5449 	char *kname;
5450 	zone_t *zone;
5451 	zoneid_t zoneid;
5452 	int err;
5453 
5454 	if (zone_name == NULL) {
5455 		/* return caller's zone id */
5456 		return (getzoneid());
5457 	}
5458 
5459 	kname = kmem_zalloc(ZONENAME_MAX, KM_SLEEP);
5460 	if ((err = copyinstr(zone_name, kname, ZONENAME_MAX, NULL)) != 0) {
5461 		kmem_free(kname, ZONENAME_MAX);
5462 		return (set_errno(err));
5463 	}
5464 
5465 	mutex_enter(&zonehash_lock);
5466 	zone = zone_find_all_by_name(kname);
5467 	kmem_free(kname, ZONENAME_MAX);
5468 	/*
5469 	 * In a non-global zone, can only lookup global and own name.
5470 	 * In Trusted Extensions zone label dominance rules apply.
5471 	 */
5472 	if (zone == NULL ||
5473 	    zone_status_get(zone) < ZONE_IS_READY ||
5474 	    !zone_list_access(zone)) {
5475 		mutex_exit(&zonehash_lock);
5476 		return (set_errno(EINVAL));
5477 	} else {
5478 		zoneid = zone->zone_id;
5479 		mutex_exit(&zonehash_lock);
5480 		return (zoneid);
5481 	}
5482 }
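/*
 * Usage sketch: unprivileged userland code normally reaches this entry point
 * through the documented getzoneidbyname(3C) wrapper; the zone name below is
 * hypothetical.
 *
 *	#include <zone.h>
 *	#include <stdio.h>
 *
 *	static void
 *	show_zone_id(const char *name)
 *	{
 *		zoneid_t id = getzoneidbyname(name);
 *
 *		if (id == -1)
 *			perror("getzoneidbyname");
 *		else
 *			(void) printf("%s is zone %d\n", name, (int)id);
 *	}
 *
 * Per the checks above, a lookup fails with EINVAL unless the target zone is
 * at least ZONE_IS_READY and passes zone_list_access().
 */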
5483 
5484 static int
5485 zone_version(int *version_arg)
5486 {
5487 	int version = ZONE_SYSCALL_API_VERSION;
5488 
5489 	if (copyout(&version, version_arg, sizeof (int)) != 0)
5490 		return (set_errno(EFAULT));
5491 	return (0);
5492 }
5493 
5494 /* ARGSUSED */
5495 long
5496 zone(int cmd, void *arg1, void *arg2, void *arg3, void *arg4)
5497 {
5498 	zone_def zs;
5499 
5500 	switch (cmd) {
5501 	case ZONE_CREATE:
5502 		if (get_udatamodel() == DATAMODEL_NATIVE) {
5503 			if (copyin(arg1, &zs, sizeof (zone_def))) {
5504 				return (set_errno(EFAULT));
5505 			}
5506 		} else {
5507 #ifdef _SYSCALL32_IMPL
5508 			zone_def32 zs32;
5509 
5510 			if (copyin(arg1, &zs32, sizeof (zone_def32))) {
5511 				return (set_errno(EFAULT));
5512 			}
5513 			zs.zone_name =
5514 			    (const char *)(unsigned long)zs32.zone_name;
5515 			zs.zone_root =
5516 			    (const char *)(unsigned long)zs32.zone_root;
5517 			zs.zone_privs =
5518 			    (const struct priv_set *)
5519 			    (unsigned long)zs32.zone_privs;
5520 			zs.zone_privssz = zs32.zone_privssz;
5521 			zs.rctlbuf = (caddr_t)(unsigned long)zs32.rctlbuf;
5522 			zs.rctlbufsz = zs32.rctlbufsz;
5523 			zs.zfsbuf = (caddr_t)(unsigned long)zs32.zfsbuf;
5524 			zs.zfsbufsz = zs32.zfsbufsz;
5525 			zs.extended_error =
5526 			    (int *)(unsigned long)zs32.extended_error;
5527 			zs.match = zs32.match;
5528 			zs.doi = zs32.doi;
5529 			zs.label = (const bslabel_t *)(uintptr_t)zs32.label;
5530 			zs.flags = zs32.flags;
5531 #else
5532 			panic("get_udatamodel() returned bogus result\n");
5533 #endif
5534 		}
5535 
5536 		return (zone_create(zs.zone_name, zs.zone_root,
5537 		    zs.zone_privs, zs.zone_privssz,
5538 		    (caddr_t)zs.rctlbuf, zs.rctlbufsz,
5539 		    (caddr_t)zs.zfsbuf, zs.zfsbufsz,
5540 		    zs.extended_error, zs.match, zs.doi,
5541 		    zs.label, zs.flags));
5542 	case ZONE_BOOT:
5543 		return (zone_boot((zoneid_t)(uintptr_t)arg1));
5544 	case ZONE_DESTROY:
5545 		return (zone_destroy((zoneid_t)(uintptr_t)arg1));
5546 	case ZONE_GETATTR:
5547 		return (zone_getattr((zoneid_t)(uintptr_t)arg1,
5548 		    (int)(uintptr_t)arg2, arg3, (size_t)arg4));
5549 	case ZONE_SETATTR:
5550 		return (zone_setattr((zoneid_t)(uintptr_t)arg1,
5551 		    (int)(uintptr_t)arg2, arg3, (size_t)arg4));
5552 	case ZONE_ENTER:
5553 		return (zone_enter((zoneid_t)(uintptr_t)arg1));
5554 	case ZONE_LIST:
5555 		return (zone_list((zoneid_t *)arg1, (uint_t *)arg2));
5556 	case ZONE_SHUTDOWN:
5557 		return (zone_shutdown((zoneid_t)(uintptr_t)arg1));
5558 	case ZONE_LOOKUP:
5559 		return (zone_lookup((const char *)arg1));
5560 	case ZONE_VERSION:
5561 		return (zone_version((int *)arg1));
5562 	case ZONE_ADD_DATALINK:
5563 		return (zone_add_datalink((zoneid_t)(uintptr_t)arg1,
5564 		    (char *)arg2));
5565 	case ZONE_DEL_DATALINK:
5566 		return (zone_remove_datalink((zoneid_t)(uintptr_t)arg1,
5567 		    (char *)arg2));
5568 	case ZONE_CHECK_DATALINK:
5569 		return (zone_check_datalink((zoneid_t *)arg1, (char *)arg2));
5570 	case ZONE_LIST_DATALINK:
5571 		return (zone_list_datalink((zoneid_t)(uintptr_t)arg1,
5572 		    (int *)arg2, (char *)arg3));
5573 	default:
5574 		return (set_errno(EINVAL));
5575 	}
5576 }
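/*
 * All of the subcommands above arrive through this single system call entry;
 * userland typically reaches it via thin per-subcommand wrappers in libc
 * (e.g. the zone_list() and zone_lookup() calls sketched above) rather than
 * invoking it directly.  Scalar arguments travel in the void * slots and are
 * narrowed back through (uintptr_t) casts so the same entry point serves
 * both 32-bit and 64-bit callers.
 */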
5577 
5578 struct zarg {
5579 	zone_t *zone;
5580 	zone_cmd_arg_t arg;
5581 };
5582 
5583 static int
5584 zone_lookup_door(const char *zone_name, door_handle_t *doorp)
5585 {
5586 	char *buf;
5587 	size_t buflen;
5588 	int error;
5589 
5590 	buflen = sizeof (ZONE_DOOR_PATH) + strlen(zone_name);
5591 	buf = kmem_alloc(buflen, KM_SLEEP);
5592 	(void) snprintf(buf, buflen, ZONE_DOOR_PATH, zone_name);
5593 	error = door_ki_open(buf, doorp);
5594 	kmem_free(buf, buflen);
5595 	return (error);
5596 }
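/*
 * ZONE_DOOR_PATH is a printf-style template defined in sys/zone.h; for a
 * zone named "web" it typically expands to something like
 * /var/run/zones/web.zoneadmd_door, the rendezvous door that zoneadmd
 * creates when it starts managing the zone.
 */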
5597 
5598 static void
5599 zone_release_door(door_handle_t *doorp)
5600 {
5601 	door_ki_rele(*doorp);
5602 	*doorp = NULL;
5603 }
5604 
5605 static void
5606 zone_ki_call_zoneadmd(struct zarg *zargp)
5607 {
5608 	door_handle_t door = NULL;
5609 	door_arg_t darg, save_arg;
5610 	char *zone_name;
5611 	size_t zone_namelen;
5612 	zoneid_t zoneid;
5613 	zone_t *zone;
5614 	zone_cmd_arg_t arg;
5615 	uint64_t uniqid;
5616 	size_t size;
5617 	int error;
5618 	int retry;
5619 
5620 	zone = zargp->zone;
5621 	arg = zargp->arg;
5622 	kmem_free(zargp, sizeof (*zargp));
5623 
5624 	zone_namelen = strlen(zone->zone_name) + 1;
5625 	zone_name = kmem_alloc(zone_namelen, KM_SLEEP);
5626 	bcopy(zone->zone_name, zone_name, zone_namelen);
5627 	zoneid = zone->zone_id;
5628 	uniqid = zone->zone_uniqid;
5629 	/*
5630 	 * zoneadmd may be down, but at least we can empty out the zone.
5631 	 * We can ignore the return value of zone_empty() since we're called
5632 	 * from a kernel thread and know we won't be delivered any signals.
5633 	 */
5634 	ASSERT(curproc == &p0);
5635 	(void) zone_empty(zone);
5636 	ASSERT(zone_status_get(zone) >= ZONE_IS_EMPTY);
5637 	zone_rele(zone);
5638 
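	/*
	 * Set up the door arguments so that zoneadmd's reply lands on top of
	 * the request (rbuf and data_ptr both point at arg).  save_arg keeps
	 * a pristine copy because door_ki_upcall_limited() may rewrite darg,
	 * and each retry below starts from the saved copy.
	 */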
5639 	size = sizeof (arg);
5640 	darg.rbuf = (char *)&arg;
5641 	darg.data_ptr = (char *)&arg;
5642 	darg.rsize = size;
5643 	darg.data_size = size;
5644 	darg.desc_ptr = NULL;
5645 	darg.desc_num = 0;
5646 
5647 	save_arg = darg;
5648 	/*
5649 	 * Since we're not holding a reference to the zone, any number of
5650 	 * things can go wrong, including the zone disappearing before we get a
5651 	 * chance to talk to zoneadmd.
5652 	 */
5653 	for (retry = 0; /* forever */; retry++) {
5654 		if (door == NULL &&
5655 		    (error = zone_lookup_door(zone_name, &door)) != 0) {
5656 			goto next;
5657 		}
5658 		ASSERT(door != NULL);
5659 
5660 		if ((error = door_ki_upcall_limited(door, &darg, NULL,
5661 		    SIZE_MAX, 0)) == 0) {
5662 			break;
5663 		}
5664 		switch (error) {
5665 		case EINTR:
5666 			/* FALLTHROUGH */
5667 		case EAGAIN:	/* process may be forking */
5668 			/*
5669 			 * Back off for a bit
5670 			 */
5671 			break;
5672 		case EBADF:
5673 			zone_release_door(&door);
5674 			if (zone_lookup_door(zone_name, &door) != 0) {
5675 				/*
5676 				 * zoneadmd may be dead, but it may come back to
5677 				 * life later.
5678 				 */
5679 				break;
5680 			}
5681 			break;
5682 		default:
5683 			cmn_err(CE_WARN,
5684 			    "zone_ki_call_zoneadmd: door_ki_upcall error %d\n",
5685 			    error);
5686 			goto out;
5687 		}
5688 next:
5689 		/*
5690 		 * If this isn't the same zone_t that we originally had in mind,
5691 		 * then this is the same as if two kadmin requests come in at
5692 		 * the same time: the first one wins.  This means we lose, so we
5693 		 * bail.
5694 		 */
5695 		if ((zone = zone_find_by_id(zoneid)) == NULL) {
5696 			/*
5697 			 * Problem is solved.
5698 			 */
5699 			break;
5700 		}
5701 		if (zone->zone_uniqid != uniqid) {
5702 			/*
5703 			 * zoneid recycled
5704 			 */
5705 			zone_rele(zone);
5706 			break;
5707 		}
5708 		/*
5709 		 * We could zone_status_timedwait(), but there doesn't seem to
5710 		 * be much point in doing that (plus, it would mean that
5711 		 * zone_free() isn't called until this thread exits).
5712 		 */
5713 		zone_rele(zone);
5714 		delay(hz);
5715 		darg = save_arg;
5716 	}
5717 out:
5718 	if (door != NULL) {
5719 		zone_release_door(&door);
5720 	}
5721 	kmem_free(zone_name, zone_namelen);
5722 	thread_exit();
5723 }
5724 
5725 /*
5726  * Entry point for uadmin() to tell the zone to go away or reboot.  Analog to
5727  * kadmin().  The caller is a process in the zone.
5728  *
5729  * In order to shutdown the zone, we will hand off control to zoneadmd
5730  * (running in the global zone) via a door.  We do a half-hearted job of
5731  * killing all processes in the zone, create a kernel thread to contact
5732  * zoneadmd, and make note of the "uniqid" of the zone.  The uniqid is
5733  * a form of generation number used to let zoneadmd (as well as
5734  * zone_destroy()) know exactly which zone they're re talking about.
5735  */
5736 int
5737 zone_kadmin(int cmd, int fcn, const char *mdep, cred_t *credp)
5738 {
5739 	struct zarg *zargp;
5740 	zone_cmd_t zcmd;
5741 	zone_t *zone;
5742 
5743 	zone = curproc->p_zone;
5744 	ASSERT(getzoneid() != GLOBAL_ZONEID);
5745 
5746 	switch (cmd) {
5747 	case A_SHUTDOWN:
5748 		switch (fcn) {
5749 		case AD_HALT:
5750 		case AD_POWEROFF:
5751 			zcmd = Z_HALT;
5752 			break;
5753 		case AD_BOOT:
5754 			zcmd = Z_REBOOT;
5755 			break;
5756 		case AD_IBOOT:
5757 		case AD_SBOOT:
5758 		case AD_SIBOOT:
5759 		case AD_NOSYNC:
5760 			return (ENOTSUP);
5761 		default:
5762 			return (EINVAL);
5763 		}
5764 		break;
5765 	case A_REBOOT:
5766 		zcmd = Z_REBOOT;
5767 		break;
5768 	case A_FTRACE:
5769 	case A_REMOUNT:
5770 	case A_FREEZE:
5771 	case A_DUMP:
5772 		return (ENOTSUP);
5773 	default:
5774 		ASSERT(cmd != A_SWAPCTL);	/* handled by uadmin() */
5775 		return (EINVAL);
5776 	}
5777 
5778 	if (secpolicy_zone_admin(credp, B_FALSE))
5779 		return (EPERM);
5780 	mutex_enter(&zone_status_lock);
5781 
5782 	/*
5783 	 * zone_status can't be ZONE_IS_EMPTY or higher since curproc
5784 	 * is in the zone.
5785 	 */
5786 	ASSERT(zone_status_get(zone) < ZONE_IS_EMPTY);
5787 	if (zone_status_get(zone) > ZONE_IS_RUNNING) {
5788 		/*
5789 		 * This zone is already on its way down.
5790 		 */
5791 		mutex_exit(&zone_status_lock);
5792 		return (0);
5793 	}
5794 	/*
5795 	 * Prevent future zone_enter()s
5796 	 */
5797 	zone_status_set(zone, ZONE_IS_SHUTTING_DOWN);
5798 	mutex_exit(&zone_status_lock);
5799 
5800 	/*
5801 	 * Kill everyone now and call zoneadmd later.
5802 	 * zone_ki_call_zoneadmd() will do a more thorough job of this
5803 	 * later.
5804 	 */
5805 	killall(zone->zone_id);
5806 	/*
5807 	 * Now, create the thread to contact zoneadmd and do the rest of the
5808 	 * work.  This thread can't be created in our zone otherwise
5809 	 * zone_destroy() would deadlock.
5810 	 */
5811 	zargp = kmem_zalloc(sizeof (*zargp), KM_SLEEP);
5812 	zargp->arg.cmd = zcmd;
5813 	zargp->arg.uniqid = zone->zone_uniqid;
5814 	zargp->zone = zone;
5815 	(void) strcpy(zargp->arg.locale, "C");
5816 	/* mdep was already copied in for us by uadmin */
5817 	if (mdep != NULL)
5818 		(void) strlcpy(zargp->arg.bootbuf, mdep,
5819 		    sizeof (zargp->arg.bootbuf));
5820 	zone_hold(zone);
5821 
5822 	(void) thread_create(NULL, 0, zone_ki_call_zoneadmd, zargp, 0, &p0,
5823 	    TS_RUN, minclsyspri);
5824 	exit(CLD_EXITED, 0);
5825 
5826 	return (EINVAL);
5827 }
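/*
 * Usage sketch: from inside a non-global zone, a process with sufficient
 * privilege (one that passes secpolicy_zone_admin()) can request a zone
 * reboot through the documented uadmin(2) interface; per the switch above
 * the request is translated to Z_REBOOT and handed to zoneadmd:
 *
 *	#include <sys/uadmin.h>
 *
 *	(void) uadmin(A_SHUTDOWN, AD_BOOT, 0);
 *
 * Global-only functions such as A_DUMP, A_REMOUNT and A_FREEZE return
 * ENOTSUP when issued from within a zone.
 */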
5828 
5829 /*
5830  * Entry point so kadmin(A_SHUTDOWN, ...) can set the global zone's
5831  * status to ZONE_IS_SHUTTING_DOWN.
5832  *
5833  * This function also shuts down all running zones to ensure that they won't
5834  * fork new processes.
5835  */
5836 void
5837 zone_shutdown_global(void)
5838 {
5839 	zone_t *current_zonep;
5840 
5841 	ASSERT(INGLOBALZONE(curproc));
5842 	mutex_enter(&zonehash_lock);
5843 	mutex_enter(&zone_status_lock);
5844 
5845 	/* Modify the global zone's status first. */
5846 	ASSERT(zone_status_get(global_zone) == ZONE_IS_RUNNING);
5847 	zone_status_set(global_zone, ZONE_IS_SHUTTING_DOWN);
5848 
5849 	/*
5850 	 * Now change the states of all running zones to ZONE_IS_SHUTTING_DOWN.
5851 	 * We don't mark all zones with ZONE_IS_SHUTTING_DOWN because doing so
5852 	 * could cause assertions to fail (e.g., assertions about a zone's
5853 	 * state during initialization, readying, or booting) or produce races.
5854 	 * We'll let threads continue to initialize and ready new zones: they'll
5855 	 * fail to boot the new zones when they see that the global zone is
5856 	 * shutting down.
5857 	 */
5858 	for (current_zonep = list_head(&zone_active); current_zonep != NULL;
5859 	    current_zonep = list_next(&zone_active, current_zonep)) {
5860 		if (zone_status_get(current_zonep) == ZONE_IS_RUNNING)
5861 			zone_status_set(current_zonep, ZONE_IS_SHUTTING_DOWN);
5862 	}
5863 	mutex_exit(&zone_status_lock);
5864 	mutex_exit(&zonehash_lock);
5865 }
5866 
5867 /*
5868  * Returns true if the named dataset is visible in the current zone.
5869  * The 'write' parameter is set to 1 if the dataset is also writable.
5870  */
5871 int
5872 zone_dataset_visible(const char *dataset, int *write)
5873 {
5874 	zone_dataset_t *zd;
5875 	size_t len;
5876 	zone_t *zone = curproc->p_zone;
5877 
5878 	if (dataset[0] == '\0')
5879 		return (0);
5880 
5881 	/*
5882 	 * Walk the list once, looking for datasets which match exactly, or
5883 	 * specify a dataset underneath an exported dataset.  If found, return
5884 	 * true and note that it is writable.
5885 	 */
5886 	for (zd = list_head(&zone->zone_datasets); zd != NULL;
5887 	    zd = list_next(&zone->zone_datasets, zd)) {
5888 
5889 		len = strlen(zd->zd_dataset);
5890 		if (strlen(dataset) >= len &&
5891 		    bcmp(dataset, zd->zd_dataset, len) == 0 &&
5892 		    (dataset[len] == '\0' || dataset[len] == '/' ||
5893 		    dataset[len] == '@')) {
5894 			if (write)
5895 				*write = 1;
5896 			return (1);
5897 		}
5898 	}
5899 
5900 	/*
5901 	 * Walk the list a second time, searching for datasets which are parents
5902 	 * of exported datasets.  These should be visible, but read-only.
5903 	 *
5904 	 * Note that we also have to support forms such as 'pool/dataset/', with
5905 	 * a trailing slash.
5906 	 */
5907 	for (zd = list_head(&zone->zone_datasets); zd != NULL;
5908 	    zd = list_next(&zone->zone_datasets, zd)) {
5909 
5910 		len = strlen(dataset);
5911 		if (dataset[len - 1] == '/')
5912 			len--;	/* Ignore trailing slash */
5913 		if (len < strlen(zd->zd_dataset) &&
5914 		    bcmp(dataset, zd->zd_dataset, len) == 0 &&
5915 		    zd->zd_dataset[len] == '/') {
5916 			if (write)
5917 				*write = 0;
5918 			return (1);
5919 		}
5920 	}
5921 
5922 	return (0);
5923 }
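/*
 * Worked example of the rules above: with a single delegated dataset
 * "tank/zones/z1/data" (a hypothetical name) on the zone's list,
 *
 *	"tank/zones/z1/data"			visible, writable (exact match)
 *	"tank/zones/z1/data/sub"		visible, writable (child)
 *	"tank/zones/z1/data@snap"		visible, writable (snapshot)
 *	"tank/zones/z1" or "tank/zones/z1/"	visible, read-only (parent)
 *	"tank/other"				not visible
 */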
5924 
5925 /*
5926  * zone_find_by_any_path() -
5927  *
5928  * kernel-private routine similar to zone_find_by_path(), but which
5929  * effectively compares against zone paths rather than zonerootpath
5930  * (i.e., the last component of each zonerootpath, which should be "root/",
5931  * is not compared.)  This is done in order to accurately identify all
5932  * paths, whether zone-visible or not, including those which are parallel
5933  * to /root/, such as /dev/, /home/, etc...
5934  *
5935  * If the specified path does not fall under any zone path then global
5936  * zone is returned.
5937  *
5938  * The treat_abs parameter indicates whether the path should be treated as
5939  * an absolute path although it does not begin with "/".  (This supports
5940  * nfs mount syntax such as host:any/path.)
5941  *
5942  * The caller is responsible for zone_rele of the returned zone.
5943  */
5944 zone_t *
5945 zone_find_by_any_path(const char *path, boolean_t treat_abs)
5946 {
5947 	zone_t *zone;
5948 	int path_offset = 0;
5949 
5950 	if (path == NULL) {
5951 		zone_hold(global_zone);
5952 		return (global_zone);
5953 	}
5954 
5955 	if (*path != '/') {
5956 		ASSERT(treat_abs);
5957 		path_offset = 1;
5958 	}
5959 
5960 	mutex_enter(&zonehash_lock);
5961 	for (zone = list_head(&zone_active); zone != NULL;
5962 	    zone = list_next(&zone_active, zone)) {
5963 		char	*c;
5964 		size_t	pathlen;
5965 		char *rootpath_start;
5966 
5967 		if (zone == global_zone)	/* skip global zone */
5968 			continue;
5969 
5970 		/* scan backwards to find start of last component */
5971 		c = zone->zone_rootpath + zone->zone_rootpathlen - 2;
5972 		do {
5973 			c--;
5974 		} while (*c != '/');
5975 
5976 		pathlen = c - zone->zone_rootpath + 1 - path_offset;
5977 		rootpath_start = (zone->zone_rootpath + path_offset);
5978 		if (strncmp(path, rootpath_start, pathlen) == 0)
5979 			break;
5980 	}
5981 	if (zone == NULL)
5982 		zone = global_zone;
5983 	zone_hold(zone);
5984 	mutex_exit(&zonehash_lock);
5985 	return (zone);
5986 }
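/*
 * Worked example: for a zone whose zone_rootpath is "/zones/foo/root/"
 * (a hypothetical path), the scan above strips the trailing "root/"
 * component and prefix-matches against "/zones/foo/", so both
 * "/zones/foo/root/etc/passwd" and "/zones/foo/dev/dsk/c0t0d0s0" resolve to
 * that zone, while "/zones/bar/..." falls through to the global zone.  With
 * treat_abs set, "zones/foo/root/export" (as in an NFS host:path spec) is
 * compared against the rootpath with its leading '/' skipped.
 */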
5987 
5988 /* List of data link names which are accessible from the zone */
5989 struct dlnamelist {
5990 	char			dlnl_name[LIFNAMSIZ];
5991 	struct dlnamelist	*dlnl_next;
5992 };
5993 
5994 
5995 /*
5996  * Check whether the datalink name (dlname) itself is present.
5997  * Return true if found.
5998  */
5999 static boolean_t
6000 zone_dlname(zone_t *zone, char *dlname)
6001 {
6002 	struct dlnamelist *dlnl;
6003 	boolean_t found = B_FALSE;
6004 
6005 	mutex_enter(&zone->zone_lock);
6006 	for (dlnl = zone->zone_dl_list; dlnl != NULL; dlnl = dlnl->dlnl_next) {
6007 		if (strncmp(dlnl->dlnl_name, dlname, LIFNAMSIZ) == 0) {
6008 			found = B_TRUE;
6009 			break;
6010 		}
6011 	}
6012 	mutex_exit(&zone->zone_lock);
6013 	return (found);
6014 }
6015 
6016 /*
6017  * Add a datalink name for the zone.  Does not check for duplicates.
6018  */
6019 static int
6020 zone_add_datalink(zoneid_t zoneid, char *dlname)
6021 {
6022 	struct dlnamelist *dlnl;
6023 	zone_t *zone;
6024 	zone_t *thiszone;
6025 	int err;
6026 
6027 	dlnl = kmem_zalloc(sizeof (struct dlnamelist), KM_SLEEP);
6028 	if ((err = copyinstr(dlname, dlnl->dlnl_name, LIFNAMSIZ, NULL)) != 0) {
6029 		kmem_free(dlnl, sizeof (struct dlnamelist));
6030 		return (set_errno(err));
6031 	}
6032 
6033 	thiszone = zone_find_by_id(zoneid);
6034 	if (thiszone == NULL) {
6035 		kmem_free(dlnl, sizeof (struct dlnamelist));
6036 		return (set_errno(ENXIO));
6037 	}
6038 
6039 	/*
6040 	 * Verify that the datalink name isn't already used by a different
6041 	 * zone while allowing duplicate entries for the same zone (e.g. due
6042 	 * to using both IPv4 and IPv6 on an interface).
6043 	 */
6044 	mutex_enter(&zonehash_lock);
6045 	for (zone = list_head(&zone_active); zone != NULL;
6046 	    zone = list_next(&zone_active, zone)) {
6047 		if (zone->zone_id == zoneid)
6048 			continue;
6049 
6050 		if (zone_dlname(zone, dlnl->dlnl_name)) {
6051 			mutex_exit(&zonehash_lock);
6052 			zone_rele(thiszone);
6053 			kmem_free(dlnl, sizeof (struct dlnamelist));
6054 			return (set_errno(EPERM));
6055 		}
6056 	}
6057 	mutex_enter(&thiszone->zone_lock);
6058 	dlnl->dlnl_next = thiszone->zone_dl_list;
6059 	thiszone->zone_dl_list = dlnl;
6060 	mutex_exit(&thiszone->zone_lock);
6061 	mutex_exit(&zonehash_lock);
6062 	zone_rele(thiszone);
6063 	return (0);
6064 }
6065 
6066 static int
6067 zone_remove_datalink(zoneid_t zoneid, char *dlname)
6068 {
6069 	struct dlnamelist *dlnl, *odlnl, **dlnlp;
6070 	zone_t *zone;
6071 	int err;
6072 
6073 	dlnl = kmem_zalloc(sizeof (struct dlnamelist), KM_SLEEP);
6074 	if ((err = copyinstr(dlname, dlnl->dlnl_name, LIFNAMSIZ, NULL)) != 0) {
6075 		kmem_free(dlnl, sizeof (struct dlnamelist));
6076 		return (set_errno(err));
6077 	}
6078 	zone = zone_find_by_id(zoneid);
6079 	if (zone == NULL) {
6080 		kmem_free(dlnl, sizeof (struct dlnamelist));
6081 		return (set_errno(EINVAL));
6082 	}
6083 
6084 	mutex_enter(&zone->zone_lock);
6085 	/* Look for match */
6086 	dlnlp = &zone->zone_dl_list;
6087 	while (*dlnlp != NULL) {
6088 		if (strncmp(dlnl->dlnl_name, (*dlnlp)->dlnl_name,
6089 		    LIFNAMSIZ) == 0)
6090 			goto found;
6091 		dlnlp = &((*dlnlp)->dlnl_next);
6092 	}
6093 	mutex_exit(&zone->zone_lock);
6094 	zone_rele(zone);
6095 	kmem_free(dlnl, sizeof (struct dlnamelist));
6096 	return (set_errno(ENXIO));
6097 
6098 found:
6099 	odlnl = *dlnlp;
6100 	*dlnlp = (*dlnlp)->dlnl_next;
6101 	kmem_free(odlnl, sizeof (struct dlnamelist));
6102 
6103 	mutex_exit(&zone->zone_lock);
6104 	zone_rele(zone);
6105 	kmem_free(dlnl, sizeof (struct dlnamelist));
6106 	return (0);
6107 }
6108 
6109 /*
6110  * If *zoneidp is ALL_ZONES, look up which zone is using the datalink
6111  * name (dlname); otherwise just check whether the zone specified by
6112  * *zoneidp has access to the datalink name.
6113  */
6114 static int
6115 zone_check_datalink(zoneid_t *zoneidp, char *dlname)
6116 {
6117 	zoneid_t id;
6118 	char *dln;
6119 	zone_t *zone;
6120 	int err = 0;
6121 	boolean_t allzones = B_FALSE;
6122 
6123 	if (copyin(zoneidp, &id, sizeof (id)) != 0) {
6124 		return (set_errno(EFAULT));
6125 	}
6126 	dln = kmem_zalloc(LIFNAMSIZ, KM_SLEEP);
6127 	if ((err = copyinstr(dlname, dln, LIFNAMSIZ, NULL)) != 0) {
6128 		kmem_free(dln, LIFNAMSIZ);
6129 		return (set_errno(err));
6130 	}
6131 
6132 	if (id == ALL_ZONES)
6133 		allzones = B_TRUE;
6134 
6135 	/*
6136 	 * Check whether datalink name is already used.
6137 	 */
6138 	mutex_enter(&zonehash_lock);
6139 	for (zone = list_head(&zone_active); zone != NULL;
6140 	    zone = list_next(&zone_active, zone)) {
6141 		if (allzones || (id == zone->zone_id)) {
6142 			if (!zone_dlname(zone, dln))
6143 				continue;
6144 			if (allzones)
6145 				err = copyout(&zone->zone_id, zoneidp,
6146 				    sizeof (*zoneidp));
6147 
6148 			mutex_exit(&zonehash_lock);
6149 			kmem_free(dln, LIFNAMSIZ);
6150 			return (err ? set_errno(EFAULT) : 0);
6151 		}
6152 	}
6153 
6154 	/* datalink name is not found in any active zone. */
6155 	mutex_exit(&zonehash_lock);
6156 	kmem_free(dln, LIFNAMSIZ);
6157 	return (set_errno(ENXIO));
6158 }
6159 
6160 /*
6161  * Get the names of the datalinks assigned to a zone.
6162  * Here *nump is the number of datalink names the caller has made room
6163  * for; the supplied buffer must be big enough to hold at least *nump
6164  * names, that is,
6165  * LIFNAMSIZ * *nump bytes.
6166  * On return, *nump is updated to the actual number of datalinks if that
6167  * number has changed.
6168  */
6169 static int
6170 zone_list_datalink(zoneid_t zoneid, int *nump, char *buf)
6171 {
6172 	int num, dlcount;
6173 	zone_t *zone;
6174 	struct dlnamelist *dlnl;
6175 	char *ptr;
6176 
6177 	if (copyin(nump, &dlcount, sizeof (dlcount)) != 0)
6178 		return (set_errno(EFAULT));
6179 
6180 	zone = zone_find_by_id(zoneid);
6181 	if (zone == NULL) {
6182 		return (set_errno(ENXIO));
6183 	}
6184 
6185 	num = 0;
6186 	mutex_enter(&zone->zone_lock);
6187 	ptr = buf;
6188 	for (dlnl = zone->zone_dl_list; dlnl != NULL; dlnl = dlnl->dlnl_next) {
6189 		/*
6190 		 * If the list changed and the new number is bigger
6191 		 * than what the caller supplied, just count; don't
6192 		 * copy out.
6193 		 */
6194 		if (++num > dlcount)
6195 			continue;
6196 		if (copyout(dlnl->dlnl_name, ptr, LIFNAMSIZ) != 0) {
6197 			mutex_exit(&zone->zone_lock);
6198 			zone_rele(zone);
6199 			return (set_errno(EFAULT));
6200 		}
6201 		ptr += LIFNAMSIZ;
6202 	}
6203 	mutex_exit(&zone->zone_lock);
6204 	zone_rele(zone);
6205 
6206 	/* If the count increased or decreased, notify the caller. */
6207 	if (num != dlcount) {
6208 		if (copyout(&num, nump, sizeof (num)) != 0) {
6209 			return (set_errno(EFAULT));
6210 		}
6211 	}
6212 	return (0);
6213 }
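/*
 * Like zone_list(), this follows a count-then-copy protocol: names beyond
 * the caller's count are not copied out, and the updated *nump tells the
 * caller to retry with a larger buffer.
 */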
6214 
6215 /*
6216  * Public interface for looking up a zone by zoneid. It's a customized version
6217  * for netstack_zone_create(). It can only be called from the zsd create
6218  * callbacks, since it doesn't take a reference on the zone structure; if
6219  * it is called elsewhere, the zone could disappear after the zonehash_lock
6220  * is dropped.
6221  *
6222  * Furthermore it
6223  * 1. Doesn't check the status of the zone.
6224  * 2. May be called even before zone_init() runs; in that case the
6225  *    address of zone0 is returned directly, and netstack_zone_create()
6226  *    will only assign zone0.zone_netstack, which won't break anything.
6227  * 3. Returns without the zone being held.
6228  */
6229 zone_t *
6230 zone_find_by_id_nolock(zoneid_t zoneid)
6231 {
6232 	zone_t *zone;
6233 
6234 	mutex_enter(&zonehash_lock);
6235 	if (zonehashbyid == NULL)
6236 		zone = &zone0;
6237 	else
6238 		zone = zone_find_all_by_id(zoneid);
6239 	mutex_exit(&zonehash_lock);
6240 	return (zone);
6241 }
6242 
6243 /*
6244  * Walk the datalinks for a given zone
6245  */
6246 int
6247 zone_datalink_walk(zoneid_t zoneid, int (*cb)(const char *, void *), void *data)
6248 {
6249 	zone_t *zone;
6250 	struct dlnamelist *dlnl;
6251 	int ret = 0;
6252 
6253 	if ((zone = zone_find_by_id(zoneid)) == NULL)
6254 		return (ENOENT);
6255 
6256 	mutex_enter(&zone->zone_lock);
6257 	for (dlnl = zone->zone_dl_list; dlnl != NULL; dlnl = dlnl->dlnl_next) {
6258 		if ((ret = (*cb)(dlnl->dlnl_name, data)) != 0)
6259 			break;
6260 	}
6261 	mutex_exit(&zone->zone_lock);
6262 	zone_rele(zone);
6263 	return (ret);
6264 }
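/*
 * Usage sketch for in-kernel consumers (the callback below is hypothetical):
 * a callback that returns non-zero stops the walk, and that value is
 * returned to the caller.
 *
 *	static int
 *	count_one_datalink(const char *name, void *arg)
 *	{
 *		int *countp = arg;
 *
 *		(*countp)++;
 *		return (0);
 *	}
 *
 *	...
 *		int count = 0;
 *
 *		if (zone_datalink_walk(zoneid, count_one_datalink, &count) == 0)
 *			cmn_err(CE_NOTE, "zone %d has %d datalinks",
 *			    (int)zoneid, count);
 *
 * zone_datalink_walk() returns ENOENT if the zone can't be found, so callers
 * should be prepared for that as well.
 */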
6265