/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Zones
 *
 *   A zone is a named collection of processes, namespace constraints,
 *   and other system resources which comprise a secure and manageable
 *   application containment facility.
 *
 *   Zones (represented by the reference counted zone_t) are tracked in
 *   the kernel in the zonehash.  Elsewhere in the kernel, Zone IDs
 *   (zoneid_t) are used to track zone association.  Zone IDs are
 *   dynamically generated when the zone is created; if a persistent
 *   identifier is needed (core files, accounting logs, audit trail,
 *   etc.), the zone name should be used.
 *
 *
 *   Global Zone:
 *
 *   The global zone (zoneid 0) is automatically associated with all
 *   system resources that have not been bound to a user-created zone.
 *   This means that even systems where zones are not in active use
 *   have a global zone, and all processes, mounts, etc. are
 *   associated with that zone.  The global zone is generally
 *   unconstrained in terms of privileges and access, though the usual
 *   credential and privilege based restrictions apply.
 *
 *
 *   Zone States:
 *
 *   The states in which a zone may be, and the transitions between
 *   them, are as follows:
 *
 *   ZONE_IS_UNINITIALIZED: primordial state for a zone.  The partially
 *   initialized zone is added to the list of active zones on the system
 *   but isn't accessible.
 *
 *   ZONE_IS_READY: zsched (the kernel dummy process for a zone) is
 *   ready.  The zone is made visible after the ZSD constructor
 *   callbacks are executed.  A zone remains in this state until it
 *   transitions into the ZONE_IS_BOOTING state as a result of a call to
 *   zone_boot().
 *
 *   ZONE_IS_BOOTING: in this short-lived state, zsched attempts to
 *   start init.  Should that fail, the zone proceeds to the
 *   ZONE_IS_SHUTTING_DOWN state.
 *
 *   ZONE_IS_RUNNING: The zone is open for business: zsched has
 *   successfully started init.  A zone remains in this state until
 *   zone_shutdown() is called.
 *
 *   ZONE_IS_SHUTTING_DOWN: zone_shutdown() has been called, the system
 *   is killing all processes running in the zone.  The zone remains in
 *   this state until there are no more user processes running in the
 *   zone.  zone_create(), zone_enter(), and zone_destroy() on this zone
 *   will fail.  Since zone_shutdown() is restartable, it may be called
 *   successfully multiple times for the same zone_t.
 *   Setting of the zone's state to ZONE_IS_SHUTTING_DOWN is
 *   synchronized with mounts, so VOP_MOUNT() may check the zone's
 *   status without worrying about it being a moving target.
 *
 *   ZONE_IS_EMPTY: zone_shutdown() has been called, and there
 *   are no more user processes in the zone.  The zone remains in this
 *   state until there are no more kernel threads associated with the
 *   zone.  zone_create(), zone_enter(), and zone_destroy() on this zone
 *   will fail.
 *
 *   ZONE_IS_DOWN: All kernel threads doing work on behalf of the zone
 *   have exited.  zone_shutdown() returns.  Henceforth it is not
 *   possible to join the zone or create kernel threads therein.
 *
 *   ZONE_IS_DYING: zone_destroy() has been called on the zone; zone
 *   remains in this state until zsched exits.  Calls to zone_find_by_*()
 *   return NULL from now on.
 *
 *   ZONE_IS_DEAD: zsched has exited (zone_ntasks == 0).  There are no
 *   processes or threads doing work on behalf of the zone.  The zone is
 *   removed from the list of active zones.  zone_destroy() returns, and
 *   the zone can be recreated.
 *
 *   ZONE_IS_FREE (internal state): zone_ref goes to 0, ZSD destructor
 *   callbacks are executed, and all memory associated with the zone is
 *   freed.
 *
 *   Threads can wait for the zone to enter a requested state by using
 *   zone_status_wait() or zone_status_timedwait() with the desired
 *   state passed in as an argument.  Zone state transitions are
 *   uni-directional; it is not possible to move back to an earlier
 *   state.
 *
 *
 *   Zone-Specific Data:
 *
 *   Subsystems needing to maintain zone-specific data can store that
 *   data using the ZSD mechanism.  This provides a zone-specific data
 *   store, similar to thread-specific data (see pthread_getspecific(3C)
 *   or the TSD code in uts/common/disp/thread.c).  Also, ZSD can be
 *   used to register callbacks to be invoked when a zone is created,
 *   shut down, or destroyed.  This can be used to initialize
 *   zone-specific data for new zones and to clean up when zones go
 *   away.
 *
 *
 *   Data Structures:
 *
 *   The per-zone structure (zone_t) is reference counted, and freed
 *   when all references are released.  zone_hold and zone_rele can be
 *   used to adjust the reference count.  In addition, reference counts
 *   associated with the cred_t structure are tracked separately using
 *   zone_cred_hold and zone_cred_rele.
 *
 *   Pointers to active zone_t's are stored in two hash tables; one
 *   for searching by id, the other for searching by name.  Lookups
 *   can be performed on either basis, using zone_find_by_id and
 *   zone_find_by_name.  Both return zone_t pointers with the zone
 *   held, so zone_rele should be called when the pointer is no longer
 *   needed.  Zones can also be searched by path; zone_find_by_path
 *   returns the zone with which a path name is associated (global
 *   zone if the path is not within some other zone's file system
 *   hierarchy).  This currently requires iterating through each zone,
 *   so it is slower than an id or name search via a hash table.
 *
 *
 *   Locking:
 *
 *   zonehash_lock: This is a top-level global lock used to protect the
 *       zone hash tables and lists.  Zones cannot be created or
 *       destroyed while this lock is held.
 *   zone_status_lock: This is a global lock protecting zone state.
 *       Zones cannot change state while this lock is held.
 *       It also protects the list of kernel threads associated with a
 *       zone.
 *   zone_lock: This is a per-zone lock used to protect several fields
 *       of the zone_t (see <sys/zone.h> for details).  In addition,
 *       holding this lock means that the zone cannot go away.
 *   zsd_key_lock: This is a global lock protecting the key state for
 *       ZSD.
 *   zone_deathrow_lock: This is a global lock protecting the "deathrow"
 *       list (a list of zones in the ZONE_IS_DEAD state).
 *
 *   Ordering requirements:
 *       pool_lock --> cpu_lock --> zonehash_lock --> zone_status_lock -->
 *       zone_lock --> zsd_key_lock --> pidlock --> p_lock
 *
 *   Blocking memory allocations are permitted while holding any of the
 *   zone locks.
 *
 *
 *   System Call Interface:
 *
 *   The zone subsystem can be managed and queried from user level with
 *   the following system calls (all subcodes of the primary "zone"
 *   system call):
 *   - zone_create: creates a zone with selected attributes (name,
 *     root path, privileges, resource controls, ZFS datasets)
 *   - zone_enter: allows the current process to enter a zone
 *   - zone_getattr: reports attributes of a zone
 *   - zone_list: lists all zones active in the system
 *   - zone_lookup: looks up zone id based on name
 *   - zone_shutdown: initiates shutdown process (see states above)
 *   - zone_destroy: completes shutdown process (see states above)
 *
 */
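
/*
 * Illustrative sketch (not part of the original file): a caller that
 * needs a zone to reach a particular state can block on the transitions
 * described above.  Since transitions are one-way, the wait completes
 * once the status is at least the requested state:
 *
 *	zone_t *zone;
 *
 *	if ((zone = zone_find_by_id(zoneid)) != NULL) {
 *		zone_status_wait(zone, ZONE_IS_RUNNING);
 *		... the zone has reached at least ZONE_IS_RUNNING ...
 *		zone_rele(zone);
 *	}
 *
 * Callers that care about later transitions (e.g. the zone having moved
 * on to ZONE_IS_SHUTTING_DOWN) must re-check the status afterwards.
 */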

#include <sys/priv_impl.h>
#include <sys/cred.h>
#include <c2/audit.h>
#include <sys/ddi.h>
#include <sys/debug.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/pathname.h>
#include <sys/proc.h>
#include <sys/project.h>
#include <sys/sysevent.h>
#include <sys/task.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/systeminfo.h>
#include <sys/policy.h>
#include <sys/cred_impl.h>
#include <sys/contract_impl.h>
#include <sys/contract/process_impl.h>
#include <sys/class.h>
#include <sys/pool.h>
#include <sys/pool_pset.h>
#include <sys/pset.h>
#include <sys/log.h>
#include <sys/sysmacros.h>
#include <sys/callb.h>
#include <sys/vmparam.h>
#include <sys/corectl.h>

#include <sys/door.h>
#include <sys/cpuvar.h>
#include <sys/fs/snode.h>

#include <sys/uadmin.h>
#include <sys/session.h>
#include <sys/cmn_err.h>
#include <sys/modhash.h>
#include <sys/nvpair.h>
#include <sys/rctl.h>
#include <sys/fss.h>
#include <sys/zone.h>

/*
 * cv used to signal that all references to the zone have been released.  This
 * needs to be global since there may be multiple waiters, and the first to
 * wake up will free the zone_t, hence we cannot use zone->zone_cv.
 */
static kcondvar_t zone_destroy_cv;
/*
 * Lock used to serialize access to zone_cv.  This could have been per-zone,
 * but then we'd need another lock for zone_destroy_cv, and why bother?
 */
static kmutex_t zone_status_lock;

/*
 * ZSD-related global variables.
 */
static kmutex_t zsd_key_lock;	/* protects the following two */
/*
 * The next caller of zone_key_create() will be assigned a key of ++zsd_keyval.
 */
static zone_key_t zsd_keyval = 0;
/*
 * Global list of registered keys.  We use this when a new zone is created.
 */
static list_t zsd_registered_keys;

int zone_hash_size = 256;
static mod_hash_t *zonehashbyname, *zonehashbyid;
static kmutex_t zonehash_lock;
static uint_t zonecount;
static id_space_t *zoneid_space;

/*
 * The global zone (aka zone0) is the all-seeing, all-knowing zone in which the
 * kernel proper runs, and which manages all other zones.
 *
 * Although not declared as static, the variable "zone0" should not be used
 * except by code that needs to reference the global zone early on in boot,
 * before it is fully initialized.  All other consumers should use
 * 'global_zone'.
 */
zone_t zone0;
zone_t *global_zone = NULL;	/* Set when the global zone is initialized */

/*
 * List of active zones, protected by zonehash_lock.
 */
static list_t zone_active;

/*
 * List of destroyed zones that still have outstanding cred references.
 * Used for debugging.  Uses a separate lock to avoid lock ordering
 * problems in zone_free.
 */
static list_t zone_deathrow;
static kmutex_t zone_deathrow_lock;

/* number of zones is limited by virtual interface limit in IP */
uint_t maxzones = 8192;

/* Event channel to send zone state change notifications */
evchan_t *zone_event_chan;

/*
 * This table holds the mapping from kernel zone states to
 * states visible in the state notification API.
 * The idea is that we only expose "obvious" states and
 * do not expose states which are just implementation details.
 */
const char *zone_status_table[] = {
	ZONE_EVENT_UNINITIALIZED,	/* uninitialized */
	ZONE_EVENT_READY,		/* ready */
	ZONE_EVENT_READY,		/* booting */
	ZONE_EVENT_RUNNING,		/* running */
	ZONE_EVENT_SHUTTING_DOWN,	/* shutting_down */
	ZONE_EVENT_SHUTTING_DOWN,	/* empty */
	ZONE_EVENT_SHUTTING_DOWN,	/* down */
	ZONE_EVENT_SHUTTING_DOWN,	/* dying */
	ZONE_EVENT_UNINITIALIZED,	/* dead */
};

/*
 * This isn't static so lint doesn't complain.
 */
rctl_hndl_t rc_zone_cpu_shares;
rctl_hndl_t rc_zone_nlwps;
/*
 * Synchronization primitives used to synchronize between mounts and zone
 * creation/destruction.
 */
static int mounts_in_progress;
static kcondvar_t mount_cv;
static kmutex_t mount_lock;

const char * const zone_initname = "/sbin/init";

static int zone_shutdown(zoneid_t zoneid);

/*
 * Bump this number when you alter the zone syscall interfaces; this is
 * because we need to have support for previous API versions in libc
 * to support patching; libc calls into the kernel to determine this number.
 *
 * Version 1 of the API is the version originally shipped with Solaris 10.
 * Version 2 alters the zone_create system call in order to support more
 *     arguments by moving the args into a structure; and to do better
 *     error reporting when zone_create() fails.
 * Version 3 alters the zone_create system call in order to support the
 *     import of ZFS datasets to zones.
 */
static const int ZONE_SYSCALL_API_VERSION = 3;

/*
 * Certain filesystems (such as NFS and autofs) need to know which zone
 * the mount is being placed in.  Because of this, we need to be able to
 * ensure that a zone isn't in the process of being created such that
 * nfs_mount() thinks it is in the global zone, while by the time it
 * gets added to the list of mounted zones, it ends up on zoneA's mount
 * list.
 *
 * The following functions: block_mounts()/resume_mounts() and
 * mount_in_progress()/mount_completed() are used by zones and the VFS
 * layer (respectively) to synchronize zone creation and new mounts.
 *
 * The semantics are like a reader-reader lock such that there may
 * either be multiple mounts (or zone creations, if that weren't
 * serialized by zonehash_lock) in progress at the same time, but not
 * both.
 *
 * We use cv's so the user can ctrl-C out of the operation if it's
 * taking too long.
 *
 * The semantics are such that there is unfair bias towards the
 * "current" operation.  This means that zone creations may starve if
 * there is a rapid succession of new mounts coming in to the system, or
 * there is a remote possibility that zones will be created at such a
 * rate that new mounts will not be able to proceed.
 */
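
/*
 * Illustrative sketch (assumed usage, mirroring the description above;
 * not original text).  The VFS layer brackets VFS_MOUNT() with
 * mount_in_progress()/mount_completed(), while zone creation brackets
 * its critical section with block_mounts()/resume_mounts():
 *
 *	mount_in_progress();
 *	error = VFS_MOUNT(vfsp, mvp, uap, cr);
 *	mount_completed();
 *
 *	if (block_mounts() == 0)
 *		return (set_errno(EINTR));	(interrupted by a signal)
 *	... zone creation work that must not race with mounts ...
 *	resume_mounts();
 */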

/*
 * Prevent new mounts from progressing to the point of calling
 * VFS_MOUNT().  If there are already mounts in this "region", wait for
 * them to complete.
 */
static int
block_mounts(void)
{
	int retval = 0;

	/*
	 * Since it may block for a long time, block_mounts() shouldn't be
	 * called with zonehash_lock held.
	 */
	ASSERT(MUTEX_NOT_HELD(&zonehash_lock));
	mutex_enter(&mount_lock);
	while (mounts_in_progress > 0) {
		if (cv_wait_sig(&mount_cv, &mount_lock) == 0)
			goto signaled;
	}
	/*
	 * A negative value of mounts_in_progress indicates that mounts
	 * have been blocked by (-mounts_in_progress) different callers.
	 */
	mounts_in_progress--;
	retval = 1;
signaled:
	mutex_exit(&mount_lock);
	return (retval);
}

/*
 * The VFS layer may progress with new mounts as far as we're concerned.
 * Allow them to progress if we were the last obstacle.
 */
static void
resume_mounts(void)
{
	mutex_enter(&mount_lock);
	if (++mounts_in_progress == 0)
		cv_broadcast(&mount_cv);
	mutex_exit(&mount_lock);
}

/*
 * The VFS layer is busy with a mount; zones should wait until all
 * mounts are completed to progress.
 */
void
mount_in_progress(void)
{
	mutex_enter(&mount_lock);
	while (mounts_in_progress < 0)
		cv_wait(&mount_cv, &mount_lock);
	mounts_in_progress++;
	mutex_exit(&mount_lock);
}

/*
 * VFS is done with one mount; wake up any waiting block_mounts()
 * callers if this is the last mount.
 */
void
mount_completed(void)
{
	mutex_enter(&mount_lock);
	if (--mounts_in_progress == 0)
		cv_broadcast(&mount_cv);
	mutex_exit(&mount_lock);
}

/*
 * ZSD routines.
 *
 * Zone Specific Data (ZSD) is modeled after Thread Specific Data as
 * defined by the pthread_key_create() and related interfaces.
 *
 * Kernel subsystems may register one or more data items and/or
 * callbacks to be executed when a zone is created, shut down, or
 * destroyed.
 *
 * Unlike the thread counterpart, destructor callbacks will be executed
 * even if the data pointer is NULL and/or there are no constructor
 * callbacks, so it is the responsibility of such callbacks to check for
 * NULL data values if necessary.
 *
 * The locking strategy and overall picture is as follows:
 *
 * When someone calls zone_key_create(), a template ZSD entry is added to the
 * global list "zsd_registered_keys", protected by zsd_key_lock.  The
 * constructor callback is called immediately on all existing zones, and a
 * copy of the ZSD entry added to the per-zone zone_zsd list (protected by
 * zone_lock).  As this operation requires the list of zones, the list of
 * registered keys, and the per-zone list of ZSD entries to remain constant
 * throughout the entire operation, it must grab zonehash_lock, zone_lock for
 * all existing zones, and zsd_key_lock, in that order.  Similar locking is
 * needed when zone_key_delete() is called.  It is thus sufficient to hold
 * zsd_key_lock *or* zone_lock to prevent additions to or removals from the
 * per-zone zone_zsd list.
 *
 * Note that this implementation does not make a copy of the ZSD entry if a
 * constructor callback is not provided.  A zone_getspecific() on such an
 * uninitialized ZSD entry will return NULL.
 *
 * When new zones are created constructor callbacks for all registered ZSD
 * entries will be called.
 *
 * The framework does not provide any locking around zone_getspecific() and
 * zone_setspecific() apart from that needed for internal consistency, so
 * callers interested in atomic "test-and-set" semantics will need to provide
 * their own locking.
 */
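
/*
 * Illustrative sketch (not from the original source): a subsystem
 * registering zone-specific data.  The "foo_*" names and foo_data_t are
 * hypothetical; the interfaces are the ones defined below.
 *
 *	static zone_key_t foo_zone_key;
 *
 *	static void *
 *	foo_zone_create(zoneid_t zoneid)
 *	{
 *		return (kmem_zalloc(sizeof (foo_data_t), KM_SLEEP));
 *	}
 *
 *	static void
 *	foo_zone_destroy(zoneid_t zoneid, void *data)
 *	{
 *		if (data != NULL)
 *			kmem_free(data, sizeof (foo_data_t));
 *	}
 *
 *	void
 *	foo_init(void)
 *	{
 *		zone_key_create(&foo_zone_key, foo_zone_create, NULL,
 *		    foo_zone_destroy);
 *	}
 *
 * Thereafter zone_getspecific(foo_zone_key, zone) returns the per-zone
 * foo_data_t, and the destructor runs for each zone as it goes away.
 */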
void
zone_key_create(zone_key_t *keyp, void *(*create)(zoneid_t),
    void (*shutdown)(zoneid_t, void *), void (*destroy)(zoneid_t, void *))
{
	struct zsd_entry *zsdp;
	struct zsd_entry *t;
	struct zone *zone;

	zsdp = kmem_alloc(sizeof (*zsdp), KM_SLEEP);
	zsdp->zsd_data = NULL;
	zsdp->zsd_create = create;
	zsdp->zsd_shutdown = shutdown;
	zsdp->zsd_destroy = destroy;

	mutex_enter(&zonehash_lock);	/* stop the world */
	for (zone = list_head(&zone_active); zone != NULL;
	    zone = list_next(&zone_active, zone))
		mutex_enter(&zone->zone_lock);	/* lock all zones */

	mutex_enter(&zsd_key_lock);
	*keyp = zsdp->zsd_key = ++zsd_keyval;
	ASSERT(zsd_keyval != 0);
	list_insert_tail(&zsd_registered_keys, zsdp);
	mutex_exit(&zsd_key_lock);

	if (create != NULL) {
		for (zone = list_head(&zone_active); zone != NULL;
		    zone = list_next(&zone_active, zone)) {
			t = kmem_alloc(sizeof (*t), KM_SLEEP);
			t->zsd_key = *keyp;
			t->zsd_data = (*create)(zone->zone_id);
			t->zsd_create = create;
			t->zsd_shutdown = shutdown;
			t->zsd_destroy = destroy;
			list_insert_tail(&zone->zone_zsd, t);
		}
	}
	for (zone = list_head(&zone_active); zone != NULL;
	    zone = list_next(&zone_active, zone))
		mutex_exit(&zone->zone_lock);
	mutex_exit(&zonehash_lock);
}

/*
 * Helper function to find the zsd_entry associated with the key in the
 * given list.
 */
static struct zsd_entry *
zsd_find(list_t *l, zone_key_t key)
{
	struct zsd_entry *zsd;

	for (zsd = list_head(l); zsd != NULL; zsd = list_next(l, zsd)) {
		if (zsd->zsd_key == key) {
			/*
			 * Move to head of list to keep list in MRU order.
			 */
			if (zsd != list_head(l)) {
				list_remove(l, zsd);
				list_insert_head(l, zsd);
			}
			return (zsd);
		}
	}
	return (NULL);
}

/*
 * Function called when a module is being unloaded, or otherwise wishes
 * to unregister its ZSD key and callbacks.
 */
int
zone_key_delete(zone_key_t key)
{
	struct zsd_entry *zsdp = NULL;
	zone_t *zone;

	mutex_enter(&zonehash_lock);	/* Zone create/delete waits for us */
	for (zone = list_head(&zone_active); zone != NULL;
	    zone = list_next(&zone_active, zone))
		mutex_enter(&zone->zone_lock);	/* lock all zones */

	mutex_enter(&zsd_key_lock);
	zsdp = zsd_find(&zsd_registered_keys, key);
	if (zsdp == NULL)
		goto notfound;
	list_remove(&zsd_registered_keys, zsdp);
	mutex_exit(&zsd_key_lock);

	for (zone = list_head(&zone_active); zone != NULL;
	    zone = list_next(&zone_active, zone)) {
		struct zsd_entry *del;
		void *data;

		if (!(zone->zone_flags & ZF_DESTROYED)) {
			del = zsd_find(&zone->zone_zsd, key);
			if (del != NULL) {
				data = del->zsd_data;
				ASSERT(del->zsd_shutdown == zsdp->zsd_shutdown);
				ASSERT(del->zsd_destroy == zsdp->zsd_destroy);
				list_remove(&zone->zone_zsd, del);
				kmem_free(del, sizeof (*del));
			} else {
				data = NULL;
			}
			if (zsdp->zsd_shutdown)
				zsdp->zsd_shutdown(zone->zone_id, data);
			if (zsdp->zsd_destroy)
				zsdp->zsd_destroy(zone->zone_id, data);
		}
		mutex_exit(&zone->zone_lock);
	}
	mutex_exit(&zonehash_lock);
	kmem_free(zsdp, sizeof (*zsdp));
	return (0);

notfound:
	mutex_exit(&zsd_key_lock);
	for (zone = list_head(&zone_active); zone != NULL;
	    zone = list_next(&zone_active, zone))
		mutex_exit(&zone->zone_lock);
	mutex_exit(&zonehash_lock);
	return (-1);
}

/*
 * ZSD counterpart of pthread_setspecific().
 */
int
zone_setspecific(zone_key_t key, zone_t *zone, const void *data)
{
	struct zsd_entry *t;
	struct zsd_entry *zsdp = NULL;

	mutex_enter(&zone->zone_lock);
	t = zsd_find(&zone->zone_zsd, key);
	if (t != NULL) {
		/*
		 * Replace old value with new
		 */
		t->zsd_data = (void *)data;
		mutex_exit(&zone->zone_lock);
		return (0);
	}
	/*
	 * If there was no previous value, go through the list of registered
	 * keys.
	 *
	 * We avoid grabbing zsd_key_lock until we are sure we need it; this is
	 * necessary for shutdown callbacks to be able to execute without fear
	 * of deadlock.
	 */
	mutex_enter(&zsd_key_lock);
	zsdp = zsd_find(&zsd_registered_keys, key);
	if (zsdp == NULL) {	/* Key was not registered */
		mutex_exit(&zsd_key_lock);
		mutex_exit(&zone->zone_lock);
		return (-1);
	}

	/*
	 * Add a zsd_entry to this zone, using the template we just retrieved
	 * to initialize the constructor and destructor(s).
	 */
	t = kmem_alloc(sizeof (*t), KM_SLEEP);
	t->zsd_key = key;
	t->zsd_data = (void *)data;
	t->zsd_create = zsdp->zsd_create;
	t->zsd_shutdown = zsdp->zsd_shutdown;
	t->zsd_destroy = zsdp->zsd_destroy;
	list_insert_tail(&zone->zone_zsd, t);
	mutex_exit(&zsd_key_lock);
	mutex_exit(&zone->zone_lock);
	return (0);
}

/*
 * ZSD counterpart of pthread_getspecific().
 */
void *
zone_getspecific(zone_key_t key, zone_t *zone)
{
	struct zsd_entry *t;
	void *data;

	mutex_enter(&zone->zone_lock);
	t = zsd_find(&zone->zone_zsd, key);
	data = (t == NULL ? NULL : t->zsd_data);
	mutex_exit(&zone->zone_lock);
	return (data);
}
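
/*
 * Illustrative sketch (not original text): the framework provides no
 * atomicity across zone_getspecific()/zone_setspecific(), so a caller
 * needing "create if absent" semantics supplies its own lock.  foo_lock,
 * foo_zone_key and foo_make() are hypothetical:
 *
 *	mutex_enter(&foo_lock);
 *	if ((data = zone_getspecific(foo_zone_key, zone)) == NULL) {
 *		data = foo_make(zone->zone_id);
 *		(void) zone_setspecific(foo_zone_key, zone, data);
 *	}
 *	mutex_exit(&foo_lock);
 */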

/*
 * Function used to initialize a zone's list of ZSD callbacks and data
 * when the zone is being created.  The callbacks are initialized from
 * the template list (zsd_registered_keys), and the constructor
 * callback executed (if one exists).
 *
 * This is called before the zone is made publicly available, hence no
 * need to grab zone_lock.
 *
 * Although we grab and release zsd_key_lock, new entries cannot be
 * added to or removed from the zsd_registered_keys list until we
 * release zonehash_lock, so there isn't a window for a
 * zone_key_create() to come in after we've dropped zsd_key_lock but
 * before the zone is added to the zone list, such that the constructor
 * callbacks aren't executed for the new zone.
 */
static void
zone_zsd_configure(zone_t *zone)
{
	struct zsd_entry *zsdp;
	struct zsd_entry *t;
	zoneid_t zoneid = zone->zone_id;

	ASSERT(MUTEX_HELD(&zonehash_lock));
	ASSERT(list_head(&zone->zone_zsd) == NULL);
	mutex_enter(&zsd_key_lock);
	for (zsdp = list_head(&zsd_registered_keys); zsdp != NULL;
	    zsdp = list_next(&zsd_registered_keys, zsdp)) {
		if (zsdp->zsd_create != NULL) {
			t = kmem_alloc(sizeof (*t), KM_SLEEP);
			t->zsd_key = zsdp->zsd_key;
			t->zsd_create = zsdp->zsd_create;
			t->zsd_data = (*t->zsd_create)(zoneid);
			t->zsd_shutdown = zsdp->zsd_shutdown;
			t->zsd_destroy = zsdp->zsd_destroy;
			list_insert_tail(&zone->zone_zsd, t);
		}
	}
	mutex_exit(&zsd_key_lock);
}

enum zsd_callback_type { ZSD_CREATE, ZSD_SHUTDOWN, ZSD_DESTROY };

/*
 * Helper function to execute shutdown or destructor callbacks.
 */
static void
zone_zsd_callbacks(zone_t *zone, enum zsd_callback_type ct)
{
	struct zsd_entry *zsdp;
	struct zsd_entry *t;
	zoneid_t zoneid = zone->zone_id;

	ASSERT(ct == ZSD_SHUTDOWN || ct == ZSD_DESTROY);
	ASSERT(ct != ZSD_SHUTDOWN || zone_status_get(zone) >= ZONE_IS_EMPTY);
	ASSERT(ct != ZSD_DESTROY || zone_status_get(zone) >= ZONE_IS_DOWN);

	mutex_enter(&zone->zone_lock);
	if (ct == ZSD_DESTROY) {
		if (zone->zone_flags & ZF_DESTROYED) {
			/*
			 * Make sure destructors are only called once.
			 */
			mutex_exit(&zone->zone_lock);
			return;
		}
		zone->zone_flags |= ZF_DESTROYED;
	}
	mutex_exit(&zone->zone_lock);

	/*
	 * Both zsd_key_lock and zone_lock need to be held in order to add or
	 * remove a ZSD key, (either globally as part of
	 * zone_key_create()/zone_key_delete(), or on a per-zone basis, as is
	 * possible through zone_setspecific()), so it's sufficient to hold
	 * zsd_key_lock here.
	 *
	 * This is a good thing, since we don't want to recursively try to grab
	 * zone_lock if a callback attempts to do something like a crfree() or
	 * zone_rele().
	 */
	mutex_enter(&zsd_key_lock);
	for (zsdp = list_head(&zsd_registered_keys); zsdp != NULL;
	    zsdp = list_next(&zsd_registered_keys, zsdp)) {
		zone_key_t key = zsdp->zsd_key;

		/* Skip if no callbacks registered */
		if (ct == ZSD_SHUTDOWN && zsdp->zsd_shutdown == NULL)
			continue;
		if (ct == ZSD_DESTROY && zsdp->zsd_destroy == NULL)
			continue;
		/*
		 * Call the callback with the zone-specific data if we can find
		 * any, otherwise with NULL.
		 */
		t = zsd_find(&zone->zone_zsd, key);
		if (t != NULL) {
			if (ct == ZSD_SHUTDOWN) {
				t->zsd_shutdown(zoneid, t->zsd_data);
			} else {
				ASSERT(ct == ZSD_DESTROY);
				t->zsd_destroy(zoneid, t->zsd_data);
			}
		} else {
			if (ct == ZSD_SHUTDOWN) {
				zsdp->zsd_shutdown(zoneid, NULL);
			} else {
				ASSERT(ct == ZSD_DESTROY);
				zsdp->zsd_destroy(zoneid, NULL);
			}
		}
	}
	mutex_exit(&zsd_key_lock);
}

/*
 * Called when the zone is going away; free ZSD-related memory, and
 * destroy the zone_zsd list.
 */
static void
zone_free_zsd(zone_t *zone)
{
	struct zsd_entry *t, *next;

	/*
	 * Free all the zsd_entry's we had on this zone.
	 */
	for (t = list_head(&zone->zone_zsd); t != NULL; t = next) {
		next = list_next(&zone->zone_zsd, t);
		list_remove(&zone->zone_zsd, t);
		kmem_free(t, sizeof (*t));
	}
	list_destroy(&zone->zone_zsd);
}

/*
 * Frees memory associated with the zone dataset list.
 */
static void
zone_free_datasets(zone_t *zone)
{
	zone_dataset_t *t, *next;

	for (t = list_head(&zone->zone_datasets); t != NULL; t = next) {
		next = list_next(&zone->zone_datasets, t);
		list_remove(&zone->zone_datasets, t);
		kmem_free(t->zd_dataset, strlen(t->zd_dataset) + 1);
		kmem_free(t, sizeof (*t));
	}
	list_destroy(&zone->zone_datasets);
}

/*
 * zone.cpu-shares resource control support.
 */
/*ARGSUSED*/
static rctl_qty_t
zone_cpu_shares_usage(rctl_t *rctl, struct proc *p)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	return (p->p_zone->zone_shares);
}

/*ARGSUSED*/
static int
zone_cpu_shares_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
    rctl_qty_t nv)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_ZONE);
	if (e->rcep_p.zone == NULL)
		return (0);

	e->rcep_p.zone->zone_shares = nv;
	return (0);
}

static rctl_ops_t zone_cpu_shares_ops = {
	rcop_no_action,
	zone_cpu_shares_usage,
	zone_cpu_shares_set,
	rcop_no_test
};

/*ARGSUSED*/
static rctl_qty_t
zone_lwps_usage(rctl_t *r, proc_t *p)
{
	rctl_qty_t nlwps;
	zone_t *zone = p->p_zone;

	ASSERT(MUTEX_HELD(&p->p_lock));

	mutex_enter(&zone->zone_nlwps_lock);
	nlwps = zone->zone_nlwps;
	mutex_exit(&zone->zone_nlwps_lock);

	return (nlwps);
}

/*ARGSUSED*/
static int
zone_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
    rctl_qty_t incr, uint_t flags)
{
	rctl_qty_t nlwps;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_ZONE);
	if (e->rcep_p.zone == NULL)
		return (0);
	ASSERT(MUTEX_HELD(&(e->rcep_p.zone->zone_nlwps_lock)));
	nlwps = e->rcep_p.zone->zone_nlwps;

	if (nlwps + incr > rcntl->rcv_value)
		return (1);

	return (0);
}

/*ARGSUSED*/
static int
zone_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e, rctl_qty_t nv)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(e->rcep_t == RCENTITY_ZONE);
	if (e->rcep_p.zone == NULL)
		return (0);
	e->rcep_p.zone->zone_nlwps_ctl = nv;
	return (0);
}

static rctl_ops_t zone_lwps_ops = {
	rcop_no_action,
	zone_lwps_usage,
	zone_lwps_set,
	zone_lwps_test,
};
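
/*
 * Illustrative note (sketch, not original text): these ops vectors are
 * handed to the rctl framework when the controls are registered in
 * zone_init() below:
 *
 *	rc_zone_nlwps = rctl_register("zone.max-lwps", RCENTITY_ZONE,
 *	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
 *	    INT_MAX, INT_MAX, &zone_lwps_ops);
 *
 * The framework then calls back through the vector: the usage/set/test
 * entry points above are invoked on behalf of getrctl(2)/setrctl(2) and
 * at the points where a new LWP is about to be counted against the zone.
 */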

/*
 * Helper function to brand the zone with a unique ID.
 */
static void
zone_uniqid(zone_t *zone)
{
	static uint64_t uniqid = 0;

	ASSERT(MUTEX_HELD(&zonehash_lock));
	zone->zone_uniqid = uniqid++;
}

/*
 * Returns a held pointer to the "kcred" for the specified zone.
 */
struct cred *
zone_get_kcred(zoneid_t zoneid)
{
	zone_t *zone;
	cred_t *cr;

	if ((zone = zone_find_by_id(zoneid)) == NULL)
		return (NULL);
	cr = zone->zone_kcred;
	crhold(cr);
	zone_rele(zone);
	return (cr);
}

/*
 * Called very early on in boot to initialize the ZSD list so that
 * zone_key_create() can be called before zone_init().  It also initializes
 * portions of zone0 which may be used before zone_init() is called.  The
 * variable "global_zone" will be set when zone0 is fully initialized by
 * zone_init().
 */
void
zone_zsd_init(void)
{
	mutex_init(&zonehash_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zsd_key_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsd_registered_keys, sizeof (struct zsd_entry),
	    offsetof(struct zsd_entry, zsd_linkage));
	list_create(&zone_active, sizeof (zone_t),
	    offsetof(zone_t, zone_linkage));
	list_create(&zone_deathrow, sizeof (zone_t),
	    offsetof(zone_t, zone_linkage));

	mutex_init(&zone0.zone_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zone0.zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
	zone0.zone_shares = 1;
	zone0.zone_nlwps_ctl = INT_MAX;
	zone0.zone_name = GLOBAL_ZONENAME;
	zone0.zone_nodename = utsname.nodename;
	zone0.zone_domain = srpc_domain;
	zone0.zone_ref = 1;
	zone0.zone_id = GLOBAL_ZONEID;
	zone0.zone_status = ZONE_IS_RUNNING;
	zone0.zone_rootpath = "/";
	zone0.zone_rootpathlen = 2;
	zone0.zone_psetid = ZONE_PS_INVAL;
	zone0.zone_ncpus = 0;
	zone0.zone_ncpus_online = 0;
	zone0.zone_proc_initpid = 1;
	list_create(&zone0.zone_zsd, sizeof (struct zsd_entry),
	    offsetof(struct zsd_entry, zsd_linkage));
	list_insert_head(&zone_active, &zone0);

	/*
	 * The root filesystem is not mounted yet, so zone_rootvp cannot be set
	 * to anything meaningful.  It is assigned to be 'rootdir' in
	 * vfs_mountroot().
	 */
	zone0.zone_rootvp = NULL;
	zone0.zone_vfslist = NULL;
	zone0.zone_bootargs = NULL;
	zone0.zone_privset = kmem_alloc(sizeof (priv_set_t), KM_SLEEP);
	/*
	 * The global zone has all privileges
	 */
	priv_fillset(zone0.zone_privset);
	/*
	 * Add p0 to the global zone
	 */
	zone0.zone_zsched = &p0;
	p0.p_zone = &zone0;
}

/*
 * Called by main() to initialize the zones framework.
 */
void
zone_init(void)
{
	rctl_dict_entry_t *rde;
	rctl_val_t *dval;
	rctl_set_t *set;
	rctl_alloc_gp_t *gp;
	rctl_entity_p_t e;
	int res;

	ASSERT(curproc == &p0);

	/*
	 * Create ID space for zone IDs.  ID 0 is reserved for the
	 * global zone.
	 */
	zoneid_space = id_space_create("zoneid_space", 1, MAX_ZONEID);

	/*
	 * Initialize generic zone resource controls, if any.
	 */
	rc_zone_cpu_shares = rctl_register("zone.cpu-shares",
	    RCENTITY_ZONE, RCTL_GLOBAL_SIGNAL_NEVER | RCTL_GLOBAL_DENY_NEVER |
	    RCTL_GLOBAL_NOBASIC |
	    RCTL_GLOBAL_COUNT, FSS_MAXSHARES, FSS_MAXSHARES,
	    &zone_cpu_shares_ops);

	rc_zone_nlwps = rctl_register("zone.max-lwps", RCENTITY_ZONE,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
	    INT_MAX, INT_MAX, &zone_lwps_ops);
	/*
	 * Create a rctl_val with PRIVILEGED, NOACTION, value = 1.  Then attach
	 * this at the head of the rctl_dict_entry for ``zone.cpu-shares''.
	 */
	dval = kmem_cache_alloc(rctl_val_cache, KM_SLEEP);
	bzero(dval, sizeof (rctl_val_t));
	dval->rcv_value = 1;
	dval->rcv_privilege = RCPRIV_PRIVILEGED;
	dval->rcv_flagaction = RCTL_LOCAL_NOACTION;
	dval->rcv_action_recip_pid = -1;

	rde = rctl_dict_lookup("zone.cpu-shares");
	(void) rctl_val_list_insert(&rde->rcd_default_value, dval);

	/*
	 * Initialize the ``global zone''.
	 */
	set = rctl_set_create();
	gp = rctl_set_init_prealloc(RCENTITY_ZONE);
	mutex_enter(&p0.p_lock);
	e.rcep_p.zone = &zone0;
	e.rcep_t = RCENTITY_ZONE;
	zone0.zone_rctls = rctl_set_init(RCENTITY_ZONE, &p0, &e, set,
	    gp);

	zone0.zone_nlwps = p0.p_lwpcnt;
	zone0.zone_ntasks = 1;
	mutex_exit(&p0.p_lock);
	rctl_prealloc_destroy(gp);
	/*
	 * pool_default hasn't been initialized yet, so we let pool_init() take
	 * care of making sure the global zone is in the default pool.
	 */
	mutex_enter(&zonehash_lock);
	zone_uniqid(&zone0);
	ASSERT(zone0.zone_uniqid == GLOBAL_ZONEUNIQID);
	mutex_exit(&zonehash_lock);
	zonehashbyid = mod_hash_create_idhash("zone_by_id", zone_hash_size,
	    mod_hash_null_valdtor);
	zonehashbyname = mod_hash_create_strhash("zone_by_name",
	    zone_hash_size, mod_hash_null_valdtor);
	zonecount = 1;

	(void) mod_hash_insert(zonehashbyid, (mod_hash_key_t)GLOBAL_ZONEID,
	    (mod_hash_val_t)&zone0);
	(void) mod_hash_insert(zonehashbyname, (mod_hash_key_t)zone0.zone_name,
	    (mod_hash_val_t)&zone0);
	/*
	 * We avoid setting zone_kcred until now, since kcred is initialized
	 * sometime after zone_zsd_init() and before zone_init().
	 */
	zone0.zone_kcred = kcred;
	/*
	 * The global zone is fully initialized (except for zone_rootvp which
	 * will be set when the root filesystem is mounted).
	 */
	global_zone = &zone0;

	/*
	 * Setup an event channel to send zone status change notifications on
	 */
	res = sysevent_evc_bind(ZONE_EVENT_CHANNEL, &zone_event_chan,
	    EVCH_CREAT);

	if (res)
		panic("Sysevent_evc_bind failed during zone setup.\n");
}

static void
zone_free(zone_t *zone)
{
	ASSERT(zone != global_zone);
	ASSERT(zone->zone_ntasks == 0);
	ASSERT(zone->zone_nlwps == 0);
	ASSERT(zone->zone_cred_ref == 0);
	ASSERT(zone->zone_kcred == NULL);
	ASSERT(zone_status_get(zone) == ZONE_IS_DEAD ||
	    zone_status_get(zone) == ZONE_IS_UNINITIALIZED);

	/* remove from deathrow list */
	if (zone_status_get(zone) == ZONE_IS_DEAD) {
		ASSERT(zone->zone_ref == 0);
		mutex_enter(&zone_deathrow_lock);
		list_remove(&zone_deathrow, zone);
		mutex_exit(&zone_deathrow_lock);
	}

	zone_free_zsd(zone);
	zone_free_datasets(zone);

	if (zone->zone_rootvp != NULL)
		VN_RELE(zone->zone_rootvp);
	if (zone->zone_rootpath)
		kmem_free(zone->zone_rootpath, zone->zone_rootpathlen);
	if (zone->zone_name != NULL)
		kmem_free(zone->zone_name, ZONENAME_MAX);
	if (zone->zone_nodename != NULL)
		kmem_free(zone->zone_nodename, _SYS_NMLN);
	if (zone->zone_domain != NULL)
		kmem_free(zone->zone_domain, _SYS_NMLN);
	if (zone->zone_privset != NULL)
		kmem_free(zone->zone_privset, sizeof (priv_set_t));
	if (zone->zone_rctls != NULL)
		rctl_set_free(zone->zone_rctls);
	if (zone->zone_bootargs != NULL)
		kmem_free(zone->zone_bootargs, ZONEBOOTARGS_MAX);
	id_free(zoneid_space, zone->zone_id);
	mutex_destroy(&zone->zone_lock);
	cv_destroy(&zone->zone_cv);
	kmem_free(zone, sizeof (zone_t));
}

/*
 * See block comment at the top of this file for information about zone
 * status values.
 */
/*
 * Convenience function for setting zone status.
 */
static void
zone_status_set(zone_t *zone, zone_status_t status)
{
	nvlist_t *nvl = NULL;

	ASSERT(MUTEX_HELD(&zone_status_lock));
	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE &&
	    status >= zone_status_get(zone));

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) ||
	    nvlist_add_string(nvl, ZONE_CB_NAME, zone->zone_name) ||
	    nvlist_add_string(nvl, ZONE_CB_NEWSTATE,
	    zone_status_table[status]) ||
	    nvlist_add_string(nvl, ZONE_CB_OLDSTATE,
	    zone_status_table[zone->zone_status]) ||
	    nvlist_add_int32(nvl, ZONE_CB_ZONEID, zone->zone_id) ||
	    nvlist_add_uint64(nvl, ZONE_CB_TIMESTAMP, (uint64_t)gethrtime()) ||
	    sysevent_evc_publish(zone_event_chan, ZONE_EVENT_STATUS_CLASS,
	    ZONE_EVENT_STATUS_SUBCLASS,
	    "sun.com", "kernel", nvl, EVCH_SLEEP)) {
#ifdef DEBUG
		(void) printf(
		    "Failed to allocate and send zone state change event.\n");
#endif
	}
	nvlist_free(nvl);

	zone->zone_status = status;

	cv_broadcast(&zone->zone_cv);
}

/*
 * Public function to retrieve the zone status.  The zone status may
 * change after it is retrieved.
 */
zone_status_t
zone_status_get(zone_t *zone)
{
	return (zone->zone_status);
}

static int
zone_set_bootargs(zone_t *zone, const char *zone_bootargs)
{
	char *bootargs = kmem_zalloc(ZONEBOOTARGS_MAX, KM_SLEEP);
	size_t len;
	int err;

	err = copyinstr(zone_bootargs, bootargs, ZONEBOOTARGS_MAX - 1, &len);
	if (err != 0) {
		kmem_free(bootargs, ZONEBOOTARGS_MAX);
		return (err);	/* EFAULT or ENAMETOOLONG */
	}
	bootargs[len] = '\0';

	ASSERT(zone->zone_bootargs == NULL);
	zone->zone_bootargs = bootargs;
	return (0);
}

/*
 * Block indefinitely waiting for (zone_status >= status)
 */
void
zone_status_wait(zone_t *zone, zone_status_t status)
{
	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);

	mutex_enter(&zone_status_lock);
	while (zone->zone_status < status) {
		cv_wait(&zone->zone_cv, &zone_status_lock);
	}
	mutex_exit(&zone_status_lock);
}

/*
 * Private CPR-safe version of zone_status_wait().
 */
static void
zone_status_wait_cpr(zone_t *zone, zone_status_t status, char *str)
{
	callb_cpr_t cprinfo;

	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);

	CALLB_CPR_INIT(&cprinfo, &zone_status_lock, callb_generic_cpr,
	    str);
	mutex_enter(&zone_status_lock);
	while (zone->zone_status < status) {
		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		cv_wait(&zone->zone_cv, &zone_status_lock);
		CALLB_CPR_SAFE_END(&cprinfo, &zone_status_lock);
	}
	/*
	 * zone_status_lock is implicitly released by the following.
	 */
	CALLB_CPR_EXIT(&cprinfo);
}

/*
 * Block until zone enters requested state or signal is received.  Return (0)
 * if signaled, non-zero otherwise.
 */
int
zone_status_wait_sig(zone_t *zone, zone_status_t status)
{
	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);

	mutex_enter(&zone_status_lock);
	while (zone->zone_status < status) {
		if (!cv_wait_sig(&zone->zone_cv, &zone_status_lock)) {
			mutex_exit(&zone_status_lock);
			return (0);
		}
	}
	mutex_exit(&zone_status_lock);
	return (1);
}

/*
 * Block until the zone enters the requested state or the timeout expires,
 * whichever happens first.  Return (-1) if operation timed out, time remaining
 * otherwise.
 */
clock_t
zone_status_timedwait(zone_t *zone, clock_t tim, zone_status_t status)
{
	clock_t timeleft = 0;

	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);

	mutex_enter(&zone_status_lock);
	while (zone->zone_status < status && timeleft != -1) {
		timeleft = cv_timedwait(&zone->zone_cv, &zone_status_lock, tim);
	}
	mutex_exit(&zone_status_lock);
	return (timeleft);
}

/*
 * Block until the zone enters the requested state, the current process is
 * signaled, or the timeout expires, whichever happens first.  Return (-1) if
 * operation timed out, 0 if signaled, time remaining otherwise.
 */
clock_t
zone_status_timedwait_sig(zone_t *zone, clock_t tim, zone_status_t status)
{
	clock_t timeleft = tim - lbolt;

	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);

	mutex_enter(&zone_status_lock);
	while (zone->zone_status < status) {
		timeleft = cv_timedwait_sig(&zone->zone_cv, &zone_status_lock,
		    tim);
		if (timeleft <= 0)
			break;
	}
	mutex_exit(&zone_status_lock);
	return (timeleft);
}
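
/*
 * Illustrative sketch (not part of the original file): a bounded,
 * signal-interruptible wait for a zone to come up, giving up after five
 * seconds.  The timeout is an absolute lbolt value:
 *
 *	if (zone_status_timedwait_sig(zone, lbolt + 5 * hz,
 *	    ZONE_IS_RUNNING) <= 0) {
 *		... timed out (-1) or signaled (0); zone not running ...
 *	}
 */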

/*
 * Zones have two reference counts: one for references from credential
 * structures (zone_cred_ref), and one (zone_ref) for everything else.
 * This is so we can allow a zone to be rebooted while there are still
 * outstanding cred references, since certain drivers cache dblks (which
 * implicitly results in cached creds).  We wait for zone_ref to drop to
 * 0 (actually 1), but not zone_cred_ref.  The zone structure itself is
 * later freed when the zone_cred_ref drops to 0, though nothing other
 * than the zone id and privilege set should be accessed once the zone
 * is "dead".
 *
 * A debugging flag, zone_wait_for_cred, can be set to a non-zero value
 * to force halt/reboot to block waiting for the zone_cred_ref to drop
 * to 0.  This can be useful to flush out other sources of cached creds
 * that may be less innocuous than the driver case.
 */

int zone_wait_for_cred = 0;

static void
zone_hold_locked(zone_t *z)
{
	ASSERT(MUTEX_HELD(&z->zone_lock));
	z->zone_ref++;
	ASSERT(z->zone_ref != 0);
}

void
zone_hold(zone_t *z)
{
	mutex_enter(&z->zone_lock);
	zone_hold_locked(z);
	mutex_exit(&z->zone_lock);
}

/*
 * If the non-cred ref count drops to 1 and either the cred ref count
 * is 0 or we aren't waiting for cred references, the zone is ready to
 * be destroyed.
 */
#define	ZONE_IS_UNREF(zone)	((zone)->zone_ref == 1 && \
	    (!zone_wait_for_cred || (zone)->zone_cred_ref == 0))

void
zone_rele(zone_t *z)
{
	boolean_t wakeup;

	mutex_enter(&z->zone_lock);
	ASSERT(z->zone_ref != 0);
	z->zone_ref--;
	if (z->zone_ref == 0 && z->zone_cred_ref == 0) {
		/* no more refs, free the structure */
		mutex_exit(&z->zone_lock);
		zone_free(z);
		return;
	}
	/* signal zone_destroy so the zone can finish halting */
	wakeup = (ZONE_IS_UNREF(z) && zone_status_get(z) >= ZONE_IS_DEAD);
	mutex_exit(&z->zone_lock);

	if (wakeup) {
		/*
		 * Grabbing zonehash_lock here effectively synchronizes with
		 * zone_destroy() to avoid missed signals.
		 */
		mutex_enter(&zonehash_lock);
		cv_broadcast(&zone_destroy_cv);
		mutex_exit(&zonehash_lock);
	}
}

void
zone_cred_hold(zone_t *z)
{
	mutex_enter(&z->zone_lock);
	z->zone_cred_ref++;
	ASSERT(z->zone_cred_ref != 0);
	mutex_exit(&z->zone_lock);
}

void
zone_cred_rele(zone_t *z)
{
	boolean_t wakeup;

	mutex_enter(&z->zone_lock);
	ASSERT(z->zone_cred_ref != 0);
	z->zone_cred_ref--;
	if (z->zone_ref == 0 && z->zone_cred_ref == 0) {
		/* no more refs, free the structure */
		mutex_exit(&z->zone_lock);
		zone_free(z);
		return;
	}
	/*
	 * If zone_destroy is waiting for the cred references to drain
	 * out, and they have, signal it.
	 */
	wakeup = (zone_wait_for_cred && ZONE_IS_UNREF(z) &&
	    zone_status_get(z) >= ZONE_IS_DEAD);
	mutex_exit(&z->zone_lock);

	if (wakeup) {
		/*
		 * Grabbing zonehash_lock here effectively synchronizes with
		 * zone_destroy() to avoid missed signals.
		 */
		mutex_enter(&zonehash_lock);
		cv_broadcast(&zone_destroy_cv);
		mutex_exit(&zonehash_lock);
	}
}

void
zone_task_hold(zone_t *z)
{
	mutex_enter(&z->zone_lock);
	z->zone_ntasks++;
	ASSERT(z->zone_ntasks != 0);
	mutex_exit(&z->zone_lock);
}

void
zone_task_rele(zone_t *zone)
{
	uint_t refcnt;

	mutex_enter(&zone->zone_lock);
	ASSERT(zone->zone_ntasks != 0);
	refcnt = --zone->zone_ntasks;
	if (refcnt > 1) {	/* Common case */
		mutex_exit(&zone->zone_lock);
		return;
	}
	zone_hold_locked(zone);	/* so we can use the zone_t later */
	mutex_exit(&zone->zone_lock);
	if (refcnt == 1) {
		/*
		 * See if the zone is shutting down.
		 */
		mutex_enter(&zone_status_lock);
		if (zone_status_get(zone) != ZONE_IS_SHUTTING_DOWN) {
			goto out;
		}

		/*
		 * Make sure the ntasks didn't change since we
		 * dropped zone_lock.
		 */
		mutex_enter(&zone->zone_lock);
		if (refcnt != zone->zone_ntasks) {
			mutex_exit(&zone->zone_lock);
			goto out;
		}
		mutex_exit(&zone->zone_lock);

		/*
		 * No more user processes in the zone.  The zone is empty.
		 */
		zone_status_set(zone, ZONE_IS_EMPTY);
		goto out;
	}

	ASSERT(refcnt == 0);
	/*
	 * zsched has exited; the zone is dead.
	 */
	zone->zone_zsched = NULL;	/* paranoia */
	mutex_enter(&zone_status_lock);
	zone_status_set(zone, ZONE_IS_DEAD);
out:
	mutex_exit(&zone_status_lock);
	zone_rele(zone);
}

zoneid_t
getzoneid(void)
{
	return (curproc->p_zone->zone_id);
}

/*
 * Internal versions of zone_find_by_*().  These don't zone_hold() or
 * check the validity of a zone's state.
 */
static zone_t *
zone_find_all_by_id(zoneid_t zoneid)
{
	mod_hash_val_t hv;
	zone_t *zone = NULL;

	ASSERT(MUTEX_HELD(&zonehash_lock));

	if (mod_hash_find(zonehashbyid,
	    (mod_hash_key_t)(uintptr_t)zoneid, &hv) == 0)
		zone = (zone_t *)hv;
	return (zone);
}

static zone_t *
zone_find_all_by_name(char *name)
{
	mod_hash_val_t hv;
	zone_t *zone = NULL;

	ASSERT(MUTEX_HELD(&zonehash_lock));

	if (mod_hash_find(zonehashbyname, (mod_hash_key_t)name, &hv) == 0)
		zone = (zone_t *)hv;
	return (zone);
}
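
/*
 * Illustrative sketch (assumed caller, not original text): the usual
 * lookup pattern.  The zone is returned held, so it cannot be freed out
 * from under the caller until the matching zone_rele():
 *
 *	zone_t *zone;
 *
 *	if ((zone = zone_find_by_id(zoneid)) == NULL)
 *		return (ESRCH);		(no such, or not visible, zone)
 *	... use zone ...
 *	zone_rele(zone);
 */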

/*
 * Public interface for looking up a zone by zoneid.  Only returns the zone if
 * it is fully initialized, and has not yet begun the zone_destroy() sequence.
 * Caller must call zone_rele() once it is done with the zone.
 *
 * The zone may begin the zone_destroy() sequence immediately after this
 * function returns, but may be safely used until zone_rele() is called.
 */
zone_t *
zone_find_by_id(zoneid_t zoneid)
{
	zone_t *zone;
	zone_status_t status;

	mutex_enter(&zonehash_lock);
	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
		mutex_exit(&zonehash_lock);
		return (NULL);
	}
	status = zone_status_get(zone);
	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
		/*
		 * For all practical purposes the zone doesn't exist.
		 */
		mutex_exit(&zonehash_lock);
		return (NULL);
	}
	zone_hold(zone);
	mutex_exit(&zonehash_lock);
	return (zone);
}

/*
 * Similar to zone_find_by_id, but using zone name as the key.
 */
zone_t *
zone_find_by_name(char *name)
{
	zone_t *zone;
	zone_status_t status;

	mutex_enter(&zonehash_lock);
	if ((zone = zone_find_all_by_name(name)) == NULL) {
		mutex_exit(&zonehash_lock);
		return (NULL);
	}
	status = zone_status_get(zone);
	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
		/*
		 * For all practical purposes the zone doesn't exist.
		 */
		mutex_exit(&zonehash_lock);
		return (NULL);
	}
	zone_hold(zone);
	mutex_exit(&zonehash_lock);
	return (zone);
}

/*
 * Similar to zone_find_by_id(), using the path as a key.  For instance,
 * if there is a zone "foo" rooted at /foo/root, and the path argument
 * is "/foo/root/proc", it will return the held zone_t corresponding to
 * zone "foo".
 *
 * zone_find_by_path() always returns a non-NULL value, since at the
 * very least every path will be contained in the global zone.
 *
 * As with the other zone_find_by_*() functions, the caller is
 * responsible for zone_rele()ing the return value of this function.
 */
zone_t *
zone_find_by_path(const char *path)
{
	zone_t *zone;
	zone_t *zret = NULL;
	zone_status_t status;

	if (path == NULL) {
		/*
		 * Call from rootconf().
		 */
		zone_hold(global_zone);
		return (global_zone);
	}
	ASSERT(*path == '/');
	mutex_enter(&zonehash_lock);
	for (zone = list_head(&zone_active); zone != NULL;
	    zone = list_next(&zone_active, zone)) {
		if (ZONE_PATH_VISIBLE(path, zone))
			zret = zone;
	}
	ASSERT(zret != NULL);
	status = zone_status_get(zret);
	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
		/*
		 * Zone practically doesn't exist.
		 */
		zret = global_zone;
	}
	zone_hold(zret);
	mutex_exit(&zonehash_lock);
	return (zret);
}

/*
 * Get the number of cpus visible to this zone.  The system-wide global
 * 'ncpus' is returned if pools are disabled, the caller is in the
 * global zone, or a NULL zone argument is passed in.
 */
int
zone_ncpus_get(zone_t *zone)
{
	int myncpus = zone == NULL ? 0 : zone->zone_ncpus;

	return (myncpus != 0 ? myncpus : ncpus);
}

/*
 * Get the number of online cpus visible to this zone.  The system-wide
 * global 'ncpus_online' is returned if pools are disabled, the caller
 * is in the global zone, or a NULL zone argument is passed in.
 */
int
zone_ncpus_online_get(zone_t *zone)
{
	int myncpus_online = zone == NULL ? 0 : zone->zone_ncpus_online;

	return (myncpus_online != 0 ? myncpus_online : ncpus_online);
}

/*
 * Return the pool to which the zone is currently bound.
 */
pool_t *
zone_pool_get(zone_t *zone)
{
	ASSERT(pool_lock_held());

	return (zone->zone_pool);
}

/*
 * Set the zone's pool pointer and update the zone's visibility to match
 * the resources in the new pool.
 */
void
zone_pool_set(zone_t *zone, pool_t *pool)
{
	ASSERT(pool_lock_held());
	ASSERT(MUTEX_HELD(&cpu_lock));

	zone->zone_pool = pool;
	zone_pset_set(zone, pool->pool_pset->pset_id);
}

/*
 * Return the cached value of the id of the processor set to which the
 * zone is currently bound.  The value will be ZONE_PS_INVAL if the pools
 * facility is disabled.
 */
psetid_t
zone_pset_get(zone_t *zone)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	return (zone->zone_psetid);
}

/*
 * Set the cached value of the id of the processor set to which the zone
 * is currently bound.  Also update the zone's visibility to match the
 * resources in the new processor set.
 */
void
zone_pset_set(zone_t *zone, psetid_t newpsetid)
{
	psetid_t oldpsetid;

	ASSERT(MUTEX_HELD(&cpu_lock));
	oldpsetid = zone_pset_get(zone);

	if (oldpsetid == newpsetid)
		return;
	/*
	 * Global zone sees all.
	 */
	if (zone != global_zone) {
		zone->zone_psetid = newpsetid;
		if (newpsetid != ZONE_PS_INVAL)
			pool_pset_visibility_add(newpsetid, zone);
		if (oldpsetid != ZONE_PS_INVAL)
			pool_pset_visibility_remove(oldpsetid, zone);
	}
	/*
	 * Disabling pools, so we should start using the global values
	 * for ncpus and ncpus_online.
	 */
	if (newpsetid == ZONE_PS_INVAL) {
		zone->zone_ncpus = 0;
		zone->zone_ncpus_online = 0;
	}
}

/*
 * Walk the list of active zones and issue the provided callback for
 * each of them.
 *
 * Caller must not be holding any locks that may be acquired under
 * zonehash_lock.  See comment at the beginning of the file for a list of
 * common locks and their interactions with zones.
 */
int
zone_walk(int (*cb)(zone_t *, void *), void *data)
{
	zone_t *zone;
	int ret = 0;
	zone_status_t status;

	mutex_enter(&zonehash_lock);
	for (zone = list_head(&zone_active); zone != NULL;
	    zone = list_next(&zone_active, zone)) {
		/*
		 * Skip zones that shouldn't be externally visible.
		 */
		status = zone_status_get(zone);
		if (status < ZONE_IS_READY || status > ZONE_IS_DOWN)
			continue;
		/*
		 * Bail immediately if any callback invocation returns a
		 * non-zero value.
		 */
		ret = (*cb)(zone, data);
		if (ret != 0)
			break;
	}
	mutex_exit(&zonehash_lock);
	return (ret);
}
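
/*
 * Illustrative sketch (hypothetical callback, not original text):
 * counting the externally visible zones with zone_walk().  A callback
 * returns 0 to continue the walk; any non-zero value stops the walk and
 * is passed back to the caller:
 *
 *	static int
 *	count_zone_cb(zone_t *zone, void *arg)
 *	{
 *		(*(uint_t *)arg)++;
 *		return (0);
 *	}
 *
 *	uint_t nzones = 0;
 *	(void) zone_walk(count_zone_cb, &nzones);
 */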

static int
zone_set_root(zone_t *zone, const char *upath)
{
	vnode_t *vp;
	int trycount;
	int error = 0;
	char *path;
	struct pathname upn, pn;
	size_t pathlen;

	if ((error = pn_get((char *)upath, UIO_USERSPACE, &upn)) != 0)
		return (error);

	pn_alloc(&pn);

	/* prevent infinite loop */
	trycount = 10;
	for (;;) {
		if (--trycount <= 0) {
			error = ESTALE;
			goto out;
		}

		if ((error = lookuppn(&upn, &pn, FOLLOW, NULLVPP, &vp)) == 0) {
			/*
			 * VOP_ACCESS() may cover 'vp' with a new
			 * filesystem, if 'vp' is an autoFS vnode.
			 * Get the new 'vp' if so.
			 */
			if ((error = VOP_ACCESS(vp, VEXEC, 0, CRED())) == 0 &&
			    (vp->v_vfsmountedhere == NULL ||
			    (error = traverse(&vp)) == 0)) {
				pathlen = pn.pn_pathlen + 2;
				path = kmem_alloc(pathlen, KM_SLEEP);
				(void) strncpy(path, pn.pn_path,
				    pn.pn_pathlen + 1);
				path[pathlen - 2] = '/';
				path[pathlen - 1] = '\0';
				pn_free(&pn);
				pn_free(&upn);

				/* Success! */
				break;
			}
			VN_RELE(vp);
		}
		if (error != ESTALE)
			goto out;
	}

	ASSERT(error == 0);
	zone->zone_rootvp = vp;		/* we hold a reference to vp */
	zone->zone_rootpath = path;
	zone->zone_rootpathlen = pathlen;
	return (0);

out:
	pn_free(&pn);
	pn_free(&upn);
	return (error);
}

#define	isalnum(c)	(((c) >= '0' && (c) <= '9') || \
			((c) >= 'a' && (c) <= 'z') || \
			((c) >= 'A' && (c) <= 'Z'))

static int
zone_set_name(zone_t *zone, const char *uname)
{
	char *kname = kmem_zalloc(ZONENAME_MAX, KM_SLEEP);
	size_t len;
	int i, err;

	if ((err = copyinstr(uname, kname, ZONENAME_MAX, &len)) != 0) {
		kmem_free(kname, ZONENAME_MAX);
		return (err);	/* EFAULT or ENAMETOOLONG */
	}

	/* must be less than ZONENAME_MAX */
	if (len == ZONENAME_MAX && kname[ZONENAME_MAX - 1] != '\0') {
		kmem_free(kname, ZONENAME_MAX);
		return (EINVAL);
	}

	/*
	 * Name must start with an alphanumeric and must contain only
	 * alphanumerics, '-', '_' and '.'.
	 */
	if (!isalnum(kname[0])) {
		kmem_free(kname, ZONENAME_MAX);
		return (EINVAL);
	}
	for (i = 1; i < len - 1; i++) {
		if (!isalnum(kname[i]) && kname[i] != '-' && kname[i] != '_' &&
		    kname[i] != '.') {
			kmem_free(kname, ZONENAME_MAX);
			return (EINVAL);
		}
	}

	zone->zone_name = kname;
	return (0);
}

/*
 * Similar to thread_create(), but makes sure the thread is in the appropriate
 * zone's zsched process (curproc->p_zone->zone_zsched) before returning.
 */
/*ARGSUSED*/
kthread_t *
zthread_create(
    caddr_t stk,
    size_t stksize,
    void (*proc)(),
    void *arg,
    size_t len,
    pri_t pri)
{
	kthread_t *t;
	zone_t *zone = curproc->p_zone;
	proc_t *pp = zone->zone_zsched;

	zone_hold(zone);	/* Reference to be dropped when thread exits */

	/*
	 * No-one should be trying to create threads if the zone is shutting
	 * down and there aren't any kernel threads around.  See comment
	 * in zthread_exit().
	 */
	ASSERT(!(zone->zone_kthreads == NULL &&
	    zone_status_get(zone) >= ZONE_IS_EMPTY));
	/*
	 * Create a thread, but don't let it run until we've finished setting
	 * things up.
	 */
	t = thread_create(stk, stksize, proc, arg, len, pp, TS_STOPPED, pri);
	ASSERT(t->t_forw == NULL);
	mutex_enter(&zone_status_lock);
	if (zone->zone_kthreads == NULL) {
		t->t_forw = t->t_back = t;
	} else {
		kthread_t *tx = zone->zone_kthreads;

		t->t_forw = tx;
		t->t_back = tx->t_back;
		tx->t_back->t_forw = t;
		tx->t_back = t;
	}
	zone->zone_kthreads = t;
	mutex_exit(&zone_status_lock);

	mutex_enter(&pp->p_lock);
	t->t_proc_flag |= TP_ZTHREAD;
	project_rele(t->t_proj);
	t->t_proj = project_hold(pp->p_task->tk_proj);

	/*
	 * Setup complete, let it run.
	 */
	thread_lock(t);
	t->t_schedflag |= TS_ALLSTART;
	setrun_locked(t);
	thread_unlock(t);

	mutex_exit(&pp->p_lock);

	return (t);
}
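
/*
 * Illustrative sketch (hypothetical worker, not original text): a
 * per-zone kernel thread created with zthread_create() must terminate
 * with zthread_exit(), so the zone's kernel thread list and reference
 * count are unwound:
 *
 *	static void
 *	foo_zone_worker(void *arg)
 *	{
 *		... do per-zone work ...
 *		zthread_exit();
 *	}
 *
 *	(void) zthread_create(NULL, 0, foo_zone_worker, arg, 0,
 *	    minclsyspri);
 */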

/*
 * Similar to thread_create(), but makes sure the thread is in the appropriate
 * zone's zsched process (curproc->p_zone->zone_zsched) before returning.
 */
/*ARGSUSED*/
kthread_t *
zthread_create(
    caddr_t stk,
    size_t stksize,
    void (*proc)(),
    void *arg,
    size_t len,
    pri_t pri)
{
	kthread_t *t;
	zone_t *zone = curproc->p_zone;
	proc_t *pp = zone->zone_zsched;

	zone_hold(zone);	/* Reference to be dropped when thread exits */

	/*
	 * No-one should be trying to create threads if the zone is shutting
	 * down and there aren't any kernel threads around.  See comment
	 * in zthread_exit().
	 */
	ASSERT(!(zone->zone_kthreads == NULL &&
	    zone_status_get(zone) >= ZONE_IS_EMPTY));
	/*
	 * Create a thread, but don't let it run until we've finished setting
	 * things up.
	 */
	t = thread_create(stk, stksize, proc, arg, len, pp, TS_STOPPED, pri);
	ASSERT(t->t_forw == NULL);
	mutex_enter(&zone_status_lock);
	if (zone->zone_kthreads == NULL) {
		t->t_forw = t->t_back = t;
	} else {
		kthread_t *tx = zone->zone_kthreads;

		t->t_forw = tx;
		t->t_back = tx->t_back;
		tx->t_back->t_forw = t;
		tx->t_back = t;
	}
	zone->zone_kthreads = t;
	mutex_exit(&zone_status_lock);

	mutex_enter(&pp->p_lock);
	t->t_proc_flag |= TP_ZTHREAD;
	project_rele(t->t_proj);
	t->t_proj = project_hold(pp->p_task->tk_proj);

	/*
	 * Setup complete, let it run.
	 */
	thread_lock(t);
	t->t_schedflag |= TS_ALLSTART;
	setrun_locked(t);
	thread_unlock(t);

	mutex_exit(&pp->p_lock);

	return (t);
}

/*
 * Similar to thread_exit().  Must be called by threads created via
 * zthread_create().
 */
void
zthread_exit(void)
{
	kthread_t *t = curthread;
	proc_t *pp = curproc;
	zone_t *zone = pp->p_zone;

	mutex_enter(&zone_status_lock);

	/*
	 * Reparent to p0
	 */
	kpreempt_disable();
	mutex_enter(&pp->p_lock);
	t->t_proc_flag &= ~TP_ZTHREAD;
	t->t_procp = &p0;
	hat_thread_exit(t);
	mutex_exit(&pp->p_lock);
	kpreempt_enable();

	if (t->t_back == t) {
		ASSERT(t->t_forw == t);
		/*
		 * If the zone is empty, once the thread count
		 * goes to zero no further kernel threads can be
		 * created.  This is because if the creator is a process
		 * in the zone, then it must have exited before the zone
		 * state could be set to ZONE_IS_EMPTY.
		 * Otherwise, if the creator is a kernel thread in the
		 * zone, the thread count is non-zero.
		 *
		 * This really means that non-zone kernel threads should
		 * not create zone kernel threads.
		 */
		zone->zone_kthreads = NULL;
		if (zone_status_get(zone) == ZONE_IS_EMPTY) {
			zone_status_set(zone, ZONE_IS_DOWN);
		}
	} else {
		t->t_forw->t_back = t->t_back;
		t->t_back->t_forw = t->t_forw;
		if (zone->zone_kthreads == t)
			zone->zone_kthreads = t->t_forw;
	}
	mutex_exit(&zone_status_lock);
	zone_rele(zone);
	thread_exit();
	/* NOTREACHED */
}
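
/*
 * Illustrative sketch (not part of the original source) of the pairing
 * described above; the worker function name is hypothetical.  A thread
 * created with zthread_create() must terminate with zthread_exit()
 * rather than thread_exit(), so that it is unlinked from zone_kthreads
 * and the zone hold taken on its behalf is released:
 *
 *	static void
 *	my_zone_worker(void)
 *	{
 *		... do per-zone work ...
 *		zthread_exit();
 *	}
 *
 *	(void) zthread_create(NULL, 0, my_zone_worker, NULL, 0,
 *	    minclsyspri);
 */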

static void
zone_chdir(vnode_t *vp, vnode_t **vpp, proc_t *pp)
{
	vnode_t *oldvp;

	/* we're going to hold a reference here to the directory */
	VN_HOLD(vp);

#ifdef C2_AUDIT
	if (audit_active)	/* update abs cwd/root path see c2audit.c */
		audit_chdirec(vp, vpp);
#endif

	mutex_enter(&pp->p_lock);
	oldvp = *vpp;
	*vpp = vp;
	mutex_exit(&pp->p_lock);
	if (oldvp != NULL)
		VN_RELE(oldvp);
}

/*
 * Convert an rctl value represented by an nvlist_t into an rctl_val_t.
 */
static int
nvlist2rctlval(nvlist_t *nvl, rctl_val_t *rv)
{
	nvpair_t *nvp = NULL;
	boolean_t priv_set = B_FALSE;
	boolean_t limit_set = B_FALSE;
	boolean_t action_set = B_FALSE;

	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
		const char *name;
		uint64_t ui64;

		name = nvpair_name(nvp);
		if (nvpair_type(nvp) != DATA_TYPE_UINT64)
			return (EINVAL);
		(void) nvpair_value_uint64(nvp, &ui64);
		if (strcmp(name, "privilege") == 0) {
			/*
			 * Currently only privileged values are allowed, but
			 * this may change in the future.
			 */
			if (ui64 != RCPRIV_PRIVILEGED)
				return (EINVAL);
			rv->rcv_privilege = ui64;
			priv_set = B_TRUE;
		} else if (strcmp(name, "limit") == 0) {
			rv->rcv_value = ui64;
			limit_set = B_TRUE;
		} else if (strcmp(name, "action") == 0) {
			if (ui64 != RCTL_LOCAL_NOACTION &&
			    ui64 != RCTL_LOCAL_DENY)
				return (EINVAL);
			rv->rcv_flagaction = ui64;
			action_set = B_TRUE;
		} else {
			return (EINVAL);
		}
	}

	if (!(priv_set && limit_set && action_set))
		return (EINVAL);
	rv->rcv_action_signal = 0;
	rv->rcv_action_recipient = NULL;
	rv->rcv_action_recip_pid = -1;
	rv->rcv_firing_time = 0;

	return (0);
}
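
/*
 * For illustration (a sketch, not part of the original source), an
 * nvlist acceptable to nvlist2rctlval() carries exactly the three
 * uint64 pairs named above; anything else draws EINVAL:
 *
 *	nvlist_t *nvl;
 *
 *	(void) nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
 *	(void) nvlist_add_uint64(nvl, "privilege", RCPRIV_PRIVILEGED);
 *	(void) nvlist_add_uint64(nvl, "limit", 1024);
 *	(void) nvlist_add_uint64(nvl, "action", RCTL_LOCAL_DENY);
 */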

void
zone_icode(void)
{
	proc_t *p = ttoproc(curthread);
	struct core_globals *cg;

	/*
	 * For all purposes (ZONE_ATTR_INITPID and restart_init),
	 * storing just the pid of init is sufficient.
	 */
	p->p_zone->zone_proc_initpid = p->p_pid;

	/*
	 * Allocate user address space and stack segment
	 */

	p->p_cstime = p->p_stime = p->p_cutime = p->p_utime = 0;
	p->p_usrstack = (caddr_t)USRSTACK32;
	p->p_model = DATAMODEL_ILP32;
	p->p_stkprot = PROT_ZFOD & ~PROT_EXEC;
	p->p_datprot = PROT_ZFOD & ~PROT_EXEC;
	p->p_stk_ctl = INT32_MAX;

	p->p_as = as_alloc();
	p->p_as->a_userlimit = (caddr_t)USERLIMIT32;
	(void) hat_setup(p->p_as->a_hat, HAT_INIT);

	cg = zone_getspecific(core_zone_key, p->p_zone);
	ASSERT(cg != NULL);
	corectl_path_hold(cg->core_default_path);
	corectl_content_hold(cg->core_default_content);
	p->p_corefile = cg->core_default_path;
	p->p_content = cg->core_default_content;

	init_mstate(curthread, LMS_SYSTEM);

	p->p_zone->zone_boot_err = exec_init(zone_initname, 0,
	    p->p_zone->zone_bootargs);

	mutex_enter(&zone_status_lock);
	if (p->p_zone->zone_boot_err != 0) {
		/*
		 * Make sure we are still in the booting state-- we could have
		 * raced and already be shutting down, or even further along.
		 */
		if (zone_status_get(p->p_zone) == ZONE_IS_BOOTING)
			zone_status_set(p->p_zone, ZONE_IS_SHUTTING_DOWN);
		mutex_exit(&zone_status_lock);
		/* It's gone bad, dispose of the process */
		if (proc_exit(CLD_EXITED, p->p_zone->zone_boot_err) != 0) {
			mutex_enter(&p->p_lock);
			ASSERT(p->p_flag & SEXITLWPS);
			lwp_exit();
		}
	} else {
		if (zone_status_get(p->p_zone) == ZONE_IS_BOOTING)
			zone_status_set(p->p_zone, ZONE_IS_RUNNING);
		mutex_exit(&zone_status_lock);
		/* cause the process to return to userland. */
		lwp_rtt();
	}
}

struct zsched_arg {
	zone_t *zone;
	nvlist_t *nvlist;
};

/*
 * Per-zone "sched" workalike.  The similarity to "sched" doesn't have
 * anything to do with scheduling, but rather with the fact that
 * per-zone kernel threads are parented to zsched, just like regular
 * kernel threads are parented to sched (p0).
 *
 * zsched is also responsible for launching init for the zone.
 */
static void
zsched(void *arg)
{
	struct zsched_arg *za = arg;
	proc_t *pp = curproc;
	proc_t *initp = proc_init;
	zone_t *zone = za->zone;
	cred_t *cr, *oldcred;
	rctl_set_t *set;
	rctl_alloc_gp_t *gp;
	contract_t *ct = NULL;
	task_t *tk, *oldtk;
	rctl_entity_p_t e;
	kproject_t *pj;

	nvlist_t *nvl = za->nvlist;
	nvpair_t *nvp = NULL;

	bcopy("zsched", u.u_psargs, sizeof ("zsched"));
	bcopy("zsched", u.u_comm, sizeof ("zsched"));
	u.u_argc = 0;
	u.u_argv = NULL;
	u.u_envp = NULL;
	closeall(P_FINFO(pp));

	/*
	 * We are this zone's "zsched" process.  As the zone isn't generally
	 * visible yet we don't need to grab any locks before initializing its
	 * zone_proc pointer.
	 */
	zone_hold(zone);	/* this hold is released by zone_destroy() */
	zone->zone_zsched = pp;
	mutex_enter(&pp->p_lock);
	pp->p_zone = zone;
	mutex_exit(&pp->p_lock);

	/*
	 * Disassociate process from its 'parent'; parent ourselves to init
	 * (pid 1) and change other values as needed.
	 */
	sess_create();

	mutex_enter(&pidlock);
	proc_detach(pp);
	pp->p_ppid = 1;
	pp->p_flag |= SZONETOP;
	pp->p_ancpid = 1;
	pp->p_parent = initp;
	pp->p_psibling = NULL;
	if (initp->p_child)
		initp->p_child->p_psibling = pp;
	pp->p_sibling = initp->p_child;
	initp->p_child = pp;

	/* Decrement what newproc() incremented. */
	upcount_dec(crgetruid(CRED()), GLOBAL_ZONEID);
	/*
	 * Our credentials are about to become kcred-like, so we don't care
	 * about the caller's ruid.
	 */
	upcount_inc(crgetruid(kcred), zone->zone_id);
	mutex_exit(&pidlock);

	/*
	 * getting out of global zone, so decrement lwp counts
	 */
	pj = pp->p_task->tk_proj;
	mutex_enter(&global_zone->zone_nlwps_lock);
	pj->kpj_nlwps -= pp->p_lwpcnt;
	global_zone->zone_nlwps -= pp->p_lwpcnt;
	mutex_exit(&global_zone->zone_nlwps_lock);

	/*
	 * Create and join a new task in project '0' of this zone.
	 *
	 * We don't need to call holdlwps() since we know we're the only lwp in
	 * this process.
	 *
	 * task_join() returns with p_lock held.
	 */
	tk = task_create(0, zone);
	mutex_enter(&cpu_lock);
	oldtk = task_join(tk, 0);
	mutex_exit(&curproc->p_lock);
	mutex_exit(&cpu_lock);
	task_rele(oldtk);

	/*
	 * add lwp counts to zsched's zone, and increment project's task count
	 * due to the task created by task_create() above
	 */
	pj = pp->p_task->tk_proj;
	mutex_enter(&zone->zone_nlwps_lock);
	pj->kpj_nlwps += pp->p_lwpcnt;
	pj->kpj_ntasks += 1;
	zone->zone_nlwps += pp->p_lwpcnt;
	mutex_exit(&zone->zone_nlwps_lock);

	/*
	 * The process was created by a process in the global zone, hence the
	 * credentials are wrong.  We might as well have kcred-ish credentials.
	 */
	cr = zone->zone_kcred;
	crhold(cr);
	mutex_enter(&pp->p_crlock);
	oldcred = pp->p_cred;
	pp->p_cred = cr;
	mutex_exit(&pp->p_crlock);
	crfree(oldcred);

	/*
	 * Hold credentials again (for thread)
	 */
	crhold(cr);

	/*
	 * p_lwpcnt can't change since this is a kernel process.
	 */
	crset(pp, cr);

	/*
	 * Chroot
	 */
	zone_chdir(zone->zone_rootvp, &PTOU(pp)->u_cdir, pp);
	zone_chdir(zone->zone_rootvp, &PTOU(pp)->u_rdir, pp);

	/*
	 * Initialize zone's rctl set.
	 */
	set = rctl_set_create();
	gp = rctl_set_init_prealloc(RCENTITY_ZONE);
	mutex_enter(&pp->p_lock);
	e.rcep_p.zone = zone;
	e.rcep_t = RCENTITY_ZONE;
	zone->zone_rctls = rctl_set_init(RCENTITY_ZONE, pp, &e, set, gp);
	mutex_exit(&pp->p_lock);
	rctl_prealloc_destroy(gp);

	/*
	 * Apply the rctls passed in to zone_create().  This is basically a list
	 * assignment: all of the old values are removed and the new ones
	 * inserted.  That is, if an empty list is passed in, all values are
	 * removed.
	 */
	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
		rctl_dict_entry_t *rde;
		rctl_hndl_t hndl;
		char *name;
		nvlist_t **nvlarray;
		uint_t i, nelem;
		int error;	/* For ASSERT()s */

		name = nvpair_name(nvp);
		hndl = rctl_hndl_lookup(name);
		ASSERT(hndl != -1);
		rde = rctl_dict_lookup_hndl(hndl);
		ASSERT(rde != NULL);

		for (; /* ever */; ) {
			rctl_val_t oval;

			mutex_enter(&pp->p_lock);
			error = rctl_local_get(hndl, NULL, &oval, pp);
			mutex_exit(&pp->p_lock);
			ASSERT(error == 0);	/* Can't fail for RCTL_FIRST */
			ASSERT(oval.rcv_privilege != RCPRIV_BASIC);
			if (oval.rcv_privilege == RCPRIV_SYSTEM)
				break;
			mutex_enter(&pp->p_lock);
			error = rctl_local_delete(hndl, &oval, pp);
			mutex_exit(&pp->p_lock);
			ASSERT(error == 0);
		}
		error = nvpair_value_nvlist_array(nvp, &nvlarray, &nelem);
		ASSERT(error == 0);
		for (i = 0; i < nelem; i++) {
			rctl_val_t *nvalp;

			nvalp = kmem_cache_alloc(rctl_val_cache, KM_SLEEP);
			error = nvlist2rctlval(nvlarray[i], nvalp);
			ASSERT(error == 0);
			/*
			 * rctl_local_insert can fail if the value being
			 * inserted is a duplicate; this is OK.
			 */
			mutex_enter(&pp->p_lock);
			if (rctl_local_insert(hndl, nvalp, pp) != 0)
				kmem_cache_free(rctl_val_cache, nvalp);
			mutex_exit(&pp->p_lock);
		}
	}
	/*
	 * Tell the world that we're done setting up.
	 *
	 * At this point we want to set the zone status to ZONE_IS_READY
	 * and atomically set the zone's processor set visibility.  Once
	 * we drop pool_lock() this zone will automatically get updated
	 * to reflect any future changes to the pools configuration.
	 */
	pool_lock();
	mutex_enter(&cpu_lock);
	mutex_enter(&zonehash_lock);
	zone_uniqid(zone);
	zone_zsd_configure(zone);
	if (pool_state == POOL_ENABLED)
		zone_pset_set(zone, pool_default->pool_pset->pset_id);
	mutex_enter(&zone_status_lock);
	ASSERT(zone_status_get(zone) == ZONE_IS_UNINITIALIZED);
	zone_status_set(zone, ZONE_IS_READY);
	mutex_exit(&zone_status_lock);
	mutex_exit(&zonehash_lock);
	mutex_exit(&cpu_lock);
	pool_unlock();

	/*
	 * Once we see the zone transition to the ZONE_IS_BOOTING state,
	 * we launch init, and set the state to running.
	 */
	zone_status_wait_cpr(zone, ZONE_IS_BOOTING, "zsched");

	if (zone_status_get(zone) == ZONE_IS_BOOTING) {
		id_t cid;

		/*
		 * Ok, this is a little complicated.  We need to grab the
		 * zone's pool's scheduling class ID; note that by now, we
		 * are already bound to a pool if we need to be (zoneadmd
		 * will have done that to us while we're in the READY
		 * state).  *But* the scheduling class for the zone's 'init'
		 * must be explicitly passed to newproc, which doesn't
		 * respect pool bindings.
		 *
		 * We hold the pool_lock across the call to newproc() to
		 * close the obvious race: the pool's scheduling class
		 * could change before we manage to create the LWP with
		 * classid 'cid'.
		 */
		pool_lock();
		cid = pool_get_class(zone->zone_pool);
		if (cid == -1)
			cid = defaultcid;

		/*
		 * If this fails, zone_boot will ultimately fail.  The
		 * state of the zone will be set to SHUTTING_DOWN-- userland
		 * will have to tear down the zone, and fail, or try again.
		 */
		if ((zone->zone_boot_err = newproc(zone_icode, NULL, cid,
		    minclsyspri - 1, &ct)) != 0) {
			mutex_enter(&zone_status_lock);
			zone_status_set(zone, ZONE_IS_SHUTTING_DOWN);
			mutex_exit(&zone_status_lock);
		}
		pool_unlock();
	}

	/*
	 * Wait for zone_destroy() to be called.  This is what we spend
	 * most of our life doing.
	 */
	zone_status_wait_cpr(zone, ZONE_IS_DYING, "zsched");

	if (ct)
		/*
		 * At this point the process contract should be empty.
		 * (Though if it isn't, it's not the end of the world.)
		 */
		VERIFY(contract_abandon(ct, curproc, B_TRUE) == 0);

	/*
	 * Allow kcred to be freed when all referring processes
	 * (including this one) go away.  We can't just do this in
	 * zone_free because we need to wait for the zone_cred_ref to
	 * drop to 0 before calling zone_free, and the existence of
	 * zone_kcred will prevent that.  Thus, we call crfree here to
	 * balance the crdup in zone_create.  The crhold calls earlier
	 * in zsched will be dropped when the thread and process exit.
	 */
	crfree(zone->zone_kcred);
	zone->zone_kcred = NULL;

	exit(CLD_EXITED, 0);
}

/*
 * Helper function to determine if there are any submounts of the
 * provided path.  Used to make sure the zone doesn't "inherit" any
 * mounts from before it is created.
 */
static uint_t
zone_mount_count(const char *rootpath)
{
	vfs_t *vfsp;
	uint_t count = 0;
	size_t rootpathlen = strlen(rootpath);

	/*
	 * Holding zonehash_lock prevents race conditions with
	 * vfs_list_add()/vfs_list_remove() since we serialize with
	 * zone_find_by_path().
	 */
	ASSERT(MUTEX_HELD(&zonehash_lock));
	/*
	 * The rootpath must end with a '/'
	 */
	ASSERT(rootpath[rootpathlen - 1] == '/');

	/*
	 * This intentionally does not count the rootpath itself if that
	 * happens to be a mount point.
	 */
	vfs_list_read_lock();
	vfsp = rootvfs;
	do {
		if (strncmp(rootpath, refstr_value(vfsp->vfs_mntpt),
		    rootpathlen) == 0)
			count++;
		vfsp = vfsp->vfs_next;
	} while (vfsp != rootvfs);
	vfs_list_unlock();
	return (count);
}
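
/*
 * For example (with made-up paths): given rootpath "/export/z1/root/",
 * a mount at "/export/z1/root/usr" is counted, while mounts at
 * "/export/z1" or "/export/z1root" are not, since the comparison is
 * against the full '/'-terminated prefix.  zone_is_nested() below
 * applies an analogous prefix test between two zones' rootpaths.
 */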

/*
 * Helper function to make sure that a zone created on 'rootpath'
 * wouldn't end up containing other zones' rootpaths.
 */
static boolean_t
zone_is_nested(const char *rootpath)
{
	zone_t *zone;
	size_t rootpathlen = strlen(rootpath);
	size_t len;

	ASSERT(MUTEX_HELD(&zonehash_lock));

	for (zone = list_head(&zone_active); zone != NULL;
	    zone = list_next(&zone_active, zone)) {
		if (zone == global_zone)
			continue;
		len = strlen(zone->zone_rootpath);
		if (strncmp(rootpath, zone->zone_rootpath,
		    MIN(rootpathlen, len)) == 0)
			return (B_TRUE);
	}
	return (B_FALSE);
}

static int
zone_set_privset(zone_t *zone, const priv_set_t *zone_privs,
    size_t zone_privssz)
{
	priv_set_t *privs;

	/*
	 * Check the size before allocating, so that an undersized buffer
	 * neither leaks the allocation nor hands set_errno()'s return
	 * value to callers that expect a plain errno.
	 */
	if (zone_privssz < sizeof (priv_set_t))
		return (ENOMEM);

	privs = kmem_alloc(sizeof (priv_set_t), KM_SLEEP);

	if (copyin(zone_privs, privs, sizeof (priv_set_t))) {
		kmem_free(privs, sizeof (priv_set_t));
		return (EFAULT);
	}

	zone->zone_privset = privs;
	return (0);
}

/*
 * We make creative use of nvlists to pass in rctls from userland.  The list is
 * a list of the following structures:
 *
 * (name = rctl_name, value = nvpair_list_array)
 *
 * Where each element of the nvpair_list_array is of the form:
 *
 * [(name = "privilege", value = RCPRIV_PRIVILEGED),
 *	(name = "limit", value = uint64_t),
 *	(name = "action", value = (RCTL_LOCAL_NOACTION || RCTL_LOCAL_DENY))]
 */
static int
parse_rctls(caddr_t ubuf, size_t buflen, nvlist_t **nvlp)
{
	nvpair_t *nvp = NULL;
	nvlist_t *nvl = NULL;
	char *kbuf;
	int error;
	rctl_val_t rv;

	*nvlp = NULL;

	if (buflen == 0)
		return (0);

	if ((kbuf = kmem_alloc(buflen, KM_NOSLEEP)) == NULL)
		return (ENOMEM);
	if (copyin(ubuf, kbuf, buflen)) {
		error = EFAULT;
		goto out;
	}
	if (nvlist_unpack(kbuf, buflen, &nvl, KM_SLEEP) != 0) {
		/*
		 * nvl may have been allocated/free'd, but the value set to
		 * non-NULL, so we reset it here.
		 */
		nvl = NULL;
		error = EINVAL;
		goto out;
	}
	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
		rctl_dict_entry_t *rde;
		rctl_hndl_t hndl;
		nvlist_t **nvlarray;
		uint_t i, nelem;
		char *name;

		error = EINVAL;
		name = nvpair_name(nvp);
		if (strncmp(nvpair_name(nvp), "zone.", sizeof ("zone.") - 1)
		    != 0 || nvpair_type(nvp) != DATA_TYPE_NVLIST_ARRAY) {
			goto out;
		}
		if ((hndl = rctl_hndl_lookup(name)) == -1) {
			goto out;
		}
		rde = rctl_dict_lookup_hndl(hndl);
		error = nvpair_value_nvlist_array(nvp, &nvlarray, &nelem);
		ASSERT(error == 0);
		for (i = 0; i < nelem; i++) {
			if (error = nvlist2rctlval(nvlarray[i], &rv))
				goto out;
		}
		if (rctl_invalid_value(rde, &rv)) {
			error = EINVAL;
			goto out;
		}
	}
	error = 0;
	*nvlp = nvl;
out:
	kmem_free(kbuf, buflen);
	if (error && nvl != NULL)
		nvlist_free(nvl);
	return (error);
}

int
zone_create_error(int er_error, int er_ext, int *er_out)
{
	if (er_out != NULL) {
		if (copyout(&er_ext, er_out, sizeof (int))) {
			return (set_errno(EFAULT));
		}
	}
	return (set_errno(er_error));
}

/*
 * Parses a comma-separated list of ZFS datasets into a per-zone dictionary.
 */
static int
parse_zfs(zone_t *zone, caddr_t ubuf, size_t buflen)
{
	char *kbuf;
	char *dataset, *next;
	zone_dataset_t *zd;
	size_t len;

	if (ubuf == NULL || buflen == 0)
		return (0);

	if ((kbuf = kmem_alloc(buflen, KM_NOSLEEP)) == NULL)
		return (ENOMEM);

	if (copyin(ubuf, kbuf, buflen) != 0) {
		kmem_free(kbuf, buflen);
		return (EFAULT);
	}

	dataset = next = kbuf;
	for (;;) {
		zd = kmem_alloc(sizeof (zone_dataset_t), KM_SLEEP);

		next = strchr(dataset, ',');

		if (next == NULL)
			len = strlen(dataset);
		else
			len = next - dataset;

		zd->zd_dataset = kmem_alloc(len + 1, KM_SLEEP);
		bcopy(dataset, zd->zd_dataset, len);
		zd->zd_dataset[len] = '\0';

		list_insert_head(&zone->zone_datasets, zd);

		if (next == NULL)
			break;

		dataset = next + 1;
	}

	kmem_free(kbuf, buflen);
	return (0);
}
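
/*
 * For example (with hypothetical dataset names), the buffer
 * "tank/zone1,tank/shared" produces two zone_dataset_t entries,
 * "tank/zone1" and "tank/shared", on zone_datasets.  Since each entry
 * is inserted at the head of the list, the entries end up in the
 * reverse of their order in the input buffer.
 */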

/*
 * System call to create/initialize a new zone named 'zone_name', rooted
 * at 'zone_root', with a zone-wide privilege limit set of 'zone_privs',
 * and initialized with the zone-wide rctls described in 'rctlbuf'.
 *
 * If extended error is non-null, we may use it to return more detailed
 * error information.
 */
static zoneid_t
zone_create(const char *zone_name, const char *zone_root,
    const priv_set_t *zone_privs, size_t zone_privssz,
    caddr_t rctlbuf, size_t rctlbufsz,
    caddr_t zfsbuf, size_t zfsbufsz, int *extended_error)
{
	struct zsched_arg zarg;
	nvlist_t *rctls = NULL;
	proc_t *pp = curproc;
	zone_t *zone, *ztmp;
	zoneid_t zoneid;
	int error;
	int error2 = 0;
	char *str;
	cred_t *zkcr;

	if (secpolicy_zone_config(CRED()) != 0)
		return (set_errno(EPERM));

	/* can't boot zone from within chroot environment */
	if (PTOU(pp)->u_rdir != NULL && PTOU(pp)->u_rdir != rootdir)
		return (zone_create_error(ENOTSUP, ZE_CHROOTED,
		    extended_error));

	zone = kmem_zalloc(sizeof (zone_t), KM_SLEEP);
	zoneid = zone->zone_id = id_alloc(zoneid_space);
	zone->zone_status = ZONE_IS_UNINITIALIZED;
	zone->zone_pool = pool_default;
	zone->zone_pool_mod = gethrtime();
	zone->zone_psetid = ZONE_PS_INVAL;
	zone->zone_ncpus = 0;
	zone->zone_ncpus_online = 0;
	mutex_init(&zone->zone_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zone->zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zone->zone_cv, NULL, CV_DEFAULT, NULL);
	list_create(&zone->zone_zsd, sizeof (struct zsd_entry),
	    offsetof(struct zsd_entry, zsd_linkage));
	list_create(&zone->zone_datasets, sizeof (zone_dataset_t),
	    offsetof(zone_dataset_t, zd_linkage));

	if ((error = zone_set_name(zone, zone_name)) != 0) {
		zone_free(zone);
		return (zone_create_error(error, 0, extended_error));
	}

	if ((error = zone_set_root(zone, zone_root)) != 0) {
		zone_free(zone);
		return (zone_create_error(error, 0, extended_error));
	}
	if ((error = zone_set_privset(zone, zone_privs, zone_privssz)) != 0) {
		zone_free(zone);
		return (zone_create_error(error, 0, extended_error));
	}

	/* initialize node name to be the same as zone name */
	zone->zone_nodename = kmem_alloc(_SYS_NMLN, KM_SLEEP);
	(void) strncpy(zone->zone_nodename, zone->zone_name, _SYS_NMLN);
	zone->zone_nodename[_SYS_NMLN - 1] = '\0';

	zone->zone_domain = kmem_alloc(_SYS_NMLN, KM_SLEEP);
	zone->zone_domain[0] = '\0';
	zone->zone_shares = 1;
	zone->zone_bootargs = NULL;

	/*
	 * Zsched initializes the rctls.
	 */
	zone->zone_rctls = NULL;

	if ((error = parse_rctls(rctlbuf, rctlbufsz, &rctls)) != 0) {
		zone_free(zone);
		return (zone_create_error(error, 0, extended_error));
	}

	if ((error = parse_zfs(zone, zfsbuf, zfsbufsz)) != 0) {
		zone_free(zone);
		return (set_errno(error));
	}

	/*
	 * Stop all lwps since that's what normally happens as part of fork().
	 * This needs to happen before we grab any locks to avoid deadlock
	 * (another lwp in the process could be waiting for the held lock).
	 */
	if (curthread != pp->p_agenttp && !holdlwps(SHOLDFORK)) {
		zone_free(zone);
		if (rctls)
			nvlist_free(rctls);
		/* holdlwps() fails only if we were interrupted */
		error = EINTR;
		return (zone_create_error(error, 0, extended_error));
	}

	if (block_mounts() == 0) {
		mutex_enter(&pp->p_lock);
		if (curthread != pp->p_agenttp)
			continuelwps(pp);
		mutex_exit(&pp->p_lock);
		zone_free(zone);
		if (rctls)
			nvlist_free(rctls);
		/* block_mounts() returns zero only if we were interrupted */
		error = EINTR;
		return (zone_create_error(error, 0, extended_error));
	}

	/*
	 * Set up credential for kernel access.  After this, any errors
	 * should go through the dance in errout rather than calling
	 * zone_free directly.
	 */
	zone->zone_kcred = crdup(kcred);
	crsetzone(zone->zone_kcred, zone);
	priv_intersect(zone->zone_privset, &CR_PPRIV(zone->zone_kcred));
	priv_intersect(zone->zone_privset, &CR_EPRIV(zone->zone_kcred));
	priv_intersect(zone->zone_privset, &CR_IPRIV(zone->zone_kcred));
	priv_intersect(zone->zone_privset, &CR_LPRIV(zone->zone_kcred));

	mutex_enter(&zonehash_lock);
	/*
	 * Make sure zone doesn't already exist.
	 */
	if ((ztmp = zone_find_all_by_name(zone->zone_name)) != NULL) {
		zone_status_t status;

		status = zone_status_get(ztmp);
		if (status == ZONE_IS_READY || status == ZONE_IS_RUNNING)
			error = EEXIST;
		else
			error = EBUSY;
		goto errout;
	}

	/*
	 * Don't allow zone creations which would cause one zone's rootpath to
	 * be accessible from that of another (non-global) zone.
	 */
	if (zone_is_nested(zone->zone_rootpath)) {
		error = EBUSY;
		goto errout;
	}

	ASSERT(zonecount != 0);		/* check for leaks */
	if (zonecount + 1 > maxzones) {
		error = ENOMEM;
		goto errout;
	}

	if (zone_mount_count(zone->zone_rootpath) != 0) {
		error = EBUSY;
		error2 = ZE_AREMOUNTS;
		goto errout;
	}

	/*
	 * Zone is still incomplete, but we need to drop all locks while
	 * zsched() initializes this zone's kernel process.  We
	 * optimistically add the zone to the hashtable and associated
	 * lists so a parallel zone_create() doesn't try to create the
	 * same zone.
	 */
	zonecount++;
	(void) mod_hash_insert(zonehashbyid,
	    (mod_hash_key_t)(uintptr_t)zone->zone_id,
	    (mod_hash_val_t)(uintptr_t)zone);
	str = kmem_alloc(strlen(zone->zone_name) + 1, KM_SLEEP);
	(void) strcpy(str, zone->zone_name);
	(void) mod_hash_insert(zonehashbyname, (mod_hash_key_t)str,
	    (mod_hash_val_t)(uintptr_t)zone);
	/*
	 * Insert into active list.  At this point there are no 'hold's
	 * on the zone, but everyone else knows not to use it, so we can
	 * continue to use it.  zsched() will do a zone_hold() if the
	 * newproc() is successful.
	 */
	list_insert_tail(&zone_active, zone);
	mutex_exit(&zonehash_lock);

	zarg.zone = zone;
	zarg.nvlist = rctls;
	/*
	 * The process, task, and project rctls are probably wrong;
	 * we need an interface to get the default values of all rctls,
	 * and initialize zsched appropriately.  I'm not sure that that
	 * makes much of a difference, though.
	 */
	if (error = newproc(zsched, (void *)&zarg, syscid, minclsyspri, NULL)) {
		/*
		 * We need to undo all globally visible state.
		 */
		mutex_enter(&zonehash_lock);
		list_remove(&zone_active, zone);
		(void) mod_hash_destroy(zonehashbyname,
		    (mod_hash_key_t)(uintptr_t)zone->zone_name);
		(void) mod_hash_destroy(zonehashbyid,
		    (mod_hash_key_t)(uintptr_t)zone->zone_id);
		ASSERT(zonecount > 1);
		zonecount--;
		goto errout;
	}

	/*
	 * Zone creation can't fail from now on.
	 */

	/*
	 * Let the other lwps continue.
	 */
	mutex_enter(&pp->p_lock);
	if (curthread != pp->p_agenttp)
		continuelwps(pp);
	mutex_exit(&pp->p_lock);

	/*
	 * Wait for zsched to finish initializing the zone.
	 */
	zone_status_wait(zone, ZONE_IS_READY);
	/*
	 * The zone is fully visible, so we can let mounts progress.
	 */
	resume_mounts();
	if (rctls)
		nvlist_free(rctls);

	return (zoneid);

errout:
	mutex_exit(&zonehash_lock);
	/*
	 * Let the other lwps continue.
	 */
	mutex_enter(&pp->p_lock);
	if (curthread != pp->p_agenttp)
		continuelwps(pp);
	mutex_exit(&pp->p_lock);

	resume_mounts();
	if (rctls)
		nvlist_free(rctls);
	/*
	 * There is currently one reference to the zone, a cred_ref from
	 * zone_kcred.  To free the zone, we call crfree, which will call
	 * zone_cred_rele, which will call zone_free.
	 */
	ASSERT(zone->zone_cred_ref == 1);	/* for zone_kcred */
	ASSERT(zone->zone_kcred->cr_ref == 1);
	ASSERT(zone->zone_ref == 0);
	zkcr = zone->zone_kcred;
	zone->zone_kcred = NULL;
	crfree(zkcr);				/* triggers call to zone_free */
	return (zone_create_error(error, error2, extended_error));
}

/*
 * Cause the zone to boot.  This is pretty simple, since we let zoneadmd do
 * the heavy lifting.
 */
static int
zone_boot(zoneid_t zoneid, const char *bootargs)
{
	int err;
	zone_t *zone;

	if (secpolicy_zone_config(CRED()) != 0)
		return (set_errno(EPERM));
	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
		return (set_errno(EINVAL));

	mutex_enter(&zonehash_lock);
	/*
	 * Look for zone under hash lock to prevent races with calls to
	 * zone_shutdown, zone_destroy, etc.
	 */
	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
		mutex_exit(&zonehash_lock);
		return (set_errno(EINVAL));
	}

	if ((err = zone_set_bootargs(zone, bootargs)) != 0) {
		mutex_exit(&zonehash_lock);
		return (set_errno(err));
	}

	mutex_enter(&zone_status_lock);
	if (zone_status_get(zone) != ZONE_IS_READY) {
		mutex_exit(&zone_status_lock);
		mutex_exit(&zonehash_lock);
		return (set_errno(EINVAL));
	}
	zone_status_set(zone, ZONE_IS_BOOTING);
	mutex_exit(&zone_status_lock);

	zone_hold(zone);	/* so we can use the zone_t later */
	mutex_exit(&zonehash_lock);

	if (zone_status_wait_sig(zone, ZONE_IS_RUNNING) == 0) {
		zone_rele(zone);
		return (set_errno(EINTR));
	}

	/*
	 * Boot (starting init) might have failed, in which case the zone
	 * will go to the SHUTTING_DOWN state; an appropriate errno will
	 * be placed in zone->zone_boot_err, and so we return that.
	 */
	err = zone->zone_boot_err;
	zone_rele(zone);
	return (err ? set_errno(err) : 0);
}
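
/*
 * Putting the pieces together, the expected calling sequence (a sketch,
 * not part of the original source; in practice userland reaches these
 * through the zone(2) entry point below, with zoneadmd doing the
 * orchestration) is:
 *
 *	zoneid_t zid;
 *
 *	zid = zone_create("myzone", "/export/myzone/root", ...);
 *	if (zone_boot(zid, bootargs) != 0) {
 *		(boot failed; the zone is now ZONE_IS_SHUTTING_DOWN,
 *		so tear it down with zone_shutdown()/zone_destroy())
 *	}
 */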

/*
 * Kills all user processes in the zone, waiting for them all to exit
 * before returning.
 */
static int
zone_empty(zone_t *zone)
{
	int waitstatus;

	/*
	 * We need to drop zonehash_lock before killing all
	 * processes, otherwise we'll deadlock with zone_find_*
	 * which can be called from the exit path.
	 */
	ASSERT(MUTEX_NOT_HELD(&zonehash_lock));
	while ((waitstatus = zone_status_timedwait_sig(zone, lbolt + hz,
	    ZONE_IS_EMPTY)) == -1) {
		killall(zone->zone_id);
	}
	/*
	 * return EINTR if we were signaled
	 */
	if (waitstatus == 0)
		return (EINTR);
	return (0);
}

/*
 * Systemcall to start the zone's halt sequence.  By the time this
 * function successfully returns, all user processes and kernel threads
 * executing in it will have exited, ZSD shutdown callbacks executed,
 * and the zone status set to ZONE_IS_DOWN.
 *
 * It is possible that the call will interrupt itself if the caller is the
 * parent of any process running in the zone, and doesn't have SIGCHLD blocked.
 */
static int
zone_shutdown(zoneid_t zoneid)
{
	int error;
	zone_t *zone;
	zone_status_t status;

	if (secpolicy_zone_config(CRED()) != 0)
		return (set_errno(EPERM));
	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
		return (set_errno(EINVAL));

	/*
	 * Block mounts so that VFS_MOUNT() can get an accurate view of
	 * the zone's status with regards to ZONE_IS_SHUTTING_DOWN.
	 *
	 * e.g. NFS can fail the mount if it determines that the zone
	 * has already begun the shutdown sequence.
	 */
	if (block_mounts() == 0)
		return (set_errno(EINTR));
	mutex_enter(&zonehash_lock);
	/*
	 * Look for zone under hash lock to prevent races with other
	 * calls to zone_shutdown and zone_destroy.
	 */
	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
		mutex_exit(&zonehash_lock);
		resume_mounts();
		return (set_errno(EINVAL));
	}
	mutex_enter(&zone_status_lock);
	status = zone_status_get(zone);
	/*
	 * Fail if the zone isn't fully initialized yet.
	 */
	if (status < ZONE_IS_READY) {
		mutex_exit(&zone_status_lock);
		mutex_exit(&zonehash_lock);
		resume_mounts();
		return (set_errno(EINVAL));
	}
	/*
	 * If conditions required for zone_shutdown() to return have been met,
	 * return success.
	 */
	if (status >= ZONE_IS_DOWN) {
		mutex_exit(&zone_status_lock);
		mutex_exit(&zonehash_lock);
		resume_mounts();
		return (0);
	}
	/*
	 * If zone_shutdown() hasn't been called before, go through the motions.
	 * If it has, there's nothing to do but wait for the kernel threads to
	 * drain.
	 */
	if (status < ZONE_IS_EMPTY) {
		uint_t ntasks;

		mutex_enter(&zone->zone_lock);
		if ((ntasks = zone->zone_ntasks) != 1) {
			/*
			 * There's still stuff running.
			 */
			zone_status_set(zone, ZONE_IS_SHUTTING_DOWN);
		}
		mutex_exit(&zone->zone_lock);
		if (ntasks == 1) {
			/*
			 * The only way to create another task is through
			 * zone_enter(), which will block until we drop
			 * zonehash_lock.  The zone is empty.
			 */
			if (zone->zone_kthreads == NULL) {
				/*
				 * Skip ahead to ZONE_IS_DOWN
				 */
				zone_status_set(zone, ZONE_IS_DOWN);
			} else {
				zone_status_set(zone, ZONE_IS_EMPTY);
			}
		}
	}
	zone_hold(zone);	/* so we can use the zone_t later */
	mutex_exit(&zone_status_lock);
	mutex_exit(&zonehash_lock);
	resume_mounts();

	if (error = zone_empty(zone)) {
		zone_rele(zone);
		return (set_errno(error));
	}
	/*
	 * After the zone status goes to ZONE_IS_DOWN this zone will no
	 * longer be notified of changes to the pools configuration, so
	 * in order to not end up with a stale pool pointer, we point
	 * ourselves at the default pool and remove all resource
	 * visibility.  This is especially important as the zone_t may
	 * languish on the deathrow for a very long time waiting for
	 * cred's to drain out.
	 *
	 * This rebinding of the zone can happen multiple times
	 * (presumably due to interrupted or parallel systemcalls)
	 * without any adverse effects.
	 */
	if (pool_lock_intr() != 0) {
		zone_rele(zone);
		return (set_errno(EINTR));
	}
	if (pool_state == POOL_ENABLED) {
		mutex_enter(&cpu_lock);
		zone_pool_set(zone, pool_default);
		/*
		 * The zone no longer needs to be able to see any cpus.
		 */
		zone_pset_set(zone, ZONE_PS_INVAL);
		mutex_exit(&cpu_lock);
	}
	pool_unlock();

	/*
	 * ZSD shutdown callbacks can be executed multiple times, hence
	 * it is safe to not be holding any locks across this call.
	 */
	zone_zsd_callbacks(zone, ZSD_SHUTDOWN);

	mutex_enter(&zone_status_lock);
	if (zone->zone_kthreads == NULL && zone_status_get(zone) < ZONE_IS_DOWN)
		zone_status_set(zone, ZONE_IS_DOWN);
	mutex_exit(&zone_status_lock);

	/*
	 * Wait for kernel threads to drain.
	 */
	if (!zone_status_wait_sig(zone, ZONE_IS_DOWN)) {
		zone_rele(zone);
		return (set_errno(EINTR));
	}
	zone_rele(zone);
	return (0);
}
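
/*
 * Illustrative sketch (not part of the original source) of the ZSD
 * shutdown callbacks run above.  A subsystem registers its callbacks
 * once, typically at initialization; the key and function names here
 * are hypothetical:
 *
 *	static zone_key_t my_zsd_key;
 *
 *	zone_key_create(&my_zsd_key, my_zsd_create, my_zsd_shutdown,
 *	    my_zsd_destroy);
 *
 * my_zsd_shutdown() is then invoked for this zone (possibly more than
 * once, per the comment above) when zone_shutdown() runs, and
 * my_zsd_destroy() when zone_destroy() below runs.
 */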

/*
 * Systemcall entry point to finalize the zone halt process.  The caller
 * must have already successfully called zone_shutdown().
 *
 * Upon successful completion, the zone will have been fully destroyed:
 * zsched will have exited, destructor callbacks executed, and the zone
 * removed from the list of active zones.
 */
static int
zone_destroy(zoneid_t zoneid)
{
	uint64_t uniqid;
	zone_t *zone;
	zone_status_t status;

	if (secpolicy_zone_config(CRED()) != 0)
		return (set_errno(EPERM));
	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
		return (set_errno(EINVAL));

	mutex_enter(&zonehash_lock);
	/*
	 * Look for zone under hash lock to prevent races with other
	 * calls to zone_destroy.
	 */
	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
		mutex_exit(&zonehash_lock);
		return (set_errno(EINVAL));
	}

	if (zone_mount_count(zone->zone_rootpath) != 0) {
		mutex_exit(&zonehash_lock);
		return (set_errno(EBUSY));
	}
	mutex_enter(&zone_status_lock);
	status = zone_status_get(zone);
	if (status < ZONE_IS_DOWN) {
		mutex_exit(&zone_status_lock);
		mutex_exit(&zonehash_lock);
		return (set_errno(EBUSY));
	} else if (status == ZONE_IS_DOWN) {
		zone_status_set(zone, ZONE_IS_DYING); /* Tell zsched to exit */
	}
	mutex_exit(&zone_status_lock);
	zone_hold(zone);
	mutex_exit(&zonehash_lock);

	/*
	 * wait for zsched to exit
	 */
	zone_status_wait(zone, ZONE_IS_DEAD);
	zone_zsd_callbacks(zone, ZSD_DESTROY);
	uniqid = zone->zone_uniqid;
	zone_rele(zone);
	zone = NULL;	/* potentially free'd */

	mutex_enter(&zonehash_lock);
	for (; /* ever */; ) {
		boolean_t unref;

		if ((zone = zone_find_all_by_id(zoneid)) == NULL ||
		    zone->zone_uniqid != uniqid) {
			/*
			 * The zone has gone away.  Necessary conditions
			 * are met, so we return success.
			 */
			mutex_exit(&zonehash_lock);
			return (0);
		}
		mutex_enter(&zone->zone_lock);
		unref = ZONE_IS_UNREF(zone);
		mutex_exit(&zone->zone_lock);
		if (unref) {
			/*
			 * There is only one reference to the zone -- that
			 * added when the zone was added to the hashtables --
			 * and things will remain this way until we drop
			 * zonehash_lock... we can go ahead and cleanup the
			 * zone.
			 */
			break;
		}

		if (cv_wait_sig(&zone_destroy_cv, &zonehash_lock) == 0) {
			/* Signaled */
			mutex_exit(&zonehash_lock);
			return (set_errno(EINTR));
		}
	}

	/*
	 * It is now safe to let the zone be recreated; remove it from the
	 * lists.  The memory will not be freed until the last cred
	 * reference goes away.
	 */
	ASSERT(zonecount > 1);	/* must be > 1; can't destroy global zone */
	zonecount--;
	/* remove from active list and hash tables */
	list_remove(&zone_active, zone);
	(void) mod_hash_destroy(zonehashbyname,
	    (mod_hash_key_t)zone->zone_name);
	(void) mod_hash_destroy(zonehashbyid,
	    (mod_hash_key_t)(uintptr_t)zone->zone_id);
	mutex_exit(&zonehash_lock);

	/*
	 * Release the root vnode; we're not using it anymore.  Nor should any
	 * other thread that might access it exist.
	 */
	if (zone->zone_rootvp != NULL) {
		VN_RELE(zone->zone_rootvp);
		zone->zone_rootvp = NULL;
	}

	/* add to deathrow list */
	mutex_enter(&zone_deathrow_lock);
	list_insert_tail(&zone_deathrow, zone);
	mutex_exit(&zone_deathrow_lock);

	/*
	 * Drop last reference (which was added by zsched()), this will
	 * free the zone unless there are outstanding cred references.
	 */
	zone_rele(zone);
	return (0);
}

/*
 * Systemcall entry point for zone_getattr(2).
 */
static ssize_t
zone_getattr(zoneid_t zoneid, int attr, void *buf, size_t bufsize)
{
	size_t size;
	int error = 0, err;
	zone_t *zone;
	char *zonepath;
	zone_status_t zone_status;
	pid_t initpid;
	boolean_t global = (curproc->p_zone == global_zone);

	mutex_enter(&zonehash_lock);
	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
		mutex_exit(&zonehash_lock);
		return (set_errno(EINVAL));
	}
	zone_status = zone_status_get(zone);
	if (zone_status < ZONE_IS_READY) {
		mutex_exit(&zonehash_lock);
		return (set_errno(EINVAL));
	}
	zone_hold(zone);
	mutex_exit(&zonehash_lock);

	/*
	 * If not in the global zone, don't show information about other zones.
	 */
	if (!global && curproc->p_zone != zone) {
		zone_rele(zone);
		return (set_errno(EINVAL));
	}

	switch (attr) {
	case ZONE_ATTR_ROOT:
		if (global) {
			/*
			 * Copy the path to trim the trailing "/" (except for
			 * the global zone).
			 */
			if (zone != global_zone)
				size = zone->zone_rootpathlen - 1;
			else
				size = zone->zone_rootpathlen;
			zonepath = kmem_alloc(size, KM_SLEEP);
			bcopy(zone->zone_rootpath, zonepath, size);
			zonepath[size - 1] = '\0';
		} else {
			/*
			 * Caller is not in the global zone, just return
			 * faked-up path for current zone.
			 */
			zonepath = "/";
			size = 2;
		}
		if (bufsize > size)
			bufsize = size;
		if (buf != NULL) {
			err = copyoutstr(zonepath, buf, bufsize, NULL);
			if (err != 0 && err != ENAMETOOLONG)
				error = EFAULT;
		}
		if (global)
			kmem_free(zonepath, size);
		break;

	case ZONE_ATTR_NAME:
		size = strlen(zone->zone_name) + 1;
		if (bufsize > size)
			bufsize = size;
		if (buf != NULL) {
			err = copyoutstr(zone->zone_name, buf, bufsize, NULL);
			if (err != 0 && err != ENAMETOOLONG)
				error = EFAULT;
		}
		break;

	case ZONE_ATTR_STATUS:
		/*
		 * Since we're not holding zonehash_lock, the zone status
		 * may be anything; leave it up to userland to sort it out.
		 */
		size = sizeof (zone_status);
		if (bufsize > size)
			bufsize = size;
		zone_status = zone_status_get(zone);
		if (buf != NULL &&
		    copyout(&zone_status, buf, bufsize) != 0)
			error = EFAULT;
		break;
	case ZONE_ATTR_PRIVSET:
		size = sizeof (priv_set_t);
		if (bufsize > size)
			bufsize = size;
		if (buf != NULL &&
		    copyout(zone->zone_privset, buf, bufsize) != 0)
			error = EFAULT;
		break;
	case ZONE_ATTR_UNIQID:
		size = sizeof (zone->zone_uniqid);
		if (bufsize > size)
			bufsize = size;
		if (buf != NULL &&
		    copyout(&zone->zone_uniqid, buf, bufsize) != 0)
			error = EFAULT;
		break;
	case ZONE_ATTR_POOLID:
		{
			pool_t *pool;
			poolid_t poolid;

			if (pool_lock_intr() != 0) {
				error = EINTR;
				break;
			}
			pool = zone_pool_get(zone);
			poolid = pool->pool_id;
			pool_unlock();
			size = sizeof (poolid);
			if (bufsize > size)
				bufsize = size;
			if (buf != NULL && copyout(&poolid, buf, size) != 0)
				error = EFAULT;
		}
		break;
	case ZONE_ATTR_INITPID:
		size = sizeof (initpid);
		if (bufsize > size)
			bufsize = size;
		initpid = zone->zone_proc_initpid;
		if (initpid == -1) {
			error = ESRCH;
			break;
		}
		if (buf != NULL &&
		    copyout(&initpid, buf, bufsize) != 0)
			error = EFAULT;
		break;
	default:
		error = EINVAL;
	}
	zone_rele(zone);

	if (error)
		return (set_errno(error));
	return ((ssize_t)size);
}
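
/*
 * Because the attribute size is returned even when buf is NULL, the
 * usual userland idiom (an illustrative sketch, not part of the
 * original source) sizes its buffer with a first call and fetches with
 * a second:
 *
 *	ssize_t size;
 *	char *buf;
 *
 *	size = zone_getattr(zid, ZONE_ATTR_NAME, NULL, 0);
 *	buf = malloc(size);
 *	(void) zone_getattr(zid, ZONE_ATTR_NAME, buf, size);
 */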

/*
 * Return zero if the process has at least one vnode mapped into its
 * address space which shouldn't be allowed to change zones.
 */
static int
as_can_change_zones(void)
{
	proc_t *pp = curproc;
	struct seg *seg;
	struct as *as = pp->p_as;
	vnode_t *vp;
	int allow = 1;

	ASSERT(pp->p_as != &kas);
	AS_LOCK_ENTER(&as, &as->a_lock, RW_READER);
	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
		/*
		 * if we can't get a backing vnode for this segment then skip
		 * it.
		 */
		vp = NULL;
		if (SEGOP_GETVP(seg, seg->s_base, &vp) != 0 || vp == NULL)
			continue;
		if (!vn_can_change_zones(vp)) {	/* bail on first match */
			allow = 0;
			break;
		}
	}
	AS_LOCK_EXIT(&as, &as->a_lock);
	return (allow);
}

/*
 * Systemcall entry point for zone_enter().
 *
 * The current process is injected into said zone.  In the process
 * it will change its project membership, privileges, rootdir/cwd,
 * zone-wide rctls, and pool association to match those of the zone.
 *
 * The first zone_enter() called while the zone is in the ZONE_IS_READY
 * state will transition it to ZONE_IS_RUNNING.  Processes may only
 * enter a zone that is "ready" or "running".
 */
static int
zone_enter(zoneid_t zoneid)
{
	zone_t *zone;
	vnode_t *vp;
	proc_t *pp = curproc;
	contract_t *ct;
	cont_process_t *ctp;
	task_t *tk, *oldtk;
	kproject_t *zone_proj0;
	cred_t *cr, *newcr;
	pool_t *oldpool, *newpool;
	sess_t *sp;
	uid_t uid;
	zone_status_t status;
	int err = 0;
	rctl_entity_p_t e;

	if (secpolicy_zone_config(CRED()) != 0)
		return (set_errno(EPERM));
	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
		return (set_errno(EINVAL));

	/*
	 * Stop all lwps so we don't need to hold a lock to look at
	 * curproc->p_zone.  This needs to happen before we grab any
	 * locks to avoid deadlock (another lwp in the process could
	 * be waiting for the held lock).
	 */
	if (curthread != pp->p_agenttp && !holdlwps(SHOLDFORK))
		return (set_errno(EINTR));

	/*
	 * Make sure we're not changing zones with files open or mapped into
	 * our address space which shouldn't be changing zones.
	 */
	if (!files_can_change_zones()) {
		err = EBADF;
		goto out;
	}
	if (!as_can_change_zones()) {
		err = EFAULT;
		goto out;
	}

	mutex_enter(&zonehash_lock);
	if (pp->p_zone != global_zone) {
		mutex_exit(&zonehash_lock);
		err = EINVAL;
		goto out;
	}

	zone = zone_find_all_by_id(zoneid);
	if (zone == NULL) {
		mutex_exit(&zonehash_lock);
		err = EINVAL;
		goto out;
	}

	/*
	 * To prevent processes in a zone from holding contracts on
	 * extrazonal resources, and to avoid process contract
	 * memberships which span zones, contract holders and processes
	 * which aren't the sole members of their encapsulating process
	 * contracts are not allowed to zone_enter.
	 */
	ctp = pp->p_ct_process;
	ct = &ctp->conp_contract;
	mutex_enter(&ct->ct_lock);
	mutex_enter(&pp->p_lock);
	if ((avl_numnodes(&pp->p_ct_held) != 0) || (ctp->conp_nmembers != 1)) {
		mutex_exit(&pp->p_lock);
		mutex_exit(&ct->ct_lock);
		mutex_exit(&zonehash_lock);
		err = EINVAL;
		goto out;
	}

	/*
	 * Moreover, we don't allow processes whose encapsulating
	 * process contracts have inherited extrazonal contracts.
	 * While it would be easier to eliminate all process contracts
	 * with inherited contracts, we need to be able to give a
	 * restarted init (or other zone-penetrating process) its
	 * predecessor's contracts.
	 */
	if (ctp->conp_ninherited != 0) {
		contract_t *next;
		for (next = list_head(&ctp->conp_inherited); next;
		    next = list_next(&ctp->conp_inherited, next)) {
			if (contract_getzuniqid(next) != zone->zone_uniqid) {
				mutex_exit(&pp->p_lock);
				mutex_exit(&ct->ct_lock);
				mutex_exit(&zonehash_lock);
				err = EINVAL;
				goto out;
			}
		}
	}
	mutex_exit(&pp->p_lock);
	mutex_exit(&ct->ct_lock);

	status = zone_status_get(zone);
	if (status < ZONE_IS_READY || status >= ZONE_IS_SHUTTING_DOWN) {
		/*
		 * Can't join
		 */
		mutex_exit(&zonehash_lock);
		err = EINVAL;
		goto out;
	}

	/*
	 * Make sure new priv set is within the permitted set for caller
	 */
	if (!priv_issubset(zone->zone_privset, &CR_OPPRIV(CRED()))) {
		mutex_exit(&zonehash_lock);
		err = EPERM;
		goto out;
	}
	/*
	 * We want to momentarily drop zonehash_lock while we optimistically
	 * bind curproc to the pool it should be running in.  This is safe
	 * since the zone can't disappear (we have a hold on it).
	 */
	zone_hold(zone);
	mutex_exit(&zonehash_lock);

	/*
	 * Grab pool_lock to keep the pools configuration from changing
	 * and to stop ourselves from getting rebound to another pool
	 * until we join the zone.
	 */
	if (pool_lock_intr() != 0) {
		zone_rele(zone);
		err = EINTR;
		goto out;
	}
	ASSERT(secpolicy_pool(CRED()) == 0);
	/*
	 * Bind ourselves to the pool currently associated with the zone.
	 */
	oldpool = curproc->p_pool;
	newpool = zone_pool_get(zone);
	if (pool_state == POOL_ENABLED && newpool != oldpool &&
	    (err = pool_do_bind(newpool, P_PID, P_MYID,
	    POOL_BIND_ALL)) != 0) {
		pool_unlock();
		zone_rele(zone);
		goto out;
	}

	/*
	 * Grab cpu_lock now; we'll need it later when we call
	 * task_join().
	 */
	mutex_enter(&cpu_lock);
	mutex_enter(&zonehash_lock);
	/*
	 * Make sure the zone hasn't moved on since we dropped zonehash_lock.
	 */
	if (zone_status_get(zone) >= ZONE_IS_SHUTTING_DOWN) {
		/*
		 * Can't join anymore.
		 */
		mutex_exit(&zonehash_lock);
		mutex_exit(&cpu_lock);
		if (pool_state == POOL_ENABLED &&
		    newpool != oldpool)
			(void) pool_do_bind(oldpool, P_PID, P_MYID,
			    POOL_BIND_ALL);
		pool_unlock();
		zone_rele(zone);
		err = EINVAL;
		goto out;
	}

	mutex_enter(&pp->p_lock);
	zone_proj0 = zone->zone_zsched->p_task->tk_proj;
	/* verify that we do not exceed any task or lwp limits */
	mutex_enter(&zone->zone_nlwps_lock);
	/* add new lwps to zone and zone's proj0 */
	zone_proj0->kpj_nlwps += pp->p_lwpcnt;
	zone->zone_nlwps += pp->p_lwpcnt;
	/* add 1 task to zone's proj0 */
	zone_proj0->kpj_ntasks += 1;
	mutex_exit(&pp->p_lock);
	mutex_exit(&zone->zone_nlwps_lock);

	/* remove lwps from proc's old zone and old project */
	mutex_enter(&pp->p_zone->zone_nlwps_lock);
	pp->p_zone->zone_nlwps -= pp->p_lwpcnt;
	pp->p_task->tk_proj->kpj_nlwps -= pp->p_lwpcnt;
	mutex_exit(&pp->p_zone->zone_nlwps_lock);

	/*
	 * Joining the zone cannot fail from now on.
	 *
	 * This means that a lot of the following code can be commonized and
	 * shared with zsched().
	 */

	/*
	 * Reset the encapsulating process contract's zone.
	 */
	ASSERT(ct->ct_mzuniqid == GLOBAL_ZONEUNIQID);
	contract_setzuniqid(ct, zone->zone_uniqid);

	/*
	 * Create a new task and associate the process with the project keyed
	 * by (projid,zoneid).
	 *
	 * We might as well be in project 0; the global zone's projid doesn't
	 * make much sense in a zone anyhow.
	 *
	 * This also increments zone_ntasks, and returns with p_lock held.
	 */
	tk = task_create(0, zone);
	oldtk = task_join(tk, 0);
	mutex_exit(&cpu_lock);

	pp->p_flag |= SZONETOP;
	pp->p_zone = zone;

	/*
	 * call RCTLOP_SET functions on this proc
	 */
	e.rcep_p.zone = zone;
	e.rcep_t = RCENTITY_ZONE;
	(void) rctl_set_dup(NULL, NULL, pp, &e, zone->zone_rctls, NULL,
	    RCD_CALLBACK);
	mutex_exit(&pp->p_lock);

	/*
	 * We don't need to hold any of zsched's locks here; not only do we know
	 * the process and zone aren't going away, we know its session isn't
	 * changing either.
	 *
	 * By joining zsched's session here, we mimic the behavior in the
	 * global zone of init's sid being the pid of sched.  We extend this
	 * to all zlogin-like zone_enter()'ing processes as well.
	 */
	mutex_enter(&pidlock);
	sp = zone->zone_zsched->p_sessp;
	SESS_HOLD(sp);
	mutex_enter(&pp->p_lock);
	pgexit(pp);
	SESS_RELE(pp->p_sessp);
	pp->p_sessp = sp;
	pgjoin(pp, zone->zone_zsched->p_pidp);
	mutex_exit(&pp->p_lock);
	mutex_exit(&pidlock);

	mutex_exit(&zonehash_lock);
	/*
	 * We're firmly in the zone; let pools progress.
	 */
	pool_unlock();
	task_rele(oldtk);
	/*
	 * We don't need to retain a hold on the zone since we already
	 * incremented zone_ntasks, so the zone isn't going anywhere.
	 */
	zone_rele(zone);

	/*
	 * Chroot
	 */
	vp = zone->zone_rootvp;
	zone_chdir(vp, &PTOU(pp)->u_cdir, pp);
	zone_chdir(vp, &PTOU(pp)->u_rdir, pp);

	/*
	 * Change process credentials
	 */
	newcr = cralloc();
	mutex_enter(&pp->p_crlock);
	cr = pp->p_cred;
	crcopy_to(cr, newcr);
	crsetzone(newcr, zone);
	pp->p_cred = newcr;

	/*
	 * Restrict all process privilege sets to zone limit
	 */
	priv_intersect(zone->zone_privset, &CR_PPRIV(newcr));
	priv_intersect(zone->zone_privset, &CR_EPRIV(newcr));
	priv_intersect(zone->zone_privset, &CR_IPRIV(newcr));
	priv_intersect(zone->zone_privset, &CR_LPRIV(newcr));
	mutex_exit(&pp->p_crlock);
	crset(pp, newcr);

	/*
	 * Adjust upcount to reflect zone entry.
	 */
	uid = crgetruid(newcr);
	mutex_enter(&pidlock);
	upcount_dec(uid, GLOBAL_ZONEID);
	upcount_inc(uid, zoneid);
	mutex_exit(&pidlock);

	/*
	 * Set up core file path and content.
	 */
	set_core_defaults();

out:
	/*
	 * Let the other lwps continue.
	 */
	mutex_enter(&pp->p_lock);
	if (curthread != pp->p_agenttp)
		continuelwps(pp);
	mutex_exit(&pp->p_lock);

	return (err != 0 ? set_errno(err) : 0);
}

/*
 * Systemcall entry point for zone_list(2).
 *
 * Processes running in a (non-global) zone only see themselves.
 */
static int
zone_list(zoneid_t *zoneidlist, uint_t *numzones)
{
	zoneid_t *zoneids;
	zone_t *zone;
	uint_t user_nzones, real_nzones;
	int error = 0;
	uint_t i;

	if (copyin(numzones, &user_nzones, sizeof (uint_t)) != 0)
		return (set_errno(EFAULT));

	if (curproc->p_zone != global_zone) {
		/* just return current zone */
		real_nzones = 1;
		zoneids = kmem_alloc(sizeof (zoneid_t), KM_SLEEP);
		zoneids[0] = curproc->p_zone->zone_id;
	} else {
		mutex_enter(&zonehash_lock);
		real_nzones = zonecount;
		if (real_nzones) {
			zoneids = kmem_alloc(real_nzones * sizeof (zoneid_t),
			    KM_SLEEP);
			i = 0;
			for (zone = list_head(&zone_active); zone != NULL;
			    zone = list_next(&zone_active, zone))
				zoneids[i++] = zone->zone_id;
			ASSERT(i == real_nzones);
		}
		mutex_exit(&zonehash_lock);
	}

	if (user_nzones > real_nzones)
		user_nzones = real_nzones;

	if (copyout(&real_nzones, numzones, sizeof (uint_t)) != 0)
		error = EFAULT;
	else if (zoneidlist != NULL && user_nzones != 0) {
		if (copyout(zoneids, zoneidlist,
		    user_nzones * sizeof (zoneid_t)) != 0)
			error = EFAULT;
	}

	if (real_nzones)
		kmem_free(zoneids, real_nzones * sizeof (zoneid_t));

	if (error)
		return (set_errno(error));
	else
		return (0);
}
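
/*
 * The count copied back out can exceed the number of ids the caller
 * asked for, so the usual userland idiom (an illustrative sketch, not
 * part of the original source) is two calls, with the caveat that
 * zones may come and go in between:
 *
 *	uint_t nzones = 0;
 *	zoneid_t *ids;
 *
 *	(void) zone_list(NULL, &nzones);		(get the count)
 *	ids = calloc(nzones, sizeof (zoneid_t));
 *	(void) zone_list(ids, &nzones);			(fill the array)
 */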

/*
 * Systemcall entry point for zone_lookup(2).
 *
 * Non-global zones are only able to see themselves.
 */
static zoneid_t
zone_lookup(const char *zone_name)
{
	char *kname;
	zone_t *zone;
	zoneid_t zoneid;
	int err;

	if (zone_name == NULL) {
		/* return caller's zone id */
		return (getzoneid());
	}

	kname = kmem_zalloc(ZONENAME_MAX, KM_SLEEP);
	if ((err = copyinstr(zone_name, kname, ZONENAME_MAX, NULL)) != 0) {
		kmem_free(kname, ZONENAME_MAX);
		return (set_errno(err));
	}

	mutex_enter(&zonehash_lock);
	zone = zone_find_all_by_name(kname);
	kmem_free(kname, ZONENAME_MAX);
	if (zone == NULL || zone_status_get(zone) < ZONE_IS_READY ||
	    (curproc->p_zone != global_zone && curproc->p_zone != zone)) {
		/* in non-global zone, can only lookup own name */
		mutex_exit(&zonehash_lock);
		return (set_errno(EINVAL));
	}
	zoneid = zone->zone_id;
	mutex_exit(&zonehash_lock);
	return (zoneid);
}

static int
zone_version(int *version_arg)
{
	int version = ZONE_SYSCALL_API_VERSION;

	if (copyout(&version, version_arg, sizeof (int)) != 0)
		return (set_errno(EFAULT));
	return (0);
}

/* ARGSUSED */
long
zone(int cmd, void *arg1, void *arg2, void *arg3, void *arg4)
{
	zone_def zs;

	switch (cmd) {
	case ZONE_CREATE:
		if (get_udatamodel() == DATAMODEL_NATIVE) {
			if (copyin(arg1, &zs, sizeof (zone_def))) {
				return (set_errno(EFAULT));
			}
		} else {
#ifdef _SYSCALL32_IMPL
			zone_def32 zs32;

			if (copyin(arg1, &zs32, sizeof (zone_def32))) {
				return (set_errno(EFAULT));
			}
			zs.zone_name =
			    (const char *)(unsigned long)zs32.zone_name;
			zs.zone_root =
			    (const char *)(unsigned long)zs32.zone_root;
			zs.zone_privs =
			    (const struct priv_set *)
			    (unsigned long)zs32.zone_privs;
			/*
			 * Without this assignment, zone_privssz would be
			 * passed to zone_create() uninitialized on the
			 * 32-bit path.
			 */
			zs.zone_privssz = zs32.zone_privssz;
			zs.rctlbuf = (caddr_t)(unsigned long)zs32.rctlbuf;
			zs.rctlbufsz = zs32.rctlbufsz;
			zs.zfsbuf = (caddr_t)(unsigned long)zs32.zfsbuf;
			zs.zfsbufsz = zs32.zfsbufsz;
			zs.extended_error =
			    (int *)(unsigned long)zs32.extended_error;
#else
			panic("get_udatamodel() returned bogus result\n");
#endif
		}

		return (zone_create(zs.zone_name, zs.zone_root,
		    zs.zone_privs, zs.zone_privssz,
		    (caddr_t)zs.rctlbuf, zs.rctlbufsz,
		    (caddr_t)zs.zfsbuf, zs.zfsbufsz,
		    zs.extended_error));
	case ZONE_BOOT:
		return (zone_boot((zoneid_t)(uintptr_t)arg1,
		    (const char *)arg2));
	case ZONE_DESTROY:
		return (zone_destroy((zoneid_t)(uintptr_t)arg1));
	case ZONE_GETATTR:
		return (zone_getattr((zoneid_t)(uintptr_t)arg1,
		    (int)(uintptr_t)arg2, arg3, (size_t)arg4));
	case ZONE_ENTER:
		return (zone_enter((zoneid_t)(uintptr_t)arg1));
	case ZONE_LIST:
		return (zone_list((zoneid_t *)arg1, (uint_t *)arg2));
	case ZONE_SHUTDOWN:
		return (zone_shutdown((zoneid_t)(uintptr_t)arg1));
	case ZONE_LOOKUP:
		return (zone_lookup((const char *)arg1));
	case ZONE_VERSION:
		return (zone_version((int *)arg1));
	default:
		return (set_errno(EINVAL));
	}
}
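
/*
 * Userland reaches all of the functions above through this single
 * entry point.  For example (an illustrative sketch, not part of the
 * original source), a consumer can confirm it was built against a
 * compatible API:
 *
 *	int version;
 *
 *	if (syscall(SYS_zone, ZONE_VERSION, &version) == 0 &&
 *	    version != ZONE_SYSCALL_API_VERSION)
 *		(refuse to run; API mismatch)
 */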
struct zarg {
	zone_t *zone;
	zone_cmd_arg_t arg;
};

static int
zone_lookup_door(const char *zone_name, door_handle_t *doorp)
{
	char *buf;
	size_t buflen;
	int error;

	buflen = sizeof (ZONE_DOOR_PATH) + strlen(zone_name);
	buf = kmem_alloc(buflen, KM_SLEEP);
	(void) snprintf(buf, buflen, ZONE_DOOR_PATH, zone_name);
	error = door_ki_open(buf, doorp);
	kmem_free(buf, buflen);
	return (error);
}

static void
zone_release_door(door_handle_t *doorp)
{
	door_ki_rele(*doorp);
	*doorp = NULL;
}

static void
zone_ki_call_zoneadmd(struct zarg *zargp)
{
	door_handle_t door = NULL;
	door_arg_t darg, save_arg;
	char *zone_name;
	size_t zone_namelen;
	zoneid_t zoneid;
	zone_t *zone;
	zone_cmd_arg_t arg;
	uint64_t uniqid;
	size_t size;
	int error;
	int retry;

	zone = zargp->zone;
	arg = zargp->arg;
	kmem_free(zargp, sizeof (*zargp));

	zone_namelen = strlen(zone->zone_name) + 1;
	zone_name = kmem_alloc(zone_namelen, KM_SLEEP);
	bcopy(zone->zone_name, zone_name, zone_namelen);
	zoneid = zone->zone_id;
	uniqid = zone->zone_uniqid;
	/*
	 * zoneadmd may be down, but at least we can empty out the zone.
	 * We can ignore the return value of zone_empty() since we're called
	 * from a kernel thread and know we won't be delivered any signals.
	 */
	ASSERT(curproc == &p0);
	(void) zone_empty(zone);
	ASSERT(zone_status_get(zone) >= ZONE_IS_EMPTY);
	zone_rele(zone);

	size = sizeof (arg);
	darg.rbuf = (char *)&arg;
	darg.data_ptr = (char *)&arg;
	darg.rsize = size;
	darg.data_size = size;
	darg.desc_ptr = NULL;
	darg.desc_num = 0;

	save_arg = darg;
	/*
	 * Since we're not holding a reference to the zone, any number of
	 * things can go wrong, including the zone disappearing before we get a
	 * chance to talk to zoneadmd.
	 */
	for (retry = 0; /* forever */; retry++) {
		if (door == NULL &&
		    (error = zone_lookup_door(zone_name, &door)) != 0) {
			goto next;
		}
		ASSERT(door != NULL);

		if ((error = door_ki_upcall(door, &darg)) == 0) {
			break;
		}
		switch (error) {
		case EINTR:
			/* FALLTHROUGH */
		case EAGAIN:	/* process may be forking */
			/*
			 * Back off for a bit
			 */
			break;
		case EBADF:
			zone_release_door(&door);
			if (zone_lookup_door(zone_name, &door) != 0) {
				/*
				 * zoneadmd may be dead, but it may come back
				 * to life later.
				 */
				break;
			}
			break;
		default:
			cmn_err(CE_WARN,
			    "zone_ki_call_zoneadmd: door_ki_upcall error %d\n",
			    error);
			goto out;
		}
next:
		/*
		 * If this isn't the same zone_t that we originally had in
		 * mind, then this is the same as if two kadmin requests come
		 * in at the same time: the first one wins.  This means we
		 * lose, so we bail.
		 */
		if ((zone = zone_find_by_id(zoneid)) == NULL) {
			/*
			 * Problem is solved.
			 */
			break;
		}
		if (zone->zone_uniqid != uniqid) {
			/*
			 * zoneid recycled
			 */
			zone_rele(zone);
			break;
		}
		/*
		 * We could zone_status_timedwait(), but there doesn't seem to
		 * be much point in doing that (plus, it would mean that
		 * zone_free() isn't called until this thread exits).
		 */
		zone_rele(zone);
		delay(hz);
		darg = save_arg;
	}
out:
	if (door != NULL) {
		zone_release_door(&door);
	}
	kmem_free(zone_name, zone_namelen);
	thread_exit();
}
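/*
 * The retry loop above has a direct userland analogue for ordinary door
 * clients.  A hedged sketch (the door path and payload are hypothetical)
 * using door_call(3C): reopen on EBADF, which is what a client sees once
 * the server process has died, and back off on EINTR/EAGAIN:
 *
 *	#include <door.h>
 *	#include <fcntl.h>
 *	#include <errno.h>
 *	#include <unistd.h>
 *
 *	static int
 *	call_door_retry(const char *path, door_arg_t *proto)
 *	{
 *		door_arg_t darg;
 *		int fd = -1;
 *
 *		for (;;) {
 *			if (fd == -1 && (fd = open(path, O_RDONLY)) == -1)
 *				return (-1);
 *			darg = *proto;
 *			if (door_call(fd, &darg) == 0) {
 *				(void) close(fd);
 *				return (0);
 *			}
 *			switch (errno) {
 *			case EINTR:
 *			case EAGAIN:
 *				(void) sleep(1);
 *				break;
 *			case EBADF:
 *				(void) close(fd);
 *				fd = -1;
 *				break;
 *			default:
 *				(void) close(fd);
 *				return (-1);
 *			}
 *		}
 *	}
 *
 * darg is reloaded from *proto on each pass for the same reason the
 * kernel loop restores darg from save_arg: the door call rewrites the
 * argument structure with the results of each attempt.
 */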
/*
 * Entry point for uadmin() to tell the zone to go away or reboot.  The
 * caller is a process in the zone to be modified.
 *
 * In order to shut down the zone, we will hand off control to zoneadmd
 * (running in the global zone) via a door.  We do a half-hearted job of
 * killing all processes in the zone, create a kernel thread to contact
 * zoneadmd, and make note of the "uniqid" of the zone.  The uniqid is
 * a form of generation number used to let zoneadmd (as well as
 * zone_destroy()) know exactly which zone they're talking about.
 */
int
zone_uadmin(int cmd, int fcn, cred_t *credp)
{
	struct zarg *zargp;
	zone_cmd_t zcmd;
	zone_t *zone;

	zone = curproc->p_zone;
	ASSERT(getzoneid() != GLOBAL_ZONEID);

	switch (cmd) {
	case A_SHUTDOWN:
		switch (fcn) {
		case AD_HALT:
		case AD_POWEROFF:
			zcmd = Z_HALT;
			break;
		case AD_BOOT:
			zcmd = Z_REBOOT;
			break;
		case AD_IBOOT:
		case AD_SBOOT:
		case AD_SIBOOT:
		case AD_NOSYNC:
			return (ENOTSUP);
		default:
			return (EINVAL);
		}
		break;
	case A_REBOOT:
		zcmd = Z_REBOOT;
		break;
	case A_FTRACE:
	case A_REMOUNT:
	case A_FREEZE:
	case A_DUMP:
		return (ENOTSUP);
	default:
		ASSERT(cmd != A_SWAPCTL);	/* handled by uadmin() */
		return (EINVAL);
	}

	if (secpolicy_zone_admin(credp, B_FALSE))
		return (EPERM);
	mutex_enter(&zone_status_lock);
	/*
	 * zone_status can't be ZONE_IS_EMPTY or higher since curproc
	 * is in the zone.
	 */
	ASSERT(zone_status_get(zone) < ZONE_IS_EMPTY);
	if (zone_status_get(zone) > ZONE_IS_RUNNING) {
		/*
		 * This zone is already on its way down.
		 */
		mutex_exit(&zone_status_lock);
		return (0);
	}
	/*
	 * Prevent future zone_enter()s
	 */
	zone_status_set(zone, ZONE_IS_SHUTTING_DOWN);
	mutex_exit(&zone_status_lock);

	/*
	 * Kill everyone now and call zoneadmd later.
	 * zone_ki_call_zoneadmd() will do a more thorough job of this
	 * later.
	 */
	killall(zone->zone_id);
	/*
	 * Now, create the thread to contact zoneadmd and do the rest of the
	 * work.  This thread can't be created in our zone otherwise
	 * zone_destroy() would deadlock.
	 */
	zargp = kmem_alloc(sizeof (*zargp), KM_SLEEP);
	zargp->arg.cmd = zcmd;
	zargp->arg.uniqid = zone->zone_uniqid;
	(void) strcpy(zargp->arg.locale, "C");
	zone_hold(zargp->zone = zone);

	(void) thread_create(NULL, 0, zone_ki_call_zoneadmd, zargp, 0, &p0,
	    TS_RUN, minclsyspri);
	exit(CLD_EXITED, 0);

	return (EINVAL);
}

/*
 * Entry point so kadmin(A_SHUTDOWN, ...) can set the global zone's
 * status to ZONE_IS_SHUTTING_DOWN.
 */
void
zone_shutdown_global(void)
{
	ASSERT(curproc->p_zone == global_zone);

	mutex_enter(&zone_status_lock);
	ASSERT(zone_status_get(global_zone) == ZONE_IS_RUNNING);
	zone_status_set(global_zone, ZONE_IS_SHUTTING_DOWN);
	mutex_exit(&zone_status_lock);
}
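/*
 * Viewed from inside a non-global zone, the mapping in zone_uadmin()
 * means that uadmin(2) operates on the zone rather than the machine.
 * A minimal sketch, assuming <sys/uadmin.h>; on success the call does
 * not return, since the calling process exits along with its zone:
 *
 *	#include <sys/uadmin.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		if (uadmin(A_SHUTDOWN, AD_HALT, 0) == -1) {
 *			perror("uadmin");
 *			return (1);
 *		}
 *		return (0);
 *	}
 *
 * AD_HALT and AD_POWEROFF map to Z_HALT; AD_BOOT and A_REBOOT map to
 * Z_REBOOT.  An unprivileged caller fails with EPERM, per the
 * secpolicy_zone_admin() check above.
 */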
/*
 * Returns true if the named dataset is visible in the current zone.
 * The 'write' parameter is set to 1 if the dataset is also writable.
 */
int
zone_dataset_visible(const char *dataset, int *write)
{
	zone_dataset_t *zd;
	size_t len;
	zone_t *zone = curproc->p_zone;

	if (dataset[0] == '\0')
		return (0);

	/*
	 * Walk the list once, looking for datasets which match exactly, or
	 * specify a dataset underneath an exported dataset.  If found,
	 * return true and note that it is writable.
	 */
	for (zd = list_head(&zone->zone_datasets); zd != NULL;
	    zd = list_next(&zone->zone_datasets, zd)) {

		len = strlen(zd->zd_dataset);
		if (strlen(dataset) >= len &&
		    bcmp(dataset, zd->zd_dataset, len) == 0 &&
		    (dataset[len] == '\0' || dataset[len] == '/' ||
		    dataset[len] == '@')) {
			if (write)
				*write = 1;
			return (1);
		}
	}

	/*
	 * Walk the list a second time, searching for datasets which are
	 * parents of exported datasets.  These should be visible, but
	 * read-only.
	 *
	 * Note that we also have to support forms such as 'pool/dataset/',
	 * with a trailing slash.
	 */
	for (zd = list_head(&zone->zone_datasets); zd != NULL;
	    zd = list_next(&zone->zone_datasets, zd)) {

		len = strlen(dataset);
		if (dataset[len - 1] == '/')
			len--;	/* Ignore trailing slash */
		if (len < strlen(zd->zd_dataset) &&
		    bcmp(dataset, zd->zd_dataset, len) == 0 &&
		    zd->zd_dataset[len] == '/') {
			if (write)
				*write = 0;
			return (1);
		}
	}

	return (0);
}
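/*
 * The two passes lend themselves to a standalone model that can be
 * exercised outside the kernel.  A sketch using a hypothetical exports[]
 * array in place of the zone's zone_datasets list, and strncmp() in
 * place of bcmp():
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	static const char *exports[] = { "pool/export", NULL };
 *
 *	static int
 *	dataset_visible(const char *ds, int *write)
 *	{
 *		const char **e;
 *		size_t len;
 *
 *		if (ds[0] == '\0')
 *			return (0);
 *		for (e = exports; *e != NULL; e++) {
 *			len = strlen(*e);
 *			if (strlen(ds) >= len &&
 *			    strncmp(ds, *e, len) == 0 &&
 *			    (ds[len] == '\0' || ds[len] == '/' ||
 *			    ds[len] == '@')) {
 *				if (write)
 *					*write = 1;
 *				return (1);
 *			}
 *		}
 *		for (e = exports; *e != NULL; e++) {
 *			len = strlen(ds);
 *			if (ds[len - 1] == '/')
 *				len--;
 *			if (len < strlen(*e) &&
 *			    strncmp(ds, *e, len) == 0 &&
 *			    (*e)[len] == '/') {
 *				if (write)
 *					*write = 0;
 *				return (1);
 *			}
 *		}
 *		return (0);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		int w;
 *
 *		(void) printf("%d\n", dataset_visible("pool/export/a", &w));
 *		(void) printf("%d\n", dataset_visible("pool", &w));
 *		(void) printf("%d\n", dataset_visible("pool/other", &w));
 *		return (0);
 *	}
 *
 * "pool/export/a" matches the first pass (visible, w set to 1), "pool"
 * matches the second (visible, w set to 0), and "pool/other" matches
 * neither, so it is not visible.
 */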