1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * graph.c - master restarter graph engine 30 * 31 * The graph engine keeps a dependency graph of all service instances on the 32 * system, as recorded in the repository. It decides when services should 33 * be brought up or down based on service states and dependencies and sends 34 * commands to restarters to effect any changes. It also executes 35 * administrator commands sent by svcadm via the repository. 36 * 37 * The graph is stored in uu_list_t *dgraph and its vertices are 38 * graph_vertex_t's, each of which has a name and an integer id unique to 39 * its name (see dict.c). A vertex's type attribute designates the type 40 * of object it represents: GVT_INST for service instances, GVT_SVC for 41 * service objects (since service instances may depend on another service, 42 * rather than service instance), GVT_FILE for files (which services may 43 * depend on), and GVT_GROUP for dependencies on multiple objects. GVT_GROUP 44 * vertices are necessary because dependency lists may have particular 45 * grouping types (require any, require all, optional, or exclude) and 46 * event-propagation characteristics. 47 * 48 * The initial graph is built by libscf_populate_graph() invoking 49 * dgraph_add_instance() for each instance in the repository. The function 50 * adds a GVT_SVC vertex for the service if one does not already exist, adds 51 * a GVT_INST vertex named by the FMRI of the instance, and sets up the edges. 52 * The resulting web of vertices & edges associated with an instance's vertex 53 * includes 54 * 55 * - an edge from the GVT_SVC vertex for the instance's service 56 * 57 * - an edge to the GVT_INST vertex of the instance's restarter, if its 58 * restarter is not svc.startd 59 * 60 * - edges from other GVT_INST vertices if the instance is a restarter 61 * 62 * - for each dependency property group in the instance's "running" 63 * snapshot, an edge to a GVT_GROUP vertex named by the FMRI of the 64 * instance and the name of the property group 65 * 66 * - for each value of the "entities" property in each dependency property 67 * group, an edge from the corresponding GVT_GROUP vertex to a 68 * GVT_INST, GVT_SVC, or GVT_FILE vertex 69 * 70 * - edges from GVT_GROUP vertices for each dependent instance 71 * 72 * After the edges are set up the vertex's GV_CONFIGURED flag is set. If 73 * there are problems, or if a service is mentioned in a dependency but does 74 * not exist in the repository, the GV_CONFIGURED flag will be clear.
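 *
 * For example (the FMRIs here are purely illustrative, not taken from the
 * repository), an instance svc:/network/foo:default whose restarter is
 * svc:/network/inetd:default and which declares a require_all dependency
 * group "net" on svc:/milestone/network:default would be represented by
 *
 *	GVT_SVC   svc:/network/foo
 *	   --> GVT_INST   svc:/network/foo:default
 *	          --> GVT_INST   svc:/network/inetd:default       (restarter)
 *	          --> GVT_GROUP  svc:/network/foo:default>net     (require_all)
 *	                 --> GVT_INST  svc:/milestone/network:default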
75 * 76 * The graph and all of its vertices are protected by the dgraph_lock mutex. 77 * See restarter.c for more information. 78 * 79 * The properties of an instance fall into two classes: immediate and 80 * snapshotted. Immediate properties should have an immediate effect when 81 * changed. Snapshotted properties should be read from a snapshot, so they 82 * only change when the snapshot changes. The immediate properties used by 83 * the graph engine are general/enabled, general/restarter, and the properties 84 * in the restarter_actions property group. Since they are immediate, they 85 * are not read out of a snapshot. The snapshotted properties used by the 86 * graph engine are those in the property groups with type "dependency" and 87 * are read out of the "running" snapshot. The "running" snapshot is created 88 * by the graph engine as soon as possible, and it is updated, along with 89 * in-core copies of the data (dependency information for the graph engine) on 90 * receipt of the refresh command from svcadm. In addition, the graph engine 91 * updates the "start" snapshot from the "running" snapshot whenever a service 92 * comes online. 93 */ 94 95 #include <sys/uadmin.h> 96 #include <sys/wait.h> 97 98 #include <assert.h> 99 #include <errno.h> 100 #include <fcntl.h> 101 #include <libscf.h> 102 #include <libscf_priv.h> 103 #include <libuutil.h> 104 #include <locale.h> 105 #include <poll.h> 106 #include <pthread.h> 107 #include <signal.h> 108 #include <stddef.h> 109 #include <stdio.h> 110 #include <stdlib.h> 111 #include <string.h> 112 #include <strings.h> 113 #include <sys/statvfs.h> 114 #include <sys/uadmin.h> 115 #include <zone.h> 116 117 #include "startd.h" 118 #include "protocol.h" 119 120 121 #define MILESTONE_NONE ((graph_vertex_t *)1) 122 123 #define CONSOLE_LOGIN_FMRI "svc:/system/console-login:default" 124 #define FS_MINIMAL_FMRI "svc:/system/filesystem/minimal:default" 125 126 static uu_list_pool_t *graph_edge_pool, *graph_vertex_pool; 127 static uu_list_t *dgraph; 128 static pthread_mutex_t dgraph_lock; 129 130 /* 131 * milestone indicates the current subgraph. When NULL, it is the entire 132 * graph. When MILESTONE_NONE, it is the empty graph. Otherwise, it is all 133 * services on which the target vertex depends. 134 */ 135 static graph_vertex_t *milestone = NULL; 136 static boolean_t initial_milestone_set = B_FALSE; 137 static pthread_cond_t initial_milestone_cv = PTHREAD_COND_INITIALIZER; 138 139 /* protected by dgraph_lock */ 140 static boolean_t sulogin_thread_running = B_FALSE; 141 static boolean_t sulogin_running = B_FALSE; 142 static boolean_t console_login_ready = B_FALSE; 143 144 /* Number of services to come down to complete milestone transition. */ 145 static uint_t non_subgraph_svcs; 146 147 /* 148 * These variables indicate what should be done when we reach the milestone 149 * target milestone, i.e., when non_subgraph_svcs == 0. They are acted upon in 150 * dgraph_set_instance_state(). 151 */ 152 static int halting = -1; 153 static boolean_t go_single_user_mode = B_FALSE; 154 static boolean_t go_to_level1 = B_FALSE; 155 156 /* 157 * This tracks the legacy runlevel to ensure we signal init and manage 158 * utmpx entries correctly.
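 * As a concrete reference, the graph_post_*() hooks below map
 * milestone/single-user to runlevel 'S', milestone/multi-user to '2', and
 * milestone/multi-user-server to '3'.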
159 */ 160 static char current_runlevel = '\0'; 161 162 /* Number of single user threads currently running */ 163 static pthread_mutex_t single_user_thread_lock; 164 static int single_user_thread_count = 0; 165 166 /* Statistics for dependency cycle-checking */ 167 static u_longlong_t dep_inserts = 0; 168 static u_longlong_t dep_cycle_ns = 0; 169 static u_longlong_t dep_insert_ns = 0; 170 171 172 static const char * const emsg_invalid_restarter = 173 "Restarter FMRI for %s is invalid. Transitioning to maintenance.\n"; 174 static const char * const console_login_fmri = CONSOLE_LOGIN_FMRI; 175 static const char * const single_user_fmri = SCF_MILESTONE_SINGLE_USER; 176 static const char * const multi_user_fmri = SCF_MILESTONE_MULTI_USER; 177 static const char * const multi_user_svr_fmri = SCF_MILESTONE_MULTI_USER_SERVER; 178 179 180 /* 181 * These services define the system being "up". If none of them can come 182 * online, then we will run sulogin on the console. Note that the install ones 183 * are for the miniroot and when installing CDs after the first. can_come_up() 184 * does the decision making, and an sulogin_thread() runs sulogin, which can be 185 * started by dgraph_set_instance_state() or single_user_thread(). 186 * 187 * NOTE: can_come_up() relies on SCF_MILESTONE_SINGLE_USER being the first 188 * entry, which is only used when booting_to_single_user (boot -s) is set. 189 * This is because when doing a "boot -s", sulogin is started from specials.c 190 * after milestone/single-user comes online, for backwards compatibility. 191 * In this case, SCF_MILESTONE_SINGLE_USER needs to be part of up_svcs 192 * to ensure sulogin will be spawned if milestone/single-user cannot be reached. 193 */ 194 static const char * const up_svcs[] = { 195 SCF_MILESTONE_SINGLE_USER, 196 CONSOLE_LOGIN_FMRI, 197 "svc:/system/install-setup:default", 198 "svc:/system/install:default", 199 NULL 200 }; 201 202 /* This array must have an element for each non-NULL element of up_svcs[]. */ 203 static graph_vertex_t *up_svcs_p[] = { NULL, NULL, NULL, NULL }; 204 205 /* These are for seed repository magic. See can_come_up(). */ 206 static const char * const manifest_import = 207 "svc:/system/manifest-import:default"; 208 static graph_vertex_t *manifest_import_p = NULL; 209 210 211 static char target_milestone_as_runlevel(void); 212 static void graph_runlevel_changed(char rl, int online); 213 static int dgraph_set_milestone(const char *, scf_handle_t *, boolean_t); 214 static void vertex_send_event(graph_vertex_t *v, restarter_event_type_t e); 215 static boolean_t should_be_in_subgraph(graph_vertex_t *v); 216 217 /* 218 * graph_vertex_compare() 219 * This function can compare either int *id or * graph_vertex_t *gv 220 * values, as the vertex id is always the first element of a 221 * graph_vertex structure. 
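 * For example (illustrative only), both of these lookups resolve with the
 * same comparator, since (int *)v and &v->gv_id name the same address:
 *
 *	int id = 42;
 *	(void) uu_list_find(dgraph, &id, NULL, NULL);	(an int * argument)
 *	(void) uu_list_find(dgraph, v, NULL, NULL);	(a vertex argument)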
222 */ 223 /* ARGSUSED */ 224 static int 225 graph_vertex_compare(const void *lc_arg, const void *rc_arg, void *private) 226 { 227 int lc_id = ((const graph_vertex_t *)lc_arg)->gv_id; 228 int rc_id = *(int *)rc_arg; 229 230 if (lc_id > rc_id) 231 return (1); 232 if (lc_id < rc_id) 233 return (-1); 234 return (0); 235 } 236 237 void 238 graph_init() 239 { 240 graph_edge_pool = startd_list_pool_create("graph_edges", 241 sizeof (graph_edge_t), offsetof(graph_edge_t, ge_link), NULL, 242 UU_LIST_POOL_DEBUG); 243 assert(graph_edge_pool != NULL); 244 245 graph_vertex_pool = startd_list_pool_create("graph_vertices", 246 sizeof (graph_vertex_t), offsetof(graph_vertex_t, gv_link), 247 graph_vertex_compare, UU_LIST_POOL_DEBUG); 248 assert(graph_vertex_pool != NULL); 249 250 (void) pthread_mutex_init(&dgraph_lock, &mutex_attrs); 251 (void) pthread_mutex_init(&single_user_thread_lock, &mutex_attrs); 252 dgraph = startd_list_create(graph_vertex_pool, NULL, UU_LIST_SORTED); 253 assert(dgraph != NULL); 254 255 if (!st->st_initial) 256 current_runlevel = utmpx_get_runlevel(); 257 258 log_framework(LOG_DEBUG, "Initialized graph\n"); 259 } 260 261 static graph_vertex_t * 262 vertex_get_by_name(const char *name) 263 { 264 int id; 265 266 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 267 268 id = dict_lookup_byname(name); 269 if (id == -1) 270 return (NULL); 271 272 return (uu_list_find(dgraph, &id, NULL, NULL)); 273 } 274 275 static graph_vertex_t * 276 vertex_get_by_id(int id) 277 { 278 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 279 280 if (id == -1) 281 return (NULL); 282 283 return (uu_list_find(dgraph, &id, NULL, NULL)); 284 } 285 286 /* 287 * Creates a new vertex with the given name, adds it to the graph, and returns 288 * a pointer to it. The graph lock must be held by this thread on entry. 289 */ 290 static graph_vertex_t * 291 graph_add_vertex(const char *name) 292 { 293 int id; 294 graph_vertex_t *v; 295 void *p; 296 uu_list_index_t idx; 297 298 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 299 300 id = dict_insert(name); 301 302 v = startd_zalloc(sizeof (*v)); 303 304 v->gv_id = id; 305 306 v->gv_name = startd_alloc(strlen(name) + 1); 307 (void) strcpy(v->gv_name, name); 308 309 v->gv_dependencies = startd_list_create(graph_edge_pool, v, 0); 310 v->gv_dependents = startd_list_create(graph_edge_pool, v, 0); 311 312 p = uu_list_find(dgraph, &id, NULL, &idx); 313 assert(p == NULL); 314 315 uu_list_node_init(v, &v->gv_link, graph_vertex_pool); 316 uu_list_insert(dgraph, v, idx); 317 318 return (v); 319 } 320 321 /* 322 * Removes v from the graph and frees it. The graph should be locked by this 323 * thread, and v should have no edges associated with it. 
324 */ 325 static void 326 graph_remove_vertex(graph_vertex_t *v) 327 { 328 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 329 330 assert(uu_list_numnodes(v->gv_dependencies) == 0); 331 assert(uu_list_numnodes(v->gv_dependents) == 0); 332 333 startd_free(v->gv_name, strlen(v->gv_name) + 1); 334 uu_list_destroy(v->gv_dependencies); 335 uu_list_destroy(v->gv_dependents); 336 uu_list_remove(dgraph, v); 337 338 startd_free(v, sizeof (graph_vertex_t)); 339 } 340 341 static void 342 graph_add_edge(graph_vertex_t *fv, graph_vertex_t *tv) 343 { 344 graph_edge_t *e, *re; 345 int r; 346 347 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 348 349 e = startd_alloc(sizeof (graph_edge_t)); 350 re = startd_alloc(sizeof (graph_edge_t)); 351 352 e->ge_parent = fv; 353 e->ge_vertex = tv; 354 355 re->ge_parent = tv; 356 re->ge_vertex = fv; 357 358 uu_list_node_init(e, &e->ge_link, graph_edge_pool); 359 r = uu_list_insert_before(fv->gv_dependencies, NULL, e); 360 assert(r == 0); 361 362 uu_list_node_init(re, &re->ge_link, graph_edge_pool); 363 r = uu_list_insert_before(tv->gv_dependents, NULL, re); 364 assert(r == 0); 365 } 366 367 static void 368 graph_remove_edge(graph_vertex_t *v, graph_vertex_t *dv) 369 { 370 graph_edge_t *e; 371 372 for (e = uu_list_first(v->gv_dependencies); 373 e != NULL; 374 e = uu_list_next(v->gv_dependencies, e)) { 375 if (e->ge_vertex == dv) { 376 uu_list_remove(v->gv_dependencies, e); 377 startd_free(e, sizeof (graph_edge_t)); 378 break; 379 } 380 } 381 382 for (e = uu_list_first(dv->gv_dependents); 383 e != NULL; 384 e = uu_list_next(dv->gv_dependents, e)) { 385 if (e->ge_vertex == v) { 386 uu_list_remove(dv->gv_dependents, e); 387 startd_free(e, sizeof (graph_edge_t)); 388 break; 389 } 390 } 391 } 392 393 static void 394 graph_walk_dependents(graph_vertex_t *v, void (*func)(graph_vertex_t *, void *), 395 void *arg) 396 { 397 graph_edge_t *e; 398 399 for (e = uu_list_first(v->gv_dependents); 400 e != NULL; 401 e = uu_list_next(v->gv_dependents, e)) 402 func(e->ge_vertex, arg); 403 } 404 405 static void 406 graph_walk_dependencies(graph_vertex_t *v, void (*func)(graph_vertex_t *, 407 void *), void *arg) 408 { 409 graph_edge_t *e; 410 411 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 412 413 for (e = uu_list_first(v->gv_dependencies); 414 e != NULL; 415 e = uu_list_next(v->gv_dependencies, e)) { 416 417 func(e->ge_vertex, arg); 418 } 419 } 420 421 /* 422 * Generic graph walking function. 423 * 424 * Given a vertex, this function will walk either dependencies 425 * (WALK_DEPENDENCIES) or dependents (WALK_DEPENDENTS) of a vertex recursively 426 * for the entire graph. It will avoid cycles and never visit the same vertex 427 * twice. 428 * 429 * We avoid traversing exclusion dependencies, because they are allowed to 430 * create cycles in the graph. When propagating satisfiability, there is no 431 * need to walk exclusion dependencies because exclude_all_satisfied() doesn't 432 * test for satisfiability. 433 * 434 * The walker takes two callbacks. The first is called before examining the 435 * dependents of each vertex. The second is called on each vertex after 436 * examining its dependents. This allows is_path_to() to construct a path only 437 * after the target vertex has been found. 
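 *
 * A minimal use of the walker (illustrative only, not a caller in this
 * file) which counts the instance vertices reachable from v could be:
 *
 *	static int
 *	count_inst(graph_vertex_t *v, void *arg)
 *	{
 *		if (v->gv_type == GVT_INST)
 *			++*(uint_t *)arg;
 *		return (UU_WALK_NEXT);
 *	}
 *
 *	uint_t n = 0;
 *	graph_walk(v, WALK_DEPENDENTS, count_inst, NULL, &n);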
438 */ 439 typedef enum { 440 WALK_DEPENDENTS, 441 WALK_DEPENDENCIES 442 } graph_walk_dir_t; 443 444 typedef int (*graph_walk_cb_t)(graph_vertex_t *, void *); 445 446 typedef struct graph_walk_info { 447 graph_walk_dir_t gi_dir; 448 uchar_t *gi_visited; /* vertex bitmap */ 449 int (*gi_pre)(graph_vertex_t *, void *); 450 void (*gi_post)(graph_vertex_t *, void *); 451 void *gi_arg; /* callback arg */ 452 int gi_ret; /* return value */ 453 } graph_walk_info_t; 454 455 static int 456 graph_walk_recurse(graph_edge_t *e, graph_walk_info_t *gip) 457 { 458 uu_list_t *list; 459 int r; 460 graph_vertex_t *v = e->ge_vertex; 461 int i; 462 uint_t b; 463 464 i = v->gv_id / 8; 465 b = 1 << (v->gv_id % 8); 466 467 /* 468 * Check to see if we've visited this vertex already. 469 */ 470 if (gip->gi_visited[i] & b) 471 return (UU_WALK_NEXT); 472 473 gip->gi_visited[i] |= b; 474 475 /* 476 * Don't follow exclusions. 477 */ 478 if (v->gv_type == GVT_GROUP && v->gv_depgroup == DEPGRP_EXCLUDE_ALL) 479 return (UU_WALK_NEXT); 480 481 /* 482 * Call pre-visit callback. If this doesn't terminate the walk, 483 * continue search. 484 */ 485 if ((gip->gi_ret = gip->gi_pre(v, gip->gi_arg)) == UU_WALK_NEXT) { 486 /* 487 * Recurse using appropriate list. 488 */ 489 if (gip->gi_dir == WALK_DEPENDENTS) 490 list = v->gv_dependents; 491 else 492 list = v->gv_dependencies; 493 494 r = uu_list_walk(list, (uu_walk_fn_t *)graph_walk_recurse, 495 gip, 0); 496 assert(r == 0); 497 } 498 499 /* 500 * Callbacks must return either UU_WALK_NEXT or UU_WALK_DONE. 501 */ 502 assert(gip->gi_ret == UU_WALK_NEXT || gip->gi_ret == UU_WALK_DONE); 503 504 /* 505 * If given a post-callback, call the function for every vertex. 506 */ 507 if (gip->gi_post != NULL) 508 (void) gip->gi_post(v, gip->gi_arg); 509 510 /* 511 * Preserve the callback's return value. If the callback returns 512 * UU_WALK_DONE, then we propagate that to the caller in order to 513 * terminate the walk. 514 */ 515 return (gip->gi_ret); 516 } 517 518 static void 519 graph_walk(graph_vertex_t *v, graph_walk_dir_t dir, 520 int (*pre)(graph_vertex_t *, void *), 521 void (*post)(graph_vertex_t *, void *), void *arg) 522 { 523 graph_walk_info_t gi; 524 graph_edge_t fake; 525 size_t sz = dictionary->dict_new_id / 8 + 1; 526 527 gi.gi_visited = startd_zalloc(sz); 528 gi.gi_pre = pre; 529 gi.gi_post = post; 530 gi.gi_arg = arg; 531 gi.gi_dir = dir; 532 gi.gi_ret = 0; 533 534 /* 535 * Fake up an edge for the first iteration 536 */ 537 fake.ge_vertex = v; 538 (void) graph_walk_recurse(&fake, &gi); 539 540 startd_free(gi.gi_visited, sz); 541 } 542 543 typedef struct child_search { 544 int id; /* id of vertex to look for */ 545 uint_t depth; /* recursion depth */ 546 /* 547 * While the vertex is not found, path is NULL. After the search, if 548 * the vertex was found then path should point to a -1-terminated 549 * array of vertex id's which constitute the path to the vertex. 550 */ 551 int *path; 552 } child_search_t; 553 554 static int 555 child_pre(graph_vertex_t *v, void *arg) 556 { 557 child_search_t *cs = arg; 558 559 cs->depth++; 560 561 if (v->gv_id == cs->id) { 562 cs->path = startd_alloc((cs->depth + 1) * sizeof (int)); 563 cs->path[cs->depth] = -1; 564 return (UU_WALK_DONE); 565 } 566 567 return (UU_WALK_NEXT); 568 } 569 570 static void 571 child_post(graph_vertex_t *v, void *arg) 572 { 573 child_search_t *cs = arg; 574 575 cs->depth--; 576 577 if (cs->path != NULL) 578 cs->path[cs->depth] = v->gv_id; 579 } 580 581 /* 582 * Look for a path from from to to. 
If one exists, returns a pointer to 583 * a NULL-terminated array of pointers to the vertices along the path. If 584 * there is no path, returns NULL. 585 */ 586 static int * 587 is_path_to(graph_vertex_t *from, graph_vertex_t *to) 588 { 589 child_search_t cs; 590 591 cs.id = to->gv_id; 592 cs.depth = 0; 593 cs.path = NULL; 594 595 graph_walk(from, WALK_DEPENDENCIES, child_pre, child_post, &cs); 596 597 return (cs.path); 598 } 599 600 /* 601 * Given an array of int's as returned by is_path_to, allocates a string of 602 * their names joined by newlines. Returns the size of the allocated buffer 603 * in *sz and frees path. 604 */ 605 static void 606 path_to_str(int *path, char **cpp, size_t *sz) 607 { 608 int i; 609 graph_vertex_t *v; 610 size_t allocd, new_allocd; 611 char *new, *name; 612 613 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 614 assert(path[0] != -1); 615 616 allocd = 1; 617 *cpp = startd_alloc(1); 618 (*cpp)[0] = '\0'; 619 620 for (i = 0; path[i] != -1; ++i) { 621 name = NULL; 622 623 v = vertex_get_by_id(path[i]); 624 625 if (v == NULL) 626 name = "<deleted>"; 627 else if (v->gv_type == GVT_INST || v->gv_type == GVT_SVC) 628 name = v->gv_name; 629 630 if (name != NULL) { 631 new_allocd = allocd + strlen(name) + 1; 632 new = startd_alloc(new_allocd); 633 (void) strcpy(new, *cpp); 634 (void) strcat(new, name); 635 (void) strcat(new, "\n"); 636 637 startd_free(*cpp, allocd); 638 639 *cpp = new; 640 allocd = new_allocd; 641 } 642 } 643 644 startd_free(path, sizeof (int) * (i + 1)); 645 646 *sz = allocd; 647 } 648 649 650 /* 651 * This function along with run_sulogin() implements an exclusion relationship 652 * between system/console-login and sulogin. run_sulogin() will fail if 653 * system/console-login is online, and the graph engine should call 654 * graph_clogin_start() to bring system/console-login online, which defers the 655 * start if sulogin is running. 656 */ 657 static void 658 graph_clogin_start(graph_vertex_t *v) 659 { 660 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 661 662 if (sulogin_running) 663 console_login_ready = B_TRUE; 664 else 665 vertex_send_event(v, RESTARTER_EVENT_TYPE_START); 666 } 667 668 static void 669 graph_su_start(graph_vertex_t *v) 670 { 671 /* 672 * /etc/inittab used to have the initial /sbin/rcS as a 'sysinit' 673 * entry with a runlevel of 'S', before jumping to the final 674 * target runlevel (as set in initdefault). We mimic that legacy 675 * behavior here. 
676 */ 677 utmpx_set_runlevel('S', '0', B_FALSE); 678 vertex_send_event(v, RESTARTER_EVENT_TYPE_START); 679 } 680 681 static void 682 graph_post_su_online(void) 683 { 684 graph_runlevel_changed('S', 1); 685 } 686 687 static void 688 graph_post_su_disable(void) 689 { 690 graph_runlevel_changed('S', 0); 691 } 692 693 static void 694 graph_post_mu_online(void) 695 { 696 graph_runlevel_changed('2', 1); 697 } 698 699 static void 700 graph_post_mu_disable(void) 701 { 702 graph_runlevel_changed('2', 0); 703 } 704 705 static void 706 graph_post_mus_online(void) 707 { 708 graph_runlevel_changed('3', 1); 709 } 710 711 static void 712 graph_post_mus_disable(void) 713 { 714 graph_runlevel_changed('3', 0); 715 } 716 717 static struct special_vertex_info { 718 const char *name; 719 void (*start_f)(graph_vertex_t *); 720 void (*post_online_f)(void); 721 void (*post_disable_f)(void); 722 } special_vertices[] = { 723 { CONSOLE_LOGIN_FMRI, graph_clogin_start, NULL, NULL }, 724 { SCF_MILESTONE_SINGLE_USER, graph_su_start, 725 graph_post_su_online, graph_post_su_disable }, 726 { SCF_MILESTONE_MULTI_USER, NULL, 727 graph_post_mu_online, graph_post_mu_disable }, 728 { SCF_MILESTONE_MULTI_USER_SERVER, NULL, 729 graph_post_mus_online, graph_post_mus_disable }, 730 { NULL }, 731 }; 732 733 734 void 735 vertex_send_event(graph_vertex_t *v, restarter_event_type_t e) 736 { 737 switch (e) { 738 case RESTARTER_EVENT_TYPE_ADD_INSTANCE: 739 assert(v->gv_state == RESTARTER_STATE_UNINIT); 740 741 MUTEX_LOCK(&st->st_load_lock); 742 st->st_load_instances++; 743 MUTEX_UNLOCK(&st->st_load_lock); 744 break; 745 746 case RESTARTER_EVENT_TYPE_ENABLE: 747 log_framework(LOG_DEBUG, "Enabling %s.\n", v->gv_name); 748 assert(v->gv_state == RESTARTER_STATE_UNINIT || 749 v->gv_state == RESTARTER_STATE_DISABLED || 750 v->gv_state == RESTARTER_STATE_MAINT); 751 break; 752 753 case RESTARTER_EVENT_TYPE_DISABLE: 754 case RESTARTER_EVENT_TYPE_ADMIN_DISABLE: 755 log_framework(LOG_DEBUG, "Disabling %s.\n", v->gv_name); 756 assert(v->gv_state != RESTARTER_STATE_DISABLED); 757 break; 758 759 case RESTARTER_EVENT_TYPE_STOP: 760 log_framework(LOG_DEBUG, "Stopping %s.\n", v->gv_name); 761 assert(v->gv_state == RESTARTER_STATE_DEGRADED || 762 v->gv_state == RESTARTER_STATE_ONLINE); 763 break; 764 765 case RESTARTER_EVENT_TYPE_START: 766 log_framework(LOG_DEBUG, "Starting %s.\n", v->gv_name); 767 assert(v->gv_state == RESTARTER_STATE_OFFLINE); 768 break; 769 770 case RESTARTER_EVENT_TYPE_REMOVE_INSTANCE: 771 case RESTARTER_EVENT_TYPE_ADMIN_DEGRADED: 772 case RESTARTER_EVENT_TYPE_ADMIN_REFRESH: 773 case RESTARTER_EVENT_TYPE_ADMIN_RESTART: 774 case RESTARTER_EVENT_TYPE_ADMIN_MAINT_OFF: 775 case RESTARTER_EVENT_TYPE_ADMIN_MAINT_ON: 776 case RESTARTER_EVENT_TYPE_ADMIN_MAINT_ON_IMMEDIATE: 777 case RESTARTER_EVENT_TYPE_DEPENDENCY_CYCLE: 778 case RESTARTER_EVENT_TYPE_INVALID_DEPENDENCY: 779 break; 780 781 default: 782 #ifndef NDEBUG 783 uu_warn("%s:%d: Bad event %d.\n", __FILE__, __LINE__, e); 784 #endif 785 abort(); 786 } 787 788 restarter_protocol_send_event(v->gv_name, v->gv_restarter_channel, e); 789 } 790 791 static void 792 graph_unset_restarter(graph_vertex_t *v) 793 { 794 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 795 assert(v->gv_flags & GV_CONFIGURED); 796 797 vertex_send_event(v, RESTARTER_EVENT_TYPE_REMOVE_INSTANCE); 798 799 if (v->gv_restarter_id != -1) { 800 graph_vertex_t *rv; 801 802 rv = vertex_get_by_id(v->gv_restarter_id); 803 graph_remove_edge(v, rv); 804 } 805 806 v->gv_restarter_id = -1; 807 v->gv_restarter_channel = NULL; 808 } 809 810 
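/*
 * The helpers below tear down the dependency information for an instance.
 * Illustratively, when a dependency group vertex G is deleted, each edge
 *
 *	G --> I (GVT_INST)	is removed; I itself is left alone
 *	G --> S (GVT_SVC)	is removed; S is also removed if it has no
 *				other edges
 *	G --> F (GVT_FILE)	is removed; F is also removed if it has no
 *				other dependents
 *
 * and finally G itself is removed from the graph.
 */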
static void 811 delete_depgroup(graph_vertex_t *v) 812 { 813 graph_edge_t *e; 814 graph_vertex_t *dv; 815 816 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 817 assert(v->gv_type == GVT_GROUP); 818 assert(uu_list_numnodes(v->gv_dependents) == 0); 819 820 while ((e = uu_list_first(v->gv_dependencies)) != NULL) { 821 dv = e->ge_vertex; 822 823 graph_remove_edge(v, dv); 824 825 switch (dv->gv_type) { 826 case GVT_INST: /* instance dependency */ 827 break; 828 829 case GVT_SVC: /* service dependency */ 830 if (uu_list_numnodes(dv->gv_dependents) == 0 && 831 uu_list_numnodes(dv->gv_dependencies) == 0) 832 graph_remove_vertex(dv); 833 break; 834 835 case GVT_FILE: /* file dependency */ 836 assert(uu_list_numnodes(dv->gv_dependencies) == 0); 837 if (uu_list_numnodes(dv->gv_dependents) == 0) 838 graph_remove_vertex(dv); 839 break; 840 841 default: 842 #ifndef NDEBUG 843 uu_warn("%s:%d: Unexpected node type %d", __FILE__, 844 __LINE__, dv->gv_type); 845 #endif 846 abort(); 847 } 848 } 849 850 graph_remove_vertex(v); 851 } 852 853 static int 854 delete_instance_deps_cb(graph_edge_t *e, void **ptrs) 855 { 856 graph_vertex_t *v = ptrs[0]; 857 boolean_t delete_restarter_dep = (boolean_t)ptrs[1]; 858 graph_vertex_t *dv; 859 860 dv = e->ge_vertex; 861 862 /* 863 * We have four possibilities here: 864 * - GVT_INST: restarter 865 * - GVT_GROUP - GVT_INST: instance dependency 866 * - GVT_GROUP - GVT_SVC - GV_INST: service dependency 867 * - GVT_GROUP - GVT_FILE: file dependency 868 */ 869 switch (dv->gv_type) { 870 case GVT_INST: /* restarter */ 871 assert(dv->gv_id == v->gv_restarter_id); 872 if (delete_restarter_dep) 873 graph_remove_edge(v, dv); 874 break; 875 876 case GVT_GROUP: /* pg dependency */ 877 graph_remove_edge(v, dv); 878 delete_depgroup(dv); 879 break; 880 881 case GVT_FILE: 882 /* These are currently not direct dependencies */ 883 884 default: 885 #ifndef NDEBUG 886 uu_warn("%s:%d: Bad vertex type %d.\n", __FILE__, __LINE__, 887 dv->gv_type); 888 #endif 889 abort(); 890 } 891 892 return (UU_WALK_NEXT); 893 } 894 895 static void 896 delete_instance_dependencies(graph_vertex_t *v, boolean_t delete_restarter_dep) 897 { 898 void *ptrs[2]; 899 int r; 900 901 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 902 assert(v->gv_type == GVT_INST); 903 904 ptrs[0] = v; 905 ptrs[1] = (void *)delete_restarter_dep; 906 907 r = uu_list_walk(v->gv_dependencies, 908 (uu_walk_fn_t *)delete_instance_deps_cb, &ptrs, UU_WALK_ROBUST); 909 assert(r == 0); 910 } 911 912 /* 913 * int graph_insert_vertex_unconfigured() 914 * Insert a vertex without sending any restarter events. If the vertex 915 * already exists or creation is successful, return a pointer to it in *vp. 916 * 917 * If type is not GVT_GROUP, dt can remain unset. 918 * 919 * Returns 0, EEXIST, or EINVAL if the arguments are invalid (i.e., fmri 920 * doesn't agree with type, or type doesn't agree with dt). 
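 *
 * Illustrative calls (hypothetical FMRIs, not taken from the callers below):
 *
 *	graph_insert_vertex_unconfigured("svc:/foo/bar:default", GVT_INST,
 *	    0, RERR_NONE, &v);
 *	graph_insert_vertex_unconfigured("svc:/foo/bar:default>deps",
 *	    GVT_GROUP, DEPGRP_REQUIRE_ALL, RERR_RESTART, &v);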
921 */ 922 static int 923 graph_insert_vertex_unconfigured(const char *fmri, gv_type_t type, 924 depgroup_type_t dt, restarter_error_t rt, graph_vertex_t **vp) 925 { 926 int r; 927 int i; 928 929 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 930 931 switch (type) { 932 case GVT_SVC: 933 case GVT_INST: 934 if (strncmp(fmri, "svc:", sizeof ("svc:") - 1) != 0) 935 return (EINVAL); 936 break; 937 938 case GVT_FILE: 939 if (strncmp(fmri, "file:", sizeof ("file:") - 1) != 0) 940 return (EINVAL); 941 break; 942 943 case GVT_GROUP: 944 if (dt <= 0 || rt < 0) 945 return (EINVAL); 946 break; 947 948 default: 949 #ifndef NDEBUG 950 uu_warn("%s:%d: Unknown type %d.\n", __FILE__, __LINE__, type); 951 #endif 952 abort(); 953 } 954 955 *vp = vertex_get_by_name(fmri); 956 if (*vp != NULL) 957 return (EEXIST); 958 959 *vp = graph_add_vertex(fmri); 960 961 (*vp)->gv_type = type; 962 (*vp)->gv_depgroup = dt; 963 (*vp)->gv_restart = rt; 964 965 (*vp)->gv_flags = 0; 966 (*vp)->gv_state = RESTARTER_STATE_NONE; 967 968 for (i = 0; special_vertices[i].name != NULL; ++i) { 969 if (strcmp(fmri, special_vertices[i].name) == 0) { 970 (*vp)->gv_start_f = special_vertices[i].start_f; 971 (*vp)->gv_post_online_f = 972 special_vertices[i].post_online_f; 973 (*vp)->gv_post_disable_f = 974 special_vertices[i].post_disable_f; 975 break; 976 } 977 } 978 979 (*vp)->gv_restarter_id = -1; 980 (*vp)->gv_restarter_channel = 0; 981 982 if (type == GVT_INST) { 983 char *sfmri; 984 graph_vertex_t *sv; 985 986 sfmri = inst_fmri_to_svc_fmri(fmri); 987 sv = vertex_get_by_name(sfmri); 988 if (sv == NULL) { 989 r = graph_insert_vertex_unconfigured(sfmri, GVT_SVC, 0, 990 0, &sv); 991 assert(r == 0); 992 } 993 startd_free(sfmri, max_scf_fmri_size); 994 995 graph_add_edge(sv, *vp); 996 } 997 998 /* 999 * If this vertex is in the subgraph, mark it as so, for both 1000 * GVT_INST and GVT_SVC vertices. 1001 * A GVT_SVC vertex can only be in the subgraph if another instance 1002 * depends on it, in which case it's already been added to the graph 1003 * and marked as in the subgraph (by refresh_vertex()). If a 1004 * GVT_SVC vertex was freshly added (by the code above), it means 1005 * that it has no dependents, and cannot be in the subgraph. 1006 * Regardless of this, we still check that gv_flags includes 1007 * GV_INSUBGRAPH in the event that future behavior causes the above 1008 * code to add a GVT_SVC vertex which should be in the subgraph. 1009 */ 1010 1011 (*vp)->gv_flags |= (should_be_in_subgraph(*vp)? GV_INSUBGRAPH : 0); 1012 1013 return (0); 1014 } 1015 1016 /* 1017 * Returns 0 on success or ELOOP if the dependency would create a cycle. 1018 */ 1019 static int 1020 graph_insert_dependency(graph_vertex_t *fv, graph_vertex_t *tv, int **pathp) 1021 { 1022 hrtime_t now; 1023 1024 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 1025 1026 /* cycle detection */ 1027 now = gethrtime(); 1028 1029 /* Don't follow exclusions. */ 1030 if (!(fv->gv_type == GVT_GROUP && 1031 fv->gv_depgroup == DEPGRP_EXCLUDE_ALL)) { 1032 *pathp = is_path_to(tv, fv); 1033 if (*pathp) 1034 return (ELOOP); 1035 } 1036 1037 dep_cycle_ns += gethrtime() - now; 1038 ++dep_inserts; 1039 now = gethrtime(); 1040 1041 graph_add_edge(fv, tv); 1042 1043 dep_insert_ns += gethrtime() - now; 1044 1045 /* Check if the dependency adds the "to" vertex to the subgraph */ 1046 tv->gv_flags |= (should_be_in_subgraph(tv) ?
GV_INSUBGRAPH : 0); 1047 1048 return (0); 1049 } 1050 1051 static int 1052 inst_running(graph_vertex_t *v) 1053 { 1054 assert(v->gv_type == GVT_INST); 1055 1056 if (v->gv_state == RESTARTER_STATE_ONLINE || 1057 v->gv_state == RESTARTER_STATE_DEGRADED) 1058 return (1); 1059 1060 return (0); 1061 } 1062 1063 /* 1064 * The dependency evaluation functions return 1065 * 1 - dependency satisfied 1066 * 0 - dependency unsatisfied 1067 * -1 - dependency unsatisfiable (without administrator intervention) 1068 * 1069 * The functions also take a boolean satbility argument. When true, the 1070 * functions may recurse in order to determine satisfiability. 1071 */ 1072 static int require_any_satisfied(graph_vertex_t *, boolean_t); 1073 static int dependency_satisfied(graph_vertex_t *, boolean_t); 1074 1075 /* 1076 * A require_all dependency is unsatisfied if any elements are unsatisfied. It 1077 * is unsatisfiable if any elements are unsatisfiable. 1078 */ 1079 static int 1080 require_all_satisfied(graph_vertex_t *groupv, boolean_t satbility) 1081 { 1082 graph_edge_t *edge; 1083 int i; 1084 boolean_t any_unsatisfied; 1085 1086 if (uu_list_numnodes(groupv->gv_dependencies) == 0) 1087 return (1); 1088 1089 any_unsatisfied = B_FALSE; 1090 1091 for (edge = uu_list_first(groupv->gv_dependencies); 1092 edge != NULL; 1093 edge = uu_list_next(groupv->gv_dependencies, edge)) { 1094 i = dependency_satisfied(edge->ge_vertex, satbility); 1095 if (i == 1) 1096 continue; 1097 1098 log_framework(LOG_DEBUG, 1099 "require_all(%s): %s is unsatisfi%s.\n", groupv->gv_name, 1100 edge->ge_vertex->gv_name, i == 0 ? "ed" : "able"); 1101 1102 if (!satbility) 1103 return (0); 1104 1105 if (i == -1) 1106 return (-1); 1107 1108 any_unsatisfied = B_TRUE; 1109 } 1110 1111 return (any_unsatisfied ? 0 : 1); 1112 } 1113 1114 /* 1115 * A require_any dependency is satisfied if any element is satisfied. It is 1116 * satisfiable if any element is satisfiable. 1117 */ 1118 static int 1119 require_any_satisfied(graph_vertex_t *groupv, boolean_t satbility) 1120 { 1121 graph_edge_t *edge; 1122 int s; 1123 boolean_t satisfiable; 1124 1125 if (uu_list_numnodes(groupv->gv_dependencies) == 0) 1126 return (1); 1127 1128 satisfiable = B_FALSE; 1129 1130 for (edge = uu_list_first(groupv->gv_dependencies); 1131 edge != NULL; 1132 edge = uu_list_next(groupv->gv_dependencies, edge)) { 1133 s = dependency_satisfied(edge->ge_vertex, satbility); 1134 1135 if (s == 1) 1136 return (1); 1137 1138 log_framework(LOG_DEBUG, 1139 "require_any(%s): %s is unsatisfi%s.\n", 1140 groupv->gv_name, edge->ge_vertex->gv_name, 1141 s == 0 ? "ed" : "able"); 1142 1143 if (satbility && s == 0) 1144 satisfiable = B_TRUE; 1145 } 1146 1147 return (!satbility || satisfiable ? 0 : -1); 1148 } 1149 1150 /* 1151 * An optional_all dependency only considers elements which are configured, 1152 * enabled, and not in maintenance. If any are unsatisfied, then the dependency 1153 * is unsatisfied. 1154 * 1155 * Offline dependencies which are waiting for a dependency to come online are 1156 * unsatisfied. Offline dependences which cannot possibly come online 1157 * (unsatisfiable) are always considered satisfied. 
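 *
 * For example (illustrative): if an optional_all group lists instance A,
 * which is enabled but still offline, and instance B, which is not enabled,
 * then B is ignored entirely; the group remains unsatisfied while A can
 * still come online, but is treated as satisfied if A's dependencies can
 * never be met.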
1158 */ 1159 static int 1160 optional_all_satisfied(graph_vertex_t *groupv, boolean_t satbility) 1161 { 1162 graph_edge_t *edge; 1163 graph_vertex_t *v; 1164 boolean_t any_qualified; 1165 boolean_t any_unsatisfied; 1166 int i; 1167 1168 any_qualified = B_FALSE; 1169 any_unsatisfied = B_FALSE; 1170 1171 for (edge = uu_list_first(groupv->gv_dependencies); 1172 edge != NULL; 1173 edge = uu_list_next(groupv->gv_dependencies, edge)) { 1174 v = edge->ge_vertex; 1175 1176 switch (v->gv_type) { 1177 case GVT_INST: 1178 /* Skip missing or disabled instances */ 1179 if ((v->gv_flags & (GV_CONFIGURED | GV_ENABLED)) != 1180 (GV_CONFIGURED | GV_ENABLED)) 1181 continue; 1182 1183 if (v->gv_state == RESTARTER_STATE_MAINT) 1184 continue; 1185 1186 any_qualified = B_TRUE; 1187 if (v->gv_state == RESTARTER_STATE_OFFLINE) { 1188 /* 1189 * For offline dependencies, treat unsatisfiable 1190 * as satisfied. 1191 */ 1192 i = dependency_satisfied(v, B_TRUE); 1193 if (i == -1) 1194 i = 1; 1195 } else if (v->gv_state == RESTARTER_STATE_DISABLED) { 1196 /* 1197 * The service is enabled, but hasn't 1198 * transitioned out of disabled yet. Treat it 1199 * as unsatisfied (not unsatisfiable). 1200 */ 1201 i = 0; 1202 } else { 1203 i = dependency_satisfied(v, satbility); 1204 } 1205 break; 1206 1207 case GVT_FILE: 1208 any_qualified = B_TRUE; 1209 i = dependency_satisfied(v, satbility); 1210 1211 break; 1212 1213 case GVT_SVC: { 1214 boolean_t svc_any_qualified; 1215 boolean_t svc_satisfied; 1216 boolean_t svc_satisfiable; 1217 graph_vertex_t *v2; 1218 graph_edge_t *e2; 1219 1220 svc_any_qualified = B_FALSE; 1221 svc_satisfied = B_FALSE; 1222 svc_satisfiable = B_FALSE; 1223 1224 for (e2 = uu_list_first(v->gv_dependencies); 1225 e2 != NULL; 1226 e2 = uu_list_next(v->gv_dependencies, e2)) { 1227 v2 = e2->ge_vertex; 1228 assert(v2->gv_type == GVT_INST); 1229 1230 if ((v2->gv_flags & 1231 (GV_CONFIGURED | GV_ENABLED)) != 1232 (GV_CONFIGURED | GV_ENABLED)) 1233 continue; 1234 1235 if (v2->gv_state == RESTARTER_STATE_MAINT) 1236 continue; 1237 1238 svc_any_qualified = B_TRUE; 1239 1240 if (v2->gv_state == RESTARTER_STATE_OFFLINE) { 1241 /* 1242 * For offline dependencies, treat 1243 * unsatisfiable as satisfied. 1244 */ 1245 i = dependency_satisfied(v2, B_TRUE); 1246 if (i == -1) 1247 i = 1; 1248 } else if (v2->gv_state == 1249 RESTARTER_STATE_DISABLED) { 1250 i = 0; 1251 } else { 1252 i = dependency_satisfied(v2, satbility); 1253 } 1254 1255 if (i == 1) { 1256 svc_satisfied = B_TRUE; 1257 break; 1258 } 1259 if (i == 0) 1260 svc_satisfiable = B_TRUE; 1261 } 1262 1263 if (!svc_any_qualified) 1264 continue; 1265 any_qualified = B_TRUE; 1266 if (svc_satisfied) { 1267 i = 1; 1268 } else if (svc_satisfiable) { 1269 i = 0; 1270 } else { 1271 i = -1; 1272 } 1273 break; 1274 } 1275 1276 case GVT_GROUP: 1277 default: 1278 #ifndef NDEBUG 1279 uu_warn("%s:%d: Unexpected vertex type %d.\n", __FILE__, 1280 __LINE__, v->gv_type); 1281 #endif 1282 abort(); 1283 } 1284 1285 if (i == 1) 1286 continue; 1287 1288 log_framework(LOG_DEBUG, 1289 "optional_all(%s): %s is unsatisfi%s.\n", groupv->gv_name, 1290 v->gv_name, i == 0 ? "ed" : "able"); 1291 1292 if (!satbility) 1293 return (0); 1294 if (i == -1) 1295 return (-1); 1296 any_unsatisfied = B_TRUE; 1297 } 1298 1299 if (!any_qualified) 1300 return (1); 1301 1302 return (any_unsatisfied ? 
0 : 1); 1303 } 1304 1305 /* 1306 * An exclude_all dependency is unsatisfied if any non-service element is 1307 * satisfied or any service instance which is configured, enabled, and not in 1308 * maintenance is satisfied. Usually when unsatisfied, it is also 1309 * unsatisfiable. 1310 */ 1311 #define LOG_EXCLUDE(u, v) \ 1312 log_framework(LOG_DEBUG, "exclude_all(%s): %s is satisfied.\n", \ 1313 (u)->gv_name, (v)->gv_name) 1314 1315 /* ARGSUSED */ 1316 static int 1317 exclude_all_satisfied(graph_vertex_t *groupv, boolean_t satbility) 1318 { 1319 graph_edge_t *edge, *e2; 1320 graph_vertex_t *v, *v2; 1321 1322 for (edge = uu_list_first(groupv->gv_dependencies); 1323 edge != NULL; 1324 edge = uu_list_next(groupv->gv_dependencies, edge)) { 1325 v = edge->ge_vertex; 1326 1327 switch (v->gv_type) { 1328 case GVT_INST: 1329 if ((v->gv_flags & GV_CONFIGURED) == 0) 1330 continue; 1331 1332 switch (v->gv_state) { 1333 case RESTARTER_STATE_ONLINE: 1334 case RESTARTER_STATE_DEGRADED: 1335 LOG_EXCLUDE(groupv, v); 1336 return (v->gv_flags & GV_ENABLED ? -1 : 0); 1337 1338 case RESTARTER_STATE_OFFLINE: 1339 case RESTARTER_STATE_UNINIT: 1340 LOG_EXCLUDE(groupv, v); 1341 return (0); 1342 1343 case RESTARTER_STATE_DISABLED: 1344 case RESTARTER_STATE_MAINT: 1345 continue; 1346 1347 default: 1348 #ifndef NDEBUG 1349 uu_warn("%s:%d: Unexpected vertex state %d.\n", 1350 __FILE__, __LINE__, v->gv_state); 1351 #endif 1352 abort(); 1353 } 1354 /* NOTREACHED */ 1355 1356 case GVT_SVC: 1357 break; 1358 1359 case GVT_FILE: 1360 if (!file_ready(v)) 1361 continue; 1362 LOG_EXCLUDE(groupv, v); 1363 return (-1); 1364 1365 case GVT_GROUP: 1366 default: 1367 #ifndef NDEBUG 1368 uu_warn("%s:%d: Unexpected vertex type %d.\n", __FILE__, 1369 __LINE__, v->gv_type); 1370 #endif 1371 abort(); 1372 } 1373 1374 /* v represents a service */ 1375 if (uu_list_numnodes(v->gv_dependencies) == 0) 1376 continue; 1377 1378 for (e2 = uu_list_first(v->gv_dependencies); 1379 e2 != NULL; 1380 e2 = uu_list_next(v->gv_dependencies, e2)) { 1381 v2 = e2->ge_vertex; 1382 assert(v2->gv_type == GVT_INST); 1383 1384 if ((v2->gv_flags & GV_CONFIGURED) == 0) 1385 continue; 1386 1387 switch (v2->gv_state) { 1388 case RESTARTER_STATE_ONLINE: 1389 case RESTARTER_STATE_DEGRADED: 1390 LOG_EXCLUDE(groupv, v2); 1391 return (v2->gv_flags & GV_ENABLED ? -1 : 0); 1392 1393 case RESTARTER_STATE_OFFLINE: 1394 case RESTARTER_STATE_UNINIT: 1395 LOG_EXCLUDE(groupv, v2); 1396 return (0); 1397 1398 case RESTARTER_STATE_DISABLED: 1399 case RESTARTER_STATE_MAINT: 1400 continue; 1401 1402 default: 1403 #ifndef NDEBUG 1404 uu_warn("%s:%d: Unexpected vertex type %d.\n", 1405 __FILE__, __LINE__, v2->gv_type); 1406 #endif 1407 abort(); 1408 } 1409 } 1410 } 1411 1412 return (1); 1413 } 1414 1415 /* 1416 * int instance_satisfied() 1417 * Determine if all the dependencies are satisfied for the supplied instance 1418 * vertex. Return 1 if they are, 0 if they aren't, and -1 if they won't be 1419 * without administrator intervention. 1420 */ 1421 static int 1422 instance_satisfied(graph_vertex_t *v, boolean_t satbility) 1423 { 1424 assert(v->gv_type == GVT_INST); 1425 assert(!inst_running(v)); 1426 1427 return (require_all_satisfied(v, satbility)); 1428 } 1429 1430 /* 1431 * Decide whether v can satisfy a dependency. v can either be a child of 1432 * a group vertex, or of an instance vertex. 
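 *
 * To summarize the cases handled below: a GVT_INST vertex is satisfied (1)
 * when online or degraded, unsatisfiable (-1) when disabled, in maintenance,
 * or unconfigured, and otherwise unsatisfied (0) or unsatisfiable depending
 * on its own dependencies; a GVT_FILE vertex is satisfied if the file exists
 * and unsatisfiable otherwise; a GVT_SVC vertex is satisfied if any of its
 * instances is; and a GVT_GROUP vertex defers to the *_satisfied() function
 * for its grouping.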
1433 */ 1434 static int 1435 dependency_satisfied(graph_vertex_t *v, boolean_t satbility) 1436 { 1437 switch (v->gv_type) { 1438 case GVT_INST: 1439 if ((v->gv_flags & GV_CONFIGURED) == 0) 1440 return (-1); 1441 1442 switch (v->gv_state) { 1443 case RESTARTER_STATE_ONLINE: 1444 case RESTARTER_STATE_DEGRADED: 1445 return (1); 1446 1447 case RESTARTER_STATE_OFFLINE: 1448 if (!satbility) 1449 return (0); 1450 return (instance_satisfied(v, satbility) != -1 ? 1451 0 : -1); 1452 1453 case RESTARTER_STATE_DISABLED: 1454 case RESTARTER_STATE_MAINT: 1455 return (-1); 1456 1457 case RESTARTER_STATE_UNINIT: 1458 return (0); 1459 1460 default: 1461 #ifndef NDEBUG 1462 uu_warn("%s:%d: Unexpected vertex state %d.\n", 1463 __FILE__, __LINE__, v->gv_state); 1464 #endif 1465 abort(); 1466 /* NOTREACHED */ 1467 } 1468 1469 case GVT_SVC: 1470 if (uu_list_numnodes(v->gv_dependencies) == 0) 1471 return (-1); 1472 return (require_any_satisfied(v, satbility)); 1473 1474 case GVT_FILE: 1475 /* i.e., we assume files will not be automatically generated */ 1476 return (file_ready(v) ? 1 : -1); 1477 1478 case GVT_GROUP: 1479 break; 1480 1481 default: 1482 #ifndef NDEBUG 1483 uu_warn("%s:%d: Unexpected node type %d.\n", __FILE__, __LINE__, 1484 v->gv_type); 1485 #endif 1486 abort(); 1487 /* NOTREACHED */ 1488 } 1489 1490 switch (v->gv_depgroup) { 1491 case DEPGRP_REQUIRE_ANY: 1492 return (require_any_satisfied(v, satbility)); 1493 1494 case DEPGRP_REQUIRE_ALL: 1495 return (require_all_satisfied(v, satbility)); 1496 1497 case DEPGRP_OPTIONAL_ALL: 1498 return (optional_all_satisfied(v, satbility)); 1499 1500 case DEPGRP_EXCLUDE_ALL: 1501 return (exclude_all_satisfied(v, satbility)); 1502 1503 default: 1504 #ifndef NDEBUG 1505 uu_warn("%s:%d: Unknown dependency grouping %d.\n", __FILE__, 1506 __LINE__, v->gv_depgroup); 1507 #endif 1508 abort(); 1509 } 1510 } 1511 1512 static void 1513 start_if_satisfied(graph_vertex_t *v) 1514 { 1515 if (v->gv_state == RESTARTER_STATE_OFFLINE && 1516 instance_satisfied(v, B_FALSE) == 1) { 1517 if (v->gv_start_f == NULL) 1518 vertex_send_event(v, RESTARTER_EVENT_TYPE_START); 1519 else 1520 v->gv_start_f(v); 1521 } 1522 } 1523 1524 /* 1525 * propagate_satbility() 1526 * 1527 * This function is used when the given vertex changes state in such a way that 1528 * one of its dependents may become unsatisfiable. This happens when an 1529 * instance transitions between offline -> online, or from !running -> 1530 * maintenance, as well as when an instance is removed from the graph. 1531 * 1532 * We have to walk all the dependents, since optional_all dependencies several 1533 * levels up could become (un)satisfied, instead of unsatisfiable. For example, 1534 * 1535 * +-----+ optional_all +-----+ require_all +-----+ 1536 * | A |--------------->| B |-------------->| C | 1537 * +-----+ +-----+ +-----+ 1538 * 1539 * offline -> maintenance 1540 * 1541 * If C goes into maintenance, it's not enough simply to check B. Because A has 1542 * an optional dependency, what was previously an unsatisfiable situation is now 1543 * satisfied (B will never come online, even though its state hasn't changed). 1544 * 1545 * Note that it's not necessary to continue examining dependents after reaching 1546 * an optional_all dependency. It's not possible for an optional_all dependency 1547 * to change satisfiability without also coming online, in which case we get a 1548 * start event and propagation continues naturally.
However, it does no harm to 1549 * continue propagating satisfiability (as it is a relatively rare event), and 1550 * keeps the walker code simple and generic. 1551 */ 1552 /*ARGSUSED*/ 1553 static int 1554 satbility_cb(graph_vertex_t *v, void *arg) 1555 { 1556 if (v->gv_type == GVT_INST) 1557 start_if_satisfied(v); 1558 1559 return (UU_WALK_NEXT); 1560 } 1561 1562 static void 1563 propagate_satbility(graph_vertex_t *v) 1564 { 1565 graph_walk(v, WALK_DEPENDENTS, satbility_cb, NULL, NULL); 1566 } 1567 1568 static void propagate_stop(graph_vertex_t *, void *); 1569 1570 /* ARGSUSED */ 1571 static void 1572 propagate_start(graph_vertex_t *v, void *arg) 1573 { 1574 switch (v->gv_type) { 1575 case GVT_INST: 1576 start_if_satisfied(v); 1577 break; 1578 1579 case GVT_GROUP: 1580 if (v->gv_depgroup == DEPGRP_EXCLUDE_ALL) { 1581 graph_walk_dependents(v, propagate_stop, 1582 (void *)RERR_RESTART); 1583 break; 1584 } 1585 /* FALLTHROUGH */ 1586 1587 case GVT_SVC: 1588 graph_walk_dependents(v, propagate_start, NULL); 1589 break; 1590 1591 case GVT_FILE: 1592 #ifndef NDEBUG 1593 uu_warn("%s:%d: propagate_start() encountered GVT_FILE.\n", 1594 __FILE__, __LINE__); 1595 #endif 1596 abort(); 1597 /* NOTREACHED */ 1598 1599 default: 1600 #ifndef NDEBUG 1601 uu_warn("%s:%d: Unknown vertex type %d.\n", __FILE__, __LINE__, 1602 v->gv_type); 1603 #endif 1604 abort(); 1605 } 1606 } 1607 1608 static void 1609 propagate_stop(graph_vertex_t *v, void *arg) 1610 { 1611 graph_edge_t *e; 1612 graph_vertex_t *svc; 1613 restarter_error_t err = (restarter_error_t)arg; 1614 1615 switch (v->gv_type) { 1616 case GVT_INST: 1617 /* Restarter */ 1618 if (err > RERR_NONE && inst_running(v)) 1619 vertex_send_event(v, RESTARTER_EVENT_TYPE_STOP); 1620 break; 1621 1622 case GVT_SVC: 1623 graph_walk_dependents(v, propagate_stop, arg); 1624 break; 1625 1626 case GVT_FILE: 1627 #ifndef NDEBUG 1628 uu_warn("%s:%d: propagate_stop() encountered GVT_FILE.\n", 1629 __FILE__, __LINE__); 1630 #endif 1631 abort(); 1632 /* NOTREACHED */ 1633 1634 case GVT_GROUP: 1635 if (v->gv_depgroup == DEPGRP_EXCLUDE_ALL) { 1636 graph_walk_dependents(v, propagate_start, NULL); 1637 break; 1638 } 1639 1640 if (err == RERR_NONE || err > v->gv_restart) 1641 break; 1642 1643 assert(uu_list_numnodes(v->gv_dependents) == 1); 1644 e = uu_list_first(v->gv_dependents); 1645 svc = e->ge_vertex; 1646 1647 if (inst_running(svc)) 1648 vertex_send_event(svc, RESTARTER_EVENT_TYPE_STOP); 1649 break; 1650 1651 default: 1652 #ifndef NDEBUG 1653 uu_warn("%s:%d: Unknown vertex type %d.\n", __FILE__, __LINE__, 1654 v->gv_type); 1655 #endif 1656 abort(); 1657 } 1658 } 1659 1660 /* 1661 * void graph_enable_by_vertex() 1662 * If admin is non-zero, this is an administrative request for change 1663 * of the enabled property. Thus, send the ADMIN_DISABLE rather than 1664 * a plain DISABLE restarter event. 1665 */ 1666 static void 1667 graph_enable_by_vertex(graph_vertex_t *vertex, int enable, int admin) 1668 { 1669 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 1670 assert((vertex->gv_flags & GV_CONFIGURED)); 1671 1672 vertex->gv_flags = (vertex->gv_flags & ~GV_ENABLED) | 1673 (enable ? 
GV_ENABLED : 0); 1674 1675 if (enable) { 1676 if (vertex->gv_state != RESTARTER_STATE_OFFLINE && 1677 vertex->gv_state != RESTARTER_STATE_DEGRADED && 1678 vertex->gv_state != RESTARTER_STATE_ONLINE) 1679 vertex_send_event(vertex, RESTARTER_EVENT_TYPE_ENABLE); 1680 } else { 1681 if (vertex->gv_state != RESTARTER_STATE_DISABLED) { 1682 if (admin) 1683 vertex_send_event(vertex, 1684 RESTARTER_EVENT_TYPE_ADMIN_DISABLE); 1685 else 1686 vertex_send_event(vertex, 1687 RESTARTER_EVENT_TYPE_DISABLE); 1688 } 1689 } 1690 1691 /* 1692 * Wait for state update from restarter before sending _START or 1693 * _STOP. 1694 */ 1695 } 1696 1697 static int configure_vertex(graph_vertex_t *, scf_instance_t *); 1698 1699 /* 1700 * Set the restarter for v to fmri_arg. That is, make sure a vertex for 1701 * fmri_arg exists, make v depend on it, and send _ADD_INSTANCE for v. If 1702 * v is already configured and fmri_arg indicates the current restarter, do 1703 * nothing. If v is configured and fmri_arg is a new restarter, delete v's 1704 * dependency on the restarter, send _REMOVE_INSTANCE for v, and set the new 1705 * restarter. Returns 0 on success, EINVAL if the FMRI is invalid, 1706 * ECONNABORTED if the repository connection is broken, and ELOOP 1707 * if the dependency would create a cycle. In the last case, *pathp will 1708 * point to a -1-terminated array of ids which compose the path from v to 1709 * restarter_fmri. 1710 */ 1711 int 1712 graph_change_restarter(graph_vertex_t *v, const char *fmri_arg, scf_handle_t *h, 1713 int **pathp) 1714 { 1715 char *restarter_fmri = NULL; 1716 graph_vertex_t *rv; 1717 int err; 1718 int id; 1719 1720 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 1721 1722 if (fmri_arg[0] != '\0') { 1723 err = fmri_canonify(fmri_arg, &restarter_fmri, B_TRUE); 1724 if (err != 0) { 1725 assert(err == EINVAL); 1726 return (err); 1727 } 1728 } 1729 1730 if (restarter_fmri == NULL || 1731 strcmp(restarter_fmri, SCF_SERVICE_STARTD) == 0) { 1732 if (v->gv_flags & GV_CONFIGURED) { 1733 if (v->gv_restarter_id == -1) { 1734 if (restarter_fmri != NULL) 1735 startd_free(restarter_fmri, 1736 max_scf_fmri_size); 1737 return (0); 1738 } 1739 1740 graph_unset_restarter(v); 1741 } 1742 1743 /* Master restarter, nothing to do. 
*/ 1744 v->gv_restarter_id = -1; 1745 v->gv_restarter_channel = NULL; 1746 vertex_send_event(v, RESTARTER_EVENT_TYPE_ADD_INSTANCE); 1747 return (0); 1748 } 1749 1750 if (v->gv_flags & GV_CONFIGURED) { 1751 id = dict_lookup_byname(restarter_fmri); 1752 if (id != -1 && v->gv_restarter_id == id) { 1753 startd_free(restarter_fmri, max_scf_fmri_size); 1754 return (0); 1755 } 1756 1757 graph_unset_restarter(v); 1758 } 1759 1760 err = graph_insert_vertex_unconfigured(restarter_fmri, GVT_INST, 0, 1761 RERR_NONE, &rv); 1762 startd_free(restarter_fmri, max_scf_fmri_size); 1763 assert(err == 0 || err == EEXIST); 1764 1765 if (rv->gv_delegate_initialized == 0) { 1766 rv->gv_delegate_channel = restarter_protocol_init_delegate( 1767 rv->gv_name); 1768 rv->gv_delegate_initialized = 1; 1769 } 1770 v->gv_restarter_id = rv->gv_id; 1771 v->gv_restarter_channel = rv->gv_delegate_channel; 1772 1773 err = graph_insert_dependency(v, rv, pathp); 1774 if (err != 0) { 1775 assert(err == ELOOP); 1776 return (ELOOP); 1777 } 1778 1779 vertex_send_event(v, RESTARTER_EVENT_TYPE_ADD_INSTANCE); 1780 1781 if (!(rv->gv_flags & GV_CONFIGURED)) { 1782 scf_instance_t *inst; 1783 1784 err = libscf_fmri_get_instance(h, rv->gv_name, &inst); 1785 switch (err) { 1786 case 0: 1787 err = configure_vertex(rv, inst); 1788 scf_instance_destroy(inst); 1789 switch (err) { 1790 case 0: 1791 case ECANCELED: 1792 break; 1793 1794 case ECONNABORTED: 1795 return (ECONNABORTED); 1796 1797 default: 1798 bad_error("configure_vertex", err); 1799 } 1800 break; 1801 1802 case ECONNABORTED: 1803 return (ECONNABORTED); 1804 1805 case ENOENT: 1806 break; 1807 1808 case ENOTSUP: 1809 /* 1810 * The fmri doesn't specify an instance - translate 1811 * to EINVAL. 1812 */ 1813 return (EINVAL); 1814 1815 case EINVAL: 1816 default: 1817 bad_error("libscf_fmri_get_instance", err); 1818 } 1819 } 1820 1821 return (0); 1822 } 1823 1824 1825 /* 1826 * Add all of the instances of the service named by fmri to the graph. 1827 * Returns 1828 * 0 - success 1829 * ENOENT - service indicated by fmri does not exist 1830 * 1831 * In both cases *reboundp will be B_TRUE if the handle was rebound, or B_FALSE 1832 * otherwise. 
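 *
 * Callers therefore typically re-read any repository state they were holding
 * when *reboundp comes back B_TRUE; process_dependency_fmri(), for example,
 * turns a rebind observed here into ECONNRESET for its own caller.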
1833 */ 1834 static int 1835 add_service(const char *fmri, scf_handle_t *h, boolean_t *reboundp) 1836 { 1837 scf_service_t *svc; 1838 scf_instance_t *inst; 1839 scf_iter_t *iter; 1840 char *inst_fmri; 1841 int ret, r; 1842 1843 *reboundp = B_FALSE; 1844 1845 svc = safe_scf_service_create(h); 1846 inst = safe_scf_instance_create(h); 1847 iter = safe_scf_iter_create(h); 1848 inst_fmri = startd_alloc(max_scf_fmri_size); 1849 1850 rebound: 1851 if (scf_handle_decode_fmri(h, fmri, NULL, svc, NULL, NULL, NULL, 1852 SCF_DECODE_FMRI_EXACT) != 0) { 1853 switch (scf_error()) { 1854 case SCF_ERROR_CONNECTION_BROKEN: 1855 default: 1856 libscf_handle_rebind(h); 1857 *reboundp = B_TRUE; 1858 goto rebound; 1859 1860 case SCF_ERROR_NOT_FOUND: 1861 ret = ENOENT; 1862 goto out; 1863 1864 case SCF_ERROR_INVALID_ARGUMENT: 1865 case SCF_ERROR_CONSTRAINT_VIOLATED: 1866 case SCF_ERROR_NOT_BOUND: 1867 case SCF_ERROR_HANDLE_MISMATCH: 1868 bad_error("scf_handle_decode_fmri", scf_error()); 1869 } 1870 } 1871 1872 if (scf_iter_service_instances(iter, svc) != 0) { 1873 switch (scf_error()) { 1874 case SCF_ERROR_CONNECTION_BROKEN: 1875 default: 1876 libscf_handle_rebind(h); 1877 *reboundp = B_TRUE; 1878 goto rebound; 1879 1880 case SCF_ERROR_DELETED: 1881 ret = ENOENT; 1882 goto out; 1883 1884 case SCF_ERROR_HANDLE_MISMATCH: 1885 case SCF_ERROR_NOT_BOUND: 1886 case SCF_ERROR_NOT_SET: 1887 bad_error("scf_iter_service_instances", scf_error()); 1888 } 1889 } 1890 1891 for (;;) { 1892 r = scf_iter_next_instance(iter, inst); 1893 if (r == 0) 1894 break; 1895 if (r != 1) { 1896 switch (scf_error()) { 1897 case SCF_ERROR_CONNECTION_BROKEN: 1898 default: 1899 libscf_handle_rebind(h); 1900 *reboundp = B_TRUE; 1901 goto rebound; 1902 1903 case SCF_ERROR_DELETED: 1904 ret = ENOENT; 1905 goto out; 1906 1907 case SCF_ERROR_HANDLE_MISMATCH: 1908 case SCF_ERROR_NOT_BOUND: 1909 case SCF_ERROR_NOT_SET: 1910 case SCF_ERROR_INVALID_ARGUMENT: 1911 bad_error("scf_iter_next_instance", 1912 scf_error()); 1913 } 1914 } 1915 1916 if (scf_instance_to_fmri(inst, inst_fmri, max_scf_fmri_size) < 1917 0) { 1918 switch (scf_error()) { 1919 case SCF_ERROR_CONNECTION_BROKEN: 1920 libscf_handle_rebind(h); 1921 *reboundp = B_TRUE; 1922 goto rebound; 1923 1924 case SCF_ERROR_DELETED: 1925 continue; 1926 1927 case SCF_ERROR_NOT_BOUND: 1928 case SCF_ERROR_NOT_SET: 1929 bad_error("scf_instance_to_fmri", scf_error()); 1930 } 1931 } 1932 1933 r = dgraph_add_instance(inst_fmri, inst, B_FALSE); 1934 switch (r) { 1935 case 0: 1936 case ECANCELED: 1937 break; 1938 1939 case EEXIST: 1940 continue; 1941 1942 case ECONNABORTED: 1943 libscf_handle_rebind(h); 1944 *reboundp = B_TRUE; 1945 goto rebound; 1946 1947 case EINVAL: 1948 default: 1949 bad_error("dgraph_add_instance", r); 1950 } 1951 } 1952 1953 ret = 0; 1954 1955 out: 1956 startd_free(inst_fmri, max_scf_fmri_size); 1957 scf_iter_destroy(iter); 1958 scf_instance_destroy(inst); 1959 scf_service_destroy(svc); 1960 return (ret); 1961 } 1962 1963 struct depfmri_info { 1964 graph_vertex_t *v; /* GVT_GROUP vertex */ 1965 gv_type_t type; /* type of dependency */ 1966 const char *inst_fmri; /* FMRI of parental GVT_INST vert. */ 1967 const char *pg_name; /* Name of dependency pg */ 1968 scf_handle_t *h; 1969 int err; /* return error code */ 1970 int **pathp; /* return circular dependency path */ 1971 }; 1972 1973 /* 1974 * Find or create a vertex for fmri and make info->v depend on it.
1975 * Returns 1976 * 0 - success 1977 * nonzero - failure 1978 * 1979 * On failure, sets info->err to 1980 * EINVAL - fmri is invalid 1981 * fmri does not match info->type 1982 * ELOOP - Adding the dependency creates a circular dependency. *info->pathp 1983 * will point to an array of the ids of the members of the cycle. 1984 * ECONNABORTED - repository connection was broken 1985 * ECONNRESET - succeeded, but repository connection was reset 1986 */ 1987 static int 1988 process_dependency_fmri(const char *fmri, struct depfmri_info *info) 1989 { 1990 int err; 1991 graph_vertex_t *depgroup_v, *v; 1992 char *fmri_copy, *cfmri; 1993 size_t fmri_copy_sz; 1994 const char *scope, *service, *instance, *pg; 1995 scf_instance_t *inst; 1996 boolean_t rebound; 1997 1998 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 1999 2000 /* Get or create vertex for FMRI */ 2001 depgroup_v = info->v; 2002 2003 if (strncmp(fmri, "file:", sizeof ("file:") - 1) == 0) { 2004 if (info->type != GVT_FILE) { 2005 log_framework(LOG_NOTICE, 2006 "FMRI \"%s\" is not allowed for the \"%s\" " 2007 "dependency's type of instance %s.\n", fmri, 2008 info->pg_name, info->inst_fmri); 2009 return (info->err = EINVAL); 2010 } 2011 2012 err = graph_insert_vertex_unconfigured(fmri, info->type, 0, 2013 RERR_NONE, &v); 2014 switch (err) { 2015 case 0: 2016 break; 2017 2018 case EEXIST: 2019 assert(v->gv_type == GVT_FILE); 2020 break; 2021 2022 case EINVAL: /* prevented above */ 2023 default: 2024 bad_error("graph_insert_vertex_unconfigured", err); 2025 } 2026 } else { 2027 if (info->type != GVT_INST) { 2028 log_framework(LOG_NOTICE, 2029 "FMRI \"%s\" is not allowed for the \"%s\" " 2030 "dependency's type of instance %s.\n", fmri, 2031 info->pg_name, info->inst_fmri); 2032 return (info->err = EINVAL); 2033 } 2034 2035 /* 2036 * We must canonify fmri & add a vertex for it. 2037 */ 2038 fmri_copy_sz = strlen(fmri) + 1; 2039 fmri_copy = startd_alloc(fmri_copy_sz); 2040 (void) strcpy(fmri_copy, fmri); 2041 2042 /* Determine if the FMRI is a property group or instance */ 2043 if (scf_parse_svc_fmri(fmri_copy, &scope, &service, 2044 &instance, &pg, NULL) != 0) { 2045 startd_free(fmri_copy, fmri_copy_sz); 2046 log_framework(LOG_NOTICE, 2047 "Dependency \"%s\" of %s has invalid FMRI " 2048 "\"%s\".\n", info->pg_name, info->inst_fmri, 2049 fmri); 2050 return (info->err = EINVAL); 2051 } 2052 2053 if (service == NULL || pg != NULL) { 2054 startd_free(fmri_copy, fmri_copy_sz); 2055 log_framework(LOG_NOTICE, 2056 "Dependency \"%s\" of %s does not designate a " 2057 "service or instance.\n", info->pg_name, 2058 info->inst_fmri); 2059 return (info->err = EINVAL); 2060 } 2061 2062 if (scope == NULL || strcmp(scope, SCF_SCOPE_LOCAL) == 0) { 2063 cfmri = uu_msprintf("svc:/%s%s%s", 2064 service, instance ? ":" : "", instance ? instance : 2065 ""); 2066 } else { 2067 cfmri = uu_msprintf("svc://%s/%s%s%s", 2068 scope, service, instance ? ":" : "", instance ? 2069 instance : ""); 2070 } 2071 2072 startd_free(fmri_copy, fmri_copy_sz); 2073 2074 err = graph_insert_vertex_unconfigured(cfmri, instance ? 2075 GVT_INST : GVT_SVC, instance ? 0 : DEPGRP_REQUIRE_ANY, 2076 RERR_NONE, &v); 2077 uu_free(cfmri); 2078 switch (err) { 2079 case 0: 2080 break; 2081 2082 case EEXIST: 2083 /* Verify v. 
*/ 2084 if (instance != NULL) 2085 assert(v->gv_type == GVT_INST); 2086 else 2087 assert(v->gv_type == GVT_SVC); 2088 break; 2089 2090 default: 2091 bad_error("graph_insert_vertex_unconfigured", err); 2092 } 2093 } 2094 2095 /* Add dependency from depgroup_v to new vertex */ 2096 info->err = graph_insert_dependency(depgroup_v, v, info->pathp); 2097 switch (info->err) { 2098 case 0: 2099 break; 2100 2101 case ELOOP: 2102 return (ELOOP); 2103 2104 default: 2105 bad_error("graph_insert_dependency", info->err); 2106 } 2107 2108 /* This must be after we insert the dependency, to avoid looping. */ 2109 switch (v->gv_type) { 2110 case GVT_INST: 2111 if ((v->gv_flags & GV_CONFIGURED) != 0) 2112 break; 2113 2114 inst = safe_scf_instance_create(info->h); 2115 2116 rebound = B_FALSE; 2117 2118 rebound: 2119 err = libscf_lookup_instance(v->gv_name, inst); 2120 switch (err) { 2121 case 0: 2122 err = configure_vertex(v, inst); 2123 switch (err) { 2124 case 0: 2125 case ECANCELED: 2126 break; 2127 2128 case ECONNABORTED: 2129 libscf_handle_rebind(info->h); 2130 rebound = B_TRUE; 2131 goto rebound; 2132 2133 default: 2134 bad_error("configure_vertex", err); 2135 } 2136 break; 2137 2138 case ENOENT: 2139 break; 2140 2141 case ECONNABORTED: 2142 libscf_handle_rebind(info->h); 2143 rebound = B_TRUE; 2144 goto rebound; 2145 2146 case EINVAL: 2147 case ENOTSUP: 2148 default: 2149 bad_error("libscf_fmri_get_instance", err); 2150 } 2151 2152 scf_instance_destroy(inst); 2153 2154 if (rebound) 2155 return (info->err = ECONNRESET); 2156 break; 2157 2158 case GVT_SVC: 2159 (void) add_service(v->gv_name, info->h, &rebound); 2160 if (rebound) 2161 return (info->err = ECONNRESET); 2162 } 2163 2164 return (0); 2165 } 2166 2167 struct deppg_info { 2168 graph_vertex_t *v; /* GVT_INST vertex */ 2169 int err; /* return error */ 2170 int **pathp; /* return circular dependency path */ 2171 }; 2172 2173 /* 2174 * Make info->v depend on a new GVT_GROUP node for this property group, 2175 * and then call process_dependency_fmri() for the values of the entity 2176 * property. Return 0 on success, or if something goes wrong return nonzero 2177 * and set info->err to ECONNABORTED, EINVAL, or the error code returned by 2178 * process_dependency_fmri(). 2179 */ 2180 static int 2181 process_dependency_pg(scf_propertygroup_t *pg, struct deppg_info *info) 2182 { 2183 scf_handle_t *h; 2184 depgroup_type_t deptype; 2185 struct depfmri_info linfo; 2186 char *fmri, *pg_name; 2187 size_t fmri_sz; 2188 graph_vertex_t *depgrp; 2189 scf_property_t *prop; 2190 int err; 2191 int empty; 2192 scf_error_t scferr; 2193 ssize_t len; 2194 2195 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 2196 2197 h = scf_pg_handle(pg); 2198 2199 pg_name = startd_alloc(max_scf_name_size); 2200 2201 len = scf_pg_get_name(pg, pg_name, max_scf_name_size); 2202 if (len < 0) { 2203 startd_free(pg_name, max_scf_name_size); 2204 switch (scf_error()) { 2205 case SCF_ERROR_CONNECTION_BROKEN: 2206 default: 2207 return (info->err = ECONNABORTED); 2208 2209 case SCF_ERROR_DELETED: 2210 return (info->err = 0); 2211 2212 case SCF_ERROR_NOT_SET: 2213 bad_error("scf_pg_get_name", scf_error()); 2214 } 2215 } 2216 2217 /* 2218 * Skip over empty dependency groups. Since dependency property 2219 * groups are updated atomically, they are either empty or 2220 * fully populated. 
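	 * An empty group is not treated as an error; it is logged at debug
	 * level and skipped.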
	 */
	empty = depgroup_empty(h, pg);
	if (empty < 0) {
		log_error(LOG_INFO,
		    "Error reading dependency group \"%s\" of %s: %s\n",
		    pg_name, info->v->gv_name, scf_strerror(scf_error()));
		startd_free(pg_name, max_scf_name_size);
		return (info->err = EINVAL);

	} else if (empty == 1) {
		log_framework(LOG_DEBUG,
		    "Ignoring empty dependency group \"%s\" of %s\n",
		    pg_name, info->v->gv_name);
		startd_free(pg_name, max_scf_name_size);
		return (info->err = 0);
	}

	fmri_sz = strlen(info->v->gv_name) + 1 + len + 1;
	fmri = startd_alloc(fmri_sz);

	(void) snprintf(fmri, fmri_sz, "%s>%s", info->v->gv_name,
	    pg_name);

	/* Validate the pg before modifying the graph */
	deptype = depgroup_read_grouping(h, pg);
	if (deptype == DEPGRP_UNSUPPORTED) {
		log_error(LOG_INFO,
		    "Dependency \"%s\" of %s has an unknown grouping value.\n",
		    pg_name, info->v->gv_name);
		startd_free(fmri, fmri_sz);
		startd_free(pg_name, max_scf_name_size);
		return (info->err = EINVAL);
	}

	prop = safe_scf_property_create(h);

	if (scf_pg_get_property(pg, SCF_PROPERTY_ENTITIES, prop) != 0) {
		scferr = scf_error();
		scf_property_destroy(prop);
		if (scferr == SCF_ERROR_DELETED) {
			startd_free(fmri, fmri_sz);
			startd_free(pg_name, max_scf_name_size);
			return (info->err = 0);
		} else if (scferr != SCF_ERROR_NOT_FOUND) {
			startd_free(fmri, fmri_sz);
			startd_free(pg_name, max_scf_name_size);
			return (info->err = ECONNABORTED);
		}

		log_error(LOG_INFO,
		    "Dependency \"%s\" of %s is missing a \"%s\" property.\n",
		    pg_name, info->v->gv_name, SCF_PROPERTY_ENTITIES);

		startd_free(fmri, fmri_sz);
		startd_free(pg_name, max_scf_name_size);

		return (info->err = EINVAL);
	}

	/* Create depgroup vertex for pg */
	err = graph_insert_vertex_unconfigured(fmri, GVT_GROUP, deptype,
	    depgroup_read_restart(h, pg), &depgrp);
	assert(err == 0);
	startd_free(fmri, fmri_sz);

	/* Add dependency from inst vertex to new vertex */
	err = graph_insert_dependency(info->v, depgrp, info->pathp);
	/* ELOOP can't happen because this should be a new vertex */
	assert(err == 0);

	linfo.v = depgrp;
	linfo.type = depgroup_read_scheme(h, pg);
	linfo.inst_fmri = info->v->gv_name;
	linfo.pg_name = pg_name;
	linfo.h = h;
	linfo.err = 0;
	linfo.pathp = info->pathp;
	err = walk_property_astrings(prop, (callback_t)process_dependency_fmri,
	    &linfo);

	scf_property_destroy(prop);
	startd_free(pg_name, max_scf_name_size);

	switch (err) {
	case 0:
	case EINTR:
		return (info->err = linfo.err);

	case ECONNABORTED:
	case EINVAL:
		return (info->err = err);

	case ECANCELED:
		return (info->err = 0);

	case ECONNRESET:
		return (info->err = ECONNABORTED);

	default:
		bad_error("walk_property_astrings", err);
		/* NOTREACHED */
	}
}

/*
 * Build the dependency info for v from the repository.  Returns 0 on success,
 * ECONNABORTED on repository disconnection, EINVAL if the repository
 * configuration is invalid, and ELOOP if a dependency would cause a cycle.
 * In the last case, *pathp will point to a -1-terminated array of ids which
 * constitute the rest of the dependency cycle.
2331 */ 2332 static int 2333 set_dependencies(graph_vertex_t *v, scf_instance_t *inst, int **pathp) 2334 { 2335 struct deppg_info info; 2336 int err; 2337 uint_t old_configured; 2338 2339 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 2340 2341 /* 2342 * Mark the vertex as configured during dependency insertion to avoid 2343 * dependency cycles (which can appear in the graph if one of the 2344 * vertices is an exclusion-group). 2345 */ 2346 old_configured = v->gv_flags & GV_CONFIGURED; 2347 v->gv_flags |= GV_CONFIGURED; 2348 2349 info.err = 0; 2350 info.v = v; 2351 info.pathp = pathp; 2352 2353 err = walk_dependency_pgs(inst, (callback_t)process_dependency_pg, 2354 &info); 2355 2356 if (!old_configured) 2357 v->gv_flags &= ~GV_CONFIGURED; 2358 2359 switch (err) { 2360 case 0: 2361 case EINTR: 2362 return (info.err); 2363 2364 case ECONNABORTED: 2365 return (ECONNABORTED); 2366 2367 case ECANCELED: 2368 /* Should get delete event, so return 0. */ 2369 return (0); 2370 2371 default: 2372 bad_error("walk_dependency_pgs", err); 2373 /* NOTREACHED */ 2374 } 2375 } 2376 2377 2378 static void 2379 handle_cycle(const char *fmri, int *path) 2380 { 2381 const char *cp; 2382 size_t sz; 2383 2384 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 2385 2386 path_to_str(path, (char **)&cp, &sz); 2387 2388 log_error(LOG_ERR, "Putting service %s into maintenance " 2389 "because it completes a dependency cycle:\n%s", fmri ? fmri : "?", 2390 cp); 2391 2392 startd_free((void *)cp, sz); 2393 } 2394 2395 /* 2396 * When run on the dependencies of a vertex, populates list with 2397 * graph_edge_t's which point to the instance vertices (no GVT_GROUP nodes) 2398 * on which the vertex depends. 2399 */ 2400 static int 2401 append_insts(graph_edge_t *e, uu_list_t *list) 2402 { 2403 graph_vertex_t *v = e->ge_vertex; 2404 graph_edge_t *new; 2405 int r; 2406 2407 switch (v->gv_type) { 2408 case GVT_INST: 2409 case GVT_SVC: 2410 break; 2411 2412 case GVT_GROUP: 2413 r = uu_list_walk(v->gv_dependencies, 2414 (uu_walk_fn_t *)append_insts, list, 0); 2415 assert(r == 0); 2416 return (UU_WALK_NEXT); 2417 2418 case GVT_FILE: 2419 return (UU_WALK_NEXT); 2420 2421 default: 2422 #ifndef NDEBUG 2423 uu_warn("%s:%d: Unexpected vertex type %d.\n", __FILE__, 2424 __LINE__, v->gv_type); 2425 #endif 2426 abort(); 2427 } 2428 2429 new = startd_alloc(sizeof (*new)); 2430 new->ge_vertex = v; 2431 uu_list_node_init(new, &new->ge_link, graph_edge_pool); 2432 r = uu_list_insert_before(list, NULL, new); 2433 assert(r == 0); 2434 return (UU_WALK_NEXT); 2435 } 2436 2437 static boolean_t 2438 should_be_in_subgraph(graph_vertex_t *v) 2439 { 2440 graph_edge_t *e; 2441 2442 if (v == milestone) 2443 return (B_TRUE); 2444 2445 /* 2446 * v is in the subgraph if any of its dependents are in the subgraph. 2447 * Except for EXCLUDE_ALL dependents. And OPTIONAL dependents only 2448 * count if we're enabled. 2449 */ 2450 for (e = uu_list_first(v->gv_dependents); 2451 e != NULL; 2452 e = uu_list_next(v->gv_dependents, e)) { 2453 graph_vertex_t *dv = e->ge_vertex; 2454 2455 if (!(dv->gv_flags & GV_INSUBGRAPH)) 2456 continue; 2457 2458 /* 2459 * Don't include instances that are optional and disabled. 
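		 * In the GVT_SVC case below, v is an instance and dv is its
		 * service vertex; v is only pulled into the subgraph through
		 * dv if some dependent group of the service, other than an
		 * exclude_all group (or an optional_all group while v is not
		 * enabled), references it.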
		 */
		if (v->gv_type == GVT_INST && dv->gv_type == GVT_SVC) {

			int in = 0;
			graph_edge_t *ee;

			for (ee = uu_list_first(dv->gv_dependents);
			    ee != NULL;
			    ee = uu_list_next(dv->gv_dependents, ee)) {

				graph_vertex_t *ddv = ee->ge_vertex;

				if (ddv->gv_type == GVT_GROUP &&
				    ddv->gv_depgroup == DEPGRP_EXCLUDE_ALL)
					continue;

				if (ddv->gv_type == GVT_GROUP &&
				    ddv->gv_depgroup == DEPGRP_OPTIONAL_ALL &&
				    !(v->gv_flags & GV_ENBLD_NOOVR))
					continue;

				in = 1;
			}
			if (!in)
				continue;
		}
		if (v->gv_type == GVT_INST &&
		    dv->gv_type == GVT_GROUP &&
		    dv->gv_depgroup == DEPGRP_OPTIONAL_ALL &&
		    !(v->gv_flags & GV_ENBLD_NOOVR))
			continue;

		/* Don't include excluded services and instances */
		if (dv->gv_type == GVT_GROUP &&
		    dv->gv_depgroup == DEPGRP_EXCLUDE_ALL)
			continue;

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Ensures that GV_INSUBGRAPH is set properly for v and its descendents.  If
 * any bits change, manipulate the repository appropriately.  Returns 0 or
 * ECONNABORTED.
 */
static int
eval_subgraph(graph_vertex_t *v, scf_handle_t *h)
{
	boolean_t old = (v->gv_flags & GV_INSUBGRAPH) != 0;
	boolean_t new;
	graph_edge_t *e;
	scf_instance_t *inst;
	int ret = 0, r;

	assert(milestone != NULL && milestone != MILESTONE_NONE);

	new = should_be_in_subgraph(v);

	if (new == old)
		return (0);

	log_framework(LOG_DEBUG, new ? "Adding %s to the subgraph.\n" :
	    "Removing %s from the subgraph.\n", v->gv_name);

	v->gv_flags = (v->gv_flags & ~GV_INSUBGRAPH) |
	    (new ? GV_INSUBGRAPH : 0);

	if (v->gv_type == GVT_INST && (v->gv_flags & GV_CONFIGURED)) {
		int err;

get_inst:
		err = libscf_fmri_get_instance(h, v->gv_name, &inst);
		if (err != 0) {
			switch (err) {
			case ECONNABORTED:
				libscf_handle_rebind(h);
				ret = ECONNABORTED;
				goto get_inst;

			case ENOENT:
				break;

			case EINVAL:
			case ENOTSUP:
			default:
				bad_error("libscf_fmri_get_instance", err);
			}
		} else {
			const char *f;

			if (new) {
				err = libscf_delete_enable_ovr(inst);
				f = "libscf_delete_enable_ovr";
			} else {
				err = libscf_set_enable_ovr(inst, 0);
				f = "libscf_set_enable_ovr";
			}
			scf_instance_destroy(inst);
			switch (err) {
			case 0:
			case ECANCELED:
				break;

			case ECONNABORTED:
				libscf_handle_rebind(h);
				/*
				 * We must continue so the graph is updated,
				 * but we must return ECONNABORTED so any
				 * libscf state held by any callers is reset.
				 */
				ret = ECONNABORTED;
				goto get_inst;

			case EROFS:
			case EPERM:
				log_error(LOG_WARNING,
				    "Could not set %s/%s for %s: %s.\n",
				    SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED,
				    v->gv_name, strerror(err));
				break;

			default:
				bad_error(f, err);
			}
		}
	}

	for (e = uu_list_first(v->gv_dependencies);
	    e != NULL;
	    e = uu_list_next(v->gv_dependencies, e)) {
		r = eval_subgraph(e->ge_vertex, h);
		if (r != 0) {
			assert(r == ECONNABORTED);
			ret = ECONNABORTED;
		}
	}

	return (ret);
}

/*
 * Delete the (property group) dependencies of v & create new ones based on
 * inst.
If doing so would create a cycle, log a message and put the instance 2606 * into maintenance. Update GV_INSUBGRAPH flags as necessary. Returns 0 or 2607 * ECONNABORTED. 2608 */ 2609 static int 2610 refresh_vertex(graph_vertex_t *v, scf_instance_t *inst) 2611 { 2612 int err; 2613 int *path; 2614 char *fmri; 2615 int r; 2616 scf_handle_t *h = scf_instance_handle(inst); 2617 uu_list_t *old_deps; 2618 int ret = 0; 2619 graph_edge_t *e; 2620 2621 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 2622 assert(v->gv_type == GVT_INST); 2623 2624 log_framework(LOG_DEBUG, "Graph engine: Refreshing %s.\n", v->gv_name); 2625 2626 if (milestone > MILESTONE_NONE) { 2627 /* 2628 * In case some of v's dependencies are being deleted we must 2629 * make a list of them now for GV_INSUBGRAPH-flag evaluation 2630 * after the new dependencies are in place. 2631 */ 2632 old_deps = startd_list_create(graph_edge_pool, NULL, 0); 2633 2634 err = uu_list_walk(v->gv_dependencies, 2635 (uu_walk_fn_t *)append_insts, old_deps, 0); 2636 assert(err == 0); 2637 } 2638 2639 delete_instance_dependencies(v, B_FALSE); 2640 2641 err = set_dependencies(v, inst, &path); 2642 switch (err) { 2643 case 0: 2644 break; 2645 2646 case ECONNABORTED: 2647 ret = err; 2648 goto out; 2649 2650 case EINVAL: 2651 case ELOOP: 2652 r = libscf_instance_get_fmri(inst, &fmri); 2653 switch (r) { 2654 case 0: 2655 break; 2656 2657 case ECONNABORTED: 2658 ret = ECONNABORTED; 2659 goto out; 2660 2661 case ECANCELED: 2662 ret = 0; 2663 goto out; 2664 2665 default: 2666 bad_error("libscf_instance_get_fmri", r); 2667 } 2668 2669 if (err == EINVAL) { 2670 log_error(LOG_ERR, "Transitioning %s " 2671 "to maintenance due to misconfiguration.\n", 2672 fmri ? fmri : "?"); 2673 vertex_send_event(v, 2674 RESTARTER_EVENT_TYPE_INVALID_DEPENDENCY); 2675 } else { 2676 handle_cycle(fmri, path); 2677 vertex_send_event(v, 2678 RESTARTER_EVENT_TYPE_DEPENDENCY_CYCLE); 2679 } 2680 startd_free(fmri, max_scf_fmri_size); 2681 ret = 0; 2682 goto out; 2683 2684 default: 2685 bad_error("set_dependencies", err); 2686 } 2687 2688 if (milestone > MILESTONE_NONE) { 2689 boolean_t aborted = B_FALSE; 2690 2691 for (e = uu_list_first(old_deps); 2692 e != NULL; 2693 e = uu_list_next(old_deps, e)) { 2694 if (eval_subgraph(e->ge_vertex, h) == 2695 ECONNABORTED) 2696 aborted = B_TRUE; 2697 } 2698 2699 for (e = uu_list_first(v->gv_dependencies); 2700 e != NULL; 2701 e = uu_list_next(v->gv_dependencies, e)) { 2702 if (eval_subgraph(e->ge_vertex, h) == 2703 ECONNABORTED) 2704 aborted = B_TRUE; 2705 } 2706 2707 if (aborted) { 2708 ret = ECONNABORTED; 2709 goto out; 2710 } 2711 } 2712 2713 if (v->gv_state == RESTARTER_STATE_OFFLINE) { 2714 if (instance_satisfied(v, B_FALSE) == 1) { 2715 if (v->gv_start_f == NULL) 2716 vertex_send_event(v, 2717 RESTARTER_EVENT_TYPE_START); 2718 else 2719 v->gv_start_f(v); 2720 } 2721 } 2722 2723 ret = 0; 2724 2725 out: 2726 if (milestone > MILESTONE_NONE) { 2727 void *cookie = NULL; 2728 2729 while ((e = uu_list_teardown(old_deps, &cookie)) != NULL) 2730 startd_free(e, sizeof (*e)); 2731 2732 uu_list_destroy(old_deps); 2733 } 2734 2735 return (ret); 2736 } 2737 2738 /* 2739 * Set up v according to inst. That is, make sure it depends on its 2740 * restarter and set up its dependencies. Send the ADD_INSTANCE command to 2741 * the restarter, and send ENABLE or DISABLE as appropriate. 2742 * 2743 * Returns 0 on success, ECONNABORTED on repository disconnection, or 2744 * ECANCELED if inst is deleted. 
2745 */ 2746 static int 2747 configure_vertex(graph_vertex_t *v, scf_instance_t *inst) 2748 { 2749 scf_handle_t *h; 2750 scf_propertygroup_t *pg; 2751 scf_snapshot_t *snap; 2752 char *restarter_fmri = startd_alloc(max_scf_value_size); 2753 int enabled, enabled_ovr; 2754 int err; 2755 int *path; 2756 2757 restarter_fmri[0] = '\0'; 2758 2759 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 2760 assert(v->gv_type == GVT_INST); 2761 assert((v->gv_flags & GV_CONFIGURED) == 0); 2762 2763 /* GV_INSUBGRAPH should already be set properly. */ 2764 assert(should_be_in_subgraph(v) == 2765 ((v->gv_flags & GV_INSUBGRAPH) != 0)); 2766 2767 log_framework(LOG_DEBUG, "Graph adding %s.\n", v->gv_name); 2768 2769 h = scf_instance_handle(inst); 2770 2771 /* 2772 * If the instance does not have a restarter property group, 2773 * initialize its state to uninitialized/none, in case the restarter 2774 * is not enabled. 2775 */ 2776 pg = safe_scf_pg_create(h); 2777 2778 if (scf_instance_get_pg(inst, SCF_PG_RESTARTER, pg) != 0) { 2779 instance_data_t idata; 2780 uint_t count = 0, msecs = ALLOC_DELAY; 2781 2782 switch (scf_error()) { 2783 case SCF_ERROR_NOT_FOUND: 2784 break; 2785 2786 case SCF_ERROR_CONNECTION_BROKEN: 2787 default: 2788 scf_pg_destroy(pg); 2789 return (ECONNABORTED); 2790 2791 case SCF_ERROR_DELETED: 2792 scf_pg_destroy(pg); 2793 return (ECANCELED); 2794 2795 case SCF_ERROR_NOT_SET: 2796 bad_error("scf_instance_get_pg", scf_error()); 2797 } 2798 2799 switch (err = libscf_instance_get_fmri(inst, 2800 (char **)&idata.i_fmri)) { 2801 case 0: 2802 break; 2803 2804 case ECONNABORTED: 2805 case ECANCELED: 2806 scf_pg_destroy(pg); 2807 return (err); 2808 2809 default: 2810 bad_error("libscf_instance_get_fmri", err); 2811 } 2812 2813 idata.i_state = RESTARTER_STATE_NONE; 2814 idata.i_next_state = RESTARTER_STATE_NONE; 2815 2816 init_state: 2817 switch (err = _restarter_commit_states(h, &idata, 2818 RESTARTER_STATE_UNINIT, RESTARTER_STATE_NONE, NULL)) { 2819 case 0: 2820 break; 2821 2822 case ENOMEM: 2823 ++count; 2824 if (count < ALLOC_RETRY) { 2825 (void) poll(NULL, 0, msecs); 2826 msecs *= ALLOC_DELAY_MULT; 2827 goto init_state; 2828 } 2829 2830 uu_die("Insufficient memory.\n"); 2831 /* NOTREACHED */ 2832 2833 case ECONNABORTED: 2834 scf_pg_destroy(pg); 2835 return (ECONNABORTED); 2836 2837 case ENOENT: 2838 scf_pg_destroy(pg); 2839 return (ECANCELED); 2840 2841 case EPERM: 2842 case EACCES: 2843 case EROFS: 2844 log_error(LOG_NOTICE, "Could not initialize state for " 2845 "%s: %s.\n", idata.i_fmri, strerror(err)); 2846 break; 2847 2848 case EINVAL: 2849 default: 2850 bad_error("_restarter_commit_states", err); 2851 } 2852 2853 startd_free((void *)idata.i_fmri, max_scf_fmri_size); 2854 } 2855 2856 scf_pg_destroy(pg); 2857 2858 if (milestone != NULL) { 2859 /* 2860 * Make sure the enable-override is set properly before we 2861 * read whether we should be enabled. 
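		 * Instances outside the subgraph get a disable override;
		 * instances inside it have any such override removed.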
2862 */ 2863 if (milestone == MILESTONE_NONE || 2864 !(v->gv_flags & GV_INSUBGRAPH)) { 2865 switch (err = libscf_set_enable_ovr(inst, 0)) { 2866 case 0: 2867 break; 2868 2869 case ECONNABORTED: 2870 case ECANCELED: 2871 return (err); 2872 2873 case EROFS: 2874 log_error(LOG_WARNING, 2875 "Could not set %s/%s for %s: %s.\n", 2876 SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 2877 v->gv_name, strerror(err)); 2878 break; 2879 2880 case EPERM: 2881 uu_die("Permission denied.\n"); 2882 /* NOTREACHED */ 2883 2884 default: 2885 bad_error("libscf_set_enable_ovr", err); 2886 } 2887 } else { 2888 assert(v->gv_flags & GV_INSUBGRAPH); 2889 switch (err = libscf_delete_enable_ovr(inst)) { 2890 case 0: 2891 break; 2892 2893 case ECONNABORTED: 2894 case ECANCELED: 2895 return (err); 2896 2897 case EPERM: 2898 uu_die("Permission denied.\n"); 2899 /* NOTREACHED */ 2900 2901 default: 2902 bad_error("libscf_delete_enable_ovr", err); 2903 } 2904 } 2905 } 2906 2907 err = libscf_get_basic_instance_data(h, inst, v->gv_name, &enabled, 2908 &enabled_ovr, &restarter_fmri); 2909 switch (err) { 2910 case 0: 2911 break; 2912 2913 case ECONNABORTED: 2914 case ECANCELED: 2915 startd_free(restarter_fmri, max_scf_value_size); 2916 return (err); 2917 2918 case ENOENT: 2919 log_framework(LOG_DEBUG, 2920 "Ignoring %s because it has no general property group.\n", 2921 v->gv_name); 2922 startd_free(restarter_fmri, max_scf_value_size); 2923 return (0); 2924 2925 default: 2926 bad_error("libscf_get_basic_instance_data", err); 2927 } 2928 2929 if (enabled == -1) { 2930 startd_free(restarter_fmri, max_scf_value_size); 2931 return (0); 2932 } 2933 2934 v->gv_flags = (v->gv_flags & ~GV_ENBLD_NOOVR) | 2935 (enabled ? GV_ENBLD_NOOVR : 0); 2936 2937 if (enabled_ovr != -1) 2938 enabled = enabled_ovr; 2939 2940 v->gv_state = RESTARTER_STATE_UNINIT; 2941 2942 snap = libscf_get_or_make_running_snapshot(inst, v->gv_name, B_TRUE); 2943 scf_snapshot_destroy(snap); 2944 2945 /* Set up the restarter. (Sends _ADD_INSTANCE on success.) */ 2946 err = graph_change_restarter(v, restarter_fmri, h, &path); 2947 if (err != 0) { 2948 instance_data_t idata; 2949 uint_t count = 0, msecs = ALLOC_DELAY; 2950 const char *reason; 2951 2952 if (err == ECONNABORTED) { 2953 startd_free(restarter_fmri, max_scf_value_size); 2954 return (err); 2955 } 2956 2957 assert(err == EINVAL || err == ELOOP); 2958 2959 if (err == EINVAL) { 2960 log_framework(LOG_WARNING, emsg_invalid_restarter, 2961 v->gv_name); 2962 reason = "invalid_restarter"; 2963 } else { 2964 handle_cycle(v->gv_name, path); 2965 reason = "dependency_cycle"; 2966 } 2967 2968 startd_free(restarter_fmri, max_scf_value_size); 2969 2970 /* 2971 * We didn't register the instance with the restarter, so we 2972 * must set maintenance mode ourselves. 
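		 * _restarter_commit_states() below records the maintenance
		 * state in the repository; ENOMEM is retried with a growing
		 * delay.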
2973 */ 2974 err = libscf_instance_get_fmri(inst, (char **)&idata.i_fmri); 2975 if (err != 0) { 2976 assert(err == ECONNABORTED || err == ECANCELED); 2977 return (err); 2978 } 2979 2980 idata.i_state = RESTARTER_STATE_NONE; 2981 idata.i_next_state = RESTARTER_STATE_NONE; 2982 2983 set_maint: 2984 switch (err = _restarter_commit_states(h, &idata, 2985 RESTARTER_STATE_MAINT, RESTARTER_STATE_NONE, reason)) { 2986 case 0: 2987 break; 2988 2989 case ENOMEM: 2990 ++count; 2991 if (count < ALLOC_RETRY) { 2992 (void) poll(NULL, 0, msecs); 2993 msecs *= ALLOC_DELAY_MULT; 2994 goto set_maint; 2995 } 2996 2997 uu_die("Insufficient memory.\n"); 2998 /* NOTREACHED */ 2999 3000 case ECONNABORTED: 3001 return (ECONNABORTED); 3002 3003 case ENOENT: 3004 return (ECANCELED); 3005 3006 case EPERM: 3007 case EACCES: 3008 case EROFS: 3009 log_error(LOG_NOTICE, "Could not initialize state for " 3010 "%s: %s.\n", idata.i_fmri, strerror(err)); 3011 break; 3012 3013 case EINVAL: 3014 default: 3015 bad_error("_restarter_commit_states", err); 3016 } 3017 3018 startd_free((void *)idata.i_fmri, max_scf_fmri_size); 3019 3020 v->gv_state = RESTARTER_STATE_MAINT; 3021 3022 goto out; 3023 } 3024 startd_free(restarter_fmri, max_scf_value_size); 3025 3026 /* Add all the other dependencies. */ 3027 err = refresh_vertex(v, inst); 3028 if (err != 0) { 3029 assert(err == ECONNABORTED); 3030 return (err); 3031 } 3032 3033 out: 3034 v->gv_flags |= GV_CONFIGURED; 3035 3036 graph_enable_by_vertex(v, enabled, 0); 3037 3038 return (0); 3039 } 3040 3041 static void 3042 do_uadmin(void) 3043 { 3044 int fd, left; 3045 struct statvfs vfs; 3046 3047 const char * const resetting = "/etc/svc/volatile/resetting"; 3048 3049 fd = creat(resetting, 0777); 3050 if (fd >= 0) 3051 startd_close(fd); 3052 else 3053 uu_warn("Could not create \"%s\"", resetting); 3054 3055 /* Kill dhcpagent if we're not using nfs for root */ 3056 if ((statvfs("/", &vfs) == 0) && 3057 (strncmp(vfs.f_basetype, "nfs", sizeof ("nfs") - 1) != 0)) 3058 (void) system("/usr/bin/pkill -x -u 0 dhcpagent"); 3059 3060 (void) system("/usr/sbin/killall"); 3061 left = 5; 3062 while (left > 0) 3063 left = sleep(left); 3064 3065 (void) system("/usr/sbin/killall 9"); 3066 left = 10; 3067 while (left > 0) 3068 left = sleep(left); 3069 3070 sync(); 3071 sync(); 3072 sync(); 3073 3074 (void) system("/sbin/umountall"); 3075 (void) system("/sbin/umount /tmp >/dev/null 2>&1"); 3076 (void) system("/sbin/umount /var/adm >/dev/null 2>&1"); 3077 (void) system("/sbin/umount /var/run >/dev/null 2>&1"); 3078 (void) system("/sbin/umount /var >/dev/null 2>&1"); 3079 (void) system("/sbin/umount /usr >/dev/null 2>&1"); 3080 3081 uu_warn("The system is down.\n"); 3082 3083 (void) uadmin(A_SHUTDOWN, halting, NULL); 3084 uu_warn("uadmin() failed"); 3085 3086 if (remove(resetting) != 0 && errno != ENOENT) 3087 uu_warn("Could not remove \"%s\"", resetting); 3088 } 3089 3090 /* 3091 * If any of the up_svcs[] are online or satisfiable, return true. If they are 3092 * all missing, disabled, in maintenance, or unsatisfiable, return false. 3093 */ 3094 boolean_t 3095 can_come_up(void) 3096 { 3097 int i; 3098 3099 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 3100 3101 /* 3102 * If we are booting to single user (boot -s), 3103 * SCF_MILESTONE_SINGLE_USER is needed to come up because startd 3104 * spawns sulogin after single-user is online (see specials.c). 3105 */ 3106 i = (booting_to_single_user ? 
0 : 1); 3107 3108 for (; up_svcs[i] != NULL; ++i) { 3109 if (up_svcs_p[i] == NULL) { 3110 up_svcs_p[i] = vertex_get_by_name(up_svcs[i]); 3111 3112 if (up_svcs_p[i] == NULL) 3113 continue; 3114 } 3115 3116 /* 3117 * Ignore unconfigured services (the ones that have been 3118 * mentioned in a dependency from other services, but do 3119 * not exist in the repository). Services which exist 3120 * in the repository but don't have general/enabled 3121 * property will be also ignored. 3122 */ 3123 if (!(up_svcs_p[i]->gv_flags & GV_CONFIGURED)) 3124 continue; 3125 3126 switch (up_svcs_p[i]->gv_state) { 3127 case RESTARTER_STATE_ONLINE: 3128 case RESTARTER_STATE_DEGRADED: 3129 /* 3130 * Deactivate verbose boot once a login service has been 3131 * reached. 3132 */ 3133 st->st_log_login_reached = 1; 3134 /*FALLTHROUGH*/ 3135 case RESTARTER_STATE_UNINIT: 3136 return (B_TRUE); 3137 3138 case RESTARTER_STATE_OFFLINE: 3139 if (instance_satisfied(up_svcs_p[i], B_TRUE) != -1) 3140 return (B_TRUE); 3141 log_framework(LOG_DEBUG, 3142 "can_come_up(): %s is unsatisfiable.\n", 3143 up_svcs_p[i]->gv_name); 3144 continue; 3145 3146 case RESTARTER_STATE_DISABLED: 3147 case RESTARTER_STATE_MAINT: 3148 log_framework(LOG_DEBUG, 3149 "can_come_up(): %s is in state %s.\n", 3150 up_svcs_p[i]->gv_name, 3151 instance_state_str[up_svcs_p[i]->gv_state]); 3152 continue; 3153 3154 default: 3155 #ifndef NDEBUG 3156 uu_warn("%s:%d: Unexpected vertex state %d.\n", 3157 __FILE__, __LINE__, up_svcs_p[i]->gv_state); 3158 #endif 3159 abort(); 3160 } 3161 } 3162 3163 /* 3164 * In the seed repository, console-login is unsatisfiable because 3165 * services are missing. To behave correctly in that case we don't want 3166 * to return false until manifest-import is online. 3167 */ 3168 3169 if (manifest_import_p == NULL) { 3170 manifest_import_p = vertex_get_by_name(manifest_import); 3171 3172 if (manifest_import_p == NULL) 3173 return (B_FALSE); 3174 } 3175 3176 switch (manifest_import_p->gv_state) { 3177 case RESTARTER_STATE_ONLINE: 3178 case RESTARTER_STATE_DEGRADED: 3179 case RESTARTER_STATE_DISABLED: 3180 case RESTARTER_STATE_MAINT: 3181 break; 3182 3183 case RESTARTER_STATE_OFFLINE: 3184 if (instance_satisfied(manifest_import_p, B_TRUE) == -1) 3185 break; 3186 /* FALLTHROUGH */ 3187 3188 case RESTARTER_STATE_UNINIT: 3189 return (B_TRUE); 3190 } 3191 3192 return (B_FALSE); 3193 } 3194 3195 /* 3196 * Runs sulogin. Returns 3197 * 0 - success 3198 * EALREADY - sulogin is already running 3199 * EBUSY - console-login is running 3200 */ 3201 static int 3202 run_sulogin(const char *msg) 3203 { 3204 graph_vertex_t *v; 3205 3206 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 3207 3208 if (sulogin_running) 3209 return (EALREADY); 3210 3211 v = vertex_get_by_name(console_login_fmri); 3212 if (v != NULL && inst_running(v)) 3213 return (EBUSY); 3214 3215 sulogin_running = B_TRUE; 3216 3217 MUTEX_UNLOCK(&dgraph_lock); 3218 3219 fork_sulogin(B_FALSE, msg); 3220 3221 MUTEX_LOCK(&dgraph_lock); 3222 3223 sulogin_running = B_FALSE; 3224 3225 if (console_login_ready) { 3226 v = vertex_get_by_name(console_login_fmri); 3227 3228 if (v != NULL && v->gv_state == RESTARTER_STATE_OFFLINE && 3229 !inst_running(v)) { 3230 if (v->gv_start_f == NULL) 3231 vertex_send_event(v, 3232 RESTARTER_EVENT_TYPE_START); 3233 else 3234 v->gv_start_f(v); 3235 } 3236 3237 console_login_ready = B_FALSE; 3238 } 3239 3240 return (0); 3241 } 3242 3243 /* 3244 * The sulogin thread runs sulogin while can_come_up() is false. 
run_sulogin() 3245 * keeps sulogin from stepping on console-login's toes. 3246 */ 3247 /* ARGSUSED */ 3248 static void * 3249 sulogin_thread(void *unused) 3250 { 3251 MUTEX_LOCK(&dgraph_lock); 3252 3253 assert(sulogin_thread_running); 3254 3255 do 3256 (void) run_sulogin("Console login service(s) cannot run\n"); 3257 while (!can_come_up()); 3258 3259 sulogin_thread_running = B_FALSE; 3260 MUTEX_UNLOCK(&dgraph_lock); 3261 3262 return (NULL); 3263 } 3264 3265 /* ARGSUSED */ 3266 void * 3267 single_user_thread(void *unused) 3268 { 3269 uint_t left; 3270 scf_handle_t *h; 3271 scf_instance_t *inst; 3272 scf_property_t *prop; 3273 scf_value_t *val; 3274 const char *msg; 3275 char *buf; 3276 int r; 3277 3278 MUTEX_LOCK(&single_user_thread_lock); 3279 single_user_thread_count++; 3280 3281 if (!booting_to_single_user) { 3282 /* 3283 * From rcS.sh: Look for ttymon, in.telnetd, in.rlogind and 3284 * processes in their process groups so they can be terminated. 3285 */ 3286 (void) fputs("svc.startd: Killing user processes: ", stdout); 3287 (void) system("/usr/sbin/killall"); 3288 (void) system("/usr/sbin/killall 9"); 3289 (void) system("/usr/bin/pkill -TERM -v -u 0,1"); 3290 3291 left = 5; 3292 while (left > 0) 3293 left = sleep(left); 3294 3295 (void) system("/usr/bin/pkill -KILL -v -u 0,1"); 3296 (void) puts("done."); 3297 } 3298 3299 if (go_single_user_mode || booting_to_single_user) { 3300 msg = "SINGLE USER MODE\n"; 3301 } else { 3302 assert(go_to_level1); 3303 3304 fork_rc_script('1', "start", B_TRUE); 3305 3306 uu_warn("The system is ready for administration.\n"); 3307 3308 msg = ""; 3309 } 3310 3311 MUTEX_UNLOCK(&single_user_thread_lock); 3312 3313 for (;;) { 3314 MUTEX_LOCK(&dgraph_lock); 3315 r = run_sulogin(msg); 3316 MUTEX_UNLOCK(&dgraph_lock); 3317 if (r == 0) 3318 break; 3319 3320 assert(r == EALREADY || r == EBUSY); 3321 3322 left = 3; 3323 while (left > 0) 3324 left = sleep(left); 3325 } 3326 3327 MUTEX_LOCK(&single_user_thread_lock); 3328 3329 /* 3330 * If another single user thread has started, let it finish changing 3331 * the run level. 
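	 * single_user_thread_count is protected by single_user_thread_lock,
	 * which we hold here.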
3332 */ 3333 if (single_user_thread_count > 1) { 3334 single_user_thread_count--; 3335 MUTEX_UNLOCK(&single_user_thread_lock); 3336 return (NULL); 3337 } 3338 3339 h = libscf_handle_create_bound_loop(); 3340 inst = scf_instance_create(h); 3341 prop = safe_scf_property_create(h); 3342 val = safe_scf_value_create(h); 3343 buf = startd_alloc(max_scf_fmri_size); 3344 3345 lookup: 3346 if (scf_handle_decode_fmri(h, SCF_SERVICE_STARTD, NULL, NULL, inst, 3347 NULL, NULL, SCF_DECODE_FMRI_EXACT) != 0) { 3348 switch (scf_error()) { 3349 case SCF_ERROR_NOT_FOUND: 3350 r = libscf_create_self(h); 3351 if (r == 0) 3352 goto lookup; 3353 assert(r == ECONNABORTED); 3354 /* FALLTHROUGH */ 3355 3356 case SCF_ERROR_CONNECTION_BROKEN: 3357 libscf_handle_rebind(h); 3358 goto lookup; 3359 3360 case SCF_ERROR_INVALID_ARGUMENT: 3361 case SCF_ERROR_CONSTRAINT_VIOLATED: 3362 case SCF_ERROR_NOT_BOUND: 3363 case SCF_ERROR_HANDLE_MISMATCH: 3364 default: 3365 bad_error("scf_handle_decode_fmri", scf_error()); 3366 } 3367 } 3368 3369 MUTEX_LOCK(&dgraph_lock); 3370 3371 r = libscf_inst_delete_prop(inst, SCF_PG_OPTIONS_OVR, 3372 SCF_PROPERTY_MILESTONE); 3373 switch (r) { 3374 case 0: 3375 case ECANCELED: 3376 break; 3377 3378 case ECONNABORTED: 3379 MUTEX_UNLOCK(&dgraph_lock); 3380 libscf_handle_rebind(h); 3381 goto lookup; 3382 3383 case EPERM: 3384 case EACCES: 3385 case EROFS: 3386 log_error(LOG_WARNING, "Could not clear temporary milestone: " 3387 "%s.\n", strerror(r)); 3388 break; 3389 3390 default: 3391 bad_error("libscf_inst_delete_prop", r); 3392 } 3393 3394 MUTEX_UNLOCK(&dgraph_lock); 3395 3396 r = libscf_get_milestone(inst, prop, val, buf, max_scf_fmri_size); 3397 switch (r) { 3398 case ECANCELED: 3399 case ENOENT: 3400 case EINVAL: 3401 (void) strcpy(buf, "all"); 3402 /* FALLTHROUGH */ 3403 3404 case 0: 3405 uu_warn("Returning to milestone %s.\n", buf); 3406 break; 3407 3408 case ECONNABORTED: 3409 libscf_handle_rebind(h); 3410 goto lookup; 3411 3412 default: 3413 bad_error("libscf_get_milestone", r); 3414 } 3415 3416 r = dgraph_set_milestone(buf, h, B_FALSE); 3417 switch (r) { 3418 case 0: 3419 case ECONNRESET: 3420 case EALREADY: 3421 case EINVAL: 3422 case ENOENT: 3423 break; 3424 3425 default: 3426 bad_error("dgraph_set_milestone", r); 3427 } 3428 3429 /* 3430 * See graph_runlevel_changed(). 3431 */ 3432 MUTEX_LOCK(&dgraph_lock); 3433 utmpx_set_runlevel(target_milestone_as_runlevel(), 'S', B_TRUE); 3434 MUTEX_UNLOCK(&dgraph_lock); 3435 3436 startd_free(buf, max_scf_fmri_size); 3437 scf_value_destroy(val); 3438 scf_property_destroy(prop); 3439 scf_instance_destroy(inst); 3440 scf_handle_destroy(h); 3441 3442 /* 3443 * We'll give ourselves 3 seconds to respond to all of the enablings 3444 * that setting the milestone should have created before checking 3445 * whether to run sulogin. 3446 */ 3447 left = 3; 3448 while (left > 0) 3449 left = sleep(left); 3450 3451 MUTEX_LOCK(&dgraph_lock); 3452 /* 3453 * Clearing these variables will allow the sulogin thread to run. We 3454 * check here in case there aren't any more state updates anytime soon. 3455 */ 3456 go_to_level1 = go_single_user_mode = booting_to_single_user = B_FALSE; 3457 if (!sulogin_thread_running && !can_come_up()) { 3458 (void) startd_thread_create(sulogin_thread, NULL); 3459 sulogin_thread_running = B_TRUE; 3460 } 3461 MUTEX_UNLOCK(&dgraph_lock); 3462 single_user_thread_count--; 3463 MUTEX_UNLOCK(&single_user_thread_lock); 3464 return (NULL); 3465 } 3466 3467 3468 /* 3469 * Dependency graph operations API. 
These are handle-independent thread-safe 3470 * graph manipulation functions which are the entry points for the event 3471 * threads below. 3472 */ 3473 3474 /* 3475 * If a configured vertex exists for inst_fmri, return EEXIST. If no vertex 3476 * exists for inst_fmri, add one. Then fetch the restarter from inst, make 3477 * this vertex dependent on it, and send _ADD_INSTANCE to the restarter. 3478 * Fetch whether the instance should be enabled from inst and send _ENABLE or 3479 * _DISABLE as appropriate. Finally rummage through inst's dependency 3480 * property groups and add vertices and edges as appropriate. If anything 3481 * goes wrong after sending _ADD_INSTANCE, send _ADMIN_MAINT_ON to put the 3482 * instance in maintenance. Don't send _START or _STOP until we get a state 3483 * update in case we're being restarted and the service is already running. 3484 * 3485 * To support booting to a milestone, we must also make sure all dependencies 3486 * encountered are configured, if they exist in the repository. 3487 * 3488 * Returns 0 on success, ECONNABORTED on repository disconnection, EINVAL if 3489 * inst_fmri is an invalid (or not canonical) FMRI, ECANCELED if inst is 3490 * deleted, or EEXIST if a configured vertex for inst_fmri already exists. 3491 */ 3492 int 3493 dgraph_add_instance(const char *inst_fmri, scf_instance_t *inst, 3494 boolean_t lock_graph) 3495 { 3496 graph_vertex_t *v; 3497 int err; 3498 3499 if (strcmp(inst_fmri, SCF_SERVICE_STARTD) == 0) 3500 return (0); 3501 3502 /* Check for a vertex for inst_fmri. */ 3503 if (lock_graph) { 3504 MUTEX_LOCK(&dgraph_lock); 3505 } else { 3506 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 3507 } 3508 3509 v = vertex_get_by_name(inst_fmri); 3510 3511 if (v != NULL) { 3512 assert(v->gv_type == GVT_INST); 3513 3514 if (v->gv_flags & GV_CONFIGURED) { 3515 if (lock_graph) 3516 MUTEX_UNLOCK(&dgraph_lock); 3517 return (EEXIST); 3518 } 3519 } else { 3520 /* Add the vertex. */ 3521 err = graph_insert_vertex_unconfigured(inst_fmri, GVT_INST, 0, 3522 RERR_NONE, &v); 3523 if (err != 0) { 3524 assert(err == EINVAL); 3525 if (lock_graph) 3526 MUTEX_UNLOCK(&dgraph_lock); 3527 return (EINVAL); 3528 } 3529 } 3530 3531 err = configure_vertex(v, inst); 3532 3533 if (lock_graph) 3534 MUTEX_UNLOCK(&dgraph_lock); 3535 3536 return (err); 3537 } 3538 3539 /* 3540 * Locate the vertex for this property group's instance. If it doesn't exist 3541 * or is unconfigured, call dgraph_add_instance() & return. Otherwise fetch 3542 * the restarter for the instance, and if it has changed, send 3543 * _REMOVE_INSTANCE to the old restarter, remove the dependency, make sure the 3544 * new restarter has a vertex, add a new dependency, and send _ADD_INSTANCE to 3545 * the new restarter. Then fetch whether the instance should be enabled, and 3546 * if it is different from what we had, or if we changed the restarter, send 3547 * the appropriate _ENABLE or _DISABLE command. 3548 * 3549 * Returns 0 on success, ENOTSUP if the pg's parent is not an instance, 3550 * ECONNABORTED on repository disconnection, ECANCELED if the instance is 3551 * deleted, or -1 if the instance's general property group is deleted or if 3552 * its enabled property is misconfigured. 
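 * Note that a change to the restarter value is currently ignored here; only
 * the enabled value (and any override) is acted upon.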
3553 */ 3554 static int 3555 dgraph_update_general(scf_propertygroup_t *pg) 3556 { 3557 scf_handle_t *h; 3558 scf_instance_t *inst; 3559 char *fmri; 3560 char *restarter_fmri; 3561 graph_vertex_t *v; 3562 int err; 3563 int enabled, enabled_ovr; 3564 int oldflags; 3565 3566 /* Find the vertex for this service */ 3567 h = scf_pg_handle(pg); 3568 3569 inst = safe_scf_instance_create(h); 3570 3571 if (scf_pg_get_parent_instance(pg, inst) != 0) { 3572 switch (scf_error()) { 3573 case SCF_ERROR_CONSTRAINT_VIOLATED: 3574 return (ENOTSUP); 3575 3576 case SCF_ERROR_CONNECTION_BROKEN: 3577 default: 3578 return (ECONNABORTED); 3579 3580 case SCF_ERROR_DELETED: 3581 return (0); 3582 3583 case SCF_ERROR_NOT_SET: 3584 bad_error("scf_pg_get_parent_instance", scf_error()); 3585 } 3586 } 3587 3588 err = libscf_instance_get_fmri(inst, &fmri); 3589 switch (err) { 3590 case 0: 3591 break; 3592 3593 case ECONNABORTED: 3594 scf_instance_destroy(inst); 3595 return (ECONNABORTED); 3596 3597 case ECANCELED: 3598 scf_instance_destroy(inst); 3599 return (0); 3600 3601 default: 3602 bad_error("libscf_instance_get_fmri", err); 3603 } 3604 3605 log_framework(LOG_DEBUG, 3606 "Graph engine: Reloading general properties for %s.\n", fmri); 3607 3608 MUTEX_LOCK(&dgraph_lock); 3609 3610 v = vertex_get_by_name(fmri); 3611 if (v == NULL || !(v->gv_flags & GV_CONFIGURED)) { 3612 /* Will get the up-to-date properties. */ 3613 MUTEX_UNLOCK(&dgraph_lock); 3614 err = dgraph_add_instance(fmri, inst, B_TRUE); 3615 startd_free(fmri, max_scf_fmri_size); 3616 scf_instance_destroy(inst); 3617 return (err == ECANCELED ? 0 : err); 3618 } 3619 3620 /* Read enabled & restarter from repository. */ 3621 restarter_fmri = startd_alloc(max_scf_value_size); 3622 err = libscf_get_basic_instance_data(h, inst, v->gv_name, &enabled, 3623 &enabled_ovr, &restarter_fmri); 3624 if (err != 0 || enabled == -1) { 3625 MUTEX_UNLOCK(&dgraph_lock); 3626 scf_instance_destroy(inst); 3627 startd_free(fmri, max_scf_fmri_size); 3628 3629 switch (err) { 3630 case ENOENT: 3631 case 0: 3632 startd_free(restarter_fmri, max_scf_value_size); 3633 return (-1); 3634 3635 case ECONNABORTED: 3636 case ECANCELED: 3637 startd_free(restarter_fmri, max_scf_value_size); 3638 return (err); 3639 3640 default: 3641 bad_error("libscf_get_basic_instance_data", err); 3642 } 3643 } 3644 3645 oldflags = v->gv_flags; 3646 v->gv_flags = (v->gv_flags & ~GV_ENBLD_NOOVR) | 3647 (enabled ? GV_ENBLD_NOOVR : 0); 3648 3649 if (enabled_ovr != -1) 3650 enabled = enabled_ovr; 3651 3652 /* 3653 * If GV_ENBLD_NOOVR has changed, then we need to re-evaluate the 3654 * subgraph. 3655 */ 3656 if (milestone > MILESTONE_NONE && v->gv_flags != oldflags) 3657 (void) eval_subgraph(v, h); 3658 3659 scf_instance_destroy(inst); 3660 3661 /* Ignore restarter change for now. */ 3662 3663 startd_free(restarter_fmri, max_scf_value_size); 3664 startd_free(fmri, max_scf_fmri_size); 3665 3666 /* 3667 * Always send _ENABLE or _DISABLE. We could avoid this if the 3668 * restarter didn't change and the enabled value didn't change, but 3669 * that's not easy to check and improbable anyway, so we'll just do 3670 * this. 3671 */ 3672 graph_enable_by_vertex(v, enabled, 1); 3673 3674 MUTEX_UNLOCK(&dgraph_lock); 3675 3676 return (0); 3677 } 3678 3679 /* 3680 * Delete all of the property group dependencies of v, update inst's running 3681 * snapshot, and add the dependencies in the new snapshot. If any of the new 3682 * dependencies would create a cycle, send _ADMIN_MAINT_ON. 
Otherwise 3683 * reevaluate v's dependencies, send _START or _STOP as appropriate, and do 3684 * the same for v's dependents. 3685 * 3686 * Returns 3687 * 0 - success 3688 * ECONNABORTED - repository connection broken 3689 * ECANCELED - inst was deleted 3690 * EINVAL - inst is invalid (e.g., missing general/enabled) 3691 * -1 - libscf_snapshots_refresh() failed 3692 */ 3693 static int 3694 dgraph_refresh_instance(graph_vertex_t *v, scf_instance_t *inst) 3695 { 3696 int r; 3697 int enabled; 3698 3699 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 3700 assert(v->gv_type == GVT_INST); 3701 3702 /* Only refresh services with valid general/enabled properties. */ 3703 r = libscf_get_basic_instance_data(scf_instance_handle(inst), inst, 3704 v->gv_name, &enabled, NULL, NULL); 3705 switch (r) { 3706 case 0: 3707 break; 3708 3709 case ECONNABORTED: 3710 case ECANCELED: 3711 return (r); 3712 3713 case ENOENT: 3714 log_framework(LOG_DEBUG, 3715 "Ignoring %s because it has no general property group.\n", 3716 v->gv_name); 3717 return (EINVAL); 3718 3719 default: 3720 bad_error("libscf_get_basic_instance_data", r); 3721 } 3722 3723 if (enabled == -1) 3724 return (EINVAL); 3725 3726 r = libscf_snapshots_refresh(inst, v->gv_name); 3727 if (r != 0) { 3728 if (r != -1) 3729 bad_error("libscf_snapshots_refresh", r); 3730 3731 /* error logged */ 3732 return (r); 3733 } 3734 3735 r = refresh_vertex(v, inst); 3736 if (r != 0 && r != ECONNABORTED) 3737 bad_error("refresh_vertex", r); 3738 return (r); 3739 } 3740 3741 /* 3742 * Returns 1 if any instances which directly depend on the passed instance 3743 * (or it's service) are running. 3744 */ 3745 static int 3746 has_running_nonsubgraph_dependents(graph_vertex_t *v) 3747 { 3748 graph_vertex_t *vv; 3749 graph_edge_t *e; 3750 3751 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 3752 3753 for (e = uu_list_first(v->gv_dependents); 3754 e != NULL; 3755 e = uu_list_next(v->gv_dependents, e)) { 3756 3757 vv = e->ge_vertex; 3758 if (vv->gv_type == GVT_INST) { 3759 if (inst_running(vv) && 3760 ((vv->gv_flags & GV_INSUBGRAPH) == 0)) 3761 return (1); 3762 } else { 3763 /* 3764 * For dependency group or service vertices, keep 3765 * traversing to see if instances are running. 3766 */ 3767 if (has_running_nonsubgraph_dependents(vv)) 3768 return (1); 3769 } 3770 } 3771 return (0); 3772 } 3773 3774 /* 3775 * For the dependency, disable the instance which makes up the dependency if 3776 * it is not in the subgraph and running. If the dependency instance is in 3777 * the subgraph or it is not running, continue by disabling all of it's 3778 * non-subgraph dependencies. 3779 */ 3780 static void 3781 disable_nonsubgraph_dependencies(graph_vertex_t *v, void *arg) 3782 { 3783 int r; 3784 scf_handle_t *h = (scf_handle_t *)arg; 3785 scf_instance_t *inst = NULL; 3786 3787 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 3788 3789 /* Continue recursing non-inst nodes */ 3790 if (v->gv_type != GVT_INST) 3791 goto recurse; 3792 3793 /* 3794 * For instances that are in the subgraph or already not running, 3795 * skip and attempt to disable their non-dependencies. 3796 */ 3797 if ((v->gv_flags & GV_INSUBGRAPH) || (!inst_running(v))) 3798 goto recurse; 3799 3800 /* 3801 * If not all this instance's dependents have stopped 3802 * running, do not disable. 
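	 * Otherwise write a disable override for the instance; if that fails
	 * with EPERM or EROFS, fall back to disabling the vertex directly.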
3803 */ 3804 if (has_running_nonsubgraph_dependents(v)) 3805 return; 3806 3807 inst = scf_instance_create(h); 3808 if (inst == NULL) { 3809 log_error(LOG_WARNING, "Unable to gracefully disable instance:" 3810 " %s due to lack of resources\n", v->gv_name); 3811 goto disable; 3812 } 3813 again: 3814 r = scf_handle_decode_fmri(h, v->gv_name, NULL, NULL, inst, 3815 NULL, NULL, SCF_DECODE_FMRI_EXACT); 3816 if (r != 0) { 3817 switch (scf_error()) { 3818 case SCF_ERROR_CONNECTION_BROKEN: 3819 libscf_handle_rebind(h); 3820 goto again; 3821 3822 case SCF_ERROR_NOT_FOUND: 3823 goto recurse; 3824 3825 case SCF_ERROR_HANDLE_MISMATCH: 3826 case SCF_ERROR_INVALID_ARGUMENT: 3827 case SCF_ERROR_CONSTRAINT_VIOLATED: 3828 case SCF_ERROR_NOT_BOUND: 3829 default: 3830 bad_error("scf_handle_decode_fmri", 3831 scf_error()); 3832 } 3833 } 3834 r = libscf_set_enable_ovr(inst, 0); 3835 switch (r) { 3836 case 0: 3837 scf_instance_destroy(inst); 3838 return; 3839 case ECANCELED: 3840 scf_instance_destroy(inst); 3841 goto recurse; 3842 case ECONNABORTED: 3843 libscf_handle_rebind(h); 3844 goto again; 3845 case EPERM: 3846 case EROFS: 3847 log_error(LOG_WARNING, 3848 "Could not set %s/%s for %s: %s.\n", 3849 SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 3850 v->gv_name, strerror(r)); 3851 goto disable; 3852 default: 3853 bad_error("libscf_set_enable_ovr", r); 3854 } 3855 disable: 3856 graph_enable_by_vertex(v, 0, 0); 3857 return; 3858 recurse: 3859 graph_walk_dependencies(v, disable_nonsubgraph_dependencies, 3860 arg); 3861 } 3862 3863 /* 3864 * Find the vertex for inst_name. If it doesn't exist, return ENOENT. 3865 * Otherwise set its state to state. If the instance has entered a state 3866 * which requires automatic action, take it (Uninitialized: do 3867 * dgraph_refresh_instance() without the snapshot update. Disabled: if the 3868 * instance should be enabled, send _ENABLE. Offline: if the instance should 3869 * be disabled, send _DISABLE, and if its dependencies are satisfied, send 3870 * _START. Online, Degraded: if the instance wasn't running, update its start 3871 * snapshot. Maintenance: no action.) 3872 * 3873 * Also fails with ECONNABORTED, or EINVAL if state is invalid. 
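 * A state change here can also complete a milestone transition: when the
 * last service outside the subgraph comes down, non_subgraph_svcs reaches
 * zero and, depending on why the milestone was set, we halt or start the
 * single-user thread.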
3874 */ 3875 static int 3876 dgraph_set_instance_state(scf_handle_t *h, const char *inst_name, 3877 restarter_instance_state_t state, restarter_error_t serr) 3878 { 3879 graph_vertex_t *v; 3880 int err = 0, r; 3881 int was_running, up_or_down; 3882 restarter_instance_state_t old_state; 3883 3884 MUTEX_LOCK(&dgraph_lock); 3885 3886 v = vertex_get_by_name(inst_name); 3887 if (v == NULL) { 3888 MUTEX_UNLOCK(&dgraph_lock); 3889 return (ENOENT); 3890 } 3891 3892 switch (state) { 3893 case RESTARTER_STATE_UNINIT: 3894 case RESTARTER_STATE_DISABLED: 3895 case RESTARTER_STATE_OFFLINE: 3896 case RESTARTER_STATE_ONLINE: 3897 case RESTARTER_STATE_DEGRADED: 3898 case RESTARTER_STATE_MAINT: 3899 break; 3900 3901 default: 3902 MUTEX_UNLOCK(&dgraph_lock); 3903 return (EINVAL); 3904 } 3905 3906 log_framework(LOG_DEBUG, "Graph noting %s %s -> %s.\n", v->gv_name, 3907 instance_state_str[v->gv_state], instance_state_str[state]); 3908 3909 old_state = v->gv_state; 3910 was_running = inst_running(v); 3911 3912 v->gv_state = state; 3913 3914 up_or_down = was_running ^ inst_running(v); 3915 3916 if (up_or_down && milestone != NULL && !inst_running(v) && 3917 ((v->gv_flags & GV_INSUBGRAPH) == 0 || 3918 milestone == MILESTONE_NONE)) { 3919 --non_subgraph_svcs; 3920 if (non_subgraph_svcs == 0) { 3921 if (halting != -1) { 3922 do_uadmin(); 3923 } else if (go_single_user_mode || go_to_level1) { 3924 (void) startd_thread_create(single_user_thread, 3925 NULL); 3926 } 3927 } else { 3928 graph_walk_dependencies(v, 3929 disable_nonsubgraph_dependencies, (void *)h); 3930 } 3931 } 3932 3933 switch (state) { 3934 case RESTARTER_STATE_UNINIT: { 3935 scf_instance_t *inst; 3936 3937 /* Initialize instance by refreshing it. */ 3938 3939 err = libscf_fmri_get_instance(h, v->gv_name, &inst); 3940 switch (err) { 3941 case 0: 3942 break; 3943 3944 case ECONNABORTED: 3945 MUTEX_UNLOCK(&dgraph_lock); 3946 return (ECONNABORTED); 3947 3948 case ENOENT: 3949 MUTEX_UNLOCK(&dgraph_lock); 3950 return (0); 3951 3952 case EINVAL: 3953 case ENOTSUP: 3954 default: 3955 bad_error("libscf_fmri_get_instance", err); 3956 } 3957 3958 err = refresh_vertex(v, inst); 3959 if (err == 0) 3960 graph_enable_by_vertex(v, v->gv_flags & GV_ENABLED, 0); 3961 3962 scf_instance_destroy(inst); 3963 break; 3964 } 3965 3966 case RESTARTER_STATE_DISABLED: 3967 /* 3968 * If the instance should be disabled, no problem. Otherwise, 3969 * send an enable command, which should result in the instance 3970 * moving to OFFLINE. 3971 */ 3972 if (v->gv_flags & GV_ENABLED) { 3973 vertex_send_event(v, RESTARTER_EVENT_TYPE_ENABLE); 3974 } else if (was_running && v->gv_post_disable_f) { 3975 v->gv_post_disable_f(); 3976 } 3977 break; 3978 3979 case RESTARTER_STATE_OFFLINE: 3980 /* 3981 * If the instance should be enabled, see if we can start it. 3982 * Otherwise send a disable command. 3983 */ 3984 if (v->gv_flags & GV_ENABLED) { 3985 if (instance_satisfied(v, B_FALSE) == 1) { 3986 if (v->gv_start_f == NULL) { 3987 vertex_send_event(v, 3988 RESTARTER_EVENT_TYPE_START); 3989 } else { 3990 v->gv_start_f(v); 3991 } 3992 } else { 3993 log_framework(LOG_DEBUG, 3994 "Dependencies of %s not satisfied, " 3995 "not starting.\n", v->gv_name); 3996 } 3997 } else { 3998 if (was_running && v->gv_post_disable_f) 3999 v->gv_post_disable_f(); 4000 vertex_send_event(v, RESTARTER_EVENT_TYPE_DISABLE); 4001 } 4002 break; 4003 4004 case RESTARTER_STATE_ONLINE: 4005 case RESTARTER_STATE_DEGRADED: 4006 /* 4007 * If the instance has just come up, update the start 4008 * snapshot. 
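		 * (libscf_snapshots_poststart() does the snapshot update;
		 * ENOENT there just means the instance has been deleted and
		 * a later delete event will clean up.)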
4009 */ 4010 if (!was_running) { 4011 /* 4012 * Don't fire if we're just recovering state 4013 * after a restart. 4014 */ 4015 if (old_state != RESTARTER_STATE_UNINIT && 4016 v->gv_post_online_f) 4017 v->gv_post_online_f(); 4018 4019 r = libscf_snapshots_poststart(h, v->gv_name, B_TRUE); 4020 switch (r) { 4021 case 0: 4022 case ENOENT: 4023 /* 4024 * If ENOENT, the instance must have been 4025 * deleted. Pretend we were successful since 4026 * we should get a delete event later. 4027 */ 4028 break; 4029 4030 case ECONNABORTED: 4031 MUTEX_UNLOCK(&dgraph_lock); 4032 return (ECONNABORTED); 4033 4034 case EACCES: 4035 case ENOTSUP: 4036 default: 4037 bad_error("libscf_snapshots_poststart", r); 4038 } 4039 } 4040 if (!(v->gv_flags & GV_ENABLED)) 4041 vertex_send_event(v, RESTARTER_EVENT_TYPE_DISABLE); 4042 break; 4043 4044 case RESTARTER_STATE_MAINT: 4045 /* No action. */ 4046 break; 4047 4048 default: 4049 /* Should have been caught above. */ 4050 #ifndef NDEBUG 4051 uu_warn("%s:%d: Uncaught case %d.\n", __FILE__, __LINE__, 4052 state); 4053 #endif 4054 abort(); 4055 } 4056 4057 /* 4058 * If the service came up or went down, propagate the event. We must 4059 * treat offline -> disabled as a start since it can satisfy 4060 * optional_all dependencies. And we must treat !running -> maintenance 4061 * as a start because maintenance satisfies optional and exclusion 4062 * dependencies. 4063 */ 4064 if (inst_running(v)) { 4065 if (!was_running) { 4066 log_framework(LOG_DEBUG, "Propagating start of %s.\n", 4067 v->gv_name); 4068 4069 graph_walk_dependents(v, propagate_start, NULL); 4070 } else if (serr == RERR_REFRESH) { 4071 /* For refresh we'll get a message sans state change */ 4072 4073 log_framework(LOG_DEBUG, "Propagating refresh of %s.\n", 4074 v->gv_name); 4075 4076 graph_walk_dependents(v, propagate_stop, (void *)serr); 4077 } 4078 } else if (was_running) { 4079 log_framework(LOG_DEBUG, "Propagating stop of %s.\n", 4080 v->gv_name); 4081 4082 graph_walk_dependents(v, propagate_stop, (void *)serr); 4083 } else if (v->gv_state == RESTARTER_STATE_DISABLED) { 4084 log_framework(LOG_DEBUG, "Propagating disable of %s.\n", 4085 v->gv_name); 4086 4087 graph_walk_dependents(v, propagate_start, NULL); 4088 propagate_satbility(v); 4089 } else if (v->gv_state == RESTARTER_STATE_MAINT) { 4090 log_framework(LOG_DEBUG, "Propagating maintenance of %s.\n", 4091 v->gv_name); 4092 4093 graph_walk_dependents(v, propagate_start, NULL); 4094 propagate_satbility(v); 4095 } 4096 4097 if (state != old_state && st->st_load_complete && 4098 !go_single_user_mode && !go_to_level1 && 4099 halting == -1) { 4100 if (!can_come_up() && !sulogin_thread_running) { 4101 (void) startd_thread_create(sulogin_thread, NULL); 4102 sulogin_thread_running = B_TRUE; 4103 } 4104 } 4105 4106 MUTEX_UNLOCK(&dgraph_lock); 4107 4108 return (err); 4109 } 4110 4111 4112 static void 4113 remove_inst_vertex(graph_vertex_t *v) 4114 { 4115 graph_edge_t *e; 4116 graph_vertex_t *sv; 4117 int i; 4118 4119 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 4120 assert(uu_list_numnodes(v->gv_dependents) == 1); 4121 4122 e = uu_list_first(v->gv_dependents); 4123 sv = e->ge_vertex; 4124 graph_remove_edge(sv, v); 4125 4126 for (i = 0; up_svcs[i] != NULL; ++i) { 4127 if (up_svcs_p[i] == v) 4128 up_svcs_p[i] = NULL; 4129 } 4130 4131 if (manifest_import_p == v) 4132 manifest_import_p = NULL; 4133 4134 graph_remove_vertex(v); 4135 4136 if (uu_list_numnodes(sv->gv_dependencies) == 0 && 4137 uu_list_numnodes(sv->gv_dependents) == 0) 4138 graph_remove_vertex(sv); 4139 } 
4140 4141 /* 4142 * If a vertex for fmri exists and it is enabled, send _DISABLE to the 4143 * restarter. If it is running, send _STOP. Send _REMOVE_INSTANCE. Delete 4144 * all property group dependencies, and the dependency on the restarter, 4145 * disposing of vertices as appropriate. If other vertices depend on this 4146 * one, mark it unconfigured and return. Otherwise remove the vertex. Always 4147 * returns 0. 4148 */ 4149 static int 4150 dgraph_remove_instance(const char *fmri, scf_handle_t *h) 4151 { 4152 graph_vertex_t *v; 4153 graph_edge_t *e; 4154 uu_list_t *old_deps; 4155 int err; 4156 4157 log_framework(LOG_DEBUG, "Graph engine: Removing %s.\n", fmri); 4158 4159 MUTEX_LOCK(&dgraph_lock); 4160 4161 v = vertex_get_by_name(fmri); 4162 if (v == NULL) { 4163 MUTEX_UNLOCK(&dgraph_lock); 4164 return (0); 4165 } 4166 4167 /* Send restarter delete event. */ 4168 if (v->gv_flags & GV_CONFIGURED) 4169 graph_unset_restarter(v); 4170 4171 if (milestone > MILESTONE_NONE) { 4172 /* 4173 * Make a list of v's current dependencies so we can 4174 * reevaluate their GV_INSUBGRAPH flags after the dependencies 4175 * are removed. 4176 */ 4177 old_deps = startd_list_create(graph_edge_pool, NULL, 0); 4178 4179 err = uu_list_walk(v->gv_dependencies, 4180 (uu_walk_fn_t *)append_insts, old_deps, 0); 4181 assert(err == 0); 4182 } 4183 4184 delete_instance_dependencies(v, B_TRUE); 4185 4186 /* 4187 * Deleting an instance can both satisfy and unsatisfy dependencies, 4188 * depending on their type. First propagate the stop as a RERR_RESTART 4189 * event -- deletion isn't a fault, just a normal stop. This gives 4190 * dependent services the chance to do a clean shutdown. Then, mark 4191 * the service as unconfigured and propagate the start event for the 4192 * optional_all dependencies that might have become satisfied. 4193 */ 4194 graph_walk_dependents(v, propagate_stop, (void *)RERR_RESTART); 4195 4196 v->gv_flags &= ~GV_CONFIGURED; 4197 4198 graph_walk_dependents(v, propagate_start, NULL); 4199 propagate_satbility(v); 4200 4201 /* 4202 * If there are no (non-service) dependents, the vertex can be 4203 * completely removed. 4204 */ 4205 if (v != milestone && uu_list_numnodes(v->gv_dependents) == 1) 4206 remove_inst_vertex(v); 4207 4208 if (milestone > MILESTONE_NONE) { 4209 void *cookie = NULL; 4210 4211 while ((e = uu_list_teardown(old_deps, &cookie)) != NULL) { 4212 while (eval_subgraph(e->ge_vertex, h) == ECONNABORTED) 4213 libscf_handle_rebind(h); 4214 4215 startd_free(e, sizeof (*e)); 4216 } 4217 4218 uu_list_destroy(old_deps); 4219 } 4220 4221 MUTEX_UNLOCK(&dgraph_lock); 4222 4223 return (0); 4224 } 4225 4226 /* 4227 * Return the eventual (maybe current) milestone in the form of a 4228 * legacy runlevel. 
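 * The mapping is: no milestone (NULL) -> '3', MILESTONE_NONE -> '0',
 * multi-user -> '2', single-user -> 'S', multi-user-server -> '3'.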
4229 */ 4230 static char 4231 target_milestone_as_runlevel() 4232 { 4233 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 4234 4235 if (milestone == NULL) 4236 return ('3'); 4237 else if (milestone == MILESTONE_NONE) 4238 return ('0'); 4239 4240 if (strcmp(milestone->gv_name, multi_user_fmri) == 0) 4241 return ('2'); 4242 else if (strcmp(milestone->gv_name, single_user_fmri) == 0) 4243 return ('S'); 4244 else if (strcmp(milestone->gv_name, multi_user_svr_fmri) == 0) 4245 return ('3'); 4246 4247 #ifndef NDEBUG 4248 (void) fprintf(stderr, "%s:%d: Unknown milestone name \"%s\".\n", 4249 __FILE__, __LINE__, milestone->gv_name); 4250 #endif 4251 abort(); 4252 /* NOTREACHED */ 4253 } 4254 4255 static struct { 4256 char rl; 4257 int sig; 4258 } init_sigs[] = { 4259 { 'S', SIGBUS }, 4260 { '0', SIGINT }, 4261 { '1', SIGQUIT }, 4262 { '2', SIGILL }, 4263 { '3', SIGTRAP }, 4264 { '4', SIGIOT }, 4265 { '5', SIGEMT }, 4266 { '6', SIGFPE }, 4267 { 0, 0 } 4268 }; 4269 4270 static void 4271 signal_init(char rl) 4272 { 4273 pid_t init_pid; 4274 int i; 4275 4276 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 4277 4278 if (zone_getattr(getzoneid(), ZONE_ATTR_INITPID, &init_pid, 4279 sizeof (init_pid)) != sizeof (init_pid)) { 4280 log_error(LOG_NOTICE, "Could not get pid to signal init.\n"); 4281 return; 4282 } 4283 4284 for (i = 0; init_sigs[i].rl != 0; ++i) 4285 if (init_sigs[i].rl == rl) 4286 break; 4287 4288 if (init_sigs[i].rl != 0) { 4289 if (kill(init_pid, init_sigs[i].sig) != 0) { 4290 switch (errno) { 4291 case EPERM: 4292 case ESRCH: 4293 log_error(LOG_NOTICE, "Could not signal init: " 4294 "%s.\n", strerror(errno)); 4295 break; 4296 4297 case EINVAL: 4298 default: 4299 bad_error("kill", errno); 4300 } 4301 } 4302 } 4303 } 4304 4305 /* 4306 * This is called when one of the major milestones changes state, or when 4307 * init is signalled and tells us it was told to change runlevel. We wait 4308 * to reach the milestone because this allows /etc/inittab entries to retain 4309 * some boot ordering: historically, entries could place themselves before/after 4310 * the running of /sbin/rcX scripts but we can no longer make the 4311 * distinction because the /sbin/rcX scripts no longer exist as punctuation 4312 * marks in /etc/inittab. 4313 * 4314 * Also, we only trigger an update when we reach the eventual target 4315 * milestone: without this, an /etc/inittab entry marked only for 4316 * runlevel 2 would be executed for runlevel 3, which is not how 4317 * /etc/inittab entries work. 4318 * 4319 * If we're single user coming online, then we set utmpx to the target 4320 * runlevel so that legacy scripts can work as expected. 4321 */ 4322 static void 4323 graph_runlevel_changed(char rl, int online) 4324 { 4325 char trl; 4326 4327 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 4328 4329 trl = target_milestone_as_runlevel(); 4330 4331 if (online) { 4332 if (rl == trl) { 4333 current_runlevel = trl; 4334 signal_init(trl); 4335 } else if (rl == 'S') { 4336 /* 4337 * At boot, set the entry early for the benefit of the 4338 * legacy init scripts. 4339 */ 4340 utmpx_set_runlevel(trl, 'S', B_FALSE); 4341 } 4342 } else { 4343 if (rl == '3' && trl == '2') { 4344 current_runlevel = trl; 4345 signal_init(trl); 4346 } else if (rl == '2' && trl == 'S') { 4347 current_runlevel = trl; 4348 signal_init(trl); 4349 } 4350 } 4351 } 4352 4353 /* 4354 * Move to a backwards-compatible runlevel by executing the appropriate 4355 * /etc/rc?.d/K* scripts and/or setting the milestone. 
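 *
 * In outline: changes to 'S', '2' and '3' are effected as milestone
 * transitions, so signalling init is left to graph_runlevel_changed();
 * for '0', '1', '4', '5' and '6' init is signalled directly (mark_rl),
 * and '0', '5' and '6' additionally set 'halting' so that do_uadmin()
 * runs once the remaining services have come down.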
4356 * 4357 * Returns 4358 * 0 - success 4359 * ECONNRESET - success, but handle was reset 4360 * ECONNABORTED - repository connection broken 4361 * ECANCELED - pg was deleted 4362 */ 4363 static int 4364 dgraph_set_runlevel(scf_propertygroup_t *pg, scf_property_t *prop) 4365 { 4366 char rl; 4367 scf_handle_t *h; 4368 int r; 4369 const char *ms = NULL; /* what to commit as options/milestone */ 4370 boolean_t rebound = B_FALSE; 4371 int mark_rl = 0; 4372 4373 const char * const stop = "stop"; 4374 4375 r = libscf_extract_runlevel(prop, &rl); 4376 switch (r) { 4377 case 0: 4378 break; 4379 4380 case ECONNABORTED: 4381 case ECANCELED: 4382 return (r); 4383 4384 case EINVAL: 4385 case ENOENT: 4386 log_error(LOG_WARNING, "runlevel property is misconfigured; " 4387 "ignoring.\n"); 4388 /* delete the bad property */ 4389 goto nolock_out; 4390 4391 default: 4392 bad_error("libscf_extract_runlevel", r); 4393 } 4394 4395 switch (rl) { 4396 case 's': 4397 rl = 'S'; 4398 /* FALLTHROUGH */ 4399 4400 case 'S': 4401 case '2': 4402 case '3': 4403 /* 4404 * These cases cause a milestone change, so 4405 * graph_runlevel_changed() will eventually deal with 4406 * signalling init. 4407 */ 4408 break; 4409 4410 case '0': 4411 case '1': 4412 case '4': 4413 case '5': 4414 case '6': 4415 mark_rl = 1; 4416 break; 4417 4418 default: 4419 log_framework(LOG_NOTICE, "Unknown runlevel '%c'.\n", rl); 4420 ms = NULL; 4421 goto nolock_out; 4422 } 4423 4424 h = scf_pg_handle(pg); 4425 4426 MUTEX_LOCK(&dgraph_lock); 4427 4428 /* 4429 * Since this triggers no milestone changes, force it by hand. 4430 */ 4431 if (current_runlevel == '4' && rl == '3') 4432 mark_rl = 1; 4433 4434 /* 4435 * 1. If we are here after an "init X": 4436 * 4437 * init X 4438 * init/lscf_set_runlevel() 4439 * process_pg_event() 4440 * dgraph_set_runlevel() 4441 * 4442 * then we haven't passed through graph_runlevel_changed() yet, 4443 * therefore 'current_runlevel' has not changed for sure but 'rl' has. 4444 * In consequence, if 'rl' is lower than 'current_runlevel', we change 4445 * the system runlevel and execute the appropriate /etc/rc?.d/K* scripts 4446 * past this test. 4447 * 4448 * 2. On the other hand, if we are here after a "svcadm milestone": 4449 * 4450 * svcadm milestone X 4451 * dgraph_set_milestone() 4452 * handle_graph_update_event() 4453 * dgraph_set_instance_state() 4454 * graph_post_X_[online|offline]() 4455 * graph_runlevel_changed() 4456 * signal_init() 4457 * init/lscf_set_runlevel() 4458 * process_pg_event() 4459 * dgraph_set_runlevel() 4460 * 4461 * then we already passed through graph_runlevel_changed() (by the way 4462 * of dgraph_set_milestone()) and 'current_runlevel' may have changed 4463 * and already be equal to 'rl' so we are going to return immediately 4464 * from dgraph_set_runlevel() without changing the system runlevel and 4465 * without executing the /etc/rc?.d/K* scripts. 4466 */ 4467 if (rl == current_runlevel) { 4468 ms = NULL; 4469 goto out; 4470 } 4471 4472 log_framework(LOG_DEBUG, "Changing to runlevel '%c'.\n", rl); 4473 4474 /* 4475 * Make sure stop rc scripts see the new settings via who -r. 4476 */ 4477 utmpx_set_runlevel(rl, current_runlevel, B_TRUE); 4478 4479 /* 4480 * Some run levels don't have a direct correspondence to any 4481 * milestones, so we have to signal init directly. 4482 */ 4483 if (mark_rl) { 4484 current_runlevel = rl; 4485 signal_init(rl); 4486 } 4487 4488 switch (rl) { 4489 case 'S': 4490 uu_warn("The system is coming down for administration. 
" 4491 "Please wait.\n"); 4492 fork_rc_script(rl, stop, B_FALSE); 4493 ms = single_user_fmri; 4494 go_single_user_mode = B_TRUE; 4495 break; 4496 4497 case '0': 4498 fork_rc_script(rl, stop, B_TRUE); 4499 halting = AD_HALT; 4500 goto uadmin; 4501 4502 case '5': 4503 fork_rc_script(rl, stop, B_TRUE); 4504 halting = AD_POWEROFF; 4505 goto uadmin; 4506 4507 case '6': 4508 fork_rc_script(rl, stop, B_TRUE); 4509 halting = AD_BOOT; 4510 goto uadmin; 4511 4512 uadmin: 4513 uu_warn("The system is coming down. Please wait.\n"); 4514 ms = "none"; 4515 4516 /* 4517 * We can't wait until all services are offline since this 4518 * thread is responsible for taking them offline. Instead we 4519 * set halting to the second argument for uadmin() and call 4520 * do_uadmin() from dgraph_set_instance_state() when 4521 * appropriate. 4522 */ 4523 break; 4524 4525 case '1': 4526 if (current_runlevel != 'S') { 4527 uu_warn("Changing to state 1.\n"); 4528 fork_rc_script(rl, stop, B_FALSE); 4529 } else { 4530 uu_warn("The system is coming up for administration. " 4531 "Please wait.\n"); 4532 } 4533 ms = single_user_fmri; 4534 go_to_level1 = B_TRUE; 4535 break; 4536 4537 case '2': 4538 if (current_runlevel == '3' || current_runlevel == '4') 4539 fork_rc_script(rl, stop, B_FALSE); 4540 ms = multi_user_fmri; 4541 break; 4542 4543 case '3': 4544 case '4': 4545 ms = "all"; 4546 break; 4547 4548 default: 4549 #ifndef NDEBUG 4550 (void) fprintf(stderr, "%s:%d: Uncaught case %d ('%c').\n", 4551 __FILE__, __LINE__, rl, rl); 4552 #endif 4553 abort(); 4554 } 4555 4556 out: 4557 MUTEX_UNLOCK(&dgraph_lock); 4558 4559 nolock_out: 4560 switch (r = libscf_clear_runlevel(pg, ms)) { 4561 case 0: 4562 break; 4563 4564 case ECONNABORTED: 4565 libscf_handle_rebind(h); 4566 rebound = B_TRUE; 4567 goto nolock_out; 4568 4569 case ECANCELED: 4570 break; 4571 4572 case EPERM: 4573 case EACCES: 4574 case EROFS: 4575 log_error(LOG_NOTICE, "Could not delete \"%s/%s\" property: " 4576 "%s.\n", SCF_PG_OPTIONS, "runlevel", strerror(r)); 4577 break; 4578 4579 default: 4580 bad_error("libscf_clear_runlevel", r); 4581 } 4582 4583 return (rebound ? ECONNRESET : 0); 4584 } 4585 4586 static int 4587 mark_subgraph(graph_edge_t *e, void *arg) 4588 { 4589 graph_vertex_t *v; 4590 int r; 4591 int optional = (int)arg; 4592 4593 v = e->ge_vertex; 4594 4595 /* If it's already in the subgraph, skip. */ 4596 if (v->gv_flags & GV_INSUBGRAPH) 4597 return (UU_WALK_NEXT); 4598 4599 /* 4600 * Keep track if walk has entered an optional dependency group 4601 */ 4602 if (v->gv_type == GVT_GROUP && v->gv_depgroup == DEPGRP_OPTIONAL_ALL) { 4603 optional = 1; 4604 } 4605 /* 4606 * Quit if we are in an optional dependency group and the instance 4607 * is disabled 4608 */ 4609 if (optional && (v->gv_type == GVT_INST) && 4610 (!(v->gv_flags & GV_ENBLD_NOOVR))) 4611 return (UU_WALK_NEXT); 4612 4613 v->gv_flags |= GV_INSUBGRAPH; 4614 4615 /* Skip all excluded dependencies. */ 4616 if (v->gv_type == GVT_GROUP && v->gv_depgroup == DEPGRP_EXCLUDE_ALL) 4617 return (UU_WALK_NEXT); 4618 4619 r = uu_list_walk(v->gv_dependencies, (uu_walk_fn_t *)mark_subgraph, 4620 (void *)optional, 0); 4621 assert(r == 0); 4622 return (UU_WALK_NEXT); 4623 } 4624 4625 /* 4626 * "Restrict" the graph to dependencies of fmri. We implement it by walking 4627 * all services, override-disabling those which are not descendents of the 4628 * instance, and removing any enable-override for the rest. 
milestone is set 4629 * to the vertex which represents fmri so that the other graph operations may 4630 * act appropriately. 4631 * 4632 * If norepository is true, the function will not change the repository. 4633 * 4634 * The decision to change the system run level in accordance with the milestone 4635 * is taken in dgraph_set_runlevel(). 4636 * 4637 * Returns 4638 * 0 - success 4639 * ECONNRESET - success, but handle was rebound 4640 * EINVAL - fmri is invalid (error is logged) 4641 * EALREADY - the milestone is already set to fmri 4642 * ENOENT - a configured vertex does not exist for fmri (an error is logged) 4643 */ 4644 static int 4645 dgraph_set_milestone(const char *fmri, scf_handle_t *h, boolean_t norepository) 4646 { 4647 const char *cfmri, *fs; 4648 graph_vertex_t *nm, *v; 4649 int ret = 0, r; 4650 scf_instance_t *inst; 4651 boolean_t isall, isnone, rebound = B_FALSE; 4652 4653 /* Validate fmri */ 4654 isall = (strcmp(fmri, "all") == 0); 4655 isnone = (strcmp(fmri, "none") == 0); 4656 4657 if (!isall && !isnone) { 4658 if (fmri_canonify(fmri, (char **)&cfmri, B_FALSE) == EINVAL) 4659 goto reject; 4660 4661 if (strcmp(cfmri, single_user_fmri) != 0 && 4662 strcmp(cfmri, multi_user_fmri) != 0 && 4663 strcmp(cfmri, multi_user_svr_fmri) != 0) { 4664 startd_free((void *)cfmri, max_scf_fmri_size); 4665 reject: 4666 log_framework(LOG_WARNING, 4667 "Rejecting request for invalid milestone \"%s\".\n", 4668 fmri); 4669 return (EINVAL); 4670 } 4671 } 4672 4673 inst = safe_scf_instance_create(h); 4674 4675 MUTEX_LOCK(&dgraph_lock); 4676 4677 if (milestone == NULL) { 4678 if (isall) { 4679 log_framework(LOG_DEBUG, 4680 "Milestone already set to all.\n"); 4681 ret = EALREADY; 4682 goto out; 4683 } 4684 } else if (milestone == MILESTONE_NONE) { 4685 if (isnone) { 4686 log_framework(LOG_DEBUG, 4687 "Milestone already set to none.\n"); 4688 ret = EALREADY; 4689 goto out; 4690 } 4691 } else { 4692 if (!isall && !isnone && 4693 strcmp(cfmri, milestone->gv_name) == 0) { 4694 log_framework(LOG_DEBUG, 4695 "Milestone already set to %s.\n", cfmri); 4696 ret = EALREADY; 4697 goto out; 4698 } 4699 } 4700 4701 if (!isall && !isnone) { 4702 nm = vertex_get_by_name(cfmri); 4703 if (nm == NULL || !(nm->gv_flags & GV_CONFIGURED)) { 4704 log_framework(LOG_WARNING, "Cannot set milestone to %s " 4705 "because no such service exists.\n", cfmri); 4706 ret = ENOENT; 4707 goto out; 4708 } 4709 } 4710 4711 log_framework(LOG_DEBUG, "Changing milestone to %s.\n", fmri); 4712 4713 /* 4714 * Set milestone, removing the old one if this was the last reference. 4715 */ 4716 if (milestone > MILESTONE_NONE && 4717 (milestone->gv_flags & GV_CONFIGURED) == 0) 4718 remove_inst_vertex(milestone); 4719 4720 if (isall) 4721 milestone = NULL; 4722 else if (isnone) 4723 milestone = MILESTONE_NONE; 4724 else 4725 milestone = nm; 4726 4727 /* Clear all GV_INSUBGRAPH bits. */ 4728 for (v = uu_list_first(dgraph); v != NULL; v = uu_list_next(dgraph, v)) 4729 v->gv_flags &= ~GV_INSUBGRAPH; 4730 4731 if (!isall && !isnone) { 4732 /* Set GV_INSUBGRAPH for milestone & descendents. */ 4733 milestone->gv_flags |= GV_INSUBGRAPH; 4734 4735 r = uu_list_walk(milestone->gv_dependencies, 4736 (uu_walk_fn_t *)mark_subgraph, NULL, 0); 4737 assert(r == 0); 4738 } 4739 4740 /* Un-override services in the subgraph & override-disable the rest. 
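Running
	 * services that fall outside the subgraph are counted in
	 * non_subgraph_svcs; if a halt is pending and that count is already
	 * zero, do_uadmin() is invoked below.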
*/ 4741 if (norepository) 4742 goto out; 4743 4744 non_subgraph_svcs = 0; 4745 for (v = uu_list_first(dgraph); 4746 v != NULL; 4747 v = uu_list_next(dgraph, v)) { 4748 if (v->gv_type != GVT_INST || 4749 (v->gv_flags & GV_CONFIGURED) == 0) 4750 continue; 4751 4752 again: 4753 r = scf_handle_decode_fmri(h, v->gv_name, NULL, NULL, inst, 4754 NULL, NULL, SCF_DECODE_FMRI_EXACT); 4755 if (r != 0) { 4756 switch (scf_error()) { 4757 case SCF_ERROR_CONNECTION_BROKEN: 4758 default: 4759 libscf_handle_rebind(h); 4760 rebound = B_TRUE; 4761 goto again; 4762 4763 case SCF_ERROR_NOT_FOUND: 4764 continue; 4765 4766 case SCF_ERROR_HANDLE_MISMATCH: 4767 case SCF_ERROR_INVALID_ARGUMENT: 4768 case SCF_ERROR_CONSTRAINT_VIOLATED: 4769 case SCF_ERROR_NOT_BOUND: 4770 bad_error("scf_handle_decode_fmri", 4771 scf_error()); 4772 } 4773 } 4774 4775 if (isall || (v->gv_flags & GV_INSUBGRAPH)) { 4776 r = libscf_delete_enable_ovr(inst); 4777 fs = "libscf_delete_enable_ovr"; 4778 } else { 4779 assert(isnone || (v->gv_flags & GV_INSUBGRAPH) == 0); 4780 4781 if (inst_running(v)) 4782 ++non_subgraph_svcs; 4783 4784 if (has_running_nonsubgraph_dependents(v)) 4785 continue; 4786 4787 r = libscf_set_enable_ovr(inst, 0); 4788 fs = "libscf_set_enable_ovr"; 4789 } 4790 switch (r) { 4791 case 0: 4792 case ECANCELED: 4793 break; 4794 4795 case ECONNABORTED: 4796 libscf_handle_rebind(h); 4797 rebound = B_TRUE; 4798 goto again; 4799 4800 case EPERM: 4801 case EROFS: 4802 log_error(LOG_WARNING, 4803 "Could not set %s/%s for %s: %s.\n", 4804 SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 4805 v->gv_name, strerror(r)); 4806 break; 4807 4808 default: 4809 bad_error(fs, r); 4810 } 4811 } 4812 4813 if (halting != -1) { 4814 if (non_subgraph_svcs > 1) 4815 uu_warn("%d system services are now being stopped.\n", 4816 non_subgraph_svcs); 4817 else if (non_subgraph_svcs == 1) 4818 uu_warn("One system service is now being stopped.\n"); 4819 else if (non_subgraph_svcs == 0) 4820 do_uadmin(); 4821 } 4822 4823 ret = rebound ? ECONNRESET : 0; 4824 4825 out: 4826 MUTEX_UNLOCK(&dgraph_lock); 4827 if (!isall && !isnone) 4828 startd_free((void *)cfmri, max_scf_fmri_size); 4829 scf_instance_destroy(inst); 4830 return (ret); 4831 } 4832 4833 4834 /* 4835 * Returns 0, ECONNABORTED, or EINVAL. 4836 */ 4837 static int 4838 handle_graph_update_event(scf_handle_t *h, graph_protocol_event_t *e) 4839 { 4840 int r; 4841 4842 switch (e->gpe_type) { 4843 case GRAPH_UPDATE_RELOAD_GRAPH: 4844 log_error(LOG_WARNING, 4845 "graph_event: reload graph unimplemented\n"); 4846 break; 4847 4848 case GRAPH_UPDATE_STATE_CHANGE: { 4849 protocol_states_t *states = e->gpe_data; 4850 4851 switch (r = dgraph_set_instance_state(h, e->gpe_inst, 4852 states->ps_state, states->ps_err)) { 4853 case 0: 4854 case ENOENT: 4855 break; 4856 4857 case ECONNABORTED: 4858 return (ECONNABORTED); 4859 4860 case EINVAL: 4861 default: 4862 #ifndef NDEBUG 4863 (void) fprintf(stderr, "dgraph_set_instance_state() " 4864 "failed with unexpected error %d at %s:%d.\n", r, 4865 __FILE__, __LINE__); 4866 #endif 4867 abort(); 4868 } 4869 4870 startd_free(states, sizeof (protocol_states_t)); 4871 break; 4872 } 4873 4874 default: 4875 log_error(LOG_WARNING, 4876 "graph_event_loop received an unknown event: %d\n", 4877 e->gpe_type); 4878 break; 4879 } 4880 4881 return (0); 4882 } 4883 4884 /* 4885 * graph_event_thread() 4886 * Wait for state changes from the restarters. 
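 *
 * Events are dequeued from the graph protocol queue under gu_lock and
 * handed to handle_graph_update_event(); if the repository connection
 * breaks, the handle is rebound and the event is retried, and events
 * which still cannot be handled are requeued.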
4887 */ 4888 /*ARGSUSED*/ 4889 void * 4890 graph_event_thread(void *unused) 4891 { 4892 scf_handle_t *h; 4893 int err; 4894 4895 h = libscf_handle_create_bound_loop(); 4896 4897 /*CONSTCOND*/ 4898 while (1) { 4899 graph_protocol_event_t *e; 4900 4901 MUTEX_LOCK(&gu->gu_lock); 4902 4903 while (gu->gu_wakeup == 0) 4904 (void) pthread_cond_wait(&gu->gu_cv, &gu->gu_lock); 4905 4906 gu->gu_wakeup = 0; 4907 4908 while ((e = graph_event_dequeue()) != NULL) { 4909 MUTEX_LOCK(&e->gpe_lock); 4910 MUTEX_UNLOCK(&gu->gu_lock); 4911 4912 while ((err = handle_graph_update_event(h, e)) == 4913 ECONNABORTED) 4914 libscf_handle_rebind(h); 4915 4916 if (err == 0) 4917 graph_event_release(e); 4918 else 4919 graph_event_requeue(e); 4920 4921 MUTEX_LOCK(&gu->gu_lock); 4922 } 4923 4924 MUTEX_UNLOCK(&gu->gu_lock); 4925 } 4926 4927 /* 4928 * Unreachable for now -- there's currently no graceful cleanup 4929 * called on exit(). 4930 */ 4931 MUTEX_UNLOCK(&gu->gu_lock); 4932 scf_handle_destroy(h); 4933 return (NULL); 4934 } 4935 4936 static void 4937 set_initial_milestone(scf_handle_t *h) 4938 { 4939 scf_instance_t *inst; 4940 char *fmri, *cfmri; 4941 size_t sz; 4942 int r; 4943 4944 inst = safe_scf_instance_create(h); 4945 fmri = startd_alloc(max_scf_fmri_size); 4946 4947 /* 4948 * If -m milestone= was specified, we want to set options_ovr/milestone 4949 * to it. Otherwise we want to read what the milestone should be set 4950 * to. Either way we need our inst. 4951 */ 4952 get_self: 4953 if (scf_handle_decode_fmri(h, SCF_SERVICE_STARTD, NULL, NULL, inst, 4954 NULL, NULL, SCF_DECODE_FMRI_EXACT) != 0) { 4955 switch (scf_error()) { 4956 case SCF_ERROR_CONNECTION_BROKEN: 4957 libscf_handle_rebind(h); 4958 goto get_self; 4959 4960 case SCF_ERROR_NOT_FOUND: 4961 if (st->st_subgraph != NULL && 4962 st->st_subgraph[0] != '\0') { 4963 sz = strlcpy(fmri, st->st_subgraph, 4964 max_scf_fmri_size); 4965 assert(sz < max_scf_fmri_size); 4966 } else { 4967 fmri[0] = '\0'; 4968 } 4969 break; 4970 4971 case SCF_ERROR_INVALID_ARGUMENT: 4972 case SCF_ERROR_CONSTRAINT_VIOLATED: 4973 case SCF_ERROR_HANDLE_MISMATCH: 4974 default: 4975 bad_error("scf_handle_decode_fmri", scf_error()); 4976 } 4977 } else { 4978 if (st->st_subgraph != NULL && st->st_subgraph[0] != '\0') { 4979 scf_propertygroup_t *pg; 4980 4981 pg = safe_scf_pg_create(h); 4982 4983 sz = strlcpy(fmri, st->st_subgraph, max_scf_fmri_size); 4984 assert(sz < max_scf_fmri_size); 4985 4986 r = libscf_inst_get_or_add_pg(inst, SCF_PG_OPTIONS_OVR, 4987 SCF_PG_OPTIONS_OVR_TYPE, SCF_PG_OPTIONS_OVR_FLAGS, 4988 pg); 4989 switch (r) { 4990 case 0: 4991 break; 4992 4993 case ECONNABORTED: 4994 libscf_handle_rebind(h); 4995 goto get_self; 4996 4997 case EPERM: 4998 case EACCES: 4999 case EROFS: 5000 log_error(LOG_WARNING, "Could not set %s/%s: " 5001 "%s.\n", SCF_PG_OPTIONS_OVR, 5002 SCF_PROPERTY_MILESTONE, strerror(r)); 5003 /* FALLTHROUGH */ 5004 5005 case ECANCELED: 5006 sz = strlcpy(fmri, st->st_subgraph, 5007 max_scf_fmri_size); 5008 assert(sz < max_scf_fmri_size); 5009 break; 5010 5011 default: 5012 bad_error("libscf_inst_get_or_add_pg", r); 5013 } 5014 5015 r = libscf_clear_runlevel(pg, fmri); 5016 switch (r) { 5017 case 0: 5018 break; 5019 5020 case ECONNABORTED: 5021 libscf_handle_rebind(h); 5022 goto get_self; 5023 5024 case EPERM: 5025 case EACCES: 5026 case EROFS: 5027 log_error(LOG_WARNING, "Could not set %s/%s: " 5028 "%s.\n", SCF_PG_OPTIONS_OVR, 5029 SCF_PROPERTY_MILESTONE, strerror(r)); 5030 /* FALLTHROUGH */ 5031 5032 case ECANCELED: 5033 sz = strlcpy(fmri, st->st_subgraph, 
5034 max_scf_fmri_size); 5035 assert(sz < max_scf_fmri_size); 5036 break; 5037 5038 default: 5039 bad_error("libscf_clear_runlevel", r); 5040 } 5041 5042 scf_pg_destroy(pg); 5043 } else { 5044 scf_property_t *prop; 5045 scf_value_t *val; 5046 5047 prop = safe_scf_property_create(h); 5048 val = safe_scf_value_create(h); 5049 5050 r = libscf_get_milestone(inst, prop, val, fmri, 5051 max_scf_fmri_size); 5052 switch (r) { 5053 case 0: 5054 break; 5055 5056 case ECONNABORTED: 5057 libscf_handle_rebind(h); 5058 goto get_self; 5059 5060 case EINVAL: 5061 log_error(LOG_WARNING, "Milestone property is " 5062 "misconfigured. Defaulting to \"all\".\n"); 5063 /* FALLTHROUGH */ 5064 5065 case ECANCELED: 5066 case ENOENT: 5067 fmri[0] = '\0'; 5068 break; 5069 5070 default: 5071 bad_error("libscf_get_milestone", r); 5072 } 5073 5074 scf_value_destroy(val); 5075 scf_property_destroy(prop); 5076 } 5077 } 5078 5079 if (fmri[0] == '\0' || strcmp(fmri, "all") == 0) 5080 goto out; 5081 5082 if (strcmp(fmri, "none") != 0) { 5083 retry: 5084 if (scf_handle_decode_fmri(h, fmri, NULL, NULL, inst, NULL, 5085 NULL, SCF_DECODE_FMRI_EXACT) != 0) { 5086 switch (scf_error()) { 5087 case SCF_ERROR_INVALID_ARGUMENT: 5088 log_error(LOG_WARNING, 5089 "Requested milestone \"%s\" is invalid. " 5090 "Reverting to \"all\".\n", fmri); 5091 goto out; 5092 5093 case SCF_ERROR_CONSTRAINT_VIOLATED: 5094 log_error(LOG_WARNING, "Requested milestone " 5095 "\"%s\" does not specify an instance. " 5096 "Reverting to \"all\".\n", fmri); 5097 goto out; 5098 5099 case SCF_ERROR_CONNECTION_BROKEN: 5100 libscf_handle_rebind(h); 5101 goto retry; 5102 5103 case SCF_ERROR_NOT_FOUND: 5104 log_error(LOG_WARNING, "Requested milestone " 5105 "\"%s\" not in repository. Reverting to " 5106 "\"all\".\n", fmri); 5107 goto out; 5108 5109 case SCF_ERROR_HANDLE_MISMATCH: 5110 default: 5111 bad_error("scf_handle_decode_fmri", 5112 scf_error()); 5113 } 5114 } 5115 5116 r = fmri_canonify(fmri, &cfmri, B_FALSE); 5117 assert(r == 0); 5118 5119 r = dgraph_add_instance(cfmri, inst, B_TRUE); 5120 startd_free(cfmri, max_scf_fmri_size); 5121 switch (r) { 5122 case 0: 5123 break; 5124 5125 case ECONNABORTED: 5126 goto retry; 5127 5128 case EINVAL: 5129 log_error(LOG_WARNING, 5130 "Requested milestone \"%s\" is invalid. " 5131 "Reverting to \"all\".\n", fmri); 5132 goto out; 5133 5134 case ECANCELED: 5135 log_error(LOG_WARNING, 5136 "Requested milestone \"%s\" not " 5137 "in repository. 
Reverting to \"all\".\n", 5138 fmri); 5139 goto out; 5140 5141 case EEXIST: 5142 default: 5143 bad_error("dgraph_add_instance", r); 5144 } 5145 } 5146 5147 log_console(LOG_INFO, "Booting to milestone \"%s\".\n", fmri); 5148 5149 r = dgraph_set_milestone(fmri, h, B_FALSE); 5150 switch (r) { 5151 case 0: 5152 case ECONNRESET: 5153 case EALREADY: 5154 break; 5155 5156 case EINVAL: 5157 case ENOENT: 5158 default: 5159 bad_error("dgraph_set_milestone", r); 5160 } 5161 5162 out: 5163 startd_free(fmri, max_scf_fmri_size); 5164 scf_instance_destroy(inst); 5165 } 5166 5167 void 5168 set_restart_milestone(scf_handle_t *h) 5169 { 5170 scf_instance_t *inst; 5171 scf_property_t *prop; 5172 scf_value_t *val; 5173 char *fmri; 5174 int r; 5175 5176 inst = safe_scf_instance_create(h); 5177 5178 get_self: 5179 if (scf_handle_decode_fmri(h, SCF_SERVICE_STARTD, NULL, NULL, 5180 inst, NULL, NULL, SCF_DECODE_FMRI_EXACT) != 0) { 5181 switch (scf_error()) { 5182 case SCF_ERROR_CONNECTION_BROKEN: 5183 libscf_handle_rebind(h); 5184 goto get_self; 5185 5186 case SCF_ERROR_NOT_FOUND: 5187 break; 5188 5189 case SCF_ERROR_INVALID_ARGUMENT: 5190 case SCF_ERROR_CONSTRAINT_VIOLATED: 5191 case SCF_ERROR_HANDLE_MISMATCH: 5192 default: 5193 bad_error("scf_handle_decode_fmri", scf_error()); 5194 } 5195 5196 scf_instance_destroy(inst); 5197 return; 5198 } 5199 5200 prop = safe_scf_property_create(h); 5201 val = safe_scf_value_create(h); 5202 fmri = startd_alloc(max_scf_fmri_size); 5203 5204 r = libscf_get_milestone(inst, prop, val, fmri, max_scf_fmri_size); 5205 switch (r) { 5206 case 0: 5207 break; 5208 5209 case ECONNABORTED: 5210 libscf_handle_rebind(h); 5211 goto get_self; 5212 5213 case ECANCELED: 5214 case ENOENT: 5215 case EINVAL: 5216 goto out; 5217 5218 default: 5219 bad_error("libscf_get_milestone", r); 5220 } 5221 5222 r = dgraph_set_milestone(fmri, h, B_TRUE); 5223 switch (r) { 5224 case 0: 5225 case ECONNRESET: 5226 case EALREADY: 5227 case EINVAL: 5228 case ENOENT: 5229 break; 5230 5231 default: 5232 bad_error("dgraph_set_milestone", r); 5233 } 5234 5235 out: 5236 startd_free(fmri, max_scf_fmri_size); 5237 scf_value_destroy(val); 5238 scf_property_destroy(prop); 5239 scf_instance_destroy(inst); 5240 } 5241 5242 /* 5243 * void *graph_thread(void *) 5244 * 5245 * Graph management thread. 5246 */ 5247 /*ARGSUSED*/ 5248 void * 5249 graph_thread(void *arg) 5250 { 5251 scf_handle_t *h; 5252 int err; 5253 5254 h = libscf_handle_create_bound_loop(); 5255 5256 if (st->st_initial) 5257 set_initial_milestone(h); 5258 5259 MUTEX_LOCK(&dgraph_lock); 5260 initial_milestone_set = B_TRUE; 5261 err = pthread_cond_broadcast(&initial_milestone_cv); 5262 assert(err == 0); 5263 MUTEX_UNLOCK(&dgraph_lock); 5264 5265 libscf_populate_graph(h); 5266 5267 if (!st->st_initial) 5268 set_restart_milestone(h); 5269 5270 MUTEX_LOCK(&st->st_load_lock); 5271 st->st_load_complete = 1; 5272 (void) pthread_cond_broadcast(&st->st_load_cv); 5273 MUTEX_UNLOCK(&st->st_load_lock); 5274 5275 MUTEX_LOCK(&dgraph_lock); 5276 /* 5277 * Now that we've set st_load_complete we need to check can_come_up() 5278 * since if we booted to a milestone, then there won't be any more 5279 * state updates. 
5280 */ 5281 if (!go_single_user_mode && !go_to_level1 && 5282 halting == -1) { 5283 if (!can_come_up() && !sulogin_thread_running) { 5284 (void) startd_thread_create(sulogin_thread, NULL); 5285 sulogin_thread_running = B_TRUE; 5286 } 5287 } 5288 MUTEX_UNLOCK(&dgraph_lock); 5289 5290 (void) pthread_mutex_lock(&gu->gu_freeze_lock); 5291 5292 /*CONSTCOND*/ 5293 while (1) { 5294 (void) pthread_cond_wait(&gu->gu_freeze_cv, 5295 &gu->gu_freeze_lock); 5296 } 5297 5298 /* 5299 * Unreachable for now -- there's currently no graceful cleanup 5300 * called on exit(). 5301 */ 5302 (void) pthread_mutex_unlock(&gu->gu_freeze_lock); 5303 scf_handle_destroy(h); 5304 5305 return (NULL); 5306 } 5307 5308 5309 /* 5310 * int next_action() 5311 * Given an array of timestamps 'a' with 'num' elements, find the 5312 * lowest non-zero timestamp and return its index. If there are no 5313 * non-zero elements, return -1. 5314 */ 5315 static int 5316 next_action(hrtime_t *a, int num) 5317 { 5318 hrtime_t t = 0; 5319 int i = 0, smallest = -1; 5320 5321 for (i = 0; i < num; i++) { 5322 if (t == 0) { 5323 t = a[i]; 5324 smallest = i; 5325 } else if (a[i] != 0 && a[i] < t) { 5326 t = a[i]; 5327 smallest = i; 5328 } 5329 } 5330 5331 if (t == 0) 5332 return (-1); 5333 else 5334 return (smallest); 5335 } 5336 5337 /* 5338 * void process_actions() 5339 * Process actions requested by the administrator. Possibilities include: 5340 * refresh, restart, maintenance mode off, maintenance mode on, 5341 * maintenance mode immediate, and degraded. 5342 * 5343 * The set of pending actions is represented in the repository as a 5344 * per-instance property group, with each action being a single property 5345 * in that group. This property group is converted to an array, with each 5346 * action type having an array slot. The actions in the array at the 5347 * time process_actions() is called are acted on in the order of the 5348 * timestamp (which is the value stored in the slot). A value of zero 5349 * indicates that there is no pending action of the type associated with 5350 * a particular slot. 5351 * 5352 * Sending an action event multiple times before the restarter has a 5353 * chance to process that action will force it to be run at the last 5354 * timestamp where it appears in the ordering. 5355 * 5356 * Turning maintenance mode on trumps all other actions. 5357 * 5358 * Returns 0 or ECONNABORTED. 5359 */ 5360 static int 5361 process_actions(scf_handle_t *h, scf_propertygroup_t *pg, scf_instance_t *inst) 5362 { 5363 scf_property_t *prop = NULL; 5364 scf_value_t *val = NULL; 5365 scf_type_t type; 5366 graph_vertex_t *vertex; 5367 admin_action_t a; 5368 int i, ret = 0, r; 5369 hrtime_t action_ts[NACTIONS]; 5370 char *inst_name; 5371 5372 r = libscf_instance_get_fmri(inst, &inst_name); 5373 switch (r) { 5374 case 0: 5375 break; 5376 5377 case ECONNABORTED: 5378 return (ECONNABORTED); 5379 5380 case ECANCELED: 5381 return (0); 5382 5383 default: 5384 bad_error("libscf_instance_get_fmri", r); 5385 } 5386 5387 MUTEX_LOCK(&dgraph_lock); 5388 5389 vertex = vertex_get_by_name(inst_name); 5390 if (vertex == NULL) { 5391 MUTEX_UNLOCK(&dgraph_lock); 5392 log_framework(LOG_DEBUG, "%s: Can't find graph vertex. 
" 5393 "The instance must have been removed.\n", inst_name); 5394 return (0); 5395 } 5396 5397 prop = safe_scf_property_create(h); 5398 val = safe_scf_value_create(h); 5399 5400 for (i = 0; i < NACTIONS; i++) { 5401 if (scf_pg_get_property(pg, admin_actions[i], prop) != 0) { 5402 switch (scf_error()) { 5403 case SCF_ERROR_CONNECTION_BROKEN: 5404 default: 5405 ret = ECONNABORTED; 5406 goto out; 5407 5408 case SCF_ERROR_DELETED: 5409 goto out; 5410 5411 case SCF_ERROR_NOT_FOUND: 5412 action_ts[i] = 0; 5413 continue; 5414 5415 case SCF_ERROR_HANDLE_MISMATCH: 5416 case SCF_ERROR_INVALID_ARGUMENT: 5417 case SCF_ERROR_NOT_SET: 5418 bad_error("scf_pg_get_property", scf_error()); 5419 } 5420 } 5421 5422 if (scf_property_type(prop, &type) != 0) { 5423 switch (scf_error()) { 5424 case SCF_ERROR_CONNECTION_BROKEN: 5425 default: 5426 ret = ECONNABORTED; 5427 goto out; 5428 5429 case SCF_ERROR_DELETED: 5430 action_ts[i] = 0; 5431 continue; 5432 5433 case SCF_ERROR_NOT_SET: 5434 bad_error("scf_property_type", scf_error()); 5435 } 5436 } 5437 5438 if (type != SCF_TYPE_INTEGER) { 5439 action_ts[i] = 0; 5440 continue; 5441 } 5442 5443 if (scf_property_get_value(prop, val) != 0) { 5444 switch (scf_error()) { 5445 case SCF_ERROR_CONNECTION_BROKEN: 5446 default: 5447 ret = ECONNABORTED; 5448 goto out; 5449 5450 case SCF_ERROR_DELETED: 5451 goto out; 5452 5453 case SCF_ERROR_NOT_FOUND: 5454 case SCF_ERROR_CONSTRAINT_VIOLATED: 5455 action_ts[i] = 0; 5456 continue; 5457 5458 case SCF_ERROR_NOT_SET: 5459 bad_error("scf_property_get_value", 5460 scf_error()); 5461 } 5462 } 5463 5464 r = scf_value_get_integer(val, &action_ts[i]); 5465 assert(r == 0); 5466 } 5467 5468 a = ADMIN_EVENT_MAINT_ON_IMMEDIATE; 5469 if (action_ts[ADMIN_EVENT_MAINT_ON_IMMEDIATE] || 5470 action_ts[ADMIN_EVENT_MAINT_ON]) { 5471 a = action_ts[ADMIN_EVENT_MAINT_ON_IMMEDIATE] ? 5472 ADMIN_EVENT_MAINT_ON_IMMEDIATE : ADMIN_EVENT_MAINT_ON; 5473 5474 vertex_send_event(vertex, admin_events[a]); 5475 r = libscf_unset_action(h, pg, a, action_ts[a]); 5476 switch (r) { 5477 case 0: 5478 case EACCES: 5479 break; 5480 5481 case ECONNABORTED: 5482 ret = ECONNABORTED; 5483 goto out; 5484 5485 case EPERM: 5486 uu_die("Insufficient privilege.\n"); 5487 /* NOTREACHED */ 5488 5489 default: 5490 bad_error("libscf_unset_action", r); 5491 } 5492 } 5493 5494 while ((a = next_action(action_ts, NACTIONS)) != -1) { 5495 log_framework(LOG_DEBUG, 5496 "Graph: processing %s action for %s.\n", admin_actions[a], 5497 inst_name); 5498 5499 if (a == ADMIN_EVENT_REFRESH) { 5500 r = dgraph_refresh_instance(vertex, inst); 5501 switch (r) { 5502 case 0: 5503 case ECANCELED: 5504 case EINVAL: 5505 case -1: 5506 break; 5507 5508 case ECONNABORTED: 5509 /* pg & inst are reset now, so just return. 
*/ 5510 ret = ECONNABORTED; 5511 goto out; 5512 5513 default: 5514 bad_error("dgraph_refresh_instance", r); 5515 } 5516 } 5517 5518 vertex_send_event(vertex, admin_events[a]); 5519 5520 r = libscf_unset_action(h, pg, a, action_ts[a]); 5521 switch (r) { 5522 case 0: 5523 case EACCES: 5524 break; 5525 5526 case ECONNABORTED: 5527 ret = ECONNABORTED; 5528 goto out; 5529 5530 case EPERM: 5531 uu_die("Insufficient privilege.\n"); 5532 /* NOTREACHED */ 5533 5534 default: 5535 bad_error("libscf_unset_action", r); 5536 } 5537 5538 action_ts[a] = 0; 5539 } 5540 5541 out: 5542 MUTEX_UNLOCK(&dgraph_lock); 5543 5544 scf_property_destroy(prop); 5545 scf_value_destroy(val); 5546 startd_free(inst_name, max_scf_fmri_size); 5547 return (ret); 5548 } 5549 5550 /* 5551 * inst and pg_name are scratch space, and are unset on entry. 5552 * Returns 5553 * 0 - success 5554 * ECONNRESET - success, but repository handle rebound 5555 * ECONNABORTED - repository connection broken 5556 */ 5557 static int 5558 process_pg_event(scf_handle_t *h, scf_propertygroup_t *pg, scf_instance_t *inst, 5559 char *pg_name) 5560 { 5561 int r; 5562 scf_property_t *prop; 5563 scf_value_t *val; 5564 char *fmri; 5565 boolean_t rebound = B_FALSE, rebind_inst = B_FALSE; 5566 5567 if (scf_pg_get_name(pg, pg_name, max_scf_value_size) < 0) { 5568 switch (scf_error()) { 5569 case SCF_ERROR_CONNECTION_BROKEN: 5570 default: 5571 return (ECONNABORTED); 5572 5573 case SCF_ERROR_DELETED: 5574 return (0); 5575 5576 case SCF_ERROR_NOT_SET: 5577 bad_error("scf_pg_get_name", scf_error()); 5578 } 5579 } 5580 5581 if (strcmp(pg_name, SCF_PG_GENERAL) == 0 || 5582 strcmp(pg_name, SCF_PG_GENERAL_OVR) == 0) { 5583 r = dgraph_update_general(pg); 5584 switch (r) { 5585 case 0: 5586 case ENOTSUP: 5587 case ECANCELED: 5588 return (0); 5589 5590 case ECONNABORTED: 5591 return (ECONNABORTED); 5592 5593 case -1: 5594 /* Error should have been logged. */ 5595 return (0); 5596 5597 default: 5598 bad_error("dgraph_update_general", r); 5599 } 5600 } else if (strcmp(pg_name, SCF_PG_RESTARTER_ACTIONS) == 0) { 5601 if (scf_pg_get_parent_instance(pg, inst) != 0) { 5602 switch (scf_error()) { 5603 case SCF_ERROR_CONNECTION_BROKEN: 5604 return (ECONNABORTED); 5605 5606 case SCF_ERROR_DELETED: 5607 case SCF_ERROR_CONSTRAINT_VIOLATED: 5608 /* Ignore commands on services. */ 5609 return (0); 5610 5611 case SCF_ERROR_NOT_BOUND: 5612 case SCF_ERROR_HANDLE_MISMATCH: 5613 case SCF_ERROR_NOT_SET: 5614 default: 5615 bad_error("scf_pg_get_parent_instance", 5616 scf_error()); 5617 } 5618 } 5619 5620 return (process_actions(h, pg, inst)); 5621 } 5622 5623 if (strcmp(pg_name, SCF_PG_OPTIONS) != 0 && 5624 strcmp(pg_name, SCF_PG_OPTIONS_OVR) != 0) 5625 return (0); 5626 5627 /* 5628 * We only care about the options[_ovr] property groups of our own 5629 * instance, so get the fmri and compare. Plus, once we know it's 5630 * correct, if the repository connection is broken we know exactly what 5631 * property group we were operating on, and can look it up again. 
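 *
 * That is what the "if (0) { rebind_pg: ... }" construct below is for: it
 * is skipped on the first pass and only entered via goto after the handle
 * has been rebound, to look the instance and property group up again.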
5632 */ 5633 if (scf_pg_get_parent_instance(pg, inst) != 0) { 5634 switch (scf_error()) { 5635 case SCF_ERROR_CONNECTION_BROKEN: 5636 return (ECONNABORTED); 5637 5638 case SCF_ERROR_DELETED: 5639 case SCF_ERROR_CONSTRAINT_VIOLATED: 5640 return (0); 5641 5642 case SCF_ERROR_HANDLE_MISMATCH: 5643 case SCF_ERROR_NOT_BOUND: 5644 case SCF_ERROR_NOT_SET: 5645 default: 5646 bad_error("scf_pg_get_parent_instance", 5647 scf_error()); 5648 } 5649 } 5650 5651 switch (r = libscf_instance_get_fmri(inst, &fmri)) { 5652 case 0: 5653 break; 5654 5655 case ECONNABORTED: 5656 return (ECONNABORTED); 5657 5658 case ECANCELED: 5659 return (0); 5660 5661 default: 5662 bad_error("libscf_instance_get_fmri", r); 5663 } 5664 5665 if (strcmp(fmri, SCF_SERVICE_STARTD) != 0) { 5666 startd_free(fmri, max_scf_fmri_size); 5667 return (0); 5668 } 5669 5670 prop = safe_scf_property_create(h); 5671 val = safe_scf_value_create(h); 5672 5673 if (strcmp(pg_name, SCF_PG_OPTIONS_OVR) == 0) { 5674 /* See if we need to set the runlevel. */ 5675 /* CONSTCOND */ 5676 if (0) { 5677 rebind_pg: 5678 libscf_handle_rebind(h); 5679 rebound = B_TRUE; 5680 5681 r = libscf_lookup_instance(SCF_SERVICE_STARTD, inst); 5682 switch (r) { 5683 case 0: 5684 break; 5685 5686 case ECONNABORTED: 5687 goto rebind_pg; 5688 5689 case ENOENT: 5690 goto out; 5691 5692 case EINVAL: 5693 case ENOTSUP: 5694 bad_error("libscf_lookup_instance", r); 5695 } 5696 5697 if (scf_instance_get_pg(inst, pg_name, pg) != 0) { 5698 switch (scf_error()) { 5699 case SCF_ERROR_DELETED: 5700 case SCF_ERROR_NOT_FOUND: 5701 goto out; 5702 5703 case SCF_ERROR_CONNECTION_BROKEN: 5704 goto rebind_pg; 5705 5706 case SCF_ERROR_HANDLE_MISMATCH: 5707 case SCF_ERROR_NOT_BOUND: 5708 case SCF_ERROR_NOT_SET: 5709 case SCF_ERROR_INVALID_ARGUMENT: 5710 default: 5711 bad_error("scf_instance_get_pg", 5712 scf_error()); 5713 } 5714 } 5715 } 5716 5717 if (scf_pg_get_property(pg, "runlevel", prop) == 0) { 5718 r = dgraph_set_runlevel(pg, prop); 5719 switch (r) { 5720 case ECONNRESET: 5721 rebound = B_TRUE; 5722 rebind_inst = B_TRUE; 5723 /* FALLTHROUGH */ 5724 5725 case 0: 5726 break; 5727 5728 case ECONNABORTED: 5729 goto rebind_pg; 5730 5731 case ECANCELED: 5732 goto out; 5733 5734 default: 5735 bad_error("dgraph_set_runlevel", r); 5736 } 5737 } else { 5738 switch (scf_error()) { 5739 case SCF_ERROR_CONNECTION_BROKEN: 5740 default: 5741 goto rebind_pg; 5742 5743 case SCF_ERROR_DELETED: 5744 goto out; 5745 5746 case SCF_ERROR_NOT_FOUND: 5747 break; 5748 5749 case SCF_ERROR_INVALID_ARGUMENT: 5750 case SCF_ERROR_HANDLE_MISMATCH: 5751 case SCF_ERROR_NOT_BOUND: 5752 case SCF_ERROR_NOT_SET: 5753 bad_error("scf_pg_get_property", scf_error()); 5754 } 5755 } 5756 } 5757 5758 if (rebind_inst) { 5759 lookup_inst: 5760 r = libscf_lookup_instance(SCF_SERVICE_STARTD, inst); 5761 switch (r) { 5762 case 0: 5763 break; 5764 5765 case ECONNABORTED: 5766 libscf_handle_rebind(h); 5767 rebound = B_TRUE; 5768 goto lookup_inst; 5769 5770 case ENOENT: 5771 goto out; 5772 5773 case EINVAL: 5774 case ENOTSUP: 5775 bad_error("libscf_lookup_instance", r); 5776 } 5777 } 5778 5779 r = libscf_get_milestone(inst, prop, val, fmri, max_scf_fmri_size); 5780 switch (r) { 5781 case 0: 5782 break; 5783 5784 case ECONNABORTED: 5785 libscf_handle_rebind(h); 5786 rebound = B_TRUE; 5787 goto lookup_inst; 5788 5789 case EINVAL: 5790 log_error(LOG_NOTICE, 5791 "%s/%s property of %s is misconfigured.\n", pg_name, 5792 SCF_PROPERTY_MILESTONE, SCF_SERVICE_STARTD); 5793 /* FALLTHROUGH */ 5794 5795 case ECANCELED: 5796 case ENOENT: 5797 
(void) strcpy(fmri, "all"); 5798 break; 5799 5800 default: 5801 bad_error("libscf_get_milestone", r); 5802 } 5803 5804 r = dgraph_set_milestone(fmri, h, B_FALSE); 5805 switch (r) { 5806 case 0: 5807 case ECONNRESET: 5808 case EALREADY: 5809 break; 5810 5811 case EINVAL: 5812 log_error(LOG_WARNING, "Milestone %s is invalid.\n", fmri); 5813 break; 5814 5815 case ENOENT: 5816 log_error(LOG_WARNING, "Milestone %s does not exist.\n", fmri); 5817 break; 5818 5819 default: 5820 bad_error("dgraph_set_milestone", r); 5821 } 5822 5823 out: 5824 startd_free(fmri, max_scf_fmri_size); 5825 scf_value_destroy(val); 5826 scf_property_destroy(prop); 5827 5828 return (rebound ? ECONNRESET : 0); 5829 } 5830 5831 static void 5832 process_delete(char *fmri, scf_handle_t *h) 5833 { 5834 char *lfmri; 5835 const char *inst_name, *pg_name; 5836 5837 lfmri = safe_strdup(fmri); 5838 5839 /* Determine if the FMRI is a property group or instance */ 5840 if (scf_parse_svc_fmri(lfmri, NULL, NULL, &inst_name, &pg_name, 5841 NULL) != SCF_SUCCESS) { 5842 log_error(LOG_WARNING, 5843 "Received invalid FMRI \"%s\" from repository server.\n", 5844 fmri); 5845 } else if (inst_name != NULL && pg_name == NULL) { 5846 (void) dgraph_remove_instance(fmri, h); 5847 } 5848 5849 free(lfmri); 5850 } 5851 5852 /*ARGSUSED*/ 5853 void * 5854 repository_event_thread(void *unused) 5855 { 5856 scf_handle_t *h; 5857 scf_propertygroup_t *pg; 5858 scf_instance_t *inst; 5859 char *fmri = startd_alloc(max_scf_fmri_size); 5860 char *pg_name = startd_alloc(max_scf_value_size); 5861 int r; 5862 5863 h = libscf_handle_create_bound_loop(); 5864 5865 pg = safe_scf_pg_create(h); 5866 inst = safe_scf_instance_create(h); 5867 5868 retry: 5869 if (_scf_notify_add_pgtype(h, SCF_GROUP_FRAMEWORK) != SCF_SUCCESS) { 5870 if (scf_error() == SCF_ERROR_CONNECTION_BROKEN) { 5871 libscf_handle_rebind(h); 5872 } else { 5873 log_error(LOG_WARNING, 5874 "Couldn't set up repository notification " 5875 "for property group type %s: %s\n", 5876 SCF_GROUP_FRAMEWORK, scf_strerror(scf_error())); 5877 5878 (void) sleep(1); 5879 } 5880 5881 goto retry; 5882 } 5883 5884 /*CONSTCOND*/ 5885 while (1) { 5886 ssize_t res; 5887 5888 /* Note: fmri is only set on delete events. */ 5889 res = _scf_notify_wait(pg, fmri, max_scf_fmri_size); 5890 if (res < 0) { 5891 libscf_handle_rebind(h); 5892 goto retry; 5893 } else if (res == 0) { 5894 /* 5895 * property group modified. inst and pg_name are 5896 * pre-allocated scratch space. 5897 */ 5898 if (scf_pg_update(pg) < 0) { 5899 switch (scf_error()) { 5900 case SCF_ERROR_DELETED: 5901 continue; 5902 5903 case SCF_ERROR_CONNECTION_BROKEN: 5904 log_error(LOG_WARNING, 5905 "Lost repository event due to " 5906 "disconnection.\n"); 5907 libscf_handle_rebind(h); 5908 goto retry; 5909 5910 case SCF_ERROR_NOT_BOUND: 5911 case SCF_ERROR_NOT_SET: 5912 default: 5913 bad_error("scf_pg_update", scf_error()); 5914 } 5915 } 5916 5917 r = process_pg_event(h, pg, inst, pg_name); 5918 switch (r) { 5919 case 0: 5920 break; 5921 5922 case ECONNABORTED: 5923 log_error(LOG_WARNING, "Lost repository event " 5924 "due to disconnection.\n"); 5925 libscf_handle_rebind(h); 5926 /* FALLTHROUGH */ 5927 5928 case ECONNRESET: 5929 goto retry; 5930 5931 default: 5932 bad_error("process_pg_event", r); 5933 } 5934 } else { 5935 /* service, instance, or pg deleted. 
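fmri names the deleted
			 * entity; process_delete() above only acts (via
			 * dgraph_remove_instance()) when an entire instance,
			 * rather than just a property group, was deleted.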
*/ 5936 process_delete(fmri, h); 5937 } 5938 } 5939 5940 /*NOTREACHED*/ 5941 return (NULL); 5942 } 5943 5944 void 5945 graph_engine_start() 5946 { 5947 int err; 5948 5949 (void) startd_thread_create(graph_thread, NULL); 5950 5951 MUTEX_LOCK(&dgraph_lock); 5952 while (!initial_milestone_set) { 5953 err = pthread_cond_wait(&initial_milestone_cv, &dgraph_lock); 5954 assert(err == 0); 5955 } 5956 MUTEX_UNLOCK(&dgraph_lock); 5957 5958 (void) startd_thread_create(repository_event_thread, NULL); 5959 (void) startd_thread_create(graph_event_thread, NULL); 5960 } 5961
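/*
 * Note: graph_engine_start() blocks on initial_milestone_cv until
 * graph_thread() has established the initial milestone, and only then starts
 * repository_event_thread() and graph_event_thread(), so neither event
 * thread runs before the initial milestone is known.
 */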