/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * graph.c - master restarter graph engine
 *
 * The graph engine keeps a dependency graph of all service instances on the
 * system, as recorded in the repository. It decides when services should
 * be brought up or down based on service states and dependencies and sends
 * commands to restarters to effect any changes. It also executes
 * administrator commands sent by svcadm via the repository.
 *
 * The graph is stored in uu_list_t *dgraph and its vertices are
 * graph_vertex_t's, each of which has a name and an integer id unique to
 * its name (see dict.c). A vertex's type attribute designates the type
 * of object it represents: GVT_INST for service instances, GVT_SVC for
 * service objects (since service instances may depend on another service,
 * rather than a particular instance), GVT_FILE for files (which services may
 * depend on), and GVT_GROUP for dependencies on multiple objects. GVT_GROUP
 * vertices are necessary because dependency lists may have particular
 * grouping types (require any, require all, optional, or exclude) and
 * event-propagation characteristics.
 *
 * The initial graph is built by libscf_populate_graph() invoking
 * dgraph_add_instance() for each instance in the repository. The function
 * adds a GVT_SVC vertex for the service if one does not already exist, adds
 * a GVT_INST vertex named by the FMRI of the instance, and sets up the edges.
 * The resulting web of vertices & edges associated with an instance's vertex
 * includes
 *
 *     - an edge from the GVT_SVC vertex for the instance's service
 *
 *     - an edge to the GVT_INST vertex of the instance's restarter, if its
 *       restarter is not svc.startd
 *
 *     - edges from other GVT_INST vertices if the instance is a restarter
 *
 *     - for each dependency property group in the instance's "running"
 *       snapshot, an edge to a GVT_GROUP vertex named by the FMRI of the
 *       instance and the name of the property group
 *
 *     - for each value of the "entities" property in each dependency property
 *       group, an edge from the corresponding GVT_GROUP vertex to a
 *       GVT_INST, GVT_SVC, or GVT_FILE vertex
 *
 *     - edges from GVT_GROUP vertices for each dependent instance
 *
 * After the edges are set up the vertex's GV_CONFIGURED flag is set. If
 * there are problems, or if a service is mentioned in a dependency but does
 * not exist in the repository, the GV_CONFIGURED flag will be clear.
 *
 * The graph and all of its vertices are protected by the dgraph_lock mutex.
 * See restarter.c for more information.
 *
 * The properties of an instance fall into two classes: immediate and
 * snapshotted. Immediate properties should have an immediate effect when
 * changed. Snapshotted properties should be read from a snapshot, so they
 * only change when the snapshot changes. The immediate properties used by
 * the graph engine are general/enabled, general/restarter, and the properties
 * in the restarter_actions property group. Since they are immediate, they
 * are not read out of a snapshot. The snapshotted properties used by the
 * graph engine are those in the property groups with type "dependency" and
 * are read out of the "running" snapshot. The "running" snapshot is created
 * by the graph engine as soon as possible, and it is updated, along with
 * in-core copies of the data (dependency information for the graph engine) on
 * receipt of the refresh command from svcadm. In addition, the graph engine
 * updates the "start" snapshot from the "running" snapshot whenever a service
 * comes online.
 */

#include <sys/uadmin.h>
#include <sys/wait.h>

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <libscf.h>
#include <libscf_priv.h>
#include <libuutil.h>
#include <locale.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <sys/statvfs.h>
#include <zone.h>

#include "startd.h"
#include "protocol.h"


#define	MILESTONE_NONE	((graph_vertex_t *)1)

#define	CONSOLE_LOGIN_FMRI	"svc:/system/console-login:default"
#define	FS_MINIMAL_FMRI		"svc:/system/filesystem/minimal:default"

static uu_list_pool_t *graph_edge_pool, *graph_vertex_pool;
static uu_list_t *dgraph;
static pthread_mutex_t dgraph_lock;

/*
 * milestone indicates the current subgraph. When NULL, it is the entire
 * graph. When MILESTONE_NONE, it is the empty graph. Otherwise, it is all
 * services on which the target vertex depends.
 */
static graph_vertex_t *milestone = NULL;
static boolean_t initial_milestone_set = B_FALSE;
static pthread_cond_t initial_milestone_cv = PTHREAD_COND_INITIALIZER;

/* protected by dgraph_lock */
static boolean_t sulogin_thread_running = B_FALSE;
static boolean_t sulogin_running = B_FALSE;
static boolean_t console_login_ready = B_FALSE;

/* Number of services to come down to complete milestone transition. */
static uint_t non_subgraph_svcs;

/*
 * These variables indicate what should be done when we reach the target
 * milestone, i.e., when non_subgraph_svcs == 0. They are acted upon in
 * dgraph_set_instance_state().
 */
static int halting = -1;
static boolean_t go_single_user_mode = B_FALSE;
static boolean_t go_to_level1 = B_FALSE;

/*
 * This tracks the legacy runlevel to ensure we signal init and manage
 * utmpx entries correctly.
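 * (The run-level record kept in utmpx is what utilities such as "who -r"
 * report.)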
 */
static char current_runlevel = '\0';

/* Number of single user threads currently running */
static pthread_mutex_t single_user_thread_lock;
static int single_user_thread_count = 0;

/* Statistics for dependency cycle-checking */
static u_longlong_t dep_inserts = 0;
static u_longlong_t dep_cycle_ns = 0;
static u_longlong_t dep_insert_ns = 0;


static const char * const emsg_invalid_restarter =
    "Restarter FMRI for %s is invalid. Transitioning to maintenance.\n";
static const char * const console_login_fmri = CONSOLE_LOGIN_FMRI;
static const char * const single_user_fmri = SCF_MILESTONE_SINGLE_USER;
static const char * const multi_user_fmri = SCF_MILESTONE_MULTI_USER;
static const char * const multi_user_svr_fmri = SCF_MILESTONE_MULTI_USER_SERVER;


/*
 * These services define the system being "up". If none of them can come
 * online, then we will run sulogin on the console. Note that the install ones
 * are for the miniroot and when installing CDs after the first. can_come_up()
 * does the decision making, and an sulogin_thread() runs sulogin, which can be
 * started by dgraph_set_instance_state() or single_user_thread().
 *
 * NOTE: can_come_up() relies on SCF_MILESTONE_SINGLE_USER being the first
 * entry, which is only used when booting_to_single_user (boot -s) is set.
 * This is because when doing a "boot -s", sulogin is started from specials.c
 * after milestone/single-user comes online, for backwards compatibility.
 * In this case, SCF_MILESTONE_SINGLE_USER needs to be part of up_svcs
 * to ensure sulogin will be spawned if milestone/single-user cannot be reached.
 */
static const char * const up_svcs[] = {
        SCF_MILESTONE_SINGLE_USER,
        CONSOLE_LOGIN_FMRI,
        "svc:/system/install-setup:default",
        "svc:/system/install:default",
        NULL
};

/* This array must have an element for each non-NULL element of up_svcs[]. */
static graph_vertex_t *up_svcs_p[] = { NULL, NULL, NULL, NULL };

/* These are for seed repository magic. See can_come_up(). */
static const char * const manifest_import =
    "svc:/system/manifest-import:default";
static graph_vertex_t *manifest_import_p = NULL;


static char target_milestone_as_runlevel(void);
static void graph_runlevel_changed(char rl, int online);
static int dgraph_set_milestone(const char *, scf_handle_t *, boolean_t);
static void vertex_send_event(graph_vertex_t *v, restarter_event_type_t e);
static boolean_t should_be_in_subgraph(graph_vertex_t *v);

/*
 * graph_vertex_compare()
 *	This function can compare either int *id or graph_vertex_t *gv
 *	values, as the vertex id is always the first element of a
 *	graph_vertex structure.
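 *	For example, vertex_get_by_id() passes a bare &id as the search key to
 *	uu_list_find(dgraph, &id, NULL, NULL); this comparator makes that work
 *	against the full vertices stored in the list.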
223 */ 224 /* ARGSUSED */ 225 static int 226 graph_vertex_compare(const void *lc_arg, const void *rc_arg, void *private) 227 { 228 int lc_id = ((const graph_vertex_t *)lc_arg)->gv_id; 229 int rc_id = *(int *)rc_arg; 230 231 if (lc_id > rc_id) 232 return (1); 233 if (lc_id < rc_id) 234 return (-1); 235 return (0); 236 } 237 238 void 239 graph_init() 240 { 241 graph_edge_pool = startd_list_pool_create("graph_edges", 242 sizeof (graph_edge_t), offsetof(graph_edge_t, ge_link), NULL, 243 UU_LIST_POOL_DEBUG); 244 assert(graph_edge_pool != NULL); 245 246 graph_vertex_pool = startd_list_pool_create("graph_vertices", 247 sizeof (graph_vertex_t), offsetof(graph_vertex_t, gv_link), 248 graph_vertex_compare, UU_LIST_POOL_DEBUG); 249 assert(graph_vertex_pool != NULL); 250 251 (void) pthread_mutex_init(&dgraph_lock, &mutex_attrs); 252 (void) pthread_mutex_init(&single_user_thread_lock, &mutex_attrs); 253 dgraph = startd_list_create(graph_vertex_pool, NULL, UU_LIST_SORTED); 254 assert(dgraph != NULL); 255 256 if (!st->st_initial) 257 current_runlevel = utmpx_get_runlevel(); 258 259 log_framework(LOG_DEBUG, "Initialized graph\n"); 260 } 261 262 static graph_vertex_t * 263 vertex_get_by_name(const char *name) 264 { 265 int id; 266 267 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 268 269 id = dict_lookup_byname(name); 270 if (id == -1) 271 return (NULL); 272 273 return (uu_list_find(dgraph, &id, NULL, NULL)); 274 } 275 276 static graph_vertex_t * 277 vertex_get_by_id(int id) 278 { 279 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 280 281 if (id == -1) 282 return (NULL); 283 284 return (uu_list_find(dgraph, &id, NULL, NULL)); 285 } 286 287 /* 288 * Creates a new vertex with the given name, adds it to the graph, and returns 289 * a pointer to it. The graph lock must be held by this thread on entry. 290 */ 291 static graph_vertex_t * 292 graph_add_vertex(const char *name) 293 { 294 int id; 295 graph_vertex_t *v; 296 void *p; 297 uu_list_index_t idx; 298 299 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 300 301 id = dict_insert(name); 302 303 v = startd_zalloc(sizeof (*v)); 304 305 v->gv_id = id; 306 307 v->gv_name = startd_alloc(strlen(name) + 1); 308 (void) strcpy(v->gv_name, name); 309 310 v->gv_dependencies = startd_list_create(graph_edge_pool, v, 0); 311 v->gv_dependents = startd_list_create(graph_edge_pool, v, 0); 312 313 p = uu_list_find(dgraph, &id, NULL, &idx); 314 assert(p == NULL); 315 316 uu_list_node_init(v, &v->gv_link, graph_vertex_pool); 317 uu_list_insert(dgraph, v, idx); 318 319 return (v); 320 } 321 322 /* 323 * Removes v from the graph and frees it. The graph should be locked by this 324 * thread, and v should have no edges associated with it. 
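 * Both the gv_dependencies and gv_dependents lists must already be empty;
 * the asserts below enforce this.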
325 */ 326 static void 327 graph_remove_vertex(graph_vertex_t *v) 328 { 329 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 330 331 assert(uu_list_numnodes(v->gv_dependencies) == 0); 332 assert(uu_list_numnodes(v->gv_dependents) == 0); 333 334 startd_free(v->gv_name, strlen(v->gv_name) + 1); 335 uu_list_destroy(v->gv_dependencies); 336 uu_list_destroy(v->gv_dependents); 337 uu_list_remove(dgraph, v); 338 339 startd_free(v, sizeof (graph_vertex_t)); 340 } 341 342 static void 343 graph_add_edge(graph_vertex_t *fv, graph_vertex_t *tv) 344 { 345 graph_edge_t *e, *re; 346 int r; 347 348 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 349 350 e = startd_alloc(sizeof (graph_edge_t)); 351 re = startd_alloc(sizeof (graph_edge_t)); 352 353 e->ge_parent = fv; 354 e->ge_vertex = tv; 355 356 re->ge_parent = tv; 357 re->ge_vertex = fv; 358 359 uu_list_node_init(e, &e->ge_link, graph_edge_pool); 360 r = uu_list_insert_before(fv->gv_dependencies, NULL, e); 361 assert(r == 0); 362 363 uu_list_node_init(re, &re->ge_link, graph_edge_pool); 364 r = uu_list_insert_before(tv->gv_dependents, NULL, re); 365 assert(r == 0); 366 } 367 368 static void 369 graph_remove_edge(graph_vertex_t *v, graph_vertex_t *dv) 370 { 371 graph_edge_t *e; 372 373 for (e = uu_list_first(v->gv_dependencies); 374 e != NULL; 375 e = uu_list_next(v->gv_dependencies, e)) { 376 if (e->ge_vertex == dv) { 377 uu_list_remove(v->gv_dependencies, e); 378 startd_free(e, sizeof (graph_edge_t)); 379 break; 380 } 381 } 382 383 for (e = uu_list_first(dv->gv_dependents); 384 e != NULL; 385 e = uu_list_next(dv->gv_dependents, e)) { 386 if (e->ge_vertex == v) { 387 uu_list_remove(dv->gv_dependents, e); 388 startd_free(e, sizeof (graph_edge_t)); 389 break; 390 } 391 } 392 } 393 394 static void 395 graph_walk_dependents(graph_vertex_t *v, void (*func)(graph_vertex_t *, void *), 396 void *arg) 397 { 398 graph_edge_t *e; 399 400 for (e = uu_list_first(v->gv_dependents); 401 e != NULL; 402 e = uu_list_next(v->gv_dependents, e)) 403 func(e->ge_vertex, arg); 404 } 405 406 static void 407 graph_walk_dependencies(graph_vertex_t *v, void (*func)(graph_vertex_t *, 408 void *), void *arg) 409 { 410 graph_edge_t *e; 411 412 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 413 414 for (e = uu_list_first(v->gv_dependencies); 415 e != NULL; 416 e = uu_list_next(v->gv_dependencies, e)) { 417 418 func(e->ge_vertex, arg); 419 } 420 } 421 422 /* 423 * Generic graph walking function. 424 * 425 * Given a vertex, this function will walk either dependencies 426 * (WALK_DEPENDENCIES) or dependents (WALK_DEPENDENTS) of a vertex recursively 427 * for the entire graph. It will avoid cycles and never visit the same vertex 428 * twice. 429 * 430 * We avoid traversing exclusion dependencies, because they are allowed to 431 * create cycles in the graph. When propagating satisfiability, there is no 432 * need to walk exclusion dependencies because exclude_all_satisfied() doesn't 433 * test for satisfiability. 434 * 435 * The walker takes two callbacks. The first is called before examining the 436 * dependents of each vertex. The second is called on each vertex after 437 * examining its dependents. This allows is_path_to() to construct a path only 438 * after the target vertex has been found. 
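 * (child_pre() and child_post() below are the callers of record: the
 * pre-callback tracks depth and detects the target vertex, and the
 * post-callback fills in the path ids on the way back out.)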
439 */ 440 typedef enum { 441 WALK_DEPENDENTS, 442 WALK_DEPENDENCIES 443 } graph_walk_dir_t; 444 445 typedef int (*graph_walk_cb_t)(graph_vertex_t *, void *); 446 447 typedef struct graph_walk_info { 448 graph_walk_dir_t gi_dir; 449 uchar_t *gi_visited; /* vertex bitmap */ 450 int (*gi_pre)(graph_vertex_t *, void *); 451 void (*gi_post)(graph_vertex_t *, void *); 452 void *gi_arg; /* callback arg */ 453 int gi_ret; /* return value */ 454 } graph_walk_info_t; 455 456 static int 457 graph_walk_recurse(graph_edge_t *e, graph_walk_info_t *gip) 458 { 459 uu_list_t *list; 460 int r; 461 graph_vertex_t *v = e->ge_vertex; 462 int i; 463 uint_t b; 464 465 i = v->gv_id / 8; 466 b = 1 << (v->gv_id % 8); 467 468 /* 469 * Check to see if we've visited this vertex already. 470 */ 471 if (gip->gi_visited[i] & b) 472 return (UU_WALK_NEXT); 473 474 gip->gi_visited[i] |= b; 475 476 /* 477 * Don't follow exclusions. 478 */ 479 if (v->gv_type == GVT_GROUP && v->gv_depgroup == DEPGRP_EXCLUDE_ALL) 480 return (UU_WALK_NEXT); 481 482 /* 483 * Call pre-visit callback. If this doesn't terminate the walk, 484 * continue search. 485 */ 486 if ((gip->gi_ret = gip->gi_pre(v, gip->gi_arg)) == UU_WALK_NEXT) { 487 /* 488 * Recurse using appropriate list. 489 */ 490 if (gip->gi_dir == WALK_DEPENDENTS) 491 list = v->gv_dependents; 492 else 493 list = v->gv_dependencies; 494 495 r = uu_list_walk(list, (uu_walk_fn_t *)graph_walk_recurse, 496 gip, 0); 497 assert(r == 0); 498 } 499 500 /* 501 * Callbacks must return either UU_WALK_NEXT or UU_WALK_DONE. 502 */ 503 assert(gip->gi_ret == UU_WALK_NEXT || gip->gi_ret == UU_WALK_DONE); 504 505 /* 506 * If given a post-callback, call the function for every vertex. 507 */ 508 if (gip->gi_post != NULL) 509 (void) gip->gi_post(v, gip->gi_arg); 510 511 /* 512 * Preserve the callback's return value. If the callback returns 513 * UU_WALK_DONE, then we propagate that to the caller in order to 514 * terminate the walk. 515 */ 516 return (gip->gi_ret); 517 } 518 519 static void 520 graph_walk(graph_vertex_t *v, graph_walk_dir_t dir, 521 int (*pre)(graph_vertex_t *, void *), 522 void (*post)(graph_vertex_t *, void *), void *arg) 523 { 524 graph_walk_info_t gi; 525 graph_edge_t fake; 526 size_t sz = dictionary->dict_new_id / 8 + 1; 527 528 gi.gi_visited = startd_zalloc(sz); 529 gi.gi_pre = pre; 530 gi.gi_post = post; 531 gi.gi_arg = arg; 532 gi.gi_dir = dir; 533 gi.gi_ret = 0; 534 535 /* 536 * Fake up an edge for the first iteration 537 */ 538 fake.ge_vertex = v; 539 (void) graph_walk_recurse(&fake, &gi); 540 541 startd_free(gi.gi_visited, sz); 542 } 543 544 typedef struct child_search { 545 int id; /* id of vertex to look for */ 546 uint_t depth; /* recursion depth */ 547 /* 548 * While the vertex is not found, path is NULL. After the search, if 549 * the vertex was found then path should point to a -1-terminated 550 * array of vertex id's which constitute the path to the vertex. 551 */ 552 int *path; 553 } child_search_t; 554 555 static int 556 child_pre(graph_vertex_t *v, void *arg) 557 { 558 child_search_t *cs = arg; 559 560 cs->depth++; 561 562 if (v->gv_id == cs->id) { 563 cs->path = startd_alloc((cs->depth + 1) * sizeof (int)); 564 cs->path[cs->depth] = -1; 565 return (UU_WALK_DONE); 566 } 567 568 return (UU_WALK_NEXT); 569 } 570 571 static void 572 child_post(graph_vertex_t *v, void *arg) 573 { 574 child_search_t *cs = arg; 575 576 cs->depth--; 577 578 if (cs->path != NULL) 579 cs->path[cs->depth] = v->gv_id; 580 } 581 582 /* 583 * Look for a path from from to to. 
If one exists, returns a pointer to 584 * a NULL-terminated array of pointers to the vertices along the path. If 585 * there is no path, returns NULL. 586 */ 587 static int * 588 is_path_to(graph_vertex_t *from, graph_vertex_t *to) 589 { 590 child_search_t cs; 591 592 cs.id = to->gv_id; 593 cs.depth = 0; 594 cs.path = NULL; 595 596 graph_walk(from, WALK_DEPENDENCIES, child_pre, child_post, &cs); 597 598 return (cs.path); 599 } 600 601 /* 602 * Given an array of int's as returned by is_path_to, allocates a string of 603 * their names joined by newlines. Returns the size of the allocated buffer 604 * in *sz and frees path. 605 */ 606 static void 607 path_to_str(int *path, char **cpp, size_t *sz) 608 { 609 int i; 610 graph_vertex_t *v; 611 size_t allocd, new_allocd; 612 char *new, *name; 613 614 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 615 assert(path[0] != -1); 616 617 allocd = 1; 618 *cpp = startd_alloc(1); 619 (*cpp)[0] = '\0'; 620 621 for (i = 0; path[i] != -1; ++i) { 622 name = NULL; 623 624 v = vertex_get_by_id(path[i]); 625 626 if (v == NULL) 627 name = "<deleted>"; 628 else if (v->gv_type == GVT_INST || v->gv_type == GVT_SVC) 629 name = v->gv_name; 630 631 if (name != NULL) { 632 new_allocd = allocd + strlen(name) + 1; 633 new = startd_alloc(new_allocd); 634 (void) strcpy(new, *cpp); 635 (void) strcat(new, name); 636 (void) strcat(new, "\n"); 637 638 startd_free(*cpp, allocd); 639 640 *cpp = new; 641 allocd = new_allocd; 642 } 643 } 644 645 startd_free(path, sizeof (int) * (i + 1)); 646 647 *sz = allocd; 648 } 649 650 651 /* 652 * This function along with run_sulogin() implements an exclusion relationship 653 * between system/console-login and sulogin. run_sulogin() will fail if 654 * system/console-login is online, and the graph engine should call 655 * graph_clogin_start() to bring system/console-login online, which defers the 656 * start if sulogin is running. 657 */ 658 static void 659 graph_clogin_start(graph_vertex_t *v) 660 { 661 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 662 663 if (sulogin_running) 664 console_login_ready = B_TRUE; 665 else 666 vertex_send_event(v, RESTARTER_EVENT_TYPE_START); 667 } 668 669 static void 670 graph_su_start(graph_vertex_t *v) 671 { 672 /* 673 * /etc/inittab used to have the initial /sbin/rcS as a 'sysinit' 674 * entry with a runlevel of 'S', before jumping to the final 675 * target runlevel (as set in initdefault). We mimic that legacy 676 * behavior here. 
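 * The utmpx_set_runlevel() call below records that 'S' entry before the
 * start event is sent to the restarter.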
677 */ 678 utmpx_set_runlevel('S', '0', B_FALSE); 679 vertex_send_event(v, RESTARTER_EVENT_TYPE_START); 680 } 681 682 static void 683 graph_post_su_online(void) 684 { 685 graph_runlevel_changed('S', 1); 686 } 687 688 static void 689 graph_post_su_disable(void) 690 { 691 graph_runlevel_changed('S', 0); 692 } 693 694 static void 695 graph_post_mu_online(void) 696 { 697 graph_runlevel_changed('2', 1); 698 } 699 700 static void 701 graph_post_mu_disable(void) 702 { 703 graph_runlevel_changed('2', 0); 704 } 705 706 static void 707 graph_post_mus_online(void) 708 { 709 graph_runlevel_changed('3', 1); 710 } 711 712 static void 713 graph_post_mus_disable(void) 714 { 715 graph_runlevel_changed('3', 0); 716 } 717 718 static struct special_vertex_info { 719 const char *name; 720 void (*start_f)(graph_vertex_t *); 721 void (*post_online_f)(void); 722 void (*post_disable_f)(void); 723 } special_vertices[] = { 724 { CONSOLE_LOGIN_FMRI, graph_clogin_start, NULL, NULL }, 725 { SCF_MILESTONE_SINGLE_USER, graph_su_start, 726 graph_post_su_online, graph_post_su_disable }, 727 { SCF_MILESTONE_MULTI_USER, NULL, 728 graph_post_mu_online, graph_post_mu_disable }, 729 { SCF_MILESTONE_MULTI_USER_SERVER, NULL, 730 graph_post_mus_online, graph_post_mus_disable }, 731 { NULL }, 732 }; 733 734 735 void 736 vertex_send_event(graph_vertex_t *v, restarter_event_type_t e) 737 { 738 switch (e) { 739 case RESTARTER_EVENT_TYPE_ADD_INSTANCE: 740 assert(v->gv_state == RESTARTER_STATE_UNINIT); 741 742 MUTEX_LOCK(&st->st_load_lock); 743 st->st_load_instances++; 744 MUTEX_UNLOCK(&st->st_load_lock); 745 break; 746 747 case RESTARTER_EVENT_TYPE_ENABLE: 748 log_framework(LOG_DEBUG, "Enabling %s.\n", v->gv_name); 749 assert(v->gv_state == RESTARTER_STATE_UNINIT || 750 v->gv_state == RESTARTER_STATE_DISABLED || 751 v->gv_state == RESTARTER_STATE_MAINT); 752 break; 753 754 case RESTARTER_EVENT_TYPE_DISABLE: 755 case RESTARTER_EVENT_TYPE_ADMIN_DISABLE: 756 log_framework(LOG_DEBUG, "Disabling %s.\n", v->gv_name); 757 assert(v->gv_state != RESTARTER_STATE_DISABLED); 758 break; 759 760 case RESTARTER_EVENT_TYPE_STOP: 761 log_framework(LOG_DEBUG, "Stopping %s.\n", v->gv_name); 762 assert(v->gv_state == RESTARTER_STATE_DEGRADED || 763 v->gv_state == RESTARTER_STATE_ONLINE); 764 break; 765 766 case RESTARTER_EVENT_TYPE_START: 767 log_framework(LOG_DEBUG, "Starting %s.\n", v->gv_name); 768 assert(v->gv_state == RESTARTER_STATE_OFFLINE); 769 break; 770 771 case RESTARTER_EVENT_TYPE_REMOVE_INSTANCE: 772 case RESTARTER_EVENT_TYPE_ADMIN_DEGRADED: 773 case RESTARTER_EVENT_TYPE_ADMIN_REFRESH: 774 case RESTARTER_EVENT_TYPE_ADMIN_RESTART: 775 case RESTARTER_EVENT_TYPE_ADMIN_MAINT_OFF: 776 case RESTARTER_EVENT_TYPE_ADMIN_MAINT_ON: 777 case RESTARTER_EVENT_TYPE_ADMIN_MAINT_ON_IMMEDIATE: 778 case RESTARTER_EVENT_TYPE_DEPENDENCY_CYCLE: 779 case RESTARTER_EVENT_TYPE_INVALID_DEPENDENCY: 780 break; 781 782 default: 783 #ifndef NDEBUG 784 uu_warn("%s:%d: Bad event %d.\n", __FILE__, __LINE__, e); 785 #endif 786 abort(); 787 } 788 789 restarter_protocol_send_event(v->gv_name, v->gv_restarter_channel, e); 790 } 791 792 static void 793 graph_unset_restarter(graph_vertex_t *v) 794 { 795 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 796 assert(v->gv_flags & GV_CONFIGURED); 797 798 vertex_send_event(v, RESTARTER_EVENT_TYPE_REMOVE_INSTANCE); 799 800 if (v->gv_restarter_id != -1) { 801 graph_vertex_t *rv; 802 803 rv = vertex_get_by_id(v->gv_restarter_id); 804 graph_remove_edge(v, rv); 805 } 806 807 v->gv_restarter_id = -1; 808 v->gv_restarter_channel = NULL; 809 } 810 811 
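/*
 * Remove a GVT_GROUP vertex from the graph: drop the edges to each of its
 * dependency vertices, delete any GVT_SVC or GVT_FILE vertex which is left
 * disconnected as a result, and then delete the group vertex itself.
 */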
static void 812 delete_depgroup(graph_vertex_t *v) 813 { 814 graph_edge_t *e; 815 graph_vertex_t *dv; 816 817 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 818 assert(v->gv_type == GVT_GROUP); 819 assert(uu_list_numnodes(v->gv_dependents) == 0); 820 821 while ((e = uu_list_first(v->gv_dependencies)) != NULL) { 822 dv = e->ge_vertex; 823 824 graph_remove_edge(v, dv); 825 826 switch (dv->gv_type) { 827 case GVT_INST: /* instance dependency */ 828 break; 829 830 case GVT_SVC: /* service dependency */ 831 if (uu_list_numnodes(dv->gv_dependents) == 0 && 832 uu_list_numnodes(dv->gv_dependencies) == 0) 833 graph_remove_vertex(dv); 834 break; 835 836 case GVT_FILE: /* file dependency */ 837 assert(uu_list_numnodes(dv->gv_dependencies) == 0); 838 if (uu_list_numnodes(dv->gv_dependents) == 0) 839 graph_remove_vertex(dv); 840 break; 841 842 default: 843 #ifndef NDEBUG 844 uu_warn("%s:%d: Unexpected node type %d", __FILE__, 845 __LINE__, dv->gv_type); 846 #endif 847 abort(); 848 } 849 } 850 851 graph_remove_vertex(v); 852 } 853 854 static int 855 delete_instance_deps_cb(graph_edge_t *e, void **ptrs) 856 { 857 graph_vertex_t *v = ptrs[0]; 858 boolean_t delete_restarter_dep = (boolean_t)ptrs[1]; 859 graph_vertex_t *dv; 860 861 dv = e->ge_vertex; 862 863 /* 864 * We have four possibilities here: 865 * - GVT_INST: restarter 866 * - GVT_GROUP - GVT_INST: instance dependency 867 * - GVT_GROUP - GVT_SVC - GV_INST: service dependency 868 * - GVT_GROUP - GVT_FILE: file dependency 869 */ 870 switch (dv->gv_type) { 871 case GVT_INST: /* restarter */ 872 assert(dv->gv_id == v->gv_restarter_id); 873 if (delete_restarter_dep) 874 graph_remove_edge(v, dv); 875 break; 876 877 case GVT_GROUP: /* pg dependency */ 878 graph_remove_edge(v, dv); 879 delete_depgroup(dv); 880 break; 881 882 case GVT_FILE: 883 /* These are currently not direct dependencies */ 884 885 default: 886 #ifndef NDEBUG 887 uu_warn("%s:%d: Bad vertex type %d.\n", __FILE__, __LINE__, 888 dv->gv_type); 889 #endif 890 abort(); 891 } 892 893 return (UU_WALK_NEXT); 894 } 895 896 static void 897 delete_instance_dependencies(graph_vertex_t *v, boolean_t delete_restarter_dep) 898 { 899 void *ptrs[2]; 900 int r; 901 902 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 903 assert(v->gv_type == GVT_INST); 904 905 ptrs[0] = v; 906 ptrs[1] = (void *)delete_restarter_dep; 907 908 r = uu_list_walk(v->gv_dependencies, 909 (uu_walk_fn_t *)delete_instance_deps_cb, &ptrs, UU_WALK_ROBUST); 910 assert(r == 0); 911 } 912 913 /* 914 * int graph_insert_vertex_unconfigured() 915 * Insert a vertex without sending any restarter events. If the vertex 916 * already exists or creation is successful, return a pointer to it in *vp. 917 * 918 * If type is not GVT_GROUP, dt can remain unset. 919 * 920 * Returns 0, EEXIST, or EINVAL if the arguments are invalid (i.e., fmri 921 * doesn't agree with type, or type doesn't agree with dt). 
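 *   For example, a GVT_SVC or GVT_INST vertex must be named by an "svc:"
 *   FMRI, a GVT_FILE vertex by a "file:" FMRI, and a GVT_GROUP vertex must
 *   carry a valid grouping type and restarter error value.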
 */
static int
graph_insert_vertex_unconfigured(const char *fmri, gv_type_t type,
    depgroup_type_t dt, restarter_error_t rt, graph_vertex_t **vp)
{
        int r;
        int i;

        assert(PTHREAD_MUTEX_HELD(&dgraph_lock));

        switch (type) {
        case GVT_SVC:
        case GVT_INST:
                if (strncmp(fmri, "svc:", sizeof ("svc:") - 1) != 0)
                        return (EINVAL);
                break;

        case GVT_FILE:
                if (strncmp(fmri, "file:", sizeof ("file:") - 1) != 0)
                        return (EINVAL);
                break;

        case GVT_GROUP:
                if (dt <= 0 || rt < 0)
                        return (EINVAL);
                break;

        default:
#ifndef NDEBUG
                uu_warn("%s:%d: Unknown type %d.\n", __FILE__, __LINE__, type);
#endif
                abort();
        }

        *vp = vertex_get_by_name(fmri);
        if (*vp != NULL)
                return (EEXIST);

        *vp = graph_add_vertex(fmri);

        (*vp)->gv_type = type;
        (*vp)->gv_depgroup = dt;
        (*vp)->gv_restart = rt;

        (*vp)->gv_flags = 0;
        (*vp)->gv_state = RESTARTER_STATE_NONE;

        for (i = 0; special_vertices[i].name != NULL; ++i) {
                if (strcmp(fmri, special_vertices[i].name) == 0) {
                        (*vp)->gv_start_f = special_vertices[i].start_f;
                        (*vp)->gv_post_online_f =
                            special_vertices[i].post_online_f;
                        (*vp)->gv_post_disable_f =
                            special_vertices[i].post_disable_f;
                        break;
                }
        }

        (*vp)->gv_restarter_id = -1;
        (*vp)->gv_restarter_channel = 0;

        if (type == GVT_INST) {
                char *sfmri;
                graph_vertex_t *sv;

                sfmri = inst_fmri_to_svc_fmri(fmri);
                sv = vertex_get_by_name(sfmri);
                if (sv == NULL) {
                        r = graph_insert_vertex_unconfigured(sfmri, GVT_SVC, 0,
                            0, &sv);
                        assert(r == 0);
                }
                startd_free(sfmri, max_scf_fmri_size);

                graph_add_edge(sv, *vp);
        }

        /*
         * If this vertex is in the subgraph, mark it as such, for both
         * GVT_INST and GVT_SVC vertices.
         * A GVT_SVC vertex can only be in the subgraph if another instance
         * depends on it, in which case it's already been added to the graph
         * and marked as in the subgraph (by refresh_vertex()). If a
         * GVT_SVC vertex was freshly added (by the code above), it means
         * that it has no dependents, and cannot be in the subgraph.
         * Regardless of this, we still check that gv_flags includes
         * GV_INSUBGRAPH in the event that future behavior causes the above
         * code to add a GVT_SVC vertex which should be in the subgraph.
         */

        (*vp)->gv_flags |= (should_be_in_subgraph(*vp) ? GV_INSUBGRAPH : 0);

        return (0);
}

/*
 * Returns 0 on success or ELOOP if the dependency would create a cycle.
 */
static int
graph_insert_dependency(graph_vertex_t *fv, graph_vertex_t *tv, int **pathp)
{
        hrtime_t now;

        assert(PTHREAD_MUTEX_HELD(&dgraph_lock));

        /* cycle detection */
        now = gethrtime();

        /* Don't follow exclusions. */
        if (!(fv->gv_type == GVT_GROUP &&
            fv->gv_depgroup == DEPGRP_EXCLUDE_ALL)) {
                *pathp = is_path_to(tv, fv);
                if (*pathp)
                        return (ELOOP);
        }

        dep_cycle_ns += gethrtime() - now;
        ++dep_inserts;
        now = gethrtime();

        graph_add_edge(fv, tv);

        dep_insert_ns += gethrtime() - now;

        /* Check if the dependency adds the "to" vertex to the subgraph */
        tv->gv_flags |= (should_be_in_subgraph(tv) ?
            GV_INSUBGRAPH : 0);

        return (0);
}

static int
inst_running(graph_vertex_t *v)
{
        assert(v->gv_type == GVT_INST);

        if (v->gv_state == RESTARTER_STATE_ONLINE ||
            v->gv_state == RESTARTER_STATE_DEGRADED)
                return (1);

        return (0);
}

/*
 * The dependency evaluation functions return
 *   1 - dependency satisfied
 *   0 - dependency unsatisfied
 *  -1 - dependency unsatisfiable (without administrator intervention)
 *
 * The functions also take a boolean satbility argument. When true, the
 * functions may recurse in order to determine satisfiability.
 */
static int require_any_satisfied(graph_vertex_t *, boolean_t);
static int dependency_satisfied(graph_vertex_t *, boolean_t);

/*
 * A require_all dependency is unsatisfied if any elements are unsatisfied. It
 * is unsatisfiable if any elements are unsatisfiable.
 */
static int
require_all_satisfied(graph_vertex_t *groupv, boolean_t satbility)
{
        graph_edge_t *edge;
        int i;
        boolean_t any_unsatisfied;

        if (uu_list_numnodes(groupv->gv_dependencies) == 0)
                return (1);

        any_unsatisfied = B_FALSE;

        for (edge = uu_list_first(groupv->gv_dependencies);
            edge != NULL;
            edge = uu_list_next(groupv->gv_dependencies, edge)) {
                i = dependency_satisfied(edge->ge_vertex, satbility);
                if (i == 1)
                        continue;

                log_framework(LOG_DEBUG,
                    "require_all(%s): %s is unsatisfi%s.\n", groupv->gv_name,
                    edge->ge_vertex->gv_name, i == 0 ? "ed" : "able");

                if (!satbility)
                        return (0);

                if (i == -1)
                        return (-1);

                any_unsatisfied = B_TRUE;
        }

        return (any_unsatisfied ? 0 : 1);
}

/*
 * A require_any dependency is satisfied if any element is satisfied. It is
 * satisfiable if any element is satisfiable.
 */
static int
require_any_satisfied(graph_vertex_t *groupv, boolean_t satbility)
{
        graph_edge_t *edge;
        int s;
        boolean_t satisfiable;

        if (uu_list_numnodes(groupv->gv_dependencies) == 0)
                return (1);

        satisfiable = B_FALSE;

        for (edge = uu_list_first(groupv->gv_dependencies);
            edge != NULL;
            edge = uu_list_next(groupv->gv_dependencies, edge)) {
                s = dependency_satisfied(edge->ge_vertex, satbility);

                if (s == 1)
                        return (1);

                log_framework(LOG_DEBUG,
                    "require_any(%s): %s is unsatisfi%s.\n",
                    groupv->gv_name, edge->ge_vertex->gv_name,
                    s == 0 ? "ed" : "able");

                if (satbility && s == 0)
                        satisfiable = B_TRUE;
        }

        return (!satbility || satisfiable ? 0 : -1);
}

/*
 * An optional_all dependency only considers elements which are configured,
 * enabled, and not in maintenance. If any are unsatisfied, then the dependency
 * is unsatisfied.
 *
 * Offline dependencies which are waiting for a dependency to come online are
 * unsatisfied. Offline dependencies which cannot possibly come online
 * (unsatisfiable) are always considered satisfied.
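 * This keeps an optional dependency which is offline only because its own
 * dependencies can never be satisfied from holding its dependents offline
 * forever.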
1159 */ 1160 static int 1161 optional_all_satisfied(graph_vertex_t *groupv, boolean_t satbility) 1162 { 1163 graph_edge_t *edge; 1164 graph_vertex_t *v; 1165 boolean_t any_qualified; 1166 boolean_t any_unsatisfied; 1167 int i; 1168 1169 any_qualified = B_FALSE; 1170 any_unsatisfied = B_FALSE; 1171 1172 for (edge = uu_list_first(groupv->gv_dependencies); 1173 edge != NULL; 1174 edge = uu_list_next(groupv->gv_dependencies, edge)) { 1175 v = edge->ge_vertex; 1176 1177 switch (v->gv_type) { 1178 case GVT_INST: 1179 /* Skip missing or disabled instances */ 1180 if ((v->gv_flags & (GV_CONFIGURED | GV_ENABLED)) != 1181 (GV_CONFIGURED | GV_ENABLED)) 1182 continue; 1183 1184 if (v->gv_state == RESTARTER_STATE_MAINT) 1185 continue; 1186 1187 any_qualified = B_TRUE; 1188 if (v->gv_state == RESTARTER_STATE_OFFLINE) { 1189 /* 1190 * For offline dependencies, treat unsatisfiable 1191 * as satisfied. 1192 */ 1193 i = dependency_satisfied(v, B_TRUE); 1194 if (i == -1) 1195 i = 1; 1196 } else if (v->gv_state == RESTARTER_STATE_DISABLED) { 1197 /* 1198 * The service is enabled, but hasn't 1199 * transitioned out of disabled yet. Treat it 1200 * as unsatisfied (not unsatisfiable). 1201 */ 1202 i = 0; 1203 } else { 1204 i = dependency_satisfied(v, satbility); 1205 } 1206 break; 1207 1208 case GVT_FILE: 1209 any_qualified = B_TRUE; 1210 i = dependency_satisfied(v, satbility); 1211 1212 break; 1213 1214 case GVT_SVC: { 1215 boolean_t svc_any_qualified; 1216 boolean_t svc_satisfied; 1217 boolean_t svc_satisfiable; 1218 graph_vertex_t *v2; 1219 graph_edge_t *e2; 1220 1221 svc_any_qualified = B_FALSE; 1222 svc_satisfied = B_FALSE; 1223 svc_satisfiable = B_FALSE; 1224 1225 for (e2 = uu_list_first(v->gv_dependencies); 1226 e2 != NULL; 1227 e2 = uu_list_next(v->gv_dependencies, e2)) { 1228 v2 = e2->ge_vertex; 1229 assert(v2->gv_type == GVT_INST); 1230 1231 if ((v2->gv_flags & 1232 (GV_CONFIGURED | GV_ENABLED)) != 1233 (GV_CONFIGURED | GV_ENABLED)) 1234 continue; 1235 1236 if (v2->gv_state == RESTARTER_STATE_MAINT) 1237 continue; 1238 1239 svc_any_qualified = B_TRUE; 1240 1241 if (v2->gv_state == RESTARTER_STATE_OFFLINE) { 1242 /* 1243 * For offline dependencies, treat 1244 * unsatisfiable as satisfied. 1245 */ 1246 i = dependency_satisfied(v2, B_TRUE); 1247 if (i == -1) 1248 i = 1; 1249 } else if (v2->gv_state == 1250 RESTARTER_STATE_DISABLED) { 1251 i = 0; 1252 } else { 1253 i = dependency_satisfied(v2, satbility); 1254 } 1255 1256 if (i == 1) { 1257 svc_satisfied = B_TRUE; 1258 break; 1259 } 1260 if (i == 0) 1261 svc_satisfiable = B_TRUE; 1262 } 1263 1264 if (!svc_any_qualified) 1265 continue; 1266 any_qualified = B_TRUE; 1267 if (svc_satisfied) { 1268 i = 1; 1269 } else if (svc_satisfiable) { 1270 i = 0; 1271 } else { 1272 i = -1; 1273 } 1274 break; 1275 } 1276 1277 case GVT_GROUP: 1278 default: 1279 #ifndef NDEBUG 1280 uu_warn("%s:%d: Unexpected vertex type %d.\n", __FILE__, 1281 __LINE__, v->gv_type); 1282 #endif 1283 abort(); 1284 } 1285 1286 if (i == 1) 1287 continue; 1288 1289 log_framework(LOG_DEBUG, 1290 "optional_all(%s): %s is unsatisfi%s.\n", groupv->gv_name, 1291 v->gv_name, i == 0 ? "ed" : "able"); 1292 1293 if (!satbility) 1294 return (0); 1295 if (i == -1) 1296 return (-1); 1297 any_unsatisfied = B_TRUE; 1298 } 1299 1300 if (!any_qualified) 1301 return (1); 1302 1303 return (any_unsatisfied ? 
0 : 1); 1304 } 1305 1306 /* 1307 * An exclude_all dependency is unsatisfied if any non-service element is 1308 * satisfied or any service instance which is configured, enabled, and not in 1309 * maintenance is satisfied. Usually when unsatisfied, it is also 1310 * unsatisfiable. 1311 */ 1312 #define LOG_EXCLUDE(u, v) \ 1313 log_framework(LOG_DEBUG, "exclude_all(%s): %s is satisfied.\n", \ 1314 (u)->gv_name, (v)->gv_name) 1315 1316 /* ARGSUSED */ 1317 static int 1318 exclude_all_satisfied(graph_vertex_t *groupv, boolean_t satbility) 1319 { 1320 graph_edge_t *edge, *e2; 1321 graph_vertex_t *v, *v2; 1322 1323 for (edge = uu_list_first(groupv->gv_dependencies); 1324 edge != NULL; 1325 edge = uu_list_next(groupv->gv_dependencies, edge)) { 1326 v = edge->ge_vertex; 1327 1328 switch (v->gv_type) { 1329 case GVT_INST: 1330 if ((v->gv_flags & GV_CONFIGURED) == 0) 1331 continue; 1332 1333 switch (v->gv_state) { 1334 case RESTARTER_STATE_ONLINE: 1335 case RESTARTER_STATE_DEGRADED: 1336 LOG_EXCLUDE(groupv, v); 1337 return (v->gv_flags & GV_ENABLED ? -1 : 0); 1338 1339 case RESTARTER_STATE_OFFLINE: 1340 case RESTARTER_STATE_UNINIT: 1341 LOG_EXCLUDE(groupv, v); 1342 return (0); 1343 1344 case RESTARTER_STATE_DISABLED: 1345 case RESTARTER_STATE_MAINT: 1346 continue; 1347 1348 default: 1349 #ifndef NDEBUG 1350 uu_warn("%s:%d: Unexpected vertex state %d.\n", 1351 __FILE__, __LINE__, v->gv_state); 1352 #endif 1353 abort(); 1354 } 1355 /* NOTREACHED */ 1356 1357 case GVT_SVC: 1358 break; 1359 1360 case GVT_FILE: 1361 if (!file_ready(v)) 1362 continue; 1363 LOG_EXCLUDE(groupv, v); 1364 return (-1); 1365 1366 case GVT_GROUP: 1367 default: 1368 #ifndef NDEBUG 1369 uu_warn("%s:%d: Unexpected vertex type %d.\n", __FILE__, 1370 __LINE__, v->gv_type); 1371 #endif 1372 abort(); 1373 } 1374 1375 /* v represents a service */ 1376 if (uu_list_numnodes(v->gv_dependencies) == 0) 1377 continue; 1378 1379 for (e2 = uu_list_first(v->gv_dependencies); 1380 e2 != NULL; 1381 e2 = uu_list_next(v->gv_dependencies, e2)) { 1382 v2 = e2->ge_vertex; 1383 assert(v2->gv_type == GVT_INST); 1384 1385 if ((v2->gv_flags & GV_CONFIGURED) == 0) 1386 continue; 1387 1388 switch (v2->gv_state) { 1389 case RESTARTER_STATE_ONLINE: 1390 case RESTARTER_STATE_DEGRADED: 1391 LOG_EXCLUDE(groupv, v2); 1392 return (v2->gv_flags & GV_ENABLED ? -1 : 0); 1393 1394 case RESTARTER_STATE_OFFLINE: 1395 case RESTARTER_STATE_UNINIT: 1396 LOG_EXCLUDE(groupv, v2); 1397 return (0); 1398 1399 case RESTARTER_STATE_DISABLED: 1400 case RESTARTER_STATE_MAINT: 1401 continue; 1402 1403 default: 1404 #ifndef NDEBUG 1405 uu_warn("%s:%d: Unexpected vertex type %d.\n", 1406 __FILE__, __LINE__, v2->gv_type); 1407 #endif 1408 abort(); 1409 } 1410 } 1411 } 1412 1413 return (1); 1414 } 1415 1416 /* 1417 * int instance_satisfied() 1418 * Determine if all the dependencies are satisfied for the supplied instance 1419 * vertex. Return 1 if they are, 0 if they aren't, and -1 if they won't be 1420 * without administrator intervention. 1421 */ 1422 static int 1423 instance_satisfied(graph_vertex_t *v, boolean_t satbility) 1424 { 1425 assert(v->gv_type == GVT_INST); 1426 assert(!inst_running(v)); 1427 1428 return (require_all_satisfied(v, satbility)); 1429 } 1430 1431 /* 1432 * Decide whether v can satisfy a dependency. v can either be a child of 1433 * a group vertex, or of an instance vertex. 
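 * The return values follow the convention described above: 1 for satisfied,
 * 0 for unsatisfied, and -1 for unsatisfiable.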
 */
static int
dependency_satisfied(graph_vertex_t *v, boolean_t satbility)
{
        switch (v->gv_type) {
        case GVT_INST:
                if ((v->gv_flags & GV_CONFIGURED) == 0)
                        return (-1);

                switch (v->gv_state) {
                case RESTARTER_STATE_ONLINE:
                case RESTARTER_STATE_DEGRADED:
                        return (1);

                case RESTARTER_STATE_OFFLINE:
                        if (!satbility)
                                return (0);
                        return (instance_satisfied(v, satbility) != -1 ?
                            0 : -1);

                case RESTARTER_STATE_DISABLED:
                case RESTARTER_STATE_MAINT:
                        return (-1);

                case RESTARTER_STATE_UNINIT:
                        return (0);

                default:
#ifndef NDEBUG
                        uu_warn("%s:%d: Unexpected vertex state %d.\n",
                            __FILE__, __LINE__, v->gv_state);
#endif
                        abort();
                        /* NOTREACHED */
                }

        case GVT_SVC:
                if (uu_list_numnodes(v->gv_dependencies) == 0)
                        return (-1);
                return (require_any_satisfied(v, satbility));

        case GVT_FILE:
                /* i.e., we assume files will not be automatically generated */
                return (file_ready(v) ? 1 : -1);

        case GVT_GROUP:
                break;

        default:
#ifndef NDEBUG
                uu_warn("%s:%d: Unexpected node type %d.\n", __FILE__,
                    __LINE__, v->gv_type);
#endif
                abort();
                /* NOTREACHED */
        }

        switch (v->gv_depgroup) {
        case DEPGRP_REQUIRE_ANY:
                return (require_any_satisfied(v, satbility));

        case DEPGRP_REQUIRE_ALL:
                return (require_all_satisfied(v, satbility));

        case DEPGRP_OPTIONAL_ALL:
                return (optional_all_satisfied(v, satbility));

        case DEPGRP_EXCLUDE_ALL:
                return (exclude_all_satisfied(v, satbility));

        default:
#ifndef NDEBUG
                uu_warn("%s:%d: Unknown dependency grouping %d.\n", __FILE__,
                    __LINE__, v->gv_depgroup);
#endif
                abort();
        }
}

static void
start_if_satisfied(graph_vertex_t *v)
{
        if (v->gv_state == RESTARTER_STATE_OFFLINE &&
            instance_satisfied(v, B_FALSE) == 1) {
                if (v->gv_start_f == NULL)
                        vertex_send_event(v, RESTARTER_EVENT_TYPE_START);
                else
                        v->gv_start_f(v);
        }
}

/*
 * propagate_satbility()
 *
 * This function is used when the given vertex changes state in such a way that
 * one of its dependents may become unsatisfiable. This happens when an
 * instance transitions between offline -> online, or from !running ->
 * maintenance, as well as when an instance is removed from the graph.
 *
 * We have to walk all the dependents, since optional_all dependencies several
 * levels up could become (un)satisfied, instead of unsatisfiable. For example,
 *
 *	+-----+  optional_all  +-----+  require_all  +-----+
 *	|  A  |--------------->|  B  |-------------->|  C  |
 *	+-----+                +-----+               +-----+
 *
 *	                                      offline -> maintenance
 *
 * If C goes into maintenance, it's not enough simply to check B. Because A has
 * an optional dependency, what was previously an unsatisfiable situation is now
 * satisfied (B will never come online, even though its state hasn't changed).
 *
 * Note that it's not necessary to continue examining dependents after reaching
 * an optional_all dependency. It's not possible for an optional_all dependency
 * to change satisfiability without also coming online, in which case we get a
 * start event and propagation continues naturally.
However, it does no harm to 1550 * continue propagating satisfiability (as it is a relatively rare event), and 1551 * keeps the walker code simple and generic. 1552 */ 1553 /*ARGSUSED*/ 1554 static int 1555 satbility_cb(graph_vertex_t *v, void *arg) 1556 { 1557 if (v->gv_type == GVT_INST) 1558 start_if_satisfied(v); 1559 1560 return (UU_WALK_NEXT); 1561 } 1562 1563 static void 1564 propagate_satbility(graph_vertex_t *v) 1565 { 1566 graph_walk(v, WALK_DEPENDENTS, satbility_cb, NULL, NULL); 1567 } 1568 1569 static void propagate_stop(graph_vertex_t *, void *); 1570 1571 /* ARGSUSED */ 1572 static void 1573 propagate_start(graph_vertex_t *v, void *arg) 1574 { 1575 switch (v->gv_type) { 1576 case GVT_INST: 1577 start_if_satisfied(v); 1578 break; 1579 1580 case GVT_GROUP: 1581 if (v->gv_depgroup == DEPGRP_EXCLUDE_ALL) { 1582 graph_walk_dependents(v, propagate_stop, 1583 (void *)RERR_RESTART); 1584 break; 1585 } 1586 /* FALLTHROUGH */ 1587 1588 case GVT_SVC: 1589 graph_walk_dependents(v, propagate_start, NULL); 1590 break; 1591 1592 case GVT_FILE: 1593 #ifndef NDEBUG 1594 uu_warn("%s:%d: propagate_start() encountered GVT_FILE.\n", 1595 __FILE__, __LINE__); 1596 #endif 1597 abort(); 1598 /* NOTREACHED */ 1599 1600 default: 1601 #ifndef NDEBUG 1602 uu_warn("%s:%d: Unknown vertex type %d.\n", __FILE__, __LINE__, 1603 v->gv_type); 1604 #endif 1605 abort(); 1606 } 1607 } 1608 1609 static void 1610 propagate_stop(graph_vertex_t *v, void *arg) 1611 { 1612 graph_edge_t *e; 1613 graph_vertex_t *svc; 1614 restarter_error_t err = (restarter_error_t)arg; 1615 1616 switch (v->gv_type) { 1617 case GVT_INST: 1618 /* Restarter */ 1619 if (err > RERR_NONE && inst_running(v)) 1620 vertex_send_event(v, RESTARTER_EVENT_TYPE_STOP); 1621 break; 1622 1623 case GVT_SVC: 1624 graph_walk_dependents(v, propagate_stop, arg); 1625 break; 1626 1627 case GVT_FILE: 1628 #ifndef NDEBUG 1629 uu_warn("%s:%d: propagate_stop() encountered GVT_FILE.\n", 1630 __FILE__, __LINE__); 1631 #endif 1632 abort(); 1633 /* NOTREACHED */ 1634 1635 case GVT_GROUP: 1636 if (v->gv_depgroup == DEPGRP_EXCLUDE_ALL) { 1637 graph_walk_dependents(v, propagate_start, NULL); 1638 break; 1639 } 1640 1641 if (err == RERR_NONE || err > v->gv_restart) 1642 break; 1643 1644 assert(uu_list_numnodes(v->gv_dependents) == 1); 1645 e = uu_list_first(v->gv_dependents); 1646 svc = e->ge_vertex; 1647 1648 if (inst_running(svc)) 1649 vertex_send_event(svc, RESTARTER_EVENT_TYPE_STOP); 1650 break; 1651 1652 default: 1653 #ifndef NDEBUG 1654 uu_warn("%s:%d: Unknown vertex type %d.\n", __FILE__, __LINE__, 1655 v->gv_type); 1656 #endif 1657 abort(); 1658 } 1659 } 1660 1661 /* 1662 * void graph_enable_by_vertex() 1663 * If admin is non-zero, this is an administrative request for change 1664 * of the enabled property. Thus, send the ADMIN_DISABLE rather than 1665 * a plain DISABLE restarter event. 1666 */ 1667 static void 1668 graph_enable_by_vertex(graph_vertex_t *vertex, int enable, int admin) 1669 { 1670 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 1671 assert((vertex->gv_flags & GV_CONFIGURED)); 1672 1673 vertex->gv_flags = (vertex->gv_flags & ~GV_ENABLED) | 1674 (enable ? 
GV_ENABLED : 0); 1675 1676 if (enable) { 1677 if (vertex->gv_state != RESTARTER_STATE_OFFLINE && 1678 vertex->gv_state != RESTARTER_STATE_DEGRADED && 1679 vertex->gv_state != RESTARTER_STATE_ONLINE) 1680 vertex_send_event(vertex, RESTARTER_EVENT_TYPE_ENABLE); 1681 } else { 1682 if (vertex->gv_state != RESTARTER_STATE_DISABLED) { 1683 if (admin) 1684 vertex_send_event(vertex, 1685 RESTARTER_EVENT_TYPE_ADMIN_DISABLE); 1686 else 1687 vertex_send_event(vertex, 1688 RESTARTER_EVENT_TYPE_DISABLE); 1689 } 1690 } 1691 1692 /* 1693 * Wait for state update from restarter before sending _START or 1694 * _STOP. 1695 */ 1696 } 1697 1698 static int configure_vertex(graph_vertex_t *, scf_instance_t *); 1699 1700 /* 1701 * Set the restarter for v to fmri_arg. That is, make sure a vertex for 1702 * fmri_arg exists, make v depend on it, and send _ADD_INSTANCE for v. If 1703 * v is already configured and fmri_arg indicates the current restarter, do 1704 * nothing. If v is configured and fmri_arg is a new restarter, delete v's 1705 * dependency on the restarter, send _REMOVE_INSTANCE for v, and set the new 1706 * restarter. Returns 0 on success, EINVAL if the FMRI is invalid, 1707 * ECONNABORTED if the repository connection is broken, and ELOOP 1708 * if the dependency would create a cycle. In the last case, *pathp will 1709 * point to a -1-terminated array of ids which compose the path from v to 1710 * restarter_fmri. 1711 */ 1712 int 1713 graph_change_restarter(graph_vertex_t *v, const char *fmri_arg, scf_handle_t *h, 1714 int **pathp) 1715 { 1716 char *restarter_fmri = NULL; 1717 graph_vertex_t *rv; 1718 int err; 1719 int id; 1720 1721 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 1722 1723 if (fmri_arg[0] != '\0') { 1724 err = fmri_canonify(fmri_arg, &restarter_fmri, B_TRUE); 1725 if (err != 0) { 1726 assert(err == EINVAL); 1727 return (err); 1728 } 1729 } 1730 1731 if (restarter_fmri == NULL || 1732 strcmp(restarter_fmri, SCF_SERVICE_STARTD) == 0) { 1733 if (v->gv_flags & GV_CONFIGURED) { 1734 if (v->gv_restarter_id == -1) { 1735 if (restarter_fmri != NULL) 1736 startd_free(restarter_fmri, 1737 max_scf_fmri_size); 1738 return (0); 1739 } 1740 1741 graph_unset_restarter(v); 1742 } 1743 1744 /* Master restarter, nothing to do. 
*/ 1745 v->gv_restarter_id = -1; 1746 v->gv_restarter_channel = NULL; 1747 vertex_send_event(v, RESTARTER_EVENT_TYPE_ADD_INSTANCE); 1748 return (0); 1749 } 1750 1751 if (v->gv_flags & GV_CONFIGURED) { 1752 id = dict_lookup_byname(restarter_fmri); 1753 if (id != -1 && v->gv_restarter_id == id) { 1754 startd_free(restarter_fmri, max_scf_fmri_size); 1755 return (0); 1756 } 1757 1758 graph_unset_restarter(v); 1759 } 1760 1761 err = graph_insert_vertex_unconfigured(restarter_fmri, GVT_INST, 0, 1762 RERR_NONE, &rv); 1763 startd_free(restarter_fmri, max_scf_fmri_size); 1764 assert(err == 0 || err == EEXIST); 1765 1766 if (rv->gv_delegate_initialized == 0) { 1767 rv->gv_delegate_channel = restarter_protocol_init_delegate( 1768 rv->gv_name); 1769 rv->gv_delegate_initialized = 1; 1770 } 1771 v->gv_restarter_id = rv->gv_id; 1772 v->gv_restarter_channel = rv->gv_delegate_channel; 1773 1774 err = graph_insert_dependency(v, rv, pathp); 1775 if (err != 0) { 1776 assert(err == ELOOP); 1777 return (ELOOP); 1778 } 1779 1780 vertex_send_event(v, RESTARTER_EVENT_TYPE_ADD_INSTANCE); 1781 1782 if (!(rv->gv_flags & GV_CONFIGURED)) { 1783 scf_instance_t *inst; 1784 1785 err = libscf_fmri_get_instance(h, rv->gv_name, &inst); 1786 switch (err) { 1787 case 0: 1788 err = configure_vertex(rv, inst); 1789 scf_instance_destroy(inst); 1790 switch (err) { 1791 case 0: 1792 case ECANCELED: 1793 break; 1794 1795 case ECONNABORTED: 1796 return (ECONNABORTED); 1797 1798 default: 1799 bad_error("configure_vertex", err); 1800 } 1801 break; 1802 1803 case ECONNABORTED: 1804 return (ECONNABORTED); 1805 1806 case ENOENT: 1807 break; 1808 1809 case ENOTSUP: 1810 /* 1811 * The fmri doesn't specify an instance - translate 1812 * to EINVAL. 1813 */ 1814 return (EINVAL); 1815 1816 case EINVAL: 1817 default: 1818 bad_error("libscf_fmri_get_instance", err); 1819 } 1820 } 1821 1822 return (0); 1823 } 1824 1825 1826 /* 1827 * Add all of the instances of the service named by fmri to the graph. 1828 * Returns 1829 * 0 - success 1830 * ENOENT - service indicated by fmri does not exist 1831 * 1832 * In both cases *reboundp will be B_TRUE if the handle was rebound, or B_FALSE 1833 * otherwise. 
 */
static int
add_service(const char *fmri, scf_handle_t *h, boolean_t *reboundp)
{
        scf_service_t *svc;
        scf_instance_t *inst;
        scf_iter_t *iter;
        char *inst_fmri;
        int ret, r;

        *reboundp = B_FALSE;

        svc = safe_scf_service_create(h);
        inst = safe_scf_instance_create(h);
        iter = safe_scf_iter_create(h);
        inst_fmri = startd_alloc(max_scf_fmri_size);

rebound:
        if (scf_handle_decode_fmri(h, fmri, NULL, svc, NULL, NULL, NULL,
            SCF_DECODE_FMRI_EXACT) != 0) {
                switch (scf_error()) {
                case SCF_ERROR_CONNECTION_BROKEN:
                default:
                        libscf_handle_rebind(h);
                        *reboundp = B_TRUE;
                        goto rebound;

                case SCF_ERROR_NOT_FOUND:
                        ret = ENOENT;
                        goto out;

                case SCF_ERROR_INVALID_ARGUMENT:
                case SCF_ERROR_CONSTRAINT_VIOLATED:
                case SCF_ERROR_NOT_BOUND:
                case SCF_ERROR_HANDLE_MISMATCH:
                        bad_error("scf_handle_decode_fmri", scf_error());
                }
        }

        if (scf_iter_service_instances(iter, svc) != 0) {
                switch (scf_error()) {
                case SCF_ERROR_CONNECTION_BROKEN:
                default:
                        libscf_handle_rebind(h);
                        *reboundp = B_TRUE;
                        goto rebound;

                case SCF_ERROR_DELETED:
                        ret = ENOENT;
                        goto out;

                case SCF_ERROR_HANDLE_MISMATCH:
                case SCF_ERROR_NOT_BOUND:
                case SCF_ERROR_NOT_SET:
                        bad_error("scf_iter_service_instances", scf_error());
                }
        }

        for (;;) {
                r = scf_iter_next_instance(iter, inst);
                if (r == 0)
                        break;
                if (r != 1) {
                        switch (scf_error()) {
                        case SCF_ERROR_CONNECTION_BROKEN:
                        default:
                                libscf_handle_rebind(h);
                                *reboundp = B_TRUE;
                                goto rebound;

                        case SCF_ERROR_DELETED:
                                ret = ENOENT;
                                goto out;

                        case SCF_ERROR_HANDLE_MISMATCH:
                        case SCF_ERROR_NOT_BOUND:
                        case SCF_ERROR_NOT_SET:
                        case SCF_ERROR_INVALID_ARGUMENT:
                                bad_error("scf_iter_next_instance",
                                    scf_error());
                        }
                }

                if (scf_instance_to_fmri(inst, inst_fmri, max_scf_fmri_size) <
                    0) {
                        switch (scf_error()) {
                        case SCF_ERROR_CONNECTION_BROKEN:
                                libscf_handle_rebind(h);
                                *reboundp = B_TRUE;
                                goto rebound;

                        case SCF_ERROR_DELETED:
                                continue;

                        case SCF_ERROR_NOT_BOUND:
                        case SCF_ERROR_NOT_SET:
                                bad_error("scf_instance_to_fmri", scf_error());
                        }
                }

                r = dgraph_add_instance(inst_fmri, inst, B_FALSE);
                switch (r) {
                case 0:
                case ECANCELED:
                        break;

                case EEXIST:
                        continue;

                case ECONNABORTED:
                        libscf_handle_rebind(h);
                        *reboundp = B_TRUE;
                        goto rebound;

                case EINVAL:
                default:
                        bad_error("dgraph_add_instance", r);
                }
        }

        ret = 0;

out:
        startd_free(inst_fmri, max_scf_fmri_size);
        scf_iter_destroy(iter);
        scf_instance_destroy(inst);
        scf_service_destroy(svc);
        return (ret);
}

struct depfmri_info {
        graph_vertex_t	*v;		/* GVT_GROUP vertex */
        gv_type_t	type;		/* type of dependency */
        const char	*inst_fmri;	/* FMRI of parental GVT_INST vert. */
        const char	*pg_name;	/* Name of dependency pg */
        scf_handle_t	*h;
        int		err;		/* return error code */
        int		**pathp;	/* return circular dependency path */
};

/*
 * Find or create a vertex for fmri and make info->v depend on it.
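 * The fmri may name a file, a service, or a service instance; the vertex is
 * created as GVT_FILE, GVT_SVC, or GVT_INST accordingly.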
1976 * Returns 1977 * 0 - success 1978 * nonzero - failure 1979 * 1980 * On failure, sets info->err to 1981 * EINVAL - fmri is invalid 1982 * fmri does not match info->type 1983 * ELOOP - Adding the dependency creates a circular dependency. *info->pathp 1984 * will point to an array of the ids of the members of the cycle. 1985 * ECONNABORTED - repository connection was broken 1986 * ECONNRESET - succeeded, but repository connection was reset 1987 */ 1988 static int 1989 process_dependency_fmri(const char *fmri, struct depfmri_info *info) 1990 { 1991 int err; 1992 graph_vertex_t *depgroup_v, *v; 1993 char *fmri_copy, *cfmri; 1994 size_t fmri_copy_sz; 1995 const char *scope, *service, *instance, *pg; 1996 scf_instance_t *inst; 1997 boolean_t rebound; 1998 1999 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 2000 2001 /* Get or create vertex for FMRI */ 2002 depgroup_v = info->v; 2003 2004 if (strncmp(fmri, "file:", sizeof ("file:") - 1) == 0) { 2005 if (info->type != GVT_FILE) { 2006 log_framework(LOG_NOTICE, 2007 "FMRI \"%s\" is not allowed for the \"%s\" " 2008 "dependency's type of instance %s.\n", fmri, 2009 info->pg_name, info->inst_fmri); 2010 return (info->err = EINVAL); 2011 } 2012 2013 err = graph_insert_vertex_unconfigured(fmri, info->type, 0, 2014 RERR_NONE, &v); 2015 switch (err) { 2016 case 0: 2017 break; 2018 2019 case EEXIST: 2020 assert(v->gv_type == GVT_FILE); 2021 break; 2022 2023 case EINVAL: /* prevented above */ 2024 default: 2025 bad_error("graph_insert_vertex_unconfigured", err); 2026 } 2027 } else { 2028 if (info->type != GVT_INST) { 2029 log_framework(LOG_NOTICE, 2030 "FMRI \"%s\" is not allowed for the \"%s\" " 2031 "dependency's type of instance %s.\n", fmri, 2032 info->pg_name, info->inst_fmri); 2033 return (info->err = EINVAL); 2034 } 2035 2036 /* 2037 * We must canonify fmri & add a vertex for it. 2038 */ 2039 fmri_copy_sz = strlen(fmri) + 1; 2040 fmri_copy = startd_alloc(fmri_copy_sz); 2041 (void) strcpy(fmri_copy, fmri); 2042 2043 /* Determine if the FMRI is a property group or instance */ 2044 if (scf_parse_svc_fmri(fmri_copy, &scope, &service, 2045 &instance, &pg, NULL) != 0) { 2046 startd_free(fmri_copy, fmri_copy_sz); 2047 log_framework(LOG_NOTICE, 2048 "Dependency \"%s\" of %s has invalid FMRI " 2049 "\"%s\".\n", info->pg_name, info->inst_fmri, 2050 fmri); 2051 return (info->err = EINVAL); 2052 } 2053 2054 if (service == NULL || pg != NULL) { 2055 startd_free(fmri_copy, fmri_copy_sz); 2056 log_framework(LOG_NOTICE, 2057 "Dependency \"%s\" of %s does not designate a " 2058 "service or instance.\n", info->pg_name, 2059 info->inst_fmri); 2060 return (info->err = EINVAL); 2061 } 2062 2063 if (scope == NULL || strcmp(scope, SCF_SCOPE_LOCAL) == 0) { 2064 cfmri = uu_msprintf("svc:/%s%s%s", 2065 service, instance ? ":" : "", instance ? instance : 2066 ""); 2067 } else { 2068 cfmri = uu_msprintf("svc://%s/%s%s%s", 2069 scope, service, instance ? ":" : "", instance ? 2070 instance : ""); 2071 } 2072 2073 startd_free(fmri_copy, fmri_copy_sz); 2074 2075 err = graph_insert_vertex_unconfigured(cfmri, instance ? 2076 GVT_INST : GVT_SVC, instance ? 0 : DEPGRP_REQUIRE_ANY, 2077 RERR_NONE, &v); 2078 uu_free(cfmri); 2079 switch (err) { 2080 case 0: 2081 break; 2082 2083 case EEXIST: 2084 /* Verify v. 
*/ 2085 if (instance != NULL) 2086 assert(v->gv_type == GVT_INST); 2087 else 2088 assert(v->gv_type == GVT_SVC); 2089 break; 2090 2091 default: 2092 bad_error("graph_insert_vertex_unconfigured", err); 2093 } 2094 } 2095 2096 /* Add dependency from depgroup_v to new vertex */ 2097 info->err = graph_insert_dependency(depgroup_v, v, info->pathp); 2098 switch (info->err) { 2099 case 0: 2100 break; 2101 2102 case ELOOP: 2103 return (ELOOP); 2104 2105 default: 2106 bad_error("graph_insert_dependency", info->err); 2107 } 2108 2109 /* This must be after we insert the dependency, to avoid looping. */ 2110 switch (v->gv_type) { 2111 case GVT_INST: 2112 if ((v->gv_flags & GV_CONFIGURED) != 0) 2113 break; 2114 2115 inst = safe_scf_instance_create(info->h); 2116 2117 rebound = B_FALSE; 2118 2119 rebound: 2120 err = libscf_lookup_instance(v->gv_name, inst); 2121 switch (err) { 2122 case 0: 2123 err = configure_vertex(v, inst); 2124 switch (err) { 2125 case 0: 2126 case ECANCELED: 2127 break; 2128 2129 case ECONNABORTED: 2130 libscf_handle_rebind(info->h); 2131 rebound = B_TRUE; 2132 goto rebound; 2133 2134 default: 2135 bad_error("configure_vertex", err); 2136 } 2137 break; 2138 2139 case ENOENT: 2140 break; 2141 2142 case ECONNABORTED: 2143 libscf_handle_rebind(info->h); 2144 rebound = B_TRUE; 2145 goto rebound; 2146 2147 case EINVAL: 2148 case ENOTSUP: 2149 default: 2150 bad_error("libscf_fmri_get_instance", err); 2151 } 2152 2153 scf_instance_destroy(inst); 2154 2155 if (rebound) 2156 return (info->err = ECONNRESET); 2157 break; 2158 2159 case GVT_SVC: 2160 (void) add_service(v->gv_name, info->h, &rebound); 2161 if (rebound) 2162 return (info->err = ECONNRESET); 2163 } 2164 2165 return (0); 2166 } 2167 2168 struct deppg_info { 2169 graph_vertex_t *v; /* GVT_INST vertex */ 2170 int err; /* return error */ 2171 int **pathp; /* return circular dependency path */ 2172 }; 2173 2174 /* 2175 * Make info->v depend on a new GVT_GROUP node for this property group, 2176 * and then call process_dependency_fmri() for the values of the entity 2177 * property. Return 0 on success, or if something goes wrong return nonzero 2178 * and set info->err to ECONNABORTED, EINVAL, or the error code returned by 2179 * process_dependency_fmri(). 2180 */ 2181 static int 2182 process_dependency_pg(scf_propertygroup_t *pg, struct deppg_info *info) 2183 { 2184 scf_handle_t *h; 2185 depgroup_type_t deptype; 2186 struct depfmri_info linfo; 2187 char *fmri, *pg_name; 2188 size_t fmri_sz; 2189 graph_vertex_t *depgrp; 2190 scf_property_t *prop; 2191 int err; 2192 int empty; 2193 scf_error_t scferr; 2194 ssize_t len; 2195 2196 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 2197 2198 h = scf_pg_handle(pg); 2199 2200 pg_name = startd_alloc(max_scf_name_size); 2201 2202 len = scf_pg_get_name(pg, pg_name, max_scf_name_size); 2203 if (len < 0) { 2204 startd_free(pg_name, max_scf_name_size); 2205 switch (scf_error()) { 2206 case SCF_ERROR_CONNECTION_BROKEN: 2207 default: 2208 return (info->err = ECONNABORTED); 2209 2210 case SCF_ERROR_DELETED: 2211 return (info->err = 0); 2212 2213 case SCF_ERROR_NOT_SET: 2214 bad_error("scf_pg_get_name", scf_error()); 2215 } 2216 } 2217 2218 /* 2219 * Skip over empty dependency groups. Since dependency property 2220 * groups are updated atomically, they are either empty or 2221 * fully populated. 
2222 */ 2223 empty = depgroup_empty(h, pg); 2224 if (empty < 0) { 2225 log_error(LOG_INFO, 2226 "Error reading dependency group \"%s\" of %s: %s\n", 2227 pg_name, info->v->gv_name, scf_strerror(scf_error())); 2228 startd_free(pg_name, max_scf_name_size); 2229 return (info->err = EINVAL); 2230 2231 } else if (empty == 1) { 2232 log_framework(LOG_DEBUG, 2233 "Ignoring empty dependency group \"%s\" of %s\n", 2234 pg_name, info->v->gv_name); 2235 startd_free(pg_name, max_scf_name_size); 2236 return (info->err = 0); 2237 } 2238 2239 fmri_sz = strlen(info->v->gv_name) + 1 + len + 1; 2240 fmri = startd_alloc(fmri_sz); 2241 2242 (void) snprintf(fmri, max_scf_name_size, "%s>%s", info->v->gv_name, 2243 pg_name); 2244 2245 /* Validate the pg before modifying the graph */ 2246 deptype = depgroup_read_grouping(h, pg); 2247 if (deptype == DEPGRP_UNSUPPORTED) { 2248 log_error(LOG_INFO, 2249 "Dependency \"%s\" of %s has an unknown grouping value.\n", 2250 pg_name, info->v->gv_name); 2251 startd_free(fmri, fmri_sz); 2252 startd_free(pg_name, max_scf_name_size); 2253 return (info->err = EINVAL); 2254 } 2255 2256 prop = safe_scf_property_create(h); 2257 2258 if (scf_pg_get_property(pg, SCF_PROPERTY_ENTITIES, prop) != 0) { 2259 scferr = scf_error(); 2260 scf_property_destroy(prop); 2261 if (scferr == SCF_ERROR_DELETED) { 2262 startd_free(fmri, fmri_sz); 2263 startd_free(pg_name, max_scf_name_size); 2264 return (info->err = 0); 2265 } else if (scferr != SCF_ERROR_NOT_FOUND) { 2266 startd_free(fmri, fmri_sz); 2267 startd_free(pg_name, max_scf_name_size); 2268 return (info->err = ECONNABORTED); 2269 } 2270 2271 log_error(LOG_INFO, 2272 "Dependency \"%s\" of %s is missing a \"%s\" property.\n", 2273 pg_name, info->v->gv_name, SCF_PROPERTY_ENTITIES); 2274 2275 startd_free(fmri, fmri_sz); 2276 startd_free(pg_name, max_scf_name_size); 2277 2278 return (info->err = EINVAL); 2279 } 2280 2281 /* Create depgroup vertex for pg */ 2282 err = graph_insert_vertex_unconfigured(fmri, GVT_GROUP, deptype, 2283 depgroup_read_restart(h, pg), &depgrp); 2284 assert(err == 0); 2285 startd_free(fmri, fmri_sz); 2286 2287 /* Add dependency from inst vertex to new vertex */ 2288 err = graph_insert_dependency(info->v, depgrp, info->pathp); 2289 /* ELOOP can't happen because this should be a new vertex */ 2290 assert(err == 0); 2291 2292 linfo.v = depgrp; 2293 linfo.type = depgroup_read_scheme(h, pg); 2294 linfo.inst_fmri = info->v->gv_name; 2295 linfo.pg_name = pg_name; 2296 linfo.h = h; 2297 linfo.err = 0; 2298 linfo.pathp = info->pathp; 2299 err = walk_property_astrings(prop, (callback_t)process_dependency_fmri, 2300 &linfo); 2301 2302 scf_property_destroy(prop); 2303 startd_free(pg_name, max_scf_name_size); 2304 2305 switch (err) { 2306 case 0: 2307 case EINTR: 2308 return (info->err = linfo.err); 2309 2310 case ECONNABORTED: 2311 case EINVAL: 2312 return (info->err = err); 2313 2314 case ECANCELED: 2315 return (info->err = 0); 2316 2317 case ECONNRESET: 2318 return (info->err = ECONNABORTED); 2319 2320 default: 2321 bad_error("walk_property_astrings", err); 2322 /* NOTREACHED */ 2323 } 2324 } 2325 2326 /* 2327 * Build the dependency info for v from the repository. Returns 0 on success, 2328 * ECONNABORTED on repository disconnection, EINVAL if the repository 2329 * configuration is invalid, and ELOOP if a dependency would cause a cycle. 2330 * In the last case, *pathp will point to a -1-terminated array of ids which 2331 * constitute the rest of the dependency cycle. 
2332 */ 2333 static int 2334 set_dependencies(graph_vertex_t *v, scf_instance_t *inst, int **pathp) 2335 { 2336 struct deppg_info info; 2337 int err; 2338 uint_t old_configured; 2339 2340 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 2341 2342 /* 2343 * Mark the vertex as configured during dependency insertion to avoid 2344 * dependency cycles (which can appear in the graph if one of the 2345 * vertices is an exclusion-group). 2346 */ 2347 old_configured = v->gv_flags & GV_CONFIGURED; 2348 v->gv_flags |= GV_CONFIGURED; 2349 2350 info.err = 0; 2351 info.v = v; 2352 info.pathp = pathp; 2353 2354 err = walk_dependency_pgs(inst, (callback_t)process_dependency_pg, 2355 &info); 2356 2357 if (!old_configured) 2358 v->gv_flags &= ~GV_CONFIGURED; 2359 2360 switch (err) { 2361 case 0: 2362 case EINTR: 2363 return (info.err); 2364 2365 case ECONNABORTED: 2366 return (ECONNABORTED); 2367 2368 case ECANCELED: 2369 /* Should get delete event, so return 0. */ 2370 return (0); 2371 2372 default: 2373 bad_error("walk_dependency_pgs", err); 2374 /* NOTREACHED */ 2375 } 2376 } 2377 2378 2379 static void 2380 handle_cycle(const char *fmri, int *path) 2381 { 2382 const char *cp; 2383 size_t sz; 2384 2385 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 2386 2387 path_to_str(path, (char **)&cp, &sz); 2388 2389 log_error(LOG_ERR, "Putting service %s into maintenance " 2390 "because it completes a dependency cycle:\n%s", fmri ? fmri : "?", 2391 cp); 2392 2393 startd_free((void *)cp, sz); 2394 } 2395 2396 /* 2397 * When run on the dependencies of a vertex, populates list with 2398 * graph_edge_t's which point to the instance vertices (no GVT_GROUP nodes) 2399 * on which the vertex depends. 2400 */ 2401 static int 2402 append_insts(graph_edge_t *e, uu_list_t *list) 2403 { 2404 graph_vertex_t *v = e->ge_vertex; 2405 graph_edge_t *new; 2406 int r; 2407 2408 switch (v->gv_type) { 2409 case GVT_INST: 2410 case GVT_SVC: 2411 break; 2412 2413 case GVT_GROUP: 2414 r = uu_list_walk(v->gv_dependencies, 2415 (uu_walk_fn_t *)append_insts, list, 0); 2416 assert(r == 0); 2417 return (UU_WALK_NEXT); 2418 2419 case GVT_FILE: 2420 return (UU_WALK_NEXT); 2421 2422 default: 2423 #ifndef NDEBUG 2424 uu_warn("%s:%d: Unexpected vertex type %d.\n", __FILE__, 2425 __LINE__, v->gv_type); 2426 #endif 2427 abort(); 2428 } 2429 2430 new = startd_alloc(sizeof (*new)); 2431 new->ge_vertex = v; 2432 uu_list_node_init(new, &new->ge_link, graph_edge_pool); 2433 r = uu_list_insert_before(list, NULL, new); 2434 assert(r == 0); 2435 return (UU_WALK_NEXT); 2436 } 2437 2438 static boolean_t 2439 should_be_in_subgraph(graph_vertex_t *v) 2440 { 2441 graph_edge_t *e; 2442 2443 if (v == milestone) 2444 return (B_TRUE); 2445 2446 /* 2447 * v is in the subgraph if any of its dependents are in the subgraph. 2448 * Except for EXCLUDE_ALL dependents. And OPTIONAL dependents only 2449 * count if we're enabled. 2450 */ 2451 for (e = uu_list_first(v->gv_dependents); 2452 e != NULL; 2453 e = uu_list_next(v->gv_dependents, e)) { 2454 graph_vertex_t *dv = e->ge_vertex; 2455 2456 if (!(dv->gv_flags & GV_INSUBGRAPH)) 2457 continue; 2458 2459 /* 2460 * Don't include instances that are optional and disabled. 
2461 */ 2462 if (v->gv_type == GVT_INST && dv->gv_type == GVT_SVC) { 2463 2464 int in = 0; 2465 graph_edge_t *ee; 2466 2467 for (ee = uu_list_first(dv->gv_dependents); 2468 ee != NULL; 2469 ee = uu_list_next(dv->gv_dependents, ee)) { 2470 2471 graph_vertex_t *ddv = e->ge_vertex; 2472 2473 if (ddv->gv_type == GVT_GROUP && 2474 ddv->gv_depgroup == DEPGRP_EXCLUDE_ALL) 2475 continue; 2476 2477 if (ddv->gv_type == GVT_GROUP && 2478 ddv->gv_depgroup == DEPGRP_OPTIONAL_ALL && 2479 !(v->gv_flags & GV_ENBLD_NOOVR)) 2480 continue; 2481 2482 in = 1; 2483 } 2484 if (!in) 2485 continue; 2486 } 2487 if (v->gv_type == GVT_INST && 2488 dv->gv_type == GVT_GROUP && 2489 dv->gv_depgroup == DEPGRP_OPTIONAL_ALL && 2490 !(v->gv_flags & GV_ENBLD_NOOVR)) 2491 continue; 2492 2493 /* Don't include excluded services and instances */ 2494 if (dv->gv_type == GVT_GROUP && 2495 dv->gv_depgroup == DEPGRP_EXCLUDE_ALL) 2496 continue; 2497 2498 return (B_TRUE); 2499 } 2500 2501 return (B_FALSE); 2502 } 2503 2504 /* 2505 * Ensures that GV_INSUBGRAPH is set properly for v and its descendents. If 2506 * any bits change, manipulate the repository appropriately. Returns 0 or 2507 * ECONNABORTED. 2508 */ 2509 static int 2510 eval_subgraph(graph_vertex_t *v, scf_handle_t *h) 2511 { 2512 boolean_t old = (v->gv_flags & GV_INSUBGRAPH) != 0; 2513 boolean_t new; 2514 graph_edge_t *e; 2515 scf_instance_t *inst; 2516 int ret = 0, r; 2517 2518 assert(milestone != NULL && milestone != MILESTONE_NONE); 2519 2520 new = should_be_in_subgraph(v); 2521 2522 if (new == old) 2523 return (0); 2524 2525 log_framework(LOG_DEBUG, new ? "Adding %s to the subgraph.\n" : 2526 "Removing %s from the subgraph.\n", v->gv_name); 2527 2528 v->gv_flags = (v->gv_flags & ~GV_INSUBGRAPH) | 2529 (new ? GV_INSUBGRAPH : 0); 2530 2531 if (v->gv_type == GVT_INST && (v->gv_flags & GV_CONFIGURED)) { 2532 int err; 2533 2534 get_inst: 2535 err = libscf_fmri_get_instance(h, v->gv_name, &inst); 2536 if (err != 0) { 2537 switch (err) { 2538 case ECONNABORTED: 2539 libscf_handle_rebind(h); 2540 ret = ECONNABORTED; 2541 goto get_inst; 2542 2543 case ENOENT: 2544 break; 2545 2546 case EINVAL: 2547 case ENOTSUP: 2548 default: 2549 bad_error("libscf_fmri_get_instance", err); 2550 } 2551 } else { 2552 const char *f; 2553 2554 if (new) { 2555 err = libscf_delete_enable_ovr(inst); 2556 f = "libscf_delete_enable_ovr"; 2557 } else { 2558 err = libscf_set_enable_ovr(inst, 0); 2559 f = "libscf_set_enable_ovr"; 2560 } 2561 scf_instance_destroy(inst); 2562 switch (err) { 2563 case 0: 2564 case ECANCELED: 2565 break; 2566 2567 case ECONNABORTED: 2568 libscf_handle_rebind(h); 2569 /* 2570 * We must continue so the graph is updated, 2571 * but we must return ECONNABORTED so any 2572 * libscf state held by any callers is reset. 2573 */ 2574 ret = ECONNABORTED; 2575 goto get_inst; 2576 2577 case EROFS: 2578 case EPERM: 2579 log_error(LOG_WARNING, 2580 "Could not set %s/%s for %s: %s.\n", 2581 SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 2582 v->gv_name, strerror(err)); 2583 break; 2584 2585 default: 2586 bad_error(f, err); 2587 } 2588 } 2589 } 2590 2591 for (e = uu_list_first(v->gv_dependencies); 2592 e != NULL; 2593 e = uu_list_next(v->gv_dependencies, e)) { 2594 r = eval_subgraph(e->ge_vertex, h); 2595 if (r != 0) { 2596 assert(r == ECONNABORTED); 2597 ret = ECONNABORTED; 2598 } 2599 } 2600 2601 return (ret); 2602 } 2603 2604 /* 2605 * Delete the (property group) dependencies of v & create new ones based on 2606 * inst. 
If doing so would create a cycle, log a message and put the instance 2607 * into maintenance. Update GV_INSUBGRAPH flags as necessary. Returns 0 or 2608 * ECONNABORTED. 2609 */ 2610 static int 2611 refresh_vertex(graph_vertex_t *v, scf_instance_t *inst) 2612 { 2613 int err; 2614 int *path; 2615 char *fmri; 2616 int r; 2617 scf_handle_t *h = scf_instance_handle(inst); 2618 uu_list_t *old_deps; 2619 int ret = 0; 2620 graph_edge_t *e; 2621 2622 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 2623 assert(v->gv_type == GVT_INST); 2624 2625 log_framework(LOG_DEBUG, "Graph engine: Refreshing %s.\n", v->gv_name); 2626 2627 if (milestone > MILESTONE_NONE) { 2628 /* 2629 * In case some of v's dependencies are being deleted we must 2630 * make a list of them now for GV_INSUBGRAPH-flag evaluation 2631 * after the new dependencies are in place. 2632 */ 2633 old_deps = startd_list_create(graph_edge_pool, NULL, 0); 2634 2635 err = uu_list_walk(v->gv_dependencies, 2636 (uu_walk_fn_t *)append_insts, old_deps, 0); 2637 assert(err == 0); 2638 } 2639 2640 delete_instance_dependencies(v, B_FALSE); 2641 2642 err = set_dependencies(v, inst, &path); 2643 switch (err) { 2644 case 0: 2645 break; 2646 2647 case ECONNABORTED: 2648 ret = err; 2649 goto out; 2650 2651 case EINVAL: 2652 case ELOOP: 2653 r = libscf_instance_get_fmri(inst, &fmri); 2654 switch (r) { 2655 case 0: 2656 break; 2657 2658 case ECONNABORTED: 2659 ret = ECONNABORTED; 2660 goto out; 2661 2662 case ECANCELED: 2663 ret = 0; 2664 goto out; 2665 2666 default: 2667 bad_error("libscf_instance_get_fmri", r); 2668 } 2669 2670 if (err == EINVAL) { 2671 log_error(LOG_ERR, "Transitioning %s " 2672 "to maintenance due to misconfiguration.\n", 2673 fmri ? fmri : "?"); 2674 vertex_send_event(v, 2675 RESTARTER_EVENT_TYPE_INVALID_DEPENDENCY); 2676 } else { 2677 handle_cycle(fmri, path); 2678 vertex_send_event(v, 2679 RESTARTER_EVENT_TYPE_DEPENDENCY_CYCLE); 2680 } 2681 startd_free(fmri, max_scf_fmri_size); 2682 ret = 0; 2683 goto out; 2684 2685 default: 2686 bad_error("set_dependencies", err); 2687 } 2688 2689 if (milestone > MILESTONE_NONE) { 2690 boolean_t aborted = B_FALSE; 2691 2692 for (e = uu_list_first(old_deps); 2693 e != NULL; 2694 e = uu_list_next(old_deps, e)) { 2695 if (eval_subgraph(e->ge_vertex, h) == 2696 ECONNABORTED) 2697 aborted = B_TRUE; 2698 } 2699 2700 for (e = uu_list_first(v->gv_dependencies); 2701 e != NULL; 2702 e = uu_list_next(v->gv_dependencies, e)) { 2703 if (eval_subgraph(e->ge_vertex, h) == 2704 ECONNABORTED) 2705 aborted = B_TRUE; 2706 } 2707 2708 if (aborted) { 2709 ret = ECONNABORTED; 2710 goto out; 2711 } 2712 } 2713 2714 if (v->gv_state == RESTARTER_STATE_OFFLINE) { 2715 if (instance_satisfied(v, B_FALSE) == 1) { 2716 if (v->gv_start_f == NULL) 2717 vertex_send_event(v, 2718 RESTARTER_EVENT_TYPE_START); 2719 else 2720 v->gv_start_f(v); 2721 } 2722 } 2723 2724 ret = 0; 2725 2726 out: 2727 if (milestone > MILESTONE_NONE) { 2728 void *cookie = NULL; 2729 2730 while ((e = uu_list_teardown(old_deps, &cookie)) != NULL) 2731 startd_free(e, sizeof (*e)); 2732 2733 uu_list_destroy(old_deps); 2734 } 2735 2736 return (ret); 2737 } 2738 2739 /* 2740 * Set up v according to inst. That is, make sure it depends on its 2741 * restarter and set up its dependencies. Send the ADD_INSTANCE command to 2742 * the restarter, and send ENABLE or DISABLE as appropriate. 2743 * 2744 * Returns 0 on success, ECONNABORTED on repository disconnection, or 2745 * ECANCELED if inst is deleted. 
2746 */ 2747 static int 2748 configure_vertex(graph_vertex_t *v, scf_instance_t *inst) 2749 { 2750 scf_handle_t *h; 2751 scf_propertygroup_t *pg; 2752 scf_snapshot_t *snap; 2753 char *restarter_fmri = startd_alloc(max_scf_value_size); 2754 int enabled, enabled_ovr; 2755 int err; 2756 int *path; 2757 2758 restarter_fmri[0] = '\0'; 2759 2760 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 2761 assert(v->gv_type == GVT_INST); 2762 assert((v->gv_flags & GV_CONFIGURED) == 0); 2763 2764 /* GV_INSUBGRAPH should already be set properly. */ 2765 assert(should_be_in_subgraph(v) == 2766 ((v->gv_flags & GV_INSUBGRAPH) != 0)); 2767 2768 log_framework(LOG_DEBUG, "Graph adding %s.\n", v->gv_name); 2769 2770 h = scf_instance_handle(inst); 2771 2772 /* 2773 * If the instance does not have a restarter property group, 2774 * initialize its state to uninitialized/none, in case the restarter 2775 * is not enabled. 2776 */ 2777 pg = safe_scf_pg_create(h); 2778 2779 if (scf_instance_get_pg(inst, SCF_PG_RESTARTER, pg) != 0) { 2780 instance_data_t idata; 2781 uint_t count = 0, msecs = ALLOC_DELAY; 2782 2783 switch (scf_error()) { 2784 case SCF_ERROR_NOT_FOUND: 2785 break; 2786 2787 case SCF_ERROR_CONNECTION_BROKEN: 2788 default: 2789 scf_pg_destroy(pg); 2790 return (ECONNABORTED); 2791 2792 case SCF_ERROR_DELETED: 2793 scf_pg_destroy(pg); 2794 return (ECANCELED); 2795 2796 case SCF_ERROR_NOT_SET: 2797 bad_error("scf_instance_get_pg", scf_error()); 2798 } 2799 2800 switch (err = libscf_instance_get_fmri(inst, 2801 (char **)&idata.i_fmri)) { 2802 case 0: 2803 break; 2804 2805 case ECONNABORTED: 2806 case ECANCELED: 2807 scf_pg_destroy(pg); 2808 return (err); 2809 2810 default: 2811 bad_error("libscf_instance_get_fmri", err); 2812 } 2813 2814 idata.i_state = RESTARTER_STATE_NONE; 2815 idata.i_next_state = RESTARTER_STATE_NONE; 2816 2817 init_state: 2818 switch (err = _restarter_commit_states(h, &idata, 2819 RESTARTER_STATE_UNINIT, RESTARTER_STATE_NONE, NULL)) { 2820 case 0: 2821 break; 2822 2823 case ENOMEM: 2824 ++count; 2825 if (count < ALLOC_RETRY) { 2826 (void) poll(NULL, 0, msecs); 2827 msecs *= ALLOC_DELAY_MULT; 2828 goto init_state; 2829 } 2830 2831 uu_die("Insufficient memory.\n"); 2832 /* NOTREACHED */ 2833 2834 case ECONNABORTED: 2835 scf_pg_destroy(pg); 2836 return (ECONNABORTED); 2837 2838 case ENOENT: 2839 scf_pg_destroy(pg); 2840 return (ECANCELED); 2841 2842 case EPERM: 2843 case EACCES: 2844 case EROFS: 2845 log_error(LOG_NOTICE, "Could not initialize state for " 2846 "%s: %s.\n", idata.i_fmri, strerror(err)); 2847 break; 2848 2849 case EINVAL: 2850 default: 2851 bad_error("_restarter_commit_states", err); 2852 } 2853 2854 startd_free((void *)idata.i_fmri, max_scf_fmri_size); 2855 } 2856 2857 scf_pg_destroy(pg); 2858 2859 if (milestone != NULL) { 2860 /* 2861 * Make sure the enable-override is set properly before we 2862 * read whether we should be enabled. 
2863 */ 2864 if (milestone == MILESTONE_NONE || 2865 !(v->gv_flags & GV_INSUBGRAPH)) { 2866 switch (err = libscf_set_enable_ovr(inst, 0)) { 2867 case 0: 2868 break; 2869 2870 case ECONNABORTED: 2871 case ECANCELED: 2872 return (err); 2873 2874 case EROFS: 2875 log_error(LOG_WARNING, 2876 "Could not set %s/%s for %s: %s.\n", 2877 SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 2878 v->gv_name, strerror(err)); 2879 break; 2880 2881 case EPERM: 2882 uu_die("Permission denied.\n"); 2883 /* NOTREACHED */ 2884 2885 default: 2886 bad_error("libscf_set_enable_ovr", err); 2887 } 2888 } else { 2889 assert(v->gv_flags & GV_INSUBGRAPH); 2890 switch (err = libscf_delete_enable_ovr(inst)) { 2891 case 0: 2892 break; 2893 2894 case ECONNABORTED: 2895 case ECANCELED: 2896 return (err); 2897 2898 case EPERM: 2899 uu_die("Permission denied.\n"); 2900 /* NOTREACHED */ 2901 2902 default: 2903 bad_error("libscf_delete_enable_ovr", err); 2904 } 2905 } 2906 } 2907 2908 err = libscf_get_basic_instance_data(h, inst, v->gv_name, &enabled, 2909 &enabled_ovr, &restarter_fmri); 2910 switch (err) { 2911 case 0: 2912 break; 2913 2914 case ECONNABORTED: 2915 case ECANCELED: 2916 startd_free(restarter_fmri, max_scf_value_size); 2917 return (err); 2918 2919 case ENOENT: 2920 log_framework(LOG_DEBUG, 2921 "Ignoring %s because it has no general property group.\n", 2922 v->gv_name); 2923 startd_free(restarter_fmri, max_scf_value_size); 2924 return (0); 2925 2926 default: 2927 bad_error("libscf_get_basic_instance_data", err); 2928 } 2929 2930 if (enabled == -1) { 2931 startd_free(restarter_fmri, max_scf_value_size); 2932 return (0); 2933 } 2934 2935 v->gv_flags = (v->gv_flags & ~GV_ENBLD_NOOVR) | 2936 (enabled ? GV_ENBLD_NOOVR : 0); 2937 2938 if (enabled_ovr != -1) 2939 enabled = enabled_ovr; 2940 2941 v->gv_state = RESTARTER_STATE_UNINIT; 2942 2943 snap = libscf_get_or_make_running_snapshot(inst, v->gv_name, B_TRUE); 2944 scf_snapshot_destroy(snap); 2945 2946 /* Set up the restarter. (Sends _ADD_INSTANCE on success.) */ 2947 err = graph_change_restarter(v, restarter_fmri, h, &path); 2948 if (err != 0) { 2949 instance_data_t idata; 2950 uint_t count = 0, msecs = ALLOC_DELAY; 2951 const char *reason; 2952 2953 if (err == ECONNABORTED) { 2954 startd_free(restarter_fmri, max_scf_value_size); 2955 return (err); 2956 } 2957 2958 assert(err == EINVAL || err == ELOOP); 2959 2960 if (err == EINVAL) { 2961 log_framework(LOG_WARNING, emsg_invalid_restarter, 2962 v->gv_name); 2963 reason = "invalid_restarter"; 2964 } else { 2965 handle_cycle(v->gv_name, path); 2966 reason = "dependency_cycle"; 2967 } 2968 2969 startd_free(restarter_fmri, max_scf_value_size); 2970 2971 /* 2972 * We didn't register the instance with the restarter, so we 2973 * must set maintenance mode ourselves. 
2974 */ 2975 err = libscf_instance_get_fmri(inst, (char **)&idata.i_fmri); 2976 if (err != 0) { 2977 assert(err == ECONNABORTED || err == ECANCELED); 2978 return (err); 2979 } 2980 2981 idata.i_state = RESTARTER_STATE_NONE; 2982 idata.i_next_state = RESTARTER_STATE_NONE; 2983 2984 set_maint: 2985 switch (err = _restarter_commit_states(h, &idata, 2986 RESTARTER_STATE_MAINT, RESTARTER_STATE_NONE, reason)) { 2987 case 0: 2988 break; 2989 2990 case ENOMEM: 2991 ++count; 2992 if (count < ALLOC_RETRY) { 2993 (void) poll(NULL, 0, msecs); 2994 msecs *= ALLOC_DELAY_MULT; 2995 goto set_maint; 2996 } 2997 2998 uu_die("Insufficient memory.\n"); 2999 /* NOTREACHED */ 3000 3001 case ECONNABORTED: 3002 return (ECONNABORTED); 3003 3004 case ENOENT: 3005 return (ECANCELED); 3006 3007 case EPERM: 3008 case EACCES: 3009 case EROFS: 3010 log_error(LOG_NOTICE, "Could not initialize state for " 3011 "%s: %s.\n", idata.i_fmri, strerror(err)); 3012 break; 3013 3014 case EINVAL: 3015 default: 3016 bad_error("_restarter_commit_states", err); 3017 } 3018 3019 startd_free((void *)idata.i_fmri, max_scf_fmri_size); 3020 3021 v->gv_state = RESTARTER_STATE_MAINT; 3022 3023 goto out; 3024 } 3025 startd_free(restarter_fmri, max_scf_value_size); 3026 3027 /* Add all the other dependencies. */ 3028 err = refresh_vertex(v, inst); 3029 if (err != 0) { 3030 assert(err == ECONNABORTED); 3031 return (err); 3032 } 3033 3034 out: 3035 v->gv_flags |= GV_CONFIGURED; 3036 3037 graph_enable_by_vertex(v, enabled, 0); 3038 3039 return (0); 3040 } 3041 3042 static void 3043 do_uadmin(void) 3044 { 3045 int fd, left; 3046 struct statvfs vfs; 3047 3048 const char * const resetting = "/etc/svc/volatile/resetting"; 3049 3050 fd = creat(resetting, 0777); 3051 if (fd >= 0) 3052 startd_close(fd); 3053 else 3054 uu_warn("Could not create \"%s\"", resetting); 3055 3056 /* Kill dhcpagent if we're not using nfs for root */ 3057 if ((statvfs("/", &vfs) == 0) && 3058 (strncmp(vfs.f_basetype, "nfs", sizeof ("nfs") - 1) != 0)) 3059 (void) system("/usr/bin/pkill -x -u 0 dhcpagent"); 3060 3061 (void) system("/usr/sbin/killall"); 3062 left = 5; 3063 while (left > 0) 3064 left = sleep(left); 3065 3066 (void) system("/usr/sbin/killall 9"); 3067 left = 10; 3068 while (left > 0) 3069 left = sleep(left); 3070 3071 sync(); 3072 sync(); 3073 sync(); 3074 3075 (void) system("/sbin/umountall"); 3076 (void) system("/sbin/umount /tmp >/dev/null 2>&1"); 3077 (void) system("/sbin/umount /var/adm >/dev/null 2>&1"); 3078 (void) system("/sbin/umount /var/run >/dev/null 2>&1"); 3079 (void) system("/sbin/umount /var >/dev/null 2>&1"); 3080 (void) system("/sbin/umount /usr >/dev/null 2>&1"); 3081 3082 uu_warn("The system is down.\n"); 3083 3084 (void) uadmin(A_SHUTDOWN, halting, NULL); 3085 uu_warn("uadmin() failed"); 3086 3087 if (remove(resetting) != 0 && errno != ENOENT) 3088 uu_warn("Could not remove \"%s\"", resetting); 3089 } 3090 3091 /* 3092 * If any of the up_svcs[] are online or satisfiable, return true. If they are 3093 * all missing, disabled, in maintenance, or unsatisfiable, return false. 3094 */ 3095 boolean_t 3096 can_come_up(void) 3097 { 3098 int i; 3099 3100 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 3101 3102 /* 3103 * If we are booting to single user (boot -s), 3104 * SCF_MILESTONE_SINGLE_USER is needed to come up because startd 3105 * spawns sulogin after single-user is online (see specials.c). 3106 */ 3107 i = (booting_to_single_user ? 
0 : 1); 3108 3109 for (; up_svcs[i] != NULL; ++i) { 3110 if (up_svcs_p[i] == NULL) { 3111 up_svcs_p[i] = vertex_get_by_name(up_svcs[i]); 3112 3113 if (up_svcs_p[i] == NULL) 3114 continue; 3115 } 3116 3117 /* 3118 * Ignore unconfigured services (the ones that have been 3119 * mentioned in a dependency from other services, but do 3120 * not exist in the repository). Services which exist 3121 * in the repository but don't have general/enabled 3122 * property will be also ignored. 3123 */ 3124 if (!(up_svcs_p[i]->gv_flags & GV_CONFIGURED)) 3125 continue; 3126 3127 switch (up_svcs_p[i]->gv_state) { 3128 case RESTARTER_STATE_ONLINE: 3129 case RESTARTER_STATE_DEGRADED: 3130 /* 3131 * Deactivate verbose boot once a login service has been 3132 * reached. 3133 */ 3134 st->st_log_login_reached = 1; 3135 /*FALLTHROUGH*/ 3136 case RESTARTER_STATE_UNINIT: 3137 return (B_TRUE); 3138 3139 case RESTARTER_STATE_OFFLINE: 3140 if (instance_satisfied(up_svcs_p[i], B_TRUE) != -1) 3141 return (B_TRUE); 3142 log_framework(LOG_DEBUG, 3143 "can_come_up(): %s is unsatisfiable.\n", 3144 up_svcs_p[i]->gv_name); 3145 continue; 3146 3147 case RESTARTER_STATE_DISABLED: 3148 case RESTARTER_STATE_MAINT: 3149 log_framework(LOG_DEBUG, 3150 "can_come_up(): %s is in state %s.\n", 3151 up_svcs_p[i]->gv_name, 3152 instance_state_str[up_svcs_p[i]->gv_state]); 3153 continue; 3154 3155 default: 3156 #ifndef NDEBUG 3157 uu_warn("%s:%d: Unexpected vertex state %d.\n", 3158 __FILE__, __LINE__, up_svcs_p[i]->gv_state); 3159 #endif 3160 abort(); 3161 } 3162 } 3163 3164 /* 3165 * In the seed repository, console-login is unsatisfiable because 3166 * services are missing. To behave correctly in that case we don't want 3167 * to return false until manifest-import is online. 3168 */ 3169 3170 if (manifest_import_p == NULL) { 3171 manifest_import_p = vertex_get_by_name(manifest_import); 3172 3173 if (manifest_import_p == NULL) 3174 return (B_FALSE); 3175 } 3176 3177 switch (manifest_import_p->gv_state) { 3178 case RESTARTER_STATE_ONLINE: 3179 case RESTARTER_STATE_DEGRADED: 3180 case RESTARTER_STATE_DISABLED: 3181 case RESTARTER_STATE_MAINT: 3182 break; 3183 3184 case RESTARTER_STATE_OFFLINE: 3185 if (instance_satisfied(manifest_import_p, B_TRUE) == -1) 3186 break; 3187 /* FALLTHROUGH */ 3188 3189 case RESTARTER_STATE_UNINIT: 3190 return (B_TRUE); 3191 } 3192 3193 return (B_FALSE); 3194 } 3195 3196 /* 3197 * Runs sulogin. Returns 3198 * 0 - success 3199 * EALREADY - sulogin is already running 3200 * EBUSY - console-login is running 3201 */ 3202 static int 3203 run_sulogin(const char *msg) 3204 { 3205 graph_vertex_t *v; 3206 3207 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 3208 3209 if (sulogin_running) 3210 return (EALREADY); 3211 3212 v = vertex_get_by_name(console_login_fmri); 3213 if (v != NULL && inst_running(v)) 3214 return (EBUSY); 3215 3216 sulogin_running = B_TRUE; 3217 3218 MUTEX_UNLOCK(&dgraph_lock); 3219 3220 fork_sulogin(B_FALSE, msg); 3221 3222 MUTEX_LOCK(&dgraph_lock); 3223 3224 sulogin_running = B_FALSE; 3225 3226 if (console_login_ready) { 3227 v = vertex_get_by_name(console_login_fmri); 3228 3229 if (v != NULL && v->gv_state == RESTARTER_STATE_OFFLINE && 3230 !inst_running(v)) { 3231 if (v->gv_start_f == NULL) 3232 vertex_send_event(v, 3233 RESTARTER_EVENT_TYPE_START); 3234 else 3235 v->gv_start_f(v); 3236 } 3237 3238 console_login_ready = B_FALSE; 3239 } 3240 3241 return (0); 3242 } 3243 3244 /* 3245 * The sulogin thread runs sulogin while can_come_up() is false. 
run_sulogin() 3246 * keeps sulogin from stepping on console-login's toes. 3247 */ 3248 /* ARGSUSED */ 3249 static void * 3250 sulogin_thread(void *unused) 3251 { 3252 MUTEX_LOCK(&dgraph_lock); 3253 3254 assert(sulogin_thread_running); 3255 3256 do 3257 (void) run_sulogin("Console login service(s) cannot run\n"); 3258 while (!can_come_up()); 3259 3260 sulogin_thread_running = B_FALSE; 3261 MUTEX_UNLOCK(&dgraph_lock); 3262 3263 return (NULL); 3264 } 3265 3266 /* ARGSUSED */ 3267 void * 3268 single_user_thread(void *unused) 3269 { 3270 uint_t left; 3271 scf_handle_t *h; 3272 scf_instance_t *inst; 3273 scf_property_t *prop; 3274 scf_value_t *val; 3275 const char *msg; 3276 char *buf; 3277 int r; 3278 3279 MUTEX_LOCK(&single_user_thread_lock); 3280 single_user_thread_count++; 3281 3282 if (!booting_to_single_user) { 3283 /* 3284 * From rcS.sh: Look for ttymon, in.telnetd, in.rlogind and 3285 * processes in their process groups so they can be terminated. 3286 */ 3287 (void) fputs("svc.startd: Killing user processes: ", stdout); 3288 (void) system("/usr/sbin/killall"); 3289 (void) system("/usr/sbin/killall 9"); 3290 (void) system("/usr/bin/pkill -TERM -v -u 0,1"); 3291 3292 left = 5; 3293 while (left > 0) 3294 left = sleep(left); 3295 3296 (void) system("/usr/bin/pkill -KILL -v -u 0,1"); 3297 (void) puts("done."); 3298 } 3299 3300 if (go_single_user_mode || booting_to_single_user) { 3301 msg = "SINGLE USER MODE\n"; 3302 } else { 3303 assert(go_to_level1); 3304 3305 fork_rc_script('1', "start", B_TRUE); 3306 3307 uu_warn("The system is ready for administration.\n"); 3308 3309 msg = ""; 3310 } 3311 3312 MUTEX_UNLOCK(&single_user_thread_lock); 3313 3314 for (;;) { 3315 MUTEX_LOCK(&dgraph_lock); 3316 r = run_sulogin(msg); 3317 MUTEX_UNLOCK(&dgraph_lock); 3318 if (r == 0) 3319 break; 3320 3321 assert(r == EALREADY || r == EBUSY); 3322 3323 left = 3; 3324 while (left > 0) 3325 left = sleep(left); 3326 } 3327 3328 MUTEX_LOCK(&single_user_thread_lock); 3329 3330 /* 3331 * If another single user thread has started, let it finish changing 3332 * the run level. 
3333 */ 3334 if (single_user_thread_count > 1) { 3335 single_user_thread_count--; 3336 MUTEX_UNLOCK(&single_user_thread_lock); 3337 return (NULL); 3338 } 3339 3340 h = libscf_handle_create_bound_loop(); 3341 inst = scf_instance_create(h); 3342 prop = safe_scf_property_create(h); 3343 val = safe_scf_value_create(h); 3344 buf = startd_alloc(max_scf_fmri_size); 3345 3346 lookup: 3347 if (scf_handle_decode_fmri(h, SCF_SERVICE_STARTD, NULL, NULL, inst, 3348 NULL, NULL, SCF_DECODE_FMRI_EXACT) != 0) { 3349 switch (scf_error()) { 3350 case SCF_ERROR_NOT_FOUND: 3351 r = libscf_create_self(h); 3352 if (r == 0) 3353 goto lookup; 3354 assert(r == ECONNABORTED); 3355 /* FALLTHROUGH */ 3356 3357 case SCF_ERROR_CONNECTION_BROKEN: 3358 libscf_handle_rebind(h); 3359 goto lookup; 3360 3361 case SCF_ERROR_INVALID_ARGUMENT: 3362 case SCF_ERROR_CONSTRAINT_VIOLATED: 3363 case SCF_ERROR_NOT_BOUND: 3364 case SCF_ERROR_HANDLE_MISMATCH: 3365 default: 3366 bad_error("scf_handle_decode_fmri", scf_error()); 3367 } 3368 } 3369 3370 MUTEX_LOCK(&dgraph_lock); 3371 3372 r = libscf_inst_delete_prop(inst, SCF_PG_OPTIONS_OVR, 3373 SCF_PROPERTY_MILESTONE); 3374 switch (r) { 3375 case 0: 3376 case ECANCELED: 3377 break; 3378 3379 case ECONNABORTED: 3380 MUTEX_UNLOCK(&dgraph_lock); 3381 libscf_handle_rebind(h); 3382 goto lookup; 3383 3384 case EPERM: 3385 case EACCES: 3386 case EROFS: 3387 log_error(LOG_WARNING, "Could not clear temporary milestone: " 3388 "%s.\n", strerror(r)); 3389 break; 3390 3391 default: 3392 bad_error("libscf_inst_delete_prop", r); 3393 } 3394 3395 MUTEX_UNLOCK(&dgraph_lock); 3396 3397 r = libscf_get_milestone(inst, prop, val, buf, max_scf_fmri_size); 3398 switch (r) { 3399 case ECANCELED: 3400 case ENOENT: 3401 case EINVAL: 3402 (void) strcpy(buf, "all"); 3403 /* FALLTHROUGH */ 3404 3405 case 0: 3406 uu_warn("Returning to milestone %s.\n", buf); 3407 break; 3408 3409 case ECONNABORTED: 3410 libscf_handle_rebind(h); 3411 goto lookup; 3412 3413 default: 3414 bad_error("libscf_get_milestone", r); 3415 } 3416 3417 r = dgraph_set_milestone(buf, h, B_FALSE); 3418 switch (r) { 3419 case 0: 3420 case ECONNRESET: 3421 case EALREADY: 3422 case EINVAL: 3423 case ENOENT: 3424 break; 3425 3426 default: 3427 bad_error("dgraph_set_milestone", r); 3428 } 3429 3430 /* 3431 * See graph_runlevel_changed(). 3432 */ 3433 MUTEX_LOCK(&dgraph_lock); 3434 utmpx_set_runlevel(target_milestone_as_runlevel(), 'S', B_TRUE); 3435 MUTEX_UNLOCK(&dgraph_lock); 3436 3437 startd_free(buf, max_scf_fmri_size); 3438 scf_value_destroy(val); 3439 scf_property_destroy(prop); 3440 scf_instance_destroy(inst); 3441 scf_handle_destroy(h); 3442 3443 /* 3444 * We'll give ourselves 3 seconds to respond to all of the enablings 3445 * that setting the milestone should have created before checking 3446 * whether to run sulogin. 3447 */ 3448 left = 3; 3449 while (left > 0) 3450 left = sleep(left); 3451 3452 MUTEX_LOCK(&dgraph_lock); 3453 /* 3454 * Clearing these variables will allow the sulogin thread to run. We 3455 * check here in case there aren't any more state updates anytime soon. 3456 */ 3457 go_to_level1 = go_single_user_mode = booting_to_single_user = B_FALSE; 3458 if (!sulogin_thread_running && !can_come_up()) { 3459 (void) startd_thread_create(sulogin_thread, NULL); 3460 sulogin_thread_running = B_TRUE; 3461 } 3462 MUTEX_UNLOCK(&dgraph_lock); 3463 single_user_thread_count--; 3464 MUTEX_UNLOCK(&single_user_thread_lock); 3465 return (NULL); 3466 } 3467 3468 3469 /* 3470 * Dependency graph operations API. 
These are handle-independent thread-safe 3471 * graph manipulation functions which are the entry points for the event 3472 * threads below. 3473 */ 3474 3475 /* 3476 * If a configured vertex exists for inst_fmri, return EEXIST. If no vertex 3477 * exists for inst_fmri, add one. Then fetch the restarter from inst, make 3478 * this vertex dependent on it, and send _ADD_INSTANCE to the restarter. 3479 * Fetch whether the instance should be enabled from inst and send _ENABLE or 3480 * _DISABLE as appropriate. Finally rummage through inst's dependency 3481 * property groups and add vertices and edges as appropriate. If anything 3482 * goes wrong after sending _ADD_INSTANCE, send _ADMIN_MAINT_ON to put the 3483 * instance in maintenance. Don't send _START or _STOP until we get a state 3484 * update in case we're being restarted and the service is already running. 3485 * 3486 * To support booting to a milestone, we must also make sure all dependencies 3487 * encountered are configured, if they exist in the repository. 3488 * 3489 * Returns 0 on success, ECONNABORTED on repository disconnection, EINVAL if 3490 * inst_fmri is an invalid (or not canonical) FMRI, ECANCELED if inst is 3491 * deleted, or EEXIST if a configured vertex for inst_fmri already exists. 3492 */ 3493 int 3494 dgraph_add_instance(const char *inst_fmri, scf_instance_t *inst, 3495 boolean_t lock_graph) 3496 { 3497 graph_vertex_t *v; 3498 int err; 3499 3500 if (strcmp(inst_fmri, SCF_SERVICE_STARTD) == 0) 3501 return (0); 3502 3503 /* Check for a vertex for inst_fmri. */ 3504 if (lock_graph) { 3505 MUTEX_LOCK(&dgraph_lock); 3506 } else { 3507 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 3508 } 3509 3510 v = vertex_get_by_name(inst_fmri); 3511 3512 if (v != NULL) { 3513 assert(v->gv_type == GVT_INST); 3514 3515 if (v->gv_flags & GV_CONFIGURED) { 3516 if (lock_graph) 3517 MUTEX_UNLOCK(&dgraph_lock); 3518 return (EEXIST); 3519 } 3520 } else { 3521 /* Add the vertex. */ 3522 err = graph_insert_vertex_unconfigured(inst_fmri, GVT_INST, 0, 3523 RERR_NONE, &v); 3524 if (err != 0) { 3525 assert(err == EINVAL); 3526 if (lock_graph) 3527 MUTEX_UNLOCK(&dgraph_lock); 3528 return (EINVAL); 3529 } 3530 } 3531 3532 err = configure_vertex(v, inst); 3533 3534 if (lock_graph) 3535 MUTEX_UNLOCK(&dgraph_lock); 3536 3537 return (err); 3538 } 3539 3540 /* 3541 * Locate the vertex for this property group's instance. If it doesn't exist 3542 * or is unconfigured, call dgraph_add_instance() & return. Otherwise fetch 3543 * the restarter for the instance, and if it has changed, send 3544 * _REMOVE_INSTANCE to the old restarter, remove the dependency, make sure the 3545 * new restarter has a vertex, add a new dependency, and send _ADD_INSTANCE to 3546 * the new restarter. Then fetch whether the instance should be enabled, and 3547 * if it is different from what we had, or if we changed the restarter, send 3548 * the appropriate _ENABLE or _DISABLE command. 3549 * 3550 * Returns 0 on success, ENOTSUP if the pg's parent is not an instance, 3551 * ECONNABORTED on repository disconnection, ECANCELED if the instance is 3552 * deleted, or -1 if the instance's general property group is deleted or if 3553 * its enabled property is misconfigured. 
3554 */ 3555 static int 3556 dgraph_update_general(scf_propertygroup_t *pg) 3557 { 3558 scf_handle_t *h; 3559 scf_instance_t *inst; 3560 char *fmri; 3561 char *restarter_fmri; 3562 graph_vertex_t *v; 3563 int err; 3564 int enabled, enabled_ovr; 3565 int oldflags; 3566 3567 /* Find the vertex for this service */ 3568 h = scf_pg_handle(pg); 3569 3570 inst = safe_scf_instance_create(h); 3571 3572 if (scf_pg_get_parent_instance(pg, inst) != 0) { 3573 switch (scf_error()) { 3574 case SCF_ERROR_CONSTRAINT_VIOLATED: 3575 return (ENOTSUP); 3576 3577 case SCF_ERROR_CONNECTION_BROKEN: 3578 default: 3579 return (ECONNABORTED); 3580 3581 case SCF_ERROR_DELETED: 3582 return (0); 3583 3584 case SCF_ERROR_NOT_SET: 3585 bad_error("scf_pg_get_parent_instance", scf_error()); 3586 } 3587 } 3588 3589 err = libscf_instance_get_fmri(inst, &fmri); 3590 switch (err) { 3591 case 0: 3592 break; 3593 3594 case ECONNABORTED: 3595 scf_instance_destroy(inst); 3596 return (ECONNABORTED); 3597 3598 case ECANCELED: 3599 scf_instance_destroy(inst); 3600 return (0); 3601 3602 default: 3603 bad_error("libscf_instance_get_fmri", err); 3604 } 3605 3606 log_framework(LOG_DEBUG, 3607 "Graph engine: Reloading general properties for %s.\n", fmri); 3608 3609 MUTEX_LOCK(&dgraph_lock); 3610 3611 v = vertex_get_by_name(fmri); 3612 if (v == NULL || !(v->gv_flags & GV_CONFIGURED)) { 3613 /* Will get the up-to-date properties. */ 3614 MUTEX_UNLOCK(&dgraph_lock); 3615 err = dgraph_add_instance(fmri, inst, B_TRUE); 3616 startd_free(fmri, max_scf_fmri_size); 3617 scf_instance_destroy(inst); 3618 return (err == ECANCELED ? 0 : err); 3619 } 3620 3621 /* Read enabled & restarter from repository. */ 3622 restarter_fmri = startd_alloc(max_scf_value_size); 3623 err = libscf_get_basic_instance_data(h, inst, v->gv_name, &enabled, 3624 &enabled_ovr, &restarter_fmri); 3625 if (err != 0 || enabled == -1) { 3626 MUTEX_UNLOCK(&dgraph_lock); 3627 scf_instance_destroy(inst); 3628 startd_free(fmri, max_scf_fmri_size); 3629 3630 switch (err) { 3631 case ENOENT: 3632 case 0: 3633 startd_free(restarter_fmri, max_scf_value_size); 3634 return (-1); 3635 3636 case ECONNABORTED: 3637 case ECANCELED: 3638 startd_free(restarter_fmri, max_scf_value_size); 3639 return (err); 3640 3641 default: 3642 bad_error("libscf_get_basic_instance_data", err); 3643 } 3644 } 3645 3646 oldflags = v->gv_flags; 3647 v->gv_flags = (v->gv_flags & ~GV_ENBLD_NOOVR) | 3648 (enabled ? GV_ENBLD_NOOVR : 0); 3649 3650 if (enabled_ovr != -1) 3651 enabled = enabled_ovr; 3652 3653 /* 3654 * If GV_ENBLD_NOOVR has changed, then we need to re-evaluate the 3655 * subgraph. 3656 */ 3657 if (milestone > MILESTONE_NONE && v->gv_flags != oldflags) 3658 (void) eval_subgraph(v, h); 3659 3660 scf_instance_destroy(inst); 3661 3662 /* Ignore restarter change for now. */ 3663 3664 startd_free(restarter_fmri, max_scf_value_size); 3665 startd_free(fmri, max_scf_fmri_size); 3666 3667 /* 3668 * Always send _ENABLE or _DISABLE. We could avoid this if the 3669 * restarter didn't change and the enabled value didn't change, but 3670 * that's not easy to check and improbable anyway, so we'll just do 3671 * this. 3672 */ 3673 graph_enable_by_vertex(v, enabled, 1); 3674 3675 MUTEX_UNLOCK(&dgraph_lock); 3676 3677 return (0); 3678 } 3679 3680 /* 3681 * Delete all of the property group dependencies of v, update inst's running 3682 * snapshot, and add the dependencies in the new snapshot. If any of the new 3683 * dependencies would create a cycle, send _ADMIN_MAINT_ON. 
Otherwise 3684 * reevaluate v's dependencies, send _START or _STOP as appropriate, and do 3685 * the same for v's dependents. 3686 * 3687 * Returns 3688 * 0 - success 3689 * ECONNABORTED - repository connection broken 3690 * ECANCELED - inst was deleted 3691 * EINVAL - inst is invalid (e.g., missing general/enabled) 3692 * -1 - libscf_snapshots_refresh() failed 3693 */ 3694 static int 3695 dgraph_refresh_instance(graph_vertex_t *v, scf_instance_t *inst) 3696 { 3697 int r; 3698 int enabled; 3699 3700 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 3701 assert(v->gv_type == GVT_INST); 3702 3703 /* Only refresh services with valid general/enabled properties. */ 3704 r = libscf_get_basic_instance_data(scf_instance_handle(inst), inst, 3705 v->gv_name, &enabled, NULL, NULL); 3706 switch (r) { 3707 case 0: 3708 break; 3709 3710 case ECONNABORTED: 3711 case ECANCELED: 3712 return (r); 3713 3714 case ENOENT: 3715 log_framework(LOG_DEBUG, 3716 "Ignoring %s because it has no general property group.\n", 3717 v->gv_name); 3718 return (EINVAL); 3719 3720 default: 3721 bad_error("libscf_get_basic_instance_data", r); 3722 } 3723 3724 if (enabled == -1) 3725 return (EINVAL); 3726 3727 r = libscf_snapshots_refresh(inst, v->gv_name); 3728 if (r != 0) { 3729 if (r != -1) 3730 bad_error("libscf_snapshots_refresh", r); 3731 3732 /* error logged */ 3733 return (r); 3734 } 3735 3736 r = refresh_vertex(v, inst); 3737 if (r != 0 && r != ECONNABORTED) 3738 bad_error("refresh_vertex", r); 3739 return (r); 3740 } 3741 3742 /* 3743 * Returns 1 if any instances which directly depend on the passed instance 3744 * (or it's service) are running. 3745 */ 3746 static int 3747 has_running_nonsubgraph_dependents(graph_vertex_t *v) 3748 { 3749 graph_vertex_t *vv; 3750 graph_edge_t *e; 3751 3752 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 3753 3754 for (e = uu_list_first(v->gv_dependents); 3755 e != NULL; 3756 e = uu_list_next(v->gv_dependents, e)) { 3757 3758 vv = e->ge_vertex; 3759 if (vv->gv_type == GVT_INST) { 3760 if (inst_running(vv) && 3761 ((vv->gv_flags & GV_INSUBGRAPH) == 0)) 3762 return (1); 3763 } else { 3764 /* 3765 * For dependency group or service vertices, keep 3766 * traversing to see if instances are running. 3767 */ 3768 if (has_running_nonsubgraph_dependents(vv)) 3769 return (1); 3770 } 3771 } 3772 return (0); 3773 } 3774 3775 /* 3776 * For the dependency, disable the instance which makes up the dependency if 3777 * it is not in the subgraph and running. If the dependency instance is in 3778 * the subgraph or it is not running, continue by disabling all of it's 3779 * non-subgraph dependencies. 3780 */ 3781 static void 3782 disable_nonsubgraph_dependencies(graph_vertex_t *v, void *arg) 3783 { 3784 int r; 3785 scf_handle_t *h = (scf_handle_t *)arg; 3786 scf_instance_t *inst = NULL; 3787 3788 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 3789 3790 /* Continue recursing non-inst nodes */ 3791 if (v->gv_type != GVT_INST) 3792 goto recurse; 3793 3794 /* 3795 * For instances that are in the subgraph or already not running, 3796 * skip and attempt to disable their non-dependencies. 3797 */ 3798 if ((v->gv_flags & GV_INSUBGRAPH) || (!inst_running(v))) 3799 goto recurse; 3800 3801 /* 3802 * If not all this instance's dependents have stopped 3803 * running, do not disable. 
3804 */ 3805 if (has_running_nonsubgraph_dependents(v)) 3806 return; 3807 3808 inst = scf_instance_create(h); 3809 if (inst == NULL) { 3810 log_error(LOG_WARNING, "Unable to gracefully disable instance:" 3811 " %s due to lack of resources\n", v->gv_name); 3812 goto disable; 3813 } 3814 again: 3815 r = scf_handle_decode_fmri(h, v->gv_name, NULL, NULL, inst, 3816 NULL, NULL, SCF_DECODE_FMRI_EXACT); 3817 if (r != 0) { 3818 switch (scf_error()) { 3819 case SCF_ERROR_CONNECTION_BROKEN: 3820 libscf_handle_rebind(h); 3821 goto again; 3822 3823 case SCF_ERROR_NOT_FOUND: 3824 goto recurse; 3825 3826 case SCF_ERROR_HANDLE_MISMATCH: 3827 case SCF_ERROR_INVALID_ARGUMENT: 3828 case SCF_ERROR_CONSTRAINT_VIOLATED: 3829 case SCF_ERROR_NOT_BOUND: 3830 default: 3831 bad_error("scf_handle_decode_fmri", 3832 scf_error()); 3833 } 3834 } 3835 r = libscf_set_enable_ovr(inst, 0); 3836 switch (r) { 3837 case 0: 3838 scf_instance_destroy(inst); 3839 return; 3840 case ECANCELED: 3841 scf_instance_destroy(inst); 3842 goto recurse; 3843 case ECONNABORTED: 3844 libscf_handle_rebind(h); 3845 goto again; 3846 case EPERM: 3847 case EROFS: 3848 log_error(LOG_WARNING, 3849 "Could not set %s/%s for %s: %s.\n", 3850 SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 3851 v->gv_name, strerror(r)); 3852 goto disable; 3853 default: 3854 bad_error("libscf_set_enable_ovr", r); 3855 } 3856 disable: 3857 graph_enable_by_vertex(v, 0, 0); 3858 return; 3859 recurse: 3860 graph_walk_dependencies(v, disable_nonsubgraph_dependencies, 3861 arg); 3862 } 3863 3864 /* 3865 * Find the vertex for inst_name. If it doesn't exist, return ENOENT. 3866 * Otherwise set its state to state. If the instance has entered a state 3867 * which requires automatic action, take it (Uninitialized: do 3868 * dgraph_refresh_instance() without the snapshot update. Disabled: if the 3869 * instance should be enabled, send _ENABLE. Offline: if the instance should 3870 * be disabled, send _DISABLE, and if its dependencies are satisfied, send 3871 * _START. Online, Degraded: if the instance wasn't running, update its start 3872 * snapshot. Maintenance: no action.) 3873 * 3874 * Also fails with ECONNABORTED, or EINVAL if state is invalid. 
3875 */ 3876 static int 3877 dgraph_set_instance_state(scf_handle_t *h, const char *inst_name, 3878 restarter_instance_state_t state, restarter_error_t serr) 3879 { 3880 graph_vertex_t *v; 3881 int err = 0, r; 3882 int was_running, up_or_down; 3883 restarter_instance_state_t old_state; 3884 3885 MUTEX_LOCK(&dgraph_lock); 3886 3887 v = vertex_get_by_name(inst_name); 3888 if (v == NULL) { 3889 MUTEX_UNLOCK(&dgraph_lock); 3890 return (ENOENT); 3891 } 3892 3893 switch (state) { 3894 case RESTARTER_STATE_UNINIT: 3895 case RESTARTER_STATE_DISABLED: 3896 case RESTARTER_STATE_OFFLINE: 3897 case RESTARTER_STATE_ONLINE: 3898 case RESTARTER_STATE_DEGRADED: 3899 case RESTARTER_STATE_MAINT: 3900 break; 3901 3902 default: 3903 MUTEX_UNLOCK(&dgraph_lock); 3904 return (EINVAL); 3905 } 3906 3907 log_framework(LOG_DEBUG, "Graph noting %s %s -> %s.\n", v->gv_name, 3908 instance_state_str[v->gv_state], instance_state_str[state]); 3909 3910 old_state = v->gv_state; 3911 was_running = inst_running(v); 3912 3913 v->gv_state = state; 3914 3915 up_or_down = was_running ^ inst_running(v); 3916 3917 if (up_or_down && milestone != NULL && !inst_running(v) && 3918 ((v->gv_flags & GV_INSUBGRAPH) == 0 || 3919 milestone == MILESTONE_NONE)) { 3920 --non_subgraph_svcs; 3921 if (non_subgraph_svcs == 0) { 3922 if (halting != -1) { 3923 do_uadmin(); 3924 } else if (go_single_user_mode || go_to_level1) { 3925 (void) startd_thread_create(single_user_thread, 3926 NULL); 3927 } 3928 } else { 3929 graph_walk_dependencies(v, 3930 disable_nonsubgraph_dependencies, (void *)h); 3931 } 3932 } 3933 3934 switch (state) { 3935 case RESTARTER_STATE_UNINIT: { 3936 scf_instance_t *inst; 3937 3938 /* Initialize instance by refreshing it. */ 3939 3940 err = libscf_fmri_get_instance(h, v->gv_name, &inst); 3941 switch (err) { 3942 case 0: 3943 break; 3944 3945 case ECONNABORTED: 3946 MUTEX_UNLOCK(&dgraph_lock); 3947 return (ECONNABORTED); 3948 3949 case ENOENT: 3950 MUTEX_UNLOCK(&dgraph_lock); 3951 return (0); 3952 3953 case EINVAL: 3954 case ENOTSUP: 3955 default: 3956 bad_error("libscf_fmri_get_instance", err); 3957 } 3958 3959 err = refresh_vertex(v, inst); 3960 if (err == 0) 3961 graph_enable_by_vertex(v, v->gv_flags & GV_ENABLED, 0); 3962 3963 scf_instance_destroy(inst); 3964 break; 3965 } 3966 3967 case RESTARTER_STATE_DISABLED: 3968 /* 3969 * If the instance should be disabled, no problem. Otherwise, 3970 * send an enable command, which should result in the instance 3971 * moving to OFFLINE. 3972 */ 3973 if (v->gv_flags & GV_ENABLED) { 3974 vertex_send_event(v, RESTARTER_EVENT_TYPE_ENABLE); 3975 } else if (was_running && v->gv_post_disable_f) { 3976 v->gv_post_disable_f(); 3977 } 3978 break; 3979 3980 case RESTARTER_STATE_OFFLINE: 3981 /* 3982 * If the instance should be enabled, see if we can start it. 3983 * Otherwise send a disable command. 3984 */ 3985 if (v->gv_flags & GV_ENABLED) { 3986 if (instance_satisfied(v, B_FALSE) == 1) { 3987 if (v->gv_start_f == NULL) { 3988 vertex_send_event(v, 3989 RESTARTER_EVENT_TYPE_START); 3990 } else { 3991 v->gv_start_f(v); 3992 } 3993 } else { 3994 log_framework(LOG_DEBUG, 3995 "Dependencies of %s not satisfied, " 3996 "not starting.\n", v->gv_name); 3997 } 3998 } else { 3999 if (was_running && v->gv_post_disable_f) 4000 v->gv_post_disable_f(); 4001 vertex_send_event(v, RESTARTER_EVENT_TYPE_DISABLE); 4002 } 4003 break; 4004 4005 case RESTARTER_STATE_ONLINE: 4006 case RESTARTER_STATE_DEGRADED: 4007 /* 4008 * If the instance has just come up, update the start 4009 * snapshot. 
4010 */ 4011 if (!was_running) { 4012 /* 4013 * Don't fire if we're just recovering state 4014 * after a restart. 4015 */ 4016 if (old_state != RESTARTER_STATE_UNINIT && 4017 v->gv_post_online_f) 4018 v->gv_post_online_f(); 4019 4020 r = libscf_snapshots_poststart(h, v->gv_name, B_TRUE); 4021 switch (r) { 4022 case 0: 4023 case ENOENT: 4024 /* 4025 * If ENOENT, the instance must have been 4026 * deleted. Pretend we were successful since 4027 * we should get a delete event later. 4028 */ 4029 break; 4030 4031 case ECONNABORTED: 4032 MUTEX_UNLOCK(&dgraph_lock); 4033 return (ECONNABORTED); 4034 4035 case EACCES: 4036 case ENOTSUP: 4037 default: 4038 bad_error("libscf_snapshots_poststart", r); 4039 } 4040 } 4041 if (!(v->gv_flags & GV_ENABLED)) 4042 vertex_send_event(v, RESTARTER_EVENT_TYPE_DISABLE); 4043 break; 4044 4045 case RESTARTER_STATE_MAINT: 4046 /* No action. */ 4047 break; 4048 4049 default: 4050 /* Should have been caught above. */ 4051 #ifndef NDEBUG 4052 uu_warn("%s:%d: Uncaught case %d.\n", __FILE__, __LINE__, 4053 state); 4054 #endif 4055 abort(); 4056 } 4057 4058 /* 4059 * If the service came up or went down, propagate the event. We must 4060 * treat offline -> disabled as a start since it can satisfy 4061 * optional_all dependencies. And we must treat !running -> maintenance 4062 * as a start because maintenance satisfies optional and exclusion 4063 * dependencies. 4064 */ 4065 if (inst_running(v)) { 4066 if (!was_running) { 4067 log_framework(LOG_DEBUG, "Propagating start of %s.\n", 4068 v->gv_name); 4069 4070 graph_walk_dependents(v, propagate_start, NULL); 4071 } else if (serr == RERR_REFRESH) { 4072 /* For refresh we'll get a message sans state change */ 4073 4074 log_framework(LOG_DEBUG, "Propagating refresh of %s.\n", 4075 v->gv_name); 4076 4077 graph_walk_dependents(v, propagate_stop, (void *)serr); 4078 } 4079 } else if (was_running) { 4080 log_framework(LOG_DEBUG, "Propagating stop of %s.\n", 4081 v->gv_name); 4082 4083 graph_walk_dependents(v, propagate_stop, (void *)serr); 4084 } else if (v->gv_state == RESTARTER_STATE_DISABLED) { 4085 log_framework(LOG_DEBUG, "Propagating disable of %s.\n", 4086 v->gv_name); 4087 4088 graph_walk_dependents(v, propagate_start, NULL); 4089 propagate_satbility(v); 4090 } else if (v->gv_state == RESTARTER_STATE_MAINT) { 4091 log_framework(LOG_DEBUG, "Propagating maintenance of %s.\n", 4092 v->gv_name); 4093 4094 graph_walk_dependents(v, propagate_start, NULL); 4095 propagate_satbility(v); 4096 } 4097 4098 if (state != old_state && st->st_load_complete && 4099 !go_single_user_mode && !go_to_level1 && 4100 halting == -1) { 4101 if (!can_come_up() && !sulogin_thread_running) { 4102 (void) startd_thread_create(sulogin_thread, NULL); 4103 sulogin_thread_running = B_TRUE; 4104 } 4105 } 4106 4107 MUTEX_UNLOCK(&dgraph_lock); 4108 4109 return (err); 4110 } 4111 4112 4113 static void 4114 remove_inst_vertex(graph_vertex_t *v) 4115 { 4116 graph_edge_t *e; 4117 graph_vertex_t *sv; 4118 int i; 4119 4120 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 4121 assert(uu_list_numnodes(v->gv_dependents) == 1); 4122 4123 e = uu_list_first(v->gv_dependents); 4124 sv = e->ge_vertex; 4125 graph_remove_edge(sv, v); 4126 4127 for (i = 0; up_svcs[i] != NULL; ++i) { 4128 if (up_svcs_p[i] == v) 4129 up_svcs_p[i] = NULL; 4130 } 4131 4132 if (manifest_import_p == v) 4133 manifest_import_p = NULL; 4134 4135 graph_remove_vertex(v); 4136 4137 if (uu_list_numnodes(sv->gv_dependencies) == 0 && 4138 uu_list_numnodes(sv->gv_dependents) == 0) 4139 graph_remove_vertex(sv); 4140 } 
4141 4142 /* 4143 * If a vertex for fmri exists and it is enabled, send _DISABLE to the 4144 * restarter. If it is running, send _STOP. Send _REMOVE_INSTANCE. Delete 4145 * all property group dependencies, and the dependency on the restarter, 4146 * disposing of vertices as appropriate. If other vertices depend on this 4147 * one, mark it unconfigured and return. Otherwise remove the vertex. Always 4148 * returns 0. 4149 */ 4150 static int 4151 dgraph_remove_instance(const char *fmri, scf_handle_t *h) 4152 { 4153 graph_vertex_t *v; 4154 graph_edge_t *e; 4155 uu_list_t *old_deps; 4156 int err; 4157 4158 log_framework(LOG_DEBUG, "Graph engine: Removing %s.\n", fmri); 4159 4160 MUTEX_LOCK(&dgraph_lock); 4161 4162 v = vertex_get_by_name(fmri); 4163 if (v == NULL) { 4164 MUTEX_UNLOCK(&dgraph_lock); 4165 return (0); 4166 } 4167 4168 /* Send restarter delete event. */ 4169 if (v->gv_flags & GV_CONFIGURED) 4170 graph_unset_restarter(v); 4171 4172 if (milestone > MILESTONE_NONE) { 4173 /* 4174 * Make a list of v's current dependencies so we can 4175 * reevaluate their GV_INSUBGRAPH flags after the dependencies 4176 * are removed. 4177 */ 4178 old_deps = startd_list_create(graph_edge_pool, NULL, 0); 4179 4180 err = uu_list_walk(v->gv_dependencies, 4181 (uu_walk_fn_t *)append_insts, old_deps, 0); 4182 assert(err == 0); 4183 } 4184 4185 delete_instance_dependencies(v, B_TRUE); 4186 4187 /* 4188 * Deleting an instance can both satisfy and unsatisfy dependencies, 4189 * depending on their type. First propagate the stop as a RERR_RESTART 4190 * event -- deletion isn't a fault, just a normal stop. This gives 4191 * dependent services the chance to do a clean shutdown. Then, mark 4192 * the service as unconfigured and propagate the start event for the 4193 * optional_all dependencies that might have become satisfied. 4194 */ 4195 graph_walk_dependents(v, propagate_stop, (void *)RERR_RESTART); 4196 4197 v->gv_flags &= ~GV_CONFIGURED; 4198 4199 graph_walk_dependents(v, propagate_start, NULL); 4200 propagate_satbility(v); 4201 4202 /* 4203 * If there are no (non-service) dependents, the vertex can be 4204 * completely removed. 4205 */ 4206 if (v != milestone && uu_list_numnodes(v->gv_dependents) == 1) 4207 remove_inst_vertex(v); 4208 4209 if (milestone > MILESTONE_NONE) { 4210 void *cookie = NULL; 4211 4212 while ((e = uu_list_teardown(old_deps, &cookie)) != NULL) { 4213 while (eval_subgraph(e->ge_vertex, h) == ECONNABORTED) 4214 libscf_handle_rebind(h); 4215 4216 startd_free(e, sizeof (*e)); 4217 } 4218 4219 uu_list_destroy(old_deps); 4220 } 4221 4222 MUTEX_UNLOCK(&dgraph_lock); 4223 4224 return (0); 4225 } 4226 4227 /* 4228 * Return the eventual (maybe current) milestone in the form of a 4229 * legacy runlevel. 
4230 */ 4231 static char 4232 target_milestone_as_runlevel() 4233 { 4234 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 4235 4236 if (milestone == NULL) 4237 return ('3'); 4238 else if (milestone == MILESTONE_NONE) 4239 return ('0'); 4240 4241 if (strcmp(milestone->gv_name, multi_user_fmri) == 0) 4242 return ('2'); 4243 else if (strcmp(milestone->gv_name, single_user_fmri) == 0) 4244 return ('S'); 4245 else if (strcmp(milestone->gv_name, multi_user_svr_fmri) == 0) 4246 return ('3'); 4247 4248 #ifndef NDEBUG 4249 (void) fprintf(stderr, "%s:%d: Unknown milestone name \"%s\".\n", 4250 __FILE__, __LINE__, milestone->gv_name); 4251 #endif 4252 abort(); 4253 /* NOTREACHED */ 4254 } 4255 4256 static struct { 4257 char rl; 4258 int sig; 4259 } init_sigs[] = { 4260 { 'S', SIGBUS }, 4261 { '0', SIGINT }, 4262 { '1', SIGQUIT }, 4263 { '2', SIGILL }, 4264 { '3', SIGTRAP }, 4265 { '4', SIGIOT }, 4266 { '5', SIGEMT }, 4267 { '6', SIGFPE }, 4268 { 0, 0 } 4269 }; 4270 4271 static void 4272 signal_init(char rl) 4273 { 4274 pid_t init_pid; 4275 int i; 4276 4277 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 4278 4279 if (zone_getattr(getzoneid(), ZONE_ATTR_INITPID, &init_pid, 4280 sizeof (init_pid)) != sizeof (init_pid)) { 4281 log_error(LOG_NOTICE, "Could not get pid to signal init.\n"); 4282 return; 4283 } 4284 4285 for (i = 0; init_sigs[i].rl != 0; ++i) 4286 if (init_sigs[i].rl == rl) 4287 break; 4288 4289 if (init_sigs[i].rl != 0) { 4290 if (kill(init_pid, init_sigs[i].sig) != 0) { 4291 switch (errno) { 4292 case EPERM: 4293 case ESRCH: 4294 log_error(LOG_NOTICE, "Could not signal init: " 4295 "%s.\n", strerror(errno)); 4296 break; 4297 4298 case EINVAL: 4299 default: 4300 bad_error("kill", errno); 4301 } 4302 } 4303 } 4304 } 4305 4306 /* 4307 * This is called when one of the major milestones changes state, or when 4308 * init is signalled and tells us it was told to change runlevel. We wait 4309 * to reach the milestone because this allows /etc/inittab entries to retain 4310 * some boot ordering: historically, entries could place themselves before/after 4311 * the running of /sbin/rcX scripts but we can no longer make the 4312 * distinction because the /sbin/rcX scripts no longer exist as punctuation 4313 * marks in /etc/inittab. 4314 * 4315 * Also, we only trigger an update when we reach the eventual target 4316 * milestone: without this, an /etc/inittab entry marked only for 4317 * runlevel 2 would be executed for runlevel 3, which is not how 4318 * /etc/inittab entries work. 4319 * 4320 * If we're single user coming online, then we set utmpx to the target 4321 * runlevel so that legacy scripts can work as expected. 4322 */ 4323 static void 4324 graph_runlevel_changed(char rl, int online) 4325 { 4326 char trl; 4327 4328 assert(PTHREAD_MUTEX_HELD(&dgraph_lock)); 4329 4330 trl = target_milestone_as_runlevel(); 4331 4332 if (online) { 4333 if (rl == trl) { 4334 signal_init(trl); 4335 current_runlevel = rl; 4336 } else if (rl == 'S') { 4337 /* 4338 * At boot, set the entry early for the benefit of the 4339 * legacy init scripts. 4340 */ 4341 utmpx_set_runlevel(trl, 'S', B_FALSE); 4342 } 4343 } else { 4344 if (rl == '3' && trl == '2') { 4345 signal_init(trl); 4346 current_runlevel = rl; 4347 } else if (rl == '2' && trl == 'S') { 4348 signal_init(trl); 4349 current_runlevel = rl; 4350 } 4351 } 4352 } 4353 4354 /* 4355 * Move to a backwards-compatible runlevel by executing the appropriate 4356 * /etc/rc?.d/K* scripts and/or setting the milestone. 
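 *
 * A rough sketch of the mapping applied below:
 *
 *	'S', '1'	milestone single_user_fmri
 *	'2'		milestone multi_user_fmri
 *	'3', '4'	milestone "all"
 *	'0', '5', '6'	milestone "none"; halting is set so that do_uadmin()
 *			runs once the services outside the new milestone have
 *			come down
 *
 * Runlevels flagged with mark_rl (and the 4 -> 3 transition) also signal
 * init directly via signal_init(), since they may not trigger a milestone
 * change on their own.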
4357 * 4358 * Returns 4359 * 0 - success 4360 * ECONNRESET - success, but handle was reset 4361 * ECONNABORTED - repository connection broken 4362 * ECANCELED - pg was deleted 4363 */ 4364 static int 4365 dgraph_set_runlevel(scf_propertygroup_t *pg, scf_property_t *prop) 4366 { 4367 char rl; 4368 scf_handle_t *h; 4369 int r; 4370 const char *ms = NULL; /* what to commit as options/milestone */ 4371 boolean_t rebound = B_FALSE; 4372 int mark_rl = 0; 4373 4374 const char * const stop = "stop"; 4375 4376 r = libscf_extract_runlevel(prop, &rl); 4377 switch (r) { 4378 case 0: 4379 break; 4380 4381 case ECONNABORTED: 4382 case ECANCELED: 4383 return (r); 4384 4385 case EINVAL: 4386 case ENOENT: 4387 log_error(LOG_WARNING, "runlevel property is misconfigured; " 4388 "ignoring.\n"); 4389 /* delete the bad property */ 4390 goto nolock_out; 4391 4392 default: 4393 bad_error("libscf_extract_runlevel", r); 4394 } 4395 4396 switch (rl) { 4397 case 's': 4398 rl = 'S'; 4399 /* FALLTHROUGH */ 4400 4401 case 'S': 4402 case '2': 4403 case '3': 4404 /* 4405 * These cases cause a milestone change, so 4406 * graph_runlevel_changed() will eventually deal with 4407 * signalling init. 4408 */ 4409 break; 4410 4411 case '0': 4412 case '1': 4413 case '4': 4414 case '5': 4415 case '6': 4416 mark_rl = 1; 4417 break; 4418 4419 default: 4420 log_framework(LOG_NOTICE, "Unknown runlevel '%c'.\n", rl); 4421 ms = NULL; 4422 goto nolock_out; 4423 } 4424 4425 h = scf_pg_handle(pg); 4426 4427 MUTEX_LOCK(&dgraph_lock); 4428 4429 /* 4430 * Since this triggers no milestone changes, force it by hand. 4431 */ 4432 if (current_runlevel == '4' && rl == '3') 4433 mark_rl = 1; 4434 4435 if (rl == current_runlevel) { 4436 ms = NULL; 4437 goto out; 4438 } 4439 4440 log_framework(LOG_DEBUG, "Changing to runlevel '%c'.\n", rl); 4441 4442 /* 4443 * Make sure stop rc scripts see the new settings via who -r. 4444 */ 4445 utmpx_set_runlevel(rl, current_runlevel, B_TRUE); 4446 4447 /* 4448 * Some run levels don't have a direct correspondence to any 4449 * milestones, so we have to signal init directly. 4450 */ 4451 if (mark_rl) { 4452 current_runlevel = rl; 4453 signal_init(rl); 4454 } 4455 4456 switch (rl) { 4457 case 'S': 4458 uu_warn("The system is coming down for administration. " 4459 "Please wait.\n"); 4460 fork_rc_script(rl, stop, B_FALSE); 4461 ms = single_user_fmri; 4462 go_single_user_mode = B_TRUE; 4463 break; 4464 4465 case '0': 4466 fork_rc_script(rl, stop, B_TRUE); 4467 halting = AD_HALT; 4468 goto uadmin; 4469 4470 case '5': 4471 fork_rc_script(rl, stop, B_TRUE); 4472 halting = AD_POWEROFF; 4473 goto uadmin; 4474 4475 case '6': 4476 fork_rc_script(rl, stop, B_TRUE); 4477 halting = AD_BOOT; 4478 goto uadmin; 4479 4480 uadmin: 4481 uu_warn("The system is coming down. Please wait.\n"); 4482 ms = "none"; 4483 4484 /* 4485 * We can't wait until all services are offline since this 4486 * thread is responsible for taking them offline. Instead we 4487 * set halting to the second argument for uadmin() and call 4488 * do_uadmin() from dgraph_set_instance_state() when 4489 * appropriate. 4490 */ 4491 break; 4492 4493 case '1': 4494 if (current_runlevel != 'S') { 4495 uu_warn("Changing to state 1.\n"); 4496 fork_rc_script(rl, stop, B_FALSE); 4497 } else { 4498 uu_warn("The system is coming up for administration. 
" 4499 "Please wait.\n"); 4500 } 4501 ms = single_user_fmri; 4502 go_to_level1 = B_TRUE; 4503 break; 4504 4505 case '2': 4506 if (current_runlevel == '3' || current_runlevel == '4') 4507 fork_rc_script(rl, stop, B_FALSE); 4508 ms = multi_user_fmri; 4509 break; 4510 4511 case '3': 4512 case '4': 4513 ms = "all"; 4514 break; 4515 4516 default: 4517 #ifndef NDEBUG 4518 (void) fprintf(stderr, "%s:%d: Uncaught case %d ('%c').\n", 4519 __FILE__, __LINE__, rl, rl); 4520 #endif 4521 abort(); 4522 } 4523 4524 out: 4525 MUTEX_UNLOCK(&dgraph_lock); 4526 4527 nolock_out: 4528 switch (r = libscf_clear_runlevel(pg, ms)) { 4529 case 0: 4530 break; 4531 4532 case ECONNABORTED: 4533 libscf_handle_rebind(h); 4534 rebound = B_TRUE; 4535 goto nolock_out; 4536 4537 case ECANCELED: 4538 break; 4539 4540 case EPERM: 4541 case EACCES: 4542 case EROFS: 4543 log_error(LOG_NOTICE, "Could not delete \"%s/%s\" property: " 4544 "%s.\n", SCF_PG_OPTIONS, "runlevel", strerror(r)); 4545 break; 4546 4547 default: 4548 bad_error("libscf_clear_runlevel", r); 4549 } 4550 4551 return (rebound ? ECONNRESET : 0); 4552 } 4553 4554 static int 4555 mark_subgraph(graph_edge_t *e, void *arg) 4556 { 4557 graph_vertex_t *v; 4558 int r; 4559 int optional = (int)arg; 4560 4561 v = e->ge_vertex; 4562 4563 /* If it's already in the subgraph, skip. */ 4564 if (v->gv_flags & GV_INSUBGRAPH) 4565 return (UU_WALK_NEXT); 4566 4567 /* 4568 * Keep track if walk has entered an optional dependency group 4569 */ 4570 if (v->gv_type == GVT_GROUP && v->gv_depgroup == DEPGRP_OPTIONAL_ALL) { 4571 optional = 1; 4572 } 4573 /* 4574 * Quit if we are in an optional dependency group and the instance 4575 * is disabled 4576 */ 4577 if (optional && (v->gv_type == GVT_INST) && 4578 (!(v->gv_flags & GV_ENBLD_NOOVR))) 4579 return (UU_WALK_NEXT); 4580 4581 v->gv_flags |= GV_INSUBGRAPH; 4582 4583 /* Skip all excluded dependencies. */ 4584 if (v->gv_type == GVT_GROUP && v->gv_depgroup == DEPGRP_EXCLUDE_ALL) 4585 return (UU_WALK_NEXT); 4586 4587 r = uu_list_walk(v->gv_dependencies, (uu_walk_fn_t *)mark_subgraph, 4588 (void *)optional, 0); 4589 assert(r == 0); 4590 return (UU_WALK_NEXT); 4591 } 4592 4593 /* 4594 * "Restrict" the graph to dependencies of fmri. We implement it by walking 4595 * all services, override-disabling those which are not descendents of the 4596 * instance, and removing any enable-override for the rest. milestone is set 4597 * to the vertex which represents fmri so that the other graph operations may 4598 * act appropriately. 4599 * 4600 * If norepository is true, the function will not change the repository. 
4601 * 4602 * Returns 4603 * 0 - success 4604 * ECONNRESET - success, but handle was rebound 4605 * EINVAL - fmri is invalid (error is logged) 4606 * EALREADY - the milestone is already set to fmri 4607 * ENOENT - a configured vertex does not exist for fmri (an error is logged) 4608 */ 4609 static int 4610 dgraph_set_milestone(const char *fmri, scf_handle_t *h, boolean_t norepository) 4611 { 4612 const char *cfmri, *fs; 4613 graph_vertex_t *nm, *v; 4614 int ret = 0, r; 4615 scf_instance_t *inst; 4616 boolean_t isall, isnone, rebound = B_FALSE; 4617 4618 /* Validate fmri */ 4619 isall = (strcmp(fmri, "all") == 0); 4620 isnone = (strcmp(fmri, "none") == 0); 4621 4622 if (!isall && !isnone) { 4623 if (fmri_canonify(fmri, (char **)&cfmri, B_FALSE) == EINVAL) 4624 goto reject; 4625 4626 if (strcmp(cfmri, single_user_fmri) != 0 && 4627 strcmp(cfmri, multi_user_fmri) != 0 && 4628 strcmp(cfmri, multi_user_svr_fmri) != 0) { 4629 startd_free((void *)cfmri, max_scf_fmri_size); 4630 reject: 4631 log_framework(LOG_WARNING, 4632 "Rejecting request for invalid milestone \"%s\".\n", 4633 fmri); 4634 return (EINVAL); 4635 } 4636 } 4637 4638 inst = safe_scf_instance_create(h); 4639 4640 MUTEX_LOCK(&dgraph_lock); 4641 4642 if (milestone == NULL) { 4643 if (isall) { 4644 log_framework(LOG_DEBUG, 4645 "Milestone already set to all.\n"); 4646 ret = EALREADY; 4647 goto out; 4648 } 4649 } else if (milestone == MILESTONE_NONE) { 4650 if (isnone) { 4651 log_framework(LOG_DEBUG, 4652 "Milestone already set to none.\n"); 4653 ret = EALREADY; 4654 goto out; 4655 } 4656 } else { 4657 if (!isall && !isnone && 4658 strcmp(cfmri, milestone->gv_name) == 0) { 4659 log_framework(LOG_DEBUG, 4660 "Milestone already set to %s.\n", cfmri); 4661 ret = EALREADY; 4662 goto out; 4663 } 4664 } 4665 4666 if (!isall && !isnone) { 4667 nm = vertex_get_by_name(cfmri); 4668 if (nm == NULL || !(nm->gv_flags & GV_CONFIGURED)) { 4669 log_framework(LOG_WARNING, "Cannot set milestone to %s " 4670 "because no such service exists.\n", cfmri); 4671 ret = ENOENT; 4672 goto out; 4673 } 4674 } 4675 4676 log_framework(LOG_DEBUG, "Changing milestone to %s.\n", fmri); 4677 4678 /* 4679 * Set milestone, removing the old one if this was the last reference. 4680 */ 4681 if (milestone > MILESTONE_NONE && 4682 (milestone->gv_flags & GV_CONFIGURED) == 0) 4683 remove_inst_vertex(milestone); 4684 4685 if (isall) 4686 milestone = NULL; 4687 else if (isnone) 4688 milestone = MILESTONE_NONE; 4689 else 4690 milestone = nm; 4691 4692 /* Clear all GV_INSUBGRAPH bits. */ 4693 for (v = uu_list_first(dgraph); v != NULL; v = uu_list_next(dgraph, v)) 4694 v->gv_flags &= ~GV_INSUBGRAPH; 4695 4696 if (!isall && !isnone) { 4697 /* Set GV_INSUBGRAPH for milestone & descendents. */ 4698 milestone->gv_flags |= GV_INSUBGRAPH; 4699 4700 r = uu_list_walk(milestone->gv_dependencies, 4701 (uu_walk_fn_t *)mark_subgraph, NULL, 0); 4702 assert(r == 0); 4703 } 4704 4705 /* Un-override services in the subgraph & override-disable the rest. 
*/ 4706 if (norepository) 4707 goto out; 4708 4709 non_subgraph_svcs = 0; 4710 for (v = uu_list_first(dgraph); 4711 v != NULL; 4712 v = uu_list_next(dgraph, v)) { 4713 if (v->gv_type != GVT_INST || 4714 (v->gv_flags & GV_CONFIGURED) == 0) 4715 continue; 4716 4717 again: 4718 r = scf_handle_decode_fmri(h, v->gv_name, NULL, NULL, inst, 4719 NULL, NULL, SCF_DECODE_FMRI_EXACT); 4720 if (r != 0) { 4721 switch (scf_error()) { 4722 case SCF_ERROR_CONNECTION_BROKEN: 4723 default: 4724 libscf_handle_rebind(h); 4725 rebound = B_TRUE; 4726 goto again; 4727 4728 case SCF_ERROR_NOT_FOUND: 4729 continue; 4730 4731 case SCF_ERROR_HANDLE_MISMATCH: 4732 case SCF_ERROR_INVALID_ARGUMENT: 4733 case SCF_ERROR_CONSTRAINT_VIOLATED: 4734 case SCF_ERROR_NOT_BOUND: 4735 bad_error("scf_handle_decode_fmri", 4736 scf_error()); 4737 } 4738 } 4739 4740 if (isall || (v->gv_flags & GV_INSUBGRAPH)) { 4741 r = libscf_delete_enable_ovr(inst); 4742 fs = "libscf_delete_enable_ovr"; 4743 } else { 4744 assert(isnone || (v->gv_flags & GV_INSUBGRAPH) == 0); 4745 4746 if (inst_running(v)) 4747 ++non_subgraph_svcs; 4748 4749 if (has_running_nonsubgraph_dependents(v)) 4750 continue; 4751 4752 r = libscf_set_enable_ovr(inst, 0); 4753 fs = "libscf_set_enable_ovr"; 4754 } 4755 switch (r) { 4756 case 0: 4757 case ECANCELED: 4758 break; 4759 4760 case ECONNABORTED: 4761 libscf_handle_rebind(h); 4762 rebound = B_TRUE; 4763 goto again; 4764 4765 case EPERM: 4766 case EROFS: 4767 log_error(LOG_WARNING, 4768 "Could not set %s/%s for %s: %s.\n", 4769 SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 4770 v->gv_name, strerror(r)); 4771 break; 4772 4773 default: 4774 bad_error(fs, r); 4775 } 4776 } 4777 4778 if (halting != -1) { 4779 if (non_subgraph_svcs > 1) 4780 uu_warn("%d system services are now being stopped.\n", 4781 non_subgraph_svcs); 4782 else if (non_subgraph_svcs == 1) 4783 uu_warn("One system service is now being stopped.\n"); 4784 else if (non_subgraph_svcs == 0) 4785 do_uadmin(); 4786 } 4787 4788 ret = rebound ? ECONNRESET : 0; 4789 4790 out: 4791 MUTEX_UNLOCK(&dgraph_lock); 4792 if (!isall && !isnone) 4793 startd_free((void *)cfmri, max_scf_fmri_size); 4794 scf_instance_destroy(inst); 4795 return (ret); 4796 } 4797 4798 4799 /* 4800 * Returns 0, ECONNABORTED, or EINVAL. 4801 */ 4802 static int 4803 handle_graph_update_event(scf_handle_t *h, graph_protocol_event_t *e) 4804 { 4805 int r; 4806 4807 switch (e->gpe_type) { 4808 case GRAPH_UPDATE_RELOAD_GRAPH: 4809 log_error(LOG_WARNING, 4810 "graph_event: reload graph unimplemented\n"); 4811 break; 4812 4813 case GRAPH_UPDATE_STATE_CHANGE: { 4814 protocol_states_t *states = e->gpe_data; 4815 4816 switch (r = dgraph_set_instance_state(h, e->gpe_inst, 4817 states->ps_state, states->ps_err)) { 4818 case 0: 4819 case ENOENT: 4820 break; 4821 4822 case ECONNABORTED: 4823 return (ECONNABORTED); 4824 4825 case EINVAL: 4826 default: 4827 #ifndef NDEBUG 4828 (void) fprintf(stderr, "dgraph_set_instance_state() " 4829 "failed with unexpected error %d at %s:%d.\n", r, 4830 __FILE__, __LINE__); 4831 #endif 4832 abort(); 4833 } 4834 4835 startd_free(states, sizeof (protocol_states_t)); 4836 break; 4837 } 4838 4839 default: 4840 log_error(LOG_WARNING, 4841 "graph_event_loop received an unknown event: %d\n", 4842 e->gpe_type); 4843 break; 4844 } 4845 4846 return (0); 4847 } 4848 4849 /* 4850 * graph_event_thread() 4851 * Wait for state changes from the restarters. 
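 *
 * The loop below sleeps on gu->gu_cv until gu->gu_wakeup is set, then drains
 * the queue with graph_event_dequeue(), handing each event to
 * handle_graph_update_event().  ECONNABORTED from the handler causes the
 * repository handle to be rebound and the same event to be retried; any
 * other failure requeues the event, and success releases it.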
4852 */ 4853 /*ARGSUSED*/ 4854 void * 4855 graph_event_thread(void *unused) 4856 { 4857 scf_handle_t *h; 4858 int err; 4859 4860 h = libscf_handle_create_bound_loop(); 4861 4862 /*CONSTCOND*/ 4863 while (1) { 4864 graph_protocol_event_t *e; 4865 4866 MUTEX_LOCK(&gu->gu_lock); 4867 4868 while (gu->gu_wakeup == 0) 4869 (void) pthread_cond_wait(&gu->gu_cv, &gu->gu_lock); 4870 4871 gu->gu_wakeup = 0; 4872 4873 while ((e = graph_event_dequeue()) != NULL) { 4874 MUTEX_LOCK(&e->gpe_lock); 4875 MUTEX_UNLOCK(&gu->gu_lock); 4876 4877 while ((err = handle_graph_update_event(h, e)) == 4878 ECONNABORTED) 4879 libscf_handle_rebind(h); 4880 4881 if (err == 0) 4882 graph_event_release(e); 4883 else 4884 graph_event_requeue(e); 4885 4886 MUTEX_LOCK(&gu->gu_lock); 4887 } 4888 4889 MUTEX_UNLOCK(&gu->gu_lock); 4890 } 4891 4892 /* 4893 * Unreachable for now -- there's currently no graceful cleanup 4894 * called on exit(). 4895 */ 4896 MUTEX_UNLOCK(&gu->gu_lock); 4897 scf_handle_destroy(h); 4898 return (NULL); 4899 } 4900 4901 static void 4902 set_initial_milestone(scf_handle_t *h) 4903 { 4904 scf_instance_t *inst; 4905 char *fmri, *cfmri; 4906 size_t sz; 4907 int r; 4908 4909 inst = safe_scf_instance_create(h); 4910 fmri = startd_alloc(max_scf_fmri_size); 4911 4912 /* 4913 * If -m milestone= was specified, we want to set options_ovr/milestone 4914 * to it. Otherwise we want to read what the milestone should be set 4915 * to. Either way we need our inst. 4916 */ 4917 get_self: 4918 if (scf_handle_decode_fmri(h, SCF_SERVICE_STARTD, NULL, NULL, inst, 4919 NULL, NULL, SCF_DECODE_FMRI_EXACT) != 0) { 4920 switch (scf_error()) { 4921 case SCF_ERROR_CONNECTION_BROKEN: 4922 libscf_handle_rebind(h); 4923 goto get_self; 4924 4925 case SCF_ERROR_NOT_FOUND: 4926 if (st->st_subgraph != NULL && 4927 st->st_subgraph[0] != '\0') { 4928 sz = strlcpy(fmri, st->st_subgraph, 4929 max_scf_fmri_size); 4930 assert(sz < max_scf_fmri_size); 4931 } else { 4932 fmri[0] = '\0'; 4933 } 4934 break; 4935 4936 case SCF_ERROR_INVALID_ARGUMENT: 4937 case SCF_ERROR_CONSTRAINT_VIOLATED: 4938 case SCF_ERROR_HANDLE_MISMATCH: 4939 default: 4940 bad_error("scf_handle_decode_fmri", scf_error()); 4941 } 4942 } else { 4943 if (st->st_subgraph != NULL && st->st_subgraph[0] != '\0') { 4944 scf_propertygroup_t *pg; 4945 4946 pg = safe_scf_pg_create(h); 4947 4948 sz = strlcpy(fmri, st->st_subgraph, max_scf_fmri_size); 4949 assert(sz < max_scf_fmri_size); 4950 4951 r = libscf_inst_get_or_add_pg(inst, SCF_PG_OPTIONS_OVR, 4952 SCF_PG_OPTIONS_OVR_TYPE, SCF_PG_OPTIONS_OVR_FLAGS, 4953 pg); 4954 switch (r) { 4955 case 0: 4956 break; 4957 4958 case ECONNABORTED: 4959 libscf_handle_rebind(h); 4960 goto get_self; 4961 4962 case EPERM: 4963 case EACCES: 4964 case EROFS: 4965 log_error(LOG_WARNING, "Could not set %s/%s: " 4966 "%s.\n", SCF_PG_OPTIONS_OVR, 4967 SCF_PROPERTY_MILESTONE, strerror(r)); 4968 /* FALLTHROUGH */ 4969 4970 case ECANCELED: 4971 sz = strlcpy(fmri, st->st_subgraph, 4972 max_scf_fmri_size); 4973 assert(sz < max_scf_fmri_size); 4974 break; 4975 4976 default: 4977 bad_error("libscf_inst_get_or_add_pg", r); 4978 } 4979 4980 r = libscf_clear_runlevel(pg, fmri); 4981 switch (r) { 4982 case 0: 4983 break; 4984 4985 case ECONNABORTED: 4986 libscf_handle_rebind(h); 4987 goto get_self; 4988 4989 case EPERM: 4990 case EACCES: 4991 case EROFS: 4992 log_error(LOG_WARNING, "Could not set %s/%s: " 4993 "%s.\n", SCF_PG_OPTIONS_OVR, 4994 SCF_PROPERTY_MILESTONE, strerror(r)); 4995 /* FALLTHROUGH */ 4996 4997 case ECANCELED: 4998 sz = strlcpy(fmri, st->st_subgraph, 
4999 max_scf_fmri_size); 5000 assert(sz < max_scf_fmri_size); 5001 break; 5002 5003 default: 5004 bad_error("libscf_clear_runlevel", r); 5005 } 5006 5007 scf_pg_destroy(pg); 5008 } else { 5009 scf_property_t *prop; 5010 scf_value_t *val; 5011 5012 prop = safe_scf_property_create(h); 5013 val = safe_scf_value_create(h); 5014 5015 r = libscf_get_milestone(inst, prop, val, fmri, 5016 max_scf_fmri_size); 5017 switch (r) { 5018 case 0: 5019 break; 5020 5021 case ECONNABORTED: 5022 libscf_handle_rebind(h); 5023 goto get_self; 5024 5025 case EINVAL: 5026 log_error(LOG_WARNING, "Milestone property is " 5027 "misconfigured. Defaulting to \"all\".\n"); 5028 /* FALLTHROUGH */ 5029 5030 case ECANCELED: 5031 case ENOENT: 5032 fmri[0] = '\0'; 5033 break; 5034 5035 default: 5036 bad_error("libscf_get_milestone", r); 5037 } 5038 5039 scf_value_destroy(val); 5040 scf_property_destroy(prop); 5041 } 5042 } 5043 5044 if (fmri[0] == '\0' || strcmp(fmri, "all") == 0) 5045 goto out; 5046 5047 if (strcmp(fmri, "none") != 0) { 5048 retry: 5049 if (scf_handle_decode_fmri(h, fmri, NULL, NULL, inst, NULL, 5050 NULL, SCF_DECODE_FMRI_EXACT) != 0) { 5051 switch (scf_error()) { 5052 case SCF_ERROR_INVALID_ARGUMENT: 5053 log_error(LOG_WARNING, 5054 "Requested milestone \"%s\" is invalid. " 5055 "Reverting to \"all\".\n", fmri); 5056 goto out; 5057 5058 case SCF_ERROR_CONSTRAINT_VIOLATED: 5059 log_error(LOG_WARNING, "Requested milestone " 5060 "\"%s\" does not specify an instance. " 5061 "Reverting to \"all\".\n", fmri); 5062 goto out; 5063 5064 case SCF_ERROR_CONNECTION_BROKEN: 5065 libscf_handle_rebind(h); 5066 goto retry; 5067 5068 case SCF_ERROR_NOT_FOUND: 5069 log_error(LOG_WARNING, "Requested milestone " 5070 "\"%s\" not in repository. Reverting to " 5071 "\"all\".\n", fmri); 5072 goto out; 5073 5074 case SCF_ERROR_HANDLE_MISMATCH: 5075 default: 5076 bad_error("scf_handle_decode_fmri", 5077 scf_error()); 5078 } 5079 } 5080 5081 r = fmri_canonify(fmri, &cfmri, B_FALSE); 5082 assert(r == 0); 5083 5084 r = dgraph_add_instance(cfmri, inst, B_TRUE); 5085 startd_free(cfmri, max_scf_fmri_size); 5086 switch (r) { 5087 case 0: 5088 break; 5089 5090 case ECONNABORTED: 5091 goto retry; 5092 5093 case EINVAL: 5094 log_error(LOG_WARNING, 5095 "Requested milestone \"%s\" is invalid. " 5096 "Reverting to \"all\".\n", fmri); 5097 goto out; 5098 5099 case ECANCELED: 5100 log_error(LOG_WARNING, 5101 "Requested milestone \"%s\" not " 5102 "in repository. 
Reverting to \"all\".\n", 5103 fmri); 5104 goto out; 5105 5106 case EEXIST: 5107 default: 5108 bad_error("dgraph_add_instance", r); 5109 } 5110 } 5111 5112 log_console(LOG_INFO, "Booting to milestone \"%s\".\n", fmri); 5113 5114 r = dgraph_set_milestone(fmri, h, B_FALSE); 5115 switch (r) { 5116 case 0: 5117 case ECONNRESET: 5118 case EALREADY: 5119 break; 5120 5121 case EINVAL: 5122 case ENOENT: 5123 default: 5124 bad_error("dgraph_set_milestone", r); 5125 } 5126 5127 out: 5128 startd_free(fmri, max_scf_fmri_size); 5129 scf_instance_destroy(inst); 5130 } 5131 5132 void 5133 set_restart_milestone(scf_handle_t *h) 5134 { 5135 scf_instance_t *inst; 5136 scf_property_t *prop; 5137 scf_value_t *val; 5138 char *fmri; 5139 int r; 5140 5141 inst = safe_scf_instance_create(h); 5142 5143 get_self: 5144 if (scf_handle_decode_fmri(h, SCF_SERVICE_STARTD, NULL, NULL, 5145 inst, NULL, NULL, SCF_DECODE_FMRI_EXACT) != 0) { 5146 switch (scf_error()) { 5147 case SCF_ERROR_CONNECTION_BROKEN: 5148 libscf_handle_rebind(h); 5149 goto get_self; 5150 5151 case SCF_ERROR_NOT_FOUND: 5152 break; 5153 5154 case SCF_ERROR_INVALID_ARGUMENT: 5155 case SCF_ERROR_CONSTRAINT_VIOLATED: 5156 case SCF_ERROR_HANDLE_MISMATCH: 5157 default: 5158 bad_error("scf_handle_decode_fmri", scf_error()); 5159 } 5160 5161 scf_instance_destroy(inst); 5162 return; 5163 } 5164 5165 prop = safe_scf_property_create(h); 5166 val = safe_scf_value_create(h); 5167 fmri = startd_alloc(max_scf_fmri_size); 5168 5169 r = libscf_get_milestone(inst, prop, val, fmri, max_scf_fmri_size); 5170 switch (r) { 5171 case 0: 5172 break; 5173 5174 case ECONNABORTED: 5175 libscf_handle_rebind(h); 5176 goto get_self; 5177 5178 case ECANCELED: 5179 case ENOENT: 5180 case EINVAL: 5181 goto out; 5182 5183 default: 5184 bad_error("libscf_get_milestone", r); 5185 } 5186 5187 r = dgraph_set_milestone(fmri, h, B_TRUE); 5188 switch (r) { 5189 case 0: 5190 case ECONNRESET: 5191 case EALREADY: 5192 case EINVAL: 5193 case ENOENT: 5194 break; 5195 5196 default: 5197 bad_error("dgraph_set_milestone", r); 5198 } 5199 5200 out: 5201 startd_free(fmri, max_scf_fmri_size); 5202 scf_value_destroy(val); 5203 scf_property_destroy(prop); 5204 scf_instance_destroy(inst); 5205 } 5206 5207 /* 5208 * void *graph_thread(void *) 5209 * 5210 * Graph management thread. 5211 */ 5212 /*ARGSUSED*/ 5213 void * 5214 graph_thread(void *arg) 5215 { 5216 scf_handle_t *h; 5217 int err; 5218 5219 h = libscf_handle_create_bound_loop(); 5220 5221 if (st->st_initial) 5222 set_initial_milestone(h); 5223 5224 MUTEX_LOCK(&dgraph_lock); 5225 initial_milestone_set = B_TRUE; 5226 err = pthread_cond_broadcast(&initial_milestone_cv); 5227 assert(err == 0); 5228 MUTEX_UNLOCK(&dgraph_lock); 5229 5230 libscf_populate_graph(h); 5231 5232 if (!st->st_initial) 5233 set_restart_milestone(h); 5234 5235 MUTEX_LOCK(&st->st_load_lock); 5236 st->st_load_complete = 1; 5237 (void) pthread_cond_broadcast(&st->st_load_cv); 5238 MUTEX_UNLOCK(&st->st_load_lock); 5239 5240 MUTEX_LOCK(&dgraph_lock); 5241 /* 5242 * Now that we've set st_load_complete we need to check can_come_up() 5243 * since if we booted to a milestone, then there won't be any more 5244 * state updates. 
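	 *
	 * If nothing further can come up and no sulogin thread is running
	 * yet, one is created below, mirroring the equivalent check made
	 * after each instance state change.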
5245 */ 5246 if (!go_single_user_mode && !go_to_level1 && 5247 halting == -1) { 5248 if (!can_come_up() && !sulogin_thread_running) { 5249 (void) startd_thread_create(sulogin_thread, NULL); 5250 sulogin_thread_running = B_TRUE; 5251 } 5252 } 5253 MUTEX_UNLOCK(&dgraph_lock); 5254 5255 (void) pthread_mutex_lock(&gu->gu_freeze_lock); 5256 5257 /*CONSTCOND*/ 5258 while (1) { 5259 (void) pthread_cond_wait(&gu->gu_freeze_cv, 5260 &gu->gu_freeze_lock); 5261 } 5262 5263 /* 5264 * Unreachable for now -- there's currently no graceful cleanup 5265 * called on exit(). 5266 */ 5267 (void) pthread_mutex_unlock(&gu->gu_freeze_lock); 5268 scf_handle_destroy(h); 5269 5270 return (NULL); 5271 } 5272 5273 5274 /* 5275 * int next_action() 5276 * Given an array of timestamps 'a' with 'num' elements, find the 5277 * lowest non-zero timestamp and return its index. If there are no 5278 * non-zero elements, return -1. 5279 */ 5280 static int 5281 next_action(hrtime_t *a, int num) 5282 { 5283 hrtime_t t = 0; 5284 int i = 0, smallest = -1; 5285 5286 for (i = 0; i < num; i++) { 5287 if (t == 0) { 5288 t = a[i]; 5289 smallest = i; 5290 } else if (a[i] != 0 && a[i] < t) { 5291 t = a[i]; 5292 smallest = i; 5293 } 5294 } 5295 5296 if (t == 0) 5297 return (-1); 5298 else 5299 return (smallest); 5300 } 5301 5302 /* 5303 * void process_actions() 5304 * Process actions requested by the administrator. Possibilities include: 5305 * refresh, restart, maintenance mode off, maintenance mode on, 5306 * maintenance mode immediate, and degraded. 5307 * 5308 * The set of pending actions is represented in the repository as a 5309 * per-instance property group, with each action being a single property 5310 * in that group. This property group is converted to an array, with each 5311 * action type having an array slot. The actions in the array at the 5312 * time process_actions() is called are acted on in the order of the 5313 * timestamp (which is the value stored in the slot). A value of zero 5314 * indicates that there is no pending action of the type associated with 5315 * a particular slot. 5316 * 5317 * Sending an action event multiple times before the restarter has a 5318 * chance to process that action will force it to be run at the last 5319 * timestamp where it appears in the ordering. 5320 * 5321 * Turning maintenance mode on trumps all other actions. 5322 * 5323 * Returns 0 or ECONNABORTED. 5324 */ 5325 static int 5326 process_actions(scf_handle_t *h, scf_propertygroup_t *pg, scf_instance_t *inst) 5327 { 5328 scf_property_t *prop = NULL; 5329 scf_value_t *val = NULL; 5330 scf_type_t type; 5331 graph_vertex_t *vertex; 5332 admin_action_t a; 5333 int i, ret = 0, r; 5334 hrtime_t action_ts[NACTIONS]; 5335 char *inst_name; 5336 5337 r = libscf_instance_get_fmri(inst, &inst_name); 5338 switch (r) { 5339 case 0: 5340 break; 5341 5342 case ECONNABORTED: 5343 return (ECONNABORTED); 5344 5345 case ECANCELED: 5346 return (0); 5347 5348 default: 5349 bad_error("libscf_instance_get_fmri", r); 5350 } 5351 5352 MUTEX_LOCK(&dgraph_lock); 5353 5354 vertex = vertex_get_by_name(inst_name); 5355 if (vertex == NULL) { 5356 MUTEX_UNLOCK(&dgraph_lock); 5357 log_framework(LOG_DEBUG, "%s: Can't find graph vertex. 
" 5358 "The instance must have been removed.\n", inst_name); 5359 return (0); 5360 } 5361 5362 prop = safe_scf_property_create(h); 5363 val = safe_scf_value_create(h); 5364 5365 for (i = 0; i < NACTIONS; i++) { 5366 if (scf_pg_get_property(pg, admin_actions[i], prop) != 0) { 5367 switch (scf_error()) { 5368 case SCF_ERROR_CONNECTION_BROKEN: 5369 default: 5370 ret = ECONNABORTED; 5371 goto out; 5372 5373 case SCF_ERROR_DELETED: 5374 goto out; 5375 5376 case SCF_ERROR_NOT_FOUND: 5377 action_ts[i] = 0; 5378 continue; 5379 5380 case SCF_ERROR_HANDLE_MISMATCH: 5381 case SCF_ERROR_INVALID_ARGUMENT: 5382 case SCF_ERROR_NOT_SET: 5383 bad_error("scf_pg_get_property", scf_error()); 5384 } 5385 } 5386 5387 if (scf_property_type(prop, &type) != 0) { 5388 switch (scf_error()) { 5389 case SCF_ERROR_CONNECTION_BROKEN: 5390 default: 5391 ret = ECONNABORTED; 5392 goto out; 5393 5394 case SCF_ERROR_DELETED: 5395 action_ts[i] = 0; 5396 continue; 5397 5398 case SCF_ERROR_NOT_SET: 5399 bad_error("scf_property_type", scf_error()); 5400 } 5401 } 5402 5403 if (type != SCF_TYPE_INTEGER) { 5404 action_ts[i] = 0; 5405 continue; 5406 } 5407 5408 if (scf_property_get_value(prop, val) != 0) { 5409 switch (scf_error()) { 5410 case SCF_ERROR_CONNECTION_BROKEN: 5411 default: 5412 ret = ECONNABORTED; 5413 goto out; 5414 5415 case SCF_ERROR_DELETED: 5416 goto out; 5417 5418 case SCF_ERROR_NOT_FOUND: 5419 case SCF_ERROR_CONSTRAINT_VIOLATED: 5420 action_ts[i] = 0; 5421 continue; 5422 5423 case SCF_ERROR_NOT_SET: 5424 bad_error("scf_property_get_value", 5425 scf_error()); 5426 } 5427 } 5428 5429 r = scf_value_get_integer(val, &action_ts[i]); 5430 assert(r == 0); 5431 } 5432 5433 a = ADMIN_EVENT_MAINT_ON_IMMEDIATE; 5434 if (action_ts[ADMIN_EVENT_MAINT_ON_IMMEDIATE] || 5435 action_ts[ADMIN_EVENT_MAINT_ON]) { 5436 a = action_ts[ADMIN_EVENT_MAINT_ON_IMMEDIATE] ? 5437 ADMIN_EVENT_MAINT_ON_IMMEDIATE : ADMIN_EVENT_MAINT_ON; 5438 5439 vertex_send_event(vertex, admin_events[a]); 5440 r = libscf_unset_action(h, pg, a, action_ts[a]); 5441 switch (r) { 5442 case 0: 5443 case EACCES: 5444 break; 5445 5446 case ECONNABORTED: 5447 ret = ECONNABORTED; 5448 goto out; 5449 5450 case EPERM: 5451 uu_die("Insufficient privilege.\n"); 5452 /* NOTREACHED */ 5453 5454 default: 5455 bad_error("libscf_unset_action", r); 5456 } 5457 } 5458 5459 while ((a = next_action(action_ts, NACTIONS)) != -1) { 5460 log_framework(LOG_DEBUG, 5461 "Graph: processing %s action for %s.\n", admin_actions[a], 5462 inst_name); 5463 5464 if (a == ADMIN_EVENT_REFRESH) { 5465 r = dgraph_refresh_instance(vertex, inst); 5466 switch (r) { 5467 case 0: 5468 case ECANCELED: 5469 case EINVAL: 5470 case -1: 5471 break; 5472 5473 case ECONNABORTED: 5474 /* pg & inst are reset now, so just return. 
*/ 5475 ret = ECONNABORTED; 5476 goto out; 5477 5478 default: 5479 bad_error("dgraph_refresh_instance", r); 5480 } 5481 } 5482 5483 vertex_send_event(vertex, admin_events[a]); 5484 5485 r = libscf_unset_action(h, pg, a, action_ts[a]); 5486 switch (r) { 5487 case 0: 5488 case EACCES: 5489 break; 5490 5491 case ECONNABORTED: 5492 ret = ECONNABORTED; 5493 goto out; 5494 5495 case EPERM: 5496 uu_die("Insufficient privilege.\n"); 5497 /* NOTREACHED */ 5498 5499 default: 5500 bad_error("libscf_unset_action", r); 5501 } 5502 5503 action_ts[a] = 0; 5504 } 5505 5506 out: 5507 MUTEX_UNLOCK(&dgraph_lock); 5508 5509 scf_property_destroy(prop); 5510 scf_value_destroy(val); 5511 startd_free(inst_name, max_scf_fmri_size); 5512 return (ret); 5513 } 5514 5515 /* 5516 * inst and pg_name are scratch space, and are unset on entry. 5517 * Returns 5518 * 0 - success 5519 * ECONNRESET - success, but repository handle rebound 5520 * ECONNABORTED - repository connection broken 5521 */ 5522 static int 5523 process_pg_event(scf_handle_t *h, scf_propertygroup_t *pg, scf_instance_t *inst, 5524 char *pg_name) 5525 { 5526 int r; 5527 scf_property_t *prop; 5528 scf_value_t *val; 5529 char *fmri; 5530 boolean_t rebound = B_FALSE, rebind_inst = B_FALSE; 5531 5532 if (scf_pg_get_name(pg, pg_name, max_scf_value_size) < 0) { 5533 switch (scf_error()) { 5534 case SCF_ERROR_CONNECTION_BROKEN: 5535 default: 5536 return (ECONNABORTED); 5537 5538 case SCF_ERROR_DELETED: 5539 return (0); 5540 5541 case SCF_ERROR_NOT_SET: 5542 bad_error("scf_pg_get_name", scf_error()); 5543 } 5544 } 5545 5546 if (strcmp(pg_name, SCF_PG_GENERAL) == 0 || 5547 strcmp(pg_name, SCF_PG_GENERAL_OVR) == 0) { 5548 r = dgraph_update_general(pg); 5549 switch (r) { 5550 case 0: 5551 case ENOTSUP: 5552 case ECANCELED: 5553 return (0); 5554 5555 case ECONNABORTED: 5556 return (ECONNABORTED); 5557 5558 case -1: 5559 /* Error should have been logged. */ 5560 return (0); 5561 5562 default: 5563 bad_error("dgraph_update_general", r); 5564 } 5565 } else if (strcmp(pg_name, SCF_PG_RESTARTER_ACTIONS) == 0) { 5566 if (scf_pg_get_parent_instance(pg, inst) != 0) { 5567 switch (scf_error()) { 5568 case SCF_ERROR_CONNECTION_BROKEN: 5569 return (ECONNABORTED); 5570 5571 case SCF_ERROR_DELETED: 5572 case SCF_ERROR_CONSTRAINT_VIOLATED: 5573 /* Ignore commands on services. */ 5574 return (0); 5575 5576 case SCF_ERROR_NOT_BOUND: 5577 case SCF_ERROR_HANDLE_MISMATCH: 5578 case SCF_ERROR_NOT_SET: 5579 default: 5580 bad_error("scf_pg_get_parent_instance", 5581 scf_error()); 5582 } 5583 } 5584 5585 return (process_actions(h, pg, inst)); 5586 } 5587 5588 if (strcmp(pg_name, SCF_PG_OPTIONS) != 0 && 5589 strcmp(pg_name, SCF_PG_OPTIONS_OVR) != 0) 5590 return (0); 5591 5592 /* 5593 * We only care about the options[_ovr] property groups of our own 5594 * instance, so get the fmri and compare. Plus, once we know it's 5595 * correct, if the repository connection is broken we know exactly what 5596 * property group we were operating on, and can look it up again. 
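	 *
	 * To summarize the dispatch in this function: general and
	 * general_ovr changes go to dgraph_update_general(),
	 * restarter_actions goes to process_actions(), and options /
	 * options_ovr changes on SCF_SERVICE_STARTD itself may carry a
	 * "runlevel" property (handled by dgraph_set_runlevel(), options_ovr
	 * only) and a milestone setting (handled by dgraph_set_milestone()).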
5597 */ 5598 if (scf_pg_get_parent_instance(pg, inst) != 0) { 5599 switch (scf_error()) { 5600 case SCF_ERROR_CONNECTION_BROKEN: 5601 return (ECONNABORTED); 5602 5603 case SCF_ERROR_DELETED: 5604 case SCF_ERROR_CONSTRAINT_VIOLATED: 5605 return (0); 5606 5607 case SCF_ERROR_HANDLE_MISMATCH: 5608 case SCF_ERROR_NOT_BOUND: 5609 case SCF_ERROR_NOT_SET: 5610 default: 5611 bad_error("scf_pg_get_parent_instance", 5612 scf_error()); 5613 } 5614 } 5615 5616 switch (r = libscf_instance_get_fmri(inst, &fmri)) { 5617 case 0: 5618 break; 5619 5620 case ECONNABORTED: 5621 return (ECONNABORTED); 5622 5623 case ECANCELED: 5624 return (0); 5625 5626 default: 5627 bad_error("libscf_instance_get_fmri", r); 5628 } 5629 5630 if (strcmp(fmri, SCF_SERVICE_STARTD) != 0) { 5631 startd_free(fmri, max_scf_fmri_size); 5632 return (0); 5633 } 5634 5635 prop = safe_scf_property_create(h); 5636 val = safe_scf_value_create(h); 5637 5638 if (strcmp(pg_name, SCF_PG_OPTIONS_OVR) == 0) { 5639 /* See if we need to set the runlevel. */ 5640 /* CONSTCOND */ 5641 if (0) { 5642 rebind_pg: 5643 libscf_handle_rebind(h); 5644 rebound = B_TRUE; 5645 5646 r = libscf_lookup_instance(SCF_SERVICE_STARTD, inst); 5647 switch (r) { 5648 case 0: 5649 break; 5650 5651 case ECONNABORTED: 5652 goto rebind_pg; 5653 5654 case ENOENT: 5655 goto out; 5656 5657 case EINVAL: 5658 case ENOTSUP: 5659 bad_error("libscf_lookup_instance", r); 5660 } 5661 5662 if (scf_instance_get_pg(inst, pg_name, pg) != 0) { 5663 switch (scf_error()) { 5664 case SCF_ERROR_DELETED: 5665 case SCF_ERROR_NOT_FOUND: 5666 goto out; 5667 5668 case SCF_ERROR_CONNECTION_BROKEN: 5669 goto rebind_pg; 5670 5671 case SCF_ERROR_HANDLE_MISMATCH: 5672 case SCF_ERROR_NOT_BOUND: 5673 case SCF_ERROR_NOT_SET: 5674 case SCF_ERROR_INVALID_ARGUMENT: 5675 default: 5676 bad_error("scf_instance_get_pg", 5677 scf_error()); 5678 } 5679 } 5680 } 5681 5682 if (scf_pg_get_property(pg, "runlevel", prop) == 0) { 5683 r = dgraph_set_runlevel(pg, prop); 5684 switch (r) { 5685 case ECONNRESET: 5686 rebound = B_TRUE; 5687 rebind_inst = B_TRUE; 5688 /* FALLTHROUGH */ 5689 5690 case 0: 5691 break; 5692 5693 case ECONNABORTED: 5694 goto rebind_pg; 5695 5696 case ECANCELED: 5697 goto out; 5698 5699 default: 5700 bad_error("dgraph_set_runlevel", r); 5701 } 5702 } else { 5703 switch (scf_error()) { 5704 case SCF_ERROR_CONNECTION_BROKEN: 5705 default: 5706 goto rebind_pg; 5707 5708 case SCF_ERROR_DELETED: 5709 goto out; 5710 5711 case SCF_ERROR_NOT_FOUND: 5712 break; 5713 5714 case SCF_ERROR_INVALID_ARGUMENT: 5715 case SCF_ERROR_HANDLE_MISMATCH: 5716 case SCF_ERROR_NOT_BOUND: 5717 case SCF_ERROR_NOT_SET: 5718 bad_error("scf_pg_get_property", scf_error()); 5719 } 5720 } 5721 } 5722 5723 if (rebind_inst) { 5724 lookup_inst: 5725 r = libscf_lookup_instance(SCF_SERVICE_STARTD, inst); 5726 switch (r) { 5727 case 0: 5728 break; 5729 5730 case ECONNABORTED: 5731 libscf_handle_rebind(h); 5732 rebound = B_TRUE; 5733 goto lookup_inst; 5734 5735 case ENOENT: 5736 goto out; 5737 5738 case EINVAL: 5739 case ENOTSUP: 5740 bad_error("libscf_lookup_instance", r); 5741 } 5742 } 5743 5744 r = libscf_get_milestone(inst, prop, val, fmri, max_scf_fmri_size); 5745 switch (r) { 5746 case 0: 5747 break; 5748 5749 case ECONNABORTED: 5750 libscf_handle_rebind(h); 5751 rebound = B_TRUE; 5752 goto lookup_inst; 5753 5754 case EINVAL: 5755 log_error(LOG_NOTICE, 5756 "%s/%s property of %s is misconfigured.\n", pg_name, 5757 SCF_PROPERTY_MILESTONE, SCF_SERVICE_STARTD); 5758 /* FALLTHROUGH */ 5759 5760 case ECANCELED: 5761 case ENOENT: 5762 
(void) strcpy(fmri, "all"); 5763 break; 5764 5765 default: 5766 bad_error("libscf_get_milestone", r); 5767 } 5768 5769 r = dgraph_set_milestone(fmri, h, B_FALSE); 5770 switch (r) { 5771 case 0: 5772 case ECONNRESET: 5773 case EALREADY: 5774 break; 5775 5776 case EINVAL: 5777 log_error(LOG_WARNING, "Milestone %s is invalid.\n", fmri); 5778 break; 5779 5780 case ENOENT: 5781 log_error(LOG_WARNING, "Milestone %s does not exist.\n", fmri); 5782 break; 5783 5784 default: 5785 bad_error("dgraph_set_milestone", r); 5786 } 5787 5788 out: 5789 startd_free(fmri, max_scf_fmri_size); 5790 scf_value_destroy(val); 5791 scf_property_destroy(prop); 5792 5793 return (rebound ? ECONNRESET : 0); 5794 } 5795 5796 static void 5797 process_delete(char *fmri, scf_handle_t *h) 5798 { 5799 char *lfmri; 5800 const char *inst_name, *pg_name; 5801 5802 lfmri = safe_strdup(fmri); 5803 5804 /* Determine if the FMRI is a property group or instance */ 5805 if (scf_parse_svc_fmri(lfmri, NULL, NULL, &inst_name, &pg_name, 5806 NULL) != SCF_SUCCESS) { 5807 log_error(LOG_WARNING, 5808 "Received invalid FMRI \"%s\" from repository server.\n", 5809 fmri); 5810 } else if (inst_name != NULL && pg_name == NULL) { 5811 (void) dgraph_remove_instance(fmri, h); 5812 } 5813 5814 free(lfmri); 5815 } 5816 5817 /*ARGSUSED*/ 5818 void * 5819 repository_event_thread(void *unused) 5820 { 5821 scf_handle_t *h; 5822 scf_propertygroup_t *pg; 5823 scf_instance_t *inst; 5824 char *fmri = startd_alloc(max_scf_fmri_size); 5825 char *pg_name = startd_alloc(max_scf_value_size); 5826 int r; 5827 5828 h = libscf_handle_create_bound_loop(); 5829 5830 pg = safe_scf_pg_create(h); 5831 inst = safe_scf_instance_create(h); 5832 5833 retry: 5834 if (_scf_notify_add_pgtype(h, SCF_GROUP_FRAMEWORK) != SCF_SUCCESS) { 5835 if (scf_error() == SCF_ERROR_CONNECTION_BROKEN) { 5836 libscf_handle_rebind(h); 5837 } else { 5838 log_error(LOG_WARNING, 5839 "Couldn't set up repository notification " 5840 "for property group type %s: %s\n", 5841 SCF_GROUP_FRAMEWORK, scf_strerror(scf_error())); 5842 5843 (void) sleep(1); 5844 } 5845 5846 goto retry; 5847 } 5848 5849 /*CONSTCOND*/ 5850 while (1) { 5851 ssize_t res; 5852 5853 /* Note: fmri is only set on delete events. */ 5854 res = _scf_notify_wait(pg, fmri, max_scf_fmri_size); 5855 if (res < 0) { 5856 libscf_handle_rebind(h); 5857 goto retry; 5858 } else if (res == 0) { 5859 /* 5860 * property group modified. inst and pg_name are 5861 * pre-allocated scratch space. 5862 */ 5863 if (scf_pg_update(pg) < 0) { 5864 switch (scf_error()) { 5865 case SCF_ERROR_DELETED: 5866 continue; 5867 5868 case SCF_ERROR_CONNECTION_BROKEN: 5869 log_error(LOG_WARNING, 5870 "Lost repository event due to " 5871 "disconnection.\n"); 5872 libscf_handle_rebind(h); 5873 goto retry; 5874 5875 case SCF_ERROR_NOT_BOUND: 5876 case SCF_ERROR_NOT_SET: 5877 default: 5878 bad_error("scf_pg_update", scf_error()); 5879 } 5880 } 5881 5882 r = process_pg_event(h, pg, inst, pg_name); 5883 switch (r) { 5884 case 0: 5885 break; 5886 5887 case ECONNABORTED: 5888 log_error(LOG_WARNING, "Lost repository event " 5889 "due to disconnection.\n"); 5890 libscf_handle_rebind(h); 5891 /* FALLTHROUGH */ 5892 5893 case ECONNRESET: 5894 goto retry; 5895 5896 default: 5897 bad_error("process_pg_event", r); 5898 } 5899 } else { 5900 /* service, instance, or pg deleted. 
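			 * process_delete() parses the FMRI and calls
			 * dgraph_remove_instance() only when it names an
			 * instance with no property-group component; other
			 * delete notifications are not acted on here.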
			 */
			process_delete(fmri, h);
		}
	}

	/*NOTREACHED*/
	return (NULL);
}

void
graph_engine_start()
{
	int err;

	(void) startd_thread_create(graph_thread, NULL);

	MUTEX_LOCK(&dgraph_lock);
	while (!initial_milestone_set) {
		err = pthread_cond_wait(&initial_milestone_cv, &dgraph_lock);
		assert(err == 0);
	}
	MUTEX_UNLOCK(&dgraph_lock);

	(void) startd_thread_create(repository_event_thread, NULL);
	(void) startd_thread_create(graph_event_thread, NULL);
}