/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * rc_node.c - object management primitives
 *
 * This layer manages entities, their data structures and locking, iterators,
 * transactions, and change notification requests.  Entities (scopes,
 * services, instances, snapshots, snaplevels, property groups, "composed"
 * property groups (see composition below), and properties) are represented
 * by rc_node_t's and are kept in the cache_hash hash table.  (Property
 * values are kept in the rn_values member of the respective property -- not
 * as separate objects.)  Iterators are represented by rc_node_iter_t's.
 * Transactions are represented by rc_node_tx_t's and are only allocated as
 * part of repcache_tx_t's in the client layer (client.c).  Change
 * notification requests are represented by rc_notify_t structures and are
 * described below.
 *
 * The entity tree is rooted at rc_scope, which rc_node_init() initializes to
 * the "localhost" scope.  The tree is filled in from the database on-demand
 * by rc_node_fill_children(), usually from rc_iter_create() since iterators
 * are the only way to find the children of an entity.
 *
 * Each rc_node_t is protected by its rn_lock member.  Operations which can
 * take too long, however, should serialize on an RC_NODE_WAITING_FLAGS bit
 * in rn_flags with the rc_node_{hold,rele}_flag() functions.  And since
 * pointers to rc_node_t's are allowed, rn_refs is a reference count
 * maintained by rc_node_{hold,rele}().  See configd.h for locking order
 * information.
 *
 * When a node (property group or snapshot) is updated, a new node takes the
 * place of the old node in the global hash, and the old node is hung off of
 * the rn_former list of the new node.  At the same time, all of its children
 * have their rn_parent_ref pointer set, and any holds they have are
 * reflected in the old node's rn_other_refs count.  This is automatically
 * kept up to date until the final reference to the subgraph is dropped, at
 * which point the node is unrefed and destroyed, along with all of its
 * children.
 *
 * Locking rules:  To dereference an rc_node_t * (usually to lock it), you
 * must have a hold (rc_node_hold()) on it or otherwise be sure that it
 * hasn't been rc_node_destroy()ed (hold a lock on its parent or child, hold
 * a flag, etc.).  Once you have locked an rc_node_t you must check its
 * rn_flags for RC_NODE_DEAD before you can use it.
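 *
 * A minimal sketch of that rule (illustrative only; the helpers named in
 * the next sentence are what callers normally use, and the error code
 * shown is just an assumption about what a caller might return):
 *
 *	rc_node_hold(np);
 *	(void) pthread_mutex_lock(&np->rn_lock);
 *	if (np->rn_flags & RC_NODE_DEAD) {
 *		(void) pthread_mutex_unlock(&np->rn_lock);
 *		rc_node_rele(np);
 *		return (REP_PROTOCOL_FAIL_DELETED);
 *	}
 *	... use np ...
 *	(void) pthread_mutex_unlock(&np->rn_lock);
 *	rc_node_rele(np);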
 * That check is usually done with the rc_node_{wait,hold}_flag() functions
 * (often via the rc_node_check_*() functions & RC_NODE_*() macros), which
 * fail if the object has died.
 *
 * Because name service lookups may take a long time and, more importantly,
 * may trigger additional accesses to the repository, perm_granted() must be
 * called without holding any locks.
 *
 * An ITER_START for a non-ENTITY_VALUE induces an rc_node_fill_children()
 * call via rc_node_setup_iter() to populate the rn_children uu_list of the
 * rc_node_t * in question and a call to uu_list_walk_start() on that list.
 * For ITER_READ, rc_iter_next() uses uu_list_walk_next() to find the next
 * appropriate child.
 *
 * An ITER_START for an ENTITY_VALUE makes sure the node has its values
 * filled, and sets up the iterator.  An ITER_READ_VALUE just copies out
 * the proper values and updates the offset information.
 *
 * When a property group gets changed by a transaction, it sticks around as
 * a child of its replacement property group, but is removed from the parent.
 *
 * To allow aliases, snapshots are implemented with a level of indirection.
 * A snapshot rc_node_t has a snapid which refers to an rc_snapshot_t in
 * snapshot.c which contains the authoritative snaplevel information.  The
 * snapid is "assigned" by rc_attach_snapshot().
 *
 * We provide the client layer with rc_node_ptr_t's to reference objects.
 * Objects referred to by them are automatically held & released by
 * rc_node_assign() & rc_node_clear().  The RC_NODE_PTR_*() macros are used
 * at client.c entry points to read the pointers.  They fetch the pointer to
 * the object, return (from the function) if it is dead, and lock, hold, or
 * hold a flag of the object.
 */

/*
 * Permission checking is authorization-based: some operations may only
 * proceed if the user has been assigned at least one of a set of
 * authorization strings.  The set of enabling authorizations depends on the
 * operation and the target object.  The set of authorizations assigned to
 * a user is determined by reading /etc/security/policy.conf, querying the
 * user_attr database, and possibly querying the prof_attr database, as per
 * chkauthattr() in libsecdb.
 *
 * The fastest way to decide whether the two sets intersect is by entering
 * the strings into a hash table and detecting collisions, which takes
 * linear time in the total size of the sets.  The exception is the
 * authorization patterns which may be assigned to users; without advanced
 * pattern-matching algorithms, each pattern takes O(n) in the number of
 * enabling authorizations.
 *
 * We can achieve some practical speed-ups by noting that if we enter all of
 * the authorizations from one of the sets into the hash table we can merely
 * check the elements of the second set for existence without adding them.
 * This reduces memory requirements and hash table clutter.  The enabling
 * set is well suited for this because it is internal to configd (for now,
 * at least).  Combine this with short-circuiting and we can even minimize
 * the number of queries to the security databases (user_attr & prof_attr).
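 *
 * A rough sketch of the resulting usage (hypothetical caller; the helpers
 * themselves are introduced in the next paragraph):
 *
 *	permcheck_t *pcp = pc_create();
 *	int granted;
 *
 *	if (pcp == NULL)
 *		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
 *	if (perm_add_enabling(pcp, AUTH_MANAGE) != REP_PROTOCOL_SUCCESS) {
 *		pc_free(pcp);
 *		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
 *	}
 *	granted = perm_granted(pcp);	(1, 0, or -1 on error)
 *	pc_free(pcp);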
 *
 * To force this usage onto clients we provide functions for adding
 * authorizations to the enabling set of a permission context structure
 * (perm_add_*()) and one to decide whether the user associated with the
 * current door call client possesses any of them (perm_granted()).
 *
 * At some point, a generic version of this should move to libsecdb.
 *
 * While entering the enabling strings into the hash table, we keep track
 * of which is the most specific for use in generating auditing events.
 * See the "Collecting the Authorization String" section of the "SMF Audit
 * Events" block comment below.
 */

/*
 * Composition is the combination of sets of properties.  The sets are
 * ordered and properties in higher sets obscure properties of the same
 * name in lower sets.  Here we present a composed view of an instance's
 * properties as the union of its properties and its service's properties.
 * Similarly the properties of snaplevels are combined to form a composed
 * view of the properties of a snapshot (which should match the composed
 * view of the properties of the instance when the snapshot was taken).
 *
 * In terms of the client interface, the client may request that a property
 * group iterator for an instance or snapshot be composed.  Property groups
 * traversed by such an iterator may not have the target entity as a parent.
 * Similarly, the properties traversed by a property iterator for those
 * property groups may not have the property groups iterated as parents.
 *
 * Implementation requires that iterators for instances and snapshots be
 * composition-savvy, and that we have a "composed property group" entity
 * which represents the composition of a number of property groups.
 * Iteration over "composed property groups" yields properties which may
 * have different parents, but for all other operations a composed property
 * group behaves like the top-most property group it represents.
 *
 * The implementation is based on the rn_cchain[] array of rc_node_t
 * pointers in rc_node_t.  For instances, the pointers point to the instance
 * and its parent service.  For snapshots they point to the child
 * snaplevels, and for composed property groups they point to property
 * groups.  A composed iterator carries an index into rn_cchain[].  Thus
 * most of the magic ends up in the rc_iter_*() code.
 */
/*
 * SMF Audit Events:
 * ================
 *
 * To maintain security, SMF generates audit events whenever
 * privileged operations are attempted.  See the System Administration
 * Guide: Security Services answerbook for a discussion of the Solaris
 * audit system.
 *
 * The SMF audit event codes are defined in adt_event.h by symbols
 * starting with ADT_smf_ and are described in audit_event.txt.  The
 * audit record structures are defined in the SMF section of adt.xml.
 * adt.xml is used to automatically generate adt_event.h which
 * contains the definitions that we code to in this file.  For the
 * most part the audit events map closely to actions that you would
 * perform with svcadm or svccfg, but there are some special cases
 * which we'll discuss later.
 *
 * The software associated with SMF audit events falls into three
 * categories:
 *	- collecting information to be written to the audit
 *	  records
 *	- using the adt_* functions in
 *	  usr/src/lib/libbsm/common/adt.c to generate the audit
 *	  records
 *	- handling special cases
 *
 * Collecting Information:
 * ----------------------
 *
 * Almost all of the audit events require the FMRI of the affected
 * object and the authorization string that was used.  The one
 * exception is ADT_smf_annotation, which we'll talk about later.
 *
 * Collecting the FMRI:
 *
 * The rc_node structure has a member called rn_fmri which points to
 * its FMRI.  This is initialized by a call to rc_node_build_fmri()
 * when the node's parent is established.  The reason for doing it
 * at this time is that a node's FMRI is basically the concatenation
 * of the parent's FMRI and the node's name with the appropriate
 * decoration.  rc_node_build_fmri() does this concatenation and
 * decorating.  It is called from rc_node_link_child() and
 * rc_node_relink_child() where a node is linked to its parent.
 *
 * rc_node_get_fmri_or_fragment() is called to retrieve a node's FMRI
 * when it is needed.  It returns rn_fmri if it is set.  If the node
 * is at the top level, however, rn_fmri won't be set because it was
 * never linked to a parent.  In this case,
 * rc_node_get_fmri_or_fragment() constructs an FMRI fragment based on
 * its node type and its name, rn_name.
 *
 * Collecting the Authorization String:
 *
 * Naturally, the authorization string is captured during the
 * authorization checking process.  Acceptable authorization strings
 * are added to a permcheck_t hash table as noted in the section on
 * permission checking above.  Once all entries have been added to the
 * hash table, perm_granted() is called.  If the client is authorized,
 * perm_granted() returns with pc_auth_string of the permcheck_t
 * structure pointing to the authorization string.
 *
 * This works fine if the client is authorized, but what happens if
 * the client is not authorized?  We need to report the required
 * authorization string.  This is the authorization that would have
 * been used if permission had been granted.  perm_granted() will
 * find no match, so it needs to decide which string in the hash
 * table to use as the required authorization string.  It needs to do
 * this because configd is still going to generate an event.  A
 * design decision was made to use the most specific authorization
 * in the hash table.  The pc_auth_type enum designates the
 * specificity of an authorization string.  For example, an
 * authorization string that is declared in an instance PG is more
 * specific than one that is declared in a service PG.
 *
 * The pc_add() function keeps track of the most specific
 * authorization in the hash table.  It does this using the
 * pc_specific and pc_specific_type members of the permcheck
 * structure.  pc_add() updates these members whenever a more
 * specific authorization string is added to the hash table.  Thus, if
 * an authorization match is not found, perm_granted() will return
 * with pc_auth_string in the permcheck_t pointing to the string that
 * is referenced by pc_specific.
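 *
 * A small example of that bookkeeping (the strings are made up):
 *
 *	(void) pc_add(pcp, "solaris.smf.manage", PC_AUTH_SMF);
 *	(void) pc_add(pcp, "solaris.smf.manage.foo", PC_AUTH_INST);
 *
 * Since PC_AUTH_INST is more specific than PC_AUTH_SMF, pc_specific now
 * refers to the second entry, so a failed perm_granted() call reports
 * "solaris.smf.manage.foo" as the required authorization.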
 *
 * Generating the Audit Events:
 * ===========================
 *
 * As the functions in this file process requests for clients of
 * configd, they gather the information that is required for an audit
 * event.  Eventually, the request processing gets to the point where
 * the authorization is rejected or to the point where the requested
 * action was attempted.  At these two points smf_audit_event() is
 * called.
 *
 * smf_audit_event() takes 4 parameters:
 *	- the event ID which is one of the ADT_smf_* symbols from
 *	  adt_event.h.
 *	- status to pass to adt_put_event()
 *	- return value to pass to adt_put_event()
 *	- the event data (see audit_event_data structure)
 *
 * All interactions with the auditing software require an audit
 * session.  We use one audit session per configd client.  We keep
 * track of the audit session in the repcache_client structure.
 * smf_audit_event() calls get_audit_session() to get the session
 * pointer.
 *
 * smf_audit_event() then calls adt_alloc_event() to allocate an
 * adt_event_data union which is defined in adt_event.h, copies the
 * data into the appropriate members of the union and calls
 * adt_put_event() to generate the event.
 *
 * Special Cases:
 * =============
 *
 * There are three major types of special cases:
 *
 *	- gathering event information for each action in a
 *	  transaction
 *	- higher level events represented by special property
 *	  group/property name combinations.  Many of these are
 *	  restarter actions.
 *	- ADT_smf_annotation event
 *
 * Processing Transaction Actions:
 * ------------------------------
 *
 * A transaction can contain multiple actions to modify, create or
 * delete one or more properties.  We need to capture information so
 * that we can generate an event for each property action.  The
 * transaction information is stored in a tx_commit_data_t, and
 * object.c provides accessor functions to retrieve data from this
 * structure.  rc_tx_commit() obtains a tx_commit_data_t by calling
 * tx_commit_data_new() and passes this to object_tx_commit() to
 * commit the transaction.  Then we call generate_property_events() to
 * generate an audit event for each property action.
 *
 * Special Properties:
 * ------------------
 *
 * There are combinations of property group/property name that are special.
 * They are special because they have specific meaning to startd.  startd
 * interprets them in a service-independent fashion.
 * restarter_actions/refresh and general/enabled are two examples of these.
 * A special event is generated for these properties in addition to the
 * regular property event described in the previous section.  The special
 * properties are declared as an array of audit_special_prop_item
 * structures at special_props_list in rc_node.c.
 *
 * In the previous section, we mentioned the
 * generate_property_event() function that generates an event for
 * every property action.  Before generating the event,
 * generate_property_event() calls special_property_event().
 * special_property_event() checks to see if the action involves a
 * special property.  If it does, it generates a special audit
 * event.
 *
 * ADT_smf_annotation event:
 * ------------------------
 *
 * This is a special event unlike any other.
 * It allows the svccfg program to store an annotation in the event log
 * before a series of transactions is processed.  It is used with the
 * import and apply svccfg commands.  svccfg uses the
 * rep_protocol_annotation message to pass the operation (import or apply)
 * and the file name to configd.  The set_annotation() function in client.c
 * stores these away in the repcache_client structure.  The address of
 * this structure is saved in the thread_info structure.
 *
 * Before it generates any events, smf_audit_event() calls
 * smf_annotation_event().  smf_annotation_event() calls
 * client_annotation_needed() which is defined in client.c.  If an
 * annotation is needed, client_annotation_needed() returns the
 * operation and filename strings that were saved from the
 * rep_protocol_annotation message.  smf_annotation_event() then
 * generates the ADT_smf_annotation event.
 */

#include <assert.h>
#include <atomic.h>
#include <bsm/adt_event.h>
#include <errno.h>
#include <libuutil.h>
#include <libscf.h>
#include <libscf_priv.h>
#include <prof_attr.h>
#include <pthread.h>
#include <pwd.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <sys/types.h>
#include <syslog.h>
#include <unistd.h>
#include <user_attr.h>

#include "configd.h"

#define	AUTH_PREFIX		"solaris.smf."
#define	AUTH_MANAGE		AUTH_PREFIX "manage"
#define	AUTH_MODIFY		AUTH_PREFIX "modify"
#define	AUTH_MODIFY_PREFIX	AUTH_MODIFY "."
#define	AUTH_PG_ACTIONS		SCF_PG_RESTARTER_ACTIONS
#define	AUTH_PG_ACTIONS_TYPE	SCF_PG_RESTARTER_ACTIONS_TYPE
#define	AUTH_PG_GENERAL		SCF_PG_GENERAL
#define	AUTH_PG_GENERAL_TYPE	SCF_PG_GENERAL_TYPE
#define	AUTH_PG_GENERAL_OVR	SCF_PG_GENERAL_OVR
#define	AUTH_PG_GENERAL_OVR_TYPE SCF_PG_GENERAL_OVR_TYPE
#define	AUTH_PROP_ACTION	"action_authorization"
#define	AUTH_PROP_ENABLED	"enabled"
#define	AUTH_PROP_MODIFY	"modify_authorization"
#define	AUTH_PROP_VALUE		"value_authorization"
#define	AUTH_PROP_READ		"read_authorization"
/* libsecdb should take care of this. */
#define	RBAC_AUTH_SEP		","

#define	MAX_VALID_CHILDREN	3

/*
 * The ADT_smf_* symbols may not be defined on the build machine.  Because
 * of this, we do not want to compile the _smf_audit_event() function when
 * doing native builds.
389 */ 390 #ifdef NATIVE_BUILD 391 #define smf_audit_event(i, s, r, d) 392 #else 393 #define smf_audit_event(i, s, r, d) _smf_audit_event(i, s, r, d) 394 #endif /* NATIVE_BUILD */ 395 396 typedef struct rc_type_info { 397 uint32_t rt_type; /* matches array index */ 398 uint32_t rt_num_ids; 399 uint32_t rt_name_flags; 400 uint32_t rt_valid_children[MAX_VALID_CHILDREN]; 401 } rc_type_info_t; 402 403 #define RT_NO_NAME -1U 404 405 static rc_type_info_t rc_types[] = { 406 {REP_PROTOCOL_ENTITY_NONE, 0, RT_NO_NAME}, 407 {REP_PROTOCOL_ENTITY_SCOPE, 0, 0, 408 {REP_PROTOCOL_ENTITY_SERVICE, REP_PROTOCOL_ENTITY_SCOPE}}, 409 {REP_PROTOCOL_ENTITY_SERVICE, 0, UU_NAME_DOMAIN | UU_NAME_PATH, 410 {REP_PROTOCOL_ENTITY_INSTANCE, REP_PROTOCOL_ENTITY_PROPERTYGRP}}, 411 {REP_PROTOCOL_ENTITY_INSTANCE, 1, UU_NAME_DOMAIN, 412 {REP_PROTOCOL_ENTITY_SNAPSHOT, REP_PROTOCOL_ENTITY_PROPERTYGRP}}, 413 {REP_PROTOCOL_ENTITY_SNAPSHOT, 2, UU_NAME_DOMAIN, 414 {REP_PROTOCOL_ENTITY_SNAPLEVEL, REP_PROTOCOL_ENTITY_PROPERTYGRP}}, 415 {REP_PROTOCOL_ENTITY_SNAPLEVEL, 4, RT_NO_NAME, 416 {REP_PROTOCOL_ENTITY_PROPERTYGRP}}, 417 {REP_PROTOCOL_ENTITY_PROPERTYGRP, 5, UU_NAME_DOMAIN, 418 {REP_PROTOCOL_ENTITY_PROPERTY}}, 419 {REP_PROTOCOL_ENTITY_CPROPERTYGRP, 0, UU_NAME_DOMAIN, 420 {REP_PROTOCOL_ENTITY_PROPERTY}}, 421 {REP_PROTOCOL_ENTITY_PROPERTY, 7, UU_NAME_DOMAIN}, 422 {-1UL} 423 }; 424 #define NUM_TYPES ((sizeof (rc_types) / sizeof (*rc_types))) 425 426 /* Element of a permcheck_t hash table. */ 427 struct pc_elt { 428 struct pc_elt *pce_next; 429 char pce_auth[1]; 430 }; 431 432 /* 433 * If an authorization fails, we must decide which of the elements in the 434 * permcheck hash table to use in the audit event. That is to say of all 435 * the strings in the hash table, we must choose one and use it in the audit 436 * event. It is desirable to use the most specific string in the audit 437 * event. 438 * 439 * The pc_auth_type specifies the types (sources) of authorization 440 * strings. The enum is ordered in increasing specificity. 441 */ 442 typedef enum pc_auth_type { 443 PC_AUTH_NONE = 0, /* no auth string available. */ 444 PC_AUTH_SMF, /* strings coded into SMF. */ 445 PC_AUTH_SVC, /* strings specified in PG of a service. */ 446 PC_AUTH_INST /* strings specified in PG of an instance. */ 447 } pc_auth_type_t; 448 449 /* An authorization set hash table. */ 450 typedef struct { 451 struct pc_elt **pc_buckets; 452 uint_t pc_bnum; /* number of buckets */ 453 uint_t pc_enum; /* number of elements */ 454 struct pc_elt *pc_specific; /* most specific element */ 455 pc_auth_type_t pc_specific_type; /* type of pc_specific */ 456 char *pc_auth_string; /* authorization string */ 457 /* for audit events */ 458 } permcheck_t; 459 460 /* 461 * Structure for holding audit event data. Not all events use all members 462 * of the structure. 463 */ 464 typedef struct audit_event_data { 465 char *ed_auth; /* authorization string. */ 466 char *ed_fmri; /* affected FMRI. */ 467 char *ed_snapname; /* name of snapshot. */ 468 char *ed_old_fmri; /* old fmri in attach case. */ 469 char *ed_old_name; /* old snapshot in attach case. */ 470 char *ed_type; /* prop. group or prop. type. */ 471 char *ed_prop_value; /* property value. */ 472 } audit_event_data_t; 473 474 /* 475 * Pointer to function to do special processing to get audit event ID. 476 * Audit event IDs are defined in /usr/include/bsm/adt_event.h. Function 477 * returns 0 if ID successfully retrieved. Otherwise it returns -1. 
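 *
 * (general_enable_id(), declared just below, is the one such function in
 * this file; the special_props_list entries for the enabled property in
 * the general and general_ovr property groups use it, with api_event_id
 * set to 0.)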
478 */ 479 typedef int (*spc_getid_fn_t)(tx_commit_data_t *, size_t, const char *, 480 au_event_t *); 481 static int general_enable_id(tx_commit_data_t *, size_t, const char *, 482 au_event_t *); 483 484 static uu_list_pool_t *rc_children_pool; 485 static uu_list_pool_t *rc_pg_notify_pool; 486 static uu_list_pool_t *rc_notify_pool; 487 static uu_list_pool_t *rc_notify_info_pool; 488 489 static rc_node_t *rc_scope; 490 491 static pthread_mutex_t rc_pg_notify_lock = PTHREAD_MUTEX_INITIALIZER; 492 static pthread_cond_t rc_pg_notify_cv = PTHREAD_COND_INITIALIZER; 493 static uint_t rc_notify_in_use; /* blocks removals */ 494 495 /* 496 * Some combinations of property group/property name require a special 497 * audit event to be generated when there is a change. 498 * audit_special_prop_item_t is used to specify these special cases. The 499 * special_props_list array defines a list of these special properties. 500 */ 501 typedef struct audit_special_prop_item { 502 const char *api_pg_name; /* property group name. */ 503 const char *api_prop_name; /* property name. */ 504 au_event_t api_event_id; /* event id or 0. */ 505 spc_getid_fn_t api_event_func; /* function to get event id. */ 506 } audit_special_prop_item_t; 507 508 /* 509 * Native builds are done using the build machine's standard include 510 * files. These files may not yet have the definitions for the ADT_smf_* 511 * symbols. Thus, we do not compile this table when doing native builds. 512 */ 513 #ifndef NATIVE_BUILD 514 /* 515 * The following special_props_list array specifies property group/property 516 * name combinations that have specific meaning to startd. A special event 517 * is generated for these combinations in addition to the regular property 518 * event. 519 * 520 * At run time this array gets sorted. See the call to qsort(3C) in 521 * rc_node_init(). The array is sorted, so that bsearch(3C) can be used 522 * to do lookups. 523 */ 524 static audit_special_prop_item_t special_props_list[] = { 525 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADED, ADT_smf_degrade, 526 NULL}, 527 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADE_IMMEDIATE, 528 ADT_smf_immediate_degrade, NULL}, 529 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_OFF, ADT_smf_clear, NULL}, 530 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON, 531 ADT_smf_maintenance, NULL}, 532 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMEDIATE, 533 ADT_smf_immediate_maintenance, NULL}, 534 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMTEMP, 535 ADT_smf_immtmp_maintenance, NULL}, 536 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_TEMPORARY, 537 ADT_smf_tmp_maintenance, NULL}, 538 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_REFRESH, ADT_smf_refresh, NULL}, 539 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTART, ADT_smf_restart, NULL}, 540 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTORE, ADT_smf_clear, NULL}, 541 {SCF_PG_OPTIONS, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL}, 542 {SCF_PG_OPTIONS_OVR, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL}, 543 {SCF_PG_GENERAL, SCF_PROPERTY_ENABLED, 0, general_enable_id}, 544 {SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 0, general_enable_id} 545 }; 546 #define SPECIAL_PROP_COUNT (sizeof (special_props_list) /\ 547 sizeof (audit_special_prop_item_t)) 548 #endif /* NATIVE_BUILD */ 549 550 static void rc_node_unrefed(rc_node_t *np); 551 552 /* 553 * We support an arbitrary number of clients interested in events for certain 554 * types of changes. 
Each client is represented by an rc_notify_info_t, and 555 * all clients are chained onto the rc_notify_info_list. 556 * 557 * The rc_notify_list is the global notification list. Each entry is of 558 * type rc_notify_t, which is embedded in one of three other structures: 559 * 560 * rc_node_t property group update notification 561 * rc_notify_delete_t object deletion notification 562 * rc_notify_info_t notification clients 563 * 564 * Which type of object is determined by which pointer in the rc_notify_t is 565 * non-NULL. 566 * 567 * New notifications and clients are added to the end of the list. 568 * Notifications no-one is interested in are never added to the list. 569 * 570 * Clients use their position in the list to track which notifications they 571 * have not yet reported. As they process notifications, they move forward 572 * in the list past them. There is always a client at the beginning of the 573 * list -- as he moves past notifications, he removes them from the list and 574 * cleans them up. 575 * 576 * The rc_pg_notify_lock protects all notification state. The rc_pg_notify_cv 577 * is used for global signalling, and each client has a cv which he waits for 578 * events of interest on. 579 */ 580 static uu_list_t *rc_notify_info_list; 581 static uu_list_t *rc_notify_list; 582 583 #define HASH_SIZE 512 584 #define HASH_MASK (HASH_SIZE - 1) 585 586 #pragma align 64(cache_hash) 587 static cache_bucket_t cache_hash[HASH_SIZE]; 588 589 #define CACHE_BUCKET(h) (&cache_hash[(h) & HASH_MASK]) 590 591 static uint32_t 592 rc_node_hash(rc_node_lookup_t *lp) 593 { 594 uint32_t type = lp->rl_type; 595 uint32_t backend = lp->rl_backend; 596 uint32_t mainid = lp->rl_main_id; 597 uint32_t *ids = lp->rl_ids; 598 599 rc_type_info_t *tp = &rc_types[type]; 600 uint32_t num_ids; 601 uint32_t left; 602 uint32_t hash; 603 604 assert(backend == BACKEND_TYPE_NORMAL || 605 backend == BACKEND_TYPE_NONPERSIST); 606 607 assert(type > 0 && type < NUM_TYPES); 608 num_ids = tp->rt_num_ids; 609 610 left = MAX_IDS - num_ids; 611 assert(num_ids <= MAX_IDS); 612 613 hash = type * 7 + mainid * 5 + backend; 614 615 while (num_ids-- > 0) 616 hash = hash * 11 + *ids++ * 7; 617 618 /* 619 * the rest should be zeroed 620 */ 621 while (left-- > 0) 622 assert(*ids++ == 0); 623 624 return (hash); 625 } 626 627 static int 628 rc_node_match(rc_node_t *np, rc_node_lookup_t *l) 629 { 630 rc_node_lookup_t *r = &np->rn_id; 631 rc_type_info_t *tp; 632 uint32_t type; 633 uint32_t num_ids; 634 635 if (r->rl_main_id != l->rl_main_id) 636 return (0); 637 638 type = r->rl_type; 639 if (type != l->rl_type) 640 return (0); 641 642 assert(type > 0 && type < NUM_TYPES); 643 644 tp = &rc_types[r->rl_type]; 645 num_ids = tp->rt_num_ids; 646 647 assert(num_ids <= MAX_IDS); 648 while (num_ids-- > 0) 649 if (r->rl_ids[num_ids] != l->rl_ids[num_ids]) 650 return (0); 651 652 return (1); 653 } 654 655 /* 656 * the "other" references on a node are maintained in an atomically 657 * updated refcount, rn_other_refs. This can be bumped from arbitrary 658 * context, and tracks references to a possibly out-of-date node's children. 659 * 660 * To prevent the node from disappearing between the final drop of 661 * rn_other_refs and the unref handling, rn_other_refs_held is bumped on 662 * 0->1 transitions and decremented (with the node lock held) on 1->0 663 * transitions. 
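 *
 * (In other words, rn_other_refs_held stays nonzero for as long as
 * rn_other_refs is, so rc_node_rele_other() below can take rn_lock on the
 * final release and decide, under that lock, whether the node is now
 * eligible for rc_node_unrefed().)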
664 */ 665 static void 666 rc_node_hold_other(rc_node_t *np) 667 { 668 if (atomic_add_32_nv(&np->rn_other_refs, 1) == 1) { 669 atomic_add_32(&np->rn_other_refs_held, 1); 670 assert(np->rn_other_refs_held > 0); 671 } 672 assert(np->rn_other_refs > 0); 673 } 674 675 /* 676 * No node locks may be held 677 */ 678 static void 679 rc_node_rele_other(rc_node_t *np) 680 { 681 assert(np->rn_other_refs > 0); 682 if (atomic_add_32_nv(&np->rn_other_refs, -1) == 0) { 683 (void) pthread_mutex_lock(&np->rn_lock); 684 assert(np->rn_other_refs_held > 0); 685 if (atomic_add_32_nv(&np->rn_other_refs_held, -1) == 0 && 686 np->rn_refs == 0 && (np->rn_flags & RC_NODE_OLD)) 687 rc_node_unrefed(np); 688 else 689 (void) pthread_mutex_unlock(&np->rn_lock); 690 } 691 } 692 693 static void 694 rc_node_hold_locked(rc_node_t *np) 695 { 696 assert(MUTEX_HELD(&np->rn_lock)); 697 698 if (np->rn_refs == 0 && (np->rn_flags & RC_NODE_PARENT_REF)) 699 rc_node_hold_other(np->rn_parent_ref); 700 np->rn_refs++; 701 assert(np->rn_refs > 0); 702 } 703 704 static void 705 rc_node_hold(rc_node_t *np) 706 { 707 (void) pthread_mutex_lock(&np->rn_lock); 708 rc_node_hold_locked(np); 709 (void) pthread_mutex_unlock(&np->rn_lock); 710 } 711 712 static void 713 rc_node_rele_locked(rc_node_t *np) 714 { 715 int unref = 0; 716 rc_node_t *par_ref = NULL; 717 718 assert(MUTEX_HELD(&np->rn_lock)); 719 assert(np->rn_refs > 0); 720 721 if (--np->rn_refs == 0) { 722 if (np->rn_flags & RC_NODE_PARENT_REF) 723 par_ref = np->rn_parent_ref; 724 725 /* 726 * Composed property groups are only as good as their 727 * references. 728 */ 729 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) 730 np->rn_flags |= RC_NODE_DEAD; 731 732 if ((np->rn_flags & (RC_NODE_DEAD|RC_NODE_OLD)) && 733 np->rn_other_refs == 0 && np->rn_other_refs_held == 0) 734 unref = 1; 735 } 736 737 if (unref) 738 rc_node_unrefed(np); 739 else 740 (void) pthread_mutex_unlock(&np->rn_lock); 741 742 if (par_ref != NULL) 743 rc_node_rele_other(par_ref); 744 } 745 746 void 747 rc_node_rele(rc_node_t *np) 748 { 749 (void) pthread_mutex_lock(&np->rn_lock); 750 rc_node_rele_locked(np); 751 } 752 753 static cache_bucket_t * 754 cache_hold(uint32_t h) 755 { 756 cache_bucket_t *bp = CACHE_BUCKET(h); 757 (void) pthread_mutex_lock(&bp->cb_lock); 758 return (bp); 759 } 760 761 static void 762 cache_release(cache_bucket_t *bp) 763 { 764 (void) pthread_mutex_unlock(&bp->cb_lock); 765 } 766 767 static rc_node_t * 768 cache_lookup_unlocked(cache_bucket_t *bp, rc_node_lookup_t *lp) 769 { 770 uint32_t h = rc_node_hash(lp); 771 rc_node_t *np; 772 773 assert(MUTEX_HELD(&bp->cb_lock)); 774 assert(bp == CACHE_BUCKET(h)); 775 776 for (np = bp->cb_head; np != NULL; np = np->rn_hash_next) { 777 if (np->rn_hash == h && rc_node_match(np, lp)) { 778 rc_node_hold(np); 779 return (np); 780 } 781 } 782 783 return (NULL); 784 } 785 786 static rc_node_t * 787 cache_lookup(rc_node_lookup_t *lp) 788 { 789 uint32_t h; 790 cache_bucket_t *bp; 791 rc_node_t *np; 792 793 h = rc_node_hash(lp); 794 bp = cache_hold(h); 795 796 np = cache_lookup_unlocked(bp, lp); 797 798 cache_release(bp); 799 800 return (np); 801 } 802 803 static void 804 cache_insert_unlocked(cache_bucket_t *bp, rc_node_t *np) 805 { 806 assert(MUTEX_HELD(&bp->cb_lock)); 807 assert(np->rn_hash == rc_node_hash(&np->rn_id)); 808 assert(bp == CACHE_BUCKET(np->rn_hash)); 809 810 assert(np->rn_hash_next == NULL); 811 812 np->rn_hash_next = bp->cb_head; 813 bp->cb_head = np; 814 } 815 816 static void 817 cache_remove_unlocked(cache_bucket_t *bp, rc_node_t *np) 
818 { 819 rc_node_t **npp; 820 821 assert(MUTEX_HELD(&bp->cb_lock)); 822 assert(np->rn_hash == rc_node_hash(&np->rn_id)); 823 assert(bp == CACHE_BUCKET(np->rn_hash)); 824 825 for (npp = &bp->cb_head; *npp != NULL; npp = &(*npp)->rn_hash_next) 826 if (*npp == np) 827 break; 828 829 assert(*npp == np); 830 *npp = np->rn_hash_next; 831 np->rn_hash_next = NULL; 832 } 833 834 /* 835 * verify that the 'parent' type can have a child typed 'child' 836 * Fails with 837 * _INVALID_TYPE - argument is invalid 838 * _TYPE_MISMATCH - parent type cannot have children of type child 839 */ 840 static int 841 rc_check_parent_child(uint32_t parent, uint32_t child) 842 { 843 int idx; 844 uint32_t type; 845 846 if (parent == 0 || parent >= NUM_TYPES || 847 child == 0 || child >= NUM_TYPES) 848 return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */ 849 850 for (idx = 0; idx < MAX_VALID_CHILDREN; idx++) { 851 type = rc_types[parent].rt_valid_children[idx]; 852 if (type == child) 853 return (REP_PROTOCOL_SUCCESS); 854 } 855 856 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH); 857 } 858 859 /* 860 * Fails with 861 * _INVALID_TYPE - type is invalid 862 * _BAD_REQUEST - name is an invalid name for a node of type type 863 */ 864 int 865 rc_check_type_name(uint32_t type, const char *name) 866 { 867 if (type == 0 || type >= NUM_TYPES) 868 return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */ 869 870 if (uu_check_name(name, rc_types[type].rt_name_flags) == -1) 871 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 872 873 return (REP_PROTOCOL_SUCCESS); 874 } 875 876 static int 877 rc_check_pgtype_name(const char *name) 878 { 879 if (uu_check_name(name, UU_NAME_DOMAIN) == -1) 880 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 881 882 return (REP_PROTOCOL_SUCCESS); 883 } 884 885 /* 886 * rc_node_free_fmri should be called whenever a node loses its parent. 887 * The reason is that the node's fmri string is built up by concatenating 888 * its name to the parent's fmri. Thus, when the node no longer has a 889 * parent, its fmri is no longer valid. 890 */ 891 static void 892 rc_node_free_fmri(rc_node_t *np) 893 { 894 if (np->rn_fmri != NULL) { 895 free((void *)np->rn_fmri); 896 np->rn_fmri = NULL; 897 } 898 } 899 900 /* 901 * Concatenate the appropriate separator and the FMRI element to the base 902 * FMRI string at fmri. 903 * 904 * Fails with 905 * _TRUNCATED Not enough room in buffer at fmri. 906 */ 907 static int 908 rc_concat_fmri_element( 909 char *fmri, /* base fmri */ 910 size_t bufsize, /* size of buf at fmri */ 911 size_t *sz_out, /* receives result size. */ 912 const char *element, /* element name to concat */ 913 rep_protocol_entity_t type) /* type of element */ 914 { 915 size_t actual; 916 const char *name = element; 917 int rc; 918 const char *separator; 919 920 if (bufsize > 0) 921 *sz_out = strlen(fmri); 922 else 923 *sz_out = 0; 924 925 switch (type) { 926 case REP_PROTOCOL_ENTITY_SCOPE: 927 if (strcmp(element, SCF_FMRI_LOCAL_SCOPE) == 0) { 928 /* 929 * No need to display scope information if we are 930 * in the local scope. 931 */ 932 separator = SCF_FMRI_SVC_PREFIX; 933 name = NULL; 934 } else { 935 /* 936 * Need to display scope information, because it is 937 * not the local scope. 
938 */ 939 separator = SCF_FMRI_SVC_PREFIX SCF_FMRI_SCOPE_PREFIX; 940 } 941 break; 942 case REP_PROTOCOL_ENTITY_SERVICE: 943 separator = SCF_FMRI_SERVICE_PREFIX; 944 break; 945 case REP_PROTOCOL_ENTITY_INSTANCE: 946 separator = SCF_FMRI_INSTANCE_PREFIX; 947 break; 948 case REP_PROTOCOL_ENTITY_PROPERTYGRP: 949 case REP_PROTOCOL_ENTITY_CPROPERTYGRP: 950 separator = SCF_FMRI_PROPERTYGRP_PREFIX; 951 break; 952 case REP_PROTOCOL_ENTITY_PROPERTY: 953 separator = SCF_FMRI_PROPERTY_PREFIX; 954 break; 955 case REP_PROTOCOL_ENTITY_VALUE: 956 /* 957 * A value does not have a separate FMRI from its property, 958 * so there is nothing to concat. 959 */ 960 return (REP_PROTOCOL_SUCCESS); 961 case REP_PROTOCOL_ENTITY_SNAPSHOT: 962 case REP_PROTOCOL_ENTITY_SNAPLEVEL: 963 /* Snapshots do not have FMRIs, so there is nothing to do. */ 964 return (REP_PROTOCOL_SUCCESS); 965 default: 966 (void) fprintf(stderr, "%s:%d: Unknown protocol type %d.\n", 967 __FILE__, __LINE__, type); 968 abort(); /* Missing a case in switch if we get here. */ 969 } 970 971 /* Concatenate separator and element to the fmri buffer. */ 972 973 actual = strlcat(fmri, separator, bufsize); 974 if (name != NULL) { 975 if (actual < bufsize) { 976 actual = strlcat(fmri, name, bufsize); 977 } else { 978 actual += strlen(name); 979 } 980 } 981 if (actual < bufsize) { 982 rc = REP_PROTOCOL_SUCCESS; 983 } else { 984 rc = REP_PROTOCOL_FAIL_TRUNCATED; 985 } 986 *sz_out = actual; 987 return (rc); 988 } 989 990 /* 991 * Get the FMRI for the node at np. The fmri will be placed in buf. On 992 * success sz_out will be set to the size of the fmri in buf. If 993 * REP_PROTOCOL_FAIL_TRUNCATED is returned, sz_out will be set to the size 994 * of the buffer that would be required to avoid truncation. 995 * 996 * Fails with 997 * _TRUNCATED not enough room in buf for the FMRI. 998 */ 999 static int 1000 rc_node_get_fmri_or_fragment(rc_node_t *np, char *buf, size_t bufsize, 1001 size_t *sz_out) 1002 { 1003 size_t fmri_len = 0; 1004 int r; 1005 1006 if (bufsize > 0) 1007 *buf = 0; 1008 *sz_out = 0; 1009 1010 if (np->rn_fmri == NULL) { 1011 /* 1012 * A NULL rn_fmri implies that this is a top level scope. 1013 * Child nodes will always have an rn_fmri established 1014 * because both rc_node_link_child() and 1015 * rc_node_relink_child() call rc_node_build_fmri(). In 1016 * this case, we'll just return our name preceded by the 1017 * appropriate FMRI decorations. 1018 */ 1019 assert(np->rn_parent == NULL); 1020 r = rc_concat_fmri_element(buf, bufsize, &fmri_len, np->rn_name, 1021 np->rn_id.rl_type); 1022 if (r != REP_PROTOCOL_SUCCESS) 1023 return (r); 1024 } else { 1025 /* We have an fmri, so return it. */ 1026 fmri_len = strlcpy(buf, np->rn_fmri, bufsize); 1027 } 1028 1029 *sz_out = fmri_len; 1030 1031 if (fmri_len >= bufsize) 1032 return (REP_PROTOCOL_FAIL_TRUNCATED); 1033 1034 return (REP_PROTOCOL_SUCCESS); 1035 } 1036 1037 /* 1038 * Build an FMRI string for this node and save it in rn_fmri. 1039 * 1040 * The basic strategy here is to get the fmri of our parent and then 1041 * concatenate the appropriate separator followed by our name. If our name 1042 * is null, the resulting fmri will just be a copy of the parent fmri. 1043 * rc_node_build_fmri() should be called with the RC_NODE_USING_PARENT flag 1044 * set. Also the rn_lock for this node should be held. 1045 * 1046 * Fails with 1047 * _NO_RESOURCES Could not allocate memory. 
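 *
 * (For example -- hypothetical names -- linking an instance "default"
 * under a service whose FMRI is "svc:/system/foo" yields
 * "svc:/system/foo:default", and linking a property group "start" under
 * that instance yields "svc:/system/foo:default/:properties/start".)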
1048 */ 1049 static int 1050 rc_node_build_fmri(rc_node_t *np) 1051 { 1052 size_t actual; 1053 char fmri[REP_PROTOCOL_FMRI_LEN]; 1054 int rc; 1055 size_t sz = REP_PROTOCOL_FMRI_LEN; 1056 1057 assert(MUTEX_HELD(&np->rn_lock)); 1058 assert(np->rn_flags & RC_NODE_USING_PARENT); 1059 1060 rc_node_free_fmri(np); 1061 1062 rc = rc_node_get_fmri_or_fragment(np->rn_parent, fmri, sz, &actual); 1063 assert(rc == REP_PROTOCOL_SUCCESS); 1064 1065 if (np->rn_name != NULL) { 1066 rc = rc_concat_fmri_element(fmri, sz, &actual, np->rn_name, 1067 np->rn_id.rl_type); 1068 assert(rc == REP_PROTOCOL_SUCCESS); 1069 np->rn_fmri = strdup(fmri); 1070 } else { 1071 np->rn_fmri = strdup(fmri); 1072 } 1073 if (np->rn_fmri == NULL) { 1074 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 1075 } else { 1076 rc = REP_PROTOCOL_SUCCESS; 1077 } 1078 1079 return (rc); 1080 } 1081 1082 /* 1083 * Get the FMRI of the node at np placing the result in fmri. Then 1084 * concatenate the additional element to fmri. The type variable indicates 1085 * the type of element, so that the appropriate separator can be 1086 * generated. size is the number of bytes in the buffer at fmri, and 1087 * sz_out receives the size of the generated string. If the result is 1088 * truncated, sz_out will receive the size of the buffer that would be 1089 * required to avoid truncation. 1090 * 1091 * Fails with 1092 * _TRUNCATED Not enough room in buffer at fmri. 1093 */ 1094 static int 1095 rc_get_fmri_and_concat(rc_node_t *np, char *fmri, size_t size, size_t *sz_out, 1096 const char *element, rep_protocol_entity_t type) 1097 { 1098 int rc; 1099 1100 if ((rc = rc_node_get_fmri_or_fragment(np, fmri, size, sz_out)) != 1101 REP_PROTOCOL_SUCCESS) { 1102 return (rc); 1103 } 1104 if ((rc = rc_concat_fmri_element(fmri, size, sz_out, element, type)) != 1105 REP_PROTOCOL_SUCCESS) { 1106 return (rc); 1107 } 1108 1109 return (REP_PROTOCOL_SUCCESS); 1110 } 1111 1112 static int 1113 rc_notify_info_interested(rc_notify_info_t *rnip, rc_notify_t *np) 1114 { 1115 rc_node_t *nnp = np->rcn_node; 1116 int i; 1117 1118 assert(MUTEX_HELD(&rc_pg_notify_lock)); 1119 1120 if (np->rcn_delete != NULL) { 1121 assert(np->rcn_info == NULL && np->rcn_node == NULL); 1122 return (1); /* everyone likes deletes */ 1123 } 1124 if (np->rcn_node == NULL) { 1125 assert(np->rcn_info != NULL || np->rcn_delete != NULL); 1126 return (0); 1127 } 1128 assert(np->rcn_info == NULL); 1129 1130 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) { 1131 if (rnip->rni_namelist[i] != NULL) { 1132 if (strcmp(nnp->rn_name, rnip->rni_namelist[i]) == 0) 1133 return (1); 1134 } 1135 if (rnip->rni_typelist[i] != NULL) { 1136 if (strcmp(nnp->rn_type, rnip->rni_typelist[i]) == 0) 1137 return (1); 1138 } 1139 } 1140 return (0); 1141 } 1142 1143 static void 1144 rc_notify_insert_node(rc_node_t *nnp) 1145 { 1146 rc_notify_t *np = &nnp->rn_notify; 1147 rc_notify_info_t *nip; 1148 int found = 0; 1149 1150 assert(np->rcn_info == NULL); 1151 1152 if (nnp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) 1153 return; 1154 1155 (void) pthread_mutex_lock(&rc_pg_notify_lock); 1156 np->rcn_node = nnp; 1157 for (nip = uu_list_first(rc_notify_info_list); nip != NULL; 1158 nip = uu_list_next(rc_notify_info_list, nip)) { 1159 if (rc_notify_info_interested(nip, np)) { 1160 (void) pthread_cond_broadcast(&nip->rni_cv); 1161 found++; 1162 } 1163 } 1164 if (found) 1165 (void) uu_list_insert_before(rc_notify_list, NULL, np); 1166 else 1167 np->rcn_node = NULL; 1168 1169 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 1170 } 1171 1172 static void 1173 
rc_notify_deletion(rc_notify_delete_t *ndp, const char *service, 1174 const char *instance, const char *pg) 1175 { 1176 rc_notify_info_t *nip; 1177 1178 uu_list_node_init(&ndp->rnd_notify, &ndp->rnd_notify.rcn_list_node, 1179 rc_notify_pool); 1180 ndp->rnd_notify.rcn_delete = ndp; 1181 1182 (void) snprintf(ndp->rnd_fmri, sizeof (ndp->rnd_fmri), 1183 "svc:/%s%s%s%s%s", service, 1184 (instance != NULL)? ":" : "", (instance != NULL)? instance : "", 1185 (pg != NULL)? "/:properties/" : "", (pg != NULL)? pg : ""); 1186 1187 /* 1188 * add to notification list, notify watchers 1189 */ 1190 (void) pthread_mutex_lock(&rc_pg_notify_lock); 1191 for (nip = uu_list_first(rc_notify_info_list); nip != NULL; 1192 nip = uu_list_next(rc_notify_info_list, nip)) 1193 (void) pthread_cond_broadcast(&nip->rni_cv); 1194 (void) uu_list_insert_before(rc_notify_list, NULL, ndp); 1195 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 1196 } 1197 1198 static void 1199 rc_notify_remove_node(rc_node_t *nnp) 1200 { 1201 rc_notify_t *np = &nnp->rn_notify; 1202 1203 assert(np->rcn_info == NULL); 1204 assert(!MUTEX_HELD(&nnp->rn_lock)); 1205 1206 (void) pthread_mutex_lock(&rc_pg_notify_lock); 1207 while (np->rcn_node != NULL) { 1208 if (rc_notify_in_use) { 1209 (void) pthread_cond_wait(&rc_pg_notify_cv, 1210 &rc_pg_notify_lock); 1211 continue; 1212 } 1213 (void) uu_list_remove(rc_notify_list, np); 1214 np->rcn_node = NULL; 1215 break; 1216 } 1217 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 1218 } 1219 1220 static void 1221 rc_notify_remove_locked(rc_notify_t *np) 1222 { 1223 assert(MUTEX_HELD(&rc_pg_notify_lock)); 1224 assert(rc_notify_in_use == 0); 1225 1226 (void) uu_list_remove(rc_notify_list, np); 1227 if (np->rcn_node) { 1228 np->rcn_node = NULL; 1229 } else if (np->rcn_delete) { 1230 uu_free(np->rcn_delete); 1231 } else { 1232 assert(0); /* CAN'T HAPPEN */ 1233 } 1234 } 1235 1236 /* 1237 * Permission checking functions. See comment atop this file. 1238 */ 1239 #ifndef NATIVE_BUILD 1240 static permcheck_t * 1241 pc_create() 1242 { 1243 permcheck_t *p; 1244 1245 p = uu_zalloc(sizeof (*p)); 1246 if (p == NULL) 1247 return (NULL); 1248 p->pc_bnum = 8; /* Normal case will only have 2 elts. */ 1249 p->pc_buckets = uu_zalloc(sizeof (*p->pc_buckets) * p->pc_bnum); 1250 if (p->pc_buckets == NULL) { 1251 uu_free(p); 1252 return (NULL); 1253 } 1254 1255 p->pc_enum = 0; 1256 return (p); 1257 } 1258 1259 static void 1260 pc_free(permcheck_t *pcp) 1261 { 1262 uint_t i; 1263 struct pc_elt *ep, *next; 1264 1265 for (i = 0; i < pcp->pc_bnum; ++i) { 1266 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) { 1267 next = ep->pce_next; 1268 free(ep); 1269 } 1270 } 1271 1272 free(pcp->pc_buckets); 1273 free(pcp); 1274 } 1275 1276 static uint32_t 1277 pc_hash(const char *auth) 1278 { 1279 uint32_t h = 0, g; 1280 const char *p; 1281 1282 /* 1283 * Generic hash function from uts/common/os/modhash.c. 
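 * (It is a variant of the classic PJW/ELF string hash: shift in each
 * character, then fold the high nibble back into the low bits.)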
1284 */ 1285 for (p = auth; *p != '\0'; ++p) { 1286 h = (h << 4) + *p; 1287 g = (h & 0xf0000000); 1288 if (g != 0) { 1289 h ^= (g >> 24); 1290 h ^= g; 1291 } 1292 } 1293 1294 return (h); 1295 } 1296 1297 static int 1298 pc_exists(permcheck_t *pcp, const char *auth) 1299 { 1300 uint32_t h; 1301 struct pc_elt *ep; 1302 1303 h = pc_hash(auth); 1304 for (ep = pcp->pc_buckets[h & (pcp->pc_bnum - 1)]; 1305 ep != NULL; 1306 ep = ep->pce_next) { 1307 if (strcmp(auth, ep->pce_auth) == 0) { 1308 pcp->pc_auth_string = ep->pce_auth; 1309 return (1); 1310 } 1311 } 1312 1313 return (0); 1314 } 1315 1316 static int 1317 pc_match(permcheck_t *pcp, const char *pattern) 1318 { 1319 uint_t i; 1320 struct pc_elt *ep; 1321 1322 for (i = 0; i < pcp->pc_bnum; ++i) { 1323 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = ep->pce_next) { 1324 if (_auth_match(pattern, ep->pce_auth)) { 1325 pcp->pc_auth_string = ep->pce_auth; 1326 return (1); 1327 } 1328 } 1329 } 1330 1331 return (0); 1332 } 1333 1334 static int 1335 pc_grow(permcheck_t *pcp) 1336 { 1337 uint_t new_bnum, i, j; 1338 struct pc_elt **new_buckets; 1339 struct pc_elt *ep, *next; 1340 1341 new_bnum = pcp->pc_bnum * 2; 1342 if (new_bnum < pcp->pc_bnum) 1343 /* Homey don't play that. */ 1344 return (-1); 1345 1346 new_buckets = uu_zalloc(sizeof (*new_buckets) * new_bnum); 1347 if (new_buckets == NULL) 1348 return (-1); 1349 1350 for (i = 0; i < pcp->pc_bnum; ++i) { 1351 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) { 1352 next = ep->pce_next; 1353 j = pc_hash(ep->pce_auth) & (new_bnum - 1); 1354 ep->pce_next = new_buckets[j]; 1355 new_buckets[j] = ep; 1356 } 1357 } 1358 1359 uu_free(pcp->pc_buckets); 1360 pcp->pc_buckets = new_buckets; 1361 pcp->pc_bnum = new_bnum; 1362 1363 return (0); 1364 } 1365 1366 static int 1367 pc_add(permcheck_t *pcp, const char *auth, pc_auth_type_t auth_type) 1368 { 1369 struct pc_elt *ep; 1370 uint_t i; 1371 1372 ep = uu_zalloc(offsetof(struct pc_elt, pce_auth) + strlen(auth) + 1); 1373 if (ep == NULL) 1374 return (-1); 1375 1376 /* Grow if pc_enum / pc_bnum > 3/4. */ 1377 if (pcp->pc_enum * 4 > 3 * pcp->pc_bnum) 1378 /* Failure is not a stopper; we'll try again next time. */ 1379 (void) pc_grow(pcp); 1380 1381 (void) strcpy(ep->pce_auth, auth); 1382 1383 i = pc_hash(auth) & (pcp->pc_bnum - 1); 1384 ep->pce_next = pcp->pc_buckets[i]; 1385 pcp->pc_buckets[i] = ep; 1386 1387 if (auth_type > pcp->pc_specific_type) { 1388 pcp->pc_specific_type = auth_type; 1389 pcp->pc_specific = ep; 1390 } 1391 1392 ++pcp->pc_enum; 1393 1394 return (0); 1395 } 1396 1397 /* 1398 * For the type of a property group, return the authorization which may be 1399 * used to modify it. 1400 */ 1401 static const char * 1402 perm_auth_for_pgtype(const char *pgtype) 1403 { 1404 if (strcmp(pgtype, SCF_GROUP_METHOD) == 0) 1405 return (AUTH_MODIFY_PREFIX "method"); 1406 else if (strcmp(pgtype, SCF_GROUP_DEPENDENCY) == 0) 1407 return (AUTH_MODIFY_PREFIX "dependency"); 1408 else if (strcmp(pgtype, SCF_GROUP_APPLICATION) == 0) 1409 return (AUTH_MODIFY_PREFIX "application"); 1410 else if (strcmp(pgtype, SCF_GROUP_FRAMEWORK) == 0) 1411 return (AUTH_MODIFY_PREFIX "framework"); 1412 else 1413 return (NULL); 1414 } 1415 1416 /* 1417 * Fails with 1418 * _NO_RESOURCES - out of memory 1419 */ 1420 static int 1421 perm_add_enabling_type(permcheck_t *pcp, const char *auth, 1422 pc_auth_type_t auth_type) 1423 { 1424 return (pc_add(pcp, auth, auth_type) == 0 ? 
REP_PROTOCOL_SUCCESS : 1425 REP_PROTOCOL_FAIL_NO_RESOURCES); 1426 } 1427 1428 /* 1429 * Fails with 1430 * _NO_RESOURCES - out of memory 1431 */ 1432 static int 1433 perm_add_enabling(permcheck_t *pcp, const char *auth) 1434 { 1435 return (perm_add_enabling_type(pcp, auth, PC_AUTH_SMF)); 1436 } 1437 1438 /* Note that perm_add_enabling_values() is defined below. */ 1439 1440 /* 1441 * perm_granted() returns 1 if the current door caller has one of the enabling 1442 * authorizations in pcp, 0 if it doesn't, and -1 if an error (usually lack of 1443 * memory) occurs. check_auth_list() checks an RBAC_AUTH_SEP-separated list 1444 * of authorizations for existence in pcp, and check_prof_list() checks the 1445 * authorizations granted to an RBAC_AUTH_SEP-separated list of profiles. 1446 */ 1447 static int 1448 check_auth_list(permcheck_t *pcp, char *authlist) 1449 { 1450 char *auth, *lasts; 1451 int ret; 1452 1453 for (auth = (char *)strtok_r(authlist, RBAC_AUTH_SEP, &lasts); 1454 auth != NULL; 1455 auth = (char *)strtok_r(NULL, RBAC_AUTH_SEP, &lasts)) { 1456 if (strchr(auth, KV_WILDCHAR) == NULL) 1457 ret = pc_exists(pcp, auth); 1458 else 1459 ret = pc_match(pcp, auth); 1460 1461 if (ret) 1462 return (ret); 1463 } 1464 1465 /* 1466 * If we failed, choose the most specific auth string for use in 1467 * the audit event. 1468 */ 1469 assert(pcp->pc_specific != NULL); 1470 pcp->pc_auth_string = pcp->pc_specific->pce_auth; 1471 1472 return (0); 1473 } 1474 1475 static int 1476 check_prof_list(permcheck_t *pcp, char *proflist) 1477 { 1478 char *prof, *lasts, *authlist, *subproflist; 1479 profattr_t *pap; 1480 int ret = 0; 1481 1482 for (prof = strtok_r(proflist, RBAC_AUTH_SEP, &lasts); 1483 prof != NULL; 1484 prof = strtok_r(NULL, RBAC_AUTH_SEP, &lasts)) { 1485 pap = getprofnam(prof); 1486 if (pap == NULL) 1487 continue; 1488 1489 authlist = kva_match(pap->attr, PROFATTR_AUTHS_KW); 1490 if (authlist != NULL) 1491 ret = check_auth_list(pcp, authlist); 1492 1493 if (!ret) { 1494 subproflist = kva_match(pap->attr, PROFATTR_PROFS_KW); 1495 if (subproflist != NULL) 1496 /* depth check to avoid infinite recursion? */ 1497 ret = check_prof_list(pcp, subproflist); 1498 } 1499 1500 free_profattr(pap); 1501 if (ret) 1502 return (ret); 1503 } 1504 1505 return (ret); 1506 } 1507 1508 static int 1509 perm_granted(permcheck_t *pcp) 1510 { 1511 ucred_t *uc; 1512 1513 int ret = 0; 1514 uid_t uid; 1515 userattr_t *uap; 1516 char *authlist, *userattr_authlist, *proflist, *def_prof = NULL; 1517 struct passwd pw; 1518 char pwbuf[1024]; /* XXX should be NSS_BUFLEN_PASSWD */ 1519 1520 /* Get the uid */ 1521 if ((uc = get_ucred()) == NULL) { 1522 if (errno == EINVAL) { 1523 /* 1524 * Client is no longer waiting for our response (e.g., 1525 * it received a signal & resumed with EINTR). 1526 * Punting with door_return() would be nice but we 1527 * need to release all of the locks & references we 1528 * hold. And we must report failure to the client 1529 * layer to keep it from ignoring retries as 1530 * already-done (idempotency & all that). None of the 1531 * error codes fit very well, so we might as well 1532 * force the return of _PERMISSION_DENIED since we 1533 * couldn't determine the user. 
1534 */ 1535 return (0); 1536 } 1537 assert(0); 1538 abort(); 1539 } 1540 1541 uid = ucred_geteuid(uc); 1542 assert(uid != (uid_t)-1); 1543 1544 if (getpwuid_r(uid, &pw, pwbuf, sizeof (pwbuf)) == NULL) { 1545 return (-1); 1546 } 1547 1548 /* 1549 * Get user's default authorizations from policy.conf 1550 */ 1551 ret = _get_user_defs(pw.pw_name, &authlist, &def_prof); 1552 1553 if (ret != 0) 1554 return (-1); 1555 1556 if (authlist != NULL) { 1557 ret = check_auth_list(pcp, authlist); 1558 1559 if (ret) { 1560 _free_user_defs(authlist, def_prof); 1561 return (ret); 1562 } 1563 } 1564 1565 /* 1566 * Put off checking def_prof for later in an attempt to consolidate 1567 * prof_attr accesses. 1568 */ 1569 1570 uap = getusernam(pw.pw_name); 1571 if (uap != NULL) { 1572 /* Get the authorizations from user_attr. */ 1573 userattr_authlist = kva_match(uap->attr, USERATTR_AUTHS_KW); 1574 if (userattr_authlist != NULL) { 1575 ret = check_auth_list(pcp, userattr_authlist); 1576 } 1577 } 1578 1579 if (!ret && def_prof != NULL) { 1580 /* Check generic profiles. */ 1581 ret = check_prof_list(pcp, def_prof); 1582 } 1583 1584 if (!ret && uap != NULL) { 1585 proflist = kva_match(uap->attr, USERATTR_PROFILES_KW); 1586 if (proflist != NULL) 1587 ret = check_prof_list(pcp, proflist); 1588 } 1589 1590 _free_user_defs(authlist, def_prof); 1591 if (uap != NULL) 1592 free_userattr(uap); 1593 1594 return (ret); 1595 } 1596 #endif /* NATIVE_BUILD */ 1597 1598 /* 1599 * flags in RC_NODE_WAITING_FLAGS are broadcast when unset, and are used to 1600 * serialize certain actions, and to wait for certain operations to complete 1601 * 1602 * The waiting flags are: 1603 * RC_NODE_CHILDREN_CHANGING 1604 * The child list is being built or changed (due to creation 1605 * or deletion). All iterators pause. 1606 * 1607 * RC_NODE_USING_PARENT 1608 * Someone is actively using the parent pointer, so we can't 1609 * be removed from the parent list. 1610 * 1611 * RC_NODE_CREATING_CHILD 1612 * A child is being created -- locks out other creations, to 1613 * prevent insert-insert races. 1614 * 1615 * RC_NODE_IN_TX 1616 * This object is running a transaction. 1617 * 1618 * RC_NODE_DYING 1619 * This node might be dying. Always set as a set, using 1620 * RC_NODE_DYING_FLAGS (which is everything but 1621 * RC_NODE_USING_PARENT) 1622 */ 1623 static int 1624 rc_node_hold_flag(rc_node_t *np, uint32_t flag) 1625 { 1626 assert(MUTEX_HELD(&np->rn_lock)); 1627 assert((flag & ~RC_NODE_WAITING_FLAGS) == 0); 1628 1629 while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag)) { 1630 (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock); 1631 } 1632 if (np->rn_flags & RC_NODE_DEAD) 1633 return (0); 1634 1635 np->rn_flags |= flag; 1636 return (1); 1637 } 1638 1639 static void 1640 rc_node_rele_flag(rc_node_t *np, uint32_t flag) 1641 { 1642 assert((flag & ~RC_NODE_WAITING_FLAGS) == 0); 1643 assert(MUTEX_HELD(&np->rn_lock)); 1644 assert((np->rn_flags & flag) == flag); 1645 np->rn_flags &= ~flag; 1646 (void) pthread_cond_broadcast(&np->rn_cv); 1647 } 1648 1649 /* 1650 * wait until a particular flag has cleared. Fails if the object dies. 1651 */ 1652 static int 1653 rc_node_wait_flag(rc_node_t *np, uint32_t flag) 1654 { 1655 assert(MUTEX_HELD(&np->rn_lock)); 1656 while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag)) 1657 (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock); 1658 1659 return (!(np->rn_flags & RC_NODE_DEAD)); 1660 } 1661 1662 /* 1663 * On entry, np's lock must be held, and this thread must be holding 1664 * RC_NODE_USING_PARENT. 
On return, both of them are released. 1665 * 1666 * If the return value is NULL, np either does not have a parent, or 1667 * the parent has been marked DEAD. 1668 * 1669 * If the return value is non-NULL, it is the parent of np, and both 1670 * its lock and the requested flags are held. 1671 */ 1672 static rc_node_t * 1673 rc_node_hold_parent_flag(rc_node_t *np, uint32_t flag) 1674 { 1675 rc_node_t *pp; 1676 1677 assert(MUTEX_HELD(&np->rn_lock)); 1678 assert(np->rn_flags & RC_NODE_USING_PARENT); 1679 1680 if ((pp = np->rn_parent) == NULL) { 1681 rc_node_rele_flag(np, RC_NODE_USING_PARENT); 1682 (void) pthread_mutex_unlock(&np->rn_lock); 1683 return (NULL); 1684 } 1685 (void) pthread_mutex_unlock(&np->rn_lock); 1686 1687 (void) pthread_mutex_lock(&pp->rn_lock); 1688 (void) pthread_mutex_lock(&np->rn_lock); 1689 rc_node_rele_flag(np, RC_NODE_USING_PARENT); 1690 (void) pthread_mutex_unlock(&np->rn_lock); 1691 1692 if (!rc_node_hold_flag(pp, flag)) { 1693 (void) pthread_mutex_unlock(&pp->rn_lock); 1694 return (NULL); 1695 } 1696 return (pp); 1697 } 1698 1699 rc_node_t * 1700 rc_node_alloc(void) 1701 { 1702 rc_node_t *np = uu_zalloc(sizeof (*np)); 1703 1704 if (np == NULL) 1705 return (NULL); 1706 1707 (void) pthread_mutex_init(&np->rn_lock, NULL); 1708 (void) pthread_cond_init(&np->rn_cv, NULL); 1709 1710 np->rn_children = uu_list_create(rc_children_pool, np, 0); 1711 np->rn_pg_notify_list = uu_list_create(rc_pg_notify_pool, np, 0); 1712 1713 uu_list_node_init(np, &np->rn_sibling_node, rc_children_pool); 1714 1715 uu_list_node_init(&np->rn_notify, &np->rn_notify.rcn_list_node, 1716 rc_notify_pool); 1717 1718 return (np); 1719 } 1720 1721 void 1722 rc_node_destroy(rc_node_t *np) 1723 { 1724 int i; 1725 1726 if (np->rn_flags & RC_NODE_UNREFED) 1727 return; /* being handled elsewhere */ 1728 1729 assert(np->rn_refs == 0 && np->rn_other_refs == 0); 1730 assert(np->rn_former == NULL); 1731 1732 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) { 1733 /* Release the holds from rc_iter_next(). */ 1734 for (i = 0; i < COMPOSITION_DEPTH; ++i) { 1735 /* rn_cchain[i] may be NULL for empty snapshots. */ 1736 if (np->rn_cchain[i] != NULL) 1737 rc_node_rele(np->rn_cchain[i]); 1738 } 1739 } 1740 1741 if (np->rn_name != NULL) 1742 free((void *)np->rn_name); 1743 np->rn_name = NULL; 1744 if (np->rn_type != NULL) 1745 free((void *)np->rn_type); 1746 np->rn_type = NULL; 1747 if (np->rn_values != NULL) 1748 object_free_values(np->rn_values, np->rn_valtype, 1749 np->rn_values_count, np->rn_values_size); 1750 np->rn_values = NULL; 1751 rc_node_free_fmri(np); 1752 1753 if (np->rn_snaplevel != NULL) 1754 rc_snaplevel_rele(np->rn_snaplevel); 1755 np->rn_snaplevel = NULL; 1756 1757 uu_list_node_fini(np, &np->rn_sibling_node, rc_children_pool); 1758 1759 uu_list_node_fini(&np->rn_notify, &np->rn_notify.rcn_list_node, 1760 rc_notify_pool); 1761 1762 assert(uu_list_first(np->rn_children) == NULL); 1763 uu_list_destroy(np->rn_children); 1764 uu_list_destroy(np->rn_pg_notify_list); 1765 1766 (void) pthread_mutex_destroy(&np->rn_lock); 1767 (void) pthread_cond_destroy(&np->rn_cv); 1768 1769 uu_free(np); 1770 } 1771 1772 /* 1773 * Link in a child node. 1774 * 1775 * Because of the lock ordering, cp has to already be in the hash table with 1776 * its lock dropped before we get it. To prevent anyone from noticing that 1777 * it is parentless, the creation code sets the RC_NODE_USING_PARENT. Once 1778 * we've linked it in, we release the flag. 
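 *
 * (So the expected caller sequence is, roughly: allocate cp with
 * RC_NODE_USING_PARENT set, insert it into the cache, release the bucket
 * lock, and only then call rc_node_link_child(pp, cp) with neither node
 * lock held -- rc_node_setup() below follows exactly this pattern.)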
1779 */ 1780 static void 1781 rc_node_link_child(rc_node_t *np, rc_node_t *cp) 1782 { 1783 assert(!MUTEX_HELD(&np->rn_lock)); 1784 assert(!MUTEX_HELD(&cp->rn_lock)); 1785 1786 (void) pthread_mutex_lock(&np->rn_lock); 1787 (void) pthread_mutex_lock(&cp->rn_lock); 1788 assert(!(cp->rn_flags & RC_NODE_IN_PARENT) && 1789 (cp->rn_flags & RC_NODE_USING_PARENT)); 1790 1791 assert(rc_check_parent_child(np->rn_id.rl_type, cp->rn_id.rl_type) == 1792 REP_PROTOCOL_SUCCESS); 1793 1794 cp->rn_parent = np; 1795 cp->rn_flags |= RC_NODE_IN_PARENT; 1796 (void) uu_list_insert_before(np->rn_children, NULL, cp); 1797 (void) rc_node_build_fmri(cp); 1798 1799 (void) pthread_mutex_unlock(&np->rn_lock); 1800 1801 rc_node_rele_flag(cp, RC_NODE_USING_PARENT); 1802 (void) pthread_mutex_unlock(&cp->rn_lock); 1803 } 1804 1805 /* 1806 * Sets the rn_parent_ref field of all the children of np to pp -- always 1807 * initially invoked as rc_node_setup_parent_ref(np, np), we then recurse. 1808 * 1809 * This is used when we mark a node RC_NODE_OLD, so that when the object and 1810 * its children are no longer referenced, they will all be deleted as a unit. 1811 */ 1812 static void 1813 rc_node_setup_parent_ref(rc_node_t *np, rc_node_t *pp) 1814 { 1815 rc_node_t *cp; 1816 1817 assert(MUTEX_HELD(&np->rn_lock)); 1818 1819 for (cp = uu_list_first(np->rn_children); cp != NULL; 1820 cp = uu_list_next(np->rn_children, cp)) { 1821 (void) pthread_mutex_lock(&cp->rn_lock); 1822 if (cp->rn_flags & RC_NODE_PARENT_REF) { 1823 assert(cp->rn_parent_ref == pp); 1824 } else { 1825 assert(cp->rn_parent_ref == NULL); 1826 1827 cp->rn_flags |= RC_NODE_PARENT_REF; 1828 cp->rn_parent_ref = pp; 1829 if (cp->rn_refs != 0) 1830 rc_node_hold_other(pp); 1831 } 1832 rc_node_setup_parent_ref(cp, pp); /* recurse */ 1833 (void) pthread_mutex_unlock(&cp->rn_lock); 1834 } 1835 } 1836 1837 /* 1838 * Atomically replace 'np' with 'newp', with a parent of 'pp'. 1839 * 1840 * Requirements: 1841 * *no* node locks may be held. 1842 * pp must be held with RC_NODE_CHILDREN_CHANGING 1843 * newp and np must be held with RC_NODE_IN_TX 1844 * np must be marked RC_NODE_IN_PARENT, newp must not be 1845 * np must be marked RC_NODE_OLD 1846 * 1847 * Afterwards: 1848 * pp's RC_NODE_CHILDREN_CHANGING is dropped 1849 * newp and np's RC_NODE_IN_TX is dropped 1850 * newp->rn_former = np; 1851 * newp is RC_NODE_IN_PARENT, np is not. 1852 * interested notify subscribers have been notified of newp's new status. 1853 */ 1854 static void 1855 rc_node_relink_child(rc_node_t *pp, rc_node_t *np, rc_node_t *newp) 1856 { 1857 cache_bucket_t *bp; 1858 /* 1859 * First, swap np and nnp in the cache. newp's RC_NODE_IN_TX flag 1860 * keeps rc_node_update() from seeing it until we are done. 1861 */ 1862 bp = cache_hold(newp->rn_hash); 1863 cache_remove_unlocked(bp, np); 1864 cache_insert_unlocked(bp, newp); 1865 cache_release(bp); 1866 1867 /* 1868 * replace np with newp in pp's list, and attach it to newp's rn_former 1869 * link. 
1870 */ 1871 (void) pthread_mutex_lock(&pp->rn_lock); 1872 assert(pp->rn_flags & RC_NODE_CHILDREN_CHANGING); 1873 1874 (void) pthread_mutex_lock(&newp->rn_lock); 1875 assert(!(newp->rn_flags & RC_NODE_IN_PARENT)); 1876 assert(newp->rn_flags & RC_NODE_IN_TX); 1877 1878 (void) pthread_mutex_lock(&np->rn_lock); 1879 assert(np->rn_flags & RC_NODE_IN_PARENT); 1880 assert(np->rn_flags & RC_NODE_OLD); 1881 assert(np->rn_flags & RC_NODE_IN_TX); 1882 1883 newp->rn_parent = pp; 1884 newp->rn_flags |= RC_NODE_IN_PARENT; 1885 1886 /* 1887 * Note that we carefully add newp before removing np -- this 1888 * keeps iterators on the list from missing us. 1889 */ 1890 (void) uu_list_insert_after(pp->rn_children, np, newp); 1891 (void) rc_node_build_fmri(newp); 1892 (void) uu_list_remove(pp->rn_children, np); 1893 1894 /* 1895 * re-set np 1896 */ 1897 newp->rn_former = np; 1898 np->rn_parent = NULL; 1899 np->rn_flags &= ~RC_NODE_IN_PARENT; 1900 np->rn_flags |= RC_NODE_ON_FORMER; 1901 1902 rc_notify_insert_node(newp); 1903 1904 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING); 1905 (void) pthread_mutex_unlock(&pp->rn_lock); 1906 rc_node_rele_flag(newp, RC_NODE_USING_PARENT | RC_NODE_IN_TX); 1907 (void) pthread_mutex_unlock(&newp->rn_lock); 1908 rc_node_setup_parent_ref(np, np); 1909 rc_node_rele_flag(np, RC_NODE_IN_TX); 1910 (void) pthread_mutex_unlock(&np->rn_lock); 1911 } 1912 1913 /* 1914 * makes sure a node with lookup 'nip', name 'name', and parent 'pp' exists. 1915 * 'cp' is used (and returned) if the node does not yet exist. If it does 1916 * exist, 'cp' is freed, and the existent node is returned instead. 1917 */ 1918 rc_node_t * 1919 rc_node_setup(rc_node_t *cp, rc_node_lookup_t *nip, const char *name, 1920 rc_node_t *pp) 1921 { 1922 rc_node_t *np; 1923 cache_bucket_t *bp; 1924 uint32_t h = rc_node_hash(nip); 1925 1926 assert(cp->rn_refs == 0); 1927 1928 bp = cache_hold(h); 1929 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) { 1930 cache_release(bp); 1931 1932 /* 1933 * make sure it matches our expectations 1934 */ 1935 (void) pthread_mutex_lock(&np->rn_lock); 1936 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) { 1937 assert(np->rn_parent == pp); 1938 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0); 1939 assert(strcmp(np->rn_name, name) == 0); 1940 assert(np->rn_type == NULL); 1941 assert(np->rn_flags & RC_NODE_IN_PARENT); 1942 rc_node_rele_flag(np, RC_NODE_USING_PARENT); 1943 } 1944 (void) pthread_mutex_unlock(&np->rn_lock); 1945 1946 rc_node_destroy(cp); 1947 return (np); 1948 } 1949 1950 /* 1951 * No one is there -- create a new node. 1952 */ 1953 np = cp; 1954 rc_node_hold(np); 1955 np->rn_id = *nip; 1956 np->rn_hash = h; 1957 np->rn_name = strdup(name); 1958 1959 np->rn_flags |= RC_NODE_USING_PARENT; 1960 1961 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE) { 1962 #if COMPOSITION_DEPTH == 2 1963 np->rn_cchain[0] = np; 1964 np->rn_cchain[1] = pp; 1965 #else 1966 #error This code must be updated. 1967 #endif 1968 } 1969 1970 cache_insert_unlocked(bp, np); 1971 cache_release(bp); /* we are now visible */ 1972 1973 rc_node_link_child(pp, np); 1974 1975 return (np); 1976 } 1977 1978 /* 1979 * makes sure a snapshot with lookup 'nip', name 'name', and parent 'pp' exists. 1980 * 'cp' is used (and returned) if the node does not yet exist. If it does 1981 * exist, 'cp' is freed, and the existent node is returned instead. 
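 *
 * (Illustrative sketch, not from the original source and not compiled:
 * the calling convention shared by the rc_node_setup*() family.  A caller
 * allocates a candidate node, hands it to the setup routine, and then uses
 * only the returned pointer -- the candidate is adopted if no matching node
 * is cached, and destroyed otherwise.  The function name is a placeholder.)
 */
#if 0
static int
example_setup_child(rc_node_t *pp, rc_node_lookup_t *nip, const char *name)
{
	rc_node_t *cp, *np;

	if ((cp = rc_node_alloc()) == NULL)
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);

	/*
	 * If an equivalent node is already cached, cp is destroyed and the
	 * cached node is returned; either way np comes back held.
	 */
	np = rc_node_setup(cp, nip, name, pp);

	/* ... use np ... */

	rc_node_rele(np);		/* drop the hold taken on our behalf */

	return (REP_PROTOCOL_SUCCESS);
}
#endif
/*
 * (End of sketch; the original comment resumes.)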
1982 */ 1983 rc_node_t * 1984 rc_node_setup_snapshot(rc_node_t *cp, rc_node_lookup_t *nip, const char *name, 1985 uint32_t snap_id, rc_node_t *pp) 1986 { 1987 rc_node_t *np; 1988 cache_bucket_t *bp; 1989 uint32_t h = rc_node_hash(nip); 1990 1991 assert(cp->rn_refs == 0); 1992 1993 bp = cache_hold(h); 1994 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) { 1995 cache_release(bp); 1996 1997 /* 1998 * make sure it matches our expectations 1999 */ 2000 (void) pthread_mutex_lock(&np->rn_lock); 2001 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) { 2002 assert(np->rn_parent == pp); 2003 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0); 2004 assert(strcmp(np->rn_name, name) == 0); 2005 assert(np->rn_type == NULL); 2006 assert(np->rn_flags & RC_NODE_IN_PARENT); 2007 rc_node_rele_flag(np, RC_NODE_USING_PARENT); 2008 } 2009 (void) pthread_mutex_unlock(&np->rn_lock); 2010 2011 rc_node_destroy(cp); 2012 return (np); 2013 } 2014 2015 /* 2016 * No one is there -- create a new node. 2017 */ 2018 np = cp; 2019 rc_node_hold(np); 2020 np->rn_id = *nip; 2021 np->rn_hash = h; 2022 np->rn_name = strdup(name); 2023 np->rn_snapshot_id = snap_id; 2024 2025 np->rn_flags |= RC_NODE_USING_PARENT; 2026 2027 cache_insert_unlocked(bp, np); 2028 cache_release(bp); /* we are now visible */ 2029 2030 rc_node_link_child(pp, np); 2031 2032 return (np); 2033 } 2034 2035 /* 2036 * makes sure a snaplevel with lookup 'nip' and parent 'pp' exists. 'cp' is 2037 * used (and returned) if the node does not yet exist. If it does exist, 'cp' 2038 * is freed, and the existent node is returned instead. 2039 */ 2040 rc_node_t * 2041 rc_node_setup_snaplevel(rc_node_t *cp, rc_node_lookup_t *nip, 2042 rc_snaplevel_t *lvl, rc_node_t *pp) 2043 { 2044 rc_node_t *np; 2045 cache_bucket_t *bp; 2046 uint32_t h = rc_node_hash(nip); 2047 2048 assert(cp->rn_refs == 0); 2049 2050 bp = cache_hold(h); 2051 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) { 2052 cache_release(bp); 2053 2054 /* 2055 * make sure it matches our expectations 2056 */ 2057 (void) pthread_mutex_lock(&np->rn_lock); 2058 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) { 2059 assert(np->rn_parent == pp); 2060 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0); 2061 assert(np->rn_name == NULL); 2062 assert(np->rn_type == NULL); 2063 assert(np->rn_flags & RC_NODE_IN_PARENT); 2064 rc_node_rele_flag(np, RC_NODE_USING_PARENT); 2065 } 2066 (void) pthread_mutex_unlock(&np->rn_lock); 2067 2068 rc_node_destroy(cp); 2069 return (np); 2070 } 2071 2072 /* 2073 * No one is there -- create a new node. 2074 */ 2075 np = cp; 2076 rc_node_hold(np); /* released in snapshot_fill_children() */ 2077 np->rn_id = *nip; 2078 np->rn_hash = h; 2079 2080 rc_snaplevel_hold(lvl); 2081 np->rn_snaplevel = lvl; 2082 2083 np->rn_flags |= RC_NODE_USING_PARENT; 2084 2085 cache_insert_unlocked(bp, np); 2086 cache_release(bp); /* we are now visible */ 2087 2088 /* Add this snaplevel to the snapshot's composition chain. */ 2089 assert(pp->rn_cchain[lvl->rsl_level_num - 1] == NULL); 2090 pp->rn_cchain[lvl->rsl_level_num - 1] = np; 2091 2092 rc_node_link_child(pp, np); 2093 2094 return (np); 2095 } 2096 2097 /* 2098 * Returns NULL if strdup() fails. 
2099 */ 2100 rc_node_t * 2101 rc_node_setup_pg(rc_node_t *cp, rc_node_lookup_t *nip, const char *name, 2102 const char *type, uint32_t flags, uint32_t gen_id, rc_node_t *pp) 2103 { 2104 rc_node_t *np; 2105 cache_bucket_t *bp; 2106 2107 uint32_t h = rc_node_hash(nip); 2108 bp = cache_hold(h); 2109 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) { 2110 cache_release(bp); 2111 2112 /* 2113 * make sure it matches our expectations (don't check 2114 * the generation number or parent, since someone could 2115 * have gotten a transaction through while we weren't 2116 * looking) 2117 */ 2118 (void) pthread_mutex_lock(&np->rn_lock); 2119 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) { 2120 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0); 2121 assert(strcmp(np->rn_name, name) == 0); 2122 assert(strcmp(np->rn_type, type) == 0); 2123 assert(np->rn_pgflags == flags); 2124 assert(np->rn_flags & RC_NODE_IN_PARENT); 2125 rc_node_rele_flag(np, RC_NODE_USING_PARENT); 2126 } 2127 (void) pthread_mutex_unlock(&np->rn_lock); 2128 2129 rc_node_destroy(cp); 2130 return (np); 2131 } 2132 2133 np = cp; 2134 rc_node_hold(np); /* released in fill_pg_callback() */ 2135 np->rn_id = *nip; 2136 np->rn_hash = h; 2137 np->rn_name = strdup(name); 2138 if (np->rn_name == NULL) { 2139 rc_node_rele(np); 2140 return (NULL); 2141 } 2142 np->rn_type = strdup(type); 2143 if (np->rn_type == NULL) { 2144 free((void *)np->rn_name); 2145 rc_node_rele(np); 2146 return (NULL); 2147 } 2148 np->rn_pgflags = flags; 2149 np->rn_gen_id = gen_id; 2150 2151 np->rn_flags |= RC_NODE_USING_PARENT; 2152 2153 cache_insert_unlocked(bp, np); 2154 cache_release(bp); /* we are now visible */ 2155 2156 rc_node_link_child(pp, np); 2157 2158 return (np); 2159 } 2160 2161 #if COMPOSITION_DEPTH == 2 2162 /* 2163 * Initialize a "composed property group" which represents the composition of 2164 * property groups pg1 & pg2. It is ephemeral: once created & returned for an 2165 * ITER_READ request, keeping it out of cache_hash and any child lists 2166 * prevents it from being looked up. Operations besides iteration are passed 2167 * through to pg1. 2168 * 2169 * pg1 & pg2 should be held before entering this function. They will be 2170 * released in rc_node_destroy(). 2171 */ 2172 static int 2173 rc_node_setup_cpg(rc_node_t *cpg, rc_node_t *pg1, rc_node_t *pg2) 2174 { 2175 if (strcmp(pg1->rn_type, pg2->rn_type) != 0) 2176 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH); 2177 2178 cpg->rn_id.rl_type = REP_PROTOCOL_ENTITY_CPROPERTYGRP; 2179 cpg->rn_name = strdup(pg1->rn_name); 2180 if (cpg->rn_name == NULL) 2181 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 2182 2183 cpg->rn_cchain[0] = pg1; 2184 cpg->rn_cchain[1] = pg2; 2185 2186 return (REP_PROTOCOL_SUCCESS); 2187 } 2188 #else 2189 #error This code must be updated. 2190 #endif 2191 2192 /* 2193 * Fails with _NO_RESOURCES. 
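 *
 * (Illustrative sketch, not from the original source and not compiled:
 * one way a composed property group could be assembled with
 * rc_node_setup_cpg() above.  pg1 is the more specific, instance-level
 * property group and pg2 its service-level counterpart.  On success the
 * holds taken here are released later by rc_node_destroy(); on the
 * failure paths the composed node never recorded them, so they are
 * dropped explicitly.  The function name is a placeholder.)
 */
#if 0
static int
example_compose_pg(rc_node_t *pg1, rc_node_t *pg2, rc_node_t **cpgp)
{
	rc_node_t *cpg;
	int rc;

	if ((cpg = rc_node_alloc()) == NULL)
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);

	rc_node_hold(pg1);
	rc_node_hold(pg2);

	rc = rc_node_setup_cpg(cpg, pg1, pg2);
	if (rc != REP_PROTOCOL_SUCCESS) {
		rc_node_rele(pg1);
		rc_node_rele(pg2);
		rc_node_destroy(cpg);	/* never entered the cache */
		return (rc);
	}

	*cpgp = cpg;	/* ephemeral: not in cache_hash, not in a child list */
	return (REP_PROTOCOL_SUCCESS);
}
#endif
/*
 * (End of sketch; the original comment resumes.)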
2194 */ 2195 int 2196 rc_node_create_property(rc_node_t *pp, rc_node_lookup_t *nip, 2197 const char *name, rep_protocol_value_type_t type, 2198 const char *vals, size_t count, size_t size) 2199 { 2200 rc_node_t *np; 2201 cache_bucket_t *bp; 2202 2203 uint32_t h = rc_node_hash(nip); 2204 bp = cache_hold(h); 2205 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) { 2206 cache_release(bp); 2207 /* 2208 * make sure it matches our expectations 2209 */ 2210 (void) pthread_mutex_lock(&np->rn_lock); 2211 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) { 2212 assert(np->rn_parent == pp); 2213 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0); 2214 assert(strcmp(np->rn_name, name) == 0); 2215 assert(np->rn_valtype == type); 2216 assert(np->rn_values_count == count); 2217 assert(np->rn_values_size == size); 2218 assert(vals == NULL || 2219 memcmp(np->rn_values, vals, size) == 0); 2220 assert(np->rn_flags & RC_NODE_IN_PARENT); 2221 rc_node_rele_flag(np, RC_NODE_USING_PARENT); 2222 } 2223 rc_node_rele_locked(np); 2224 object_free_values(vals, type, count, size); 2225 return (REP_PROTOCOL_SUCCESS); 2226 } 2227 2228 /* 2229 * No one is there -- create a new node. 2230 */ 2231 np = rc_node_alloc(); 2232 if (np == NULL) { 2233 cache_release(bp); 2234 object_free_values(vals, type, count, size); 2235 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 2236 } 2237 np->rn_id = *nip; 2238 np->rn_hash = h; 2239 np->rn_name = strdup(name); 2240 if (np->rn_name == NULL) { 2241 cache_release(bp); 2242 object_free_values(vals, type, count, size); 2243 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 2244 } 2245 2246 np->rn_valtype = type; 2247 np->rn_values = vals; 2248 np->rn_values_count = count; 2249 np->rn_values_size = size; 2250 2251 np->rn_flags |= RC_NODE_USING_PARENT; 2252 2253 cache_insert_unlocked(bp, np); 2254 cache_release(bp); /* we are now visible */ 2255 2256 rc_node_link_child(pp, np); 2257 2258 return (REP_PROTOCOL_SUCCESS); 2259 } 2260 2261 /* 2262 * This function implements a decision table to determine the event ID for 2263 * changes to the enabled (SCF_PROPERTY_ENABLED) property. The event ID is 2264 * determined by the value of the first property in the command specified 2265 * by cmd_no and the name of the property group. Here is the decision 2266 * table: 2267 * 2268 * Property Group Name 2269 * Property ------------------------------------------ 2270 * Value SCF_PG_GENERAL SCF_PG_GENERAL_OVR 2271 * -------- -------------- ------------------ 2272 * "0" ADT_smf_disable ADT_smf_tmp_disable 2273 * "1" ADT_smf_enable ADT_smf_tmp_enable 2274 * 2275 * This function is called by special_property_event through a function 2276 * pointer in the special_props_list array. 2277 * 2278 * Since the ADT_smf_* symbols may not be defined in the build machine's 2279 * include files, this function is not compiled when doing native builds. 2280 */ 2281 #ifndef NATIVE_BUILD 2282 static int 2283 general_enable_id(tx_commit_data_t *tx_data, size_t cmd_no, const char *pg, 2284 au_event_t *event_id) 2285 { 2286 const char *value; 2287 uint32_t nvalues; 2288 int enable; 2289 2290 /* 2291 * First, check property value. 
2292 */ 2293 if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS) 2294 return (-1); 2295 if (nvalues == 0) 2296 return (-1); 2297 if (tx_cmd_value(tx_data, cmd_no, 0, &value) != REP_PROTOCOL_SUCCESS) 2298 return (-1); 2299 if (strcmp(value, "0") == 0) { 2300 enable = 0; 2301 } else if (strcmp(value, "1") == 0) { 2302 enable = 1; 2303 } else { 2304 return (-1); 2305 } 2306 2307 /* 2308 * Now check property group name. 2309 */ 2310 if (strcmp(pg, SCF_PG_GENERAL) == 0) { 2311 *event_id = enable ? ADT_smf_enable : ADT_smf_disable; 2312 return (0); 2313 } else if (strcmp(pg, SCF_PG_GENERAL_OVR) == 0) { 2314 *event_id = enable ? ADT_smf_tmp_enable : ADT_smf_tmp_disable; 2315 return (0); 2316 } 2317 return (-1); 2318 } 2319 #endif /* NATIVE_BUILD */ 2320 2321 /* 2322 * This function compares two audit_special_prop_item_t structures 2323 * represented by item1 and item2. It returns an integer greater than 0 if 2324 * item1 is greater than item2. It returns 0 if they are equal and an 2325 * integer less than 0 if item1 is less than item2. api_prop_name and 2326 * api_pg_name are the key fields for sorting. 2327 * 2328 * This function is suitable for calls to bsearch(3C) and qsort(3C). 2329 */ 2330 static int 2331 special_prop_compare(const void *item1, const void *item2) 2332 { 2333 const audit_special_prop_item_t *a = (audit_special_prop_item_t *)item1; 2334 const audit_special_prop_item_t *b = (audit_special_prop_item_t *)item2; 2335 int r; 2336 2337 r = strcmp(a->api_prop_name, b->api_prop_name); 2338 if (r == 0) { 2339 /* 2340 * Primary keys are the same, so check the secondary key. 2341 */ 2342 r = strcmp(a->api_pg_name, b->api_pg_name); 2343 } 2344 return (r); 2345 } 2346 2347 int 2348 rc_node_init(void) 2349 { 2350 rc_node_t *np; 2351 cache_bucket_t *bp; 2352 2353 rc_children_pool = uu_list_pool_create("rc_children_pool", 2354 sizeof (rc_node_t), offsetof(rc_node_t, rn_sibling_node), 2355 NULL, UU_LIST_POOL_DEBUG); 2356 2357 rc_pg_notify_pool = uu_list_pool_create("rc_pg_notify_pool", 2358 sizeof (rc_node_pg_notify_t), 2359 offsetof(rc_node_pg_notify_t, rnpn_node), 2360 NULL, UU_LIST_POOL_DEBUG); 2361 2362 rc_notify_pool = uu_list_pool_create("rc_notify_pool", 2363 sizeof (rc_notify_t), offsetof(rc_notify_t, rcn_list_node), 2364 NULL, UU_LIST_POOL_DEBUG); 2365 2366 rc_notify_info_pool = uu_list_pool_create("rc_notify_info_pool", 2367 sizeof (rc_notify_info_t), 2368 offsetof(rc_notify_info_t, rni_list_node), 2369 NULL, UU_LIST_POOL_DEBUG); 2370 2371 if (rc_children_pool == NULL || rc_pg_notify_pool == NULL || 2372 rc_notify_pool == NULL || rc_notify_info_pool == NULL) 2373 uu_die("out of memory"); 2374 2375 rc_notify_list = uu_list_create(rc_notify_pool, 2376 &rc_notify_list, 0); 2377 2378 rc_notify_info_list = uu_list_create(rc_notify_info_pool, 2379 &rc_notify_info_list, 0); 2380 2381 if (rc_notify_list == NULL || rc_notify_info_list == NULL) 2382 uu_die("out of memory"); 2383 2384 /* 2385 * Sort the special_props_list array so that it can be searched 2386 * with bsearch(3C). 2387 * 2388 * The special_props_list array is not compiled into the native 2389 * build code, so there is no need to call qsort if NATIVE_BUILD is 2390 * defined. 
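 *
 * (Illustrative sketch, not from the original source and not compiled:
 * why the qsort(3C) call below matters.  Once special_props_list is
 * sorted, a (property name, property group name) pair can be looked up
 * with bsearch(3C) using the same comparator, which is what
 * special_property_event() does later in this file.  The function name
 * is a placeholder.)
 */
#if 0
static audit_special_prop_item_t *
example_lookup_special(const char *prop_name, const char *pg_name)
{
	audit_special_prop_item_t key;

	key.api_prop_name = prop_name;
	key.api_pg_name = pg_name;

	return ((audit_special_prop_item_t *)bsearch(&key,
	    special_props_list, SPECIAL_PROP_COUNT,
	    sizeof (special_props_list[0]), special_prop_compare));
}
#endif
/*
 * (End of sketch; the original comment resumes.)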
2391 */ 2392 #ifndef NATIVE_BUILD 2393 qsort(special_props_list, SPECIAL_PROP_COUNT, 2394 sizeof (special_props_list[0]), special_prop_compare); 2395 #endif /* NATIVE_BUILD */ 2396 2397 if ((np = rc_node_alloc()) == NULL) 2398 uu_die("out of memory"); 2399 2400 rc_node_hold(np); 2401 np->rn_id.rl_type = REP_PROTOCOL_ENTITY_SCOPE; 2402 np->rn_id.rl_backend = BACKEND_TYPE_NORMAL; 2403 np->rn_hash = rc_node_hash(&np->rn_id); 2404 np->rn_name = "localhost"; 2405 2406 bp = cache_hold(np->rn_hash); 2407 cache_insert_unlocked(bp, np); 2408 cache_release(bp); 2409 2410 rc_scope = np; 2411 return (1); 2412 } 2413 2414 /* 2415 * Fails with 2416 * _INVALID_TYPE - type is invalid 2417 * _TYPE_MISMATCH - np doesn't carry children of type type 2418 * _DELETED - np has been deleted 2419 * _NO_RESOURCES 2420 */ 2421 static int 2422 rc_node_fill_children(rc_node_t *np, uint32_t type) 2423 { 2424 int rc; 2425 2426 assert(MUTEX_HELD(&np->rn_lock)); 2427 2428 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) != 2429 REP_PROTOCOL_SUCCESS) 2430 return (rc); 2431 2432 if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING)) 2433 return (REP_PROTOCOL_FAIL_DELETED); 2434 2435 if (np->rn_flags & RC_NODE_HAS_CHILDREN) { 2436 rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING); 2437 return (REP_PROTOCOL_SUCCESS); 2438 } 2439 2440 (void) pthread_mutex_unlock(&np->rn_lock); 2441 rc = object_fill_children(np); 2442 (void) pthread_mutex_lock(&np->rn_lock); 2443 2444 if (rc == REP_PROTOCOL_SUCCESS) { 2445 np->rn_flags |= RC_NODE_HAS_CHILDREN; 2446 } 2447 rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING); 2448 2449 return (rc); 2450 } 2451 2452 /* 2453 * Returns 2454 * _INVALID_TYPE - type is invalid 2455 * _TYPE_MISMATCH - np doesn't carry children of type type 2456 * _DELETED - np has been deleted 2457 * _NO_RESOURCES 2458 * _SUCCESS - if *cpp is not NULL, it is held 2459 */ 2460 static int 2461 rc_node_find_named_child(rc_node_t *np, const char *name, uint32_t type, 2462 rc_node_t **cpp) 2463 { 2464 int ret; 2465 rc_node_t *cp; 2466 2467 assert(MUTEX_HELD(&np->rn_lock)); 2468 assert(np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP); 2469 2470 ret = rc_node_fill_children(np, type); 2471 if (ret != REP_PROTOCOL_SUCCESS) 2472 return (ret); 2473 2474 for (cp = uu_list_first(np->rn_children); 2475 cp != NULL; 2476 cp = uu_list_next(np->rn_children, cp)) { 2477 if (cp->rn_id.rl_type == type && strcmp(cp->rn_name, name) == 0) 2478 break; 2479 } 2480 2481 if (cp != NULL) 2482 rc_node_hold(cp); 2483 *cpp = cp; 2484 2485 return (REP_PROTOCOL_SUCCESS); 2486 } 2487 2488 static int rc_node_parent(rc_node_t *, rc_node_t **); 2489 2490 /* 2491 * Returns 2492 * _INVALID_TYPE - type is invalid 2493 * _DELETED - np or an ancestor has been deleted 2494 * _NOT_FOUND - no ancestor of specified type exists 2495 * _SUCCESS - *app is held 2496 */ 2497 static int 2498 rc_node_find_ancestor(rc_node_t *np, uint32_t type, rc_node_t **app) 2499 { 2500 int ret; 2501 rc_node_t *parent, *np_orig; 2502 2503 if (type >= REP_PROTOCOL_ENTITY_MAX) 2504 return (REP_PROTOCOL_FAIL_INVALID_TYPE); 2505 2506 np_orig = np; 2507 2508 while (np->rn_id.rl_type > type) { 2509 ret = rc_node_parent(np, &parent); 2510 if (np != np_orig) 2511 rc_node_rele(np); 2512 if (ret != REP_PROTOCOL_SUCCESS) 2513 return (ret); 2514 np = parent; 2515 } 2516 2517 if (np->rn_id.rl_type == type) { 2518 *app = parent; 2519 return (REP_PROTOCOL_SUCCESS); 2520 } 2521 2522 return (REP_PROTOCOL_FAIL_NOT_FOUND); 2523 } 2524 2525 #ifndef NATIVE_BUILD 2526 /* 2527 * If the propname 
property exists in pg, and it is of type string, add its 2528 * values as authorizations to pcp. pg must not be locked on entry, and it is 2529 * returned unlocked. Returns 2530 * _DELETED - pg was deleted 2531 * _NO_RESOURCES 2532 * _NOT_FOUND - pg has no property named propname 2533 * _SUCCESS 2534 */ 2535 static int 2536 perm_add_pg_prop_values(permcheck_t *pcp, rc_node_t *pg, const char *propname) 2537 { 2538 rc_node_t *prop; 2539 int result; 2540 2541 uint_t count; 2542 const char *cp; 2543 2544 assert(!MUTEX_HELD(&pg->rn_lock)); 2545 assert(pg->rn_id.rl_type == REP_PROTOCOL_ENTITY_PROPERTYGRP); 2546 2547 (void) pthread_mutex_lock(&pg->rn_lock); 2548 result = rc_node_find_named_child(pg, propname, 2549 REP_PROTOCOL_ENTITY_PROPERTY, &prop); 2550 (void) pthread_mutex_unlock(&pg->rn_lock); 2551 if (result != REP_PROTOCOL_SUCCESS) { 2552 switch (result) { 2553 case REP_PROTOCOL_FAIL_DELETED: 2554 case REP_PROTOCOL_FAIL_NO_RESOURCES: 2555 return (result); 2556 2557 case REP_PROTOCOL_FAIL_INVALID_TYPE: 2558 case REP_PROTOCOL_FAIL_TYPE_MISMATCH: 2559 default: 2560 bad_error("rc_node_find_named_child", result); 2561 } 2562 } 2563 2564 if (prop == NULL) 2565 return (REP_PROTOCOL_FAIL_NOT_FOUND); 2566 2567 /* rn_valtype is immutable, so no locking. */ 2568 if (prop->rn_valtype != REP_PROTOCOL_TYPE_STRING) { 2569 rc_node_rele(prop); 2570 return (REP_PROTOCOL_SUCCESS); 2571 } 2572 2573 (void) pthread_mutex_lock(&prop->rn_lock); 2574 for (count = prop->rn_values_count, cp = prop->rn_values; 2575 count > 0; 2576 --count) { 2577 result = perm_add_enabling_type(pcp, cp, 2578 (pg->rn_id.rl_ids[ID_INSTANCE]) ? PC_AUTH_INST : 2579 PC_AUTH_SVC); 2580 if (result != REP_PROTOCOL_SUCCESS) 2581 break; 2582 2583 cp = strchr(cp, '\0') + 1; 2584 } 2585 2586 rc_node_rele_locked(prop); 2587 2588 return (result); 2589 } 2590 2591 /* 2592 * Assuming that ent is a service or instance node, if the pgname property 2593 * group has type pgtype, and it has a propname property with string type, add 2594 * its values as authorizations to pcp. If pgtype is NULL, it is not checked. 
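 *
 * (Illustrative sketch, not from the original source and not compiled:
 * the layout walked by perm_add_pg_prop_values() above.  A property's
 * values live back to back in rn_values, each NUL-terminated, with
 * rn_values_count entries; stepping with strchr() past each terminator
 * visits them in order.  The function name is a placeholder.)
 */
#if 0
static void
example_walk_values(const char *vals, uint_t count)
{
	const char *cp = vals;

	while (count-- > 0) {
		/* ... use the current value at cp ... */
		cp = strchr(cp, '\0') + 1;	/* step past the terminator */
	}
}
#endif
/*
 * (End of sketch; the original comment resumes.)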
2595 * Returns 2596 * _SUCCESS 2597 * _DELETED - ent was deleted 2598 * _NO_RESOURCES - no resources 2599 * _NOT_FOUND - ent does not have pgname pg or propname property 2600 */ 2601 static int 2602 perm_add_ent_prop_values(permcheck_t *pcp, rc_node_t *ent, const char *pgname, 2603 const char *pgtype, const char *propname) 2604 { 2605 int r; 2606 rc_node_t *pg; 2607 2608 assert(!MUTEX_HELD(&ent->rn_lock)); 2609 2610 (void) pthread_mutex_lock(&ent->rn_lock); 2611 r = rc_node_find_named_child(ent, pgname, 2612 REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg); 2613 (void) pthread_mutex_unlock(&ent->rn_lock); 2614 2615 switch (r) { 2616 case REP_PROTOCOL_SUCCESS: 2617 break; 2618 2619 case REP_PROTOCOL_FAIL_DELETED: 2620 case REP_PROTOCOL_FAIL_NO_RESOURCES: 2621 return (r); 2622 2623 default: 2624 bad_error("rc_node_find_named_child", r); 2625 } 2626 2627 if (pg == NULL) 2628 return (REP_PROTOCOL_FAIL_NOT_FOUND); 2629 2630 if (pgtype == NULL || strcmp(pg->rn_type, pgtype) == 0) { 2631 r = perm_add_pg_prop_values(pcp, pg, propname); 2632 switch (r) { 2633 case REP_PROTOCOL_FAIL_DELETED: 2634 r = REP_PROTOCOL_FAIL_NOT_FOUND; 2635 break; 2636 2637 case REP_PROTOCOL_FAIL_NO_RESOURCES: 2638 case REP_PROTOCOL_SUCCESS: 2639 case REP_PROTOCOL_FAIL_NOT_FOUND: 2640 break; 2641 2642 default: 2643 bad_error("perm_add_pg_prop_values", r); 2644 } 2645 } 2646 2647 rc_node_rele(pg); 2648 2649 return (r); 2650 } 2651 2652 /* 2653 * If pg has a property named propname, and is string typed, add its values as 2654 * authorizations to pcp. If pg has no such property, and its parent is an 2655 * instance, walk up to the service and try doing the same with the property 2656 * of the same name from the property group of the same name. Returns 2657 * _SUCCESS 2658 * _NO_RESOURCES 2659 * _DELETED - pg (or an ancestor) was deleted 2660 */ 2661 static int 2662 perm_add_enabling_values(permcheck_t *pcp, rc_node_t *pg, const char *propname) 2663 { 2664 int r; 2665 char pgname[REP_PROTOCOL_NAME_LEN + 1]; 2666 rc_node_t *svc; 2667 size_t sz; 2668 2669 r = perm_add_pg_prop_values(pcp, pg, propname); 2670 2671 if (r != REP_PROTOCOL_FAIL_NOT_FOUND) 2672 return (r); 2673 2674 assert(!MUTEX_HELD(&pg->rn_lock)); 2675 2676 if (pg->rn_id.rl_ids[ID_INSTANCE] == 0) 2677 return (REP_PROTOCOL_SUCCESS); 2678 2679 sz = strlcpy(pgname, pg->rn_name, sizeof (pgname)); 2680 assert(sz < sizeof (pgname)); 2681 2682 /* 2683 * If pg is a child of an instance or snapshot, we want to compose the 2684 * authorization property with the service's (if it exists). The 2685 * snapshot case applies only to read_authorization. In all other 2686 * cases, the pg's parent will be the instance. 2687 */ 2688 r = rc_node_find_ancestor(pg, REP_PROTOCOL_ENTITY_SERVICE, &svc); 2689 if (r != REP_PROTOCOL_SUCCESS) { 2690 assert(r == REP_PROTOCOL_FAIL_DELETED); 2691 return (r); 2692 } 2693 assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE); 2694 2695 r = perm_add_ent_prop_values(pcp, svc, pgname, NULL, propname); 2696 2697 rc_node_rele(svc); 2698 2699 if (r == REP_PROTOCOL_FAIL_NOT_FOUND) 2700 r = REP_PROTOCOL_SUCCESS; 2701 2702 return (r); 2703 } 2704 2705 /* 2706 * Call perm_add_enabling_values() for the "action_authorization" property of 2707 * the "general" property group of inst. 
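 *
 * (Illustrative sketch, not from the original source and not compiled:
 * the usual shape of a permission check built from these helpers.  The
 * enabling set starts with the generic authorizations for AUTH_MODIFY,
 * is extended with the values of a per-object authorization property
 * (propname is left as a parameter here rather than naming a specific
 * property), and is then tested against the caller with perm_granted().
 * The function name is a placeholder.)
 */
#if 0
static int
example_perm_check(rc_node_t *pg, const char *propname)
{
	permcheck_t *pcp;
	int rc;
	int granted;

	if ((pcp = pc_create()) == NULL)
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);

	/* Generic smf.modify-style authorizations ... */
	rc = perm_add_enabling(pcp, AUTH_MODIFY);

	/* ... plus the pg's (or its service's) own authorization values. */
	if (rc == REP_PROTOCOL_SUCCESS)
		rc = perm_add_enabling_values(pcp, pg, propname);

	if (rc == REP_PROTOCOL_SUCCESS) {
		granted = perm_granted(pcp);

		if (granted < 0)
			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
		else if (!granted)
			rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
	}

	pc_free(pcp);

	return (rc);
}
#endif
/*
 * (End of sketch; the original comment resumes.)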
Returns 2708 * _DELETED - inst (or an ancestor) was deleted 2709 * _NO_RESOURCES 2710 * _SUCCESS 2711 */ 2712 static int 2713 perm_add_inst_action_auth(permcheck_t *pcp, rc_node_t *inst) 2714 { 2715 int r; 2716 rc_node_t *svc; 2717 2718 assert(inst->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE); 2719 2720 r = perm_add_ent_prop_values(pcp, inst, AUTH_PG_GENERAL, 2721 AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION); 2722 2723 if (r != REP_PROTOCOL_FAIL_NOT_FOUND) 2724 return (r); 2725 2726 r = rc_node_parent(inst, &svc); 2727 if (r != REP_PROTOCOL_SUCCESS) { 2728 assert(r == REP_PROTOCOL_FAIL_DELETED); 2729 return (r); 2730 } 2731 2732 r = perm_add_ent_prop_values(pcp, svc, AUTH_PG_GENERAL, 2733 AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION); 2734 2735 return (r == REP_PROTOCOL_FAIL_NOT_FOUND ? REP_PROTOCOL_SUCCESS : r); 2736 } 2737 #endif /* NATIVE_BUILD */ 2738 2739 void 2740 rc_node_ptr_init(rc_node_ptr_t *out) 2741 { 2742 out->rnp_node = NULL; 2743 out->rnp_auth_string = NULL; 2744 out->rnp_authorized = RC_AUTH_UNKNOWN; 2745 out->rnp_deleted = 0; 2746 } 2747 2748 void 2749 rc_node_ptr_free_mem(rc_node_ptr_t *npp) 2750 { 2751 if (npp->rnp_auth_string != NULL) { 2752 free((void *)npp->rnp_auth_string); 2753 npp->rnp_auth_string = NULL; 2754 } 2755 } 2756 2757 static void 2758 rc_node_assign(rc_node_ptr_t *out, rc_node_t *val) 2759 { 2760 rc_node_t *cur = out->rnp_node; 2761 if (val != NULL) 2762 rc_node_hold(val); 2763 out->rnp_node = val; 2764 if (cur != NULL) 2765 rc_node_rele(cur); 2766 out->rnp_authorized = RC_AUTH_UNKNOWN; 2767 rc_node_ptr_free_mem(out); 2768 out->rnp_deleted = 0; 2769 } 2770 2771 void 2772 rc_node_clear(rc_node_ptr_t *out, int deleted) 2773 { 2774 rc_node_assign(out, NULL); 2775 out->rnp_deleted = deleted; 2776 } 2777 2778 void 2779 rc_node_ptr_assign(rc_node_ptr_t *out, const rc_node_ptr_t *val) 2780 { 2781 rc_node_assign(out, val->rnp_node); 2782 } 2783 2784 /* 2785 * rc_node_check()/RC_NODE_CHECK() 2786 * generic "entry" checks, run before the use of an rc_node pointer. 
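 *
 * (Illustrative sketch, not from the original source and not compiled:
 * the lifecycle of a client-layer rc_node_ptr_t using the helpers above.
 * rc_node_assign() and rc_node_clear() manage the hold on the referenced
 * node; rc_node_ptr_free_mem() frees any cached authorization string.
 * The function name is a placeholder.)
 */
#if 0
static void
example_ptr_lifecycle(rc_node_t *np)
{
	rc_node_ptr_t ptr;

	rc_node_ptr_init(&ptr);

	rc_node_assign(&ptr, np);	/* takes a hold on np */

	/* ... hand &ptr to the rc_node_*() entry points ... */

	rc_node_clear(&ptr, 0);		/* drops the hold; 0 == not deleted */

	rc_node_ptr_free_mem(&ptr);
}
#endif
/*
 * (End of sketch; the original comment resumes.)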
2787 * 2788 * Fails with 2789 * _NOT_SET 2790 * _DELETED 2791 */ 2792 static int 2793 rc_node_check_and_lock(rc_node_t *np) 2794 { 2795 int result = REP_PROTOCOL_SUCCESS; 2796 if (np == NULL) 2797 return (REP_PROTOCOL_FAIL_NOT_SET); 2798 2799 (void) pthread_mutex_lock(&np->rn_lock); 2800 if (!rc_node_wait_flag(np, RC_NODE_DYING)) { 2801 result = REP_PROTOCOL_FAIL_DELETED; 2802 (void) pthread_mutex_unlock(&np->rn_lock); 2803 } 2804 2805 return (result); 2806 } 2807 2808 /* 2809 * Fails with 2810 * _NOT_SET - ptr is reset 2811 * _DELETED - node has been deleted 2812 */ 2813 static rc_node_t * 2814 rc_node_ptr_check_and_lock(rc_node_ptr_t *npp, int *res) 2815 { 2816 rc_node_t *np = npp->rnp_node; 2817 if (np == NULL) { 2818 if (npp->rnp_deleted) 2819 *res = REP_PROTOCOL_FAIL_DELETED; 2820 else 2821 *res = REP_PROTOCOL_FAIL_NOT_SET; 2822 return (NULL); 2823 } 2824 2825 (void) pthread_mutex_lock(&np->rn_lock); 2826 if (!rc_node_wait_flag(np, RC_NODE_DYING)) { 2827 (void) pthread_mutex_unlock(&np->rn_lock); 2828 rc_node_clear(npp, 1); 2829 *res = REP_PROTOCOL_FAIL_DELETED; 2830 return (NULL); 2831 } 2832 return (np); 2833 } 2834 2835 #define RC_NODE_CHECK_AND_LOCK(n) { \ 2836 int rc__res; \ 2837 if ((rc__res = rc_node_check_and_lock(n)) != REP_PROTOCOL_SUCCESS) \ 2838 return (rc__res); \ 2839 } 2840 2841 #define RC_NODE_CHECK(n) { \ 2842 RC_NODE_CHECK_AND_LOCK(n); \ 2843 (void) pthread_mutex_unlock(&(n)->rn_lock); \ 2844 } 2845 2846 #define RC_NODE_CHECK_AND_HOLD(n) { \ 2847 RC_NODE_CHECK_AND_LOCK(n); \ 2848 rc_node_hold_locked(n); \ 2849 (void) pthread_mutex_unlock(&(n)->rn_lock); \ 2850 } 2851 2852 #define RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp) { \ 2853 int rc__res; \ 2854 if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == NULL) \ 2855 return (rc__res); \ 2856 } 2857 2858 #define RC_NODE_PTR_GET_CHECK(np, npp) { \ 2859 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \ 2860 (void) pthread_mutex_unlock(&(np)->rn_lock); \ 2861 } 2862 2863 #define RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp) { \ 2864 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \ 2865 rc_node_hold_locked(np); \ 2866 (void) pthread_mutex_unlock(&(np)->rn_lock); \ 2867 } 2868 2869 #define HOLD_FLAG_OR_RETURN(np, flag) { \ 2870 assert(MUTEX_HELD(&(np)->rn_lock)); \ 2871 assert(!((np)->rn_flags & RC_NODE_DEAD)); \ 2872 if (!rc_node_hold_flag((np), flag)) { \ 2873 (void) pthread_mutex_unlock(&(np)->rn_lock); \ 2874 return (REP_PROTOCOL_FAIL_DELETED); \ 2875 } \ 2876 } 2877 2878 #define HOLD_PTR_FLAG_OR_RETURN(np, npp, flag) { \ 2879 assert(MUTEX_HELD(&(np)->rn_lock)); \ 2880 assert(!((np)->rn_flags & RC_NODE_DEAD)); \ 2881 if (!rc_node_hold_flag((np), flag)) { \ 2882 (void) pthread_mutex_unlock(&(np)->rn_lock); \ 2883 assert((np) == (npp)->rnp_node); \ 2884 rc_node_clear(npp, 1); \ 2885 return (REP_PROTOCOL_FAIL_DELETED); \ 2886 } \ 2887 } 2888 2889 #define HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, flag, mem) { \ 2890 assert(MUTEX_HELD(&(np)->rn_lock)); \ 2891 assert(!((np)->rn_flags & RC_NODE_DEAD)); \ 2892 if (!rc_node_hold_flag((np), flag)) { \ 2893 (void) pthread_mutex_unlock(&(np)->rn_lock); \ 2894 assert((np) == (npp)->rnp_node); \ 2895 rc_node_clear(npp, 1); \ 2896 if ((mem) != NULL) \ 2897 free((mem)); \ 2898 return (REP_PROTOCOL_FAIL_DELETED); \ 2899 } \ 2900 } 2901 2902 int 2903 rc_local_scope(uint32_t type, rc_node_ptr_t *out) 2904 { 2905 if (type != REP_PROTOCOL_ENTITY_SCOPE) { 2906 rc_node_clear(out, 0); 2907 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH); 2908 } 2909 2910 /* 2911 * the main scope never gets destroyed 2912 */ 2913 
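/*
 * (Illustrative sketch, not from the original source and not compiled:
 * the shape of a typical client-layer entry point built on the macros
 * above.  RC_NODE_PTR_GET_CHECK_AND_LOCK() fetches the node behind npp,
 * returns _NOT_SET or _DELETED from the enclosing function on failure,
 * and otherwise leaves the node locked for the body of the call.  The
 * function name is a placeholder.)
 */
#if 0
static int
example_entry_point(rc_node_ptr_t *npp)
{
	rc_node_t *np;

	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);

	/* ... np is locked and known not to be DEAD here ... */

	(void) pthread_mutex_unlock(&np->rn_lock);

	return (REP_PROTOCOL_SUCCESS);
}
#endif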
rc_node_assign(out, rc_scope); 2914 2915 return (REP_PROTOCOL_SUCCESS); 2916 } 2917 2918 /* 2919 * Fails with 2920 * _NOT_SET - npp is not set 2921 * _DELETED - the node npp pointed at has been deleted 2922 * _TYPE_MISMATCH - type is not _SCOPE 2923 * _NOT_FOUND - scope has no parent 2924 */ 2925 static int 2926 rc_scope_parent_scope(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out) 2927 { 2928 rc_node_t *np; 2929 2930 rc_node_clear(out, 0); 2931 2932 RC_NODE_PTR_GET_CHECK(np, npp); 2933 2934 if (type != REP_PROTOCOL_ENTITY_SCOPE) 2935 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH); 2936 2937 return (REP_PROTOCOL_FAIL_NOT_FOUND); 2938 } 2939 2940 static int rc_node_pg_check_read_protect(rc_node_t *); 2941 2942 /* 2943 * Fails with 2944 * _NOT_SET 2945 * _DELETED 2946 * _NOT_APPLICABLE 2947 * _NOT_FOUND 2948 * _BAD_REQUEST 2949 * _TRUNCATED 2950 * _NO_RESOURCES 2951 */ 2952 int 2953 rc_node_name(rc_node_ptr_t *npp, char *buf, size_t sz, uint32_t answertype, 2954 size_t *sz_out) 2955 { 2956 size_t actual; 2957 rc_node_t *np; 2958 2959 assert(sz == *sz_out); 2960 2961 RC_NODE_PTR_GET_CHECK(np, npp); 2962 2963 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) { 2964 np = np->rn_cchain[0]; 2965 RC_NODE_CHECK(np); 2966 } 2967 2968 switch (answertype) { 2969 case RP_ENTITY_NAME_NAME: 2970 if (np->rn_name == NULL) 2971 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE); 2972 actual = strlcpy(buf, np->rn_name, sz); 2973 break; 2974 case RP_ENTITY_NAME_PGTYPE: 2975 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) 2976 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE); 2977 actual = strlcpy(buf, np->rn_type, sz); 2978 break; 2979 case RP_ENTITY_NAME_PGFLAGS: 2980 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) 2981 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE); 2982 actual = snprintf(buf, sz, "%d", np->rn_pgflags); 2983 break; 2984 case RP_ENTITY_NAME_SNAPLEVEL_SCOPE: 2985 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) 2986 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE); 2987 actual = strlcpy(buf, np->rn_snaplevel->rsl_scope, sz); 2988 break; 2989 case RP_ENTITY_NAME_SNAPLEVEL_SERVICE: 2990 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) 2991 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE); 2992 actual = strlcpy(buf, np->rn_snaplevel->rsl_service, sz); 2993 break; 2994 case RP_ENTITY_NAME_SNAPLEVEL_INSTANCE: 2995 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) 2996 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE); 2997 if (np->rn_snaplevel->rsl_instance == NULL) 2998 return (REP_PROTOCOL_FAIL_NOT_FOUND); 2999 actual = strlcpy(buf, np->rn_snaplevel->rsl_instance, sz); 3000 break; 3001 case RP_ENTITY_NAME_PGREADPROT: 3002 { 3003 int ret; 3004 3005 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) 3006 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE); 3007 ret = rc_node_pg_check_read_protect(np); 3008 assert(ret != REP_PROTOCOL_FAIL_TYPE_MISMATCH); 3009 switch (ret) { 3010 case REP_PROTOCOL_FAIL_PERMISSION_DENIED: 3011 actual = snprintf(buf, sz, "1"); 3012 break; 3013 case REP_PROTOCOL_SUCCESS: 3014 actual = snprintf(buf, sz, "0"); 3015 break; 3016 default: 3017 return (ret); 3018 } 3019 break; 3020 } 3021 default: 3022 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 3023 } 3024 if (actual >= sz) 3025 return (REP_PROTOCOL_FAIL_TRUNCATED); 3026 3027 *sz_out = actual; 3028 return (REP_PROTOCOL_SUCCESS); 3029 } 3030 3031 int 3032 rc_node_get_property_type(rc_node_ptr_t *npp, rep_protocol_value_type_t *out) 3033 { 3034 rc_node_t *np; 3035 3036 RC_NODE_PTR_GET_CHECK(np, npp); 3037 3038 if 
(np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) 3039 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH); 3040 3041 *out = np->rn_valtype; 3042 3043 return (REP_PROTOCOL_SUCCESS); 3044 } 3045 3046 /* 3047 * Get np's parent. If np is deleted, returns _DELETED. Otherwise puts a hold 3048 * on the parent, returns a pointer to it in *out, and returns _SUCCESS. 3049 */ 3050 static int 3051 rc_node_parent(rc_node_t *np, rc_node_t **out) 3052 { 3053 rc_node_t *pnp; 3054 rc_node_t *np_orig; 3055 3056 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) { 3057 RC_NODE_CHECK_AND_LOCK(np); 3058 } else { 3059 np = np->rn_cchain[0]; 3060 RC_NODE_CHECK_AND_LOCK(np); 3061 } 3062 3063 np_orig = np; 3064 rc_node_hold_locked(np); /* simplifies the remainder */ 3065 3066 for (;;) { 3067 if (!rc_node_wait_flag(np, 3068 RC_NODE_IN_TX | RC_NODE_USING_PARENT)) { 3069 rc_node_rele_locked(np); 3070 return (REP_PROTOCOL_FAIL_DELETED); 3071 } 3072 3073 if (!(np->rn_flags & RC_NODE_OLD)) 3074 break; 3075 3076 rc_node_rele_locked(np); 3077 np = cache_lookup(&np_orig->rn_id); 3078 assert(np != np_orig); 3079 3080 if (np == NULL) 3081 goto deleted; 3082 (void) pthread_mutex_lock(&np->rn_lock); 3083 } 3084 3085 /* guaranteed to succeed without dropping the lock */ 3086 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) { 3087 (void) pthread_mutex_unlock(&np->rn_lock); 3088 *out = NULL; 3089 rc_node_rele(np); 3090 return (REP_PROTOCOL_FAIL_DELETED); 3091 } 3092 3093 assert(np->rn_parent != NULL); 3094 pnp = np->rn_parent; 3095 (void) pthread_mutex_unlock(&np->rn_lock); 3096 3097 (void) pthread_mutex_lock(&pnp->rn_lock); 3098 (void) pthread_mutex_lock(&np->rn_lock); 3099 rc_node_rele_flag(np, RC_NODE_USING_PARENT); 3100 (void) pthread_mutex_unlock(&np->rn_lock); 3101 3102 rc_node_hold_locked(pnp); 3103 3104 (void) pthread_mutex_unlock(&pnp->rn_lock); 3105 3106 rc_node_rele(np); 3107 *out = pnp; 3108 return (REP_PROTOCOL_SUCCESS); 3109 3110 deleted: 3111 rc_node_rele(np); 3112 return (REP_PROTOCOL_FAIL_DELETED); 3113 } 3114 3115 /* 3116 * Fails with 3117 * _NOT_SET 3118 * _DELETED 3119 */ 3120 static int 3121 rc_node_ptr_parent(rc_node_ptr_t *npp, rc_node_t **out) 3122 { 3123 rc_node_t *np; 3124 3125 RC_NODE_PTR_GET_CHECK(np, npp); 3126 3127 return (rc_node_parent(np, out)); 3128 } 3129 3130 /* 3131 * Fails with 3132 * _NOT_SET - npp is not set 3133 * _DELETED - the node npp pointed at has been deleted 3134 * _TYPE_MISMATCH - npp's node's parent is not of type type 3135 * 3136 * If npp points to a scope, can also fail with 3137 * _NOT_FOUND - scope has no parent 3138 */ 3139 int 3140 rc_node_get_parent(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out) 3141 { 3142 rc_node_t *pnp; 3143 int rc; 3144 3145 if (npp->rnp_node != NULL && 3146 npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE) 3147 return (rc_scope_parent_scope(npp, type, out)); 3148 3149 if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS) { 3150 rc_node_clear(out, 0); 3151 return (rc); 3152 } 3153 3154 if (type != pnp->rn_id.rl_type) { 3155 rc_node_rele(pnp); 3156 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH); 3157 } 3158 3159 rc_node_assign(out, pnp); 3160 rc_node_rele(pnp); 3161 3162 return (REP_PROTOCOL_SUCCESS); 3163 } 3164 3165 int 3166 rc_node_parent_type(rc_node_ptr_t *npp, uint32_t *type_out) 3167 { 3168 rc_node_t *pnp; 3169 int rc; 3170 3171 if (npp->rnp_node != NULL && 3172 npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE) { 3173 *type_out = REP_PROTOCOL_ENTITY_SCOPE; 3174 return (REP_PROTOCOL_SUCCESS); 3175 } 3176 3177 if 
((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS) 3178 return (rc); 3179 3180 *type_out = pnp->rn_id.rl_type; 3181 3182 rc_node_rele(pnp); 3183 3184 return (REP_PROTOCOL_SUCCESS); 3185 } 3186 3187 /* 3188 * Fails with 3189 * _INVALID_TYPE - type is invalid 3190 * _TYPE_MISMATCH - np doesn't carry children of type type 3191 * _DELETED - np has been deleted 3192 * _NOT_FOUND - no child with that name/type combo found 3193 * _NO_RESOURCES 3194 * _BACKEND_ACCESS 3195 */ 3196 int 3197 rc_node_get_child(rc_node_ptr_t *npp, const char *name, uint32_t type, 3198 rc_node_ptr_t *outp) 3199 { 3200 rc_node_t *np, *cp; 3201 rc_node_t *child = NULL; 3202 int ret, idx; 3203 3204 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); 3205 if ((ret = rc_check_type_name(type, name)) == REP_PROTOCOL_SUCCESS) { 3206 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) { 3207 ret = rc_node_find_named_child(np, name, type, &child); 3208 } else { 3209 (void) pthread_mutex_unlock(&np->rn_lock); 3210 ret = REP_PROTOCOL_SUCCESS; 3211 for (idx = 0; idx < COMPOSITION_DEPTH; idx++) { 3212 cp = np->rn_cchain[idx]; 3213 if (cp == NULL) 3214 break; 3215 RC_NODE_CHECK_AND_LOCK(cp); 3216 ret = rc_node_find_named_child(cp, name, type, 3217 &child); 3218 (void) pthread_mutex_unlock(&cp->rn_lock); 3219 /* 3220 * loop only if we succeeded, but no child of 3221 * the correct name was found. 3222 */ 3223 if (ret != REP_PROTOCOL_SUCCESS || 3224 child != NULL) 3225 break; 3226 } 3227 (void) pthread_mutex_lock(&np->rn_lock); 3228 } 3229 } 3230 (void) pthread_mutex_unlock(&np->rn_lock); 3231 3232 if (ret == REP_PROTOCOL_SUCCESS) { 3233 rc_node_assign(outp, child); 3234 if (child != NULL) 3235 rc_node_rele(child); 3236 else 3237 ret = REP_PROTOCOL_FAIL_NOT_FOUND; 3238 } else { 3239 rc_node_assign(outp, NULL); 3240 } 3241 return (ret); 3242 } 3243 3244 int 3245 rc_node_update(rc_node_ptr_t *npp) 3246 { 3247 cache_bucket_t *bp; 3248 rc_node_t *np = npp->rnp_node; 3249 rc_node_t *nnp; 3250 rc_node_t *cpg = NULL; 3251 3252 if (np != NULL && 3253 np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) { 3254 /* 3255 * If we're updating a composed property group, actually 3256 * update the top-level property group & return the 3257 * appropriate value. But leave *nnp pointing at us. 3258 */ 3259 cpg = np; 3260 np = np->rn_cchain[0]; 3261 } 3262 3263 RC_NODE_CHECK(np); 3264 3265 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP && 3266 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) 3267 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 3268 3269 for (;;) { 3270 bp = cache_hold(np->rn_hash); 3271 nnp = cache_lookup_unlocked(bp, &np->rn_id); 3272 if (nnp == NULL) { 3273 cache_release(bp); 3274 rc_node_clear(npp, 1); 3275 return (REP_PROTOCOL_FAIL_DELETED); 3276 } 3277 /* 3278 * grab the lock before dropping the cache bucket, so 3279 * that no one else can sneak in 3280 */ 3281 (void) pthread_mutex_lock(&nnp->rn_lock); 3282 cache_release(bp); 3283 3284 if (!(nnp->rn_flags & RC_NODE_IN_TX) || 3285 !rc_node_wait_flag(nnp, RC_NODE_IN_TX)) 3286 break; 3287 3288 rc_node_rele_locked(nnp); 3289 } 3290 3291 /* 3292 * If it is dead, we want to update it so that it will continue to 3293 * report being dead. 
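 *
 * (Illustrative sketch, not from the original source and not compiled:
 * how a caller distinguishes rc_node_update()'s outcomes.  The function
 * name is a placeholder.)
 */
#if 0
static void
example_refresh_ptr(rc_node_ptr_t *npp)
{
	switch (rc_node_update(npp)) {
	case REP_PROTOCOL_SUCCESS:
		/* npp already referred to the current version. */
		break;
	case REP_PROTOCOL_DONE:
		/* npp now refers to the newer version of the object. */
		break;
	case REP_PROTOCOL_FAIL_DELETED:
		/* the object (or its replacement) is dead. */
		break;
	default:
		/* _NOT_SET, _BAD_REQUEST, ... */
		break;
	}
}
#endif
/*
 * (End of sketch; the original comment resumes.)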
3294 */ 3295 if (nnp->rn_flags & RC_NODE_DEAD) { 3296 (void) pthread_mutex_unlock(&nnp->rn_lock); 3297 if (nnp != np && cpg == NULL) 3298 rc_node_assign(npp, nnp); /* updated */ 3299 rc_node_rele(nnp); 3300 return (REP_PROTOCOL_FAIL_DELETED); 3301 } 3302 3303 assert(!(nnp->rn_flags & RC_NODE_OLD)); 3304 (void) pthread_mutex_unlock(&nnp->rn_lock); 3305 3306 if (nnp != np && cpg == NULL) 3307 rc_node_assign(npp, nnp); /* updated */ 3308 3309 rc_node_rele(nnp); 3310 3311 return ((nnp == np)? REP_PROTOCOL_SUCCESS : REP_PROTOCOL_DONE); 3312 } 3313 3314 /* 3315 * does a generic modification check, for creation, deletion, and snapshot 3316 * management only. Property group transactions have different checks. 3317 * 3318 * The string returned to *match_auth must be freed. 3319 */ 3320 int 3321 rc_node_modify_permission_check(char **match_auth) 3322 { 3323 int rc = REP_PROTOCOL_SUCCESS; 3324 permcheck_t *pcp; 3325 int granted; 3326 3327 *match_auth = NULL; 3328 #ifdef NATIVE_BUILD 3329 if (!client_is_privileged()) { 3330 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED; 3331 } 3332 return (rc); 3333 #else 3334 if (is_main_repository == 0) 3335 return (REP_PROTOCOL_SUCCESS); 3336 pcp = pc_create(); 3337 if (pcp != NULL) { 3338 rc = perm_add_enabling(pcp, AUTH_MODIFY); 3339 3340 if (rc == REP_PROTOCOL_SUCCESS) { 3341 granted = perm_granted(pcp); 3342 3343 if (granted < 0) { 3344 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 3345 } else { 3346 /* 3347 * Copy off the authorization 3348 * string before freeing pcp. 3349 */ 3350 *match_auth = 3351 strdup(pcp->pc_auth_string); 3352 if (*match_auth == NULL) 3353 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 3354 } 3355 } 3356 3357 pc_free(pcp); 3358 } else { 3359 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 3360 } 3361 3362 if (rc == REP_PROTOCOL_SUCCESS && !granted) 3363 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED; 3364 3365 return (rc); 3366 #endif /* NATIVE_BUILD */ 3367 } 3368 3369 /* 3370 * Native builds are done to create svc.configd-native. This program runs 3371 * only on the Solaris build machines to create the seed repository, and it 3372 * is compiled against the build machine's header files. The ADT_smf_* 3373 * symbols may not be defined in these header files. For this reason 3374 * smf_annotation_event(), _smf_audit_event() and special_property_event() 3375 * are not compiled for native builds. 3376 */ 3377 #ifndef NATIVE_BUILD 3378 3379 /* 3380 * This function generates an annotation audit event if one has been setup. 3381 * Annotation events should only be generated immediately before the audit 3382 * record from the first attempt to modify the repository from a client 3383 * which has requested an annotation. 3384 */ 3385 static void 3386 smf_annotation_event(int status, int return_val) 3387 { 3388 adt_session_data_t *session; 3389 adt_event_data_t *event = NULL; 3390 char file[MAXPATHLEN]; 3391 char operation[REP_PROTOCOL_NAME_LEN]; 3392 3393 /* Don't audit if we're using an alternate repository. */ 3394 if (is_main_repository == 0) 3395 return; 3396 3397 if (client_annotation_needed(operation, sizeof (operation), file, 3398 sizeof (file)) == 0) { 3399 return; 3400 } 3401 if (file[0] == 0) { 3402 (void) strlcpy(file, "NO FILE", sizeof (file)); 3403 } 3404 if (operation[0] == 0) { 3405 (void) strlcpy(operation, "NO OPERATION", 3406 sizeof (operation)); 3407 } 3408 if ((session = get_audit_session()) == NULL) 3409 return; 3410 if ((event = adt_alloc_event(session, ADT_smf_annotation)) == NULL) { 3411 uu_warn("smf_annotation_event cannot allocate event " 3412 "data. 
%s\n", strerror(errno)); 3413 return; 3414 } 3415 event->adt_smf_annotation.operation = operation; 3416 event->adt_smf_annotation.file = file; 3417 if (adt_put_event(event, status, return_val) == 0) { 3418 client_annotation_finished(); 3419 } else { 3420 uu_warn("smf_annotation_event failed to put event. " 3421 "%s\n", strerror(errno)); 3422 } 3423 adt_free_event(event); 3424 } 3425 3426 /* 3427 * _smf_audit_event interacts with the security auditing system to generate 3428 * an audit event structure. It establishes an audit session and allocates 3429 * an audit event. The event is filled in from the audit data, and 3430 * adt_put_event is called to generate the event. 3431 */ 3432 static void 3433 _smf_audit_event(au_event_t event_id, int status, int return_val, 3434 audit_event_data_t *data) 3435 { 3436 char *auth_used; 3437 char *fmri; 3438 char *prop_value; 3439 adt_session_data_t *session; 3440 adt_event_data_t *event = NULL; 3441 3442 /* Don't audit if we're using an alternate repository */ 3443 if (is_main_repository == 0) 3444 return; 3445 3446 smf_annotation_event(status, return_val); 3447 if ((session = get_audit_session()) == NULL) 3448 return; 3449 if ((event = adt_alloc_event(session, event_id)) == NULL) { 3450 uu_warn("_smf_audit_event cannot allocate event " 3451 "data. %s\n", strerror(errno)); 3452 return; 3453 } 3454 3455 /* 3456 * Handle possibility of NULL authorization strings, FMRIs and 3457 * property values. 3458 */ 3459 if (data->ed_auth == NULL) { 3460 auth_used = "PRIVILEGED"; 3461 } else { 3462 auth_used = data->ed_auth; 3463 } 3464 if (data->ed_fmri == NULL) { 3465 syslog(LOG_WARNING, "_smf_audit_event called with " 3466 "empty FMRI string"); 3467 fmri = "UNKNOWN FMRI"; 3468 } else { 3469 fmri = data->ed_fmri; 3470 } 3471 if (data->ed_prop_value == NULL) { 3472 prop_value = ""; 3473 } else { 3474 prop_value = data->ed_prop_value; 3475 } 3476 3477 /* Fill in the event data. 
*/ 3478 switch (event_id) { 3479 case ADT_smf_attach_snap: 3480 event->adt_smf_attach_snap.auth_used = auth_used; 3481 event->adt_smf_attach_snap.old_fmri = data->ed_old_fmri; 3482 event->adt_smf_attach_snap.old_name = data->ed_old_name; 3483 event->adt_smf_attach_snap.new_fmri = fmri; 3484 event->adt_smf_attach_snap.new_name = data->ed_snapname; 3485 break; 3486 case ADT_smf_change_prop: 3487 event->adt_smf_change_prop.auth_used = auth_used; 3488 event->adt_smf_change_prop.fmri = fmri; 3489 event->adt_smf_change_prop.type = data->ed_type; 3490 event->adt_smf_change_prop.value = prop_value; 3491 break; 3492 case ADT_smf_clear: 3493 event->adt_smf_clear.auth_used = auth_used; 3494 event->adt_smf_clear.fmri = fmri; 3495 break; 3496 case ADT_smf_create: 3497 event->adt_smf_create.fmri = fmri; 3498 event->adt_smf_create.auth_used = auth_used; 3499 break; 3500 case ADT_smf_create_npg: 3501 event->adt_smf_create_npg.auth_used = auth_used; 3502 event->adt_smf_create_npg.fmri = fmri; 3503 event->adt_smf_create_npg.type = data->ed_type; 3504 break; 3505 case ADT_smf_create_pg: 3506 event->adt_smf_create_pg.auth_used = auth_used; 3507 event->adt_smf_create_pg.fmri = fmri; 3508 event->adt_smf_create_pg.type = data->ed_type; 3509 break; 3510 case ADT_smf_create_prop: 3511 event->adt_smf_create_prop.auth_used = auth_used; 3512 event->adt_smf_create_prop.fmri = fmri; 3513 event->adt_smf_create_prop.type = data->ed_type; 3514 event->adt_smf_create_prop.value = prop_value; 3515 break; 3516 case ADT_smf_create_snap: 3517 event->adt_smf_create_snap.auth_used = auth_used; 3518 event->adt_smf_create_snap.fmri = fmri; 3519 event->adt_smf_create_snap.name = data->ed_snapname; 3520 break; 3521 case ADT_smf_degrade: 3522 event->adt_smf_degrade.auth_used = auth_used; 3523 event->adt_smf_degrade.fmri = fmri; 3524 break; 3525 case ADT_smf_delete: 3526 event->adt_smf_delete.fmri = fmri; 3527 event->adt_smf_delete.auth_used = auth_used; 3528 break; 3529 case ADT_smf_delete_npg: 3530 event->adt_smf_delete_npg.auth_used = auth_used; 3531 event->adt_smf_delete_npg.fmri = fmri; 3532 event->adt_smf_delete_npg.type = data->ed_type; 3533 break; 3534 case ADT_smf_delete_pg: 3535 event->adt_smf_delete_pg.auth_used = auth_used; 3536 event->adt_smf_delete_pg.fmri = fmri; 3537 event->adt_smf_delete_pg.type = data->ed_type; 3538 break; 3539 case ADT_smf_delete_prop: 3540 event->adt_smf_delete_prop.auth_used = auth_used; 3541 event->adt_smf_delete_prop.fmri = fmri; 3542 break; 3543 case ADT_smf_delete_snap: 3544 event->adt_smf_delete_snap.auth_used = auth_used; 3545 event->adt_smf_delete_snap.fmri = fmri; 3546 event->adt_smf_delete_snap.name = data->ed_snapname; 3547 break; 3548 case ADT_smf_disable: 3549 event->adt_smf_disable.auth_used = auth_used; 3550 event->adt_smf_disable.fmri = fmri; 3551 break; 3552 case ADT_smf_enable: 3553 event->adt_smf_enable.auth_used = auth_used; 3554 event->adt_smf_enable.fmri = fmri; 3555 break; 3556 case ADT_smf_immediate_degrade: 3557 event->adt_smf_immediate_degrade.auth_used = auth_used; 3558 event->adt_smf_immediate_degrade.fmri = fmri; 3559 break; 3560 case ADT_smf_immediate_maintenance: 3561 event->adt_smf_immediate_maintenance.auth_used = auth_used; 3562 event->adt_smf_immediate_maintenance.fmri = fmri; 3563 break; 3564 case ADT_smf_immtmp_maintenance: 3565 event->adt_smf_immtmp_maintenance.auth_used = auth_used; 3566 event->adt_smf_immtmp_maintenance.fmri = fmri; 3567 break; 3568 case ADT_smf_maintenance: 3569 event->adt_smf_maintenance.auth_used = auth_used; 3570 
event->adt_smf_maintenance.fmri = fmri; 3571 break; 3572 case ADT_smf_milestone: 3573 event->adt_smf_milestone.auth_used = auth_used; 3574 event->adt_smf_milestone.fmri = fmri; 3575 break; 3576 case ADT_smf_read_prop: 3577 event->adt_smf_read_prop.auth_used = auth_used; 3578 event->adt_smf_read_prop.fmri = fmri; 3579 break; 3580 case ADT_smf_refresh: 3581 event->adt_smf_refresh.auth_used = auth_used; 3582 event->adt_smf_refresh.fmri = fmri; 3583 break; 3584 case ADT_smf_restart: 3585 event->adt_smf_restart.auth_used = auth_used; 3586 event->adt_smf_restart.fmri = fmri; 3587 break; 3588 case ADT_smf_tmp_disable: 3589 event->adt_smf_tmp_disable.auth_used = auth_used; 3590 event->adt_smf_tmp_disable.fmri = fmri; 3591 break; 3592 case ADT_smf_tmp_enable: 3593 event->adt_smf_tmp_enable.auth_used = auth_used; 3594 event->adt_smf_tmp_enable.fmri = fmri; 3595 break; 3596 case ADT_smf_tmp_maintenance: 3597 event->adt_smf_tmp_maintenance.auth_used = auth_used; 3598 event->adt_smf_tmp_maintenance.fmri = fmri; 3599 break; 3600 default: 3601 abort(); /* Need to cover all SMF event IDs */ 3602 } 3603 3604 if (adt_put_event(event, status, return_val) != 0) { 3605 uu_warn("_smf_audit_event failed to put event. %s\n", 3606 strerror(errno)); 3607 } 3608 adt_free_event(event); 3609 } 3610 3611 /* 3612 * Determine if the combination of the property group at pg_name and the 3613 * property at prop_name are in the set of special startd properties. If 3614 * they are, a special audit event will be generated. 3615 */ 3616 static void 3617 special_property_event(audit_event_data_t *evdp, const char *prop_name, 3618 char *pg_name, int status, int return_val, tx_commit_data_t *tx_data, 3619 size_t cmd_no) 3620 { 3621 au_event_t event_id; 3622 audit_special_prop_item_t search_key; 3623 audit_special_prop_item_t *found; 3624 3625 /* Use bsearch to find the special property information. */ 3626 search_key.api_prop_name = prop_name; 3627 search_key.api_pg_name = pg_name; 3628 found = (audit_special_prop_item_t *)bsearch(&search_key, 3629 special_props_list, SPECIAL_PROP_COUNT, 3630 sizeof (special_props_list[0]), special_prop_compare); 3631 if (found == NULL) { 3632 /* Not a special property. */ 3633 return; 3634 } 3635 3636 /* Get the event id */ 3637 if (found->api_event_func == NULL) { 3638 event_id = found->api_event_id; 3639 } else { 3640 if ((*found->api_event_func)(tx_data, cmd_no, 3641 found->api_pg_name, &event_id) < 0) 3642 return; 3643 } 3644 3645 /* Generate the event. */ 3646 smf_audit_event(event_id, status, return_val, evdp); 3647 } 3648 #endif /* NATIVE_BUILD */ 3649 3650 /* 3651 * Return a pointer to a string containing all the values of the command 3652 * specified by cmd_no with each value enclosed in quotes. It is up to the 3653 * caller to free the memory at the returned pointer. 3654 */ 3655 static char * 3656 generate_value_list(tx_commit_data_t *tx_data, size_t cmd_no) 3657 { 3658 const char *cp; 3659 const char *cur_value; 3660 size_t byte_count = 0; 3661 uint32_t i; 3662 uint32_t nvalues; 3663 size_t str_size = 0; 3664 char *values = NULL; 3665 char *vp; 3666 3667 if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS) 3668 return (NULL); 3669 /* 3670 * First determine the size of the buffer that we will need. We 3671 * will represent each property value surrounded by quotes with a 3672 * space separating the values. Thus, we need to find the total 3673 * size of all the value strings and add 3 for each value. 3674 * 3675 * There is one catch, though. 
We need to escape any internal 3676 * quote marks in the values. So for each quote in the value we 3677 * need to add another byte to the buffer size. 3678 */ 3679 for (i = 0; i < nvalues; i++) { 3680 if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) != 3681 REP_PROTOCOL_SUCCESS) 3682 return (NULL); 3683 for (cp = cur_value; *cp != 0; cp++) { 3684 byte_count += (*cp == '"') ? 2 : 1; 3685 } 3686 byte_count += 3; /* surrounding quotes & space */ 3687 } 3688 byte_count++; /* nul terminator */ 3689 values = malloc(byte_count); 3690 if (values == NULL) 3691 return (NULL); 3692 *values = 0; 3693 3694 /* Now build up the string of values. */ 3695 for (i = 0; i < nvalues; i++) { 3696 if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) != 3697 REP_PROTOCOL_SUCCESS) { 3698 free(values); 3699 return (NULL); 3700 } 3701 (void) strlcat(values, "\"", byte_count); 3702 for (cp = cur_value, vp = values + strlen(values); 3703 *cp != 0; cp++) { 3704 if (*cp == '"') { 3705 *vp++ = '\\'; 3706 *vp++ = '"'; 3707 } else { 3708 *vp++ = *cp; 3709 } 3710 } 3711 *vp = 0; 3712 str_size = strlcat(values, "\" ", byte_count); 3713 assert(str_size < byte_count); 3714 } 3715 if (str_size > 0) 3716 values[str_size - 1] = 0; /* get rid of trailing space */ 3717 return (values); 3718 } 3719 3720 /* 3721 * generate_property_events takes the transaction commit data at tx_data 3722 * and generates an audit event for each command. 3723 * 3724 * Native builds are done to create svc.configd-native. This program runs 3725 * only on the Solaris build machines to create the seed repository. Thus, 3726 * no audit events should be generated when running svc.configd-native. 3727 */ 3728 static void 3729 generate_property_events( 3730 tx_commit_data_t *tx_data, 3731 char *pg_fmri, /* FMRI of property group */ 3732 char *auth_string, 3733 int auth_status, 3734 int auth_ret_value) 3735 { 3736 #ifndef NATIVE_BUILD 3737 enum rep_protocol_transaction_action action; 3738 audit_event_data_t audit_data; 3739 size_t count; 3740 size_t cmd_no; 3741 char *cp; 3742 au_event_t event_id; 3743 char fmri[REP_PROTOCOL_FMRI_LEN]; 3744 char pg_name[REP_PROTOCOL_NAME_LEN]; 3745 char *pg_end; /* End of prop. group fmri */ 3746 const char *prop_name; 3747 uint32_t ptype; 3748 char prop_type[3]; 3749 enum rep_protocol_responseid rc; 3750 size_t sz_out; 3751 3752 /* Make sure we have something to do. */ 3753 if (tx_data == NULL) 3754 return; 3755 if ((count = tx_cmd_count(tx_data)) == 0) 3756 return; 3757 3758 /* Copy the property group fmri */ 3759 pg_end = fmri; 3760 pg_end += strlcpy(fmri, pg_fmri, sizeof (fmri)); 3761 3762 /* 3763 * Get the property group name. It is the first component after 3764 * the last occurance of SCF_FMRI_PROPERTYGRP_PREFIX in the fmri. 3765 */ 3766 cp = strstr(pg_fmri, SCF_FMRI_PROPERTYGRP_PREFIX); 3767 if (cp == NULL) { 3768 pg_name[0] = 0; 3769 } else { 3770 cp += strlen(SCF_FMRI_PROPERTYGRP_PREFIX); 3771 (void) strlcpy(pg_name, cp, sizeof (pg_name)); 3772 } 3773 3774 audit_data.ed_auth = auth_string; 3775 audit_data.ed_fmri = fmri; 3776 audit_data.ed_type = prop_type; 3777 3778 /* 3779 * Property type is two characters (see 3780 * rep_protocol_value_type_t), so terminate the string. 
3781 */ 3782 prop_type[2] = 0; 3783 3784 for (cmd_no = 0; cmd_no < count; cmd_no++) { 3785 /* Construct FMRI of the property */ 3786 *pg_end = 0; 3787 if (tx_cmd_prop(tx_data, cmd_no, &prop_name) != 3788 REP_PROTOCOL_SUCCESS) { 3789 continue; 3790 } 3791 rc = rc_concat_fmri_element(fmri, sizeof (fmri), &sz_out, 3792 prop_name, REP_PROTOCOL_ENTITY_PROPERTY); 3793 if (rc != REP_PROTOCOL_SUCCESS) { 3794 /* 3795 * If we can't get the FMRI, we'll abandon this 3796 * command 3797 */ 3798 continue; 3799 } 3800 3801 /* Generate special property event if necessary. */ 3802 special_property_event(&audit_data, prop_name, pg_name, 3803 auth_status, auth_ret_value, tx_data, cmd_no); 3804 3805 /* Capture rest of audit data. */ 3806 if (tx_cmd_prop_type(tx_data, cmd_no, &ptype) != 3807 REP_PROTOCOL_SUCCESS) { 3808 continue; 3809 } 3810 prop_type[0] = REP_PROTOCOL_BASE_TYPE(ptype); 3811 prop_type[1] = REP_PROTOCOL_SUBTYPE(ptype); 3812 audit_data.ed_prop_value = generate_value_list(tx_data, cmd_no); 3813 3814 /* Determine the event type. */ 3815 if (tx_cmd_action(tx_data, cmd_no, &action) != 3816 REP_PROTOCOL_SUCCESS) { 3817 free(audit_data.ed_prop_value); 3818 continue; 3819 } 3820 switch (action) { 3821 case REP_PROTOCOL_TX_ENTRY_NEW: 3822 event_id = ADT_smf_create_prop; 3823 break; 3824 case REP_PROTOCOL_TX_ENTRY_CLEAR: 3825 event_id = ADT_smf_change_prop; 3826 break; 3827 case REP_PROTOCOL_TX_ENTRY_REPLACE: 3828 event_id = ADT_smf_change_prop; 3829 break; 3830 case REP_PROTOCOL_TX_ENTRY_DELETE: 3831 event_id = ADT_smf_delete_prop; 3832 break; 3833 default: 3834 assert(0); /* Missing a case */ 3835 free(audit_data.ed_prop_value); 3836 continue; 3837 } 3838 3839 /* Generate the event. */ 3840 smf_audit_event(event_id, auth_status, auth_ret_value, 3841 &audit_data); 3842 free(audit_data.ed_prop_value); 3843 } 3844 #endif /* NATIVE_BUILD */ 3845 } 3846 3847 /* 3848 * Fails with 3849 * _DELETED - node has been deleted 3850 * _NOT_SET - npp is reset 3851 * _NOT_APPLICABLE - type is _PROPERTYGRP 3852 * _INVALID_TYPE - node is corrupt or type is invalid 3853 * _TYPE_MISMATCH - node cannot have children of type type 3854 * _BAD_REQUEST - name is invalid 3855 * cannot create children for this type of node 3856 * _NO_RESOURCES - out of memory, or could not allocate new id 3857 * _PERMISSION_DENIED 3858 * _BACKEND_ACCESS 3859 * _BACKEND_READONLY 3860 * _EXISTS - child already exists 3861 * _TRUNCATED - truncated FMRI for the audit record 3862 */ 3863 int 3864 rc_node_create_child(rc_node_ptr_t *npp, uint32_t type, const char *name, 3865 rc_node_ptr_t *cpp) 3866 { 3867 rc_node_t *np; 3868 rc_node_t *cp = NULL; 3869 int rc, perm_rc; 3870 size_t sz_out; 3871 char fmri[REP_PROTOCOL_FMRI_LEN]; 3872 audit_event_data_t audit_data; 3873 3874 rc_node_clear(cpp, 0); 3875 3876 perm_rc = rc_node_modify_permission_check(&audit_data.ed_auth); 3877 3878 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); 3879 3880 audit_data.ed_fmri = fmri; 3881 audit_data.ed_auth = NULL; 3882 3883 /* 3884 * there is a separate interface for creating property groups 3885 */ 3886 if (type == REP_PROTOCOL_ENTITY_PROPERTYGRP) { 3887 (void) pthread_mutex_unlock(&np->rn_lock); 3888 free(audit_data.ed_auth); 3889 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE); 3890 } 3891 3892 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) { 3893 (void) pthread_mutex_unlock(&np->rn_lock); 3894 np = np->rn_cchain[0]; 3895 RC_NODE_CHECK_AND_LOCK(np); 3896 } 3897 3898 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) != 3899 REP_PROTOCOL_SUCCESS) { 3900 (void) 
pthread_mutex_unlock(&np->rn_lock); 3901 free(audit_data.ed_auth); 3902 return (rc); 3903 } 3904 if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS) { 3905 (void) pthread_mutex_unlock(&np->rn_lock); 3906 free(audit_data.ed_auth); 3907 return (rc); 3908 } 3909 3910 if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out, 3911 name, type)) != REP_PROTOCOL_SUCCESS) { 3912 (void) pthread_mutex_unlock(&np->rn_lock); 3913 free(audit_data.ed_auth); 3914 return (rc); 3915 } 3916 if (perm_rc != REP_PROTOCOL_SUCCESS) { 3917 (void) pthread_mutex_unlock(&np->rn_lock); 3918 smf_audit_event(ADT_smf_create, ADT_FAILURE, 3919 ADT_FAIL_VALUE_AUTH, &audit_data); 3920 free(audit_data.ed_auth); 3921 return (perm_rc); 3922 } 3923 3924 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD, 3925 audit_data.ed_auth); 3926 (void) pthread_mutex_unlock(&np->rn_lock); 3927 3928 rc = object_create(np, type, name, &cp); 3929 assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE); 3930 3931 if (rc == REP_PROTOCOL_SUCCESS) { 3932 rc_node_assign(cpp, cp); 3933 rc_node_rele(cp); 3934 } 3935 3936 (void) pthread_mutex_lock(&np->rn_lock); 3937 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD); 3938 (void) pthread_mutex_unlock(&np->rn_lock); 3939 3940 if (rc == REP_PROTOCOL_SUCCESS) { 3941 smf_audit_event(ADT_smf_create, ADT_SUCCESS, ADT_SUCCESS, 3942 &audit_data); 3943 } 3944 3945 free(audit_data.ed_auth); 3946 3947 return (rc); 3948 } 3949 3950 int 3951 rc_node_create_child_pg(rc_node_ptr_t *npp, uint32_t type, const char *name, 3952 const char *pgtype, uint32_t flags, rc_node_ptr_t *cpp) 3953 { 3954 rc_node_t *np; 3955 rc_node_t *cp; 3956 int rc; 3957 permcheck_t *pcp; 3958 int granted; 3959 char fmri[REP_PROTOCOL_FMRI_LEN]; 3960 audit_event_data_t audit_data; 3961 au_event_t event_id; 3962 size_t sz_out; 3963 3964 audit_data.ed_auth = NULL; 3965 audit_data.ed_fmri = fmri; 3966 audit_data.ed_type = (char *)pgtype; 3967 3968 rc_node_clear(cpp, 0); 3969 3970 /* verify flags is valid */ 3971 if (flags & ~SCF_PG_FLAG_NONPERSISTENT) 3972 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 3973 3974 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp); 3975 3976 if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) { 3977 rc_node_rele(np); 3978 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE); 3979 } 3980 3981 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) != 3982 REP_PROTOCOL_SUCCESS) { 3983 rc_node_rele(np); 3984 return (rc); 3985 } 3986 if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS || 3987 (rc = rc_check_pgtype_name(pgtype)) != REP_PROTOCOL_SUCCESS) { 3988 rc_node_rele(np); 3989 return (rc); 3990 } 3991 3992 #ifdef NATIVE_BUILD 3993 if (!client_is_privileged()) { 3994 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED; 3995 } 3996 #else 3997 if (flags & SCF_PG_FLAG_NONPERSISTENT) { 3998 event_id = ADT_smf_create_npg; 3999 } else { 4000 event_id = ADT_smf_create_pg; 4001 } 4002 if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out, 4003 name, REP_PROTOCOL_ENTITY_PROPERTYGRP)) != REP_PROTOCOL_SUCCESS) { 4004 rc_node_rele(np); 4005 return (rc); 4006 } 4007 4008 if (is_main_repository) { 4009 /* Must have .smf.modify or smf.modify.<type> authorization */ 4010 pcp = pc_create(); 4011 if (pcp != NULL) { 4012 rc = perm_add_enabling(pcp, AUTH_MODIFY); 4013 4014 if (rc == REP_PROTOCOL_SUCCESS) { 4015 const char * const auth = 4016 perm_auth_for_pgtype(pgtype); 4017 4018 if (auth != NULL) 4019 rc = perm_add_enabling(pcp, auth); 4020 } 4021 4022 /* 4023 * .manage or $action_authorization can be used to 4024 * create the 
actions pg and the general_ovr pg. 4025 */ 4026 if (rc == REP_PROTOCOL_SUCCESS && 4027 (flags & SCF_PG_FLAG_NONPERSISTENT) != 0 && 4028 np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE && 4029 ((strcmp(name, AUTH_PG_ACTIONS) == 0 && 4030 strcmp(pgtype, AUTH_PG_ACTIONS_TYPE) == 0) || 4031 (strcmp(name, AUTH_PG_GENERAL_OVR) == 0 && 4032 strcmp(pgtype, AUTH_PG_GENERAL_OVR_TYPE) == 0))) { 4033 rc = perm_add_enabling(pcp, AUTH_MANAGE); 4034 4035 if (rc == REP_PROTOCOL_SUCCESS) 4036 rc = perm_add_inst_action_auth(pcp, np); 4037 } 4038 4039 if (rc == REP_PROTOCOL_SUCCESS) { 4040 granted = perm_granted(pcp); 4041 4042 if (granted < 0) { 4043 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 4044 } else { 4045 /* 4046 * Copy out the authorization 4047 * string before freeing pcp. 4048 */ 4049 audit_data.ed_auth = 4050 strdup(pcp->pc_auth_string); 4051 if (audit_data.ed_auth == NULL) { 4052 /* 4053 * Following code line 4054 * cannot meet both the 4055 * indentation and the line 4056 * length requirements of 4057 * cstyle. Indentation has 4058 * been sacrificed. 4059 */ 4060 /* CSTYLED */ 4061 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 4062 } 4063 } 4064 } 4065 4066 pc_free(pcp); 4067 } else { 4068 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 4069 } 4070 4071 if (rc == REP_PROTOCOL_SUCCESS && !granted) 4072 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED; 4073 } else { 4074 rc = REP_PROTOCOL_SUCCESS; 4075 } 4076 #endif /* NATIVE_BUILD */ 4077 4078 if (rc != REP_PROTOCOL_SUCCESS) { 4079 rc_node_rele(np); 4080 smf_audit_event(event_id, ADT_FAILURE, 4081 ADT_FAIL_VALUE_AUTH, &audit_data); 4082 if (audit_data.ed_auth != NULL) 4083 free(audit_data.ed_auth); 4084 return (rc); 4085 } 4086 4087 (void) pthread_mutex_lock(&np->rn_lock); 4088 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD, 4089 audit_data.ed_auth); 4090 (void) pthread_mutex_unlock(&np->rn_lock); 4091 4092 rc = object_create_pg(np, type, name, pgtype, flags, &cp); 4093 4094 if (rc == REP_PROTOCOL_SUCCESS) { 4095 rc_node_assign(cpp, cp); 4096 rc_node_rele(cp); 4097 } 4098 4099 (void) pthread_mutex_lock(&np->rn_lock); 4100 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD); 4101 (void) pthread_mutex_unlock(&np->rn_lock); 4102 4103 if (rc == REP_PROTOCOL_SUCCESS) { 4104 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS, 4105 &audit_data); 4106 } 4107 if (audit_data.ed_auth != NULL) 4108 free(audit_data.ed_auth); 4109 4110 return (rc); 4111 } 4112 4113 static void 4114 rc_pg_notify_fire(rc_node_pg_notify_t *pnp) 4115 { 4116 assert(MUTEX_HELD(&rc_pg_notify_lock)); 4117 4118 if (pnp->rnpn_pg != NULL) { 4119 uu_list_remove(pnp->rnpn_pg->rn_pg_notify_list, pnp); 4120 (void) close(pnp->rnpn_fd); 4121 4122 pnp->rnpn_pg = NULL; 4123 pnp->rnpn_fd = -1; 4124 } else { 4125 assert(pnp->rnpn_fd == -1); 4126 } 4127 } 4128 4129 static void 4130 rc_notify_node_delete(rc_notify_delete_t *ndp, rc_node_t *np_arg) 4131 { 4132 rc_node_t *svc = NULL; 4133 rc_node_t *inst = NULL; 4134 rc_node_t *pg = NULL; 4135 rc_node_t *np = np_arg; 4136 rc_node_t *nnp; 4137 4138 while (svc == NULL) { 4139 (void) pthread_mutex_lock(&np->rn_lock); 4140 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) { 4141 (void) pthread_mutex_unlock(&np->rn_lock); 4142 goto cleanup; 4143 } 4144 nnp = np->rn_parent; 4145 rc_node_hold_locked(np); /* hold it in place */ 4146 4147 switch (np->rn_id.rl_type) { 4148 case REP_PROTOCOL_ENTITY_PROPERTYGRP: 4149 assert(pg == NULL); 4150 pg = np; 4151 break; 4152 case REP_PROTOCOL_ENTITY_INSTANCE: 4153 assert(inst == NULL); 4154 inst = np; 4155 break; 4156 case
REP_PROTOCOL_ENTITY_SERVICE: 4157 assert(svc == NULL); 4158 svc = np; 4159 break; 4160 default: 4161 rc_node_rele_flag(np, RC_NODE_USING_PARENT); 4162 rc_node_rele_locked(np); 4163 goto cleanup; 4164 } 4165 4166 (void) pthread_mutex_unlock(&np->rn_lock); 4167 4168 np = nnp; 4169 if (np == NULL) 4170 goto cleanup; 4171 } 4172 4173 rc_notify_deletion(ndp, 4174 svc->rn_name, 4175 inst != NULL ? inst->rn_name : NULL, 4176 pg != NULL ? pg->rn_name : NULL); 4177 4178 ndp = NULL; 4179 4180 cleanup: 4181 if (ndp != NULL) 4182 uu_free(ndp); 4183 4184 for (;;) { 4185 if (svc != NULL) { 4186 np = svc; 4187 svc = NULL; 4188 } else if (inst != NULL) { 4189 np = inst; 4190 inst = NULL; 4191 } else if (pg != NULL) { 4192 np = pg; 4193 pg = NULL; 4194 } else 4195 break; 4196 4197 (void) pthread_mutex_lock(&np->rn_lock); 4198 rc_node_rele_flag(np, RC_NODE_USING_PARENT); 4199 rc_node_rele_locked(np); 4200 } 4201 } 4202 4203 /* 4204 * N.B.: this function drops np->rn_lock on the way out. 4205 */ 4206 static void 4207 rc_node_delete_hold(rc_node_t *np, int andformer) 4208 { 4209 rc_node_t *cp; 4210 4211 again: 4212 assert(MUTEX_HELD(&np->rn_lock)); 4213 assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS); 4214 4215 for (cp = uu_list_first(np->rn_children); cp != NULL; 4216 cp = uu_list_next(np->rn_children, cp)) { 4217 (void) pthread_mutex_lock(&cp->rn_lock); 4218 (void) pthread_mutex_unlock(&np->rn_lock); 4219 if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS)) { 4220 /* 4221 * already marked as dead -- can't happen, since that 4222 * would require setting RC_NODE_CHILDREN_CHANGING 4223 * in np, and we're holding that... 4224 */ 4225 abort(); 4226 } 4227 rc_node_delete_hold(cp, andformer); /* recurse, drop lock */ 4228 4229 (void) pthread_mutex_lock(&np->rn_lock); 4230 } 4231 if (andformer && (cp = np->rn_former) != NULL) { 4232 (void) pthread_mutex_lock(&cp->rn_lock); 4233 (void) pthread_mutex_unlock(&np->rn_lock); 4234 if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS)) 4235 abort(); /* can't happen, see above */ 4236 np = cp; 4237 goto again; /* tail-recurse down rn_former */ 4238 } 4239 (void) pthread_mutex_unlock(&np->rn_lock); 4240 } 4241 4242 /* 4243 * N.B.: this function drops np->rn_lock on the way out. 
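 *
 * Like rc_node_delete_hold() above, it walks the children with a
 * lock-coupling (hand-over-hand) traversal: each child is locked before
 * the parent's lock is dropped, so no step of the walk is unprotected.
 * A rough sketch of the idiom (illustrative only, not code from this
 * file; error handling omitted):
 *
 *	(void) pthread_mutex_lock(&parent->rn_lock);
 *	for (cp = uu_list_first(parent->rn_children); cp != NULL;
 *	    cp = uu_list_next(parent->rn_children, cp)) {
 *		(void) pthread_mutex_lock(&cp->rn_lock);
 *		(void) pthread_mutex_unlock(&parent->rn_lock);
 *		... operate on cp (may recurse and drop cp's lock) ...
 *		(void) pthread_mutex_lock(&parent->rn_lock);
 *	}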
4244 */ 4245 static void 4246 rc_node_delete_rele(rc_node_t *np, int andformer) 4247 { 4248 rc_node_t *cp; 4249 4250 again: 4251 assert(MUTEX_HELD(&np->rn_lock)); 4252 assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS); 4253 4254 for (cp = uu_list_first(np->rn_children); cp != NULL; 4255 cp = uu_list_next(np->rn_children, cp)) { 4256 (void) pthread_mutex_lock(&cp->rn_lock); 4257 (void) pthread_mutex_unlock(&np->rn_lock); 4258 rc_node_delete_rele(cp, andformer); /* recurse, drop lock */ 4259 (void) pthread_mutex_lock(&np->rn_lock); 4260 } 4261 if (andformer && (cp = np->rn_former) != NULL) { 4262 (void) pthread_mutex_lock(&cp->rn_lock); 4263 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS); 4264 (void) pthread_mutex_unlock(&np->rn_lock); 4265 4266 np = cp; 4267 goto again; /* tail-recurse down rn_former */ 4268 } 4269 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS); 4270 (void) pthread_mutex_unlock(&np->rn_lock); 4271 } 4272 4273 static void 4274 rc_node_finish_delete(rc_node_t *cp) 4275 { 4276 cache_bucket_t *bp; 4277 rc_node_pg_notify_t *pnp; 4278 4279 assert(MUTEX_HELD(&cp->rn_lock)); 4280 4281 if (!(cp->rn_flags & RC_NODE_OLD)) { 4282 assert(cp->rn_flags & RC_NODE_IN_PARENT); 4283 if (!rc_node_wait_flag(cp, RC_NODE_USING_PARENT)) { 4284 abort(); /* can't happen, see above */ 4285 } 4286 cp->rn_flags &= ~RC_NODE_IN_PARENT; 4287 cp->rn_parent = NULL; 4288 rc_node_free_fmri(cp); 4289 } 4290 4291 cp->rn_flags |= RC_NODE_DEAD; 4292 4293 /* 4294 * If this node is not out-dated, we need to remove it from 4295 * the notify list and cache hash table. 4296 */ 4297 if (!(cp->rn_flags & RC_NODE_OLD)) { 4298 assert(cp->rn_refs > 0); /* can't go away yet */ 4299 (void) pthread_mutex_unlock(&cp->rn_lock); 4300 4301 (void) pthread_mutex_lock(&rc_pg_notify_lock); 4302 while ((pnp = uu_list_first(cp->rn_pg_notify_list)) != NULL) 4303 rc_pg_notify_fire(pnp); 4304 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 4305 rc_notify_remove_node(cp); 4306 4307 bp = cache_hold(cp->rn_hash); 4308 (void) pthread_mutex_lock(&cp->rn_lock); 4309 cache_remove_unlocked(bp, cp); 4310 cache_release(bp); 4311 } 4312 } 4313 4314 /* 4315 * N.B.: this function drops np->rn_lock and a reference on the way out. 4316 */ 4317 static void 4318 rc_node_delete_children(rc_node_t *np, int andformer) 4319 { 4320 rc_node_t *cp; 4321 4322 again: 4323 assert(np->rn_refs > 0); 4324 assert(MUTEX_HELD(&np->rn_lock)); 4325 assert(np->rn_flags & RC_NODE_DEAD); 4326 4327 while ((cp = uu_list_first(np->rn_children)) != NULL) { 4328 uu_list_remove(np->rn_children, cp); 4329 (void) pthread_mutex_lock(&cp->rn_lock); 4330 (void) pthread_mutex_unlock(&np->rn_lock); 4331 rc_node_hold_locked(cp); /* hold while we recurse */ 4332 rc_node_finish_delete(cp); 4333 rc_node_delete_children(cp, andformer); /* drops lock + ref */ 4334 (void) pthread_mutex_lock(&np->rn_lock); 4335 } 4336 4337 /* 4338 * when we drop cp's lock, all the children will be gone, so we 4339 * can release DYING_FLAGS. 
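 *
 * After that, any old versions hanging off rn_former are handled by
 * looping back to 'again' with np advanced to the former node; the
 * goto below is, in effect, the iterative form of a tail call such as
 *
 *	rc_node_delete_children(np->rn_former, andformer);
 *
 * so an arbitrarily long chain of former nodes does not grow the
 * stack.  (Sketch only; the real code below also transfers the hold
 * and clears RC_NODE_ON_FORMER first.)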
4340 */ 4341 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS); 4342 if (andformer && (cp = np->rn_former) != NULL) { 4343 np->rn_former = NULL; /* unlink */ 4344 (void) pthread_mutex_lock(&cp->rn_lock); 4345 (void) pthread_mutex_unlock(&np->rn_lock); 4346 cp->rn_flags &= ~RC_NODE_ON_FORMER; 4347 4348 rc_node_hold_locked(cp); /* hold while we loop */ 4349 4350 rc_node_finish_delete(cp); 4351 4352 rc_node_rele(np); /* drop the old reference */ 4353 4354 np = cp; 4355 goto again; /* tail-recurse down rn_former */ 4356 } 4357 rc_node_rele_locked(np); 4358 } 4359 4360 static void 4361 rc_node_unrefed(rc_node_t *np) 4362 { 4363 int unrefed; 4364 rc_node_t *pp, *cur; 4365 4366 assert(MUTEX_HELD(&np->rn_lock)); 4367 assert(np->rn_refs == 0); 4368 assert(np->rn_other_refs == 0); 4369 assert(np->rn_other_refs_held == 0); 4370 4371 if (np->rn_flags & RC_NODE_DEAD) { 4372 (void) pthread_mutex_unlock(&np->rn_lock); 4373 rc_node_destroy(np); 4374 return; 4375 } 4376 4377 assert(np->rn_flags & RC_NODE_OLD); 4378 if (np->rn_flags & RC_NODE_UNREFED) { 4379 (void) pthread_mutex_unlock(&np->rn_lock); 4380 return; 4381 } 4382 np->rn_flags |= RC_NODE_UNREFED; 4383 4384 (void) pthread_mutex_unlock(&np->rn_lock); 4385 4386 /* 4387 * find the current in-hash object, and grab its RC_NODE_IN_TX 4388 * flag. That protects the entire rn_former chain. 4389 */ 4390 for (;;) { 4391 pp = cache_lookup(&np->rn_id); 4392 if (pp == NULL) { 4393 (void) pthread_mutex_lock(&np->rn_lock); 4394 if (np->rn_flags & RC_NODE_DEAD) 4395 goto died; 4396 /* 4397 * We are trying to unreference this node, but the 4398 * owner of the former list does not exist. It must 4399 * be the case that another thread is deleting this 4400 * entire sub-branch, but has not yet reached us. 4401 * We will in short order be deleted.
4402 */ 4403 np->rn_flags &= ~RC_NODE_UNREFED; 4404 (void) pthread_mutex_unlock(&np->rn_lock); 4405 return; 4406 } 4407 if (pp == np) { 4408 /* 4409 * no longer unreferenced 4410 */ 4411 (void) pthread_mutex_lock(&np->rn_lock); 4412 np->rn_flags &= ~RC_NODE_UNREFED; 4413 rc_node_rele_locked(np); 4414 return; 4415 } 4416 (void) pthread_mutex_lock(&pp->rn_lock); 4417 if ((pp->rn_flags & RC_NODE_OLD) || 4418 !rc_node_hold_flag(pp, RC_NODE_IN_TX)) { 4419 rc_node_rele_locked(pp); 4420 continue; 4421 } 4422 if (!(pp->rn_flags & RC_NODE_OLD)) { 4423 (void) pthread_mutex_unlock(&pp->rn_lock); 4424 break; 4425 } 4426 rc_node_rele_flag(pp, RC_NODE_IN_TX); 4427 rc_node_rele_locked(pp); 4428 } 4429 4430 (void) pthread_mutex_lock(&np->rn_lock); 4431 if (!(np->rn_flags & (RC_NODE_OLD | RC_NODE_DEAD)) || 4432 np->rn_refs != 0 || np->rn_other_refs != 0 || 4433 np->rn_other_refs_held != 0) { 4434 np->rn_flags &= ~RC_NODE_UNREFED; 4435 (void) pthread_mutex_lock(&pp->rn_lock); 4436 4437 rc_node_rele_flag(pp, RC_NODE_IN_TX); 4438 rc_node_rele_locked(pp); 4439 return; 4440 } 4441 4442 if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) { 4443 (void) pthread_mutex_unlock(&np->rn_lock); 4444 4445 rc_node_rele_flag(pp, RC_NODE_IN_TX); 4446 rc_node_rele_locked(pp); 4447 4448 (void) pthread_mutex_lock(&np->rn_lock); 4449 goto died; 4450 } 4451 4452 rc_node_delete_hold(np, 0); 4453 4454 (void) pthread_mutex_lock(&np->rn_lock); 4455 if (!(np->rn_flags & RC_NODE_OLD) || 4456 np->rn_refs != 0 || np->rn_other_refs != 0 || 4457 np->rn_other_refs_held != 0) { 4458 np->rn_flags &= ~RC_NODE_UNREFED; 4459 rc_node_delete_rele(np, 0); 4460 4461 (void) pthread_mutex_lock(&pp->rn_lock); 4462 rc_node_rele_flag(pp, RC_NODE_IN_TX); 4463 rc_node_rele_locked(pp); 4464 return; 4465 } 4466 4467 np->rn_flags |= RC_NODE_DEAD; 4468 rc_node_hold_locked(np); 4469 rc_node_delete_children(np, 0); 4470 4471 /* 4472 * It's gone -- remove it from the former chain and destroy it. 
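 *
 * In sketch form, the splice below turns
 *
 *	pp -> ... -> cur -> np -> (older versions)
 *
 * into
 *
 *	pp -> ... -> cur -> (older versions)
 *
 * where the arrows are rn_former links and pp is the current in-hash
 * node whose RC_NODE_IN_TX flag we are holding.  (Illustration only.)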
4473 */ 4474 (void) pthread_mutex_lock(&pp->rn_lock); 4475 for (cur = pp; cur != NULL && cur->rn_former != np; 4476 cur = cur->rn_former) 4477 ; 4478 assert(cur != NULL && cur != np); 4479 4480 cur->rn_former = np->rn_former; 4481 np->rn_former = NULL; 4482 4483 rc_node_rele_flag(pp, RC_NODE_IN_TX); 4484 rc_node_rele_locked(pp); 4485 4486 (void) pthread_mutex_lock(&np->rn_lock); 4487 assert(np->rn_flags & RC_NODE_ON_FORMER); 4488 np->rn_flags &= ~(RC_NODE_UNREFED | RC_NODE_ON_FORMER); 4489 (void) pthread_mutex_unlock(&np->rn_lock); 4490 rc_node_destroy(np); 4491 return; 4492 4493 died: 4494 np->rn_flags &= ~RC_NODE_UNREFED; 4495 unrefed = (np->rn_refs == 0 && np->rn_other_refs == 0 && 4496 np->rn_other_refs_held == 0); 4497 (void) pthread_mutex_unlock(&np->rn_lock); 4498 if (unrefed) 4499 rc_node_destroy(np); 4500 } 4501 4502 static au_event_t 4503 get_delete_event_id(rep_protocol_entity_t entity, uint32_t pgflags) 4504 { 4505 au_event_t id = 0; 4506 4507 #ifndef NATIVE_BUILD 4508 switch (entity) { 4509 case REP_PROTOCOL_ENTITY_SERVICE: 4510 case REP_PROTOCOL_ENTITY_INSTANCE: 4511 id = ADT_smf_delete; 4512 break; 4513 case REP_PROTOCOL_ENTITY_SNAPSHOT: 4514 id = ADT_smf_delete_snap; 4515 break; 4516 case REP_PROTOCOL_ENTITY_PROPERTYGRP: 4517 case REP_PROTOCOL_ENTITY_CPROPERTYGRP: 4518 if (pgflags & SCF_PG_FLAG_NONPERSISTENT) { 4519 id = ADT_smf_delete_npg; 4520 } else { 4521 id = ADT_smf_delete_pg; 4522 } 4523 break; 4524 default: 4525 abort(); 4526 } 4527 #endif /* NATIVE_BUILD */ 4528 return (id); 4529 } 4530 4531 /* 4532 * Fails with 4533 * _NOT_SET 4534 * _DELETED 4535 * _BAD_REQUEST 4536 * _PERMISSION_DENIED 4537 * _NO_RESOURCES 4538 * _TRUNCATED 4539 * and whatever object_delete() fails with. 4540 */ 4541 int 4542 rc_node_delete(rc_node_ptr_t *npp) 4543 { 4544 rc_node_t *np, *np_orig; 4545 rc_node_t *pp = NULL; 4546 int rc; 4547 rc_node_pg_notify_t *pnp; 4548 cache_bucket_t *bp; 4549 rc_notify_delete_t *ndp; 4550 permcheck_t *pcp; 4551 int granted; 4552 au_event_t event_id = 0; 4553 size_t sz_out; 4554 audit_event_data_t audit_data; 4555 int audit_failure = 0; 4556 4557 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); 4558 4559 audit_data.ed_fmri = NULL; 4560 audit_data.ed_auth = NULL; 4561 audit_data.ed_snapname = NULL; 4562 audit_data.ed_type = NULL; 4563 4564 switch (np->rn_id.rl_type) { 4565 case REP_PROTOCOL_ENTITY_SERVICE: 4566 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SERVICE, 4567 np->rn_pgflags); 4568 break; 4569 case REP_PROTOCOL_ENTITY_INSTANCE: 4570 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_INSTANCE, 4571 np->rn_pgflags); 4572 break; 4573 case REP_PROTOCOL_ENTITY_SNAPSHOT: 4574 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SNAPSHOT, 4575 np->rn_pgflags); 4576 audit_data.ed_snapname = strdup(np->rn_name); 4577 if (audit_data.ed_snapname == NULL) { 4578 (void) pthread_mutex_unlock(&np->rn_lock); 4579 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 4580 } 4581 break; /* deletable */ 4582 4583 case REP_PROTOCOL_ENTITY_SCOPE: 4584 case REP_PROTOCOL_ENTITY_SNAPLEVEL: 4585 /* Scopes and snaplevels are indelible. 
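 *
 * For reference, a rough summary of how this switch treats each entity
 * type (audit event ids from get_delete_event_id() above):
 *
 *	service, instance	deletable	ADT_smf_delete
 *	snapshot		deletable	ADT_smf_delete_snap
 *	pg (persistent)		deletable	ADT_smf_delete_pg
 *	pg (non-persistent)	deletable	ADT_smf_delete_npg
 *	pg in a snapshot	fails		_PERMISSION_DENIED
 *	scope, snaplevel	fails		_BAD_REQUEST
 *	property		fails		_BAD_REQUEST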
*/ 4586 (void) pthread_mutex_unlock(&np->rn_lock); 4587 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 4588 4589 case REP_PROTOCOL_ENTITY_CPROPERTYGRP: 4590 (void) pthread_mutex_unlock(&np->rn_lock); 4591 np = np->rn_cchain[0]; 4592 RC_NODE_CHECK_AND_LOCK(np); 4593 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_CPROPERTYGRP, 4594 np->rn_pgflags); 4595 break; 4596 4597 case REP_PROTOCOL_ENTITY_PROPERTYGRP: 4598 if (np->rn_id.rl_ids[ID_SNAPSHOT] == 0) { 4599 event_id = 4600 get_delete_event_id(REP_PROTOCOL_ENTITY_PROPERTYGRP, 4601 np->rn_pgflags); 4602 audit_data.ed_type = strdup(np->rn_type); 4603 if (audit_data.ed_type == NULL) { 4604 (void) pthread_mutex_unlock(&np->rn_lock); 4605 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 4606 } 4607 break; 4608 } 4609 4610 /* Snapshot property groups are indelible. */ 4611 (void) pthread_mutex_unlock(&np->rn_lock); 4612 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED); 4613 4614 case REP_PROTOCOL_ENTITY_PROPERTY: 4615 (void) pthread_mutex_unlock(&np->rn_lock); 4616 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 4617 4618 default: 4619 assert(0); 4620 abort(); 4621 break; 4622 } 4623 4624 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN); 4625 if (audit_data.ed_fmri == NULL) { 4626 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 4627 goto cleanout; 4628 } 4629 np_orig = np; 4630 rc_node_hold_locked(np); /* simplifies rest of the code */ 4631 4632 again: 4633 /* 4634 * The following loop is to deal with the fact that snapshots and 4635 * property groups are moving targets -- changes to them result 4636 * in a new "child" node. Since we can only delete from the top node, 4637 * we have to loop until we have a non-RC_NODE_OLD version. 4638 */ 4639 for (;;) { 4640 if (!rc_node_wait_flag(np, 4641 RC_NODE_IN_TX | RC_NODE_USING_PARENT)) { 4642 rc_node_rele_locked(np); 4643 rc = REP_PROTOCOL_FAIL_DELETED; 4644 goto cleanout; 4645 } 4646 4647 if (np->rn_flags & RC_NODE_OLD) { 4648 rc_node_rele_locked(np); 4649 np = cache_lookup(&np_orig->rn_id); 4650 assert(np != np_orig); 4651 4652 if (np == NULL) { 4653 rc = REP_PROTOCOL_FAIL_DELETED; 4654 goto fail; 4655 } 4656 (void) pthread_mutex_lock(&np->rn_lock); 4657 continue; 4658 } 4659 4660 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) { 4661 rc_node_rele_locked(np); 4662 rc_node_clear(npp, 1); 4663 rc = REP_PROTOCOL_FAIL_DELETED; goto cleanout; 4664 } 4665 4666 /* 4667 * Mark our parent as children changing. This call drops our 4668 * lock and the RC_NODE_USING_PARENT flag, and returns with 4669 * pp's lock held 4670 */ 4671 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING); 4672 if (pp == NULL) { 4673 /* our parent is gone, we're going next... */ 4674 rc_node_rele(np); 4675 4676 rc_node_clear(npp, 1); 4677 rc = REP_PROTOCOL_FAIL_DELETED; 4678 goto cleanout; 4679 } 4680 4681 rc_node_hold_locked(pp); /* hold for later */ 4682 (void) pthread_mutex_unlock(&pp->rn_lock); 4683 4684 (void) pthread_mutex_lock(&np->rn_lock); 4685 if (!(np->rn_flags & RC_NODE_OLD)) 4686 break; /* not old -- we're done */ 4687 4688 (void) pthread_mutex_unlock(&np->rn_lock); 4689 (void) pthread_mutex_lock(&pp->rn_lock); 4690 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING); 4691 rc_node_rele_locked(pp); 4692 (void) pthread_mutex_lock(&np->rn_lock); 4693 continue; /* loop around and try again */ 4694 } 4695 /* 4696 * Everyone out of the pool -- we grab everything but 4697 * RC_NODE_USING_PARENT (including RC_NODE_DYING) to keep 4698 * any changes from occurring while we are attempting to 4699 * delete the node.
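 *
 * From here the delete proceeds roughly as follows (a roadmap of the
 * code below, not an interface contract):
 *
 *	rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)
 *	permission check (rn_lock is dropped around perm_granted())
 *	rc_node_delete_hold(np, 1)	hold the entire subgraph
 *	object_delete(np)		remove the object from the backend
 *	unlink np from the hash bucket, parent, and notification lists
 *	rc_node_delete_children(np, 1)	propagate death to the subgraph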
4700 */ 4701 if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) { 4702 (void) pthread_mutex_unlock(&np->rn_lock); 4703 rc = REP_PROTOCOL_FAIL_DELETED; 4704 goto fail; 4705 } 4706 4707 assert(!(np->rn_flags & RC_NODE_OLD)); 4708 4709 if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri, 4710 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) { 4711 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS); 4712 (void) pthread_mutex_unlock(&np->rn_lock); 4713 goto fail; 4714 } 4715 4716 #ifdef NATIVE_BUILD 4717 if (!client_is_privileged()) { 4718 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED; 4719 } 4720 #else 4721 if (is_main_repository) { 4722 /* permission check */ 4723 (void) pthread_mutex_unlock(&np->rn_lock); 4724 pcp = pc_create(); 4725 if (pcp != NULL) { 4726 rc = perm_add_enabling(pcp, AUTH_MODIFY); 4727 4728 /* add .smf.modify.<type> for pgs. */ 4729 if (rc == REP_PROTOCOL_SUCCESS && np->rn_id.rl_type == 4730 REP_PROTOCOL_ENTITY_PROPERTYGRP) { 4731 const char * const auth = 4732 perm_auth_for_pgtype(np->rn_type); 4733 4734 if (auth != NULL) 4735 rc = perm_add_enabling(pcp, auth); 4736 } 4737 4738 if (rc == REP_PROTOCOL_SUCCESS) { 4739 granted = perm_granted(pcp); 4740 4741 if (granted < 0) { 4742 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 4743 } else { 4744 /* 4745 * Copy out the authorization 4746 * string before freeing pcp. 4747 */ 4748 audit_data.ed_auth = 4749 strdup(pcp->pc_auth_string); 4750 if (audit_data.ed_auth == NULL) { 4751 /* 4752 * Following code line 4753 * cannot meet both the 4754 * indentation and the line 4755 * length requirements of 4756 * cstyle. Indentation has 4757 * been sacrificed. 4758 */ 4759 /* CSTYLED */ 4760 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 4761 } 4762 } 4763 } 4764 4765 pc_free(pcp); 4766 } else { 4767 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 4768 } 4769 4770 if (rc == REP_PROTOCOL_SUCCESS && !granted) { 4771 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED; 4772 audit_failure = 1; 4773 } 4774 (void) pthread_mutex_lock(&np->rn_lock); 4775 } else { 4776 rc = REP_PROTOCOL_SUCCESS; 4777 } 4778 #endif /* NATIVE_BUILD */ 4779 4780 if (rc != REP_PROTOCOL_SUCCESS) { 4781 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS); 4782 (void) pthread_mutex_unlock(&np->rn_lock); 4783 goto fail; 4784 } 4785 4786 ndp = uu_zalloc(sizeof (*ndp)); 4787 if (ndp == NULL) { 4788 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS); 4789 (void) pthread_mutex_unlock(&np->rn_lock); 4790 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 4791 goto fail; 4792 } 4793 4794 rc_node_delete_hold(np, 1); /* hold entire subgraph, drop lock */ 4795 4796 rc = object_delete(np); 4797 4798 if (rc != REP_PROTOCOL_SUCCESS) { 4799 (void) pthread_mutex_lock(&np->rn_lock); 4800 rc_node_delete_rele(np, 1); /* drops lock */ 4801 uu_free(ndp); 4802 goto fail; 4803 } 4804 4805 /* 4806 * Now, delicately unlink and delete the object. 4807 * 4808 * Create the delete notification, atomically remove 4809 * from the hash table and set the NODE_DEAD flag, and 4810 * remove from the parent's children list.
4811 */ 4812 rc_notify_node_delete(ndp, np); /* frees or uses ndp */ 4813 4814 bp = cache_hold(np->rn_hash); 4815 4816 (void) pthread_mutex_lock(&np->rn_lock); 4817 cache_remove_unlocked(bp, np); 4818 cache_release(bp); 4819 4820 np->rn_flags |= RC_NODE_DEAD; 4821 if (pp != NULL) { 4822 (void) pthread_mutex_unlock(&np->rn_lock); 4823 4824 (void) pthread_mutex_lock(&pp->rn_lock); 4825 (void) pthread_mutex_lock(&np->rn_lock); 4826 uu_list_remove(pp->rn_children, np); 4827 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING); 4828 (void) pthread_mutex_unlock(&pp->rn_lock); 4829 np->rn_flags &= ~RC_NODE_IN_PARENT; 4830 } 4831 /* 4832 * finally, propagate death to our children, handle notifications, 4833 * and release our hold. 4834 */ 4835 rc_node_hold_locked(np); /* hold for delete */ 4836 rc_node_delete_children(np, 1); /* drops DYING_FLAGS, lock, ref */ 4837 4838 rc_node_clear(npp, 1); 4839 4840 (void) pthread_mutex_lock(&rc_pg_notify_lock); 4841 while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL) 4842 rc_pg_notify_fire(pnp); 4843 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 4844 rc_notify_remove_node(np); 4845 4846 rc_node_rele(np); 4847 4848 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS, 4849 &audit_data); 4850 free(audit_data.ed_auth); 4851 free(audit_data.ed_snapname); 4852 free(audit_data.ed_type); 4853 free(audit_data.ed_fmri); 4854 return (rc); 4855 4856 fail: 4857 rc_node_rele(np); 4858 if (rc == REP_PROTOCOL_FAIL_DELETED) 4859 rc_node_clear(npp, 1); 4860 if (pp != NULL) { 4861 (void) pthread_mutex_lock(&pp->rn_lock); 4862 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING); 4863 rc_node_rele_locked(pp); /* drop ref and lock */ 4864 } 4865 if (audit_failure) { 4866 smf_audit_event(event_id, ADT_FAILURE, 4867 ADT_FAIL_VALUE_AUTH, &audit_data); 4868 } 4869 cleanout: 4870 free(audit_data.ed_auth); 4871 free(audit_data.ed_snapname); 4872 free(audit_data.ed_type); 4873 free(audit_data.ed_fmri); 4874 return (rc); 4875 } 4876 4877 int 4878 rc_node_next_snaplevel(rc_node_ptr_t *npp, rc_node_ptr_t *cpp) 4879 { 4880 rc_node_t *np; 4881 rc_node_t *cp, *pp; 4882 int res; 4883 4884 rc_node_clear(cpp, 0); 4885 4886 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); 4887 4888 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT && 4889 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) { 4890 (void) pthread_mutex_unlock(&np->rn_lock); 4891 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE); 4892 } 4893 4894 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) { 4895 if ((res = rc_node_fill_children(np, 4896 REP_PROTOCOL_ENTITY_SNAPLEVEL)) != REP_PROTOCOL_SUCCESS) { 4897 (void) pthread_mutex_unlock(&np->rn_lock); 4898 return (res); 4899 } 4900 4901 for (cp = uu_list_first(np->rn_children); 4902 cp != NULL; 4903 cp = uu_list_next(np->rn_children, cp)) { 4904 if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) 4905 continue; 4906 rc_node_hold(cp); 4907 break; 4908 } 4909 4910 (void) pthread_mutex_unlock(&np->rn_lock); 4911 } else { 4912 HOLD_PTR_FLAG_OR_RETURN(np, npp, RC_NODE_USING_PARENT); 4913 /* 4914 * mark our parent as children changing. This call drops our 4915 * lock and the RC_NODE_USING_PARENT flag, and returns with 4916 * pp's lock held 4917 */ 4918 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING); 4919 if (pp == NULL) { 4920 /* our parent is gone, we're going next... 
*/ 4921 4922 rc_node_clear(npp, 1); 4923 return (REP_PROTOCOL_FAIL_DELETED); 4924 } 4925 4926 /* 4927 * find the next snaplevel 4928 */ 4929 cp = np; 4930 while ((cp = uu_list_next(pp->rn_children, cp)) != NULL && 4931 cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) 4932 ; 4933 4934 /* it must match the snaplevel list */ 4935 assert((cp == NULL && np->rn_snaplevel->rsl_next == NULL) || 4936 (cp != NULL && np->rn_snaplevel->rsl_next == 4937 cp->rn_snaplevel)); 4938 4939 if (cp != NULL) 4940 rc_node_hold(cp); 4941 4942 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING); 4943 4944 (void) pthread_mutex_unlock(&pp->rn_lock); 4945 } 4946 4947 rc_node_assign(cpp, cp); 4948 if (cp != NULL) { 4949 rc_node_rele(cp); 4950 4951 return (REP_PROTOCOL_SUCCESS); 4952 } 4953 return (REP_PROTOCOL_FAIL_NOT_FOUND); 4954 } 4955 4956 /* 4957 * This call takes a snapshot (np) and either: 4958 * an existing snapid (to be associated with np), or 4959 * a non-NULL parentp (from which a new snapshot is taken, and associated 4960 * with np) 4961 * 4962 * To do the association, np is duplicated, the duplicate is made to 4963 * represent the new snapid, and np is replaced with the new rc_node_t on 4964 * np's parent's child list. np is placed on the new node's rn_former list, 4965 * and the new node replaces np in cache_hash (so rc_node_update() will find the new one). 4966 * 4967 * old_fmri and old_name point to the original snapshot's FMRI and name. 4968 * These values are used when generating audit events. 4969 * 4970 * Fails with 4971 * _BAD_REQUEST 4972 * _BACKEND_READONLY 4973 * _DELETED 4974 * _NO_RESOURCES 4975 * _TRUNCATED 4976 * _TYPE_MISMATCH 4977 */ 4978 static int 4979 rc_attach_snapshot( 4980 rc_node_t *np, 4981 uint32_t snapid, 4982 rc_node_t *parentp, 4983 char *old_fmri, 4984 char *old_name) 4985 { 4986 rc_node_t *np_orig; 4987 rc_node_t *nnp, *prev; 4988 rc_node_t *pp; 4989 int rc; 4990 size_t sz_out; 4991 au_event_t event_id; 4992 audit_event_data_t audit_data; 4993 4994 if (parentp == NULL) { 4995 assert(old_fmri != NULL); 4996 } else { 4997 assert(snapid == 0); 4998 } 4999 assert(MUTEX_HELD(&np->rn_lock)); 5000 5001 /* Gather the audit data. */ 5002 /* 5003 * ADT_smf_* symbols may not be defined in the /usr/include header 5004 * files on the build machine. Thus, the following if-else will 5005 * not be compiled when doing native builds. 5006 */ 5007 #ifndef NATIVE_BUILD 5008 if (parentp == NULL) { 5009 event_id = ADT_smf_attach_snap; 5010 } else { 5011 event_id = ADT_smf_create_snap; 5012 } 5013 #endif /* NATIVE_BUILD */ 5014 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN); 5015 audit_data.ed_snapname = malloc(REP_PROTOCOL_NAME_LEN); 5016 if ((audit_data.ed_fmri == NULL) || (audit_data.ed_snapname == NULL)) { 5017 (void) pthread_mutex_unlock(&np->rn_lock); 5018 free(audit_data.ed_fmri); 5019 free(audit_data.ed_snapname); 5020 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 5021 } 5022 audit_data.ed_auth = NULL; 5023 if (strlcpy(audit_data.ed_snapname, np->rn_name, 5024 REP_PROTOCOL_NAME_LEN) >= REP_PROTOCOL_NAME_LEN) { 5025 abort(); 5026 } 5027 audit_data.ed_old_fmri = old_fmri; 5028 audit_data.ed_old_name = old_name ? old_name : "NO NAME"; 5029 5030 if (parentp == NULL) { 5031 /* 5032 * In the attach case, get the instance FMRIs of the 5033 * snapshots.
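 *
 * For illustration, the audit record for an attach ends up carrying
 * roughly the following (informal summary of the assignments around
 * this point):
 *
 *	ed_fmri		FMRI (or fragment) of np, the target snapshot
 *	ed_snapname	np->rn_name, the target snapshot's name
 *	ed_old_fmri	old_fmri, the snapshot the snapid comes from
 *	ed_old_name	old_name, or "NO NAME" if none was supplied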
5034 */ 5035 if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri, 5036 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) { 5037 (void) pthread_mutex_unlock(&np->rn_lock); 5038 free(audit_data.ed_fmri); 5039 free(audit_data.ed_snapname); 5040 return (rc); 5041 } 5042 } else { 5043 /* 5044 * Capture the FMRI of the parent if we're actually going 5045 * to take the snapshot. 5046 */ 5047 if ((rc = rc_node_get_fmri_or_fragment(parentp, 5048 audit_data.ed_fmri, REP_PROTOCOL_FMRI_LEN, &sz_out)) != 5049 REP_PROTOCOL_SUCCESS) { 5050 (void) pthread_mutex_unlock(&np->rn_lock); 5051 free(audit_data.ed_fmri); 5052 free(audit_data.ed_snapname); 5053 return (rc); 5054 } 5055 } 5056 5057 np_orig = np; 5058 rc_node_hold_locked(np); /* simplifies the remainder */ 5059 5060 (void) pthread_mutex_unlock(&np->rn_lock); 5061 if ((rc = rc_node_modify_permission_check(&audit_data.ed_auth)) != 5062 REP_PROTOCOL_SUCCESS) { 5063 smf_audit_event(event_id, ADT_FAILURE, ADT_FAIL_VALUE_AUTH, 5064 &audit_data); 5065 goto cleanout; 5066 } 5067 (void) pthread_mutex_lock(&np->rn_lock); 5068 5069 /* 5070 * get the latest node, holding RC_NODE_IN_TX to keep the rn_former 5071 * list from changing. 5072 */ 5073 for (;;) { 5074 if (!(np->rn_flags & RC_NODE_OLD)) { 5075 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) { 5076 goto again; 5077 } 5078 pp = rc_node_hold_parent_flag(np, 5079 RC_NODE_CHILDREN_CHANGING); 5080 5081 (void) pthread_mutex_lock(&np->rn_lock); 5082 if (pp == NULL) { 5083 goto again; 5084 } 5085 if (np->rn_flags & RC_NODE_OLD) { 5086 rc_node_rele_flag(pp, 5087 RC_NODE_CHILDREN_CHANGING); 5088 (void) pthread_mutex_unlock(&pp->rn_lock); 5089 goto again; 5090 } 5091 (void) pthread_mutex_unlock(&pp->rn_lock); 5092 5093 if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) { 5094 /* 5095 * Can't happen, since we're holding our 5096 * parent's CHILDREN_CHANGING flag... 5097 */ 5098 abort(); 5099 } 5100 break; /* everything's ready */ 5101 } 5102 again: 5103 rc_node_rele_locked(np); 5104 np = cache_lookup(&np_orig->rn_id); 5105 5106 if (np == NULL) { 5107 rc = REP_PROTOCOL_FAIL_DELETED; 5108 goto cleanout; 5109 } 5110 5111 (void) pthread_mutex_lock(&np->rn_lock); 5112 } 5113 5114 if (parentp != NULL) { 5115 if (pp != parentp) { 5116 rc = REP_PROTOCOL_FAIL_BAD_REQUEST; 5117 goto fail; 5118 } 5119 nnp = NULL; 5120 } else { 5121 /* 5122 * look for a former node with the snapid we need. 
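 *
 * The scan below walks the chain of old versions
 *
 *	np -> rn_former -> rn_former -> ...
 *
 * remembering the predecessor in 'prev' so that a matching node can
 * later be spliced out of the chain (see "fix up the former chain"
 * further down).  Illustration only; the walk itself follows.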
5123 */ 5124 if (np->rn_snapshot_id == snapid) { 5125 rc_node_rele_flag(np, RC_NODE_IN_TX); 5126 rc_node_rele_locked(np); 5127 5128 (void) pthread_mutex_lock(&pp->rn_lock); 5129 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING); 5130 (void) pthread_mutex_unlock(&pp->rn_lock); 5131 rc = REP_PROTOCOL_SUCCESS; /* nothing to do */ 5132 goto cleanout; 5133 } 5134 5135 prev = np; 5136 while ((nnp = prev->rn_former) != NULL) { 5137 if (nnp->rn_snapshot_id == snapid) { 5138 rc_node_hold(nnp); 5139 break; /* existing node with that id */ 5140 } 5141 prev = nnp; 5142 } 5143 } 5144 5145 if (nnp == NULL) { 5146 prev = NULL; 5147 nnp = rc_node_alloc(); 5148 if (nnp == NULL) { 5149 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 5150 goto fail; 5151 } 5152 5153 nnp->rn_id = np->rn_id; /* structure assignment */ 5154 nnp->rn_hash = np->rn_hash; 5155 nnp->rn_name = strdup(np->rn_name); 5156 nnp->rn_snapshot_id = snapid; 5157 nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT; 5158 5159 if (nnp->rn_name == NULL) { 5160 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 5161 goto fail; 5162 } 5163 } 5164 5165 (void) pthread_mutex_unlock(&np->rn_lock); 5166 5167 rc = object_snapshot_attach(&np->rn_id, &snapid, (parentp != NULL)); 5168 5169 if (parentp != NULL) 5170 nnp->rn_snapshot_id = snapid; /* fill in new snapid */ 5171 else 5172 assert(nnp->rn_snapshot_id == snapid); 5173 5174 (void) pthread_mutex_lock(&np->rn_lock); 5175 if (rc != REP_PROTOCOL_SUCCESS) 5176 goto fail; 5177 5178 /* 5179 * fix up the former chain 5180 */ 5181 if (prev != NULL) { 5182 prev->rn_former = nnp->rn_former; 5183 (void) pthread_mutex_lock(&nnp->rn_lock); 5184 nnp->rn_flags &= ~RC_NODE_ON_FORMER; 5185 nnp->rn_former = NULL; 5186 (void) pthread_mutex_unlock(&nnp->rn_lock); 5187 } 5188 np->rn_flags |= RC_NODE_OLD; 5189 (void) pthread_mutex_unlock(&np->rn_lock); 5190 5191 /* 5192 * replace np with nnp 5193 */ 5194 rc_node_relink_child(pp, np, nnp); 5195 5196 rc_node_rele(np); 5197 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS, &audit_data); 5198 rc = REP_PROTOCOL_SUCCESS; 5199 5200 cleanout: 5201 free(audit_data.ed_auth); 5202 free(audit_data.ed_fmri); 5203 free(audit_data.ed_snapname); 5204 return (rc); 5205 5206 fail: 5207 rc_node_rele_flag(np, RC_NODE_IN_TX); 5208 rc_node_rele_locked(np); 5209 (void) pthread_mutex_lock(&pp->rn_lock); 5210 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING); 5211 (void) pthread_mutex_unlock(&pp->rn_lock); 5212 5213 if (nnp != NULL) { 5214 if (prev == NULL) 5215 rc_node_destroy(nnp); 5216 else 5217 rc_node_rele(nnp); 5218 } 5219 5220 free(audit_data.ed_auth); 5221 free(audit_data.ed_fmri); 5222 free(audit_data.ed_snapname); 5223 return (rc); 5224 } 5225 5226 int 5227 rc_snapshot_take_new(rc_node_ptr_t *npp, const char *svcname, 5228 const char *instname, const char *name, rc_node_ptr_t *outpp) 5229 { 5230 rc_node_t *np; 5231 rc_node_t *outp = NULL; 5232 int rc, perm_rc; 5233 char fmri[REP_PROTOCOL_FMRI_LEN]; 5234 audit_event_data_t audit_data; 5235 size_t sz_out; 5236 5237 rc_node_clear(outpp, 0); 5238 5239 perm_rc = rc_node_modify_permission_check(&audit_data.ed_auth); 5240 5241 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); 5242 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) { 5243 (void) pthread_mutex_unlock(&np->rn_lock); 5244 free(audit_data.ed_auth); 5245 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH); 5246 } 5247 5248 rc = rc_check_type_name(REP_PROTOCOL_ENTITY_SNAPSHOT, name); 5249 if (rc != REP_PROTOCOL_SUCCESS) { 5250 (void) pthread_mutex_unlock(&np->rn_lock); 5251 free(audit_data.ed_auth); 5252 return (rc); 5253 
} 5254 5255 if (svcname != NULL && (rc = 5256 rc_check_type_name(REP_PROTOCOL_ENTITY_SERVICE, svcname)) != 5257 REP_PROTOCOL_SUCCESS) { 5258 (void) pthread_mutex_unlock(&np->rn_lock); 5259 free(audit_data.ed_auth); 5260 return (rc); 5261 } 5262 5263 if (instname != NULL && (rc = 5264 rc_check_type_name(REP_PROTOCOL_ENTITY_INSTANCE, instname)) != 5265 REP_PROTOCOL_SUCCESS) { 5266 (void) pthread_mutex_unlock(&np->rn_lock); 5267 free(audit_data.ed_auth); 5268 return (rc); 5269 } 5270 5271 audit_data.ed_auth = NULL; 5272 audit_data.ed_fmri = fmri; 5273 audit_data.ed_snapname = (char *)name; 5274 5275 if ((rc = rc_node_get_fmri_or_fragment(np, fmri, sizeof (fmri), 5276 &sz_out)) != REP_PROTOCOL_SUCCESS) { 5277 (void) pthread_mutex_unlock(&np->rn_lock); 5278 free(audit_data.ed_auth); 5279 return (rc); 5280 } 5281 if (perm_rc != REP_PROTOCOL_SUCCESS) { 5282 (void) pthread_mutex_unlock(&np->rn_lock); 5283 smf_audit_event(ADT_smf_create_snap, ADT_FAILURE, 5284 ADT_FAIL_VALUE_AUTH, &audit_data); 5285 free(audit_data.ed_auth); 5286 return (perm_rc); 5287 } 5288 5289 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD, 5290 audit_data.ed_auth); 5291 (void) pthread_mutex_unlock(&np->rn_lock); 5292 5293 rc = object_snapshot_take_new(np, svcname, instname, name, &outp); 5294 5295 if (rc == REP_PROTOCOL_SUCCESS) { 5296 rc_node_assign(outpp, outp); 5297 rc_node_rele(outp); 5298 } 5299 5300 (void) pthread_mutex_lock(&np->rn_lock); 5301 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD); 5302 (void) pthread_mutex_unlock(&np->rn_lock); 5303 5304 if (rc == REP_PROTOCOL_SUCCESS) { 5305 smf_audit_event(ADT_smf_create_snap, ADT_SUCCESS, ADT_SUCCESS, 5306 &audit_data); 5307 } 5308 if (audit_data.ed_auth != NULL) 5309 free(audit_data.ed_auth); 5310 return (rc); 5311 } 5312 5313 int 5314 rc_snapshot_take_attach(rc_node_ptr_t *npp, rc_node_ptr_t *outpp) 5315 { 5316 rc_node_t *np, *outp; 5317 5318 RC_NODE_PTR_GET_CHECK(np, npp); 5319 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) { 5320 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH); 5321 } 5322 5323 RC_NODE_PTR_GET_CHECK_AND_LOCK(outp, outpp); 5324 if (outp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) { 5325 (void) pthread_mutex_unlock(&outp->rn_lock); 5326 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 5327 } 5328 5329 return (rc_attach_snapshot(outp, 0, np, NULL, 5330 NULL)); /* drops outp's lock */ 5331 } 5332 5333 int 5334 rc_snapshot_attach(rc_node_ptr_t *npp, rc_node_ptr_t *cpp) 5335 { 5336 rc_node_t *np; 5337 rc_node_t *cp; 5338 uint32_t snapid; 5339 char old_name[REP_PROTOCOL_NAME_LEN]; 5340 int rc; 5341 size_t sz_out; 5342 char old_fmri[REP_PROTOCOL_FMRI_LEN]; 5343 5344 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); 5345 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) { 5346 (void) pthread_mutex_unlock(&np->rn_lock); 5347 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 5348 } 5349 snapid = np->rn_snapshot_id; 5350 rc = rc_node_get_fmri_or_fragment(np, old_fmri, sizeof (old_fmri), 5351 &sz_out); 5352 (void) pthread_mutex_unlock(&np->rn_lock); 5353 if (rc != REP_PROTOCOL_SUCCESS) 5354 return (rc); 5355 if (np->rn_name != NULL) { 5356 if (strlcpy(old_name, np->rn_name, sizeof (old_name)) >= 5357 sizeof (old_name)) { 5358 return (REP_PROTOCOL_FAIL_TRUNCATED); 5359 } 5360 } 5361 5362 RC_NODE_PTR_GET_CHECK_AND_LOCK(cp, cpp); 5363 if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) { 5364 (void) pthread_mutex_unlock(&cp->rn_lock); 5365 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 5366 } 5367 5368 rc = rc_attach_snapshot(cp, snapid, NULL, 5369 old_fmri, old_name); /* 
drops cp's lock */ 5370 return (rc); 5371 } 5372 5373 /* 5374 * If the pgname property group under ent has type pgtype, and it has a 5375 * propname property with type ptype, return _SUCCESS. If pgtype is NULL, 5376 * it is not checked. If ent is not a service node, we will return _SUCCESS if 5377 * a property meeting the requirements exists in either the instance or its 5378 * parent. 5379 * 5380 * Returns 5381 * _SUCCESS - see above 5382 * _DELETED - ent or one of its ancestors was deleted 5383 * _NO_RESOURCES - no resources 5384 * _NOT_FOUND - no matching property was found 5385 */ 5386 static int 5387 rc_svc_prop_exists(rc_node_t *ent, const char *pgname, const char *pgtype, 5388 const char *propname, rep_protocol_value_type_t ptype) 5389 { 5390 int ret; 5391 rc_node_t *pg = NULL, *spg = NULL, *svc, *prop; 5392 5393 assert(!MUTEX_HELD(&ent->rn_lock)); 5394 5395 (void) pthread_mutex_lock(&ent->rn_lock); 5396 ret = rc_node_find_named_child(ent, pgname, 5397 REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg); 5398 (void) pthread_mutex_unlock(&ent->rn_lock); 5399 5400 switch (ret) { 5401 case REP_PROTOCOL_SUCCESS: 5402 break; 5403 5404 case REP_PROTOCOL_FAIL_DELETED: 5405 case REP_PROTOCOL_FAIL_NO_RESOURCES: 5406 return (ret); 5407 5408 default: 5409 bad_error("rc_node_find_named_child", ret); 5410 } 5411 5412 if (ent->rn_id.rl_type != REP_PROTOCOL_ENTITY_SERVICE) { 5413 ret = rc_node_find_ancestor(ent, REP_PROTOCOL_ENTITY_SERVICE, 5414 &svc); 5415 if (ret != REP_PROTOCOL_SUCCESS) { 5416 assert(ret == REP_PROTOCOL_FAIL_DELETED); 5417 if (pg != NULL) 5418 rc_node_rele(pg); 5419 return (ret); 5420 } 5421 assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE); 5422 5423 (void) pthread_mutex_lock(&svc->rn_lock); 5424 ret = rc_node_find_named_child(svc, pgname, 5425 REP_PROTOCOL_ENTITY_PROPERTYGRP, &spg); 5426 (void) pthread_mutex_unlock(&svc->rn_lock); 5427 5428 rc_node_rele(svc); 5429 5430 switch (ret) { 5431 case REP_PROTOCOL_SUCCESS: 5432 break; 5433 5434 case REP_PROTOCOL_FAIL_DELETED: 5435 case REP_PROTOCOL_FAIL_NO_RESOURCES: 5436 if (pg != NULL) 5437 rc_node_rele(pg); 5438 return (ret); 5439 5440 default: 5441 bad_error("rc_node_find_named_child", ret); 5442 } 5443 } 5444 5445 if (pg != NULL && 5446 pgtype != NULL && strcmp(pg->rn_type, pgtype) != 0) { 5447 rc_node_rele(pg); 5448 pg = NULL; 5449 } 5450 5451 if (spg != NULL && 5452 pgtype != NULL && strcmp(spg->rn_type, pgtype) != 0) { 5453 rc_node_rele(spg); 5454 spg = NULL; 5455 } 5456 5457 if (pg == NULL) { 5458 if (spg == NULL) 5459 return (REP_PROTOCOL_FAIL_NOT_FOUND); 5460 pg = spg; 5461 spg = NULL; 5462 } 5463 5464 /* 5465 * At this point, pg is non-NULL, and is a property group node of the 5466 * correct type. spg, if non-NULL, is also a property group node of 5467 * the correct type. Check for the property in pg first, then spg 5468 * (if applicable). 
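 *
 * To summarize how we got here when ent is not itself a service
 * (pg was looked up on ent, spg on its parent service):
 *
 *	ent's pg	service's pg	what gets checked
 *	found		found		pg first, then spg
 *	found		not found	pg only
 *	not found	found		the service's pg (moved into pg)
 *	not found	not found	already returned _NOT_FOUND
 *
 * (Informal summary of the preceding code, for reference.)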
5469 */ 5470 (void) pthread_mutex_lock(&pg->rn_lock); 5471 ret = rc_node_find_named_child(pg, propname, 5472 REP_PROTOCOL_ENTITY_PROPERTY, &prop); 5473 (void) pthread_mutex_unlock(&pg->rn_lock); 5474 rc_node_rele(pg); 5475 switch (ret) { 5476 case REP_PROTOCOL_SUCCESS: 5477 if (prop != NULL) { 5478 if (prop->rn_valtype == ptype) { 5479 rc_node_rele(prop); 5480 if (spg != NULL) 5481 rc_node_rele(spg); 5482 return (REP_PROTOCOL_SUCCESS); 5483 } 5484 rc_node_rele(prop); 5485 } 5486 break; 5487 5488 case REP_PROTOCOL_FAIL_NO_RESOURCES: 5489 if (spg != NULL) 5490 rc_node_rele(spg); 5491 return (ret); 5492 5493 case REP_PROTOCOL_FAIL_DELETED: 5494 break; 5495 5496 default: 5497 bad_error("rc_node_find_named_child", ret); 5498 } 5499 5500 if (spg == NULL) 5501 return (REP_PROTOCOL_FAIL_NOT_FOUND); 5502 5503 pg = spg; 5504 5505 (void) pthread_mutex_lock(&pg->rn_lock); 5506 ret = rc_node_find_named_child(pg, propname, 5507 REP_PROTOCOL_ENTITY_PROPERTY, &prop); 5508 (void) pthread_mutex_unlock(&pg->rn_lock); 5509 rc_node_rele(pg); 5510 switch (ret) { 5511 case REP_PROTOCOL_SUCCESS: 5512 if (prop != NULL) { 5513 if (prop->rn_valtype == ptype) { 5514 rc_node_rele(prop); 5515 return (REP_PROTOCOL_SUCCESS); 5516 } 5517 rc_node_rele(prop); 5518 } 5519 return (REP_PROTOCOL_FAIL_NOT_FOUND); 5520 5521 case REP_PROTOCOL_FAIL_NO_RESOURCES: 5522 return (ret); 5523 5524 case REP_PROTOCOL_FAIL_DELETED: 5525 return (REP_PROTOCOL_FAIL_NOT_FOUND); 5526 5527 default: 5528 bad_error("rc_node_find_named_child", ret); 5529 } 5530 5531 return (REP_PROTOCOL_SUCCESS); 5532 } 5533 5534 /* 5535 * Given a property group node, returns _SUCCESS if the property group may 5536 * be read without any special authorization. 5537 * 5538 * Fails with: 5539 * _DELETED - np or an ancestor node was deleted 5540 * _TYPE_MISMATCH - np does not refer to a property group 5541 * _NO_RESOURCES - no resources 5542 * _PERMISSION_DENIED - authorization is required 5543 */ 5544 static int 5545 rc_node_pg_check_read_protect(rc_node_t *np) 5546 { 5547 int ret; 5548 rc_node_t *ent; 5549 5550 assert(!MUTEX_HELD(&np->rn_lock)); 5551 5552 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) 5553 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH); 5554 5555 if (strcmp(np->rn_type, SCF_GROUP_FRAMEWORK) == 0 || 5556 strcmp(np->rn_type, SCF_GROUP_DEPENDENCY) == 0 || 5557 strcmp(np->rn_type, SCF_GROUP_METHOD) == 0) 5558 return (REP_PROTOCOL_SUCCESS); 5559 5560 ret = rc_node_parent(np, &ent); 5561 5562 if (ret != REP_PROTOCOL_SUCCESS) 5563 return (ret); 5564 5565 ret = rc_svc_prop_exists(ent, np->rn_name, np->rn_type, 5566 AUTH_PROP_READ, REP_PROTOCOL_TYPE_STRING); 5567 5568 rc_node_rele(ent); 5569 5570 switch (ret) { 5571 case REP_PROTOCOL_FAIL_NOT_FOUND: 5572 return (REP_PROTOCOL_SUCCESS); 5573 case REP_PROTOCOL_SUCCESS: 5574 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED); 5575 case REP_PROTOCOL_FAIL_DELETED: 5576 case REP_PROTOCOL_FAIL_NO_RESOURCES: 5577 return (ret); 5578 default: 5579 bad_error("rc_svc_prop_exists", ret); 5580 } 5581 5582 return (REP_PROTOCOL_SUCCESS); 5583 } 5584 5585 /* 5586 * Fails with 5587 * _DELETED - np's node or parent has been deleted 5588 * _TYPE_MISMATCH - np's node is not a property 5589 * _NO_RESOURCES - out of memory 5590 * _PERMISSION_DENIED - no authorization to read this property's value(s) 5591 * _BAD_REQUEST - np's parent is not a property group 5592 */ 5593 static int 5594 rc_node_property_may_read(rc_node_t *np) 5595 { 5596 int ret, granted = 0; 5597 rc_node_t *pgp; 5598 permcheck_t *pcp; 5599 audit_event_data_t 
audit_data; 5600 size_t sz_out; 5601 5602 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) 5603 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH); 5604 5605 if (client_is_privileged()) 5606 return (REP_PROTOCOL_SUCCESS); 5607 5608 #ifdef NATIVE_BUILD 5609 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED); 5610 #else 5611 ret = rc_node_parent(np, &pgp); 5612 5613 if (ret != REP_PROTOCOL_SUCCESS) 5614 return (ret); 5615 5616 if (pgp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) { 5617 rc_node_rele(pgp); 5618 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 5619 } 5620 5621 ret = rc_node_pg_check_read_protect(pgp); 5622 5623 if (ret != REP_PROTOCOL_FAIL_PERMISSION_DENIED) { 5624 rc_node_rele(pgp); 5625 return (ret); 5626 } 5627 5628 pcp = pc_create(); 5629 5630 if (pcp == NULL) { 5631 rc_node_rele(pgp); 5632 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 5633 } 5634 5635 ret = perm_add_enabling(pcp, AUTH_MODIFY); 5636 5637 if (ret == REP_PROTOCOL_SUCCESS) { 5638 const char * const auth = 5639 perm_auth_for_pgtype(pgp->rn_type); 5640 5641 if (auth != NULL) 5642 ret = perm_add_enabling(pcp, auth); 5643 } 5644 5645 /* 5646 * If you are permitted to modify the value, you may also 5647 * read it. This means that both the MODIFY and VALUE 5648 * authorizations are acceptable. We don't allow requests 5649 * for AUTH_PROP_MODIFY if all you have is $AUTH_PROP_VALUE, 5650 * however, to avoid leaking possibly valuable information 5651 * since such a user can't change the property anyway. 5652 */ 5653 if (ret == REP_PROTOCOL_SUCCESS) 5654 ret = perm_add_enabling_values(pcp, pgp, 5655 AUTH_PROP_MODIFY); 5656 5657 if (ret == REP_PROTOCOL_SUCCESS && 5658 strcmp(np->rn_name, AUTH_PROP_MODIFY) != 0) 5659 ret = perm_add_enabling_values(pcp, pgp, 5660 AUTH_PROP_VALUE); 5661 5662 if (ret == REP_PROTOCOL_SUCCESS) 5663 ret = perm_add_enabling_values(pcp, pgp, 5664 AUTH_PROP_READ); 5665 5666 rc_node_rele(pgp); 5667 5668 if (ret == REP_PROTOCOL_SUCCESS) { 5669 granted = perm_granted(pcp); 5670 if (granted < 0) 5671 ret = REP_PROTOCOL_FAIL_NO_RESOURCES; 5672 } 5673 if (ret == REP_PROTOCOL_SUCCESS) { 5674 /* Generate a read_prop audit event. 
*/ 5675 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN); 5676 if (audit_data.ed_fmri == NULL) 5677 ret = REP_PROTOCOL_FAIL_NO_RESOURCES; 5678 } 5679 ret = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri, 5680 REP_PROTOCOL_FMRI_LEN, &sz_out); 5681 assert(ret == REP_PROTOCOL_SUCCESS); 5682 if (ret == REP_PROTOCOL_SUCCESS) { 5683 int status; 5684 int ret_value; 5685 5686 if (granted == 0) { 5687 status = ADT_FAILURE; 5688 ret_value = ADT_FAIL_VALUE_AUTH; 5689 } else { 5690 status = ADT_SUCCESS; 5691 ret_value = ADT_SUCCESS; 5692 } 5693 audit_data.ed_auth = pcp->pc_auth_string; 5694 smf_audit_event(ADT_smf_read_prop, 5695 status, ret_value, &audit_data); 5696 } 5697 free(audit_data.ed_fmri); 5698 5699 pc_free(pcp); 5700 5701 if (ret == REP_PROTOCOL_SUCCESS && !granted) 5702 ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED; 5703 5704 return (ret); 5705 #endif /* NATIVE_BUILD */ 5706 } 5707 5708 /* 5709 * Iteration 5710 */ 5711 static int 5712 rc_iter_filter_name(rc_node_t *np, void *s) 5713 { 5714 const char *name = s; 5715 5716 return (strcmp(np->rn_name, name) == 0); 5717 } 5718 5719 static int 5720 rc_iter_filter_type(rc_node_t *np, void *s) 5721 { 5722 const char *type = s; 5723 5724 return (np->rn_type != NULL && strcmp(np->rn_type, type) == 0); 5725 } 5726 5727 /*ARGSUSED*/ 5728 static int 5729 rc_iter_null_filter(rc_node_t *np, void *s) 5730 { 5731 return (1); 5732 } 5733 5734 /* 5735 * Allocate & initialize an rc_node_iter_t structure. Essentially, ensure 5736 * np->rn_children is populated and call uu_list_walk_start(np->rn_children). 5737 * If successful, leaves a hold on np & increments np->rn_other_refs 5738 * 5739 * If composed is true, then set up for iteration across the top level of np's 5740 * composition chain. If successful, leaves a hold on np and increments 5741 * rn_other_refs for the top level of np's composition chain. 5742 * 5743 * Fails with 5744 * _NO_RESOURCES 5745 * _INVALID_TYPE 5746 * _TYPE_MISMATCH - np cannot carry type children 5747 * _DELETED 5748 */ 5749 static int 5750 rc_iter_create(rc_node_iter_t **resp, rc_node_t *np, uint32_t type, 5751 rc_iter_filter_func *filter, void *arg, boolean_t composed) 5752 { 5753 rc_node_iter_t *nip; 5754 int res; 5755 5756 assert(*resp == NULL); 5757 5758 nip = uu_zalloc(sizeof (*nip)); 5759 if (nip == NULL) 5760 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 5761 5762 /* np is held by the client's rc_node_ptr_t */ 5763 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) 5764 composed = 1; 5765 5766 if (!composed) { 5767 (void) pthread_mutex_lock(&np->rn_lock); 5768 5769 if ((res = rc_node_fill_children(np, type)) != 5770 REP_PROTOCOL_SUCCESS) { 5771 (void) pthread_mutex_unlock(&np->rn_lock); 5772 uu_free(nip); 5773 return (res); 5774 } 5775 5776 nip->rni_clevel = -1; 5777 5778 nip->rni_iter = uu_list_walk_start(np->rn_children, 5779 UU_WALK_ROBUST); 5780 if (nip->rni_iter != NULL) { 5781 nip->rni_iter_node = np; 5782 rc_node_hold_other(np); 5783 } else { 5784 (void) pthread_mutex_unlock(&np->rn_lock); 5785 uu_free(nip); 5786 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 5787 } 5788 (void) pthread_mutex_unlock(&np->rn_lock); 5789 } else { 5790 rc_node_t *ent; 5791 5792 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) { 5793 /* rn_cchain isn't valid until children are loaded. 
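 *
 * For reference, the iterator state used for a composed walk is
 * roughly:
 *
 *	rni_clevel	index into np->rn_cchain[] (-1 means the
 *			iteration is not composed)
 *	rni_iter_node	the cchain entry currently being walked
 *	rni_iter	uu_list walk over that entry's rn_children
 *
 * (Informal summary; see the assignments below.)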
*/ 5794 (void) pthread_mutex_lock(&np->rn_lock); 5795 res = rc_node_fill_children(np, 5796 REP_PROTOCOL_ENTITY_SNAPLEVEL); 5797 (void) pthread_mutex_unlock(&np->rn_lock); 5798 if (res != REP_PROTOCOL_SUCCESS) { 5799 uu_free(nip); 5800 return (res); 5801 } 5802 5803 /* Check for an empty snapshot. */ 5804 if (np->rn_cchain[0] == NULL) 5805 goto empty; 5806 } 5807 5808 /* Start at the top of the composition chain. */ 5809 for (nip->rni_clevel = 0; ; ++nip->rni_clevel) { 5810 if (nip->rni_clevel >= COMPOSITION_DEPTH) { 5811 /* Empty composition chain. */ 5812 empty: 5813 nip->rni_clevel = -1; 5814 nip->rni_iter = NULL; 5815 /* It's ok, iter_next() will return _DONE. */ 5816 goto out; 5817 } 5818 5819 ent = np->rn_cchain[nip->rni_clevel]; 5820 assert(ent != NULL); 5821 5822 if (rc_node_check_and_lock(ent) == REP_PROTOCOL_SUCCESS) 5823 break; 5824 5825 /* Someone deleted it, so try the next one. */ 5826 } 5827 5828 res = rc_node_fill_children(ent, type); 5829 5830 if (res == REP_PROTOCOL_SUCCESS) { 5831 nip->rni_iter = uu_list_walk_start(ent->rn_children, 5832 UU_WALK_ROBUST); 5833 5834 if (nip->rni_iter == NULL) 5835 res = REP_PROTOCOL_FAIL_NO_RESOURCES; 5836 else { 5837 nip->rni_iter_node = ent; 5838 rc_node_hold_other(ent); 5839 } 5840 } 5841 5842 if (res != REP_PROTOCOL_SUCCESS) { 5843 (void) pthread_mutex_unlock(&ent->rn_lock); 5844 uu_free(nip); 5845 return (res); 5846 } 5847 5848 (void) pthread_mutex_unlock(&ent->rn_lock); 5849 } 5850 5851 out: 5852 rc_node_hold(np); /* released by rc_iter_end() */ 5853 nip->rni_parent = np; 5854 nip->rni_type = type; 5855 nip->rni_filter = (filter != NULL)? filter : rc_iter_null_filter; 5856 nip->rni_filter_arg = arg; 5857 *resp = nip; 5858 return (REP_PROTOCOL_SUCCESS); 5859 } 5860 5861 static void 5862 rc_iter_end(rc_node_iter_t *iter) 5863 { 5864 rc_node_t *np = iter->rni_parent; 5865 5866 if (iter->rni_clevel >= 0) 5867 np = np->rn_cchain[iter->rni_clevel]; 5868 5869 assert(MUTEX_HELD(&np->rn_lock)); 5870 if (iter->rni_iter != NULL) 5871 uu_list_walk_end(iter->rni_iter); 5872 iter->rni_iter = NULL; 5873 5874 (void) pthread_mutex_unlock(&np->rn_lock); 5875 rc_node_rele(iter->rni_parent); 5876 if (iter->rni_iter_node != NULL) 5877 rc_node_rele_other(iter->rni_iter_node); 5878 } 5879 5880 /* 5881 * Fails with 5882 * _NOT_SET - npp is reset 5883 * _DELETED - npp's node has been deleted 5884 * _NOT_APPLICABLE - npp's node is not a property 5885 * _NO_RESOURCES - out of memory 5886 */ 5887 static int 5888 rc_node_setup_value_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp) 5889 { 5890 rc_node_t *np; 5891 5892 rc_node_iter_t *nip; 5893 5894 assert(*iterp == NULL); 5895 5896 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); 5897 5898 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) { 5899 (void) pthread_mutex_unlock(&np->rn_lock); 5900 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE); 5901 } 5902 5903 nip = uu_zalloc(sizeof (*nip)); 5904 if (nip == NULL) { 5905 (void) pthread_mutex_unlock(&np->rn_lock); 5906 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 5907 } 5908 5909 nip->rni_parent = np; 5910 nip->rni_iter = NULL; 5911 nip->rni_clevel = -1; 5912 nip->rni_type = REP_PROTOCOL_ENTITY_VALUE; 5913 nip->rni_offset = 0; 5914 nip->rni_last_offset = 0; 5915 5916 rc_node_hold_locked(np); 5917 5918 *iterp = nip; 5919 (void) pthread_mutex_unlock(&np->rn_lock); 5920 5921 return (REP_PROTOCOL_SUCCESS); 5922 } 5923 5924 /* 5925 * Returns: 5926 * _NO_RESOURCES - out of memory 5927 * _NOT_SET - npp is reset 5928 * _DELETED - npp's node has been deleted 5929 * _TYPE_MISMATCH - npp's 
node is not a property 5930 * _NOT_FOUND - property has no values 5931 * _TRUNCATED - property has >1 values (first is written into out) 5932 * _SUCCESS - property has 1 value (which is written into out) 5933 * _PERMISSION_DENIED - no authorization to read property value(s) 5934 * 5935 * We shorten *sz_out to not include anything after the final '\0'. 5936 */ 5937 int 5938 rc_node_get_property_value(rc_node_ptr_t *npp, 5939 struct rep_protocol_value_response *out, size_t *sz_out) 5940 { 5941 rc_node_t *np; 5942 size_t w; 5943 int ret; 5944 5945 assert(*sz_out == sizeof (*out)); 5946 5947 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp); 5948 ret = rc_node_property_may_read(np); 5949 rc_node_rele(np); 5950 5951 if (ret != REP_PROTOCOL_SUCCESS) 5952 return (ret); 5953 5954 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); 5955 5956 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) { 5957 (void) pthread_mutex_unlock(&np->rn_lock); 5958 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH); 5959 } 5960 5961 if (np->rn_values_size == 0) { 5962 (void) pthread_mutex_unlock(&np->rn_lock); 5963 return (REP_PROTOCOL_FAIL_NOT_FOUND); 5964 } 5965 out->rpr_type = np->rn_valtype; 5966 w = strlcpy(out->rpr_value, &np->rn_values[0], 5967 sizeof (out->rpr_value)); 5968 5969 if (w >= sizeof (out->rpr_value)) 5970 backend_panic("value too large"); 5971 5972 *sz_out = offsetof(struct rep_protocol_value_response, 5973 rpr_value[w + 1]); 5974 5975 ret = (np->rn_values_count != 1)? REP_PROTOCOL_FAIL_TRUNCATED : 5976 REP_PROTOCOL_SUCCESS; 5977 (void) pthread_mutex_unlock(&np->rn_lock); 5978 return (ret); 5979 } 5980 5981 int 5982 rc_iter_next_value(rc_node_iter_t *iter, 5983 struct rep_protocol_value_response *out, size_t *sz_out, int repeat) 5984 { 5985 rc_node_t *np = iter->rni_parent; 5986 const char *vals; 5987 size_t len; 5988 5989 size_t start; 5990 size_t w; 5991 int ret; 5992 5993 rep_protocol_responseid_t result; 5994 5995 assert(*sz_out == sizeof (*out)); 5996 5997 (void) memset(out, '\0', *sz_out); 5998 5999 if (iter->rni_type != REP_PROTOCOL_ENTITY_VALUE) 6000 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 6001 6002 RC_NODE_CHECK(np); 6003 ret = rc_node_property_may_read(np); 6004 6005 if (ret != REP_PROTOCOL_SUCCESS) 6006 return (ret); 6007 6008 RC_NODE_CHECK_AND_LOCK(np); 6009 6010 vals = np->rn_values; 6011 len = np->rn_values_size; 6012 6013 out->rpr_type = np->rn_valtype; 6014 6015 start = (repeat)? iter->rni_last_offset : iter->rni_offset; 6016 6017 if (len == 0 || start >= len) { 6018 result = REP_PROTOCOL_DONE; 6019 *sz_out -= sizeof (out->rpr_value); 6020 } else { 6021 w = strlcpy(out->rpr_value, &vals[start], 6022 sizeof (out->rpr_value)); 6023 6024 if (w >= sizeof (out->rpr_value)) 6025 backend_panic("value too large"); 6026 6027 *sz_out = offsetof(struct rep_protocol_value_response, 6028 rpr_value[w + 1]); 6029 6030 /* 6031 * update the offsets if we're not repeating 6032 */ 6033 if (!repeat) { 6034 iter->rni_last_offset = iter->rni_offset; 6035 iter->rni_offset += (w + 1); 6036 } 6037 6038 result = REP_PROTOCOL_SUCCESS; 6039 } 6040 6041 (void) pthread_mutex_unlock(&np->rn_lock); 6042 return (result); 6043 } 6044 6045 /* 6046 * Entry point for ITER_START from client.c. Validate the arguments & call 6047 * rc_iter_create(). 
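 *
 * For example (illustrative): a request with type ==
 * REP_PROTOCOL_ENTITY_PROPERTYGRP and flags == RP_ITER_START_PGTYPE
 * must carry a non-empty pattern naming the pg type to match (e.g.
 * "framework"), while an RP_ITER_START_ALL request must not carry a
 * pattern at all; either mismatch is rejected with _BAD_REQUEST before
 * an iterator is created.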
6048 * 6049 * Fails with 6050 * _NOT_SET 6051 * _DELETED 6052 * _TYPE_MISMATCH - np cannot carry type children 6053 * _BAD_REQUEST - flags is invalid 6054 * pattern is invalid 6055 * _NO_RESOURCES 6056 * _INVALID_TYPE 6057 * _TYPE_MISMATCH - *npp cannot have children of type 6058 * _BACKEND_ACCESS 6059 */ 6060 int 6061 rc_node_setup_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp, 6062 uint32_t type, uint32_t flags, const char *pattern) 6063 { 6064 rc_node_t *np; 6065 rc_iter_filter_func *f = NULL; 6066 int rc; 6067 6068 RC_NODE_PTR_GET_CHECK(np, npp); 6069 6070 if (pattern != NULL && pattern[0] == '\0') 6071 pattern = NULL; 6072 6073 if (type == REP_PROTOCOL_ENTITY_VALUE) { 6074 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) 6075 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH); 6076 if (flags != RP_ITER_START_ALL || pattern != NULL) 6077 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 6078 6079 rc = rc_node_setup_value_iter(npp, iterp); 6080 assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE); 6081 return (rc); 6082 } 6083 6084 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) != 6085 REP_PROTOCOL_SUCCESS) 6086 return (rc); 6087 6088 if (((flags & RP_ITER_START_FILT_MASK) == RP_ITER_START_ALL) ^ 6089 (pattern == NULL)) 6090 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 6091 6092 /* Composition only works for instances & snapshots. */ 6093 if ((flags & RP_ITER_START_COMPOSED) && 6094 (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE && 6095 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT)) 6096 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 6097 6098 if (pattern != NULL) { 6099 if ((rc = rc_check_type_name(type, pattern)) != 6100 REP_PROTOCOL_SUCCESS) 6101 return (rc); 6102 pattern = strdup(pattern); 6103 if (pattern == NULL) 6104 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 6105 } 6106 6107 switch (flags & RP_ITER_START_FILT_MASK) { 6108 case RP_ITER_START_ALL: 6109 f = NULL; 6110 break; 6111 case RP_ITER_START_EXACT: 6112 f = rc_iter_filter_name; 6113 break; 6114 case RP_ITER_START_PGTYPE: 6115 if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) { 6116 free((void *)pattern); 6117 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 6118 } 6119 f = rc_iter_filter_type; 6120 break; 6121 default: 6122 free((void *)pattern); 6123 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 6124 } 6125 6126 rc = rc_iter_create(iterp, np, type, f, (void *)pattern, 6127 flags & RP_ITER_START_COMPOSED); 6128 if (rc != REP_PROTOCOL_SUCCESS && pattern != NULL) 6129 free((void *)pattern); 6130 6131 return (rc); 6132 } 6133 6134 /* 6135 * Do uu_list_walk_next(iter->rni_iter) until we find a child which matches 6136 * the filter. 6137 * For composed iterators, then check to see if there's an overlapping entity 6138 * (see embedded comments). If we reach the end of the list, start over at 6139 * the next level. 
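 *
 * For example (illustrative): when iterating the property groups of a
 * composed snapshot whose two snaplevels each have a pg named
 * "general", the higher-level pg is returned (composed with its
 * lower-level counterpart via rc_node_setup_cpg()), and the
 * lower-level pg is skipped when the walk reaches it at the next
 * level.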
6140 * 6141 * Returns 6142 * _BAD_REQUEST - iter walks values 6143 * _TYPE_MISMATCH - iter does not walk type entities 6144 * _DELETED - parent was deleted 6145 * _NO_RESOURCES 6146 * _INVALID_TYPE - type is invalid 6147 * _DONE 6148 * _SUCCESS 6149 * 6150 * For composed property group iterators, can also return 6151 * _TYPE_MISMATCH - parent cannot have type children 6152 */ 6153 int 6154 rc_iter_next(rc_node_iter_t *iter, rc_node_ptr_t *out, uint32_t type) 6155 { 6156 rc_node_t *np = iter->rni_parent; 6157 rc_node_t *res; 6158 int rc; 6159 6160 if (iter->rni_type == REP_PROTOCOL_ENTITY_VALUE) 6161 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 6162 6163 if (iter->rni_iter == NULL) { 6164 rc_node_clear(out, 0); 6165 return (REP_PROTOCOL_DONE); 6166 } 6167 6168 if (iter->rni_type != type) { 6169 rc_node_clear(out, 0); 6170 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH); 6171 } 6172 6173 (void) pthread_mutex_lock(&np->rn_lock); /* held by _iter_create() */ 6174 6175 if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) { 6176 (void) pthread_mutex_unlock(&np->rn_lock); 6177 rc_node_clear(out, 1); 6178 return (REP_PROTOCOL_FAIL_DELETED); 6179 } 6180 6181 if (iter->rni_clevel >= 0) { 6182 /* Composed iterator. Iterate over appropriate level. */ 6183 (void) pthread_mutex_unlock(&np->rn_lock); 6184 np = np->rn_cchain[iter->rni_clevel]; 6185 /* 6186 * If iter->rni_parent is an instance or a snapshot, np must 6187 * be valid since iter holds iter->rni_parent & possible 6188 * levels (service, instance, snaplevel) cannot be destroyed 6189 * while rni_parent is held. If iter->rni_parent is 6190 * a composed property group then rc_node_setup_cpg() put 6191 * a hold on np. 6192 */ 6193 6194 (void) pthread_mutex_lock(&np->rn_lock); 6195 6196 if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) { 6197 (void) pthread_mutex_unlock(&np->rn_lock); 6198 rc_node_clear(out, 1); 6199 return (REP_PROTOCOL_FAIL_DELETED); 6200 } 6201 } 6202 6203 assert(np->rn_flags & RC_NODE_HAS_CHILDREN); 6204 6205 for (;;) { 6206 res = uu_list_walk_next(iter->rni_iter); 6207 if (res == NULL) { 6208 rc_node_t *parent = iter->rni_parent; 6209 6210 #if COMPOSITION_DEPTH == 2 6211 if (iter->rni_clevel < 0 || iter->rni_clevel == 1) { 6212 /* release walker and lock */ 6213 rc_iter_end(iter); 6214 break; 6215 } 6216 6217 /* Stop walking current level. */ 6218 uu_list_walk_end(iter->rni_iter); 6219 iter->rni_iter = NULL; 6220 (void) pthread_mutex_unlock(&np->rn_lock); 6221 rc_node_rele_other(iter->rni_iter_node); 6222 iter->rni_iter_node = NULL; 6223 6224 /* Start walking next level. */ 6225 ++iter->rni_clevel; 6226 np = parent->rn_cchain[iter->rni_clevel]; 6227 assert(np != NULL); 6228 #else 6229 #error This code must be updated. 
6230 #endif 6231 6232 (void) pthread_mutex_lock(&np->rn_lock); 6233 6234 rc = rc_node_fill_children(np, iter->rni_type); 6235 6236 if (rc == REP_PROTOCOL_SUCCESS) { 6237 iter->rni_iter = 6238 uu_list_walk_start(np->rn_children, 6239 UU_WALK_ROBUST); 6240 6241 if (iter->rni_iter == NULL) 6242 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 6243 else { 6244 iter->rni_iter_node = np; 6245 rc_node_hold_other(np); 6246 } 6247 } 6248 6249 if (rc != REP_PROTOCOL_SUCCESS) { 6250 (void) pthread_mutex_unlock(&np->rn_lock); 6251 rc_node_clear(out, 0); 6252 return (rc); 6253 } 6254 6255 continue; 6256 } 6257 6258 if (res->rn_id.rl_type != type || 6259 !iter->rni_filter(res, iter->rni_filter_arg)) 6260 continue; 6261 6262 /* 6263 * If we're composed and not at the top level, check to see if 6264 * there's an entity at a higher level with the same name. If 6265 * so, skip this one. 6266 */ 6267 if (iter->rni_clevel > 0) { 6268 rc_node_t *ent = iter->rni_parent->rn_cchain[0]; 6269 rc_node_t *pg; 6270 6271 #if COMPOSITION_DEPTH == 2 6272 assert(iter->rni_clevel == 1); 6273 6274 (void) pthread_mutex_unlock(&np->rn_lock); 6275 (void) pthread_mutex_lock(&ent->rn_lock); 6276 rc = rc_node_find_named_child(ent, res->rn_name, type, 6277 &pg); 6278 if (rc == REP_PROTOCOL_SUCCESS && pg != NULL) 6279 rc_node_rele(pg); 6280 (void) pthread_mutex_unlock(&ent->rn_lock); 6281 if (rc != REP_PROTOCOL_SUCCESS) { 6282 rc_node_clear(out, 0); 6283 return (rc); 6284 } 6285 (void) pthread_mutex_lock(&np->rn_lock); 6286 6287 /* Make sure np isn't being deleted all of a sudden. */ 6288 if (!rc_node_wait_flag(np, RC_NODE_DYING)) { 6289 (void) pthread_mutex_unlock(&np->rn_lock); 6290 rc_node_clear(out, 1); 6291 return (REP_PROTOCOL_FAIL_DELETED); 6292 } 6293 6294 if (pg != NULL) 6295 /* Keep going. */ 6296 continue; 6297 #else 6298 #error This code must be updated. 6299 #endif 6300 } 6301 6302 /* 6303 * If we're composed, iterating over property groups, and not 6304 * at the bottom level, check to see if there's a pg at lower 6305 * level with the same name. If so, return a cpg. 6306 */ 6307 if (iter->rni_clevel >= 0 && 6308 type == REP_PROTOCOL_ENTITY_PROPERTYGRP && 6309 iter->rni_clevel < COMPOSITION_DEPTH - 1) { 6310 #if COMPOSITION_DEPTH == 2 6311 rc_node_t *pg; 6312 rc_node_t *ent = iter->rni_parent->rn_cchain[1]; 6313 6314 rc_node_hold(res); /* While we drop np->rn_lock */ 6315 6316 (void) pthread_mutex_unlock(&np->rn_lock); 6317 (void) pthread_mutex_lock(&ent->rn_lock); 6318 rc = rc_node_find_named_child(ent, res->rn_name, type, 6319 &pg); 6320 /* holds pg if not NULL */ 6321 (void) pthread_mutex_unlock(&ent->rn_lock); 6322 if (rc != REP_PROTOCOL_SUCCESS) { 6323 rc_node_rele(res); 6324 rc_node_clear(out, 0); 6325 return (rc); 6326 } 6327 6328 (void) pthread_mutex_lock(&np->rn_lock); 6329 if (!rc_node_wait_flag(np, RC_NODE_DYING)) { 6330 (void) pthread_mutex_unlock(&np->rn_lock); 6331 rc_node_rele(res); 6332 if (pg != NULL) 6333 rc_node_rele(pg); 6334 rc_node_clear(out, 1); 6335 return (REP_PROTOCOL_FAIL_DELETED); 6336 } 6337 6338 if (pg == NULL) { 6339 rc_node_rele(res); 6340 } else { 6341 rc_node_t *cpg; 6342 6343 /* Keep res held for rc_node_setup_cpg(). 
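 * res (the pg found at the upper level) and pg (the same-named pg at
 * the lower level) are handed to rc_node_setup_cpg() below to build
 * the composed view.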
*/ 6344 6345 cpg = rc_node_alloc(); 6346 if (cpg == NULL) { 6347 (void) pthread_mutex_unlock( 6348 &np->rn_lock); 6349 rc_node_rele(res); 6350 rc_node_rele(pg); 6351 rc_node_clear(out, 0); 6352 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 6353 } 6354 6355 switch (rc_node_setup_cpg(cpg, res, pg)) { 6356 case REP_PROTOCOL_SUCCESS: 6357 res = cpg; 6358 break; 6359 6360 case REP_PROTOCOL_FAIL_TYPE_MISMATCH: 6361 /* Nevermind. */ 6362 rc_node_destroy(cpg); 6363 rc_node_rele(pg); 6364 rc_node_rele(res); 6365 break; 6366 6367 case REP_PROTOCOL_FAIL_NO_RESOURCES: 6368 rc_node_destroy(cpg); 6369 (void) pthread_mutex_unlock( 6370 &np->rn_lock); 6371 rc_node_rele(res); 6372 rc_node_rele(pg); 6373 rc_node_clear(out, 0); 6374 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 6375 6376 default: 6377 assert(0); 6378 abort(); 6379 } 6380 } 6381 #else 6382 #error This code must be updated. 6383 #endif 6384 } 6385 6386 rc_node_hold(res); 6387 (void) pthread_mutex_unlock(&np->rn_lock); 6388 break; 6389 } 6390 rc_node_assign(out, res); 6391 6392 if (res == NULL) 6393 return (REP_PROTOCOL_DONE); 6394 rc_node_rele(res); 6395 return (REP_PROTOCOL_SUCCESS); 6396 } 6397 6398 void 6399 rc_iter_destroy(rc_node_iter_t **nipp) 6400 { 6401 rc_node_iter_t *nip = *nipp; 6402 rc_node_t *np; 6403 6404 if (nip == NULL) 6405 return; /* already freed */ 6406 6407 np = nip->rni_parent; 6408 6409 if (nip->rni_filter_arg != NULL) 6410 free(nip->rni_filter_arg); 6411 nip->rni_filter_arg = NULL; 6412 6413 if (nip->rni_type == REP_PROTOCOL_ENTITY_VALUE || 6414 nip->rni_iter != NULL) { 6415 if (nip->rni_clevel < 0) 6416 (void) pthread_mutex_lock(&np->rn_lock); 6417 else 6418 (void) pthread_mutex_lock( 6419 &np->rn_cchain[nip->rni_clevel]->rn_lock); 6420 rc_iter_end(nip); /* release walker and lock */ 6421 } 6422 nip->rni_parent = NULL; 6423 6424 uu_free(nip); 6425 *nipp = NULL; 6426 } 6427 6428 int 6429 rc_node_setup_tx(rc_node_ptr_t *npp, rc_node_ptr_t *txp) 6430 { 6431 rc_node_t *np; 6432 permcheck_t *pcp; 6433 int ret; 6434 rc_auth_state_t authorized = RC_AUTH_UNKNOWN; 6435 char *auth_string = NULL; 6436 6437 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp); 6438 6439 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) { 6440 rc_node_rele(np); 6441 np = np->rn_cchain[0]; 6442 RC_NODE_CHECK_AND_HOLD(np); 6443 } 6444 6445 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) { 6446 rc_node_rele(np); 6447 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH); 6448 } 6449 6450 if (np->rn_id.rl_ids[ID_SNAPSHOT] != 0) { 6451 rc_node_rele(np); 6452 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED); 6453 } 6454 6455 #ifdef NATIVE_BUILD 6456 if (client_is_privileged()) 6457 goto skip_checks; 6458 rc_node_rele(np); 6459 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED); 6460 #else 6461 if (is_main_repository == 0) 6462 goto skip_checks; 6463 6464 /* permission check */ 6465 pcp = pc_create(); 6466 if (pcp == NULL) { 6467 rc_node_rele(np); 6468 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 6469 } 6470 6471 if (np->rn_id.rl_ids[ID_INSTANCE] != 0 && /* instance pg */ 6472 ((strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0 && 6473 strcmp(np->rn_type, AUTH_PG_ACTIONS_TYPE) == 0) || 6474 (strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 && 6475 strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0))) { 6476 rc_node_t *instn; 6477 6478 /* solaris.smf.manage can be used. */ 6479 ret = perm_add_enabling(pcp, AUTH_MANAGE); 6480 6481 if (ret != REP_PROTOCOL_SUCCESS) { 6482 pc_free(pcp); 6483 rc_node_rele(np); 6484 return (ret); 6485 } 6486 6487 /* general/action_authorization values can be used. 
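 * The parent instance is fetched below so that the values of its
 * general/action_authorization property can be added to the set of
 * acceptable authorizations via perm_add_inst_action_auth().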
*/ 6488 ret = rc_node_parent(np, &instn); 6489 if (ret != REP_PROTOCOL_SUCCESS) { 6490 assert(ret == REP_PROTOCOL_FAIL_DELETED); 6491 rc_node_rele(np); 6492 pc_free(pcp); 6493 return (REP_PROTOCOL_FAIL_DELETED); 6494 } 6495 6496 assert(instn->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE); 6497 6498 ret = perm_add_inst_action_auth(pcp, instn); 6499 rc_node_rele(instn); 6500 switch (ret) { 6501 case REP_PROTOCOL_SUCCESS: 6502 break; 6503 6504 case REP_PROTOCOL_FAIL_DELETED: 6505 case REP_PROTOCOL_FAIL_NO_RESOURCES: 6506 rc_node_rele(np); 6507 pc_free(pcp); 6508 return (ret); 6509 6510 default: 6511 bad_error("perm_add_inst_action_auth", ret); 6512 } 6513 6514 if (strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0) 6515 authorized = RC_AUTH_PASSED; /* No check on commit. */ 6516 } else { 6517 ret = perm_add_enabling(pcp, AUTH_MODIFY); 6518 6519 if (ret == REP_PROTOCOL_SUCCESS) { 6520 /* propertygroup-type-specific authorization */ 6521 /* no locking because rn_type won't change anyway */ 6522 const char * const auth = 6523 perm_auth_for_pgtype(np->rn_type); 6524 6525 if (auth != NULL) 6526 ret = perm_add_enabling(pcp, auth); 6527 } 6528 6529 if (ret == REP_PROTOCOL_SUCCESS) 6530 /* propertygroup/transaction-type-specific auths */ 6531 ret = 6532 perm_add_enabling_values(pcp, np, AUTH_PROP_VALUE); 6533 6534 if (ret == REP_PROTOCOL_SUCCESS) 6535 ret = 6536 perm_add_enabling_values(pcp, np, AUTH_PROP_MODIFY); 6537 6538 /* AUTH_MANAGE can manipulate general/AUTH_PROP_ACTION */ 6539 if (ret == REP_PROTOCOL_SUCCESS && 6540 strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 && 6541 strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0) 6542 ret = perm_add_enabling(pcp, AUTH_MANAGE); 6543 6544 if (ret != REP_PROTOCOL_SUCCESS) { 6545 pc_free(pcp); 6546 rc_node_rele(np); 6547 return (ret); 6548 } 6549 } 6550 6551 ret = perm_granted(pcp); 6552 /* 6553 * Copy out the authorization string before freeing pcp. 6554 */ 6555 if (ret >= 0) { 6556 auth_string = strdup(pcp->pc_auth_string); 6557 } 6558 pc_free(pcp); 6559 if ((auth_string == NULL) || (ret < 0)) { 6560 rc_node_rele(np); 6561 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 6562 } 6563 6564 if (ret == 0) { 6565 /* 6566 * If we get here, the authorization failed. 6567 * Unfortunately, we don't have enough information at this 6568 * point to generate the security audit events. We'll only 6569 * get that information when the client tries to commit the 6570 * event. Thus, we'll remember the failed authorization, 6571 * so that we can generate the audit events later. 6572 */ 6573 authorized = RC_AUTH_FAILED; 6574 } 6575 #endif /* NATIVE_BUILD */ 6576 6577 skip_checks: 6578 rc_node_assign(txp, np); 6579 txp->rnp_authorized = authorized; 6580 if (authorized != RC_AUTH_UNKNOWN) { 6581 /* Save the authorization string. */ 6582 if (txp->rnp_auth_string != NULL) 6583 free((void *)txp->rnp_auth_string); 6584 txp->rnp_auth_string = auth_string; 6585 auth_string = NULL; /* Don't free until done with txp. */ 6586 } 6587 6588 rc_node_rele(np); 6589 if (auth_string != NULL) 6590 free(auth_string); 6591 return (REP_PROTOCOL_SUCCESS); 6592 } 6593 6594 /* 6595 * Return 1 if the given transaction commands only modify the values of 6596 * properties other than "modify_authorization". Return -1 if any of the 6597 * commands are invalid, and 0 otherwise. 
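 *
 * For example (illustrative): a transaction consisting solely of
 * ENTRY_CLEAR commands and type-consistent ENTRY_REPLACE commands on
 * ordinary properties yields 1; one that names "modify_authorization",
 * changes a property's type, or uses any other command yields 0; a
 * malformed command size yields -1.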
6598 */ 6599 static int 6600 tx_allow_value(const void *cmds_arg, size_t cmds_sz, rc_node_t *pg) 6601 { 6602 const struct rep_protocol_transaction_cmd *cmds; 6603 uintptr_t loc; 6604 uint32_t sz; 6605 rc_node_t *prop; 6606 boolean_t ok; 6607 6608 assert(!MUTEX_HELD(&pg->rn_lock)); 6609 6610 loc = (uintptr_t)cmds_arg; 6611 6612 while (cmds_sz > 0) { 6613 cmds = (struct rep_protocol_transaction_cmd *)loc; 6614 6615 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE) 6616 return (-1); 6617 6618 sz = cmds->rptc_size; 6619 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE) 6620 return (-1); 6621 6622 sz = TX_SIZE(sz); 6623 if (sz > cmds_sz) 6624 return (-1); 6625 6626 switch (cmds[0].rptc_action) { 6627 case REP_PROTOCOL_TX_ENTRY_CLEAR: 6628 break; 6629 6630 case REP_PROTOCOL_TX_ENTRY_REPLACE: 6631 /* Check type */ 6632 (void) pthread_mutex_lock(&pg->rn_lock); 6633 if (rc_node_find_named_child(pg, 6634 (const char *)cmds[0].rptc_data, 6635 REP_PROTOCOL_ENTITY_PROPERTY, &prop) == 6636 REP_PROTOCOL_SUCCESS) { 6637 ok = (prop != NULL && 6638 prop->rn_valtype == cmds[0].rptc_type); 6639 } else { 6640 /* Return more particular error? */ 6641 ok = B_FALSE; 6642 } 6643 (void) pthread_mutex_unlock(&pg->rn_lock); 6644 if (ok) 6645 break; 6646 return (0); 6647 6648 default: 6649 return (0); 6650 } 6651 6652 if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_MODIFY) 6653 == 0) 6654 return (0); 6655 6656 loc += sz; 6657 cmds_sz -= sz; 6658 } 6659 6660 return (1); 6661 } 6662 6663 /* 6664 * Return 1 if any of the given transaction commands affect 6665 * "action_authorization". Return -1 if any of the commands are invalid and 6666 * 0 in all other cases. 6667 */ 6668 static int 6669 tx_modifies_action(const void *cmds_arg, size_t cmds_sz) 6670 { 6671 const struct rep_protocol_transaction_cmd *cmds; 6672 uintptr_t loc; 6673 uint32_t sz; 6674 6675 loc = (uintptr_t)cmds_arg; 6676 6677 while (cmds_sz > 0) { 6678 cmds = (struct rep_protocol_transaction_cmd *)loc; 6679 6680 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE) 6681 return (-1); 6682 6683 sz = cmds->rptc_size; 6684 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE) 6685 return (-1); 6686 6687 sz = TX_SIZE(sz); 6688 if (sz > cmds_sz) 6689 return (-1); 6690 6691 if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_ACTION) 6692 == 0) 6693 return (1); 6694 6695 loc += sz; 6696 cmds_sz -= sz; 6697 } 6698 6699 return (0); 6700 } 6701 6702 /* 6703 * Returns 1 if the transaction commands only modify properties named 6704 * 'enabled'. 
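 * Like tx_allow_value() and tx_modifies_action() above, returns -1 if
 * any of the commands are malformed, and 0 in all other cases.
 *
 * For example (illustrative): the single-command transaction that sets
 * only "enabled" in an instance's AUTH_PG_GENERAL_OVR property group
 * qualifies, so the caller also accepts the instance's action
 * authorization in addition to the normal modify authorizations.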
6705 */ 6706 static int 6707 tx_only_enabled(const void *cmds_arg, size_t cmds_sz) 6708 { 6709 const struct rep_protocol_transaction_cmd *cmd; 6710 uintptr_t loc; 6711 uint32_t sz; 6712 6713 loc = (uintptr_t)cmds_arg; 6714 6715 while (cmds_sz > 0) { 6716 cmd = (struct rep_protocol_transaction_cmd *)loc; 6717 6718 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE) 6719 return (-1); 6720 6721 sz = cmd->rptc_size; 6722 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE) 6723 return (-1); 6724 6725 sz = TX_SIZE(sz); 6726 if (sz > cmds_sz) 6727 return (-1); 6728 6729 if (strcmp((const char *)cmd->rptc_data, AUTH_PROP_ENABLED) 6730 != 0) 6731 return (0); 6732 6733 loc += sz; 6734 cmds_sz -= sz; 6735 } 6736 6737 return (1); 6738 } 6739 6740 int 6741 rc_tx_commit(rc_node_ptr_t *txp, const void *cmds, size_t cmds_sz) 6742 { 6743 rc_node_t *np = txp->rnp_node; 6744 rc_node_t *pp; 6745 rc_node_t *nnp; 6746 rc_node_pg_notify_t *pnp; 6747 int rc; 6748 permcheck_t *pcp; 6749 int granted, normal; 6750 char *pg_fmri = NULL; 6751 char *auth_string = NULL; 6752 int auth_status = ADT_SUCCESS; 6753 int auth_ret_value = ADT_SUCCESS; 6754 size_t sz_out; 6755 int tx_flag = 1; 6756 tx_commit_data_t *tx_data = NULL; 6757 6758 RC_NODE_CHECK(np); 6759 6760 if ((txp->rnp_authorized != RC_AUTH_UNKNOWN) && 6761 (txp->rnp_auth_string != NULL)) { 6762 auth_string = strdup(txp->rnp_auth_string); 6763 if (auth_string == NULL) 6764 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 6765 } 6766 6767 if ((txp->rnp_authorized == RC_AUTH_UNKNOWN) && 6768 is_main_repository) { 6769 #ifdef NATIVE_BUILD 6770 if (!client_is_privileged()) { 6771 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED); 6772 } 6773 #else 6774 /* permission check: depends on contents of transaction */ 6775 pcp = pc_create(); 6776 if (pcp == NULL) 6777 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 6778 6779 /* If normal is cleared, we won't do the normal checks. */ 6780 normal = 1; 6781 rc = REP_PROTOCOL_SUCCESS; 6782 6783 if (strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 && 6784 strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0) { 6785 /* Touching general[framework]/action_authorization? */ 6786 rc = tx_modifies_action(cmds, cmds_sz); 6787 if (rc == -1) { 6788 pc_free(pcp); 6789 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 6790 } 6791 6792 if (rc) { 6793 /* Yes: only AUTH_MANAGE can be used. 
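 * Clearing 'normal' below makes the commit skip the usual modify/value
 * authorization checks for this transaction.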
*/ 6794 rc = perm_add_enabling(pcp, AUTH_MANAGE); 6795 normal = 0; 6796 } else { 6797 rc = REP_PROTOCOL_SUCCESS; 6798 } 6799 } else if (np->rn_id.rl_ids[ID_INSTANCE] != 0 && 6800 strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 && 6801 strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0) { 6802 rc_node_t *instn; 6803 6804 rc = tx_only_enabled(cmds, cmds_sz); 6805 if (rc == -1) { 6806 pc_free(pcp); 6807 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 6808 } 6809 6810 if (rc) { 6811 rc = rc_node_parent(np, &instn); 6812 if (rc != REP_PROTOCOL_SUCCESS) { 6813 assert(rc == REP_PROTOCOL_FAIL_DELETED); 6814 pc_free(pcp); 6815 return (rc); 6816 } 6817 6818 assert(instn->rn_id.rl_type == 6819 REP_PROTOCOL_ENTITY_INSTANCE); 6820 6821 rc = perm_add_inst_action_auth(pcp, instn); 6822 rc_node_rele(instn); 6823 switch (rc) { 6824 case REP_PROTOCOL_SUCCESS: 6825 break; 6826 6827 case REP_PROTOCOL_FAIL_DELETED: 6828 case REP_PROTOCOL_FAIL_NO_RESOURCES: 6829 pc_free(pcp); 6830 return (rc); 6831 6832 default: 6833 bad_error("perm_add_inst_action_auth", 6834 rc); 6835 } 6836 } else { 6837 rc = REP_PROTOCOL_SUCCESS; 6838 } 6839 } 6840 6841 if (rc == REP_PROTOCOL_SUCCESS && normal) { 6842 rc = perm_add_enabling(pcp, AUTH_MODIFY); 6843 6844 if (rc == REP_PROTOCOL_SUCCESS) { 6845 /* Add pgtype-specific authorization. */ 6846 const char * const auth = 6847 perm_auth_for_pgtype(np->rn_type); 6848 6849 if (auth != NULL) 6850 rc = perm_add_enabling(pcp, auth); 6851 } 6852 6853 /* Add pg-specific modify_authorization auths. */ 6854 if (rc == REP_PROTOCOL_SUCCESS) 6855 rc = perm_add_enabling_values(pcp, np, 6856 AUTH_PROP_MODIFY); 6857 6858 /* If value_authorization values are ok, add them. */ 6859 if (rc == REP_PROTOCOL_SUCCESS) { 6860 rc = tx_allow_value(cmds, cmds_sz, np); 6861 if (rc == -1) 6862 rc = REP_PROTOCOL_FAIL_BAD_REQUEST; 6863 else if (rc) 6864 rc = perm_add_enabling_values(pcp, np, 6865 AUTH_PROP_VALUE); 6866 } 6867 } 6868 6869 if (rc == REP_PROTOCOL_SUCCESS) { 6870 granted = perm_granted(pcp); 6871 if (granted < 0) { 6872 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 6873 } else { 6874 /* 6875 * Copy out the authorization string before 6876 * freeing pcp. 6877 */ 6878 auth_string = strdup(pcp->pc_auth_string); 6879 if (auth_string == NULL) 6880 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 6881 } 6882 } 6883 6884 pc_free(pcp); 6885 6886 if (rc != REP_PROTOCOL_SUCCESS) 6887 goto cleanout; 6888 6889 if (!granted) { 6890 auth_status = ADT_FAILURE; 6891 auth_ret_value = ADT_FAIL_VALUE_AUTH; 6892 tx_flag = 0; 6893 } 6894 #endif /* NATIVE_BUILD */ 6895 } else if (txp->rnp_authorized == RC_AUTH_FAILED) { 6896 auth_status = ADT_FAILURE; 6897 auth_ret_value = ADT_FAIL_VALUE_AUTH; 6898 tx_flag = 0; 6899 } 6900 6901 pg_fmri = malloc(REP_PROTOCOL_FMRI_LEN); 6902 if (pg_fmri == NULL) { 6903 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 6904 goto cleanout; 6905 } 6906 if ((rc = rc_node_get_fmri_or_fragment(np, pg_fmri, 6907 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) { 6908 goto cleanout; 6909 } 6910 6911 /* 6912 * Parse the transaction commands into a useful form. 6913 */ 6914 if ((rc = tx_commit_data_new(cmds, cmds_sz, &tx_data)) != 6915 REP_PROTOCOL_SUCCESS) { 6916 goto cleanout; 6917 } 6918 6919 if (tx_flag == 0) { 6920 /* Authorization failed. Generate audit events. 
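 * The commit is then failed with _PERMISSION_DENIED before any
 * repository changes are made.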
*/ 6921 generate_property_events(tx_data, pg_fmri, auth_string, 6922 auth_status, auth_ret_value); 6923 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED; 6924 goto cleanout; 6925 } 6926 6927 nnp = rc_node_alloc(); 6928 if (nnp == NULL) { 6929 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 6930 goto cleanout; 6931 } 6932 6933 nnp->rn_id = np->rn_id; /* structure assignment */ 6934 nnp->rn_hash = np->rn_hash; 6935 nnp->rn_name = strdup(np->rn_name); 6936 nnp->rn_type = strdup(np->rn_type); 6937 nnp->rn_pgflags = np->rn_pgflags; 6938 6939 nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT; 6940 6941 if (nnp->rn_name == NULL || nnp->rn_type == NULL) { 6942 rc_node_destroy(nnp); 6943 rc = REP_PROTOCOL_FAIL_NO_RESOURCES; 6944 goto cleanout; 6945 } 6946 6947 (void) pthread_mutex_lock(&np->rn_lock); 6948 6949 /* 6950 * We must have all of the old properties in the cache, or the 6951 * database deletions could cause inconsistencies. 6952 */ 6953 if ((rc = rc_node_fill_children(np, REP_PROTOCOL_ENTITY_PROPERTY)) != 6954 REP_PROTOCOL_SUCCESS) { 6955 (void) pthread_mutex_unlock(&np->rn_lock); 6956 rc_node_destroy(nnp); 6957 goto cleanout; 6958 } 6959 6960 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) { 6961 (void) pthread_mutex_unlock(&np->rn_lock); 6962 rc_node_destroy(nnp); 6963 rc = REP_PROTOCOL_FAIL_DELETED; 6964 goto cleanout; 6965 } 6966 6967 if (np->rn_flags & RC_NODE_OLD) { 6968 rc_node_rele_flag(np, RC_NODE_USING_PARENT); 6969 (void) pthread_mutex_unlock(&np->rn_lock); 6970 rc_node_destroy(nnp); 6971 rc = REP_PROTOCOL_FAIL_NOT_LATEST; 6972 goto cleanout; 6973 } 6974 6975 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING); 6976 if (pp == NULL) { 6977 /* our parent is gone, we're going next... */ 6978 rc_node_destroy(nnp); 6979 (void) pthread_mutex_lock(&np->rn_lock); 6980 if (np->rn_flags & RC_NODE_OLD) { 6981 (void) pthread_mutex_unlock(&np->rn_lock); 6982 rc = REP_PROTOCOL_FAIL_NOT_LATEST; 6983 goto cleanout; 6984 } 6985 (void) pthread_mutex_unlock(&np->rn_lock); 6986 rc = REP_PROTOCOL_FAIL_DELETED; 6987 goto cleanout; 6988 } 6989 (void) pthread_mutex_unlock(&pp->rn_lock); 6990 6991 /* 6992 * prepare for the transaction 6993 */ 6994 (void) pthread_mutex_lock(&np->rn_lock); 6995 if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) { 6996 (void) pthread_mutex_unlock(&np->rn_lock); 6997 (void) pthread_mutex_lock(&pp->rn_lock); 6998 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING); 6999 (void) pthread_mutex_unlock(&pp->rn_lock); 7000 rc_node_destroy(nnp); 7001 rc = REP_PROTOCOL_FAIL_DELETED; 7002 goto cleanout; 7003 } 7004 nnp->rn_gen_id = np->rn_gen_id; 7005 (void) pthread_mutex_unlock(&np->rn_lock); 7006 7007 /* Sets nnp->rn_gen_id on success. 
*/ 7008 rc = object_tx_commit(&np->rn_id, tx_data, &nnp->rn_gen_id); 7009 7010 (void) pthread_mutex_lock(&np->rn_lock); 7011 if (rc != REP_PROTOCOL_SUCCESS) { 7012 rc_node_rele_flag(np, RC_NODE_IN_TX); 7013 (void) pthread_mutex_unlock(&np->rn_lock); 7014 (void) pthread_mutex_lock(&pp->rn_lock); 7015 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING); 7016 (void) pthread_mutex_unlock(&pp->rn_lock); 7017 rc_node_destroy(nnp); 7018 rc_node_clear(txp, 0); 7019 if (rc == REP_PROTOCOL_DONE) 7020 rc = REP_PROTOCOL_SUCCESS; /* successful empty tx */ 7021 goto cleanout; 7022 } 7023 7024 /* 7025 * Notify waiters 7026 */ 7027 (void) pthread_mutex_lock(&rc_pg_notify_lock); 7028 while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL) 7029 rc_pg_notify_fire(pnp); 7030 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 7031 7032 np->rn_flags |= RC_NODE_OLD; 7033 (void) pthread_mutex_unlock(&np->rn_lock); 7034 7035 rc_notify_remove_node(np); 7036 7037 /* 7038 * replace np with nnp 7039 */ 7040 rc_node_relink_child(pp, np, nnp); 7041 7042 /* 7043 * all done -- clear the transaction. 7044 */ 7045 rc_node_clear(txp, 0); 7046 generate_property_events(tx_data, pg_fmri, auth_string, 7047 auth_status, auth_ret_value); 7048 7049 rc = REP_PROTOCOL_SUCCESS; 7050 7051 cleanout: 7052 free(auth_string); 7053 free(pg_fmri); 7054 tx_commit_data_free(tx_data); 7055 return (rc); 7056 } 7057 7058 void 7059 rc_pg_notify_init(rc_node_pg_notify_t *pnp) 7060 { 7061 uu_list_node_init(pnp, &pnp->rnpn_node, rc_pg_notify_pool); 7062 pnp->rnpn_pg = NULL; 7063 pnp->rnpn_fd = -1; 7064 } 7065 7066 int 7067 rc_pg_notify_setup(rc_node_pg_notify_t *pnp, rc_node_ptr_t *npp, int fd) 7068 { 7069 rc_node_t *np; 7070 7071 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); 7072 7073 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) { 7074 (void) pthread_mutex_unlock(&np->rn_lock); 7075 return (REP_PROTOCOL_FAIL_BAD_REQUEST); 7076 } 7077 7078 /* 7079 * wait for any transaction in progress to complete 7080 */ 7081 if (!rc_node_wait_flag(np, RC_NODE_IN_TX)) { 7082 (void) pthread_mutex_unlock(&np->rn_lock); 7083 return (REP_PROTOCOL_FAIL_DELETED); 7084 } 7085 7086 if (np->rn_flags & RC_NODE_OLD) { 7087 (void) pthread_mutex_unlock(&np->rn_lock); 7088 return (REP_PROTOCOL_FAIL_NOT_LATEST); 7089 } 7090 7091 (void) pthread_mutex_lock(&rc_pg_notify_lock); 7092 rc_pg_notify_fire(pnp); 7093 pnp->rnpn_pg = np; 7094 pnp->rnpn_fd = fd; 7095 (void) uu_list_insert_after(np->rn_pg_notify_list, NULL, pnp); 7096 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 7097 7098 (void) pthread_mutex_unlock(&np->rn_lock); 7099 return (REP_PROTOCOL_SUCCESS); 7100 } 7101 7102 void 7103 rc_pg_notify_fini(rc_node_pg_notify_t *pnp) 7104 { 7105 (void) pthread_mutex_lock(&rc_pg_notify_lock); 7106 rc_pg_notify_fire(pnp); 7107 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 7108 7109 uu_list_node_fini(pnp, &pnp->rnpn_node, rc_pg_notify_pool); 7110 } 7111 7112 void 7113 rc_notify_info_init(rc_notify_info_t *rnip) 7114 { 7115 int i; 7116 7117 uu_list_node_init(rnip, &rnip->rni_list_node, rc_notify_info_pool); 7118 uu_list_node_init(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node, 7119 rc_notify_pool); 7120 7121 rnip->rni_notify.rcn_node = NULL; 7122 rnip->rni_notify.rcn_info = rnip; 7123 7124 bzero(rnip->rni_namelist, sizeof (rnip->rni_namelist)); 7125 bzero(rnip->rni_typelist, sizeof (rnip->rni_typelist)); 7126 7127 (void) pthread_cond_init(&rnip->rni_cv, NULL); 7128 7129 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) { 7130 rnip->rni_namelist[i] = NULL; 7131 rnip->rni_typelist[i] = 
NULL; 7132 } 7133 } 7134 7135 static void 7136 rc_notify_info_insert_locked(rc_notify_info_t *rnip) 7137 { 7138 assert(MUTEX_HELD(&rc_pg_notify_lock)); 7139 7140 assert(!(rnip->rni_flags & RC_NOTIFY_ACTIVE)); 7141 7142 rnip->rni_flags |= RC_NOTIFY_ACTIVE; 7143 (void) uu_list_insert_after(rc_notify_info_list, NULL, rnip); 7144 (void) uu_list_insert_before(rc_notify_list, NULL, &rnip->rni_notify); 7145 } 7146 7147 static void 7148 rc_notify_info_remove_locked(rc_notify_info_t *rnip) 7149 { 7150 rc_notify_t *me = &rnip->rni_notify; 7151 rc_notify_t *np; 7152 7153 assert(MUTEX_HELD(&rc_pg_notify_lock)); 7154 7155 assert(rnip->rni_flags & RC_NOTIFY_ACTIVE); 7156 7157 assert(!(rnip->rni_flags & RC_NOTIFY_DRAIN)); 7158 rnip->rni_flags |= RC_NOTIFY_DRAIN; 7159 (void) pthread_cond_broadcast(&rnip->rni_cv); 7160 7161 (void) uu_list_remove(rc_notify_info_list, rnip); 7162 7163 /* 7164 * clean up any notifications at the beginning of the list 7165 */ 7166 if (uu_list_first(rc_notify_list) == me) { 7167 while ((np = uu_list_next(rc_notify_list, me)) != NULL && 7168 np->rcn_info == NULL) 7169 rc_notify_remove_locked(np); 7170 } 7171 (void) uu_list_remove(rc_notify_list, me); 7172 7173 while (rnip->rni_waiters) { 7174 (void) pthread_cond_broadcast(&rc_pg_notify_cv); 7175 (void) pthread_cond_broadcast(&rnip->rni_cv); 7176 (void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock); 7177 } 7178 7179 rnip->rni_flags &= ~(RC_NOTIFY_DRAIN | RC_NOTIFY_ACTIVE); 7180 } 7181 7182 static int 7183 rc_notify_info_add_watch(rc_notify_info_t *rnip, const char **arr, 7184 const char *name) 7185 { 7186 int i; 7187 int rc; 7188 char *f; 7189 7190 rc = rc_check_type_name(REP_PROTOCOL_ENTITY_PROPERTYGRP, name); 7191 if (rc != REP_PROTOCOL_SUCCESS) 7192 return (rc); 7193 7194 f = strdup(name); 7195 if (f == NULL) 7196 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 7197 7198 (void) pthread_mutex_lock(&rc_pg_notify_lock); 7199 7200 while (rnip->rni_flags & RC_NOTIFY_EMPTYING) 7201 (void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock); 7202 7203 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) 7204 if (arr[i] == NULL) 7205 break; 7206 7207 if (i == RC_NOTIFY_MAX_NAMES) { 7208 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 7209 free(f); 7210 return (REP_PROTOCOL_FAIL_NO_RESOURCES); 7211 } 7212 7213 arr[i] = f; 7214 if (!(rnip->rni_flags & RC_NOTIFY_ACTIVE)) 7215 rc_notify_info_insert_locked(rnip); 7216 7217 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 7218 return (REP_PROTOCOL_SUCCESS); 7219 } 7220 7221 int 7222 rc_notify_info_add_name(rc_notify_info_t *rnip, const char *name) 7223 { 7224 return (rc_notify_info_add_watch(rnip, rnip->rni_namelist, name)); 7225 } 7226 7227 int 7228 rc_notify_info_add_type(rc_notify_info_t *rnip, const char *type) 7229 { 7230 return (rc_notify_info_add_watch(rnip, rnip->rni_typelist, type)); 7231 } 7232 7233 /* 7234 * Wait for and report an event of interest to rnip, a notification client 7235 */ 7236 int 7237 rc_notify_info_wait(rc_notify_info_t *rnip, rc_node_ptr_t *out, 7238 char *outp, size_t sz) 7239 { 7240 rc_notify_t *np; 7241 rc_notify_t *me = &rnip->rni_notify; 7242 rc_node_t *nnp; 7243 rc_notify_delete_t *ndp; 7244 7245 int am_first_info; 7246 7247 if (sz > 0) 7248 outp[0] = 0; 7249 7250 (void) pthread_mutex_lock(&rc_pg_notify_lock); 7251 7252 while ((rnip->rni_flags & (RC_NOTIFY_ACTIVE | RC_NOTIFY_DRAIN)) == 7253 RC_NOTIFY_ACTIVE) { 7254 /* 7255 * If I'm first on the notify list, it is my job to 7256 * clean up any notifications I pass by. 
I can't do that 7257 * if someone is blocking the list from removals, so I 7258 * have to wait until they have all drained. 7259 */ 7260 am_first_info = (uu_list_first(rc_notify_list) == me); 7261 if (am_first_info && rc_notify_in_use) { 7262 rnip->rni_waiters++; 7263 (void) pthread_cond_wait(&rc_pg_notify_cv, 7264 &rc_pg_notify_lock); 7265 rnip->rni_waiters--; 7266 continue; 7267 } 7268 7269 /* 7270 * Search the list for a node of interest. 7271 */ 7272 np = uu_list_next(rc_notify_list, me); 7273 while (np != NULL && !rc_notify_info_interested(rnip, np)) { 7274 rc_notify_t *next = uu_list_next(rc_notify_list, np); 7275 7276 if (am_first_info) { 7277 if (np->rcn_info) { 7278 /* 7279 * Passing another client -- stop 7280 * cleaning up notifications 7281 */ 7282 am_first_info = 0; 7283 } else { 7284 rc_notify_remove_locked(np); 7285 } 7286 } 7287 np = next; 7288 } 7289 7290 /* 7291 * Nothing of interest -- wait for notification 7292 */ 7293 if (np == NULL) { 7294 rnip->rni_waiters++; 7295 (void) pthread_cond_wait(&rnip->rni_cv, 7296 &rc_pg_notify_lock); 7297 rnip->rni_waiters--; 7298 continue; 7299 } 7300 7301 /* 7302 * found something to report -- move myself after the 7303 * notification and process it. 7304 */ 7305 (void) uu_list_remove(rc_notify_list, me); 7306 (void) uu_list_insert_after(rc_notify_list, np, me); 7307 7308 if ((ndp = np->rcn_delete) != NULL) { 7309 (void) strlcpy(outp, ndp->rnd_fmri, sz); 7310 if (am_first_info) 7311 rc_notify_remove_locked(np); 7312 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 7313 rc_node_clear(out, 0); 7314 return (REP_PROTOCOL_SUCCESS); 7315 } 7316 7317 nnp = np->rcn_node; 7318 assert(nnp != NULL); 7319 7320 /* 7321 * We can't bump nnp's reference count without grabbing its 7322 * lock, and rc_pg_notify_lock is a leaf lock. So we 7323 * temporarily block all removals to keep nnp from 7324 * disappearing. 7325 */ 7326 rc_notify_in_use++; 7327 assert(rc_notify_in_use > 0); 7328 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 7329 7330 rc_node_assign(out, nnp); 7331 7332 (void) pthread_mutex_lock(&rc_pg_notify_lock); 7333 assert(rc_notify_in_use > 0); 7334 rc_notify_in_use--; 7335 if (am_first_info) 7336 rc_notify_remove_locked(np); 7337 if (rc_notify_in_use == 0) 7338 (void) pthread_cond_broadcast(&rc_pg_notify_cv); 7339 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 7340 7341 return (REP_PROTOCOL_SUCCESS); 7342 } 7343 /* 7344 * If we're the last one out, let people know it's clear. 
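 * (rc_notify_info_remove_locked() waits for rni_waiters to reach zero
 * while draining, so wake it up once the count drops to zero.)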
7345 */ 7346 if (rnip->rni_waiters == 0) 7347 (void) pthread_cond_broadcast(&rnip->rni_cv); 7348 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 7349 return (REP_PROTOCOL_DONE); 7350 } 7351 7352 static void 7353 rc_notify_info_reset(rc_notify_info_t *rnip) 7354 { 7355 int i; 7356 7357 (void) pthread_mutex_lock(&rc_pg_notify_lock); 7358 if (rnip->rni_flags & RC_NOTIFY_ACTIVE) 7359 rc_notify_info_remove_locked(rnip); 7360 assert(!(rnip->rni_flags & (RC_NOTIFY_DRAIN | RC_NOTIFY_EMPTYING))); 7361 rnip->rni_flags |= RC_NOTIFY_EMPTYING; 7362 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 7363 7364 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) { 7365 if (rnip->rni_namelist[i] != NULL) { 7366 free((void *)rnip->rni_namelist[i]); 7367 rnip->rni_namelist[i] = NULL; 7368 } 7369 if (rnip->rni_typelist[i] != NULL) { 7370 free((void *)rnip->rni_typelist[i]); 7371 rnip->rni_typelist[i] = NULL; 7372 } 7373 } 7374 7375 (void) pthread_mutex_lock(&rc_pg_notify_lock); 7376 rnip->rni_flags &= ~RC_NOTIFY_EMPTYING; 7377 (void) pthread_mutex_unlock(&rc_pg_notify_lock); 7378 } 7379 7380 void 7381 rc_notify_info_fini(rc_notify_info_t *rnip) 7382 { 7383 rc_notify_info_reset(rnip); 7384 7385 uu_list_node_fini(rnip, &rnip->rni_list_node, rc_notify_info_pool); 7386 uu_list_node_fini(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node, 7387 rc_notify_pool); 7388 } 7389
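
/*
 * Illustrative sketch (not part of the original source): one way a
 * hypothetical notification client could drive the interface above.
 * The caller, the rc_node_ptr_t setup, and the watched pg name are
 * assumptions, not taken from this file.
 *
 *	rc_notify_info_t ni;
 *	rc_node_ptr_t result;		// assumed to be set up elsewhere
 *	char fmri[REP_PROTOCOL_FMRI_LEN];
 *
 *	rc_notify_info_init(&ni);
 *	(void) rc_notify_info_add_name(&ni, "general");
 *
 *	// rc_notify_info_wait() blocks until a watched pg changes
 *	// (_SUCCESS, node assigned to 'result'), something watched is
 *	// deleted (_SUCCESS, 'fmri' filled in, 'result' cleared), or the
 *	// info structure is reset or torn down (_DONE).
 *	while (rc_notify_info_wait(&ni, &result, fmri, sizeof (fmri)) ==
 *	    REP_PROTOCOL_SUCCESS) {
 *		// ... deliver the event, then release 'result' ...
 *	}
 *
 *	rc_notify_info_fini(&ni);
 */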