1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
25 */
26
27 /*
28 * rc_node.c - In-memory SCF object management
29 *
30 * This layer manages the in-memory cache (the Repository Cache) of SCF
31 * data. Read requests are usually satisfied from here, but may require
32 * load calls to the "object" layer. Modify requests always write-through
33 * to the object layer.
34 *
35 * SCF data comprises scopes, services, instances, snapshots, snaplevels,
36 * property groups, properties, and property values. All but the last are
37 * known here as "entities" and are represented by rc_node_t data
38 * structures. (Property values are kept in the rn_values member of the
39 * respective property, not as separate objects.) All entities besides
40 * the "localhost" scope have some entity as a parent, and therefore form
41 * a tree.
42 *
43 * The entity tree is rooted at rc_scope, which rc_node_init() initializes to
44 * the "localhost" scope. The tree is filled in from the database on-demand
45 * by rc_node_fill_children().
46 *
47 * rc_node_t's are also placed in the cache_hash[] hash table, for rapid
48 * lookup.
49 *
50 * Multiple threads may service client requests, so access to each
51 * rc_node_t is synchronized by its rn_lock member. Some fields are
52 * protected by bits in the rn_flags field instead, to support operations
53 * which need to drop rn_lock, for example to respect locking order. Such
54 * flags should be manipulated with the rc_node_{hold,rele}_flag()
55 * functions.
56 *
57 * We track references to nodes to tell when they can be free()d. rn_refs
58 * should be incremented with rc_node_hold() on the creation of client
59 * references (rc_node_ptr_t's and rc_iter_t's). rn_erefs ("ephemeral
60 * references") should be incremented when a pointer is read into a local
61 * variable of a thread, with rc_node_hold_ephemeral_locked(). This
62 * hasn't been fully implemented, however, so rc_node_rele() tolerates
63 * rn_erefs being 0. Some code which predates rn_erefs counts ephemeral
64 * references in rn_refs. Other references are tracked by the
65 * rn_other_refs field and the RC_NODE_DEAD, RC_NODE_IN_PARENT,
66 * RC_NODE_OLD, and RC_NODE_ON_FORMER flags.
67 *
68 * Locking rules: To dereference an rc_node_t * (usually to lock it), you must
69 * have a hold (rc_node_hold()) on it or otherwise be sure that it hasn't been
70 * rc_node_destroy()ed (hold a lock on its parent or child, hold a flag,
71 * etc.). Once you have locked an rc_node_t you must check its rn_flags for
72 * RC_NODE_DEAD before you can use it. This is usually done with the
73 * rc_node_{wait,hold}_flag() functions (often via the rc_node_check_*()
74 * functions & RC_NODE_*() macros), which fail if the object has died.
75 *
76 * When a transactional node (property group or snapshot) is updated,
77 * a new node takes the place of the old node in the global hash and the
78 * old node is hung off of the rn_former list of the new node. At the
79 * same time, all of its children have their rn_parent_ref pointer set,
80 * and any holds they have are reflected in the old node's rn_other_refs
81 * count. This is automatically kept up to date until the final reference
82 * to the subgraph is dropped, at which point the node is unrefed and
83 * destroyed, along with all of its children.
84 *
85 * Because name service lookups may take a long time and, more importantly
86 * may trigger additional accesses to the repository, perm_granted() must be
87 * called without holding any locks.
88 *
89 * An ITER_START for a non-ENTITY_VALUE induces an rc_node_fill_children()
90 * call via rc_node_setup_iter() to populate the rn_children uu_list of the
91 * rc_node_t * in question and a call to uu_list_walk_start() on that list. For
92 * ITER_READ, rc_iter_next() uses uu_list_walk_next() to find the next
 * appropriate child.
94 *
95 * An ITER_START for an ENTITY_VALUE makes sure the node has its values
96 * filled, and sets up the iterator. An ITER_READ_VALUE just copies out
97 * the proper values and updates the offset information.
98 *
99 * To allow aliases, snapshots are implemented with a level of indirection.
100 * A snapshot rc_node_t has a snapid which refers to an rc_snapshot_t in
101 * snapshot.c which contains the authoritative snaplevel information. The
102 * snapid is "assigned" by rc_attach_snapshot().
103 *
104 * We provide the client layer with rc_node_ptr_t's to reference objects.
105 * Objects referred to by them are automatically held & released by
106 * rc_node_assign() & rc_node_clear(). The RC_NODE_PTR_*() macros are used at
107 * client.c entry points to read the pointers. They fetch the pointer to the
108 * object, return (from the function) if it is dead, and lock, hold, or hold
109 * a flag of the object.
110 */
111
112 /*
113 * Permission checking is authorization-based: some operations may only
114 * proceed if the user has been assigned at least one of a set of
115 * authorization strings. The set of enabling authorizations depends on the
116 * operation and the target object. The set of authorizations assigned to
117 * a user is determined by an algorithm defined in libsecdb.
118 *
119 * The fastest way to decide whether the two sets intersect is by entering the
120 * strings into a hash table and detecting collisions, which takes linear time
121 * in the total size of the sets. Except for the authorization patterns which
122 * may be assigned to users, which without advanced pattern-matching
123 * algorithms will take O(n) in the number of enabling authorizations, per
124 * pattern.
125 *
126 * We can achieve some practical speed-ups by noting that if we enter all of
127 * the authorizations from one of the sets into the hash table we can merely
128 * check the elements of the second set for existence without adding them.
129 * This reduces memory requirements and hash table clutter. The enabling set
130 * is well suited for this because it is internal to configd (for now, at
131 * least). Combine this with short-circuiting and we can even minimize the
132 * number of queries to the security databases (user_attr & prof_attr).
133 *
134 * To force this usage onto clients we provide functions for adding
135 * authorizations to the enabling set of a permission context structure
 * (perm_add_*()) and one to decide whether the user associated with the
137 * current door call client possesses any of them (perm_granted()).
138 *
139 * At some point, a generic version of this should move to libsecdb.
140 *
141 * While entering the enabling strings into the hash table, we keep track
142 * of which is the most specific for use in generating auditing events.
143 * See the "Collecting the Authorization String" section of the "SMF Audit
144 * Events" block comment below.
145 */
146
147 /*
148 * Composition is the combination of sets of properties. The sets are ordered
149 * and properties in higher sets obscure properties of the same name in lower
150 * sets. Here we present a composed view of an instance's properties as the
151 * union of its properties and its service's properties. Similarly the
152 * properties of snaplevels are combined to form a composed view of the
153 * properties of a snapshot (which should match the composed view of the
154 * properties of the instance when the snapshot was taken).
155 *
156 * In terms of the client interface, the client may request that a property
157 * group iterator for an instance or snapshot be composed. Property groups
158 * traversed by such an iterator may not have the target entity as a parent.
159 * Similarly, the properties traversed by a property iterator for those
160 * property groups may not have the property groups iterated as parents.
161 *
162 * Implementation requires that iterators for instances and snapshots be
163 * composition-savvy, and that we have a "composed property group" entity
164 * which represents the composition of a number of property groups. Iteration
165 * over "composed property groups" yields properties which may have different
166 * parents, but for all other operations a composed property group behaves
167 * like the top-most property group it represents.
168 *
169 * The implementation is based on the rn_cchain[] array of rc_node_t pointers
170 * in rc_node_t. For instances, the pointers point to the instance and its
171 * parent service. For snapshots they point to the child snaplevels, and for
172 * composed property groups they point to property groups. A composed
173 * iterator carries an index into rn_cchain[]. Thus most of the magic ends up
 * in the rc_iter_*() code.
175 */
176 /*
177 * SMF Audit Events:
178 * ================
179 *
180 * To maintain security, SMF generates audit events whenever
181 * privileged operations are attempted. See the System Administration
182 * Guide:Security Services answerbook for a discussion of the Solaris
183 * audit system.
184 *
185 * The SMF audit event codes are defined in adt_event.h by symbols
186 * starting with ADT_smf_ and are described in audit_event.txt. The
187 * audit record structures are defined in the SMF section of adt.xml.
188 * adt.xml is used to automatically generate adt_event.h which
189 * contains the definitions that we code to in this file. For the
190 * most part the audit events map closely to actions that you would
191 * perform with svcadm or svccfg, but there are some special cases
192 * which we'll discuss later.
193 *
194 * The software associated with SMF audit events falls into three
195 * categories:
196 * - collecting information to be written to the audit
197 * records
198 * - using the adt_* functions in
199 * usr/src/lib/libbsm/common/adt.c to generate the audit
200 * records.
201 * - handling special cases
202 *
203 * Collecting Information:
204 * ----------------------
205 *
206 * Most all of the audit events require the FMRI of the affected
207 * object and the authorization string that was used. The one
208 * exception is ADT_smf_annotation which we'll talk about later.
209 *
210 * Collecting the FMRI:
211 *
212 * The rc_node structure has a member called rn_fmri which points to
213 * its FMRI. This is initialized by a call to rc_node_build_fmri()
214 * when the node's parent is established. The reason for doing it
215 * at this time is that a node's FMRI is basically the concatenation
216 * of the parent's FMRI and the node's name with the appropriate
217 * decoration. rc_node_build_fmri() does this concatenation and
218 * decorating. It is called from rc_node_link_child() and
219 * rc_node_relink_child() where a node is linked to its parent.
220 *
221 * rc_node_get_fmri_or_fragment() is called to retrieve a node's FMRI
222 * when it is needed. It returns rn_fmri if it is set. If the node
223 * is at the top level, however, rn_fmri won't be set because it was
224 * never linked to a parent. In this case,
225 * rc_node_get_fmri_or_fragment() constructs an FMRI fragment based on
226 * its node type and its name, rn_name.
227 *
228 * Collecting the Authorization String:
229 *
230 * Naturally, the authorization string is captured during the
231 * authorization checking process. Acceptable authorization strings
232 * are added to a permcheck_t hash table as noted in the section on
233 * permission checking above. Once all entries have been added to the
234 * hash table, perm_granted() is called. If the client is authorized,
235 * perm_granted() returns with pc_auth_string of the permcheck_t
236 * structure pointing to the authorization string.
237 *
238 * This works fine if the client is authorized, but what happens if
239 * the client is not authorized? We need to report the required
240 * authorization string. This is the authorization that would have
241 * been used if permission had been granted. perm_granted() will
242 * find no match, so it needs to decide which string in the hash
243 * table to use as the required authorization string. It needs to do
244 * this, because configd is still going to generate an event. A
245 * design decision was made to use the most specific authorization
246 * in the hash table. The pc_auth_type enum designates the
247 * specificity of an authorization string. For example, an
248 * authorization string that is declared in an instance PG is more
249 * specific than one that is declared in a service PG.
250 *
251 * The pc_add() function keeps track of the most specific
252 * authorization in the hash table. It does this using the
253 * pc_specific and pc_specific_type members of the permcheck
254 * structure. pc_add() updates these members whenever a more
255 * specific authorization string is added to the hash table. Thus, if
256 * an authorization match is not found, perm_granted() will return
257 * with pc_auth_string in the permcheck_t pointing to the string that
258 * is referenced by pc_specific.
259 *
260 * Generating the Audit Events:
261 * ===========================
262 *
263 * As the functions in this file process requests for clients of
264 * configd, they gather the information that is required for an audit
265 * event. Eventually, the request processing gets to the point where
266 * the authorization is rejected or to the point where the requested
267 * action was attempted. At these two points smf_audit_event() is
268 * called.
269 *
270 * smf_audit_event() takes 4 parameters:
271 * - the event ID which is one of the ADT_smf_* symbols from
272 * adt_event.h.
273 * - status to pass to adt_put_event()
274 * - return value to pass to adt_put_event()
275 * - the event data (see audit_event_data structure)
276 *
277 * All interactions with the auditing software require an audit
278 * session. We use one audit session per configd client. We keep
279 * track of the audit session in the repcache_client structure.
280 * smf_audit_event() calls get_audit_session() to get the session
281 * pointer.
282 *
283 * smf_audit_event() then calls adt_alloc_event() to allocate an
284 * adt_event_data union which is defined in adt_event.h, copies the
285 * data into the appropriate members of the union and calls
286 * adt_put_event() to generate the event.
287 *
288 * Special Cases:
289 * =============
290 *
291 * There are three major types of special cases:
292 *
293 * - gathering event information for each action in a
294 * transaction
295 * - Higher level events represented by special property
296 * group/property name combinations. Many of these are
297 * restarter actions.
298 * - ADT_smf_annotation event
299 *
300 * Processing Transaction Actions:
301 * ------------------------------
302 *
303 * A transaction can contain multiple actions to modify, create or
304 * delete one or more properties. We need to capture information so
305 * that we can generate an event for each property action. The
 * transaction information is stored in a tx_commit_data_t, and
307 * object.c provides accessor functions to retrieve data from this
308 * structure. rc_tx_commit() obtains a tx_commit_data_t by calling
309 * tx_commit_data_new() and passes this to object_tx_commit() to
310 * commit the transaction. Then we call generate_property_events() to
311 * generate an audit event for each property action.
312 *
313 * Special Properties:
314 * ------------------
315 *
316 * There are combinations of property group/property name that are special.
317 * They are special because they have specific meaning to startd. startd
318 * interprets them in a service-independent fashion.
319 * restarter_actions/refresh and general/enabled are two examples of these.
320 * A special event is generated for these properties in addition to the
321 * regular property event described in the previous section. The special
322 * properties are declared as an array of audit_special_prop_item
323 * structures at special_props_list in rc_node.c.
324 *
325 * In the previous section, we mentioned the
326 * generate_property_event() function that generates an event for
327 * every property action. Before generating the event,
328 * generate_property_event() calls special_property_event().
329 * special_property_event() checks to see if the action involves a
330 * special property. If it does, it generates a special audit
331 * event.
332 *
333 * ADT_smf_annotation event:
334 * ------------------------
335 *
336 * This is a special event unlike any other. It allows the svccfg
337 * program to store an annotation in the event log before a series
338 * of transactions is processed. It is used with the import and
339 * apply svccfg commands. svccfg uses the rep_protocol_annotation
340 * message to pass the operation (import or apply) and the file name
341 * to configd. The set_annotation() function in client.c stores
342 * these away in the a repcache_client structure. The address of
343 * this structure is saved in the thread_info structure.
344 *
345 * Before it generates any events, smf_audit_event() calls
346 * smf_annotation_event(). smf_annotation_event() calls
347 * client_annotation_needed() which is defined in client.c. If an
348 * annotation is needed client_annotation_needed() returns the
349 * operation and filename strings that were saved from the
350 * rep_protocol_annotation message. smf_annotation_event() then
351 * generates the ADT_smf_annotation event.
352 */
353
354 #include <assert.h>
355 #include <atomic.h>
356 #include <bsm/adt_event.h>
357 #include <errno.h>
358 #include <libuutil.h>
359 #include <libscf.h>
360 #include <libscf_priv.h>
361 #include <pthread.h>
362 #include <pwd.h>
363 #include <stdio.h>
364 #include <stdlib.h>
365 #include <strings.h>
366 #include <sys/types.h>
367 #include <syslog.h>
368 #include <unistd.h>
369 #include <secdb.h>
370
371 #include "configd.h"
372
/*
 * Authorization strings and the property-group/property names they are
 * read from.  These are used by the permission-checking code below.
 */
#define	AUTH_PREFIX		"solaris.smf."
#define	AUTH_MANAGE		AUTH_PREFIX "manage"
#define	AUTH_MODIFY		AUTH_PREFIX "modify"
#define	AUTH_MODIFY_PREFIX	AUTH_MODIFY "."
#define	AUTH_PG_ACTIONS		SCF_PG_RESTARTER_ACTIONS
#define	AUTH_PG_ACTIONS_TYPE	SCF_PG_RESTARTER_ACTIONS_TYPE
#define	AUTH_PG_GENERAL		SCF_PG_GENERAL
#define	AUTH_PG_GENERAL_TYPE	SCF_PG_GENERAL_TYPE
#define	AUTH_PG_GENERAL_OVR	SCF_PG_GENERAL_OVR
#define	AUTH_PG_GENERAL_OVR_TYPE SCF_PG_GENERAL_OVR_TYPE
#define	AUTH_PROP_ACTION	"action_authorization"
#define	AUTH_PROP_ENABLED	"enabled"
#define	AUTH_PROP_MODIFY	"modify_authorization"
#define	AUTH_PROP_VALUE		"value_authorization"
#define	AUTH_PROP_READ		"read_authorization"

/* Maximum number of entries in rc_type_info_t.rt_valid_children. */
#define	MAX_VALID_CHILDREN 3

/*
 * The ADT_smf_* symbols may not be defined on the build machine.  Because
 * of this, we do not want to compile the _smf_audit_event() function when
 * doing native builds, so the macro expands to nothing in that case.
 */
#ifdef NATIVE_BUILD
#define	smf_audit_event(i, s, r, d)
#else
#define	smf_audit_event(i, s, r, d)	_smf_audit_event(i, s, r, d)
#endif	/* NATIVE_BUILD */
401
/*
 * Per-entity-type metadata, indexed by the REP_PROTOCOL_ENTITY_* value
 * (see the rc_types[] table below).
 */
typedef struct rc_type_info {
	uint32_t	rt_type;	/* matches array index */
	uint32_t	rt_num_ids;	/* significant entries of rl_ids[] */
	uint32_t	rt_name_flags;	/* UU_NAME_* flags, or RT_NO_NAME */
	uint32_t	rt_valid_children[MAX_VALID_CHILDREN];
					/* allowed child entity types */
} rc_type_info_t;

/* rt_name_flags value for entity types which have no name. */
#define	RT_NO_NAME	-1U
410
/*
 * Table of per-type information, indexed by entity type.  rc_node_hash()
 * and rc_node_match() consult rt_num_ids here; the final {-1UL} entry is
 * a terminator and is counted by NUM_TYPES.
 */
static rc_type_info_t rc_types[] = {
	{REP_PROTOCOL_ENTITY_NONE, 0, RT_NO_NAME},
	{REP_PROTOCOL_ENTITY_SCOPE, 0, 0,
	    {REP_PROTOCOL_ENTITY_SERVICE, REP_PROTOCOL_ENTITY_SCOPE}},
	{REP_PROTOCOL_ENTITY_SERVICE, 0, UU_NAME_DOMAIN | UU_NAME_PATH,
	    {REP_PROTOCOL_ENTITY_INSTANCE, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
	{REP_PROTOCOL_ENTITY_INSTANCE, 1, UU_NAME_DOMAIN,
	    {REP_PROTOCOL_ENTITY_SNAPSHOT, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
	{REP_PROTOCOL_ENTITY_SNAPSHOT, 2, UU_NAME_DOMAIN,
	    {REP_PROTOCOL_ENTITY_SNAPLEVEL, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
	{REP_PROTOCOL_ENTITY_SNAPLEVEL, 4, RT_NO_NAME,
	    {REP_PROTOCOL_ENTITY_PROPERTYGRP}},
	{REP_PROTOCOL_ENTITY_PROPERTYGRP, 5, UU_NAME_DOMAIN,
	    {REP_PROTOCOL_ENTITY_PROPERTY}},
	{REP_PROTOCOL_ENTITY_CPROPERTYGRP, 0, UU_NAME_DOMAIN,
	    {REP_PROTOCOL_ENTITY_PROPERTY}},
	{REP_PROTOCOL_ENTITY_PROPERTY, 7, UU_NAME_DOMAIN},
	{-1UL}
};
#define	NUM_TYPES	((sizeof (rc_types) / sizeof (*rc_types)))
431
/*
 * Element of a permcheck_t hash table.  pce_auth is declared with one
 * byte but holds a full NUL-terminated authorization string; presumably
 * each element is allocated large enough for the whole string (C89
 * "struct hack") -- confirm against the allocation site.
 */
struct pc_elt {
	struct pc_elt *pce_next;	/* next element in this hash bucket */
	char pce_auth[1];		/* start of the authorization string */
};
437
/*
 * If an authorization fails, we must decide which of the elements in the
 * permcheck hash table to use in the audit event.  That is to say of all
 * the strings in the hash table, we must choose one and use it in the audit
 * event.  It is desirable to use the most specific string in the audit
 * event.
 *
 * The pc_auth_type specifies the types (sources) of authorization
 * strings.  The enum is ordered in increasing specificity; pc_add()
 * relies on this ordering when it tracks the most specific string
 * (see "Collecting the Authorization String" above).
 */
typedef enum pc_auth_type {
	PC_AUTH_NONE = 0,	/* no auth string available. */
	PC_AUTH_SMF,		/* strings coded into SMF. */
	PC_AUTH_SVC,		/* strings specified in PG of a service. */
	PC_AUTH_INST		/* strings specified in PG of an instance. */
} pc_auth_type_t;

/*
 * The following enum is used to represent the results of the checks to see
 * if the client has the appropriate permissions to perform an action.
 */
typedef enum perm_status {
	PERM_DENIED = 0,	/* Permission denied. */
	PERM_GRANTED,		/* Client has authorizations. */
	PERM_GONE,		/* Door client went away. */
	PERM_FAIL		/* Generic failure. e.g. resources */
} perm_status_t;
465
/*
 * An authorization set hash table.  pc_specific/pc_specific_type track
 * the most specific entry added so far; on return from perm_granted(),
 * pc_auth_string points at the string to report in the audit event
 * (the matched string, or the most specific one on denial).
 */
typedef struct {
	struct pc_elt **pc_buckets;
	uint_t pc_bnum;			/* number of buckets */
	uint_t pc_enum;			/* number of elements */
	struct pc_elt *pc_specific;	/* most specific element */
	pc_auth_type_t pc_specific_type; /* type of pc_specific */
	char *pc_auth_string;		/* authorization string */
					/* for audit events */
} permcheck_t;
476
/*
 * Structure for holding audit event data.  Not all events use all members
 * of the structure; this is the "event data" argument passed to
 * smf_audit_event().
 */
typedef struct audit_event_data {
	char *ed_auth;		/* authorization string. */
	char *ed_fmri;		/* affected FMRI. */
	char *ed_snapname;	/* name of snapshot. */
	char *ed_old_fmri;	/* old fmri in attach case. */
	char *ed_old_name;	/* old snapshot in attach case. */
	char *ed_type;		/* prop. group or prop. type. */
	char *ed_prop_value;	/* property value. */
} audit_event_data_t;
490
/*
 * Pointer to function to do special processing to get audit event ID.
 * Audit event IDs are defined in /usr/include/bsm/adt_event.h.  Function
 * returns 0 if ID successfully retrieved.  Otherwise it returns -1.
 */
typedef int (*spc_getid_fn_t)(tx_commit_data_t *, size_t, const char *,
    au_event_t *);

/*
 * spc_getid_fn_t used for the general/enabled special properties in
 * special_props_list below.
 */
static int general_enable_id(tx_commit_data_t *, size_t, const char *,
    au_event_t *);
500
/* uu_list pools for the list types used by this file. */
static uu_list_pool_t *rc_children_pool;
static uu_list_pool_t *rc_pg_notify_pool;
static uu_list_pool_t *rc_notify_pool;
static uu_list_pool_t *rc_notify_info_pool;

/* Root of the entity tree: the "localhost" scope (see rc_node_init()). */
static rc_node_t *rc_scope;

/* Protect and signal notification state (see block comment below). */
static pthread_mutex_t rc_pg_notify_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rc_pg_notify_cv = PTHREAD_COND_INITIALIZER;
static uint_t rc_notify_in_use;	/* blocks removals */
511
/*
 * Some combinations of property group/property name require a special
 * audit event to be generated when there is a change.
 * audit_special_prop_item_t is used to specify these special cases.  The
 * special_props_list array defines a list of these special properties.
 * Each entry either carries the event id directly (api_event_id) or a
 * function (api_event_func) to compute it.
 */
typedef struct audit_special_prop_item {
	const char	*api_pg_name;	/* property group name. */
	const char	*api_prop_name;	/* property name. */
	au_event_t	api_event_id;	/* event id or 0. */
	spc_getid_fn_t	api_event_func; /* function to get event id. */
} audit_special_prop_item_t;
524
525 /*
526 * Native builds are done using the build machine's standard include
527 * files. These files may not yet have the definitions for the ADT_smf_*
528 * symbols. Thus, we do not compile this table when doing native builds.
529 */
#ifndef NATIVE_BUILD
/*
 * The following special_props_list array specifies property group/property
 * name combinations that have specific meaning to startd.  A special event
 * is generated for these combinations in addition to the regular property
 * event.
 *
 * At run time this array gets sorted.  See the call to qsort(3C) in
 * rc_node_init().  The array is sorted, so that bsearch(3C) can be used
 * to do lookups.  Entries with an api_event_id of 0 supply an
 * api_event_func to compute the event id instead.
 */
static audit_special_prop_item_t special_props_list[] = {
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADED, ADT_smf_degrade,
	    NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADE_IMMEDIATE,
	    ADT_smf_immediate_degrade, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_OFF, ADT_smf_clear, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON,
	    ADT_smf_maintenance, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMEDIATE,
	    ADT_smf_immediate_maintenance, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMTEMP,
	    ADT_smf_immtmp_maintenance, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_TEMPORARY,
	    ADT_smf_tmp_maintenance, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_REFRESH, ADT_smf_refresh, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTART, ADT_smf_restart, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTORE, ADT_smf_clear, NULL},
	{SCF_PG_OPTIONS, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
	{SCF_PG_OPTIONS_OVR, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
	{SCF_PG_GENERAL, SCF_PROPERTY_ENABLED, 0, general_enable_id},
	{SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 0, general_enable_id}
};
#define	SPECIAL_PROP_COUNT	(sizeof (special_props_list) /\
	sizeof (audit_special_prop_item_t))
#endif	/* NATIVE_BUILD */
566
567 /*
568 * We support an arbitrary number of clients interested in events for certain
569 * types of changes. Each client is represented by an rc_notify_info_t, and
570 * all clients are chained onto the rc_notify_info_list.
571 *
572 * The rc_notify_list is the global notification list. Each entry is of
573 * type rc_notify_t, which is embedded in one of three other structures:
574 *
575 * rc_node_t property group update notification
576 * rc_notify_delete_t object deletion notification
577 * rc_notify_info_t notification clients
578 *
579 * Which type of object is determined by which pointer in the rc_notify_t is
580 * non-NULL.
581 *
582 * New notifications and clients are added to the end of the list.
583 * Notifications no-one is interested in are never added to the list.
584 *
585 * Clients use their position in the list to track which notifications they
586 * have not yet reported. As they process notifications, they move forward
587 * in the list past them. There is always a client at the beginning of the
588 * list -- as he moves past notifications, he removes them from the list and
589 * cleans them up.
590 *
591 * The rc_pg_notify_lock protects all notification state. The rc_pg_notify_cv
592 * is used for global signalling, and each client has a cv which he waits for
593 * events of interest on.
594 *
595 * rc_notify_in_use is used to protect rc_notify_list from deletions when
596 * the rc_pg_notify_lock is dropped. Specifically, rc_notify_info_wait()
597 * must drop the lock to call rc_node_assign(), and then it reacquires the
598 * lock. Deletions from rc_notify_list during this period are not
599 * allowed. Insertions do not matter, because they are always done at the
600 * end of the list.
601 */
static uu_list_t *rc_notify_info_list;	/* registered notification clients */
static uu_list_t *rc_notify_list;	/* global notification list */

#define	HASH_SIZE	512
#define	HASH_MASK	(HASH_SIZE - 1)

/* The global rc_node_t cache; buckets are selected by rc_node_hash(). */
#pragma align 64(cache_hash)
static cache_bucket_t cache_hash[HASH_SIZE];

#define	CACHE_BUCKET(h)		(&cache_hash[(h) & HASH_MASK])
612
613
614 static void rc_node_no_client_refs(rc_node_t *np);
615
616
617 static uint32_t
rc_node_hash(rc_node_lookup_t * lp)618 rc_node_hash(rc_node_lookup_t *lp)
619 {
620 uint32_t type = lp->rl_type;
621 uint32_t backend = lp->rl_backend;
622 uint32_t mainid = lp->rl_main_id;
623 uint32_t *ids = lp->rl_ids;
624
625 rc_type_info_t *tp = &rc_types[type];
626 uint32_t num_ids;
627 uint32_t left;
628 uint32_t hash;
629
630 assert(backend == BACKEND_TYPE_NORMAL ||
631 backend == BACKEND_TYPE_NONPERSIST);
632
633 assert(type > 0 && type < NUM_TYPES);
634 num_ids = tp->rt_num_ids;
635
636 left = MAX_IDS - num_ids;
637 assert(num_ids <= MAX_IDS);
638
639 hash = type * 7 + mainid * 5 + backend;
640
641 while (num_ids-- > 0)
642 hash = hash * 11 + *ids++ * 7;
643
644 /*
645 * the rest should be zeroed
646 */
647 while (left-- > 0)
648 assert(*ids++ == 0);
649
650 return (hash);
651 }
652
653 static int
rc_node_match(rc_node_t * np,rc_node_lookup_t * l)654 rc_node_match(rc_node_t *np, rc_node_lookup_t *l)
655 {
656 rc_node_lookup_t *r = &np->rn_id;
657 rc_type_info_t *tp;
658 uint32_t type;
659 uint32_t num_ids;
660
661 if (r->rl_main_id != l->rl_main_id)
662 return (0);
663
664 type = r->rl_type;
665 if (type != l->rl_type)
666 return (0);
667
668 assert(type > 0 && type < NUM_TYPES);
669
670 tp = &rc_types[r->rl_type];
671 num_ids = tp->rt_num_ids;
672
673 assert(num_ids <= MAX_IDS);
674 while (num_ids-- > 0)
675 if (r->rl_ids[num_ids] != l->rl_ids[num_ids])
676 return (0);
677
678 return (1);
679 }
680
681 /*
682 * Register an ephemeral reference to np. This should be done while both
683 * the persistent reference from which the np pointer was read is locked
684 * and np itself is locked. This guarantees that another thread which
685 * thinks it has the last reference will yield without destroying the
686 * node.
687 */
688 static void
rc_node_hold_ephemeral_locked(rc_node_t * np)689 rc_node_hold_ephemeral_locked(rc_node_t *np)
690 {
691 assert(MUTEX_HELD(&np->rn_lock));
692
693 ++np->rn_erefs;
694 }
695
696 /*
697 * the "other" references on a node are maintained in an atomically
698 * updated refcount, rn_other_refs. This can be bumped from arbitrary
699 * context, and tracks references to a possibly out-of-date node's children.
700 *
701 * To prevent the node from disappearing between the final drop of
702 * rn_other_refs and the unref handling, rn_other_refs_held is bumped on
703 * 0->1 transitions and decremented (with the node lock held) on 1->0
704 * transitions.
705 */
706 static void
rc_node_hold_other(rc_node_t * np)707 rc_node_hold_other(rc_node_t *np)
708 {
709 if (atomic_add_32_nv(&np->rn_other_refs, 1) == 1) {
710 atomic_add_32(&np->rn_other_refs_held, 1);
711 assert(np->rn_other_refs_held > 0);
712 }
713 assert(np->rn_other_refs > 0);
714 }
715
/*
 * Drop an "other" reference on np.  No node locks may be held on entry;
 * rn_lock is taken here when the count reaches zero.
 */
static void
rc_node_rele_other(rc_node_t *np)
{
	assert(np->rn_other_refs > 0);
	/* Was this the final drop of rn_other_refs (1 -> 0 transition)? */
	if (atomic_add_32_nv(&np->rn_other_refs, -1) == 0) {
		(void) pthread_mutex_lock(&np->rn_lock);
		assert(np->rn_other_refs_held > 0);
		/*
		 * Unpin the node; if no client references remain and the
		 * node is marked RC_NODE_OLD, it can be torn down now.
		 */
		if (atomic_add_32_nv(&np->rn_other_refs_held, -1) == 0 &&
		    np->rn_refs == 0 && (np->rn_flags & RC_NODE_OLD)) {
			/*
			 * This was the last client reference.  Destroy
			 * any other references and free() the node.
			 * (rc_node_no_client_refs() presumably consumes
			 * rn_lock -- note we do not unlock here.)
			 */
			rc_node_no_client_refs(np);
		} else {
			(void) pthread_mutex_unlock(&np->rn_lock);
		}
	}
}
738
/*
 * Take a client reference on np, whose lock must already be held.  The
 * first client reference on a PARENT_REF node also takes an "other"
 * reference on its parent-ref node so the parent cannot go away while
 * this node is referenced.
 */
static void
rc_node_hold_locked(rc_node_t *np)
{
	assert(MUTEX_HELD(&np->rn_lock));

	if (np->rn_refs == 0 && (np->rn_flags & RC_NODE_PARENT_REF))
		rc_node_hold_other(np->rn_parent_ref);
	np->rn_refs++;
	assert(np->rn_refs > 0);
}
749
/*
 * Take a client reference on np.  No locks may be held; rn_lock is
 * acquired and released internally.
 */
static void
rc_node_hold(rc_node_t *np)
{
	(void) pthread_mutex_lock(&np->rn_lock);
	rc_node_hold_locked(np);
	(void) pthread_mutex_unlock(&np->rn_lock);
}
757
/*
 * Drop a client reference on np, whose lock must be held on entry.  The
 * lock is ALWAYS released by this function: either explicitly, or by
 * rc_node_no_client_refs() when the last reference to a DEAD/OLD node
 * is dropped.  If the node held a parent ref, the corresponding "other"
 * reference on the parent is dropped after rn_lock is released (no node
 * locks may be held when calling rc_node_rele_other()).
 */
static void
rc_node_rele_locked(rc_node_t *np)
{
	int unref = 0;
	rc_node_t *par_ref = NULL;

	assert(MUTEX_HELD(&np->rn_lock));
	assert(np->rn_refs > 0);

	if (--np->rn_refs == 0) {
		if (np->rn_flags & RC_NODE_PARENT_REF)
			par_ref = np->rn_parent_ref;

		/*
		 * Composed property groups are only as good as their
		 * references.
		 */
		if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
			np->rn_flags |= RC_NODE_DEAD;

		if ((np->rn_flags & (RC_NODE_DEAD|RC_NODE_OLD)) &&
		    np->rn_other_refs == 0 && np->rn_other_refs_held == 0)
			unref = 1;
	}

	if (unref) {
		/*
		 * This was the last client reference.  Destroy any other
		 * references and free() the node.
		 */
		rc_node_no_client_refs(np);
	} else {
		/*
		 * rn_erefs can be 0 if we acquired the reference in
		 * a path which hasn't been updated to increment rn_erefs.
		 * When all paths which end here are updated, we should
		 * assert rn_erefs > 0 and always decrement it.
		 */
		if (np->rn_erefs > 0)
			--np->rn_erefs;
		(void) pthread_mutex_unlock(&np->rn_lock);
	}

	if (par_ref != NULL)
		rc_node_rele_other(par_ref);
}
804
/*
 * Drop a client reference on np.  No locks may be held on entry; note
 * that rn_lock is intentionally NOT unlocked here -- rc_node_rele_locked()
 * always consumes it.
 */
void
rc_node_rele(rc_node_t *np)
{
	(void) pthread_mutex_lock(&np->rn_lock);
	rc_node_rele_locked(np);
}
811
812 static cache_bucket_t *
cache_hold(uint32_t h)813 cache_hold(uint32_t h)
814 {
815 cache_bucket_t *bp = CACHE_BUCKET(h);
816 (void) pthread_mutex_lock(&bp->cb_lock);
817 return (bp);
818 }
819
/*
 * Release a cache bucket acquired with cache_hold().
 */
static void
cache_release(cache_bucket_t *bp)
{
	(void) pthread_mutex_unlock(&bp->cb_lock);
}
825
826 static rc_node_t *
cache_lookup_unlocked(cache_bucket_t * bp,rc_node_lookup_t * lp)827 cache_lookup_unlocked(cache_bucket_t *bp, rc_node_lookup_t *lp)
828 {
829 uint32_t h = rc_node_hash(lp);
830 rc_node_t *np;
831
832 assert(MUTEX_HELD(&bp->cb_lock));
833 assert(bp == CACHE_BUCKET(h));
834
835 for (np = bp->cb_head; np != NULL; np = np->rn_hash_next) {
836 if (np->rn_hash == h && rc_node_match(np, lp)) {
837 rc_node_hold(np);
838 return (np);
839 }
840 }
841
842 return (NULL);
843 }
844
845 static rc_node_t *
cache_lookup(rc_node_lookup_t * lp)846 cache_lookup(rc_node_lookup_t *lp)
847 {
848 uint32_t h;
849 cache_bucket_t *bp;
850 rc_node_t *np;
851
852 h = rc_node_hash(lp);
853 bp = cache_hold(h);
854
855 np = cache_lookup_unlocked(bp, lp);
856
857 cache_release(bp);
858
859 return (np);
860 }
861
/*
 * Insert np at the head of bucket bp's chain.  The caller must hold the
 * bucket lock, np must hash to this bucket, and np must not already be
 * on any chain (rn_hash_next == NULL).
 */
static void
cache_insert_unlocked(cache_bucket_t *bp, rc_node_t *np)
{
	assert(MUTEX_HELD(&bp->cb_lock));
	assert(np->rn_hash == rc_node_hash(&np->rn_id));
	assert(bp == CACHE_BUCKET(np->rn_hash));

	assert(np->rn_hash_next == NULL);

	np->rn_hash_next = bp->cb_head;
	bp->cb_head = np;
}
874
875 static void
cache_remove_unlocked(cache_bucket_t * bp,rc_node_t * np)876 cache_remove_unlocked(cache_bucket_t *bp, rc_node_t *np)
877 {
878 rc_node_t **npp;
879
880 assert(MUTEX_HELD(&bp->cb_lock));
881 assert(np->rn_hash == rc_node_hash(&np->rn_id));
882 assert(bp == CACHE_BUCKET(np->rn_hash));
883
884 for (npp = &bp->cb_head; *npp != NULL; npp = &(*npp)->rn_hash_next)
885 if (*npp == np)
886 break;
887
888 assert(*npp == np);
889 *npp = np->rn_hash_next;
890 np->rn_hash_next = NULL;
891 }
892
893 /*
894 * verify that the 'parent' type can have a child typed 'child'
895 * Fails with
896 * _INVALID_TYPE - argument is invalid
897 * _TYPE_MISMATCH - parent type cannot have children of type child
898 */
899 static int
rc_check_parent_child(uint32_t parent,uint32_t child)900 rc_check_parent_child(uint32_t parent, uint32_t child)
901 {
902 int idx;
903 uint32_t type;
904
905 if (parent == 0 || parent >= NUM_TYPES ||
906 child == 0 || child >= NUM_TYPES)
907 return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
908
909 for (idx = 0; idx < MAX_VALID_CHILDREN; idx++) {
910 type = rc_types[parent].rt_valid_children[idx];
911 if (type == child)
912 return (REP_PROTOCOL_SUCCESS);
913 }
914
915 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
916 }
917
918 /*
919 * Fails with
920 * _INVALID_TYPE - type is invalid
921 * _BAD_REQUEST - name is an invalid name for a node of type type
922 */
923 int
rc_check_type_name(uint32_t type,const char * name)924 rc_check_type_name(uint32_t type, const char *name)
925 {
926 if (type == 0 || type >= NUM_TYPES)
927 return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
928
929 if (uu_check_name(name, rc_types[type].rt_name_flags) == -1)
930 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
931
932 return (REP_PROTOCOL_SUCCESS);
933 }
934
935 static int
rc_check_pgtype_name(const char * name)936 rc_check_pgtype_name(const char *name)
937 {
938 if (uu_check_name(name, UU_NAME_DOMAIN) == -1)
939 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
940
941 return (REP_PROTOCOL_SUCCESS);
942 }
943
944 /*
945 * rc_node_free_fmri should be called whenever a node loses its parent.
946 * The reason is that the node's fmri string is built up by concatenating
947 * its name to the parent's fmri. Thus, when the node no longer has a
948 * parent, its fmri is no longer valid.
949 */
950 static void
rc_node_free_fmri(rc_node_t * np)951 rc_node_free_fmri(rc_node_t *np)
952 {
953 if (np->rn_fmri != NULL) {
954 free((void *)np->rn_fmri);
955 np->rn_fmri = NULL;
956 }
957 }
958
959 /*
960 * Concatenate the appropriate separator and the FMRI element to the base
961 * FMRI string at fmri.
962 *
963 * Fails with
964 * _TRUNCATED Not enough room in buffer at fmri.
965 */
966 static int
rc_concat_fmri_element(char * fmri,size_t bufsize,size_t * sz_out,const char * element,rep_protocol_entity_t type)967 rc_concat_fmri_element(
968 char *fmri, /* base fmri */
969 size_t bufsize, /* size of buf at fmri */
970 size_t *sz_out, /* receives result size. */
971 const char *element, /* element name to concat */
972 rep_protocol_entity_t type) /* type of element */
973 {
974 size_t actual;
975 const char *name = element;
976 int rc;
977 const char *separator;
978
979 if (bufsize > 0)
980 *sz_out = strlen(fmri);
981 else
982 *sz_out = 0;
983
984 switch (type) {
985 case REP_PROTOCOL_ENTITY_SCOPE:
986 if (strcmp(element, SCF_FMRI_LOCAL_SCOPE) == 0) {
987 /*
988 * No need to display scope information if we are
989 * in the local scope.
990 */
991 separator = SCF_FMRI_SVC_PREFIX;
992 name = NULL;
993 } else {
994 /*
995 * Need to display scope information, because it is
996 * not the local scope.
997 */
998 separator = SCF_FMRI_SVC_PREFIX SCF_FMRI_SCOPE_PREFIX;
999 }
1000 break;
1001 case REP_PROTOCOL_ENTITY_SERVICE:
1002 separator = SCF_FMRI_SERVICE_PREFIX;
1003 break;
1004 case REP_PROTOCOL_ENTITY_INSTANCE:
1005 separator = SCF_FMRI_INSTANCE_PREFIX;
1006 break;
1007 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
1008 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
1009 separator = SCF_FMRI_PROPERTYGRP_PREFIX;
1010 break;
1011 case REP_PROTOCOL_ENTITY_PROPERTY:
1012 separator = SCF_FMRI_PROPERTY_PREFIX;
1013 break;
1014 case REP_PROTOCOL_ENTITY_VALUE:
1015 /*
1016 * A value does not have a separate FMRI from its property,
1017 * so there is nothing to concat.
1018 */
1019 return (REP_PROTOCOL_SUCCESS);
1020 case REP_PROTOCOL_ENTITY_SNAPSHOT:
1021 case REP_PROTOCOL_ENTITY_SNAPLEVEL:
1022 /* Snapshots do not have FMRIs, so there is nothing to do. */
1023 return (REP_PROTOCOL_SUCCESS);
1024 default:
1025 (void) fprintf(stderr, "%s:%d: Unknown protocol type %d.\n",
1026 __FILE__, __LINE__, type);
1027 abort(); /* Missing a case in switch if we get here. */
1028 }
1029
1030 /* Concatenate separator and element to the fmri buffer. */
1031
1032 actual = strlcat(fmri, separator, bufsize);
1033 if (name != NULL) {
1034 if (actual < bufsize) {
1035 actual = strlcat(fmri, name, bufsize);
1036 } else {
1037 actual += strlen(name);
1038 }
1039 }
1040 if (actual < bufsize) {
1041 rc = REP_PROTOCOL_SUCCESS;
1042 } else {
1043 rc = REP_PROTOCOL_FAIL_TRUNCATED;
1044 }
1045 *sz_out = actual;
1046 return (rc);
1047 }
1048
1049 /*
1050 * Get the FMRI for the node at np. The fmri will be placed in buf. On
1051 * success sz_out will be set to the size of the fmri in buf. If
1052 * REP_PROTOCOL_FAIL_TRUNCATED is returned, sz_out will be set to the size
1053 * of the buffer that would be required to avoid truncation.
1054 *
1055 * Fails with
1056 * _TRUNCATED not enough room in buf for the FMRI.
1057 */
1058 static int
rc_node_get_fmri_or_fragment(rc_node_t * np,char * buf,size_t bufsize,size_t * sz_out)1059 rc_node_get_fmri_or_fragment(rc_node_t *np, char *buf, size_t bufsize,
1060 size_t *sz_out)
1061 {
1062 size_t fmri_len = 0;
1063 int r;
1064
1065 if (bufsize > 0)
1066 *buf = 0;
1067 *sz_out = 0;
1068
1069 if (np->rn_fmri == NULL) {
1070 /*
1071 * A NULL rn_fmri implies that this is a top level scope.
1072 * Child nodes will always have an rn_fmri established
1073 * because both rc_node_link_child() and
1074 * rc_node_relink_child() call rc_node_build_fmri(). In
1075 * this case, we'll just return our name preceded by the
1076 * appropriate FMRI decorations.
1077 */
1078 assert(np->rn_parent == NULL);
1079 r = rc_concat_fmri_element(buf, bufsize, &fmri_len, np->rn_name,
1080 np->rn_id.rl_type);
1081 if (r != REP_PROTOCOL_SUCCESS)
1082 return (r);
1083 } else {
1084 /* We have an fmri, so return it. */
1085 fmri_len = strlcpy(buf, np->rn_fmri, bufsize);
1086 }
1087
1088 *sz_out = fmri_len;
1089
1090 if (fmri_len >= bufsize)
1091 return (REP_PROTOCOL_FAIL_TRUNCATED);
1092
1093 return (REP_PROTOCOL_SUCCESS);
1094 }
1095
1096 /*
1097 * Build an FMRI string for this node and save it in rn_fmri.
1098 *
1099 * The basic strategy here is to get the fmri of our parent and then
1100 * concatenate the appropriate separator followed by our name. If our name
1101 * is null, the resulting fmri will just be a copy of the parent fmri.
1102 * rc_node_build_fmri() should be called with the RC_NODE_USING_PARENT flag
1103 * set. Also the rn_lock for this node should be held.
1104 *
1105 * Fails with
1106 * _NO_RESOURCES Could not allocate memory.
1107 */
1108 static int
rc_node_build_fmri(rc_node_t * np)1109 rc_node_build_fmri(rc_node_t *np)
1110 {
1111 size_t actual;
1112 char fmri[REP_PROTOCOL_FMRI_LEN];
1113 int rc;
1114 size_t sz = REP_PROTOCOL_FMRI_LEN;
1115
1116 assert(MUTEX_HELD(&np->rn_lock));
1117 assert(np->rn_flags & RC_NODE_USING_PARENT);
1118
1119 rc_node_free_fmri(np);
1120
1121 rc = rc_node_get_fmri_or_fragment(np->rn_parent, fmri, sz, &actual);
1122 assert(rc == REP_PROTOCOL_SUCCESS);
1123
1124 if (np->rn_name != NULL) {
1125 rc = rc_concat_fmri_element(fmri, sz, &actual, np->rn_name,
1126 np->rn_id.rl_type);
1127 assert(rc == REP_PROTOCOL_SUCCESS);
1128 np->rn_fmri = strdup(fmri);
1129 } else {
1130 np->rn_fmri = strdup(fmri);
1131 }
1132 if (np->rn_fmri == NULL) {
1133 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1134 } else {
1135 rc = REP_PROTOCOL_SUCCESS;
1136 }
1137
1138 return (rc);
1139 }
1140
1141 /*
1142 * Get the FMRI of the node at np placing the result in fmri. Then
1143 * concatenate the additional element to fmri. The type variable indicates
1144 * the type of element, so that the appropriate separator can be
1145 * generated. size is the number of bytes in the buffer at fmri, and
1146 * sz_out receives the size of the generated string. If the result is
1147 * truncated, sz_out will receive the size of the buffer that would be
1148 * required to avoid truncation.
1149 *
1150 * Fails with
1151 * _TRUNCATED Not enough room in buffer at fmri.
1152 */
1153 static int
rc_get_fmri_and_concat(rc_node_t * np,char * fmri,size_t size,size_t * sz_out,const char * element,rep_protocol_entity_t type)1154 rc_get_fmri_and_concat(rc_node_t *np, char *fmri, size_t size, size_t *sz_out,
1155 const char *element, rep_protocol_entity_t type)
1156 {
1157 int rc;
1158
1159 if ((rc = rc_node_get_fmri_or_fragment(np, fmri, size, sz_out)) !=
1160 REP_PROTOCOL_SUCCESS) {
1161 return (rc);
1162 }
1163 if ((rc = rc_concat_fmri_element(fmri, size, sz_out, element, type)) !=
1164 REP_PROTOCOL_SUCCESS) {
1165 return (rc);
1166 }
1167
1168 return (REP_PROTOCOL_SUCCESS);
1169 }
1170
/*
 * Returns nonzero if notification np is of interest to watcher rnip.
 * Deletion notifications interest everyone; property-group notifications
 * match if the PG's name or type appears in the watcher's name/type
 * lists.  Must be called with rc_pg_notify_lock held.
 */
static int
rc_notify_info_interested(rc_notify_info_t *rnip, rc_notify_t *np)
{
	rc_node_t *nnp = np->rcn_node;
	int i;

	assert(MUTEX_HELD(&rc_pg_notify_lock));

	/* Exactly one of rcn_delete, rcn_info, rcn_node is non-NULL. */
	if (np->rcn_delete != NULL) {
		assert(np->rcn_info == NULL && np->rcn_node == NULL);
		return (1);	/* everyone likes deletes */
	}
	if (np->rcn_node == NULL) {
		assert(np->rcn_info != NULL || np->rcn_delete != NULL);
		return (0);
	}
	assert(np->rcn_info == NULL);

	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
		if (rnip->rni_namelist[i] != NULL) {
			if (strcmp(nnp->rn_name, rnip->rni_namelist[i]) == 0)
				return (1);
		}
		if (rnip->rni_typelist[i] != NULL) {
			if (strcmp(nnp->rn_type, rnip->rni_typelist[i]) == 0)
				return (1);
		}
	}
	return (0);
}
1201
/*
 * Queue a change notification for property group nnp.  Wakes every
 * watcher interested in it; the notification is only queued if at least
 * one watcher cared (otherwise rcn_node is reset to NULL).  Non-PG nodes
 * generate no notifications.
 */
static void
rc_notify_insert_node(rc_node_t *nnp)
{
	rc_notify_t *np = &nnp->rn_notify;
	rc_notify_info_t *nip;
	int found = 0;

	assert(np->rcn_info == NULL);

	if (nnp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
		return;

	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	np->rcn_node = nnp;
	for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
	    nip = uu_list_next(rc_notify_info_list, nip)) {
		if (rc_notify_info_interested(nip, np)) {
			(void) pthread_cond_broadcast(&nip->rni_cv);
			found++;
		}
	}
	if (found)
		(void) uu_list_insert_before(rc_notify_list, NULL, np);
	else
		np->rcn_node = NULL;

	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
}
1230
/*
 * Queue a deletion notification for the given service/instance/pg.
 * instance and pg may be NULL; the FMRI is formatted accordingly.  ndp is
 * owned by the notification list once queued (freed later by
 * rc_notify_remove_locked()).  All watchers are woken since everyone is
 * interested in deletions.
 */
static void
rc_notify_deletion(rc_notify_delete_t *ndp, const char *service,
    const char *instance, const char *pg)
{
	rc_notify_info_t *nip;

	uu_list_node_init(&ndp->rnd_notify, &ndp->rnd_notify.rcn_list_node,
	    rc_notify_pool);
	ndp->rnd_notify.rcn_delete = ndp;

	(void) snprintf(ndp->rnd_fmri, sizeof (ndp->rnd_fmri),
	    "svc:/%s%s%s%s%s", service,
	    (instance != NULL)? ":" : "", (instance != NULL)? instance : "",
	    (pg != NULL)? "/:properties/" : "", (pg != NULL)? pg : "");

	/*
	 * add to notification list, notify watchers
	 */
	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
	    nip = uu_list_next(rc_notify_info_list, nip))
		(void) pthread_cond_broadcast(&nip->rni_cv);
	(void) uu_list_insert_before(rc_notify_list, NULL, ndp);
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
}
1256
/*
 * Remove nnp's pending notification (if any) from the notification list.
 * Blocks while a reader has the list marked in-use (rc_notify_in_use).
 * Must NOT be called with nnp's lock held, to preserve lock ordering.
 */
static void
rc_notify_remove_node(rc_node_t *nnp)
{
	rc_notify_t *np = &nnp->rn_notify;

	assert(np->rcn_info == NULL);
	assert(!MUTEX_HELD(&nnp->rn_lock));

	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	while (np->rcn_node != NULL) {
		if (rc_notify_in_use) {
			/* Wait for the current reader to finish. */
			(void) pthread_cond_wait(&rc_pg_notify_cv,
			    &rc_pg_notify_lock);
			continue;
		}
		(void) uu_list_remove(rc_notify_list, np);
		np->rcn_node = NULL;
		break;
	}
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
}
1278
/*
 * Remove an entry from the notification list.  The caller must hold
 * rc_pg_notify_lock and the list must not be in use.  PG entries are
 * simply detached (they are embedded in their rc_node_t); delete records
 * were allocated separately and are freed here.
 */
static void
rc_notify_remove_locked(rc_notify_t *np)
{
	assert(MUTEX_HELD(&rc_pg_notify_lock));
	assert(rc_notify_in_use == 0);

	(void) uu_list_remove(rc_notify_list, np);
	if (np->rcn_node) {
		np->rcn_node = NULL;
	} else if (np->rcn_delete) {
		uu_free(np->rcn_delete);
	} else {
		assert(0);	/* CAN'T HAPPEN */
	}
}
1294
1295 /*
1296 * Permission checking functions. See comment atop this file.
1297 */
1298 #ifndef NATIVE_BUILD
1299 static permcheck_t *
pc_create()1300 pc_create()
1301 {
1302 permcheck_t *p;
1303
1304 p = uu_zalloc(sizeof (*p));
1305 if (p == NULL)
1306 return (NULL);
1307 p->pc_bnum = 8; /* Normal case will only have 2 elts. */
1308 p->pc_buckets = uu_zalloc(sizeof (*p->pc_buckets) * p->pc_bnum);
1309 if (p->pc_buckets == NULL) {
1310 uu_free(p);
1311 return (NULL);
1312 }
1313
1314 p->pc_enum = 0;
1315 return (p);
1316 }
1317
/*
 * Free a permcheck_t along with all of its buckets and elements.
 *
 * NOTE(review): pc_create()/pc_grow()/pc_add() allocate the buckets and
 * elements with uu_zalloc(), but they are released here with free().
 * This presumably works because libuutil's allocator is malloc-based;
 * uu_free() would be more consistent -- confirm before changing.
 */
static void
pc_free(permcheck_t *pcp)
{
	uint_t i;
	struct pc_elt *ep, *next;

	for (i = 0; i < pcp->pc_bnum; ++i) {
		for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
			next = ep->pce_next;
			free(ep);
		}
	}

	free(pcp->pc_buckets);
	free(pcp);
}
1334
/*
 * Hash an authorization string for the permcheck table.
 *
 * Generic hash function from uts/common/os/modhash.c: shift-and-add,
 * folding the high nibble back in so its bits keep participating.
 */
static uint32_t
pc_hash(const char *auth)
{
	uint32_t hash = 0;
	uint32_t high;

	while (*auth != '\0') {
		hash = (hash << 4) + *auth++;
		if ((high = (hash & 0xf0000000)) != 0) {
			hash ^= (high >> 24);
			hash ^= high;
		}
	}

	return (hash);
}
1355
1356 static perm_status_t
pc_exists(permcheck_t * pcp,const char * auth)1357 pc_exists(permcheck_t *pcp, const char *auth)
1358 {
1359 uint32_t h;
1360 struct pc_elt *ep;
1361
1362 h = pc_hash(auth);
1363 for (ep = pcp->pc_buckets[h & (pcp->pc_bnum - 1)];
1364 ep != NULL;
1365 ep = ep->pce_next) {
1366 if (strcmp(auth, ep->pce_auth) == 0) {
1367 pcp->pc_auth_string = ep->pce_auth;
1368 return (PERM_GRANTED);
1369 }
1370 }
1371
1372 return (PERM_DENIED);
1373 }
1374
1375 static perm_status_t
pc_match(permcheck_t * pcp,const char * pattern)1376 pc_match(permcheck_t *pcp, const char *pattern)
1377 {
1378 uint_t i;
1379 struct pc_elt *ep;
1380
1381 for (i = 0; i < pcp->pc_bnum; ++i) {
1382 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = ep->pce_next) {
1383 if (_auth_match(pattern, ep->pce_auth)) {
1384 pcp->pc_auth_string = ep->pce_auth;
1385 return (PERM_GRANTED);
1386 }
1387 }
1388 }
1389
1390 return (PERM_DENIED);
1391 }
1392
/*
 * Double the bucket count and rehash every element.  Returns 0 on
 * success, -1 on allocation failure or bucket-count overflow (the table
 * is left unchanged on failure).
 */
static int
pc_grow(permcheck_t *pcp)
{
	uint_t new_bnum, i, j;
	struct pc_elt **new_buckets;
	struct pc_elt *ep, *next;

	new_bnum = pcp->pc_bnum * 2;
	if (new_bnum < pcp->pc_bnum)
		/* Bucket count overflowed; refuse to grow. */
		return (-1);

	new_buckets = uu_zalloc(sizeof (*new_buckets) * new_bnum);
	if (new_buckets == NULL)
		return (-1);

	/* Move every element onto its new chain (power-of-two masking). */
	for (i = 0; i < pcp->pc_bnum; ++i) {
		for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
			next = ep->pce_next;
			j = pc_hash(ep->pce_auth) & (new_bnum - 1);
			ep->pce_next = new_buckets[j];
			new_buckets[j] = ep;
		}
	}

	uu_free(pcp->pc_buckets);
	pcp->pc_buckets = new_buckets;
	pcp->pc_bnum = new_bnum;

	return (0);
}
1424
/*
 * Add authorization string auth to the table.  auth_type ranks how
 * specific the authorization is; the most specific one seen so far is
 * remembered in pc_specific for audit reporting.  Returns 0 on success,
 * -1 on allocation failure.
 */
static int
pc_add(permcheck_t *pcp, const char *auth, pc_auth_type_t auth_type)
{
	struct pc_elt *ep;
	uint_t i;

	/* Element is allocated with its auth string appended in-line. */
	ep = uu_zalloc(offsetof(struct pc_elt, pce_auth) + strlen(auth) + 1);
	if (ep == NULL)
		return (-1);

	/* Grow if pc_enum / pc_bnum > 3/4. */
	if (pcp->pc_enum * 4 > 3 * pcp->pc_bnum)
		/* Failure is not a stopper; we'll try again next time. */
		(void) pc_grow(pcp);

	(void) strcpy(ep->pce_auth, auth);

	i = pc_hash(auth) & (pcp->pc_bnum - 1);
	ep->pce_next = pcp->pc_buckets[i];
	pcp->pc_buckets[i] = ep;

	if (auth_type > pcp->pc_specific_type) {
		pcp->pc_specific_type = auth_type;
		pcp->pc_specific = ep;
	}

	++pcp->pc_enum;

	return (0);
}
1455
1456 /*
1457 * For the type of a property group, return the authorization which may be
1458 * used to modify it.
1459 */
1460 static const char *
perm_auth_for_pgtype(const char * pgtype)1461 perm_auth_for_pgtype(const char *pgtype)
1462 {
1463 if (strcmp(pgtype, SCF_GROUP_METHOD) == 0)
1464 return (AUTH_MODIFY_PREFIX "method");
1465 else if (strcmp(pgtype, SCF_GROUP_DEPENDENCY) == 0)
1466 return (AUTH_MODIFY_PREFIX "dependency");
1467 else if (strcmp(pgtype, SCF_GROUP_APPLICATION) == 0)
1468 return (AUTH_MODIFY_PREFIX "application");
1469 else if (strcmp(pgtype, SCF_GROUP_FRAMEWORK) == 0)
1470 return (AUTH_MODIFY_PREFIX "framework");
1471 else
1472 return (NULL);
1473 }
1474
1475 /*
1476 * Fails with
1477 * _NO_RESOURCES - out of memory
1478 */
1479 static int
perm_add_enabling_type(permcheck_t * pcp,const char * auth,pc_auth_type_t auth_type)1480 perm_add_enabling_type(permcheck_t *pcp, const char *auth,
1481 pc_auth_type_t auth_type)
1482 {
1483 return (pc_add(pcp, auth, auth_type) == 0 ? REP_PROTOCOL_SUCCESS :
1484 REP_PROTOCOL_FAIL_NO_RESOURCES);
1485 }
1486
1487 /*
1488 * Fails with
1489 * _NO_RESOURCES - out of memory
1490 */
1491 static int
perm_add_enabling(permcheck_t * pcp,const char * auth)1492 perm_add_enabling(permcheck_t *pcp, const char *auth)
1493 {
1494 return (perm_add_enabling_type(pcp, auth, PC_AUTH_SMF));
1495 }
1496
1497 /* Note that perm_add_enabling_values() is defined below. */
1498
1499 /*
1500 * perm_granted() returns PERM_GRANTED if the current door caller has one of
1501 * the enabling authorizations in pcp, PERM_DENIED if it doesn't, PERM_GONE if
1502 * the door client went away and PERM_FAIL if an error (usually lack of
1503 * memory) occurs. auth_cb() checks each and every authorizations as
1504 * enumerated by _enum_auths. When we find a result other than PERM_DENIED,
1505 * we short-cut the enumeration and return non-zero.
1506 */
1507
1508 static int
auth_cb(const char * auth,void * ctxt,void * vres)1509 auth_cb(const char *auth, void *ctxt, void *vres)
1510 {
1511 permcheck_t *pcp = ctxt;
1512 int *pret = vres;
1513
1514 if (strchr(auth, KV_WILDCHAR) == NULL)
1515 *pret = pc_exists(pcp, auth);
1516 else
1517 *pret = pc_match(pcp, auth);
1518
1519 if (*pret != PERM_DENIED)
1520 return (1);
1521 /*
1522 * If we failed, choose the most specific auth string for use in
1523 * the audit event.
1524 */
1525 assert(pcp->pc_specific != NULL);
1526 pcp->pc_auth_string = pcp->pc_specific->pce_auth;
1527
1528 return (0); /* Tells that we need to continue */
1529 }
1530
/*
 * Check whether the current door caller holds any of the enabling
 * authorizations in pcp.  Returns PERM_GRANTED/PERM_DENIED for a
 * definitive answer, PERM_GONE if the client vanished, or PERM_FAIL on
 * lookup/enumeration failure.  See the block comment above auth_cb().
 */
static perm_status_t
perm_granted(permcheck_t *pcp)
{
	ucred_t *uc;

	perm_status_t ret = PERM_DENIED;
	uid_t uid;
	struct passwd pw;
	char pwbuf[1024];	/* XXX should be NSS_BUFLEN_PASSWD */

	/* Get the uid */
	if ((uc = get_ucred()) == NULL) {
		if (errno == EINVAL) {
			/*
			 * Client is no longer waiting for our response (e.g.,
			 * it received a signal & resumed with EINTR).
			 * Punting with door_return() would be nice but we
			 * need to release all of the locks & references we
			 * hold.  And we must report failure to the client
			 * layer to keep it from ignoring retries as
			 * already-done (idempotency & all that).  None of the
			 * error codes fit very well, so we might as well
			 * force the return of _PERMISSION_DENIED since we
			 * couldn't determine the user.
			 */
			return (PERM_GONE);
		}
		assert(0);
		abort();
	}

	uid = ucred_geteuid(uc);
	assert(uid != (uid_t)-1);

	/* Need the user name to enumerate their authorizations. */
	if (getpwuid_r(uid, &pw, pwbuf, sizeof (pwbuf)) == NULL) {
		return (PERM_FAIL);
	}

	/*
	 * Enumerate all the auths defined for the user and return the
	 * result in ret.
	 */
	if (_enum_auths(pw.pw_name, auth_cb, pcp, &ret) < 0)
		return (PERM_FAIL);

	return (ret);
}
1578
1579 static int
map_granted_status(perm_status_t status,permcheck_t * pcp,char ** match_auth)1580 map_granted_status(perm_status_t status, permcheck_t *pcp,
1581 char **match_auth)
1582 {
1583 int rc;
1584
1585 *match_auth = NULL;
1586 switch (status) {
1587 case PERM_DENIED:
1588 *match_auth = strdup(pcp->pc_auth_string);
1589 if (*match_auth == NULL)
1590 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1591 else
1592 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1593 break;
1594 case PERM_GRANTED:
1595 *match_auth = strdup(pcp->pc_auth_string);
1596 if (*match_auth == NULL)
1597 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1598 else
1599 rc = REP_PROTOCOL_SUCCESS;
1600 break;
1601 case PERM_GONE:
1602 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1603 break;
1604 case PERM_FAIL:
1605 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1606 break;
1607 }
1608 return (rc);
1609 }
1610 #endif /* NATIVE_BUILD */
1611
1612 /*
1613 * flags in RC_NODE_WAITING_FLAGS are broadcast when unset, and are used to
1614 * serialize certain actions, and to wait for certain operations to complete
1615 *
1616 * The waiting flags are:
1617 * RC_NODE_CHILDREN_CHANGING
1618 * The child list is being built or changed (due to creation
1619 * or deletion). All iterators pause.
1620 *
1621 * RC_NODE_USING_PARENT
1622 * Someone is actively using the parent pointer, so we can't
1623 * be removed from the parent list.
1624 *
1625 * RC_NODE_CREATING_CHILD
1626 * A child is being created -- locks out other creations, to
1627 * prevent insert-insert races.
1628 *
1629 * RC_NODE_IN_TX
1630 * This object is running a transaction.
1631 *
1632 * RC_NODE_DYING
1633 * This node might be dying. Always set as a set, using
1634 * RC_NODE_DYING_FLAGS (which is everything but
1635 * RC_NODE_USING_PARENT)
1636 */
1637 static int
rc_node_hold_flag(rc_node_t * np,uint32_t flag)1638 rc_node_hold_flag(rc_node_t *np, uint32_t flag)
1639 {
1640 assert(MUTEX_HELD(&np->rn_lock));
1641 assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1642
1643 while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag)) {
1644 (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1645 }
1646 if (np->rn_flags & RC_NODE_DEAD)
1647 return (0);
1648
1649 np->rn_flags |= flag;
1650 return (1);
1651 }
1652
/*
 * Clear 'flag' on np (which must currently be set) and wake all waiters.
 * np's lock must be held.
 */
static void
rc_node_rele_flag(rc_node_t *np, uint32_t flag)
{
	assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
	assert(MUTEX_HELD(&np->rn_lock));
	assert((np->rn_flags & flag) == flag);
	np->rn_flags &= ~flag;
	(void) pthread_cond_broadcast(&np->rn_cv);
}
1662
1663 /*
1664 * wait until a particular flag has cleared. Fails if the object dies.
1665 */
1666 static int
rc_node_wait_flag(rc_node_t * np,uint32_t flag)1667 rc_node_wait_flag(rc_node_t *np, uint32_t flag)
1668 {
1669 assert(MUTEX_HELD(&np->rn_lock));
1670 while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag))
1671 (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1672
1673 return (!(np->rn_flags & RC_NODE_DEAD));
1674 }
1675
1676 /*
1677 * On entry, np's lock must be held, and this thread must be holding
1678 * RC_NODE_USING_PARENT. On return, both of them are released.
1679 *
1680 * If the return value is NULL, np either does not have a parent, or
1681 * the parent has been marked DEAD.
1682 *
1683 * If the return value is non-NULL, it is the parent of np, and both
1684 * its lock and the requested flags are held.
1685 */
1686 static rc_node_t *
rc_node_hold_parent_flag(rc_node_t * np,uint32_t flag)1687 rc_node_hold_parent_flag(rc_node_t *np, uint32_t flag)
1688 {
1689 rc_node_t *pp;
1690
1691 assert(MUTEX_HELD(&np->rn_lock));
1692 assert(np->rn_flags & RC_NODE_USING_PARENT);
1693
1694 if ((pp = np->rn_parent) == NULL) {
1695 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1696 (void) pthread_mutex_unlock(&np->rn_lock);
1697 return (NULL);
1698 }
1699 (void) pthread_mutex_unlock(&np->rn_lock);
1700
1701 (void) pthread_mutex_lock(&pp->rn_lock);
1702 (void) pthread_mutex_lock(&np->rn_lock);
1703 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1704 (void) pthread_mutex_unlock(&np->rn_lock);
1705
1706 if (!rc_node_hold_flag(pp, flag)) {
1707 (void) pthread_mutex_unlock(&pp->rn_lock);
1708 return (NULL);
1709 }
1710 return (pp);
1711 }
1712
/*
 * Allocate and initialize an empty rc_node_t: lock, condition variable,
 * child and pg-notify lists, and list-node linkage.  Returns NULL if the
 * node itself cannot be allocated.
 */
rc_node_t *
rc_node_alloc(void)
{
	rc_node_t *np = uu_zalloc(sizeof (*np));

	if (np == NULL)
		return (NULL);

	(void) pthread_mutex_init(&np->rn_lock, NULL);
	(void) pthread_cond_init(&np->rn_cv, NULL);

	/*
	 * NOTE(review): the uu_list_create() results are not checked;
	 * presumably they can fail and leave rn_children or
	 * rn_pg_notify_list NULL -- confirm callers tolerate that.
	 */
	np->rn_children = uu_list_create(rc_children_pool, np, 0);
	np->rn_pg_notify_list = uu_list_create(rc_pg_notify_pool, np, 0);

	uu_list_node_init(np, &np->rn_sibling_node, rc_children_pool);

	uu_list_node_init(&np->rn_notify, &np->rn_notify.rcn_list_node,
	    rc_notify_pool);

	return (np);
}
1734
/*
 * Tear down and free np.  The node must have no client or "other"
 * references and no former-node chain.  Releases composition-chain
 * holds, frees owned strings and values, and destroys the lists and
 * synchronization objects created by rc_node_alloc().
 */
void
rc_node_destroy(rc_node_t *np)
{
	int i;

	if (np->rn_flags & RC_NODE_UNREFED)
		return;				/* being handled elsewhere */

	assert(np->rn_refs == 0 && np->rn_other_refs == 0);
	assert(np->rn_former == NULL);

	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
		/* Release the holds from rc_iter_next(). */
		for (i = 0; i < COMPOSITION_DEPTH; ++i) {
			/* rn_cchain[i] may be NULL for empty snapshots. */
			if (np->rn_cchain[i] != NULL)
				rc_node_rele(np->rn_cchain[i]);
		}
	}

	if (np->rn_name != NULL)
		free((void *)np->rn_name);
	np->rn_name = NULL;
	if (np->rn_type != NULL)
		free((void *)np->rn_type);
	np->rn_type = NULL;
	if (np->rn_values != NULL)
		object_free_values(np->rn_values, np->rn_valtype,
		    np->rn_values_count, np->rn_values_size);
	np->rn_values = NULL;
	rc_node_free_fmri(np);

	if (np->rn_snaplevel != NULL)
		rc_snaplevel_rele(np->rn_snaplevel);
	np->rn_snaplevel = NULL;

	uu_list_node_fini(np, &np->rn_sibling_node, rc_children_pool);

	uu_list_node_fini(&np->rn_notify, &np->rn_notify.rcn_list_node,
	    rc_notify_pool);

	assert(uu_list_first(np->rn_children) == NULL);
	uu_list_destroy(np->rn_children);
	uu_list_destroy(np->rn_pg_notify_list);

	(void) pthread_mutex_destroy(&np->rn_lock);
	(void) pthread_cond_destroy(&np->rn_cv);

	uu_free(np);
}
1785
1786 /*
1787 * Link in a child node.
1788 *
1789 * Because of the lock ordering, cp has to already be in the hash table with
1790 * its lock dropped before we get it. To prevent anyone from noticing that
1791 * it is parentless, the creation code sets the RC_NODE_USING_PARENT. Once
1792 * we've linked it in, we release the flag.
1793 */
1794 static void
rc_node_link_child(rc_node_t * np,rc_node_t * cp)1795 rc_node_link_child(rc_node_t *np, rc_node_t *cp)
1796 {
1797 assert(!MUTEX_HELD(&np->rn_lock));
1798 assert(!MUTEX_HELD(&cp->rn_lock));
1799
1800 (void) pthread_mutex_lock(&np->rn_lock);
1801 (void) pthread_mutex_lock(&cp->rn_lock);
1802 assert(!(cp->rn_flags & RC_NODE_IN_PARENT) &&
1803 (cp->rn_flags & RC_NODE_USING_PARENT));
1804
1805 assert(rc_check_parent_child(np->rn_id.rl_type, cp->rn_id.rl_type) ==
1806 REP_PROTOCOL_SUCCESS);
1807
1808 cp->rn_parent = np;
1809 cp->rn_flags |= RC_NODE_IN_PARENT;
1810 (void) uu_list_insert_before(np->rn_children, NULL, cp);
1811 (void) rc_node_build_fmri(cp);
1812
1813 (void) pthread_mutex_unlock(&np->rn_lock);
1814
1815 rc_node_rele_flag(cp, RC_NODE_USING_PARENT);
1816 (void) pthread_mutex_unlock(&cp->rn_lock);
1817 }
1818
1819 /*
1820 * Sets the rn_parent_ref field of all the children of np to pp -- always
1821 * initially invoked as rc_node_setup_parent_ref(np, np), we then recurse.
1822 *
1823 * This is used when we mark a node RC_NODE_OLD, so that when the object and
1824 * its children are no longer referenced, they will all be deleted as a unit.
1825 */
static void
rc_node_setup_parent_ref(rc_node_t *np, rc_node_t *pp)
{
	rc_node_t *cp;

	assert(MUTEX_HELD(&np->rn_lock));

	for (cp = uu_list_first(np->rn_children); cp != NULL;
	    cp = uu_list_next(np->rn_children, cp)) {
		(void) pthread_mutex_lock(&cp->rn_lock);
		if (cp->rn_flags & RC_NODE_PARENT_REF) {
			/* Already converted; it must reference the same pp. */
			assert(cp->rn_parent_ref == pp);
		} else {
			assert(cp->rn_parent_ref == NULL);

			cp->rn_flags |= RC_NODE_PARENT_REF;
			cp->rn_parent_ref = pp;
			/*
			 * A child with outstanding references contributes
			 * an "other" hold that keeps pp alive.
			 */
			if (cp->rn_refs != 0)
				rc_node_hold_other(pp);
		}
		/*
		 * Recurse while still holding cp's lock, so the whole
		 * subtree is converted before cp can change under us.
		 */
		rc_node_setup_parent_ref(cp, pp);	/* recurse */
		(void) pthread_mutex_unlock(&cp->rn_lock);
	}
}
1850
1851 /*
1852 * Atomically replace 'np' with 'newp', with a parent of 'pp'.
1853 *
1854 * Requirements:
1855 * *no* node locks may be held.
1856 * pp must be held with RC_NODE_CHILDREN_CHANGING
1857 * newp and np must be held with RC_NODE_IN_TX
1858 * np must be marked RC_NODE_IN_PARENT, newp must not be
1859 * np must be marked RC_NODE_OLD
1860 *
1861 * Afterwards:
1862 * pp's RC_NODE_CHILDREN_CHANGING is dropped
1863 * newp and np's RC_NODE_IN_TX is dropped
1864 * newp->rn_former = np;
1865 * newp is RC_NODE_IN_PARENT, np is not.
1866 * interested notify subscribers have been notified of newp's new status.
1867 */
static void
rc_node_relink_child(rc_node_t *pp, rc_node_t *np, rc_node_t *newp)
{
	cache_bucket_t *bp;
	/*
	 * First, swap np and newp in the cache.  newp's RC_NODE_IN_TX flag
	 * keeps rc_node_update() from seeing it until we are done.
	 */
	bp = cache_hold(newp->rn_hash);
	cache_remove_unlocked(bp, np);
	cache_insert_unlocked(bp, newp);
	cache_release(bp);

	/*
	 * replace np with newp in pp's list, and attach it to newp's rn_former
	 * link.  Lock order: parent, then new child, then old child.
	 */
	(void) pthread_mutex_lock(&pp->rn_lock);
	assert(pp->rn_flags & RC_NODE_CHILDREN_CHANGING);

	(void) pthread_mutex_lock(&newp->rn_lock);
	assert(!(newp->rn_flags & RC_NODE_IN_PARENT));
	assert(newp->rn_flags & RC_NODE_IN_TX);

	(void) pthread_mutex_lock(&np->rn_lock);
	assert(np->rn_flags & RC_NODE_IN_PARENT);
	assert(np->rn_flags & RC_NODE_OLD);
	assert(np->rn_flags & RC_NODE_IN_TX);

	newp->rn_parent = pp;
	newp->rn_flags |= RC_NODE_IN_PARENT;

	/*
	 * Note that we carefully add newp before removing np -- this
	 * keeps iterators on the list from missing us.
	 */
	(void) uu_list_insert_after(pp->rn_children, np, newp);
	(void) rc_node_build_fmri(newp);
	(void) uu_list_remove(pp->rn_children, np);

	/*
	 * Retire np: no parent pointer any more; it now hangs off
	 * newp's rn_former chain.
	 */
	newp->rn_former = np;
	np->rn_parent = NULL;
	np->rn_flags &= ~RC_NODE_IN_PARENT;
	np->rn_flags |= RC_NODE_ON_FORMER;

	/* Let interested notify subscribers know about newp. */
	rc_notify_insert_node(newp);

	rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
	(void) pthread_mutex_unlock(&pp->rn_lock);
	/*
	 * NOTE(review): this also drops RC_NODE_USING_PARENT on newp,
	 * which the block comment above does not mention -- presumably
	 * set by newp's creator; confirm against the callers.
	 */
	rc_node_rele_flag(newp, RC_NODE_USING_PARENT | RC_NODE_IN_TX);
	(void) pthread_mutex_unlock(&newp->rn_lock);
	/*
	 * Arrange for np and its subtree to be torn down as a unit once
	 * their references drain (see rc_node_setup_parent_ref()).
	 */
	rc_node_setup_parent_ref(np, np);
	rc_node_rele_flag(np, RC_NODE_IN_TX);
	(void) pthread_mutex_unlock(&np->rn_lock);
}
1926
1927 /*
1928 * makes sure a node with lookup 'nip', name 'name', and parent 'pp' exists.
1929 * 'cp' is used (and returned) if the node does not yet exist. If it does
1930 * exist, 'cp' is freed, and the existent node is returned instead.
1931 */
rc_node_t *
rc_node_setup(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
    rc_node_t *pp)
{
	rc_node_t *np;
	cache_bucket_t *bp;
	uint32_t h = rc_node_hash(nip);

	/* cp must be a fresh, unreferenced node supplied by the caller. */
	assert(cp->rn_refs == 0);

	bp = cache_hold(h);
	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
		cache_release(bp);

		/*
		 * Someone beat us to it -- make sure the existing node
		 * matches our expectations before handing it back.
		 */
		(void) pthread_mutex_lock(&np->rn_lock);
		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
			assert(np->rn_parent == pp);
			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
			assert(strcmp(np->rn_name, name) == 0);
			assert(np->rn_type == NULL);
			assert(np->rn_flags & RC_NODE_IN_PARENT);
			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
		}
		(void) pthread_mutex_unlock(&np->rn_lock);

		/* The caller's pre-allocated node is no longer needed. */
		rc_node_destroy(cp);
		return (np);
	}

	/*
	 * No one is there -- setup & install the new node.
	 */
	np = cp;
	rc_node_hold(np);
	np->rn_id = *nip;
	np->rn_hash = h;
	/*
	 * NOTE(review): strdup()'s return value is not checked here
	 * (compare rc_node_setup_pg()); an allocation failure would leave
	 * rn_name NULL -- confirm callers tolerate that.
	 */
	np->rn_name = strdup(name);

	/* Keeps the parentless node invisible until it is linked in. */
	np->rn_flags |= RC_NODE_USING_PARENT;

	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE) {
#if COMPOSITION_DEPTH == 2
		/* An instance composes with its parent service. */
		np->rn_cchain[0] = np;
		np->rn_cchain[1] = pp;
#else
#error This code must be updated.
#endif
	}

	cache_insert_unlocked(bp, np);
	cache_release(bp);		/* we are now visible */

	rc_node_link_child(pp, np);

	return (np);
}
1991
1992 /*
1993 * makes sure a snapshot with lookup 'nip', name 'name', and parent 'pp' exists.
1994 * 'cp' is used (and returned) if the node does not yet exist. If it does
1995 * exist, 'cp' is freed, and the existent node is returned instead.
1996 */
rc_node_t *
rc_node_setup_snapshot(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
    uint32_t snap_id, rc_node_t *pp)
{
	rc_node_t *np;
	cache_bucket_t *bp;
	uint32_t h = rc_node_hash(nip);

	/* cp must be a fresh, unreferenced node supplied by the caller. */
	assert(cp->rn_refs == 0);

	bp = cache_hold(h);
	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
		cache_release(bp);

		/*
		 * Someone beat us to it -- make sure the existing node
		 * matches our expectations before handing it back.
		 */
		(void) pthread_mutex_lock(&np->rn_lock);
		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
			assert(np->rn_parent == pp);
			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
			assert(strcmp(np->rn_name, name) == 0);
			assert(np->rn_type == NULL);
			assert(np->rn_flags & RC_NODE_IN_PARENT);
			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
		}
		(void) pthread_mutex_unlock(&np->rn_lock);

		/* The caller's pre-allocated node is no longer needed. */
		rc_node_destroy(cp);
		return (np);
	}

	/*
	 * No one is there -- create a new node.
	 */
	np = cp;
	rc_node_hold(np);
	np->rn_id = *nip;
	np->rn_hash = h;
	/*
	 * NOTE(review): strdup()'s return value is not checked here
	 * (compare rc_node_setup_pg()) -- confirm callers tolerate a
	 * NULL rn_name on allocation failure.
	 */
	np->rn_name = strdup(name);
	np->rn_snapshot_id = snap_id;

	/* Keeps the parentless node invisible until it is linked in. */
	np->rn_flags |= RC_NODE_USING_PARENT;

	cache_insert_unlocked(bp, np);
	cache_release(bp);		/* we are now visible */

	rc_node_link_child(pp, np);

	return (np);
}
2048
2049 /*
2050 * makes sure a snaplevel with lookup 'nip' and parent 'pp' exists. 'cp' is
2051 * used (and returned) if the node does not yet exist. If it does exist, 'cp'
2052 * is freed, and the existent node is returned instead.
2053 */
rc_node_t *
rc_node_setup_snaplevel(rc_node_t *cp, rc_node_lookup_t *nip,
    rc_snaplevel_t *lvl, rc_node_t *pp)
{
	rc_node_t *np;
	cache_bucket_t *bp;
	uint32_t h = rc_node_hash(nip);

	/* cp must be a fresh, unreferenced node supplied by the caller. */
	assert(cp->rn_refs == 0);

	bp = cache_hold(h);
	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
		cache_release(bp);

		/*
		 * Someone beat us to it -- make sure the existing node
		 * matches our expectations.  Snaplevels carry no name.
		 */
		(void) pthread_mutex_lock(&np->rn_lock);
		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
			assert(np->rn_parent == pp);
			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
			assert(np->rn_name == NULL);
			assert(np->rn_type == NULL);
			assert(np->rn_flags & RC_NODE_IN_PARENT);
			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
		}
		(void) pthread_mutex_unlock(&np->rn_lock);

		/* The caller's pre-allocated node is no longer needed. */
		rc_node_destroy(cp);
		return (np);
	}

	/*
	 * No one is there -- create a new node.
	 */
	np = cp;
	rc_node_hold(np);	/* released in snapshot_fill_children() */
	np->rn_id = *nip;
	np->rn_hash = h;

	/* The node keeps a reference to its rc_snaplevel_t. */
	rc_snaplevel_hold(lvl);
	np->rn_snaplevel = lvl;

	/* Keeps the parentless node invisible until it is linked in. */
	np->rn_flags |= RC_NODE_USING_PARENT;

	cache_insert_unlocked(bp, np);
	cache_release(bp);		/* we are now visible */

	/* Add this snaplevel to the snapshot's composition chain. */
	assert(pp->rn_cchain[lvl->rsl_level_num - 1] == NULL);
	pp->rn_cchain[lvl->rsl_level_num - 1] = np;

	rc_node_link_child(pp, np);

	return (np);
}
2110
2111 /*
2112 * Returns NULL if strdup() fails.
2113 */
2114 rc_node_t *
rc_node_setup_pg(rc_node_t * cp,rc_node_lookup_t * nip,const char * name,const char * type,uint32_t flags,uint32_t gen_id,rc_node_t * pp)2115 rc_node_setup_pg(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
2116 const char *type, uint32_t flags, uint32_t gen_id, rc_node_t *pp)
2117 {
2118 rc_node_t *np;
2119 cache_bucket_t *bp;
2120
2121 uint32_t h = rc_node_hash(nip);
2122 bp = cache_hold(h);
2123 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2124 cache_release(bp);
2125
2126 /*
2127 * make sure it matches our expectations (don't check
2128 * the generation number or parent, since someone could
2129 * have gotten a transaction through while we weren't
2130 * looking)
2131 */
2132 (void) pthread_mutex_lock(&np->rn_lock);
2133 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2134 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2135 assert(strcmp(np->rn_name, name) == 0);
2136 assert(strcmp(np->rn_type, type) == 0);
2137 assert(np->rn_pgflags == flags);
2138 assert(np->rn_flags & RC_NODE_IN_PARENT);
2139 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2140 }
2141 (void) pthread_mutex_unlock(&np->rn_lock);
2142
2143 rc_node_destroy(cp);
2144 return (np);
2145 }
2146
2147 np = cp;
2148 rc_node_hold(np); /* released in fill_pg_callback() */
2149 np->rn_id = *nip;
2150 np->rn_hash = h;
2151 np->rn_name = strdup(name);
2152 if (np->rn_name == NULL) {
2153 rc_node_rele(np);
2154 return (NULL);
2155 }
2156 np->rn_type = strdup(type);
2157 if (np->rn_type == NULL) {
2158 free((void *)np->rn_name);
2159 rc_node_rele(np);
2160 return (NULL);
2161 }
2162 np->rn_pgflags = flags;
2163 np->rn_gen_id = gen_id;
2164
2165 np->rn_flags |= RC_NODE_USING_PARENT;
2166
2167 cache_insert_unlocked(bp, np);
2168 cache_release(bp); /* we are now visible */
2169
2170 rc_node_link_child(pp, np);
2171
2172 return (np);
2173 }
2174
2175 #if COMPOSITION_DEPTH == 2
2176 /*
2177 * Initialize a "composed property group" which represents the composition of
2178 * property groups pg1 & pg2. It is ephemeral: once created & returned for an
2179 * ITER_READ request, keeping it out of cache_hash and any child lists
2180 * prevents it from being looked up. Operations besides iteration are passed
2181 * through to pg1.
2182 *
2183 * pg1 & pg2 should be held before entering this function. They will be
2184 * released in rc_node_destroy().
2185 */
2186 static int
rc_node_setup_cpg(rc_node_t * cpg,rc_node_t * pg1,rc_node_t * pg2)2187 rc_node_setup_cpg(rc_node_t *cpg, rc_node_t *pg1, rc_node_t *pg2)
2188 {
2189 if (strcmp(pg1->rn_type, pg2->rn_type) != 0)
2190 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2191
2192 cpg->rn_id.rl_type = REP_PROTOCOL_ENTITY_CPROPERTYGRP;
2193 cpg->rn_name = strdup(pg1->rn_name);
2194 if (cpg->rn_name == NULL)
2195 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2196
2197 cpg->rn_cchain[0] = pg1;
2198 cpg->rn_cchain[1] = pg2;
2199
2200 return (REP_PROTOCOL_SUCCESS);
2201 }
2202 #else
2203 #error This code must be updated.
2204 #endif
2205
2206 /*
2207 * Fails with _NO_RESOURCES.
2208 */
2209 int
rc_node_create_property(rc_node_t * pp,rc_node_lookup_t * nip,const char * name,rep_protocol_value_type_t type,const char * vals,size_t count,size_t size)2210 rc_node_create_property(rc_node_t *pp, rc_node_lookup_t *nip,
2211 const char *name, rep_protocol_value_type_t type,
2212 const char *vals, size_t count, size_t size)
2213 {
2214 rc_node_t *np;
2215 cache_bucket_t *bp;
2216
2217 uint32_t h = rc_node_hash(nip);
2218 bp = cache_hold(h);
2219 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2220 cache_release(bp);
2221 /*
2222 * make sure it matches our expectations
2223 */
2224 (void) pthread_mutex_lock(&np->rn_lock);
2225 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2226 assert(np->rn_parent == pp);
2227 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2228 assert(strcmp(np->rn_name, name) == 0);
2229 assert(np->rn_valtype == type);
2230 assert(np->rn_values_count == count);
2231 assert(np->rn_values_size == size);
2232 assert(vals == NULL ||
2233 memcmp(np->rn_values, vals, size) == 0);
2234 assert(np->rn_flags & RC_NODE_IN_PARENT);
2235 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2236 }
2237 rc_node_rele_locked(np);
2238 object_free_values(vals, type, count, size);
2239 return (REP_PROTOCOL_SUCCESS);
2240 }
2241
2242 /*
2243 * No one is there -- create a new node.
2244 */
2245 np = rc_node_alloc();
2246 if (np == NULL) {
2247 cache_release(bp);
2248 object_free_values(vals, type, count, size);
2249 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2250 }
2251 np->rn_id = *nip;
2252 np->rn_hash = h;
2253 np->rn_name = strdup(name);
2254 if (np->rn_name == NULL) {
2255 cache_release(bp);
2256 object_free_values(vals, type, count, size);
2257 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2258 }
2259
2260 np->rn_valtype = type;
2261 np->rn_values = vals;
2262 np->rn_values_count = count;
2263 np->rn_values_size = size;
2264
2265 np->rn_flags |= RC_NODE_USING_PARENT;
2266
2267 cache_insert_unlocked(bp, np);
2268 cache_release(bp); /* we are now visible */
2269
2270 rc_node_link_child(pp, np);
2271
2272 return (REP_PROTOCOL_SUCCESS);
2273 }
2274
2275 /*
2276 * This function implements a decision table to determine the event ID for
2277 * changes to the enabled (SCF_PROPERTY_ENABLED) property. The event ID is
2278 * determined by the value of the first property in the command specified
2279 * by cmd_no and the name of the property group. Here is the decision
2280 * table:
2281 *
2282 * Property Group Name
2283 * Property ------------------------------------------
2284 * Value SCF_PG_GENERAL SCF_PG_GENERAL_OVR
2285 * -------- -------------- ------------------
2286 * "0" ADT_smf_disable ADT_smf_tmp_disable
2287 * "1" ADT_smf_enable ADT_smf_tmp_enable
2288 *
2289 * This function is called by special_property_event through a function
2290 * pointer in the special_props_list array.
2291 *
2292 * Since the ADT_smf_* symbols may not be defined in the build machine's
2293 * include files, this function is not compiled when doing native builds.
2294 */
2295 #ifndef NATIVE_BUILD
2296 static int
general_enable_id(tx_commit_data_t * tx_data,size_t cmd_no,const char * pg,au_event_t * event_id)2297 general_enable_id(tx_commit_data_t *tx_data, size_t cmd_no, const char *pg,
2298 au_event_t *event_id)
2299 {
2300 const char *value;
2301 uint32_t nvalues;
2302 int enable;
2303
2304 /*
2305 * First, check property value.
2306 */
2307 if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
2308 return (-1);
2309 if (nvalues == 0)
2310 return (-1);
2311 if (tx_cmd_value(tx_data, cmd_no, 0, &value) != REP_PROTOCOL_SUCCESS)
2312 return (-1);
2313 if (strcmp(value, "0") == 0) {
2314 enable = 0;
2315 } else if (strcmp(value, "1") == 0) {
2316 enable = 1;
2317 } else {
2318 return (-1);
2319 }
2320
2321 /*
2322 * Now check property group name.
2323 */
2324 if (strcmp(pg, SCF_PG_GENERAL) == 0) {
2325 *event_id = enable ? ADT_smf_enable : ADT_smf_disable;
2326 return (0);
2327 } else if (strcmp(pg, SCF_PG_GENERAL_OVR) == 0) {
2328 *event_id = enable ? ADT_smf_tmp_enable : ADT_smf_tmp_disable;
2329 return (0);
2330 }
2331 return (-1);
2332 }
2333 #endif /* NATIVE_BUILD */
2334
2335 /*
2336 * This function compares two audit_special_prop_item_t structures
2337 * represented by item1 and item2. It returns an integer greater than 0 if
2338 * item1 is greater than item2. It returns 0 if they are equal and an
2339 * integer less than 0 if item1 is less than item2. api_prop_name and
2340 * api_pg_name are the key fields for sorting.
2341 *
2342 * This function is suitable for calls to bsearch(3C) and qsort(3C).
2343 */
2344 static int
special_prop_compare(const void * item1,const void * item2)2345 special_prop_compare(const void *item1, const void *item2)
2346 {
2347 const audit_special_prop_item_t *a = (audit_special_prop_item_t *)item1;
2348 const audit_special_prop_item_t *b = (audit_special_prop_item_t *)item2;
2349 int r;
2350
2351 r = strcmp(a->api_prop_name, b->api_prop_name);
2352 if (r == 0) {
2353 /*
2354 * Primary keys are the same, so check the secondary key.
2355 */
2356 r = strcmp(a->api_pg_name, b->api_pg_name);
2357 }
2358 return (r);
2359 }
2360
int
rc_node_init(void)
{
	rc_node_t *np;
	cache_bucket_t *bp;

	/* Create the uu_list pools that back all rc_node list linkage. */
	rc_children_pool = uu_list_pool_create("rc_children_pool",
	    sizeof (rc_node_t), offsetof(rc_node_t, rn_sibling_node),
	    NULL, UU_LIST_POOL_DEBUG);

	rc_pg_notify_pool = uu_list_pool_create("rc_pg_notify_pool",
	    sizeof (rc_node_pg_notify_t),
	    offsetof(rc_node_pg_notify_t, rnpn_node),
	    NULL, UU_LIST_POOL_DEBUG);

	rc_notify_pool = uu_list_pool_create("rc_notify_pool",
	    sizeof (rc_notify_t), offsetof(rc_notify_t, rcn_list_node),
	    NULL, UU_LIST_POOL_DEBUG);

	rc_notify_info_pool = uu_list_pool_create("rc_notify_info_pool",
	    sizeof (rc_notify_info_t),
	    offsetof(rc_notify_info_t, rni_list_node),
	    NULL, UU_LIST_POOL_DEBUG);

	/* Any allocation failure during startup is fatal. */
	if (rc_children_pool == NULL || rc_pg_notify_pool == NULL ||
	    rc_notify_pool == NULL || rc_notify_info_pool == NULL)
		uu_die("out of memory");

	rc_notify_list = uu_list_create(rc_notify_pool,
	    &rc_notify_list, 0);

	rc_notify_info_list = uu_list_create(rc_notify_info_pool,
	    &rc_notify_info_list, 0);

	if (rc_notify_list == NULL || rc_notify_info_list == NULL)
		uu_die("out of memory");

	/*
	 * Sort the special_props_list array so that it can be searched
	 * with bsearch(3C).
	 *
	 * The special_props_list array is not compiled into the native
	 * build code, so there is no need to call qsort if NATIVE_BUILD is
	 * defined.
	 */
#ifndef NATIVE_BUILD
	qsort(special_props_list, SPECIAL_PROP_COUNT,
	    sizeof (special_props_list[0]), special_prop_compare);
#endif	/* NATIVE_BUILD */

	/* Create the root of the entity tree: the "localhost" scope. */
	if ((np = rc_node_alloc()) == NULL)
		uu_die("out of memory");

	rc_node_hold(np);
	np->rn_id.rl_type = REP_PROTOCOL_ENTITY_SCOPE;
	np->rn_id.rl_backend = BACKEND_TYPE_NORMAL;
	np->rn_hash = rc_node_hash(&np->rn_id);
	/*
	 * NOTE(review): a string literal, not strdup() -- presumably safe
	 * because the scope node lives for the life of the process and is
	 * never destroyed; confirm rn_name is never freed for this node.
	 */
	np->rn_name = "localhost";

	bp = cache_hold(np->rn_hash);
	cache_insert_unlocked(bp, np);
	cache_release(bp);

	rc_scope = np;
	return (1);
}
2427
2428 /*
2429 * Fails with
2430 * _INVALID_TYPE - type is invalid
2431 * _TYPE_MISMATCH - np doesn't carry children of type type
2432 * _DELETED - np has been deleted
2433 * _NO_RESOURCES
2434 */
static int
rc_node_fill_children(rc_node_t *np, uint32_t type)
{
	int rc;

	assert(MUTEX_HELD(&np->rn_lock));

	/* np must be an entity that can carry children of this type. */
	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
	    REP_PROTOCOL_SUCCESS)
		return (rc);

	/* Serialize against other child-list manipulations. */
	if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING))
		return (REP_PROTOCOL_FAIL_DELETED);

	/* Children already loaded -- nothing to do. */
	if (np->rn_flags & RC_NODE_HAS_CHILDREN) {
		rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
		return (REP_PROTOCOL_SUCCESS);
	}

	/*
	 * Drop rn_lock across the object-layer load; the
	 * RC_NODE_CHILDREN_CHANGING flag keeps others out meanwhile.
	 */
	(void) pthread_mutex_unlock(&np->rn_lock);
	rc = object_fill_children(np);
	(void) pthread_mutex_lock(&np->rn_lock);

	if (rc == REP_PROTOCOL_SUCCESS) {
		np->rn_flags |= RC_NODE_HAS_CHILDREN;
	}
	rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);

	return (rc);
}
2465
2466 /*
2467 * Returns
2468 * _INVALID_TYPE - type is invalid
2469 * _TYPE_MISMATCH - np doesn't carry children of type type
2470 * _DELETED - np has been deleted
2471 * _NO_RESOURCES
2472 * _SUCCESS - if *cpp is not NULL, it is held
2473 */
2474 static int
rc_node_find_named_child(rc_node_t * np,const char * name,uint32_t type,rc_node_t ** cpp)2475 rc_node_find_named_child(rc_node_t *np, const char *name, uint32_t type,
2476 rc_node_t **cpp)
2477 {
2478 int ret;
2479 rc_node_t *cp;
2480
2481 assert(MUTEX_HELD(&np->rn_lock));
2482 assert(np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP);
2483
2484 ret = rc_node_fill_children(np, type);
2485 if (ret != REP_PROTOCOL_SUCCESS)
2486 return (ret);
2487
2488 for (cp = uu_list_first(np->rn_children);
2489 cp != NULL;
2490 cp = uu_list_next(np->rn_children, cp)) {
2491 if (cp->rn_id.rl_type == type && strcmp(cp->rn_name, name) == 0)
2492 break;
2493 }
2494
2495 if (cp != NULL)
2496 rc_node_hold(cp);
2497 *cpp = cp;
2498
2499 return (REP_PROTOCOL_SUCCESS);
2500 }
2501
2502 static int rc_node_parent(rc_node_t *, rc_node_t **);
2503
2504 /*
2505 * Returns
2506 * _INVALID_TYPE - type is invalid
2507 * _DELETED - np or an ancestor has been deleted
2508 * _NOT_FOUND - no ancestor of specified type exists
2509 * _SUCCESS - *app is held
2510 */
2511 static int
rc_node_find_ancestor(rc_node_t * np,uint32_t type,rc_node_t ** app)2512 rc_node_find_ancestor(rc_node_t *np, uint32_t type, rc_node_t **app)
2513 {
2514 int ret;
2515 rc_node_t *parent, *np_orig;
2516
2517 if (type >= REP_PROTOCOL_ENTITY_MAX)
2518 return (REP_PROTOCOL_FAIL_INVALID_TYPE);
2519
2520 np_orig = np;
2521
2522 while (np->rn_id.rl_type > type) {
2523 ret = rc_node_parent(np, &parent);
2524 if (np != np_orig)
2525 rc_node_rele(np);
2526 if (ret != REP_PROTOCOL_SUCCESS)
2527 return (ret);
2528 np = parent;
2529 }
2530
2531 if (np->rn_id.rl_type == type) {
2532 *app = parent;
2533 return (REP_PROTOCOL_SUCCESS);
2534 }
2535
2536 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2537 }
2538
2539 #ifndef NATIVE_BUILD
2540 /*
2541 * If the propname property exists in pg, and it is of type string, add its
2542 * values as authorizations to pcp. pg must not be locked on entry, and it is
2543 * returned unlocked. Returns
2544 * _DELETED - pg was deleted
2545 * _NO_RESOURCES
2546 * _NOT_FOUND - pg has no property named propname
2547 * _SUCCESS
2548 */
static int
perm_add_pg_prop_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
{
	rc_node_t *prop;
	int result;

	uint_t count;
	const char *cp;

	assert(!MUTEX_HELD(&pg->rn_lock));
	assert(pg->rn_id.rl_type == REP_PROTOCOL_ENTITY_PROPERTYGRP);

	/* Look up (and hold) the named property within pg. */
	(void) pthread_mutex_lock(&pg->rn_lock);
	result = rc_node_find_named_child(pg, propname,
	    REP_PROTOCOL_ENTITY_PROPERTY, &prop);
	(void) pthread_mutex_unlock(&pg->rn_lock);
	if (result != REP_PROTOCOL_SUCCESS) {
		switch (result) {
		case REP_PROTOCOL_FAIL_DELETED:
		case REP_PROTOCOL_FAIL_NO_RESOURCES:
			return (result);

		case REP_PROTOCOL_FAIL_INVALID_TYPE:
		case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
		default:
			/* These cannot arise from a valid pg node. */
			bad_error("rc_node_find_named_child", result);
		}
	}

	if (prop == NULL)
		return (REP_PROTOCOL_FAIL_NOT_FOUND);

	/* rn_valtype is immutable, so no locking. */
	if (prop->rn_valtype != REP_PROTOCOL_TYPE_STRING) {
		/* Non-string property: nothing to add, but not an error. */
		rc_node_rele(prop);
		return (REP_PROTOCOL_SUCCESS);
	}

	(void) pthread_mutex_lock(&prop->rn_lock);
	/*
	 * rn_values holds consecutive NUL-terminated strings; advance
	 * past each terminator to reach the next value.
	 */
	for (count = prop->rn_values_count, cp = prop->rn_values;
	    count > 0;
	    --count) {
		/*
		 * Instance-level pgs yield instance authorizations,
		 * otherwise service-level ones.
		 */
		result = perm_add_enabling_type(pcp, cp,
		    (pg->rn_id.rl_ids[ID_INSTANCE]) ? PC_AUTH_INST :
		    PC_AUTH_SVC);
		if (result != REP_PROTOCOL_SUCCESS)
			break;

		cp = strchr(cp, '\0') + 1;
	}

	rc_node_rele_locked(prop);

	return (result);
}
2604
2605 /*
2606 * Assuming that ent is a service or instance node, if the pgname property
2607 * group has type pgtype, and it has a propname property with string type, add
2608 * its values as authorizations to pcp. If pgtype is NULL, it is not checked.
2609 * Returns
2610 * _SUCCESS
2611 * _DELETED - ent was deleted
2612 * _NO_RESOURCES - no resources
2613 * _NOT_FOUND - ent does not have pgname pg or propname property
2614 */
static int
perm_add_ent_prop_values(permcheck_t *pcp, rc_node_t *ent, const char *pgname,
    const char *pgtype, const char *propname)
{
	int r;
	rc_node_t *pg;

	assert(!MUTEX_HELD(&ent->rn_lock));

	/* Look up (and hold) the named property group under ent. */
	(void) pthread_mutex_lock(&ent->rn_lock);
	r = rc_node_find_named_child(ent, pgname,
	    REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
	(void) pthread_mutex_unlock(&ent->rn_lock);

	switch (r) {
	case REP_PROTOCOL_SUCCESS:
		break;

	case REP_PROTOCOL_FAIL_DELETED:
	case REP_PROTOCOL_FAIL_NO_RESOURCES:
		return (r);

	default:
		bad_error("rc_node_find_named_child", r);
	}

	if (pg == NULL)
		return (REP_PROTOCOL_FAIL_NOT_FOUND);

	/* If a pg type was requested, the pg must match it exactly. */
	if (pgtype == NULL || strcmp(pg->rn_type, pgtype) == 0) {
		r = perm_add_pg_prop_values(pcp, pg, propname);
		switch (r) {
		case REP_PROTOCOL_FAIL_DELETED:
			/*
			 * The pg vanished under us; to the caller that is
			 * indistinguishable from "not found".
			 */
			r = REP_PROTOCOL_FAIL_NOT_FOUND;
			break;

		case REP_PROTOCOL_FAIL_NO_RESOURCES:
		case REP_PROTOCOL_SUCCESS:
		case REP_PROTOCOL_FAIL_NOT_FOUND:
			break;

		default:
			bad_error("perm_add_pg_prop_values", r);
		}
	}

	rc_node_rele(pg);

	return (r);
}
2665
2666 /*
2667 * If pg has a property named propname, and is string typed, add its values as
2668 * authorizations to pcp. If pg has no such property, and its parent is an
2669 * instance, walk up to the service and try doing the same with the property
2670 * of the same name from the property group of the same name. Returns
2671 * _SUCCESS
2672 * _NO_RESOURCES
2673 * _DELETED - pg (or an ancestor) was deleted
2674 */
static int
perm_add_enabling_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
{
	int r;
	char pgname[REP_PROTOCOL_NAME_LEN + 1];
	rc_node_t *svc;
	size_t sz;

	/* First try the property directly on pg. */
	r = perm_add_pg_prop_values(pcp, pg, propname);

	if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
		return (r);

	assert(!MUTEX_HELD(&pg->rn_lock));

	/* A service-level pg has nowhere further to fall back to. */
	if (pg->rn_id.rl_ids[ID_INSTANCE] == 0)
		return (REP_PROTOCOL_SUCCESS);

	sz = strlcpy(pgname, pg->rn_name, sizeof (pgname));
	assert(sz < sizeof (pgname));

	/*
	 * If pg is a child of an instance or snapshot, we want to compose the
	 * authorization property with the service's (if it exists).  The
	 * snapshot case applies only to read_authorization.  In all other
	 * cases, the pg's parent will be the instance.
	 */
	r = rc_node_find_ancestor(pg, REP_PROTOCOL_ENTITY_SERVICE, &svc);
	if (r != REP_PROTOCOL_SUCCESS) {
		assert(r == REP_PROTOCOL_FAIL_DELETED);
		return (r);
	}
	assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);

	/* Retry with the same-named pg/property on the service. */
	r = perm_add_ent_prop_values(pcp, svc, pgname, NULL, propname);

	rc_node_rele(svc);	/* drops the hold from find_ancestor */

	/* Absence at both levels is not an error. */
	if (r == REP_PROTOCOL_FAIL_NOT_FOUND)
		r = REP_PROTOCOL_SUCCESS;

	return (r);
}
2718
2719 /*
2720 * Call perm_add_enabling_values() for the "action_authorization" property of
2721 * the "general" property group of inst. Returns
2722 * _DELETED - inst (or an ancestor) was deleted
2723 * _NO_RESOURCES
2724 * _SUCCESS
2725 */
2726 static int
perm_add_inst_action_auth(permcheck_t * pcp,rc_node_t * inst)2727 perm_add_inst_action_auth(permcheck_t *pcp, rc_node_t *inst)
2728 {
2729 int r;
2730 rc_node_t *svc;
2731
2732 assert(inst->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
2733
2734 r = perm_add_ent_prop_values(pcp, inst, AUTH_PG_GENERAL,
2735 AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2736
2737 if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2738 return (r);
2739
2740 r = rc_node_parent(inst, &svc);
2741 if (r != REP_PROTOCOL_SUCCESS) {
2742 assert(r == REP_PROTOCOL_FAIL_DELETED);
2743 return (r);
2744 }
2745
2746 r = perm_add_ent_prop_values(pcp, svc, AUTH_PG_GENERAL,
2747 AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2748
2749 return (r == REP_PROTOCOL_FAIL_NOT_FOUND ? REP_PROTOCOL_SUCCESS : r);
2750 }
2751 #endif /* NATIVE_BUILD */
2752
2753 void
rc_node_ptr_init(rc_node_ptr_t * out)2754 rc_node_ptr_init(rc_node_ptr_t *out)
2755 {
2756 out->rnp_node = NULL;
2757 out->rnp_auth_string = NULL;
2758 out->rnp_authorized = RC_AUTH_UNKNOWN;
2759 out->rnp_deleted = 0;
2760 }
2761
2762 void
rc_node_ptr_free_mem(rc_node_ptr_t * npp)2763 rc_node_ptr_free_mem(rc_node_ptr_t *npp)
2764 {
2765 if (npp->rnp_auth_string != NULL) {
2766 free((void *)npp->rnp_auth_string);
2767 npp->rnp_auth_string = NULL;
2768 }
2769 }
2770
static void
rc_node_assign(rc_node_ptr_t *out, rc_node_t *val)
{
	rc_node_t *cur = out->rnp_node;
	/* Hold the new node before letting go of the old one. */
	if (val != NULL)
		rc_node_hold(val);
	out->rnp_node = val;
	if (cur != NULL) {
		NODE_LOCK(cur);

		/*
		 * Register the ephemeral reference created by reading
		 * out->rnp_node into cur.  Note that the persistent
		 * reference we're destroying is locked by the client
		 * layer.
		 */
		rc_node_hold_ephemeral_locked(cur);

		rc_node_rele_locked(cur);
	}
	/* Any cached authorization state applies to the old node only. */
	out->rnp_authorized = RC_AUTH_UNKNOWN;
	rc_node_ptr_free_mem(out);
	out->rnp_deleted = 0;
}
2795
void
rc_node_clear(rc_node_ptr_t *out, int deleted)
{
	/*
	 * Empty the pointer, then record (after rc_node_assign() has
	 * reset it) whether it is empty because its node was deleted.
	 */
	rc_node_assign(out, NULL);
	out->rnp_deleted = deleted;
}
2802
void
rc_node_ptr_assign(rc_node_ptr_t *out, const rc_node_ptr_t *val)
{
	/* Copy val's node reference into out (takes a new hold). */
	rc_node_assign(out, val->rnp_node);
}
2808
2809 /*
2810 * rc_node_check()/RC_NODE_CHECK()
2811 * generic "entry" checks, run before the use of an rc_node pointer.
2812 *
2813 * Fails with
2814 * _NOT_SET
2815 * _DELETED
2816 */
2817 static int
rc_node_check_and_lock(rc_node_t * np)2818 rc_node_check_and_lock(rc_node_t *np)
2819 {
2820 int result = REP_PROTOCOL_SUCCESS;
2821 if (np == NULL)
2822 return (REP_PROTOCOL_FAIL_NOT_SET);
2823
2824 (void) pthread_mutex_lock(&np->rn_lock);
2825 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2826 result = REP_PROTOCOL_FAIL_DELETED;
2827 (void) pthread_mutex_unlock(&np->rn_lock);
2828 }
2829
2830 return (result);
2831 }
2832
2833 /*
2834 * Fails with
2835 * _NOT_SET - ptr is reset
2836 * _DELETED - node has been deleted
2837 */
2838 static rc_node_t *
rc_node_ptr_check_and_lock(rc_node_ptr_t * npp,int * res)2839 rc_node_ptr_check_and_lock(rc_node_ptr_t *npp, int *res)
2840 {
2841 rc_node_t *np = npp->rnp_node;
2842 if (np == NULL) {
2843 if (npp->rnp_deleted)
2844 *res = REP_PROTOCOL_FAIL_DELETED;
2845 else
2846 *res = REP_PROTOCOL_FAIL_NOT_SET;
2847 return (NULL);
2848 }
2849
2850 (void) pthread_mutex_lock(&np->rn_lock);
2851 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2852 (void) pthread_mutex_unlock(&np->rn_lock);
2853 rc_node_clear(npp, 1);
2854 *res = REP_PROTOCOL_FAIL_DELETED;
2855 return (NULL);
2856 }
2857 return (np);
2858 }
2859
/*
 * Validate n; on failure, return the error from the calling function.
 * On success, n->rn_lock is held.
 */
#define RC_NODE_CHECK_AND_LOCK(n) { \
	int rc__res; \
	if ((rc__res = rc_node_check_and_lock(n)) != REP_PROTOCOL_SUCCESS) \
		return (rc__res); \
}

/*
 * Validate n without leaving its lock held.
 */
#define RC_NODE_CHECK(n) { \
	RC_NODE_CHECK_AND_LOCK(n); \
	(void) pthread_mutex_unlock(&(n)->rn_lock); \
}

/*
 * Validate n and take a reference hold on it; lock is dropped before
 * continuing.
 */
#define RC_NODE_CHECK_AND_HOLD(n) { \
	RC_NODE_CHECK_AND_LOCK(n); \
	rc_node_hold_locked(n); \
	(void) pthread_mutex_unlock(&(n)->rn_lock); \
}

/*
 * Fetch npp's node into np and validate it; on failure, return the
 * error.  On success, np->rn_lock is held.
 */
#define RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp) { \
	int rc__res; \
	if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == NULL) \
		return (rc__res); \
}

/*
 * As above, but free mem (if non-NULL) before returning on failure --
 * for callers holding an allocation (e.g. an audit auth string).
 */
#define RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, mem) { \
	int rc__res; \
	if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == \
	    NULL) { \
		if ((mem) != NULL) \
			free((mem)); \
		return (rc__res); \
	} \
}

/*
 * Fetch and validate npp's node without leaving its lock held.
 */
#define RC_NODE_PTR_GET_CHECK(np, npp) { \
	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \
	(void) pthread_mutex_unlock(&(np)->rn_lock); \
}

/*
 * Fetch, validate, and take a reference hold on npp's node; lock is
 * dropped before continuing.
 */
#define RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp) { \
	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \
	rc_node_hold_locked(np); \
	(void) pthread_mutex_unlock(&(np)->rn_lock); \
}

/*
 * Take a flag hold (see rc_node_hold_flag()) on np, which must be
 * locked and not DEAD; if the node dies while waiting, unlock and
 * return _DELETED.
 */
#define HOLD_FLAG_OR_RETURN(np, flag) { \
	assert(MUTEX_HELD(&(np)->rn_lock)); \
	assert(!((np)->rn_flags & RC_NODE_DEAD)); \
	if (!rc_node_hold_flag((np), flag)) { \
		(void) pthread_mutex_unlock(&(np)->rn_lock); \
		return (REP_PROTOCOL_FAIL_DELETED); \
	} \
}

/*
 * As HOLD_FLAG_OR_RETURN, but for a node reached through npp: on
 * failure, also reset npp (marking it deleted) and free mem.
 */
#define HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, flag, mem) { \
	assert(MUTEX_HELD(&(np)->rn_lock)); \
	if (!rc_node_hold_flag((np), flag)) { \
		(void) pthread_mutex_unlock(&(np)->rn_lock); \
		assert((np) == (npp)->rnp_node); \
		rc_node_clear(npp, 1); \
		if ((mem) != NULL) \
			free((mem)); \
		return (REP_PROTOCOL_FAIL_DELETED); \
	} \
}
2924
2925 int
rc_local_scope(uint32_t type,rc_node_ptr_t * out)2926 rc_local_scope(uint32_t type, rc_node_ptr_t *out)
2927 {
2928 if (type != REP_PROTOCOL_ENTITY_SCOPE) {
2929 rc_node_clear(out, 0);
2930 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2931 }
2932
2933 /*
2934 * the main scope never gets destroyed
2935 */
2936 rc_node_assign(out, rc_scope);
2937
2938 return (REP_PROTOCOL_SUCCESS);
2939 }
2940
2941 /*
2942 * Fails with
2943 * _NOT_SET - npp is not set
2944 * _DELETED - the node npp pointed at has been deleted
2945 * _TYPE_MISMATCH - type is not _SCOPE
2946 * _NOT_FOUND - scope has no parent
2947 */
2948 static int
rc_scope_parent_scope(rc_node_ptr_t * npp,uint32_t type,rc_node_ptr_t * out)2949 rc_scope_parent_scope(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
2950 {
2951 rc_node_t *np;
2952
2953 rc_node_clear(out, 0);
2954
2955 RC_NODE_PTR_GET_CHECK(np, npp);
2956
2957 if (type != REP_PROTOCOL_ENTITY_SCOPE)
2958 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2959
2960 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2961 }
2962
2963 static int rc_node_pg_check_read_protect(rc_node_t *);
2964
2965 /*
2966 * Fails with
2967 * _NOT_SET
2968 * _DELETED
2969 * _NOT_APPLICABLE
2970 * _NOT_FOUND
2971 * _BAD_REQUEST
2972 * _TRUNCATED
2973 * _NO_RESOURCES
2974 */
int
rc_node_name(rc_node_ptr_t *npp, char *buf, size_t sz, uint32_t answertype,
    size_t *sz_out)
{
	size_t actual;
	rc_node_t *np;

	/* callers pass the buffer size in both sz and *sz_out */
	assert(sz == *sz_out);

	RC_NODE_PTR_GET_CHECK(np, npp);

	/*
	 * Composed property groups answer with the attributes of the
	 * first (top-level) property group in their composition chain.
	 */
	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
		np = np->rn_cchain[0];
		RC_NODE_CHECK(np);
	}

	switch (answertype) {
	case RP_ENTITY_NAME_NAME:
		if (np->rn_name == NULL)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		actual = strlcpy(buf, np->rn_name, sz);
		break;
	case RP_ENTITY_NAME_PGTYPE:
		/* only property groups have a type string */
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		actual = strlcpy(buf, np->rn_type, sz);
		break;
	case RP_ENTITY_NAME_PGFLAGS:
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		/* flags are rendered as a decimal string */
		actual = snprintf(buf, sz, "%d", np->rn_pgflags);
		break;
	case RP_ENTITY_NAME_SNAPLEVEL_SCOPE:
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		actual = strlcpy(buf, np->rn_snaplevel->rsl_scope, sz);
		break;
	case RP_ENTITY_NAME_SNAPLEVEL_SERVICE:
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		actual = strlcpy(buf, np->rn_snaplevel->rsl_service, sz);
		break;
	case RP_ENTITY_NAME_SNAPLEVEL_INSTANCE:
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		/* a NULL instance name yields _NOT_FOUND, not "" */
		if (np->rn_snaplevel->rsl_instance == NULL)
			return (REP_PROTOCOL_FAIL_NOT_FOUND);
		actual = strlcpy(buf, np->rn_snaplevel->rsl_instance, sz);
		break;
	case RP_ENTITY_NAME_PGREADPROT:
	{
		int ret;

		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		ret = rc_node_pg_check_read_protect(np);
		assert(ret != REP_PROTOCOL_FAIL_TYPE_MISMATCH);
		/* "1" if reads are protected, "0" if they are allowed */
		switch (ret) {
		case REP_PROTOCOL_FAIL_PERMISSION_DENIED:
			actual = snprintf(buf, sz, "1");
			break;
		case REP_PROTOCOL_SUCCESS:
			actual = snprintf(buf, sz, "0");
			break;
		default:
			return (ret);
		}
		break;
	}
	default:
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
	}
	/* actual excludes the NUL; actual >= sz means buf was truncated */
	if (actual >= sz)
		return (REP_PROTOCOL_FAIL_TRUNCATED);

	*sz_out = actual;
	return (REP_PROTOCOL_SUCCESS);
}
3053
3054 int
rc_node_get_property_type(rc_node_ptr_t * npp,rep_protocol_value_type_t * out)3055 rc_node_get_property_type(rc_node_ptr_t *npp, rep_protocol_value_type_t *out)
3056 {
3057 rc_node_t *np;
3058
3059 RC_NODE_PTR_GET_CHECK(np, npp);
3060
3061 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
3062 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3063
3064 *out = np->rn_valtype;
3065
3066 return (REP_PROTOCOL_SUCCESS);
3067 }
3068
3069 /*
3070 * Get np's parent. If np is deleted, returns _DELETED. Otherwise puts a hold
3071 * on the parent, returns a pointer to it in *out, and returns _SUCCESS.
3072 */
static int
rc_node_parent(rc_node_t *np, rc_node_t **out)
{
	rc_node_t *pnp;
	rc_node_t *np_orig;

	/* composed property groups: answer for the top-level pg */
	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
		RC_NODE_CHECK_AND_LOCK(np);
	} else {
		np = np->rn_cchain[0];
		RC_NODE_CHECK_AND_LOCK(np);
	}

	np_orig = np;
	rc_node_hold_locked(np);	/* simplifies the remainder */

	for (;;) {
		/* wait out any transaction or concurrent parent lookup */
		if (!rc_node_wait_flag(np,
		    RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
			rc_node_rele_locked(np);
			return (REP_PROTOCOL_FAIL_DELETED);
		}

		if (!(np->rn_flags & RC_NODE_OLD))
			break;

		/*
		 * The node was superseded (RC_NODE_OLD); chase the
		 * current version through the cache by id and retry.
		 */
		rc_node_rele_locked(np);
		np = cache_lookup(&np_orig->rn_id);
		assert(np != np_orig);

		if (np == NULL)
			goto deleted;
		(void) pthread_mutex_lock(&np->rn_lock);
	}

	/* guaranteed to succeed without dropping the lock */
	if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		*out = NULL;
		rc_node_rele(np);
		return (REP_PROTOCOL_FAIL_DELETED);
	}

	assert(np->rn_parent != NULL);
	pnp = np->rn_parent;
	(void) pthread_mutex_unlock(&np->rn_lock);

	/*
	 * RC_NODE_USING_PARENT keeps rn_parent stable while np's lock is
	 * dropped; the parent is locked first, then np re-locked to
	 * release the flag.
	 */
	(void) pthread_mutex_lock(&pnp->rn_lock);
	(void) pthread_mutex_lock(&np->rn_lock);
	rc_node_rele_flag(np, RC_NODE_USING_PARENT);
	(void) pthread_mutex_unlock(&np->rn_lock);

	rc_node_hold_locked(pnp);	/* this hold is returned to caller */

	(void) pthread_mutex_unlock(&pnp->rn_lock);

	rc_node_rele(np);
	*out = pnp;
	return (REP_PROTOCOL_SUCCESS);

deleted:
	rc_node_rele(np);
	return (REP_PROTOCOL_FAIL_DELETED);
}
3137
3138 /*
3139 * Fails with
3140 * _NOT_SET
3141 * _DELETED
3142 */
static int
rc_node_ptr_parent(rc_node_ptr_t *npp, rc_node_t **out)
{
	rc_node_t *np;

	/* validate npp; the macro returns _NOT_SET/_DELETED on failure */
	RC_NODE_PTR_GET_CHECK(np, npp);

	/* on success, *out holds a reference the caller must release */
	return (rc_node_parent(np, out));
}
3152
3153 /*
3154 * Fails with
3155 * _NOT_SET - npp is not set
3156 * _DELETED - the node npp pointed at has been deleted
3157 * _TYPE_MISMATCH - npp's node's parent is not of type type
3158 *
3159 * If npp points to a scope, can also fail with
3160 * _NOT_FOUND - scope has no parent
3161 */
3162 int
rc_node_get_parent(rc_node_ptr_t * npp,uint32_t type,rc_node_ptr_t * out)3163 rc_node_get_parent(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
3164 {
3165 rc_node_t *pnp;
3166 int rc;
3167
3168 if (npp->rnp_node != NULL &&
3169 npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE)
3170 return (rc_scope_parent_scope(npp, type, out));
3171
3172 if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS) {
3173 rc_node_clear(out, 0);
3174 return (rc);
3175 }
3176
3177 if (type != pnp->rn_id.rl_type) {
3178 rc_node_rele(pnp);
3179 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3180 }
3181
3182 rc_node_assign(out, pnp);
3183 rc_node_rele(pnp);
3184
3185 return (REP_PROTOCOL_SUCCESS);
3186 }
3187
3188 int
rc_node_parent_type(rc_node_ptr_t * npp,uint32_t * type_out)3189 rc_node_parent_type(rc_node_ptr_t *npp, uint32_t *type_out)
3190 {
3191 rc_node_t *pnp;
3192 int rc;
3193
3194 if (npp->rnp_node != NULL &&
3195 npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE) {
3196 *type_out = REP_PROTOCOL_ENTITY_SCOPE;
3197 return (REP_PROTOCOL_SUCCESS);
3198 }
3199
3200 if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS)
3201 return (rc);
3202
3203 *type_out = pnp->rn_id.rl_type;
3204
3205 rc_node_rele(pnp);
3206
3207 return (REP_PROTOCOL_SUCCESS);
3208 }
3209
3210 /*
3211 * Fails with
3212 * _INVALID_TYPE - type is invalid
3213 * _TYPE_MISMATCH - np doesn't carry children of type type
3214 * _DELETED - np has been deleted
3215 * _NOT_FOUND - no child with that name/type combo found
3216 * _NO_RESOURCES
3217 * _BACKEND_ACCESS
3218 */
int
rc_node_get_child(rc_node_ptr_t *npp, const char *name, uint32_t type,
    rc_node_ptr_t *outp)
{
	rc_node_t *np, *cp;
	rc_node_t *child = NULL;
	int ret, idx;

	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
	if ((ret = rc_check_type_name(type, name)) == REP_PROTOCOL_SUCCESS) {
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
			ret = rc_node_find_named_child(np, name, type, &child);
		} else {
			/*
			 * Composed property group: search each pg in the
			 * composition chain in order.  np's lock is
			 * dropped while each chain member is individually
			 * checked and locked.
			 */
			(void) pthread_mutex_unlock(&np->rn_lock);
			ret = REP_PROTOCOL_SUCCESS;
			for (idx = 0; idx < COMPOSITION_DEPTH; idx++) {
				cp = np->rn_cchain[idx];
				if (cp == NULL)
					break;
				RC_NODE_CHECK_AND_LOCK(cp);
				ret = rc_node_find_named_child(cp, name, type,
				    &child);
				(void) pthread_mutex_unlock(&cp->rn_lock);
				/*
				 * loop only if we succeeded, but no child of
				 * the correct name was found.
				 */
				if (ret != REP_PROTOCOL_SUCCESS ||
				    child != NULL)
					break;
			}
			(void) pthread_mutex_lock(&np->rn_lock);
		}
	}
	(void) pthread_mutex_unlock(&np->rn_lock);

	if (ret == REP_PROTOCOL_SUCCESS) {
		/* child (if found) comes back held; transfer to outp */
		rc_node_assign(outp, child);
		if (child != NULL)
			rc_node_rele(child);
		else
			ret = REP_PROTOCOL_FAIL_NOT_FOUND;
	} else {
		rc_node_assign(outp, NULL);
	}
	return (ret);
}
3266
/*
 * Replace npp's reference with the current version of the same entity
 * (property groups and snapshots can be superseded by transactions).
 * Returns _SUCCESS if npp was already current, _DONE if it was updated,
 * or _DELETED/_BAD_REQUEST on failure.
 */
int
rc_node_update(rc_node_ptr_t *npp)
{
	cache_bucket_t *bp;
	rc_node_t *np = npp->rnp_node;
	rc_node_t *nnp;
	rc_node_t *cpg = NULL;

	if (np != NULL &&
	    np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
		/*
		 * If we're updating a composed property group, actually
		 * update the top-level property group & return the
		 * appropriate value.  But leave *nnp pointing at us.
		 */
		cpg = np;
		np = np->rn_cchain[0];
	}

	RC_NODE_CHECK(np);

	/* only pgs and snapshots can be superseded, hence updated */
	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP &&
	    np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT)
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);

	for (;;) {
		/* find the current node with this id in the cache */
		bp = cache_hold(np->rn_hash);
		nnp = cache_lookup_unlocked(bp, &np->rn_id);
		if (nnp == NULL) {
			cache_release(bp);
			rc_node_clear(npp, 1);
			return (REP_PROTOCOL_FAIL_DELETED);
		}
		/*
		 * grab the lock before dropping the cache bucket, so
		 * that no one else can sneak in
		 */
		(void) pthread_mutex_lock(&nnp->rn_lock);
		cache_release(bp);

		if (!(nnp->rn_flags & RC_NODE_IN_TX) ||
		    !rc_node_wait_flag(nnp, RC_NODE_IN_TX))
			break;

		/* a transaction intervened; drop our hold and retry */
		rc_node_rele_locked(nnp);
	}

	/*
	 * If it is dead, we want to update it so that it will continue to
	 * report being dead.
	 */
	if (nnp->rn_flags & RC_NODE_DEAD) {
		(void) pthread_mutex_unlock(&nnp->rn_lock);
		if (nnp != np && cpg == NULL)
			rc_node_assign(npp, nnp);	/* updated */
		rc_node_rele(nnp);
		return (REP_PROTOCOL_FAIL_DELETED);
	}

	assert(!(nnp->rn_flags & RC_NODE_OLD));
	(void) pthread_mutex_unlock(&nnp->rn_lock);

	/* keep npp pointing at the composed pg if that's what came in */
	if (nnp != np && cpg == NULL)
		rc_node_assign(npp, nnp);	/* updated */

	rc_node_rele(nnp);

	/* _DONE tells the client the node was replaced by a newer one */
	return ((nnp == np)? REP_PROTOCOL_SUCCESS : REP_PROTOCOL_DONE);
}
3336
3337 /*
3338 * does a generic modification check, for creation, deletion, and snapshot
3339 * management only. Property group transactions have different checks.
3340 *
3341 * The string returned to *match_auth must be freed.
3342 */
static perm_status_t
rc_node_modify_permission_check(char **match_auth)
{
	permcheck_t *pcp;
	perm_status_t granted = PERM_GRANTED;
	int rc;

	*match_auth = NULL;
#ifdef NATIVE_BUILD
	/*
	 * Native builds lack the authorization machinery (see the block
	 * comment below this function); being privileged is sufficient.
	 */
	if (!client_is_privileged()) {
		granted = PERM_DENIED;
	}
	return (granted);
#else
	/* alternate (non-main) repositories are not access-controlled */
	if (is_main_repository == 0)
		return (PERM_GRANTED);
	pcp = pc_create();
	if (pcp != NULL) {
		rc = perm_add_enabling(pcp, AUTH_MODIFY);

		if (rc == REP_PROTOCOL_SUCCESS) {
			granted = perm_granted(pcp);

			if ((granted == PERM_GRANTED) ||
			    (granted == PERM_DENIED)) {
				/*
				 * Copy off the authorization
				 * string before freeing pcp.
				 */
				*match_auth =
				    strdup(pcp->pc_auth_string);
				if (*match_auth == NULL)
					granted = PERM_FAIL;
			}
		} else {
			/* couldn't register the enabling authorization */
			granted = PERM_FAIL;
		}

		pc_free(pcp);
	} else {
		/* pc_create() failed (out of resources) */
		granted = PERM_FAIL;
	}

	return (granted);
#endif /* NATIVE_BUILD */
}
3389
3390 /*
3391 * Native builds are done to create svc.configd-native. This program runs
3392 * only on the Solaris build machines to create the seed repository, and it
3393 * is compiled against the build machine's header files. The ADT_smf_*
3394 * symbols may not be defined in these header files. For this reason
3395 * smf_annotation_event(), _smf_audit_event() and special_property_event()
3396 * are not compiled for native builds.
3397 */
3398 #ifndef NATIVE_BUILD
3399
3400 /*
3401 * This function generates an annotation audit event if one has been setup.
3402 * Annotation events should only be generated immediately before the audit
3403 * record from the first attempt to modify the repository from a client
3404 * which has requested an annotation.
3405 */
3406 static void
smf_annotation_event(int status,int return_val)3407 smf_annotation_event(int status, int return_val)
3408 {
3409 adt_session_data_t *session;
3410 adt_event_data_t *event = NULL;
3411 char file[MAXPATHLEN];
3412 char operation[REP_PROTOCOL_NAME_LEN];
3413
3414 /* Don't audit if we're using an alternate repository. */
3415 if (is_main_repository == 0)
3416 return;
3417
3418 if (client_annotation_needed(operation, sizeof (operation), file,
3419 sizeof (file)) == 0) {
3420 return;
3421 }
3422 if (file[0] == 0) {
3423 (void) strlcpy(file, "NO FILE", sizeof (file));
3424 }
3425 if (operation[0] == 0) {
3426 (void) strlcpy(operation, "NO OPERATION",
3427 sizeof (operation));
3428 }
3429 if ((session = get_audit_session()) == NULL)
3430 return;
3431 if ((event = adt_alloc_event(session, ADT_smf_annotation)) == NULL) {
3432 uu_warn("smf_annotation_event cannot allocate event "
3433 "data. %s\n", strerror(errno));
3434 return;
3435 }
3436 event->adt_smf_annotation.operation = operation;
3437 event->adt_smf_annotation.file = file;
3438 if (adt_put_event(event, status, return_val) == 0) {
3439 client_annotation_finished();
3440 } else {
3441 uu_warn("smf_annotation_event failed to put event. "
3442 "%s\n", strerror(errno));
3443 }
3444 adt_free_event(event);
3445 }
3446
3447 /*
3448 * _smf_audit_event interacts with the security auditing system to generate
3449 * an audit event structure. It establishes an audit session and allocates
3450 * an audit event. The event is filled in from the audit data, and
3451 * adt_put_event is called to generate the event.
3452 */
static void
_smf_audit_event(au_event_t event_id, int status, int return_val,
    audit_event_data_t *data)
{
	char *auth_used;
	char *fmri;
	char *prop_value;
	adt_session_data_t *session;
	adt_event_data_t *event = NULL;

	/* Don't audit if we're using an alternate repository */
	if (is_main_repository == 0)
		return;

	/* Emit any pending annotation event before this record. */
	smf_annotation_event(status, return_val);
	if ((session = get_audit_session()) == NULL)
		return;
	if ((event = adt_alloc_event(session, event_id)) == NULL) {
		uu_warn("_smf_audit_event cannot allocate event "
		    "data. %s\n", strerror(errno));
		return;
	}

	/*
	 * Handle possibility of NULL authorization strings, FMRIs and
	 * property values.
	 */
	if (data->ed_auth == NULL) {
		auth_used = "PRIVILEGED";
	} else {
		auth_used = data->ed_auth;
	}
	if (data->ed_fmri == NULL) {
		syslog(LOG_WARNING, "_smf_audit_event called with "
		    "empty FMRI string");
		fmri = "UNKNOWN FMRI";
	} else {
		fmri = data->ed_fmri;
	}
	if (data->ed_prop_value == NULL) {
		prop_value = "";
	} else {
		prop_value = data->ed_prop_value;
	}

	/*
	 * Fill in the event data.  Each event id has its own struct in
	 * the adt_event_data_t union, so assignments cannot be shared.
	 */
	switch (event_id) {
	case ADT_smf_attach_snap:
		event->adt_smf_attach_snap.auth_used = auth_used;
		event->adt_smf_attach_snap.old_fmri = data->ed_old_fmri;
		event->adt_smf_attach_snap.old_name = data->ed_old_name;
		event->adt_smf_attach_snap.new_fmri = fmri;
		event->adt_smf_attach_snap.new_name = data->ed_snapname;
		break;
	case ADT_smf_change_prop:
		event->adt_smf_change_prop.auth_used = auth_used;
		event->adt_smf_change_prop.fmri = fmri;
		event->adt_smf_change_prop.type = data->ed_type;
		event->adt_smf_change_prop.value = prop_value;
		break;
	case ADT_smf_clear:
		event->adt_smf_clear.auth_used = auth_used;
		event->adt_smf_clear.fmri = fmri;
		break;
	case ADT_smf_create:
		event->adt_smf_create.fmri = fmri;
		event->adt_smf_create.auth_used = auth_used;
		break;
	case ADT_smf_create_npg:
		event->adt_smf_create_npg.auth_used = auth_used;
		event->adt_smf_create_npg.fmri = fmri;
		event->adt_smf_create_npg.type = data->ed_type;
		break;
	case ADT_smf_create_pg:
		event->adt_smf_create_pg.auth_used = auth_used;
		event->adt_smf_create_pg.fmri = fmri;
		event->adt_smf_create_pg.type = data->ed_type;
		break;
	case ADT_smf_create_prop:
		event->adt_smf_create_prop.auth_used = auth_used;
		event->adt_smf_create_prop.fmri = fmri;
		event->adt_smf_create_prop.type = data->ed_type;
		event->adt_smf_create_prop.value = prop_value;
		break;
	case ADT_smf_create_snap:
		event->adt_smf_create_snap.auth_used = auth_used;
		event->adt_smf_create_snap.fmri = fmri;
		event->adt_smf_create_snap.name = data->ed_snapname;
		break;
	case ADT_smf_degrade:
		event->adt_smf_degrade.auth_used = auth_used;
		event->adt_smf_degrade.fmri = fmri;
		break;
	case ADT_smf_delete:
		event->adt_smf_delete.fmri = fmri;
		event->adt_smf_delete.auth_used = auth_used;
		break;
	case ADT_smf_delete_npg:
		event->adt_smf_delete_npg.auth_used = auth_used;
		event->adt_smf_delete_npg.fmri = fmri;
		event->adt_smf_delete_npg.type = data->ed_type;
		break;
	case ADT_smf_delete_pg:
		event->adt_smf_delete_pg.auth_used = auth_used;
		event->adt_smf_delete_pg.fmri = fmri;
		event->adt_smf_delete_pg.type = data->ed_type;
		break;
	case ADT_smf_delete_prop:
		event->adt_smf_delete_prop.auth_used = auth_used;
		event->adt_smf_delete_prop.fmri = fmri;
		break;
	case ADT_smf_delete_snap:
		event->adt_smf_delete_snap.auth_used = auth_used;
		event->adt_smf_delete_snap.fmri = fmri;
		event->adt_smf_delete_snap.name = data->ed_snapname;
		break;
	case ADT_smf_disable:
		event->adt_smf_disable.auth_used = auth_used;
		event->adt_smf_disable.fmri = fmri;
		break;
	case ADT_smf_enable:
		event->adt_smf_enable.auth_used = auth_used;
		event->adt_smf_enable.fmri = fmri;
		break;
	case ADT_smf_immediate_degrade:
		event->adt_smf_immediate_degrade.auth_used = auth_used;
		event->adt_smf_immediate_degrade.fmri = fmri;
		break;
	case ADT_smf_immediate_maintenance:
		event->adt_smf_immediate_maintenance.auth_used = auth_used;
		event->adt_smf_immediate_maintenance.fmri = fmri;
		break;
	case ADT_smf_immtmp_maintenance:
		event->adt_smf_immtmp_maintenance.auth_used = auth_used;
		event->adt_smf_immtmp_maintenance.fmri = fmri;
		break;
	case ADT_smf_maintenance:
		event->adt_smf_maintenance.auth_used = auth_used;
		event->adt_smf_maintenance.fmri = fmri;
		break;
	case ADT_smf_milestone:
		event->adt_smf_milestone.auth_used = auth_used;
		event->adt_smf_milestone.fmri = fmri;
		break;
	case ADT_smf_read_prop:
		event->adt_smf_read_prop.auth_used = auth_used;
		event->adt_smf_read_prop.fmri = fmri;
		break;
	case ADT_smf_refresh:
		event->adt_smf_refresh.auth_used = auth_used;
		event->adt_smf_refresh.fmri = fmri;
		break;
	case ADT_smf_restart:
		event->adt_smf_restart.auth_used = auth_used;
		event->adt_smf_restart.fmri = fmri;
		break;
	case ADT_smf_tmp_disable:
		event->adt_smf_tmp_disable.auth_used = auth_used;
		event->adt_smf_tmp_disable.fmri = fmri;
		break;
	case ADT_smf_tmp_enable:
		event->adt_smf_tmp_enable.auth_used = auth_used;
		event->adt_smf_tmp_enable.fmri = fmri;
		break;
	case ADT_smf_tmp_maintenance:
		event->adt_smf_tmp_maintenance.auth_used = auth_used;
		event->adt_smf_tmp_maintenance.fmri = fmri;
		break;
	default:
		abort();	/* Need to cover all SMF event IDs */
	}

	if (adt_put_event(event, status, return_val) != 0) {
		uu_warn("_smf_audit_event failed to put event. %s\n",
		    strerror(errno));
	}
	adt_free_event(event);
}
3631
3632 /*
3633 * Determine if the combination of the property group at pg_name and the
3634 * property at prop_name are in the set of special startd properties. If
3635 * they are, a special audit event will be generated.
3636 */
3637 static void
special_property_event(audit_event_data_t * evdp,const char * prop_name,char * pg_name,int status,int return_val,tx_commit_data_t * tx_data,size_t cmd_no)3638 special_property_event(audit_event_data_t *evdp, const char *prop_name,
3639 char *pg_name, int status, int return_val, tx_commit_data_t *tx_data,
3640 size_t cmd_no)
3641 {
3642 au_event_t event_id;
3643 audit_special_prop_item_t search_key;
3644 audit_special_prop_item_t *found;
3645
3646 /* Use bsearch to find the special property information. */
3647 search_key.api_prop_name = prop_name;
3648 search_key.api_pg_name = pg_name;
3649 found = (audit_special_prop_item_t *)bsearch(&search_key,
3650 special_props_list, SPECIAL_PROP_COUNT,
3651 sizeof (special_props_list[0]), special_prop_compare);
3652 if (found == NULL) {
3653 /* Not a special property. */
3654 return;
3655 }
3656
3657 /* Get the event id */
3658 if (found->api_event_func == NULL) {
3659 event_id = found->api_event_id;
3660 } else {
3661 if ((*found->api_event_func)(tx_data, cmd_no,
3662 found->api_pg_name, &event_id) < 0)
3663 return;
3664 }
3665
3666 /* Generate the event. */
3667 smf_audit_event(event_id, status, return_val, evdp);
3668 }
3669 #endif /* NATIVE_BUILD */
3670
3671 /*
3672 * Return a pointer to a string containing all the values of the command
3673 * specified by cmd_no with each value enclosed in quotes. It is up to the
3674 * caller to free the memory at the returned pointer.
3675 */
3676 static char *
generate_value_list(tx_commit_data_t * tx_data,size_t cmd_no)3677 generate_value_list(tx_commit_data_t *tx_data, size_t cmd_no)
3678 {
3679 const char *cp;
3680 const char *cur_value;
3681 size_t byte_count = 0;
3682 uint32_t i;
3683 uint32_t nvalues;
3684 size_t str_size = 0;
3685 char *values = NULL;
3686 char *vp;
3687
3688 if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
3689 return (NULL);
3690 /*
3691 * First determine the size of the buffer that we will need. We
3692 * will represent each property value surrounded by quotes with a
3693 * space separating the values. Thus, we need to find the total
3694 * size of all the value strings and add 3 for each value.
3695 *
3696 * There is one catch, though. We need to escape any internal
3697 * quote marks in the values. So for each quote in the value we
3698 * need to add another byte to the buffer size.
3699 */
3700 for (i = 0; i < nvalues; i++) {
3701 if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
3702 REP_PROTOCOL_SUCCESS)
3703 return (NULL);
3704 for (cp = cur_value; *cp != 0; cp++) {
3705 byte_count += (*cp == '"') ? 2 : 1;
3706 }
3707 byte_count += 3; /* surrounding quotes & space */
3708 }
3709 byte_count++; /* nul terminator */
3710 values = malloc(byte_count);
3711 if (values == NULL)
3712 return (NULL);
3713 *values = 0;
3714
3715 /* Now build up the string of values. */
3716 for (i = 0; i < nvalues; i++) {
3717 if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
3718 REP_PROTOCOL_SUCCESS) {
3719 free(values);
3720 return (NULL);
3721 }
3722 (void) strlcat(values, "\"", byte_count);
3723 for (cp = cur_value, vp = values + strlen(values);
3724 *cp != 0; cp++) {
3725 if (*cp == '"') {
3726 *vp++ = '\\';
3727 *vp++ = '"';
3728 } else {
3729 *vp++ = *cp;
3730 }
3731 }
3732 *vp = 0;
3733 str_size = strlcat(values, "\" ", byte_count);
3734 assert(str_size < byte_count);
3735 }
3736 if (str_size > 0)
3737 values[str_size - 1] = 0; /* get rid of trailing space */
3738 return (values);
3739 }
3740
3741 /*
3742 * generate_property_events takes the transaction commit data at tx_data
3743 * and generates an audit event for each command.
3744 *
3745 * Native builds are done to create svc.configd-native. This program runs
3746 * only on the Solaris build machines to create the seed repository. Thus,
3747 * no audit events should be generated when running svc.configd-native.
3748 */
static void
generate_property_events(
	tx_commit_data_t *tx_data,
	char *pg_fmri,		/* FMRI of property group */
	char *auth_string,
	int auth_status,
	int auth_ret_value)
{
#ifndef NATIVE_BUILD
	enum rep_protocol_transaction_action action;
	audit_event_data_t audit_data;
	size_t count;
	size_t cmd_no;
	char *cp;
	au_event_t event_id;
	char fmri[REP_PROTOCOL_FMRI_LEN];
	char pg_name[REP_PROTOCOL_NAME_LEN];
	char *pg_end;		/* End of prop. group fmri */
	const char *prop_name;
	uint32_t ptype;
	char prop_type[3];
	enum rep_protocol_responseid rc;
	size_t sz_out;

	/* Make sure we have something to do. */
	if (tx_data == NULL)
		return;
	if ((count = tx_cmd_count(tx_data)) == 0)
		return;

	/* Copy the property group fmri */
	pg_end = fmri;
	pg_end += strlcpy(fmri, pg_fmri, sizeof (fmri));

	/*
	 * Get the property group name.  It is the first component after
	 * the last occurance of SCF_FMRI_PROPERTYGRP_PREFIX in the fmri.
	 */
	cp = strstr(pg_fmri, SCF_FMRI_PROPERTYGRP_PREFIX);
	if (cp == NULL) {
		pg_name[0] = 0;
	} else {
		cp += strlen(SCF_FMRI_PROPERTYGRP_PREFIX);
		(void) strlcpy(pg_name, cp, sizeof (pg_name));
	}

	audit_data.ed_auth = auth_string;
	audit_data.ed_fmri = fmri;
	audit_data.ed_type = prop_type;

	/*
	 * Property type is two characters (see
	 * rep_protocol_value_type_t), so terminate the string.
	 */
	prop_type[2] = 0;

	for (cmd_no = 0; cmd_no < count; cmd_no++) {
		/* Construct FMRI of the property */
		*pg_end = 0;	/* truncate fmri back to the pg portion */
		if (tx_cmd_prop(tx_data, cmd_no, &prop_name) !=
		    REP_PROTOCOL_SUCCESS) {
			continue;
		}
		rc = rc_concat_fmri_element(fmri, sizeof (fmri), &sz_out,
		    prop_name, REP_PROTOCOL_ENTITY_PROPERTY);
		if (rc != REP_PROTOCOL_SUCCESS) {
			/*
			 * If we can't get the FMRI, we'll abandon this
			 * command
			 */
			continue;
		}

		/* Generate special property event if necessary. */
		special_property_event(&audit_data, prop_name, pg_name,
		    auth_status, auth_ret_value, tx_data, cmd_no);

		/* Capture rest of audit data. */
		if (tx_cmd_prop_type(tx_data, cmd_no, &ptype) !=
		    REP_PROTOCOL_SUCCESS) {
			continue;
		}
		prop_type[0] = REP_PROTOCOL_BASE_TYPE(ptype);
		prop_type[1] = REP_PROTOCOL_SUBTYPE(ptype);
		/* generate_value_list() allocates; freed on every path */
		audit_data.ed_prop_value = generate_value_list(tx_data, cmd_no);

		/* Determine the event type. */
		if (tx_cmd_action(tx_data, cmd_no, &action) !=
		    REP_PROTOCOL_SUCCESS) {
			free(audit_data.ed_prop_value);
			continue;
		}
		switch (action) {
		case REP_PROTOCOL_TX_ENTRY_NEW:
			event_id = ADT_smf_create_prop;
			break;
		case REP_PROTOCOL_TX_ENTRY_CLEAR:
			event_id = ADT_smf_change_prop;
			break;
		case REP_PROTOCOL_TX_ENTRY_REPLACE:
			event_id = ADT_smf_change_prop;
			break;
		case REP_PROTOCOL_TX_ENTRY_DELETE:
			event_id = ADT_smf_delete_prop;
			break;
		default:
			assert(0);	/* Missing a case */
			free(audit_data.ed_prop_value);
			continue;
		}

		/* Generate the event. */
		smf_audit_event(event_id, auth_status, auth_ret_value,
		    &audit_data);
		free(audit_data.ed_prop_value);
	}
#endif /* NATIVE_BUILD */
}
3867
3868 /*
3869 * Fails with
3870 * _DELETED - node has been deleted
3871 * _NOT_SET - npp is reset
3872 * _NOT_APPLICABLE - type is _PROPERTYGRP
3873 * _INVALID_TYPE - node is corrupt or type is invalid
3874 * _TYPE_MISMATCH - node cannot have children of type type
3875 * _BAD_REQUEST - name is invalid
3876 * cannot create children for this type of node
3877 * _NO_RESOURCES - out of memory, or could not allocate new id
3878 * _PERMISSION_DENIED
3879 * _BACKEND_ACCESS
3880 * _BACKEND_READONLY
3881 * _EXISTS - child already exists
3882 * _TRUNCATED - truncated FMRI for the audit record
3883 */
int
rc_node_create_child(rc_node_ptr_t *npp, uint32_t type, const char *name,
    rc_node_ptr_t *cpp)
{
	rc_node_t *np;
	rc_node_t *cp = NULL;		/* the newly created child */
	int rc;
	perm_status_t perm_rc;
	size_t sz_out;
	char fmri[REP_PROTOCOL_FMRI_LEN];	/* child FMRI, for auditing */
	audit_event_data_t audit_data;

	rc_node_clear(cpp, 0);

	/*
	 * rc_node_modify_permission_check() must be called before the node
	 * is locked.  This is because the library functions that check
	 * authorizations can trigger calls back into configd.
	 */
	perm_rc = rc_node_modify_permission_check(&audit_data.ed_auth);
	switch (perm_rc) {
	case PERM_DENIED:
		/*
		 * We continue in this case, so that an audit event can be
		 * generated later in the function.
		 */
		break;
	case PERM_GRANTED:
		break;
	case PERM_GONE:
		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
	case PERM_FAIL:
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
	default:
		bad_error(rc_node_modify_permission_check, perm_rc);
	}

	/* Locks np; on failure frees ed_auth and returns. */
	RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, audit_data.ed_auth);

	audit_data.ed_fmri = fmri;

	/*
	 * there is a separate interface for creating property groups
	 */
	if (type == REP_PROTOCOL_ENTITY_PROPERTYGRP) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		free(audit_data.ed_auth);
		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
	}

	/*
	 * A composed property group delegates child creation to its first
	 * component (rn_cchain[0]).
	 */
	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		np = np->rn_cchain[0];
		if ((rc = rc_node_check_and_lock(np)) != REP_PROTOCOL_SUCCESS) {
			free(audit_data.ed_auth);
			return (rc);
		}
	}

	/* Validate that children of 'type' may live under np. */
	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
	    REP_PROTOCOL_SUCCESS) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		free(audit_data.ed_auth);
		return (rc);
	}
	/* Validate the proposed child name for this entity type. */
	if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		free(audit_data.ed_auth);
		return (rc);
	}

	/* Build the would-be child's FMRI for the audit record. */
	if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
	    name, type)) != REP_PROTOCOL_SUCCESS) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		free(audit_data.ed_auth);
		return (rc);
	}
	/* Denied earlier; now that the FMRI is known, audit and bail. */
	if (perm_rc == PERM_DENIED) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		smf_audit_event(ADT_smf_create, ADT_FAILURE,
		    ADT_FAIL_VALUE_AUTH, &audit_data);
		free(audit_data.ed_auth);
		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
	}

	/*
	 * Serialize child creation under np, then drop the lock for the
	 * backend I/O in object_create().
	 */
	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
	    audit_data.ed_auth);
	(void) pthread_mutex_unlock(&np->rn_lock);

	rc = object_create(np, type, name, &cp);
	assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);

	if (rc == REP_PROTOCOL_SUCCESS) {
		/* Hand the new child to the caller's pointer. */
		rc_node_assign(cpp, cp);
		rc_node_rele(cp);
	}

	(void) pthread_mutex_lock(&np->rn_lock);
	rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
	(void) pthread_mutex_unlock(&np->rn_lock);

	if (rc == REP_PROTOCOL_SUCCESS) {
		smf_audit_event(ADT_smf_create, ADT_SUCCESS, ADT_SUCCESS,
		    &audit_data);
	}

	free(audit_data.ed_auth);

	return (rc);
}
3994
/*
 * Create a property group named 'name' of type 'pgtype' as a child of
 * *npp, returning the new node in *cpp.  'flags' may only contain
 * SCF_PG_FLAG_NONPERSISTENT.  Requires smf.modify (and, where
 * applicable, smf.modify.<pgtype> or instance action/manage
 * authorizations); generates an audit event on both success and
 * authorization failure.
 */
int
rc_node_create_child_pg(rc_node_ptr_t *npp, uint32_t type, const char *name,
    const char *pgtype, uint32_t flags, rc_node_ptr_t *cpp)
{
	rc_node_t *np;
	rc_node_t *cp;			/* the newly created pg */
	int rc;
	permcheck_t *pcp;
	perm_status_t granted;
	char fmri[REP_PROTOCOL_FMRI_LEN];	/* child FMRI, for auditing */
	audit_event_data_t audit_data;
	au_event_t event_id;
	size_t sz_out;

	audit_data.ed_auth = NULL;
	audit_data.ed_fmri = fmri;
	audit_data.ed_type = (char *)pgtype;

	rc_node_clear(cpp, 0);

	/* verify flags is valid */
	if (flags & ~SCF_PG_FLAG_NONPERSISTENT)
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);

	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);

	/* This interface only creates property groups. */
	if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
		rc_node_rele(np);
		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
	}

	/* Validate parent/child relationship and both names. */
	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
	    REP_PROTOCOL_SUCCESS) {
		rc_node_rele(np);
		return (rc);
	}
	if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS ||
	    (rc = rc_check_pgtype_name(pgtype)) != REP_PROTOCOL_SUCCESS) {
		rc_node_rele(np);
		return (rc);
	}

#ifdef NATIVE_BUILD
	if (!client_is_privileged()) {
		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
	}
#else
	/* Pick the audit event id by persistence of the new pg. */
	if (flags & SCF_PG_FLAG_NONPERSISTENT) {
		event_id = ADT_smf_create_npg;
	} else {
		event_id = ADT_smf_create_pg;
	}
	/* Build the would-be child's FMRI for the audit record. */
	if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
	    name, REP_PROTOCOL_ENTITY_PROPERTYGRP)) != REP_PROTOCOL_SUCCESS) {
		rc_node_rele(np);
		return (rc);
	}

	if (is_main_repository) {
		/* Must have .smf.modify or smf.modify.<type> authorization */
		pcp = pc_create();
		if (pcp != NULL) {
			rc = perm_add_enabling(pcp, AUTH_MODIFY);

			if (rc == REP_PROTOCOL_SUCCESS) {
				const char * const auth =
				    perm_auth_for_pgtype(pgtype);

				if (auth != NULL)
					rc = perm_add_enabling(pcp, auth);
			}

			/*
			 * .manage or $action_authorization can be used to
			 * create the actions pg and the general_ovr pg.
			 */
			if (rc == REP_PROTOCOL_SUCCESS &&
			    (flags & SCF_PG_FLAG_NONPERSISTENT) != 0 &&
			    np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE &&
			    ((strcmp(name, AUTH_PG_ACTIONS) == 0 &&
			    strcmp(pgtype, AUTH_PG_ACTIONS_TYPE) == 0) ||
			    (strcmp(name, AUTH_PG_GENERAL_OVR) == 0 &&
			    strcmp(pgtype, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
				rc = perm_add_enabling(pcp, AUTH_MANAGE);

				if (rc == REP_PROTOCOL_SUCCESS)
					rc = perm_add_inst_action_auth(pcp, np);
			}

			if (rc == REP_PROTOCOL_SUCCESS) {
				granted = perm_granted(pcp);

				rc = map_granted_status(granted, pcp,
				    &audit_data.ed_auth);
				if (granted == PERM_GONE) {
					/* No auditing if client gone. */
					pc_free(pcp);
					rc_node_rele(np);
					return (rc);
				}
			}

			pc_free(pcp);
		} else {
			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
		}

	} else {
		/* Non-main repositories skip the permission check. */
		rc = REP_PROTOCOL_SUCCESS;
	}
#endif /* NATIVE_BUILD */


	if (rc != REP_PROTOCOL_SUCCESS) {
		rc_node_rele(np);
		/* Don't audit resource exhaustion as an auth failure. */
		if (rc != REP_PROTOCOL_FAIL_NO_RESOURCES) {
			smf_audit_event(event_id, ADT_FAILURE,
			    ADT_FAIL_VALUE_AUTH, &audit_data);
		}
		if (audit_data.ed_auth != NULL)
			free(audit_data.ed_auth);
		return (rc);
	}

	/*
	 * Serialize child creation under np, then drop the lock for the
	 * backend I/O in object_create_pg().
	 */
	(void) pthread_mutex_lock(&np->rn_lock);
	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
	    audit_data.ed_auth);
	(void) pthread_mutex_unlock(&np->rn_lock);

	rc = object_create_pg(np, type, name, pgtype, flags, &cp);

	if (rc == REP_PROTOCOL_SUCCESS) {
		rc_node_assign(cpp, cp);
		rc_node_rele(cp);
	}

	(void) pthread_mutex_lock(&np->rn_lock);
	rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
	(void) pthread_mutex_unlock(&np->rn_lock);

	if (rc == REP_PROTOCOL_SUCCESS) {
		smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
		    &audit_data);
	}
	if (audit_data.ed_auth != NULL)
		free(audit_data.ed_auth);

	return (rc);
}
4144
4145 static void
rc_pg_notify_fire(rc_node_pg_notify_t * pnp)4146 rc_pg_notify_fire(rc_node_pg_notify_t *pnp)
4147 {
4148 assert(MUTEX_HELD(&rc_pg_notify_lock));
4149
4150 if (pnp->rnpn_pg != NULL) {
4151 uu_list_remove(pnp->rnpn_pg->rn_pg_notify_list, pnp);
4152 (void) close(pnp->rnpn_fd);
4153
4154 pnp->rnpn_pg = NULL;
4155 pnp->rnpn_fd = -1;
4156 } else {
4157 assert(pnp->rnpn_fd == -1);
4158 }
4159 }
4160
/*
 * Deliver a deletion notification for np_arg.  Walks up the parent
 * chain from np_arg, recording the enclosing property group, instance,
 * and service nodes (each held with RC_NODE_USING_PARENT plus a
 * reference), then passes their names to rc_notify_deletion(), which
 * consumes ndp.  If the walk fails (node dying, unexpected entity
 * type, or no parent), ndp is freed here instead.  All holds are
 * released before returning.
 */
static void
rc_notify_node_delete(rc_notify_delete_t *ndp, rc_node_t *np_arg)
{
	rc_node_t *svc = NULL;
	rc_node_t *inst = NULL;
	rc_node_t *pg = NULL;
	rc_node_t *np = np_arg;
	rc_node_t *nnp;		/* next (parent) node in the walk */

	/* Climb until we've recorded the owning service. */
	while (svc == NULL) {
		(void) pthread_mutex_lock(&np->rn_lock);
		if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
			(void) pthread_mutex_unlock(&np->rn_lock);
			goto cleanup;
		}
		nnp = np->rn_parent;
		rc_node_hold_locked(np);	/* hold it in place */

		switch (np->rn_id.rl_type) {
		case REP_PROTOCOL_ENTITY_PROPERTYGRP:
			assert(pg == NULL);
			pg = np;
			break;
		case REP_PROTOCOL_ENTITY_INSTANCE:
			assert(inst == NULL);
			inst = np;
			break;
		case REP_PROTOCOL_ENTITY_SERVICE:
			assert(svc == NULL);
			svc = np;
			break;
		default:
			/* Unexpected entity type: drop holds and bail. */
			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
			rc_node_rele_locked(np);
			goto cleanup;
		}

		(void) pthread_mutex_unlock(&np->rn_lock);

		np = nnp;
		if (np == NULL)
			goto cleanup;
	}

	/* Hand ndp off with the svc/inst/pg names we collected. */
	rc_notify_deletion(ndp,
	    svc->rn_name,
	    inst != NULL ? inst->rn_name : NULL,
	    pg != NULL ? pg->rn_name : NULL);

	ndp = NULL;		/* consumed by rc_notify_deletion() */

cleanup:
	if (ndp != NULL)
		uu_free(ndp);

	/* Release the USING_PARENT flags and references we took. */
	for (;;) {
		if (svc != NULL) {
			np = svc;
			svc = NULL;
		} else if (inst != NULL) {
			np = inst;
			inst = NULL;
		} else if (pg != NULL) {
			np = pg;
			pg = NULL;
		} else
			break;

		(void) pthread_mutex_lock(&np->rn_lock);
		rc_node_rele_flag(np, RC_NODE_USING_PARENT);
		rc_node_rele_locked(np);
	}
}
4234
4235 /*
4236 * Hold RC_NODE_DYING_FLAGS on np's descendents. If andformer is true, do
4237 * the same down the rn_former chain.
4238 */
static void
rc_node_delete_hold(rc_node_t *np, int andformer)
{
	rc_node_t *cp;

again:
	/* Entered with np locked and already holding DYING_FLAGS. */
	assert(MUTEX_HELD(&np->rn_lock));
	assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);

	/* Hand-over-hand: take each child's lock before dropping np's. */
	for (cp = uu_list_first(np->rn_children); cp != NULL;
	    cp = uu_list_next(np->rn_children, cp)) {
		(void) pthread_mutex_lock(&cp->rn_lock);
		(void) pthread_mutex_unlock(&np->rn_lock);
		if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS)) {
			/*
			 * already marked as dead -- can't happen, since that
			 * would require setting RC_NODE_CHILDREN_CHANGING
			 * in np, and we're holding that...
			 */
			abort();
		}
		rc_node_delete_hold(cp, andformer); /* recurse, drop lock */

		(void) pthread_mutex_lock(&np->rn_lock);
	}
	if (andformer && (cp = np->rn_former) != NULL) {
		(void) pthread_mutex_lock(&cp->rn_lock);
		(void) pthread_mutex_unlock(&np->rn_lock);
		if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS))
			abort();		/* can't happen, see above */
		np = cp;
		goto again;		/* tail-recurse down rn_former */
	}
	/* Exits with np's lock dropped. */
	(void) pthread_mutex_unlock(&np->rn_lock);
}
4274
4275 /*
4276 * N.B.: this function drops np->rn_lock on the way out.
4277 */
static void
rc_node_delete_rele(rc_node_t *np, int andformer)
{
	rc_node_t *cp;

again:
	/* Entered with np locked and holding DYING_FLAGS (see _hold). */
	assert(MUTEX_HELD(&np->rn_lock));
	assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);

	/* Release DYING_FLAGS on each child, hand-over-hand. */
	for (cp = uu_list_first(np->rn_children); cp != NULL;
	    cp = uu_list_next(np->rn_children, cp)) {
		(void) pthread_mutex_lock(&cp->rn_lock);
		(void) pthread_mutex_unlock(&np->rn_lock);
		rc_node_delete_rele(cp, andformer); /* recurse, drop lock */
		(void) pthread_mutex_lock(&np->rn_lock);
	}
	if (andformer && (cp = np->rn_former) != NULL) {
		(void) pthread_mutex_lock(&cp->rn_lock);
		rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
		(void) pthread_mutex_unlock(&np->rn_lock);

		np = cp;
		goto again;		/* tail-recurse down rn_former */
	}
	rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
	(void) pthread_mutex_unlock(&np->rn_lock);
}
4305
/*
 * Mark cp DEAD.  For a current (non-RC_NODE_OLD) node, also detach it
 * from its parent, free its cached FMRI, fire any pending pg
 * notifications, and remove it from the notify list and cache hash.
 * Entered with cp's lock held; the lock is held again on return
 * (temporarily dropped for the notify/cache work).
 */
static void
rc_node_finish_delete(rc_node_t *cp)
{
	cache_bucket_t *bp;
	rc_node_pg_notify_t *pnp;

	assert(MUTEX_HELD(&cp->rn_lock));

	if (!(cp->rn_flags & RC_NODE_OLD)) {
		assert(cp->rn_flags & RC_NODE_IN_PARENT);
		if (!rc_node_wait_flag(cp, RC_NODE_USING_PARENT)) {
			abort();		/* can't happen, see above */
		}
		cp->rn_flags &= ~RC_NODE_IN_PARENT;
		cp->rn_parent = NULL;
		rc_node_free_fmri(cp);
	}

	cp->rn_flags |= RC_NODE_DEAD;

	/*
	 * If this node is not out-dated, we need to remove it from
	 * the notify list and cache hash table.
	 */
	if (!(cp->rn_flags & RC_NODE_OLD)) {
		assert(cp->rn_refs > 0);	/* can't go away yet */
		(void) pthread_mutex_unlock(&cp->rn_lock);

		(void) pthread_mutex_lock(&rc_pg_notify_lock);
		while ((pnp = uu_list_first(cp->rn_pg_notify_list)) != NULL)
			rc_pg_notify_fire(pnp);
		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
		rc_notify_remove_node(cp);

		bp = cache_hold(cp->rn_hash);
		(void) pthread_mutex_lock(&cp->rn_lock);
		cache_remove_unlocked(bp, cp);
		cache_release(bp);
	}
}
4346
4347 /*
4348 * For each child, call rc_node_finish_delete() and recurse. If andformer
4349 * is set, also recurse down rn_former. Finally release np, which might
4350 * free it.
4351 */
static void
rc_node_delete_children(rc_node_t *np, int andformer)
{
	rc_node_t *cp;

again:
	/* Entered with np locked, referenced, and already marked DEAD. */
	assert(np->rn_refs > 0);
	assert(MUTEX_HELD(&np->rn_lock));
	assert(np->rn_flags & RC_NODE_DEAD);

	/* Unlink, kill, and recurse into each child. */
	while ((cp = uu_list_first(np->rn_children)) != NULL) {
		uu_list_remove(np->rn_children, cp);
		(void) pthread_mutex_lock(&cp->rn_lock);
		(void) pthread_mutex_unlock(&np->rn_lock);
		rc_node_hold_locked(cp);	/* hold while we recurse */
		rc_node_finish_delete(cp);
		rc_node_delete_children(cp, andformer);	/* drops lock + ref */
		(void) pthread_mutex_lock(&np->rn_lock);
	}

	/*
	 * When we drop cp's lock, all the children will be gone, so we
	 * can release DYING_FLAGS.
	 */
	rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
	if (andformer && (cp = np->rn_former) != NULL) {
		np->rn_former = NULL;		/* unlink */
		(void) pthread_mutex_lock(&cp->rn_lock);

		/*
		 * Register the ephemeral reference created by reading
		 * np->rn_former into cp.  Note that the persistent
		 * reference (np->rn_former) is locked because we haven't
		 * dropped np's lock since we dropped its RC_NODE_IN_TX
		 * (via RC_NODE_DYING_FLAGS).
		 */
		rc_node_hold_ephemeral_locked(cp);

		(void) pthread_mutex_unlock(&np->rn_lock);
		cp->rn_flags &= ~RC_NODE_ON_FORMER;

		rc_node_hold_locked(cp);	/* hold while we loop */

		rc_node_finish_delete(cp);

		rc_node_rele(np);	/* drop the old reference */

		np = cp;
		goto again;		/* tail-recurse down rn_former */
	}
	/* Final release: may free np if this was the last reference. */
	rc_node_rele_locked(np);
}
4404
4405 /*
4406 * The last client or child reference to np, which must be either
4407 * RC_NODE_OLD or RC_NODE_DEAD, has been destroyed. We'll destroy any
4408 * remaining references (e.g., rn_former) and call rc_node_destroy() to
4409 * free np.
4410 */
4411 static void
rc_node_no_client_refs(rc_node_t * np)4412 rc_node_no_client_refs(rc_node_t *np)
4413 {
4414 int unrefed;
4415 rc_node_t *current, *cur;
4416
4417 assert(MUTEX_HELD(&np->rn_lock));
4418 assert(np->rn_refs == 0);
4419 assert(np->rn_other_refs == 0);
4420 assert(np->rn_other_refs_held == 0);
4421
4422 if (np->rn_flags & RC_NODE_DEAD) {
4423 /*
4424 * The node is DEAD, so the deletion code should have
4425 * destroyed all rn_children or rn_former references.
4426 * Since the last client or child reference has been
4427 * destroyed, we're free to destroy np. Unless another
4428 * thread has an ephemeral reference, in which case we'll
4429 * pass the buck.
4430 */
4431 if (np->rn_erefs > 1) {
4432 --np->rn_erefs;
4433 NODE_UNLOCK(np);
4434 return;
4435 }
4436
4437 (void) pthread_mutex_unlock(&np->rn_lock);
4438 rc_node_destroy(np);
4439 return;
4440 }
4441
4442 /* We only collect DEAD and OLD nodes, thank you. */
4443 assert(np->rn_flags & RC_NODE_OLD);
4444
4445 /*
4446 * RC_NODE_UNREFED keeps multiple threads from processing OLD
4447 * nodes. But it's vulnerable to unfriendly scheduling, so full
4448 * use of rn_erefs should supersede it someday.
4449 */
4450 if (np->rn_flags & RC_NODE_UNREFED) {
4451 (void) pthread_mutex_unlock(&np->rn_lock);
4452 return;
4453 }
4454 np->rn_flags |= RC_NODE_UNREFED;
4455
4456 /*
4457 * Now we'll remove the node from the rn_former chain and take its
4458 * DYING_FLAGS.
4459 */
4460
4461 /*
4462 * Since this node is OLD, it should be on an rn_former chain. To
4463 * remove it, we must find the current in-hash object and grab its
4464 * RC_NODE_IN_TX flag to protect the entire rn_former chain.
4465 */
4466
4467 (void) pthread_mutex_unlock(&np->rn_lock);
4468
4469 for (;;) {
4470 current = cache_lookup(&np->rn_id);
4471
4472 if (current == NULL) {
4473 (void) pthread_mutex_lock(&np->rn_lock);
4474
4475 if (np->rn_flags & RC_NODE_DEAD)
4476 goto died;
4477
4478 /*
4479 * We are trying to unreference this node, but the
4480 * owner of the former list does not exist. It must
4481 * be the case that another thread is deleting this
4482 * entire sub-branch, but has not yet reached us.
4483 * We will in short order be deleted.
4484 */
4485 np->rn_flags &= ~RC_NODE_UNREFED;
4486 (void) pthread_mutex_unlock(&np->rn_lock);
4487 return;
4488 }
4489
4490 if (current == np) {
4491 /*
4492 * no longer unreferenced
4493 */
4494 (void) pthread_mutex_lock(&np->rn_lock);
4495 np->rn_flags &= ~RC_NODE_UNREFED;
4496 /* held in cache_lookup() */
4497 rc_node_rele_locked(np);
4498 return;
4499 }
4500
4501 (void) pthread_mutex_lock(¤t->rn_lock);
4502 if (current->rn_flags & RC_NODE_OLD) {
4503 /*
4504 * current has been replaced since we looked it
4505 * up. Try again.
4506 */
4507 /* held in cache_lookup() */
4508 rc_node_rele_locked(current);
4509 continue;
4510 }
4511
4512 if (!rc_node_hold_flag(current, RC_NODE_IN_TX)) {
4513 /*
4514 * current has been deleted since we looked it up. Try
4515 * again.
4516 */
4517 /* held in cache_lookup() */
4518 rc_node_rele_locked(current);
4519 continue;
4520 }
4521
4522 /*
4523 * rc_node_hold_flag() might have dropped current's lock, so
4524 * check OLD again.
4525 */
4526 if (!(current->rn_flags & RC_NODE_OLD)) {
4527 /* Not old. Stop looping. */
4528 (void) pthread_mutex_unlock(¤t->rn_lock);
4529 break;
4530 }
4531
4532 rc_node_rele_flag(current, RC_NODE_IN_TX);
4533 rc_node_rele_locked(current);
4534 }
4535
4536 /* To take np's RC_NODE_DYING_FLAGS, we need its lock. */
4537 (void) pthread_mutex_lock(&np->rn_lock);
4538
4539 /*
4540 * While we didn't have the lock, a thread may have added
4541 * a reference or changed the flags.
4542 */
4543 if (!(np->rn_flags & (RC_NODE_OLD | RC_NODE_DEAD)) ||
4544 np->rn_refs != 0 || np->rn_other_refs != 0 ||
4545 np->rn_other_refs_held != 0) {
4546 np->rn_flags &= ~RC_NODE_UNREFED;
4547
4548 (void) pthread_mutex_lock(¤t->rn_lock);
4549 rc_node_rele_flag(current, RC_NODE_IN_TX);
4550 /* held by cache_lookup() */
4551 rc_node_rele_locked(current);
4552 return;
4553 }
4554
4555 if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4556 /*
4557 * Someone deleted the node while we were waiting for
4558 * DYING_FLAGS. Undo the modifications to current.
4559 */
4560 (void) pthread_mutex_unlock(&np->rn_lock);
4561
4562 rc_node_rele_flag(current, RC_NODE_IN_TX);
4563 /* held by cache_lookup() */
4564 rc_node_rele_locked(current);
4565
4566 (void) pthread_mutex_lock(&np->rn_lock);
4567 goto died;
4568 }
4569
4570 /* Take RC_NODE_DYING_FLAGS on np's descendents. */
4571 rc_node_delete_hold(np, 0); /* drops np->rn_lock */
4572
4573 /* Mark np DEAD. This requires the lock. */
4574 (void) pthread_mutex_lock(&np->rn_lock);
4575
4576 /* Recheck for new references. */
4577 if (!(np->rn_flags & RC_NODE_OLD) ||
4578 np->rn_refs != 0 || np->rn_other_refs != 0 ||
4579 np->rn_other_refs_held != 0) {
4580 np->rn_flags &= ~RC_NODE_UNREFED;
4581 rc_node_delete_rele(np, 0); /* drops np's lock */
4582
4583 (void) pthread_mutex_lock(¤t->rn_lock);
4584 rc_node_rele_flag(current, RC_NODE_IN_TX);
4585 /* held by cache_lookup() */
4586 rc_node_rele_locked(current);
4587 return;
4588 }
4589
4590 np->rn_flags |= RC_NODE_DEAD;
4591
4592 /*
4593 * Delete the children. This calls rc_node_rele_locked() on np at
4594 * the end, so add a reference to keep the count from going
4595 * negative. It will recurse with RC_NODE_DEAD set, so we'll call
4596 * rc_node_destroy() above, but RC_NODE_UNREFED is also set, so it
4597 * shouldn't actually free() np.
4598 */
4599 rc_node_hold_locked(np);
4600 rc_node_delete_children(np, 0); /* unlocks np */
4601
4602 /* Remove np from current's rn_former chain. */
4603 (void) pthread_mutex_lock(¤t->rn_lock);
4604 for (cur = current; cur != NULL && cur->rn_former != np;
4605 cur = cur->rn_former)
4606 ;
4607 assert(cur != NULL && cur != np);
4608
4609 cur->rn_former = np->rn_former;
4610 np->rn_former = NULL;
4611
4612 rc_node_rele_flag(current, RC_NODE_IN_TX);
4613 /* held by cache_lookup() */
4614 rc_node_rele_locked(current);
4615
4616 /* Clear ON_FORMER and UNREFED, and destroy. */
4617 (void) pthread_mutex_lock(&np->rn_lock);
4618 assert(np->rn_flags & RC_NODE_ON_FORMER);
4619 np->rn_flags &= ~(RC_NODE_UNREFED | RC_NODE_ON_FORMER);
4620
4621 if (np->rn_erefs > 1) {
4622 /* Still referenced. Stay execution. */
4623 --np->rn_erefs;
4624 NODE_UNLOCK(np);
4625 return;
4626 }
4627
4628 (void) pthread_mutex_unlock(&np->rn_lock);
4629 rc_node_destroy(np);
4630 return;
4631
4632 died:
4633 /*
4634 * Another thread marked np DEAD. If there still aren't any
4635 * persistent references, destroy the node.
4636 */
4637 np->rn_flags &= ~RC_NODE_UNREFED;
4638
4639 unrefed = (np->rn_refs == 0 && np->rn_other_refs == 0 &&
4640 np->rn_other_refs_held == 0);
4641
4642 if (np->rn_erefs > 0)
4643 --np->rn_erefs;
4644
4645 if (unrefed && np->rn_erefs > 0) {
4646 NODE_UNLOCK(np);
4647 return;
4648 }
4649
4650 (void) pthread_mutex_unlock(&np->rn_lock);
4651
4652 if (unrefed)
4653 rc_node_destroy(np);
4654 }
4655
4656 static au_event_t
get_delete_event_id(rep_protocol_entity_t entity,uint32_t pgflags)4657 get_delete_event_id(rep_protocol_entity_t entity, uint32_t pgflags)
4658 {
4659 au_event_t id = 0;
4660
4661 #ifndef NATIVE_BUILD
4662 switch (entity) {
4663 case REP_PROTOCOL_ENTITY_SERVICE:
4664 case REP_PROTOCOL_ENTITY_INSTANCE:
4665 id = ADT_smf_delete;
4666 break;
4667 case REP_PROTOCOL_ENTITY_SNAPSHOT:
4668 id = ADT_smf_delete_snap;
4669 break;
4670 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4671 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4672 if (pgflags & SCF_PG_FLAG_NONPERSISTENT) {
4673 id = ADT_smf_delete_npg;
4674 } else {
4675 id = ADT_smf_delete_pg;
4676 }
4677 break;
4678 default:
4679 abort();
4680 }
4681 #endif /* NATIVE_BUILD */
4682 return (id);
4683 }
4684
4685 /*
4686 * Fails with
4687 * _NOT_SET
4688 * _DELETED
4689 * _BAD_REQUEST
4690 * _PERMISSION_DENIED
4691 * _NO_RESOURCES
4692 * _TRUNCATED
4693 * and whatever object_delete() fails with.
4694 */
4695 int
rc_node_delete(rc_node_ptr_t * npp)4696 rc_node_delete(rc_node_ptr_t *npp)
4697 {
4698 rc_node_t *np, *np_orig;
4699 rc_node_t *pp = NULL;
4700 int rc;
4701 rc_node_pg_notify_t *pnp;
4702 cache_bucket_t *bp;
4703 rc_notify_delete_t *ndp;
4704 permcheck_t *pcp;
4705 int granted;
4706 au_event_t event_id = 0;
4707 size_t sz_out;
4708 audit_event_data_t audit_data;
4709 int audit_failure = 0;
4710
4711 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
4712
4713 audit_data.ed_fmri = NULL;
4714 audit_data.ed_auth = NULL;
4715 audit_data.ed_snapname = NULL;
4716 audit_data.ed_type = NULL;
4717
4718 switch (np->rn_id.rl_type) {
4719 case REP_PROTOCOL_ENTITY_SERVICE:
4720 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SERVICE,
4721 np->rn_pgflags);
4722 break;
4723 case REP_PROTOCOL_ENTITY_INSTANCE:
4724 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_INSTANCE,
4725 np->rn_pgflags);
4726 break;
4727 case REP_PROTOCOL_ENTITY_SNAPSHOT:
4728 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SNAPSHOT,
4729 np->rn_pgflags);
4730 audit_data.ed_snapname = strdup(np->rn_name);
4731 if (audit_data.ed_snapname == NULL) {
4732 (void) pthread_mutex_unlock(&np->rn_lock);
4733 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4734 }
4735 break; /* deletable */
4736
4737 case REP_PROTOCOL_ENTITY_SCOPE:
4738 case REP_PROTOCOL_ENTITY_SNAPLEVEL:
4739 /* Scopes and snaplevels are indelible. */
4740 (void) pthread_mutex_unlock(&np->rn_lock);
4741 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4742
4743 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4744 (void) pthread_mutex_unlock(&np->rn_lock);
4745 np = np->rn_cchain[0];
4746 RC_NODE_CHECK_AND_LOCK(np);
4747 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_CPROPERTYGRP,
4748 np->rn_pgflags);
4749 break;
4750
4751 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4752 if (np->rn_id.rl_ids[ID_SNAPSHOT] == 0) {
4753 event_id =
4754 get_delete_event_id(REP_PROTOCOL_ENTITY_PROPERTYGRP,
4755 np->rn_pgflags);
4756 audit_data.ed_type = strdup(np->rn_type);
4757 if (audit_data.ed_type == NULL) {
4758 (void) pthread_mutex_unlock(&np->rn_lock);
4759 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4760 }
4761 break;
4762 }
4763
4764 /* Snapshot property groups are indelible. */
4765 (void) pthread_mutex_unlock(&np->rn_lock);
4766 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
4767
4768 case REP_PROTOCOL_ENTITY_PROPERTY:
4769 (void) pthread_mutex_unlock(&np->rn_lock);
4770 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4771
4772 default:
4773 assert(0);
4774 abort();
4775 break;
4776 }
4777
4778 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
4779 if (audit_data.ed_fmri == NULL) {
4780 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4781 goto cleanout;
4782 }
4783 np_orig = np;
4784 rc_node_hold_locked(np); /* simplifies rest of the code */
4785
4786 again:
4787 /*
4788 * The following loop is to deal with the fact that snapshots and
4789 * property groups are moving targets -- changes to them result
4790 * in a new "child" node. Since we can only delete from the top node,
4791 * we have to loop until we have a non-RC_NODE_OLD version.
4792 */
4793 for (;;) {
4794 if (!rc_node_wait_flag(np,
4795 RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
4796 rc_node_rele_locked(np);
4797 rc = REP_PROTOCOL_FAIL_DELETED;
4798 goto cleanout;
4799 }
4800
4801 if (np->rn_flags & RC_NODE_OLD) {
4802 rc_node_rele_locked(np);
4803 np = cache_lookup(&np_orig->rn_id);
4804 assert(np != np_orig);
4805
4806 if (np == NULL) {
4807 rc = REP_PROTOCOL_FAIL_DELETED;
4808 goto fail;
4809 }
4810 (void) pthread_mutex_lock(&np->rn_lock);
4811 continue;
4812 }
4813
4814 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4815 rc_node_rele_locked(np);
4816 rc_node_clear(npp, 1);
4817 rc = REP_PROTOCOL_FAIL_DELETED;
4818 }
4819
4820 /*
4821 * Mark our parent as children changing. this call drops our
4822 * lock and the RC_NODE_USING_PARENT flag, and returns with
4823 * pp's lock held
4824 */
4825 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
4826 if (pp == NULL) {
4827 /* our parent is gone, we're going next... */
4828 rc_node_rele(np);
4829
4830 rc_node_clear(npp, 1);
4831 rc = REP_PROTOCOL_FAIL_DELETED;
4832 goto cleanout;
4833 }
4834
4835 rc_node_hold_locked(pp); /* hold for later */
4836 (void) pthread_mutex_unlock(&pp->rn_lock);
4837
4838 (void) pthread_mutex_lock(&np->rn_lock);
4839 if (!(np->rn_flags & RC_NODE_OLD))
4840 break; /* not old -- we're done */
4841
4842 (void) pthread_mutex_unlock(&np->rn_lock);
4843 (void) pthread_mutex_lock(&pp->rn_lock);
4844 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4845 rc_node_rele_locked(pp);
4846 (void) pthread_mutex_lock(&np->rn_lock);
4847 continue; /* loop around and try again */
4848 }
4849 /*
4850 * Everyone out of the pool -- we grab everything but
4851 * RC_NODE_USING_PARENT (including RC_NODE_DYING) to keep
4852 * any changes from occurring while we are attempting to
4853 * delete the node.
4854 */
4855 if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4856 (void) pthread_mutex_unlock(&np->rn_lock);
4857 rc = REP_PROTOCOL_FAIL_DELETED;
4858 goto fail;
4859 }
4860
4861 assert(!(np->rn_flags & RC_NODE_OLD));
4862
4863 if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
4864 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
4865 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4866 (void) pthread_mutex_unlock(&np->rn_lock);
4867 goto fail;
4868 }
4869
4870 #ifdef NATIVE_BUILD
4871 if (!client_is_privileged()) {
4872 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4873 }
4874 #else
4875 if (is_main_repository) {
4876 /* permission check */
4877 (void) pthread_mutex_unlock(&np->rn_lock);
4878 pcp = pc_create();
4879 if (pcp != NULL) {
4880 rc = perm_add_enabling(pcp, AUTH_MODIFY);
4881
4882 /* add .smf.modify.<type> for pgs. */
4883 if (rc == REP_PROTOCOL_SUCCESS && np->rn_id.rl_type ==
4884 REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4885 const char * const auth =
4886 perm_auth_for_pgtype(np->rn_type);
4887
4888 if (auth != NULL)
4889 rc = perm_add_enabling(pcp, auth);
4890 }
4891
4892 if (rc == REP_PROTOCOL_SUCCESS) {
4893 granted = perm_granted(pcp);
4894
4895 rc = map_granted_status(granted, pcp,
4896 &audit_data.ed_auth);
4897 if (granted == PERM_GONE) {
4898 /* No need to audit if client gone. */
4899 pc_free(pcp);
4900 rc_node_rele_flag(np,
4901 RC_NODE_DYING_FLAGS);
4902 return (rc);
4903 }
4904 if (granted == PERM_DENIED)
4905 audit_failure = 1;
4906 }
4907
4908 pc_free(pcp);
4909 } else {
4910 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4911 }
4912
4913 (void) pthread_mutex_lock(&np->rn_lock);
4914 } else {
4915 rc = REP_PROTOCOL_SUCCESS;
4916 }
4917 #endif /* NATIVE_BUILD */
4918
4919 if (rc != REP_PROTOCOL_SUCCESS) {
4920 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4921 (void) pthread_mutex_unlock(&np->rn_lock);
4922 goto fail;
4923 }
4924
4925 ndp = uu_zalloc(sizeof (*ndp));
4926 if (ndp == NULL) {
4927 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4928 (void) pthread_mutex_unlock(&np->rn_lock);
4929 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4930 goto fail;
4931 }
4932
4933 rc_node_delete_hold(np, 1); /* hold entire subgraph, drop lock */
4934
4935 rc = object_delete(np);
4936
4937 if (rc != REP_PROTOCOL_SUCCESS) {
4938 (void) pthread_mutex_lock(&np->rn_lock);
4939 rc_node_delete_rele(np, 1); /* drops lock */
4940 uu_free(ndp);
4941 goto fail;
4942 }
4943
4944 /*
4945 * Now, delicately unlink and delete the object.
4946 *
4947 * Create the delete notification, atomically remove
4948 * from the hash table and set the NODE_DEAD flag, and
4949 * remove from the parent's children list.
4950 */
4951 rc_notify_node_delete(ndp, np); /* frees or uses ndp */
4952
4953 bp = cache_hold(np->rn_hash);
4954
4955 (void) pthread_mutex_lock(&np->rn_lock);
4956 cache_remove_unlocked(bp, np);
4957 cache_release(bp);
4958
4959 np->rn_flags |= RC_NODE_DEAD;
4960
4961 if (pp != NULL) {
4962 /*
4963 * Remove from pp's rn_children. This requires pp's lock,
4964 * so we must drop np's lock to respect lock order.
4965 */
4966 (void) pthread_mutex_unlock(&np->rn_lock);
4967 (void) pthread_mutex_lock(&pp->rn_lock);
4968 (void) pthread_mutex_lock(&np->rn_lock);
4969
4970 uu_list_remove(pp->rn_children, np);
4971
4972 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4973
4974 (void) pthread_mutex_unlock(&pp->rn_lock);
4975
4976 np->rn_flags &= ~RC_NODE_IN_PARENT;
4977 }
4978
4979 /*
4980 * finally, propagate death to our children (including marking
4981 * them DEAD), handle notifications, and release our hold.
4982 */
4983 rc_node_hold_locked(np); /* hold for delete */
4984 rc_node_delete_children(np, 1); /* drops DYING_FLAGS, lock, ref */
4985
4986 rc_node_clear(npp, 1);
4987
4988 (void) pthread_mutex_lock(&rc_pg_notify_lock);
4989 while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
4990 rc_pg_notify_fire(pnp);
4991 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
4992 rc_notify_remove_node(np);
4993
4994 rc_node_rele(np);
4995
4996 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
4997 &audit_data);
4998 free(audit_data.ed_auth);
4999 free(audit_data.ed_snapname);
5000 free(audit_data.ed_type);
5001 free(audit_data.ed_fmri);
5002 return (rc);
5003
5004 fail:
5005 rc_node_rele(np);
5006 if (rc == REP_PROTOCOL_FAIL_DELETED)
5007 rc_node_clear(npp, 1);
5008 if (pp != NULL) {
5009 (void) pthread_mutex_lock(&pp->rn_lock);
5010 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5011 rc_node_rele_locked(pp); /* drop ref and lock */
5012 }
5013 if (audit_failure) {
5014 smf_audit_event(event_id, ADT_FAILURE,
5015 ADT_FAIL_VALUE_AUTH, &audit_data);
5016 }
5017 cleanout:
5018 free(audit_data.ed_auth);
5019 free(audit_data.ed_snapname);
5020 free(audit_data.ed_type);
5021 free(audit_data.ed_fmri);
5022 return (rc);
5023 }
5024
/*
 * Sets *cpp to the first snaplevel of the snapshot *npp, or to the
 * snaplevel following the snaplevel *npp.
 *
 * Fails with
 *	_NOT_APPLICABLE - *npp is neither a snapshot nor a snaplevel
 *	_DELETED - *npp or its parent snapshot was deleted
 *	_NOT_FOUND - there is no next snaplevel
 *	(or whatever rc_node_fill_children() fails with)
 */
int
rc_node_next_snaplevel(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
{
	rc_node_t *np;
	rc_node_t *cp, *pp;
	int res;

	/* reset *cpp up front; it is only assigned at the bottom */
	rc_node_clear(cpp, 0);

	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);

	/* only snapshots and snaplevels have a "next snaplevel" */
	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT &&
	    np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
	}

	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
		/*
		 * For a snapshot, "next" means its first snaplevel: load
		 * the children and return the first snaplevel among them.
		 */
		if ((res = rc_node_fill_children(np,
		    REP_PROTOCOL_ENTITY_SNAPLEVEL)) != REP_PROTOCOL_SUCCESS) {
			(void) pthread_mutex_unlock(&np->rn_lock);
			return (res);
		}

		for (cp = uu_list_first(np->rn_children);
		    cp != NULL;
		    cp = uu_list_next(np->rn_children, cp)) {
			if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
				continue;
			rc_node_hold(cp);
			break;
		}

		(void) pthread_mutex_unlock(&np->rn_lock);
	} else {
		/*
		 * For a snaplevel, the successor lives on the parent
		 * snapshot's child list, so we must take the parent.
		 */
		if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
			/* np was deleted out from under us */
			(void) pthread_mutex_unlock(&np->rn_lock);
			rc_node_clear(npp, 1);
			return (REP_PROTOCOL_FAIL_DELETED);
		}

		/*
		 * mark our parent as children changing.  This call drops our
		 * lock and the RC_NODE_USING_PARENT flag, and returns with
		 * pp's lock held
		 */
		pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
		if (pp == NULL) {
			/* our parent is gone, we're going next... */

			rc_node_clear(npp, 1);
			return (REP_PROTOCOL_FAIL_DELETED);
		}

		/*
		 * find the next snaplevel
		 */
		cp = np;
		while ((cp = uu_list_next(pp->rn_children, cp)) != NULL &&
		    cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
			;

		/* it must match the snaplevel list */
		assert((cp == NULL && np->rn_snaplevel->rsl_next == NULL) ||
		    (cp != NULL && np->rn_snaplevel->rsl_next ==
		    cp->rn_snaplevel));

		if (cp != NULL)
			rc_node_hold(cp);

		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);

		(void) pthread_mutex_unlock(&pp->rn_lock);
	}

	/* hand the result (if any) to the caller, then drop our hold */
	rc_node_assign(cpp, cp);
	if (cp != NULL) {
		rc_node_rele(cp);

		return (REP_PROTOCOL_SUCCESS);
	}
	return (REP_PROTOCOL_FAIL_NOT_FOUND);
}
5108
5109 /*
5110 * This call takes a snapshot (np) and either:
5111 * an existing snapid (to be associated with np), or
5112 * a non-NULL parentp (from which a new snapshot is taken, and associated
5113 * with np)
5114 *
5115 * To do the association, np is duplicated, the duplicate is made to
5116 * represent the new snapid, and np is replaced with the new rc_node_t on
5117 * np's parent's child list. np is placed on the new node's rn_former list,
5118 * and replaces np in cache_hash (so rc_node_update() will find the new one).
5119 *
 * old_fmri and old_name point to the original snapshot's FMRI and name.
5121 * These values are used when generating audit events.
5122 *
5123 * Fails with
5124 * _BAD_REQUEST
5125 * _BACKEND_READONLY
5126 * _DELETED
5127 * _NO_RESOURCES
5128 * _TRUNCATED
5129 * _TYPE_MISMATCH
5130 */
static int
rc_attach_snapshot(
	rc_node_t *np,
	uint32_t snapid,
	rc_node_t *parentp,
	char *old_fmri,
	char *old_name)
{
	rc_node_t *np_orig;
	rc_node_t *nnp, *prev;
	rc_node_t *pp;
	int rc;
	size_t sz_out;
	perm_status_t granted;
	au_event_t event_id;
	audit_event_data_t audit_data;

	/* attach (parentp == NULL) requires the old FMRI for auditing */
	if (parentp == NULL) {
		assert(old_fmri != NULL);
	} else {
		assert(snapid == 0);
	}
	assert(MUTEX_HELD(&np->rn_lock));

	/* Gather the audit data. */
	/*
	 * ADT_smf_* symbols may not be defined in the /usr/include header
	 * files on the build machine.  Thus, the following if-else will
	 * not be compiled when doing native builds.
	 */
#ifndef NATIVE_BUILD
	if (parentp == NULL) {
		event_id = ADT_smf_attach_snap;
	} else {
		event_id = ADT_smf_create_snap;
	}
#endif	/* NATIVE_BUILD */
	audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
	audit_data.ed_snapname = malloc(REP_PROTOCOL_NAME_LEN);
	if ((audit_data.ed_fmri == NULL) || (audit_data.ed_snapname == NULL)) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		free(audit_data.ed_fmri);
		free(audit_data.ed_snapname);
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
	}
	audit_data.ed_auth = NULL;
	if (strlcpy(audit_data.ed_snapname, np->rn_name,
	    REP_PROTOCOL_NAME_LEN) >= REP_PROTOCOL_NAME_LEN) {
		/* snapshot names fit in REP_PROTOCOL_NAME_LEN by protocol */
		abort();
	}
	audit_data.ed_old_fmri = old_fmri;
	audit_data.ed_old_name = old_name ? old_name : "NO NAME";

	if (parentp == NULL) {
		/*
		 * In the attach case, get the instance FMRIs of the
		 * snapshots.
		 */
		if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
		    REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
			(void) pthread_mutex_unlock(&np->rn_lock);
			free(audit_data.ed_fmri);
			free(audit_data.ed_snapname);
			return (rc);
		}
	} else {
		/*
		 * Capture the FMRI of the parent if we're actually going
		 * to take the snapshot.
		 */
		if ((rc = rc_node_get_fmri_or_fragment(parentp,
		    audit_data.ed_fmri, REP_PROTOCOL_FMRI_LEN, &sz_out)) !=
		    REP_PROTOCOL_SUCCESS) {
			(void) pthread_mutex_unlock(&np->rn_lock);
			free(audit_data.ed_fmri);
			free(audit_data.ed_snapname);
			return (rc);
		}
	}

	np_orig = np;
	rc_node_hold_locked(np);		/* simplifies the remainder */

	/*
	 * The permission check runs with np's lock dropped, since the
	 * authorization machinery can call back into configd.
	 */
	(void) pthread_mutex_unlock(&np->rn_lock);
	granted = rc_node_modify_permission_check(&audit_data.ed_auth);
	switch (granted) {
	case PERM_DENIED:
		smf_audit_event(event_id, ADT_FAILURE, ADT_FAIL_VALUE_AUTH,
		    &audit_data);
		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
		rc_node_rele(np);
		goto cleanout;
	case PERM_GRANTED:
		break;
	case PERM_GONE:
		/* no audit event when the client has gone away */
		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
		rc_node_rele(np);
		goto cleanout;
	case PERM_FAIL:
		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
		rc_node_rele(np);
		goto cleanout;
	default:
		/*
		 * NOTE(review): other call sites pass the function name as
		 * a quoted string to bad_error() -- confirm this unquoted
		 * form is intended.
		 */
		bad_error(rc_node_modify_permission_check, granted);
	}
	(void) pthread_mutex_lock(&np->rn_lock);

	/*
	 * get the latest node, holding RC_NODE_IN_TX to keep the rn_former
	 * list from changing.
	 */
	for (;;) {
		if (!(np->rn_flags & RC_NODE_OLD)) {
			if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
				goto again;
			}
			pp = rc_node_hold_parent_flag(np,
			    RC_NODE_CHILDREN_CHANGING);

			(void) pthread_mutex_lock(&np->rn_lock);
			if (pp == NULL) {
				goto again;
			}
			if (np->rn_flags & RC_NODE_OLD) {
				/* lost a race with a snapshot; retry */
				rc_node_rele_flag(pp,
				    RC_NODE_CHILDREN_CHANGING);
				(void) pthread_mutex_unlock(&pp->rn_lock);
				goto again;
			}
			(void) pthread_mutex_unlock(&pp->rn_lock);

			if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
				/*
				 * Can't happen, since we're holding our
				 * parent's CHILDREN_CHANGING flag...
				 */
				abort();
			}
			break;			/* everything's ready */
		}
	again:
		/* np went OLD or away; chase the current node in the cache */
		rc_node_rele_locked(np);
		np = cache_lookup(&np_orig->rn_id);

		if (np == NULL) {
			rc = REP_PROTOCOL_FAIL_DELETED;
			goto cleanout;
		}

		(void) pthread_mutex_lock(&np->rn_lock);
	}

	/*
	 * Here np is current (not OLD), held and locked with IN_TX set,
	 * and pp is np's parent with its CHILDREN_CHANGING flag held.
	 */
	if (parentp != NULL) {
		if (pp != parentp) {
			/* the snapshot's parent changed; reject */
			rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
			goto fail;
		}
		nnp = NULL;
	} else {
		/*
		 * look for a former node with the snapid we need.
		 */
		if (np->rn_snapshot_id == snapid) {
			rc_node_rele_flag(np, RC_NODE_IN_TX);
			rc_node_rele_locked(np);

			(void) pthread_mutex_lock(&pp->rn_lock);
			rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
			(void) pthread_mutex_unlock(&pp->rn_lock);
			rc = REP_PROTOCOL_SUCCESS;	/* nothing to do */
			goto cleanout;
		}

		prev = np;
		while ((nnp = prev->rn_former) != NULL) {
			if (nnp->rn_snapshot_id == snapid) {
				rc_node_hold(nnp);
				break;	/* existing node with that id */
			}
			prev = nnp;
		}
	}

	if (nnp == NULL) {
		/* no suitable former node; create one modeled on np */
		prev = NULL;
		nnp = rc_node_alloc();
		if (nnp == NULL) {
			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
			goto fail;
		}

		nnp->rn_id = np->rn_id;		/* structure assignment */
		nnp->rn_hash = np->rn_hash;
		nnp->rn_name = strdup(np->rn_name);
		nnp->rn_snapshot_id = snapid;
		nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;

		if (nnp->rn_name == NULL) {
			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
			goto fail;
		}
	}

	/* write the association through to the repository backend */
	(void) pthread_mutex_unlock(&np->rn_lock);

	rc = object_snapshot_attach(&np->rn_id, &snapid, (parentp != NULL));

	if (parentp != NULL)
		nnp->rn_snapshot_id = snapid;	/* fill in new snapid */
	else
		assert(nnp->rn_snapshot_id == snapid);

	(void) pthread_mutex_lock(&np->rn_lock);
	if (rc != REP_PROTOCOL_SUCCESS)
		goto fail;

	/*
	 * fix up the former chain
	 */
	if (prev != NULL) {
		/* unlink nnp from the chain; it is about to go live */
		prev->rn_former = nnp->rn_former;
		(void) pthread_mutex_lock(&nnp->rn_lock);
		nnp->rn_flags &= ~RC_NODE_ON_FORMER;
		nnp->rn_former = NULL;
		(void) pthread_mutex_unlock(&nnp->rn_lock);
	}
	np->rn_flags |= RC_NODE_OLD;
	(void) pthread_mutex_unlock(&np->rn_lock);

	/*
	 * replace np with nnp
	 */
	rc_node_relink_child(pp, np, nnp);

	rc_node_rele(np);
	smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS, &audit_data);
	rc = REP_PROTOCOL_SUCCESS;

cleanout:
	free(audit_data.ed_auth);
	free(audit_data.ed_fmri);
	free(audit_data.ed_snapname);
	return (rc);

fail:
	/* release the flags and references taken above, in reverse order */
	rc_node_rele_flag(np, RC_NODE_IN_TX);
	rc_node_rele_locked(np);
	(void) pthread_mutex_lock(&pp->rn_lock);
	rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
	(void) pthread_mutex_unlock(&pp->rn_lock);

	if (nnp != NULL) {
		if (prev == NULL)
			rc_node_destroy(nnp);	/* never linked anywhere */
		else
			rc_node_rele(nnp);	/* still on the former chain */
	}

	free(audit_data.ed_auth);
	free(audit_data.ed_fmri);
	free(audit_data.ed_snapname);
	return (rc);
}
5394
/*
 * Take a new snapshot, named 'name', of the instance *npp.  svcname and
 * instname, when non-NULL, are validated as service/instance names and
 * passed through to object_snapshot_take_new().  On success, *outpp is
 * set to the new snapshot node and an ADT_smf_create_snap audit event
 * with ADT_SUCCESS is generated; an authorization failure generates the
 * same event with ADT_FAIL_VALUE_AUTH.
 */
int
rc_snapshot_take_new(rc_node_ptr_t *npp, const char *svcname,
    const char *instname, const char *name, rc_node_ptr_t *outpp)
{
	perm_status_t granted;
	rc_node_t *np;
	rc_node_t *outp = NULL;
	int rc, perm_rc;
	char fmri[REP_PROTOCOL_FMRI_LEN];
	audit_event_data_t audit_data;
	size_t sz_out;

	rc_node_clear(outpp, 0);

	/*
	 * rc_node_modify_permission_check() must be called before the node
	 * is locked.  This is because the library functions that check
	 * authorizations can trigger calls back into configd.
	 */
	granted = rc_node_modify_permission_check(&audit_data.ed_auth);
	switch (granted) {
	case PERM_DENIED:
		/*
		 * We continue in this case, so that we can generate an
		 * audit event later in this function.
		 */
		perm_rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
		break;
	case PERM_GRANTED:
		perm_rc = REP_PROTOCOL_SUCCESS;
		break;
	case PERM_GONE:
		/* No need to produce audit event if client is gone. */
		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
	case PERM_FAIL:
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
	default:
		bad_error("rc_node_modify_permission_check", granted);
		break;
	}

	/* snapshots can only be taken of instances */
	RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, audit_data.ed_auth);
	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		free(audit_data.ed_auth);
		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
	}

	/* validate the snapshot name, and svcname/instname if given */
	rc = rc_check_type_name(REP_PROTOCOL_ENTITY_SNAPSHOT, name);
	if (rc != REP_PROTOCOL_SUCCESS) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		free(audit_data.ed_auth);
		return (rc);
	}

	if (svcname != NULL && (rc =
	    rc_check_type_name(REP_PROTOCOL_ENTITY_SERVICE, svcname)) !=
	    REP_PROTOCOL_SUCCESS) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		free(audit_data.ed_auth);
		return (rc);
	}

	if (instname != NULL && (rc =
	    rc_check_type_name(REP_PROTOCOL_ENTITY_INSTANCE, instname)) !=
	    REP_PROTOCOL_SUCCESS) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		free(audit_data.ed_auth);
		return (rc);
	}

	audit_data.ed_fmri = fmri;
	audit_data.ed_snapname = (char *)name;

	if ((rc = rc_node_get_fmri_or_fragment(np, fmri, sizeof (fmri),
	    &sz_out)) != REP_PROTOCOL_SUCCESS) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		free(audit_data.ed_auth);
		return (rc);
	}
	/* audit the earlier authorization failure, now that we have data */
	if (perm_rc != REP_PROTOCOL_SUCCESS) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		smf_audit_event(ADT_smf_create_snap, ADT_FAILURE,
		    ADT_FAIL_VALUE_AUTH, &audit_data);
		free(audit_data.ed_auth);
		return (perm_rc);
	}

	/* keep other child creation out while the backend works */
	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
	    audit_data.ed_auth);
	(void) pthread_mutex_unlock(&np->rn_lock);

	rc = object_snapshot_take_new(np, svcname, instname, name, &outp);

	if (rc == REP_PROTOCOL_SUCCESS) {
		rc_node_assign(outpp, outp);
		rc_node_rele(outp);
	}

	(void) pthread_mutex_lock(&np->rn_lock);
	rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
	(void) pthread_mutex_unlock(&np->rn_lock);

	if (rc == REP_PROTOCOL_SUCCESS) {
		smf_audit_event(ADT_smf_create_snap, ADT_SUCCESS, ADT_SUCCESS,
		    &audit_data);
	}
	if (audit_data.ed_auth != NULL)
		free(audit_data.ed_auth);
	return (rc);
}
5506
5507 int
rc_snapshot_take_attach(rc_node_ptr_t * npp,rc_node_ptr_t * outpp)5508 rc_snapshot_take_attach(rc_node_ptr_t *npp, rc_node_ptr_t *outpp)
5509 {
5510 rc_node_t *np, *outp;
5511
5512 RC_NODE_PTR_GET_CHECK(np, npp);
5513 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5514 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5515 }
5516
5517 RC_NODE_PTR_GET_CHECK_AND_LOCK(outp, outpp);
5518 if (outp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5519 (void) pthread_mutex_unlock(&outp->rn_lock);
5520 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5521 }
5522
5523 return (rc_attach_snapshot(outp, 0, np, NULL,
5524 NULL)); /* drops outp's lock */
5525 }
5526
5527 int
rc_snapshot_attach(rc_node_ptr_t * npp,rc_node_ptr_t * cpp)5528 rc_snapshot_attach(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
5529 {
5530 rc_node_t *np;
5531 rc_node_t *cp;
5532 uint32_t snapid;
5533 char old_name[REP_PROTOCOL_NAME_LEN];
5534 int rc;
5535 size_t sz_out;
5536 char old_fmri[REP_PROTOCOL_FMRI_LEN];
5537
5538 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5539 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5540 (void) pthread_mutex_unlock(&np->rn_lock);
5541 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5542 }
5543 snapid = np->rn_snapshot_id;
5544 rc = rc_node_get_fmri_or_fragment(np, old_fmri, sizeof (old_fmri),
5545 &sz_out);
5546 (void) pthread_mutex_unlock(&np->rn_lock);
5547 if (rc != REP_PROTOCOL_SUCCESS)
5548 return (rc);
5549 if (np->rn_name != NULL) {
5550 if (strlcpy(old_name, np->rn_name, sizeof (old_name)) >=
5551 sizeof (old_name)) {
5552 return (REP_PROTOCOL_FAIL_TRUNCATED);
5553 }
5554 }
5555
5556 RC_NODE_PTR_GET_CHECK_AND_LOCK(cp, cpp);
5557 if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5558 (void) pthread_mutex_unlock(&cp->rn_lock);
5559 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5560 }
5561
5562 rc = rc_attach_snapshot(cp, snapid, NULL,
5563 old_fmri, old_name); /* drops cp's lock */
5564 return (rc);
5565 }
5566
5567 /*
5568 * If the pgname property group under ent has type pgtype, and it has a
5569 * propname property with type ptype, return _SUCCESS. If pgtype is NULL,
5570 * it is not checked. If ent is not a service node, we will return _SUCCESS if
5571 * a property meeting the requirements exists in either the instance or its
5572 * parent.
5573 *
5574 * Returns
5575 * _SUCCESS - see above
5576 * _DELETED - ent or one of its ancestors was deleted
5577 * _NO_RESOURCES - no resources
5578 * _NOT_FOUND - no matching property was found
5579 */
static int
rc_svc_prop_exists(rc_node_t *ent, const char *pgname, const char *pgtype,
    const char *propname, rep_protocol_value_type_t ptype)
{
	int ret;
	rc_node_t *pg = NULL, *spg = NULL, *svc, *prop;

	assert(!MUTEX_HELD(&ent->rn_lock));

	/* look for the pg directly on ent */
	(void) pthread_mutex_lock(&ent->rn_lock);
	ret = rc_node_find_named_child(ent, pgname,
	    REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
	(void) pthread_mutex_unlock(&ent->rn_lock);

	switch (ret) {
	case REP_PROTOCOL_SUCCESS:
		break;		/* pg may still be NULL: no such child */

	case REP_PROTOCOL_FAIL_DELETED:
	case REP_PROTOCOL_FAIL_NO_RESOURCES:
		return (ret);

	default:
		bad_error("rc_node_find_named_child", ret);
	}

	if (ent->rn_id.rl_type != REP_PROTOCOL_ENTITY_SERVICE) {
		/*
		 * ent is not a service, so also look for the same-named pg
		 * on its ancestor service (composition).
		 */
		ret = rc_node_find_ancestor(ent, REP_PROTOCOL_ENTITY_SERVICE,
		    &svc);
		if (ret != REP_PROTOCOL_SUCCESS) {
			assert(ret == REP_PROTOCOL_FAIL_DELETED);
			if (pg != NULL)
				rc_node_rele(pg);
			return (ret);
		}
		assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);

		(void) pthread_mutex_lock(&svc->rn_lock);
		ret = rc_node_find_named_child(svc, pgname,
		    REP_PROTOCOL_ENTITY_PROPERTYGRP, &spg);
		(void) pthread_mutex_unlock(&svc->rn_lock);

		rc_node_rele(svc);

		switch (ret) {
		case REP_PROTOCOL_SUCCESS:
			break;

		case REP_PROTOCOL_FAIL_DELETED:
		case REP_PROTOCOL_FAIL_NO_RESOURCES:
			if (pg != NULL)
				rc_node_rele(pg);
			return (ret);

		default:
			bad_error("rc_node_find_named_child", ret);
		}
	}

	/* discard either candidate whose type doesn't match pgtype */
	if (pg != NULL &&
	    pgtype != NULL && strcmp(pg->rn_type, pgtype) != 0) {
		rc_node_rele(pg);
		pg = NULL;
	}

	if (spg != NULL &&
	    pgtype != NULL && strcmp(spg->rn_type, pgtype) != 0) {
		rc_node_rele(spg);
		spg = NULL;
	}

	if (pg == NULL) {
		if (spg == NULL)
			return (REP_PROTOCOL_FAIL_NOT_FOUND);
		pg = spg;
		spg = NULL;
	}

	/*
	 * At this point, pg is non-NULL, and is a property group node of the
	 * correct type.  spg, if non-NULL, is also a property group node of
	 * the correct type.  Check for the property in pg first, then spg
	 * (if applicable).
	 */
	(void) pthread_mutex_lock(&pg->rn_lock);
	ret = rc_node_find_named_child(pg, propname,
	    REP_PROTOCOL_ENTITY_PROPERTY, &prop);
	(void) pthread_mutex_unlock(&pg->rn_lock);
	rc_node_rele(pg);
	switch (ret) {
	case REP_PROTOCOL_SUCCESS:
		if (prop != NULL) {
			/* found; it must also have the required value type */
			if (prop->rn_valtype == ptype) {
				rc_node_rele(prop);
				if (spg != NULL)
					rc_node_rele(spg);
				return (REP_PROTOCOL_SUCCESS);
			}
			rc_node_rele(prop);
		}
		break;

	case REP_PROTOCOL_FAIL_NO_RESOURCES:
		if (spg != NULL)
			rc_node_rele(spg);
		return (ret);

	case REP_PROTOCOL_FAIL_DELETED:
		/* pg disappeared under us; fall back to spg below */
		break;

	default:
		bad_error("rc_node_find_named_child", ret);
	}

	if (spg == NULL)
		return (REP_PROTOCOL_FAIL_NOT_FOUND);

	pg = spg;

	(void) pthread_mutex_lock(&pg->rn_lock);
	ret = rc_node_find_named_child(pg, propname,
	    REP_PROTOCOL_ENTITY_PROPERTY, &prop);
	(void) pthread_mutex_unlock(&pg->rn_lock);
	rc_node_rele(pg);
	switch (ret) {
	case REP_PROTOCOL_SUCCESS:
		if (prop != NULL) {
			if (prop->rn_valtype == ptype) {
				rc_node_rele(prop);
				return (REP_PROTOCOL_SUCCESS);
			}
			rc_node_rele(prop);
		}
		return (REP_PROTOCOL_FAIL_NOT_FOUND);

	case REP_PROTOCOL_FAIL_NO_RESOURCES:
		return (ret);

	case REP_PROTOCOL_FAIL_DELETED:
		return (REP_PROTOCOL_FAIL_NOT_FOUND);

	default:
		bad_error("rc_node_find_named_child", ret);
	}

	/* NOTREACHED: bad_error() does not return */
	return (REP_PROTOCOL_SUCCESS);
}
5727
5728 /*
5729 * Given a property group node, returns _SUCCESS if the property group may
5730 * be read without any special authorization.
5731 *
5732 * Fails with:
5733 * _DELETED - np or an ancestor node was deleted
5734 * _TYPE_MISMATCH - np does not refer to a property group
5735 * _NO_RESOURCES - no resources
5736 * _PERMISSION_DENIED - authorization is required
5737 */
5738 static int
rc_node_pg_check_read_protect(rc_node_t * np)5739 rc_node_pg_check_read_protect(rc_node_t *np)
5740 {
5741 int ret;
5742 rc_node_t *ent;
5743
5744 assert(!MUTEX_HELD(&np->rn_lock));
5745
5746 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
5747 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5748
5749 if (strcmp(np->rn_type, SCF_GROUP_FRAMEWORK) == 0 ||
5750 strcmp(np->rn_type, SCF_GROUP_DEPENDENCY) == 0 ||
5751 strcmp(np->rn_type, SCF_GROUP_METHOD) == 0)
5752 return (REP_PROTOCOL_SUCCESS);
5753
5754 ret = rc_node_parent(np, &ent);
5755
5756 if (ret != REP_PROTOCOL_SUCCESS)
5757 return (ret);
5758
5759 ret = rc_svc_prop_exists(ent, np->rn_name, np->rn_type,
5760 AUTH_PROP_READ, REP_PROTOCOL_TYPE_STRING);
5761
5762 rc_node_rele(ent);
5763
5764 switch (ret) {
5765 case REP_PROTOCOL_FAIL_NOT_FOUND:
5766 return (REP_PROTOCOL_SUCCESS);
5767 case REP_PROTOCOL_SUCCESS:
5768 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5769 case REP_PROTOCOL_FAIL_DELETED:
5770 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5771 return (ret);
5772 default:
5773 bad_error("rc_svc_prop_exists", ret);
5774 }
5775
5776 return (REP_PROTOCOL_SUCCESS);
5777 }
5778
5779 /*
5780 * Fails with
5781 * _DELETED - np's node or parent has been deleted
5782 * _TYPE_MISMATCH - np's node is not a property
5783 * _NO_RESOURCES - out of memory
5784 * _PERMISSION_DENIED - no authorization to read this property's value(s)
5785 * _BAD_REQUEST - np's parent is not a property group
5786 */
5787 static int
rc_node_property_may_read(rc_node_t * np)5788 rc_node_property_may_read(rc_node_t *np)
5789 {
5790 int ret;
5791 perm_status_t granted = PERM_DENIED;
5792 rc_node_t *pgp;
5793 permcheck_t *pcp;
5794 audit_event_data_t audit_data;
5795 size_t sz_out;
5796
5797 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
5798 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5799
5800 if (client_is_privileged())
5801 return (REP_PROTOCOL_SUCCESS);
5802
5803 #ifdef NATIVE_BUILD
5804 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5805 #else
5806 ret = rc_node_parent(np, &pgp);
5807
5808 if (ret != REP_PROTOCOL_SUCCESS)
5809 return (ret);
5810
5811 if (pgp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
5812 rc_node_rele(pgp);
5813 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5814 }
5815
5816 ret = rc_node_pg_check_read_protect(pgp);
5817
5818 if (ret != REP_PROTOCOL_FAIL_PERMISSION_DENIED) {
5819 rc_node_rele(pgp);
5820 return (ret);
5821 }
5822
5823 pcp = pc_create();
5824
5825 if (pcp == NULL) {
5826 rc_node_rele(pgp);
5827 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5828 }
5829
5830 ret = perm_add_enabling(pcp, AUTH_MODIFY);
5831
5832 if (ret == REP_PROTOCOL_SUCCESS) {
5833 const char * const auth =
5834 perm_auth_for_pgtype(pgp->rn_type);
5835
5836 if (auth != NULL)
5837 ret = perm_add_enabling(pcp, auth);
5838 }
5839
5840 /*
5841 * If you are permitted to modify the value, you may also
5842 * read it. This means that both the MODIFY and VALUE
5843 * authorizations are acceptable. We don't allow requests
5844 * for AUTH_PROP_MODIFY if all you have is $AUTH_PROP_VALUE,
5845 * however, to avoid leaking possibly valuable information
5846 * since such a user can't change the property anyway.
5847 */
5848 if (ret == REP_PROTOCOL_SUCCESS)
5849 ret = perm_add_enabling_values(pcp, pgp,
5850 AUTH_PROP_MODIFY);
5851
5852 if (ret == REP_PROTOCOL_SUCCESS &&
5853 strcmp(np->rn_name, AUTH_PROP_MODIFY) != 0)
5854 ret = perm_add_enabling_values(pcp, pgp,
5855 AUTH_PROP_VALUE);
5856
5857 if (ret == REP_PROTOCOL_SUCCESS)
5858 ret = perm_add_enabling_values(pcp, pgp,
5859 AUTH_PROP_READ);
5860
5861 rc_node_rele(pgp);
5862
5863 if (ret == REP_PROTOCOL_SUCCESS) {
5864 granted = perm_granted(pcp);
5865 if (granted == PERM_FAIL)
5866 ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5867 if (granted == PERM_GONE)
5868 ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5869 }
5870
5871 if (ret == REP_PROTOCOL_SUCCESS) {
5872 /* Generate a read_prop audit event. */
5873 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
5874 if (audit_data.ed_fmri == NULL)
5875 ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5876 }
5877 if (ret == REP_PROTOCOL_SUCCESS) {
5878 ret = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
5879 REP_PROTOCOL_FMRI_LEN, &sz_out);
5880 }
5881 if (ret == REP_PROTOCOL_SUCCESS) {
5882 int status;
5883 int ret_value;
5884
5885 if (granted == PERM_DENIED) {
5886 status = ADT_FAILURE;
5887 ret_value = ADT_FAIL_VALUE_AUTH;
5888 } else {
5889 status = ADT_SUCCESS;
5890 ret_value = ADT_SUCCESS;
5891 }
5892 audit_data.ed_auth = pcp->pc_auth_string;
5893 smf_audit_event(ADT_smf_read_prop,
5894 status, ret_value, &audit_data);
5895 }
5896 free(audit_data.ed_fmri);
5897
5898 pc_free(pcp);
5899
5900 if ((ret == REP_PROTOCOL_SUCCESS) && (granted == PERM_DENIED))
5901 ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5902
5903 return (ret);
5904 #endif /* NATIVE_BUILD */
5905 }
5906
5907 /*
5908 * Iteration
5909 */
5910 static int
rc_iter_filter_name(rc_node_t * np,void * s)5911 rc_iter_filter_name(rc_node_t *np, void *s)
5912 {
5913 const char *name = s;
5914
5915 return (strcmp(np->rn_name, name) == 0);
5916 }
5917
5918 static int
rc_iter_filter_type(rc_node_t * np,void * s)5919 rc_iter_filter_type(rc_node_t *np, void *s)
5920 {
5921 const char *type = s;
5922
5923 return (np->rn_type != NULL && strcmp(np->rn_type, type) == 0);
5924 }
5925
/*
 * Iterator filter which accepts every node; installed by rc_iter_create()
 * when the caller supplies no filter.
 */
/*ARGSUSED*/
static int
rc_iter_null_filter(rc_node_t *np, void *s)
{
	return (1);
}
5932
5933 /*
5934 * Allocate & initialize an rc_node_iter_t structure. Essentially, ensure
5935 * np->rn_children is populated and call uu_list_walk_start(np->rn_children).
5936 * If successful, leaves a hold on np & increments np->rn_other_refs
5937 *
5938 * If composed is true, then set up for iteration across the top level of np's
5939 * composition chain. If successful, leaves a hold on np and increments
5940 * rn_other_refs for the top level of np's composition chain.
5941 *
5942 * Fails with
5943 * _NO_RESOURCES
5944 * _INVALID_TYPE
5945 * _TYPE_MISMATCH - np cannot carry type children
5946 * _DELETED
5947 */
static int
rc_iter_create(rc_node_iter_t **resp, rc_node_t *np, uint32_t type,
    rc_iter_filter_func *filter, void *arg, boolean_t composed)
{
	rc_node_iter_t *nip;
	int res;

	assert(*resp == NULL);

	nip = uu_zalloc(sizeof (*nip));
	if (nip == NULL)
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);

	/* np is held by the client's rc_node_ptr_t */
	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
		composed = 1;

	if (!composed) {
		/* simple case: iterate over np's own children */
		(void) pthread_mutex_lock(&np->rn_lock);

		if ((res = rc_node_fill_children(np, type)) !=
		    REP_PROTOCOL_SUCCESS) {
			(void) pthread_mutex_unlock(&np->rn_lock);
			uu_free(nip);
			return (res);
		}

		nip->rni_clevel = -1;	/* not walking a composition chain */

		nip->rni_iter = uu_list_walk_start(np->rn_children,
		    UU_WALK_ROBUST);
		if (nip->rni_iter != NULL) {
			nip->rni_iter_node = np;
			rc_node_hold_other(np);
		} else {
			(void) pthread_mutex_unlock(&np->rn_lock);
			uu_free(nip);
			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
		}
		(void) pthread_mutex_unlock(&np->rn_lock);
	} else {
		rc_node_t *ent;

		if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
			/* rn_cchain isn't valid until children are loaded. */
			(void) pthread_mutex_lock(&np->rn_lock);
			res = rc_node_fill_children(np,
			    REP_PROTOCOL_ENTITY_SNAPLEVEL);
			(void) pthread_mutex_unlock(&np->rn_lock);
			if (res != REP_PROTOCOL_SUCCESS) {
				uu_free(nip);
				return (res);
			}

			/* Check for an empty snapshot. */
			if (np->rn_cchain[0] == NULL)
				goto empty;
		}

		/* Start at the top of the composition chain. */
		for (nip->rni_clevel = 0; ; ++nip->rni_clevel) {
			if (nip->rni_clevel >= COMPOSITION_DEPTH) {
				/* Empty composition chain. */
empty:
				nip->rni_clevel = -1;
				nip->rni_iter = NULL;
				/* It's ok, iter_next() will return _DONE. */
				goto out;
			}

			ent = np->rn_cchain[nip->rni_clevel];
			assert(ent != NULL);

			if (rc_node_check_and_lock(ent) == REP_PROTOCOL_SUCCESS)
				break;

			/* Someone deleted it, so try the next one. */
		}

		/* ent is locked here; set up the walk over its children */
		res = rc_node_fill_children(ent, type);

		if (res == REP_PROTOCOL_SUCCESS) {
			nip->rni_iter = uu_list_walk_start(ent->rn_children,
			    UU_WALK_ROBUST);

			if (nip->rni_iter == NULL)
				res = REP_PROTOCOL_FAIL_NO_RESOURCES;
			else {
				nip->rni_iter_node = ent;
				rc_node_hold_other(ent);
			}
		}

		if (res != REP_PROTOCOL_SUCCESS) {
			(void) pthread_mutex_unlock(&ent->rn_lock);
			uu_free(nip);
			return (res);
		}

		(void) pthread_mutex_unlock(&ent->rn_lock);
	}

out:
	rc_node_hold(np);		/* released by rc_iter_end() */
	nip->rni_parent = np;
	nip->rni_type = type;
	nip->rni_filter = (filter != NULL)? filter : rc_iter_null_filter;
	nip->rni_filter_arg = arg;
	*resp = nip;
	return (REP_PROTOCOL_SUCCESS);
}
6059
/*
 * Tear down an iterator.  Must be called with the lock of the node being
 * walked held (rni_parent, or the current composition-chain member when
 * rni_clevel >= 0); drops that lock, ends the uu_list walk, and releases
 * the holds taken when the iterator was created.
 */
static void
rc_iter_end(rc_node_iter_t *iter)
{
	rc_node_t *np = iter->rni_parent;

	/* when composed, the walk is over a composition-chain member */
	if (iter->rni_clevel >= 0)
		np = np->rn_cchain[iter->rni_clevel];

	assert(MUTEX_HELD(&np->rn_lock));
	if (iter->rni_iter != NULL)
		uu_list_walk_end(iter->rni_iter);
	iter->rni_iter = NULL;

	(void) pthread_mutex_unlock(&np->rn_lock);
	rc_node_rele(iter->rni_parent);	/* hold from iterator creation */
	if (iter->rni_iter_node != NULL)
		rc_node_rele_other(iter->rni_iter_node);
}
6078
6079 /*
6080 * Fails with
6081 * _NOT_SET - npp is reset
6082 * _DELETED - npp's node has been deleted
6083 * _NOT_APPLICABLE - npp's node is not a property
6084 * _NO_RESOURCES - out of memory
6085 */
6086 static int
rc_node_setup_value_iter(rc_node_ptr_t * npp,rc_node_iter_t ** iterp)6087 rc_node_setup_value_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp)
6088 {
6089 rc_node_t *np;
6090
6091 rc_node_iter_t *nip;
6092
6093 assert(*iterp == NULL);
6094
6095 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
6096
6097 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
6098 (void) pthread_mutex_unlock(&np->rn_lock);
6099 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6100 }
6101
6102 nip = uu_zalloc(sizeof (*nip));
6103 if (nip == NULL) {
6104 (void) pthread_mutex_unlock(&np->rn_lock);
6105 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6106 }
6107
6108 nip->rni_parent = np;
6109 nip->rni_iter = NULL;
6110 nip->rni_clevel = -1;
6111 nip->rni_type = REP_PROTOCOL_ENTITY_VALUE;
6112 nip->rni_offset = 0;
6113 nip->rni_last_offset = 0;
6114
6115 rc_node_hold_locked(np);
6116
6117 *iterp = nip;
6118 (void) pthread_mutex_unlock(&np->rn_lock);
6119
6120 return (REP_PROTOCOL_SUCCESS);
6121 }
6122
6123 /*
6124 * Returns:
6125 * _NO_RESOURCES - out of memory
6126 * _NOT_SET - npp is reset
6127 * _DELETED - npp's node has been deleted
6128 * _TYPE_MISMATCH - npp's node is not a property
6129 * _NOT_FOUND - property has no values
6130 * _TRUNCATED - property has >1 values (first is written into out)
6131 * _SUCCESS - property has 1 value (which is written into out)
6132 * _PERMISSION_DENIED - no authorization to read property value(s)
6133 *
6134 * We shorten *sz_out to not include anything after the final '\0'.
6135 */
int
rc_node_get_property_value(rc_node_ptr_t *npp,
    struct rep_protocol_value_response *out, size_t *sz_out)
{
	rc_node_t *np;
	size_t w;
	int ret;

	assert(*sz_out == sizeof (*out));

	/*
	 * Authorization first:  hold the node only long enough for
	 * rc_node_property_may_read(), then drop the hold.
	 */
	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
	ret = rc_node_property_may_read(np);
	rc_node_rele(np);

	if (ret != REP_PROTOCOL_SUCCESS)
		return (ret);

	/*
	 * Re-fetch and lock:  the CHECK macro revalidates npp in case it
	 * was reset or its node deleted while we held no lock.
	 */
	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);

	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
	}

	/* No values at all. */
	if (np->rn_values_size == 0) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (REP_PROTOCOL_FAIL_NOT_FOUND);
	}
	out->rpr_type = np->rn_valtype;
	/* Copy the first value; rn_values stores them '\0'-separated. */
	w = strlcpy(out->rpr_value, &np->rn_values[0],
	    sizeof (out->rpr_value));

	if (w >= sizeof (out->rpr_value))
		backend_panic("value too large");

	/* Shrink *sz_out to cover only the value and its final '\0'. */
	*sz_out = offsetof(struct rep_protocol_value_response,
	    rpr_value[w + 1]);

	/* Multiple values:  first one was copied, signal truncation. */
	ret = (np->rn_values_count != 1)? REP_PROTOCOL_FAIL_TRUNCATED :
	    REP_PROTOCOL_SUCCESS;
	(void) pthread_mutex_unlock(&np->rn_lock);
	return (ret);
}
6179
/*
 * Copy the current value of a value iterator into *out.  If 'repeat' is
 * nonzero, re-copy the previously returned value (rni_last_offset)
 * instead of the current one, and do not advance.
 *
 * Returns
 *	_BAD_REQUEST - iter does not walk values
 *	failure codes from rc_node_property_may_read()
 *	_DONE - all values have been returned (*sz_out is shrunk to
 *	    exclude rpr_value)
 *	_SUCCESS - a value was copied into *out; *sz_out covers it
 */
int
rc_iter_next_value(rc_node_iter_t *iter,
    struct rep_protocol_value_response *out, size_t *sz_out, int repeat)
{
	rc_node_t *np = iter->rni_parent;
	const char *vals;
	size_t len;

	size_t start;
	size_t w;
	int ret;

	rep_protocol_responseid_t result;

	assert(*sz_out == sizeof (*out));

	(void) memset(out, '\0', *sz_out);

	if (iter->rni_type != REP_PROTOCOL_ENTITY_VALUE)
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);

	/* Permission check before touching the values. */
	RC_NODE_CHECK(np);
	ret = rc_node_property_may_read(np);

	if (ret != REP_PROTOCOL_SUCCESS)
		return (ret);

	RC_NODE_CHECK_AND_LOCK(np);

	/* Values are stored consecutively, '\0'-separated. */
	vals = np->rn_values;
	len = np->rn_values_size;

	out->rpr_type = np->rn_valtype;

	start = (repeat)? iter->rni_last_offset : iter->rni_offset;

	if (len == 0 || start >= len) {
		/* Exhausted:  trim *sz_out so no value is sent back. */
		result = REP_PROTOCOL_DONE;
		*sz_out -= sizeof (out->rpr_value);
	} else {
		w = strlcpy(out->rpr_value, &vals[start],
		    sizeof (out->rpr_value));

		if (w >= sizeof (out->rpr_value))
			backend_panic("value too large");

		*sz_out = offsetof(struct rep_protocol_value_response,
		    rpr_value[w + 1]);

		/*
		 * update the offsets if we're not repeating
		 */
		if (!repeat) {
			iter->rni_last_offset = iter->rni_offset;
			iter->rni_offset += (w + 1);
		}

		result = REP_PROTOCOL_SUCCESS;
	}

	(void) pthread_mutex_unlock(&np->rn_lock);
	return (result);
}
6243
6244 /*
6245 * Entry point for ITER_START from client.c. Validate the arguments & call
6246 * rc_iter_create().
6247 *
6248 * Fails with
6249 * _NOT_SET
6250 * _DELETED
6251 * _TYPE_MISMATCH - np cannot carry type children
6252 * _BAD_REQUEST - flags is invalid
6253 * pattern is invalid
6254 * _NO_RESOURCES
6255 * _INVALID_TYPE
6256 * _TYPE_MISMATCH - *npp cannot have children of type
6257 * _BACKEND_ACCESS
6258 */
6259 int
rc_node_setup_iter(rc_node_ptr_t * npp,rc_node_iter_t ** iterp,uint32_t type,uint32_t flags,const char * pattern)6260 rc_node_setup_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp,
6261 uint32_t type, uint32_t flags, const char *pattern)
6262 {
6263 rc_node_t *np;
6264 rc_iter_filter_func *f = NULL;
6265 int rc;
6266
6267 RC_NODE_PTR_GET_CHECK(np, npp);
6268
6269 if (pattern != NULL && pattern[0] == '\0')
6270 pattern = NULL;
6271
6272 if (type == REP_PROTOCOL_ENTITY_VALUE) {
6273 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
6274 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6275 if (flags != RP_ITER_START_ALL || pattern != NULL)
6276 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6277
6278 rc = rc_node_setup_value_iter(npp, iterp);
6279 assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6280 return (rc);
6281 }
6282
6283 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
6284 REP_PROTOCOL_SUCCESS)
6285 return (rc);
6286
6287 if (((flags & RP_ITER_START_FILT_MASK) == RP_ITER_START_ALL) ^
6288 (pattern == NULL))
6289 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6290
6291 /* Composition only works for instances & snapshots. */
6292 if ((flags & RP_ITER_START_COMPOSED) &&
6293 (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE &&
6294 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT))
6295 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6296
6297 if (pattern != NULL) {
6298 if ((rc = rc_check_type_name(type, pattern)) !=
6299 REP_PROTOCOL_SUCCESS)
6300 return (rc);
6301 pattern = strdup(pattern);
6302 if (pattern == NULL)
6303 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6304 }
6305
6306 switch (flags & RP_ITER_START_FILT_MASK) {
6307 case RP_ITER_START_ALL:
6308 f = NULL;
6309 break;
6310 case RP_ITER_START_EXACT:
6311 f = rc_iter_filter_name;
6312 break;
6313 case RP_ITER_START_PGTYPE:
6314 if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
6315 free((void *)pattern);
6316 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6317 }
6318 f = rc_iter_filter_type;
6319 break;
6320 default:
6321 free((void *)pattern);
6322 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6323 }
6324
6325 rc = rc_iter_create(iterp, np, type, f, (void *)pattern,
6326 flags & RP_ITER_START_COMPOSED);
6327 if (rc != REP_PROTOCOL_SUCCESS && pattern != NULL)
6328 free((void *)pattern);
6329
6330 return (rc);
6331 }
6332
6333 /*
6334 * Do uu_list_walk_next(iter->rni_iter) until we find a child which matches
6335 * the filter.
6336 * For composed iterators, then check to see if there's an overlapping entity
6337 * (see embedded comments). If we reach the end of the list, start over at
6338 * the next level.
6339 *
6340 * Returns
6341 * _BAD_REQUEST - iter walks values
6342 * _TYPE_MISMATCH - iter does not walk type entities
6343 * _DELETED - parent was deleted
6344 * _NO_RESOURCES
6345 * _INVALID_TYPE - type is invalid
6346 * _DONE
6347 * _SUCCESS
6348 *
6349 * For composed property group iterators, can also return
6350 * _TYPE_MISMATCH - parent cannot have type children
6351 */
int
rc_iter_next(rc_node_iter_t *iter, rc_node_ptr_t *out, uint32_t type)
{
	rc_node_t *np = iter->rni_parent;
	rc_node_t *res;
	int rc;

	/* Value iterators must use rc_iter_next_value() instead. */
	if (iter->rni_type == REP_PROTOCOL_ENTITY_VALUE)
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);

	/* No walker means the iteration already completed. */
	if (iter->rni_iter == NULL) {
		rc_node_clear(out, 0);
		return (REP_PROTOCOL_DONE);
	}

	if (iter->rni_type != type) {
		rc_node_clear(out, 0);
		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
	}

	(void) pthread_mutex_lock(&np->rn_lock); /* held by _iter_create() */

	if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		rc_node_clear(out, 1);
		return (REP_PROTOCOL_FAIL_DELETED);
	}

	if (iter->rni_clevel >= 0) {
		/* Composed iterator.  Iterate over appropriate level. */
		(void) pthread_mutex_unlock(&np->rn_lock);
		np = np->rn_cchain[iter->rni_clevel];
		/*
		 * If iter->rni_parent is an instance or a snapshot, np must
		 * be valid since iter holds iter->rni_parent & possible
		 * levels (service, instance, snaplevel) cannot be destroyed
		 * while rni_parent is held.  If iter->rni_parent is
		 * a composed property group then rc_node_setup_cpg() put
		 * a hold on np.
		 */

		(void) pthread_mutex_lock(&np->rn_lock);

		if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
			(void) pthread_mutex_unlock(&np->rn_lock);
			rc_node_clear(out, 1);
			return (REP_PROTOCOL_FAIL_DELETED);
		}
	}

	assert(np->rn_flags & RC_NODE_HAS_CHILDREN);

	for (;;) {
		res = uu_list_walk_next(iter->rni_iter);
		if (res == NULL) {
			/* End of this level's child list. */
			rc_node_t *parent = iter->rni_parent;

#if COMPOSITION_DEPTH == 2
			if (iter->rni_clevel < 0 || iter->rni_clevel == 1) {
				/* release walker and lock */
				rc_iter_end(iter);
				break;
			}

			/* Stop walking current level. */
			uu_list_walk_end(iter->rni_iter);
			iter->rni_iter = NULL;
			(void) pthread_mutex_unlock(&np->rn_lock);
			rc_node_rele_other(iter->rni_iter_node);
			iter->rni_iter_node = NULL;

			/* Start walking next level. */
			++iter->rni_clevel;
			np = parent->rn_cchain[iter->rni_clevel];
			assert(np != NULL);
#else
#error This code must be updated.
#endif

			(void) pthread_mutex_lock(&np->rn_lock);

			/* Load the new level's children before walking. */
			rc = rc_node_fill_children(np, iter->rni_type);

			if (rc == REP_PROTOCOL_SUCCESS) {
				iter->rni_iter =
				    uu_list_walk_start(np->rn_children,
				    UU_WALK_ROBUST);

				if (iter->rni_iter == NULL)
					rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
				else {
					iter->rni_iter_node = np;
					rc_node_hold_other(np);
				}
			}

			if (rc != REP_PROTOCOL_SUCCESS) {
				(void) pthread_mutex_unlock(&np->rn_lock);
				rc_node_clear(out, 0);
				return (rc);
			}

			continue;
		}

		/* Skip children of the wrong type or filtered out. */
		if (res->rn_id.rl_type != type ||
		    !iter->rni_filter(res, iter->rni_filter_arg))
			continue;

		/*
		 * If we're composed and not at the top level, check to see if
		 * there's an entity at a higher level with the same name.  If
		 * so, skip this one.
		 */
		if (iter->rni_clevel > 0) {
			rc_node_t *ent = iter->rni_parent->rn_cchain[0];
			rc_node_t *pg;

#if COMPOSITION_DEPTH == 2
			assert(iter->rni_clevel == 1);

			/* Lock ordering:  drop np before taking ent. */
			(void) pthread_mutex_unlock(&np->rn_lock);
			(void) pthread_mutex_lock(&ent->rn_lock);
			rc = rc_node_find_named_child(ent, res->rn_name, type,
			    &pg);
			if (rc == REP_PROTOCOL_SUCCESS && pg != NULL)
				rc_node_rele(pg);
			(void) pthread_mutex_unlock(&ent->rn_lock);
			if (rc != REP_PROTOCOL_SUCCESS) {
				rc_node_clear(out, 0);
				return (rc);
			}
			(void) pthread_mutex_lock(&np->rn_lock);

			/* Make sure np isn't being deleted all of a sudden. */
			if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
				(void) pthread_mutex_unlock(&np->rn_lock);
				rc_node_clear(out, 1);
				return (REP_PROTOCOL_FAIL_DELETED);
			}

			if (pg != NULL)
				/* Keep going. */
				continue;
#else
#error This code must be updated.
#endif
		}

		/*
		 * If we're composed, iterating over property groups, and not
		 * at the bottom level, check to see if there's a pg at lower
		 * level with the same name.  If so, return a cpg.
		 */
		if (iter->rni_clevel >= 0 &&
		    type == REP_PROTOCOL_ENTITY_PROPERTYGRP &&
		    iter->rni_clevel < COMPOSITION_DEPTH - 1) {
#if COMPOSITION_DEPTH == 2
			rc_node_t *pg;
			rc_node_t *ent = iter->rni_parent->rn_cchain[1];

			rc_node_hold(res);	/* While we drop np->rn_lock */

			(void) pthread_mutex_unlock(&np->rn_lock);
			(void) pthread_mutex_lock(&ent->rn_lock);
			rc = rc_node_find_named_child(ent, res->rn_name, type,
			    &pg);
			/* holds pg if not NULL */
			(void) pthread_mutex_unlock(&ent->rn_lock);
			if (rc != REP_PROTOCOL_SUCCESS) {
				rc_node_rele(res);
				rc_node_clear(out, 0);
				return (rc);
			}

			(void) pthread_mutex_lock(&np->rn_lock);
			if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
				(void) pthread_mutex_unlock(&np->rn_lock);
				rc_node_rele(res);
				if (pg != NULL)
					rc_node_rele(pg);
				rc_node_clear(out, 1);
				return (REP_PROTOCOL_FAIL_DELETED);
			}

			if (pg == NULL) {
				/*
				 * No overlap below:  drop the temporary
				 * hold on res (the final hold is taken
				 * under np's lock after the loop).
				 */
				(void) pthread_mutex_unlock(&np->rn_lock);
				rc_node_rele(res);
				(void) pthread_mutex_lock(&np->rn_lock);
				if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
					(void) pthread_mutex_unlock(&np->
					    rn_lock);
					rc_node_clear(out, 1);
					return (REP_PROTOCOL_FAIL_DELETED);
				}
			} else {
				rc_node_t *cpg;

				/* Keep res held for rc_node_setup_cpg(). */

				cpg = rc_node_alloc();
				if (cpg == NULL) {
					(void) pthread_mutex_unlock(
					    &np->rn_lock);
					rc_node_rele(res);
					rc_node_rele(pg);
					rc_node_clear(out, 0);
					return (REP_PROTOCOL_FAIL_NO_RESOURCES);
				}

				switch (rc_node_setup_cpg(cpg, res, pg)) {
				case REP_PROTOCOL_SUCCESS:
					/* Hand the cpg back instead. */
					res = cpg;
					break;

				case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
					/* Nevermind. */
					(void) pthread_mutex_unlock(&np->
					    rn_lock);
					rc_node_destroy(cpg);
					rc_node_rele(pg);
					rc_node_rele(res);
					(void) pthread_mutex_lock(&np->
					    rn_lock);
					if (!rc_node_wait_flag(np,
					    RC_NODE_DYING)) {
						(void) pthread_mutex_unlock(&
						    np->rn_lock);
						rc_node_clear(out, 1);
						return
						    (REP_PROTOCOL_FAIL_DELETED);
					}
					break;

				case REP_PROTOCOL_FAIL_NO_RESOURCES:
					rc_node_destroy(cpg);
					(void) pthread_mutex_unlock(
					    &np->rn_lock);
					rc_node_rele(res);
					rc_node_rele(pg);
					rc_node_clear(out, 0);
					return (REP_PROTOCOL_FAIL_NO_RESOURCES);

				default:
					assert(0);
					abort();
				}
			}
#else
#error This code must be updated.
#endif
		}

		rc_node_hold(res);
		(void) pthread_mutex_unlock(&np->rn_lock);
		break;
	}
	rc_node_assign(out, res);

	if (res == NULL)
		return (REP_PROTOCOL_DONE);
	rc_node_rele(res);
	return (REP_PROTOCOL_SUCCESS);
}
6616
/*
 * Tear down and free an iterator created by rc_node_setup_iter().
 * Safe to call on an already-destroyed iterator (*nipp == NULL), and
 * *nipp is cleared on return so repeated destruction is harmless.
 */
void
rc_iter_destroy(rc_node_iter_t **nipp)
{
	rc_node_iter_t *nip = *nipp;
	rc_node_t *np;

	if (nip == NULL)
		return;		/* already freed */

	np = nip->rni_parent;

	/* The filter argument (strdup()ed pattern) belongs to us. */
	if (nip->rni_filter_arg != NULL)
		free(nip->rni_filter_arg);
	nip->rni_filter_arg = NULL;

	/*
	 * rc_iter_end() expects the walked node's lock:  rni_parent's for
	 * value iterators and plain iterators (rni_clevel < 0), or the
	 * current composition level's otherwise.
	 *
	 * NOTE(review): if an entity iterator was created over an empty
	 * composition chain (rni_iter == NULL, type != VALUE), the hold
	 * rc_iter_create() placed on rni_parent does not appear to be
	 * released on this path -- verify against rc_iter_create().
	 */
	if (nip->rni_type == REP_PROTOCOL_ENTITY_VALUE ||
	    nip->rni_iter != NULL) {
		if (nip->rni_clevel < 0)
			(void) pthread_mutex_lock(&np->rn_lock);
		else
			(void) pthread_mutex_lock(
			    &np->rn_cchain[nip->rni_clevel]->rn_lock);
		rc_iter_end(nip);		/* release walker and lock */
	}
	nip->rni_parent = NULL;

	uu_free(nip);
	*nipp = NULL;
}
6646
/*
 * Set up *txp as a transaction on the property group *npp points to
 * (for a composed property group, on the first pg in its composition
 * chain).  On the live repository, build the set of authorizations that
 * could permit this client to modify the pg and evaluate them; a denial
 * is recorded in txp->rnp_authorized (RC_AUTH_FAILED) rather than
 * returned, so audit events can be generated at commit time.
 *
 * Fails with
 *	_NOT_SET / _DELETED - npp is reset or its node was deleted
 *	    (from the pointer-check macros)
 *	_TYPE_MISMATCH - *npp is not a (composed) property group
 *	_PERMISSION_DENIED - pg is part of a snapshot, or (native build)
 *	    the client is unprivileged
 *	_NO_RESOURCES - out of memory
 */
int
rc_node_setup_tx(rc_node_ptr_t *npp, rc_node_ptr_t *txp)
{
	rc_node_t *np;
	permcheck_t *pcp;
	int ret;
	perm_status_t granted;
	rc_auth_state_t authorized = RC_AUTH_UNKNOWN;
	char *auth_string = NULL;

	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);

	/* For a composed pg, transact on its first composition level. */
	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
		rc_node_rele(np);
		np = np->rn_cchain[0];
		RC_NODE_CHECK_AND_HOLD(np);
	}

	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
		rc_node_rele(np);
		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
	}

	/* Property groups in snapshots cannot be modified. */
	if (np->rn_id.rl_ids[ID_SNAPSHOT] != 0) {
		rc_node_rele(np);
		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
	}

#ifdef NATIVE_BUILD
	if (client_is_privileged())
		goto skip_checks;
	rc_node_rele(np);
	return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
#else
	/* Alternate repositories are not permission-checked. */
	if (is_main_repository == 0)
		goto skip_checks;

	/* permission check */
	pcp = pc_create();
	if (pcp == NULL) {
		rc_node_rele(np);
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
	}

	if (np->rn_id.rl_ids[ID_INSTANCE] != 0 &&	/* instance pg */
	    ((strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0 &&
	    strcmp(np->rn_type, AUTH_PG_ACTIONS_TYPE) == 0) ||
	    (strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
	    strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
		rc_node_t *instn;

		/* solaris.smf.modify can be used */
		ret = perm_add_enabling(pcp, AUTH_MODIFY);
		if (ret != REP_PROTOCOL_SUCCESS) {
			pc_free(pcp);
			rc_node_rele(np);
			return (ret);
		}

		/* solaris.smf.manage can be used. */
		ret = perm_add_enabling(pcp, AUTH_MANAGE);

		if (ret != REP_PROTOCOL_SUCCESS) {
			pc_free(pcp);
			rc_node_rele(np);
			return (ret);
		}

		/* general/action_authorization values can be used. */
		ret = rc_node_parent(np, &instn);
		if (ret != REP_PROTOCOL_SUCCESS) {
			assert(ret == REP_PROTOCOL_FAIL_DELETED);
			rc_node_rele(np);
			pc_free(pcp);
			return (REP_PROTOCOL_FAIL_DELETED);
		}

		assert(instn->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);

		ret = perm_add_inst_action_auth(pcp, instn);
		rc_node_rele(instn);
		switch (ret) {
		case REP_PROTOCOL_SUCCESS:
			break;

		case REP_PROTOCOL_FAIL_DELETED:
		case REP_PROTOCOL_FAIL_NO_RESOURCES:
			rc_node_rele(np);
			pc_free(pcp);
			return (ret);

		default:
			bad_error("perm_add_inst_action_auth", ret);
		}

		if (strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0)
			authorized = RC_AUTH_PASSED; /* No check on commit. */
	} else {
		ret = perm_add_enabling(pcp, AUTH_MODIFY);

		if (ret == REP_PROTOCOL_SUCCESS) {
			/* propertygroup-type-specific authorization */
			/* no locking because rn_type won't change anyway */
			const char * const auth =
			    perm_auth_for_pgtype(np->rn_type);

			if (auth != NULL)
				ret = perm_add_enabling(pcp, auth);
		}

		if (ret == REP_PROTOCOL_SUCCESS)
			/* propertygroup/transaction-type-specific auths */
			ret =
			    perm_add_enabling_values(pcp, np, AUTH_PROP_VALUE);

		if (ret == REP_PROTOCOL_SUCCESS)
			ret =
			    perm_add_enabling_values(pcp, np, AUTH_PROP_MODIFY);

		/* AUTH_MANAGE can manipulate general/AUTH_PROP_ACTION */
		if (ret == REP_PROTOCOL_SUCCESS &&
		    strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
		    strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0)
			ret = perm_add_enabling(pcp, AUTH_MANAGE);

		if (ret != REP_PROTOCOL_SUCCESS) {
			pc_free(pcp);
			rc_node_rele(np);
			return (ret);
		}
	}

	granted = perm_granted(pcp);
	ret = map_granted_status(granted, pcp, &auth_string);
	pc_free(pcp);

	/* Hard failures (not simple denial) abort the setup. */
	if ((granted == PERM_GONE) || (granted == PERM_FAIL) ||
	    (ret == REP_PROTOCOL_FAIL_NO_RESOURCES)) {
		free(auth_string);
		rc_node_rele(np);
		return (ret);
	}

	if (granted == PERM_DENIED) {
		/*
		 * If we get here, the authorization failed.
		 * Unfortunately, we don't have enough information at this
		 * point to generate the security audit events.  We'll only
		 * get that information when the client tries to commit the
		 * event.  Thus, we'll remember the failed authorization,
		 * so that we can generate the audit events later.
		 */
		authorized = RC_AUTH_FAILED;
	}
#endif /* NATIVE_BUILD */

skip_checks:
	rc_node_assign(txp, np);
	txp->rnp_authorized = authorized;
	if (authorized != RC_AUTH_UNKNOWN) {
		/* Save the authorization string. */
		if (txp->rnp_auth_string != NULL)
			free((void *)txp->rnp_auth_string);
		txp->rnp_auth_string = auth_string;
		auth_string = NULL;	/* Don't free until done with txp. */
	}

	rc_node_rele(np);
	if (auth_string != NULL)
		free(auth_string);
	return (REP_PROTOCOL_SUCCESS);
}
6819
6820 /*
6821 * Return 1 if the given transaction commands only modify the values of
6822 * properties other than "modify_authorization". Return -1 if any of the
6823 * commands are invalid, and 0 otherwise.
6824 */
6825 static int
tx_allow_value(const void * cmds_arg,size_t cmds_sz,rc_node_t * pg)6826 tx_allow_value(const void *cmds_arg, size_t cmds_sz, rc_node_t *pg)
6827 {
6828 const struct rep_protocol_transaction_cmd *cmds;
6829 uintptr_t loc;
6830 uint32_t sz;
6831 rc_node_t *prop;
6832 boolean_t ok;
6833
6834 assert(!MUTEX_HELD(&pg->rn_lock));
6835
6836 loc = (uintptr_t)cmds_arg;
6837
6838 while (cmds_sz > 0) {
6839 cmds = (struct rep_protocol_transaction_cmd *)loc;
6840
6841 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6842 return (-1);
6843
6844 sz = cmds->rptc_size;
6845 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6846 return (-1);
6847
6848 sz = TX_SIZE(sz);
6849 if (sz > cmds_sz)
6850 return (-1);
6851
6852 switch (cmds[0].rptc_action) {
6853 case REP_PROTOCOL_TX_ENTRY_CLEAR:
6854 break;
6855
6856 case REP_PROTOCOL_TX_ENTRY_REPLACE:
6857 /* Check type */
6858 (void) pthread_mutex_lock(&pg->rn_lock);
6859 ok = B_FALSE;
6860 if (rc_node_find_named_child(pg,
6861 (const char *)cmds[0].rptc_data,
6862 REP_PROTOCOL_ENTITY_PROPERTY, &prop) ==
6863 REP_PROTOCOL_SUCCESS) {
6864 if (prop != NULL) {
6865 ok = prop->rn_valtype ==
6866 cmds[0].rptc_type;
6867 /*
6868 * rc_node_find_named_child()
6869 * places a hold on prop which we
6870 * do not need to hang on to.
6871 */
6872 rc_node_rele(prop);
6873 }
6874 }
6875 (void) pthread_mutex_unlock(&pg->rn_lock);
6876 if (ok)
6877 break;
6878 return (0);
6879
6880 default:
6881 return (0);
6882 }
6883
6884 if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_MODIFY)
6885 == 0)
6886 return (0);
6887
6888 loc += sz;
6889 cmds_sz -= sz;
6890 }
6891
6892 return (1);
6893 }
6894
6895 /*
6896 * Return 1 if any of the given transaction commands affect
6897 * "action_authorization". Return -1 if any of the commands are invalid and
6898 * 0 in all other cases.
6899 */
6900 static int
tx_modifies_action(const void * cmds_arg,size_t cmds_sz)6901 tx_modifies_action(const void *cmds_arg, size_t cmds_sz)
6902 {
6903 const struct rep_protocol_transaction_cmd *cmds;
6904 uintptr_t loc;
6905 uint32_t sz;
6906
6907 loc = (uintptr_t)cmds_arg;
6908
6909 while (cmds_sz > 0) {
6910 cmds = (struct rep_protocol_transaction_cmd *)loc;
6911
6912 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6913 return (-1);
6914
6915 sz = cmds->rptc_size;
6916 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6917 return (-1);
6918
6919 sz = TX_SIZE(sz);
6920 if (sz > cmds_sz)
6921 return (-1);
6922
6923 if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_ACTION)
6924 == 0)
6925 return (1);
6926
6927 loc += sz;
6928 cmds_sz -= sz;
6929 }
6930
6931 return (0);
6932 }
6933
6934 /*
6935 * Returns 1 if the transaction commands only modify properties named
6936 * 'enabled'.
6937 */
6938 static int
tx_only_enabled(const void * cmds_arg,size_t cmds_sz)6939 tx_only_enabled(const void *cmds_arg, size_t cmds_sz)
6940 {
6941 const struct rep_protocol_transaction_cmd *cmd;
6942 uintptr_t loc;
6943 uint32_t sz;
6944
6945 loc = (uintptr_t)cmds_arg;
6946
6947 while (cmds_sz > 0) {
6948 cmd = (struct rep_protocol_transaction_cmd *)loc;
6949
6950 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6951 return (-1);
6952
6953 sz = cmd->rptc_size;
6954 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6955 return (-1);
6956
6957 sz = TX_SIZE(sz);
6958 if (sz > cmds_sz)
6959 return (-1);
6960
6961 if (strcmp((const char *)cmd->rptc_data, AUTH_PROP_ENABLED)
6962 != 0)
6963 return (0);
6964
6965 loc += sz;
6966 cmds_sz -= sz;
6967 }
6968
6969 return (1);
6970 }
6971
6972 int
rc_tx_commit(rc_node_ptr_t * txp,const void * cmds,size_t cmds_sz)6973 rc_tx_commit(rc_node_ptr_t *txp, const void *cmds, size_t cmds_sz)
6974 {
6975 rc_node_t *np = txp->rnp_node;
6976 rc_node_t *pp;
6977 rc_node_t *nnp;
6978 rc_node_pg_notify_t *pnp;
6979 int rc;
6980 permcheck_t *pcp;
6981 perm_status_t granted;
6982 int normal;
6983 char *pg_fmri = NULL;
6984 char *auth_string = NULL;
6985 int auth_status = ADT_SUCCESS;
6986 int auth_ret_value = ADT_SUCCESS;
6987 size_t sz_out;
6988 int tx_flag = 1;
6989 tx_commit_data_t *tx_data = NULL;
6990
6991 RC_NODE_CHECK(np);
6992
6993 if ((txp->rnp_authorized != RC_AUTH_UNKNOWN) &&
6994 (txp->rnp_auth_string != NULL)) {
6995 auth_string = strdup(txp->rnp_auth_string);
6996 if (auth_string == NULL)
6997 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6998 }
6999
7000 if ((txp->rnp_authorized == RC_AUTH_UNKNOWN) &&
7001 is_main_repository) {
7002 #ifdef NATIVE_BUILD
7003 if (!client_is_privileged()) {
7004 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
7005 }
7006 #else
7007 /* permission check: depends on contents of transaction */
7008 pcp = pc_create();
7009 if (pcp == NULL)
7010 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7011
7012 /* If normal is cleared, we won't do the normal checks. */
7013 normal = 1;
7014 rc = REP_PROTOCOL_SUCCESS;
7015
7016 if (strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
7017 strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0) {
7018 /* Touching general[framework]/action_authorization? */
7019 rc = tx_modifies_action(cmds, cmds_sz);
7020 if (rc == -1) {
7021 pc_free(pcp);
7022 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7023 }
7024
7025 if (rc) {
7026 /*
7027 * Yes: only AUTH_MODIFY and AUTH_MANAGE
7028 * can be used.
7029 */
7030 rc = perm_add_enabling(pcp, AUTH_MODIFY);
7031
7032 if (rc == REP_PROTOCOL_SUCCESS)
7033 rc = perm_add_enabling(pcp,
7034 AUTH_MANAGE);
7035
7036 normal = 0;
7037 } else {
7038 rc = REP_PROTOCOL_SUCCESS;
7039 }
7040 } else if (np->rn_id.rl_ids[ID_INSTANCE] != 0 &&
7041 strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
7042 strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0) {
7043 rc_node_t *instn;
7044
7045 rc = tx_only_enabled(cmds, cmds_sz);
7046 if (rc == -1) {
7047 pc_free(pcp);
7048 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7049 }
7050
7051 if (rc) {
7052 rc = rc_node_parent(np, &instn);
7053 if (rc != REP_PROTOCOL_SUCCESS) {
7054 assert(rc == REP_PROTOCOL_FAIL_DELETED);
7055 pc_free(pcp);
7056 return (rc);
7057 }
7058
7059 assert(instn->rn_id.rl_type ==
7060 REP_PROTOCOL_ENTITY_INSTANCE);
7061
7062 rc = perm_add_inst_action_auth(pcp, instn);
7063 rc_node_rele(instn);
7064 switch (rc) {
7065 case REP_PROTOCOL_SUCCESS:
7066 break;
7067
7068 case REP_PROTOCOL_FAIL_DELETED:
7069 case REP_PROTOCOL_FAIL_NO_RESOURCES:
7070 pc_free(pcp);
7071 return (rc);
7072
7073 default:
7074 bad_error("perm_add_inst_action_auth",
7075 rc);
7076 }
7077 } else {
7078 rc = REP_PROTOCOL_SUCCESS;
7079 }
7080 }
7081
7082 if (rc == REP_PROTOCOL_SUCCESS && normal) {
7083 rc = perm_add_enabling(pcp, AUTH_MODIFY);
7084
7085 if (rc == REP_PROTOCOL_SUCCESS) {
7086 /* Add pgtype-specific authorization. */
7087 const char * const auth =
7088 perm_auth_for_pgtype(np->rn_type);
7089
7090 if (auth != NULL)
7091 rc = perm_add_enabling(pcp, auth);
7092 }
7093
7094 /* Add pg-specific modify_authorization auths. */
7095 if (rc == REP_PROTOCOL_SUCCESS)
7096 rc = perm_add_enabling_values(pcp, np,
7097 AUTH_PROP_MODIFY);
7098
7099 /* If value_authorization values are ok, add them. */
7100 if (rc == REP_PROTOCOL_SUCCESS) {
7101 rc = tx_allow_value(cmds, cmds_sz, np);
7102 if (rc == -1)
7103 rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
7104 else if (rc)
7105 rc = perm_add_enabling_values(pcp, np,
7106 AUTH_PROP_VALUE);
7107 }
7108 }
7109
7110 if (rc == REP_PROTOCOL_SUCCESS) {
7111 granted = perm_granted(pcp);
7112 rc = map_granted_status(granted, pcp, &auth_string);
7113 if ((granted == PERM_DENIED) && auth_string) {
7114 /*
7115 * _PERMISSION_DENIED should not cause us
7116 * to exit at this point, because we still
7117 * want to generate an audit event.
7118 */
7119 rc = REP_PROTOCOL_SUCCESS;
7120 }
7121 }
7122
7123 pc_free(pcp);
7124
7125 if (rc != REP_PROTOCOL_SUCCESS)
7126 goto cleanout;
7127
7128 if (granted == PERM_DENIED) {
7129 auth_status = ADT_FAILURE;
7130 auth_ret_value = ADT_FAIL_VALUE_AUTH;
7131 tx_flag = 0;
7132 }
7133 #endif /* NATIVE_BUILD */
7134 } else if (txp->rnp_authorized == RC_AUTH_FAILED) {
7135 auth_status = ADT_FAILURE;
7136 auth_ret_value = ADT_FAIL_VALUE_AUTH;
7137 tx_flag = 0;
7138 }
7139
7140 pg_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
7141 if (pg_fmri == NULL) {
7142 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7143 goto cleanout;
7144 }
7145 if ((rc = rc_node_get_fmri_or_fragment(np, pg_fmri,
7146 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
7147 goto cleanout;
7148 }
7149
7150 /*
7151 * Parse the transaction commands into a useful form.
7152 */
7153 if ((rc = tx_commit_data_new(cmds, cmds_sz, &tx_data)) !=
7154 REP_PROTOCOL_SUCCESS) {
7155 goto cleanout;
7156 }
7157
7158 if (tx_flag == 0) {
7159 /* Authorization failed. Generate audit events. */
7160 generate_property_events(tx_data, pg_fmri, auth_string,
7161 auth_status, auth_ret_value);
7162 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
7163 goto cleanout;
7164 }
7165
7166 nnp = rc_node_alloc();
7167 if (nnp == NULL) {
7168 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7169 goto cleanout;
7170 }
7171
7172 nnp->rn_id = np->rn_id; /* structure assignment */
7173 nnp->rn_hash = np->rn_hash;
7174 nnp->rn_name = strdup(np->rn_name);
7175 nnp->rn_type = strdup(np->rn_type);
7176 nnp->rn_pgflags = np->rn_pgflags;
7177
7178 nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
7179
7180 if (nnp->rn_name == NULL || nnp->rn_type == NULL) {
7181 rc_node_destroy(nnp);
7182 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7183 goto cleanout;
7184 }
7185
7186 (void) pthread_mutex_lock(&np->rn_lock);
7187
7188 /*
7189 * We must have all of the old properties in the cache, or the
7190 * database deletions could cause inconsistencies.
7191 */
7192 if ((rc = rc_node_fill_children(np, REP_PROTOCOL_ENTITY_PROPERTY)) !=
7193 REP_PROTOCOL_SUCCESS) {
7194 (void) pthread_mutex_unlock(&np->rn_lock);
7195 rc_node_destroy(nnp);
7196 goto cleanout;
7197 }
7198
7199 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
7200 (void) pthread_mutex_unlock(&np->rn_lock);
7201 rc_node_destroy(nnp);
7202 rc = REP_PROTOCOL_FAIL_DELETED;
7203 goto cleanout;
7204 }
7205
7206 if (np->rn_flags & RC_NODE_OLD) {
7207 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
7208 (void) pthread_mutex_unlock(&np->rn_lock);
7209 rc_node_destroy(nnp);
7210 rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7211 goto cleanout;
7212 }
7213
7214 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
7215 if (pp == NULL) {
7216 /* our parent is gone, we're going next... */
7217 rc_node_destroy(nnp);
7218 (void) pthread_mutex_lock(&np->rn_lock);
7219 if (np->rn_flags & RC_NODE_OLD) {
7220 (void) pthread_mutex_unlock(&np->rn_lock);
7221 rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7222 goto cleanout;
7223 }
7224 (void) pthread_mutex_unlock(&np->rn_lock);
7225 rc = REP_PROTOCOL_FAIL_DELETED;
7226 goto cleanout;
7227 }
7228 (void) pthread_mutex_unlock(&pp->rn_lock);
7229
7230 /*
7231 * prepare for the transaction
7232 */
7233 (void) pthread_mutex_lock(&np->rn_lock);
7234 if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
7235 (void) pthread_mutex_unlock(&np->rn_lock);
7236 (void) pthread_mutex_lock(&pp->rn_lock);
7237 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7238 (void) pthread_mutex_unlock(&pp->rn_lock);
7239 rc_node_destroy(nnp);
7240 rc = REP_PROTOCOL_FAIL_DELETED;
7241 goto cleanout;
7242 }
7243 nnp->rn_gen_id = np->rn_gen_id;
7244 (void) pthread_mutex_unlock(&np->rn_lock);
7245
7246 /* Sets nnp->rn_gen_id on success. */
7247 rc = object_tx_commit(&np->rn_id, tx_data, &nnp->rn_gen_id);
7248
7249 (void) pthread_mutex_lock(&np->rn_lock);
7250 if (rc != REP_PROTOCOL_SUCCESS) {
7251 rc_node_rele_flag(np, RC_NODE_IN_TX);
7252 (void) pthread_mutex_unlock(&np->rn_lock);
7253 (void) pthread_mutex_lock(&pp->rn_lock);
7254 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7255 (void) pthread_mutex_unlock(&pp->rn_lock);
7256 rc_node_destroy(nnp);
7257 rc_node_clear(txp, 0);
7258 if (rc == REP_PROTOCOL_DONE)
7259 rc = REP_PROTOCOL_SUCCESS; /* successful empty tx */
7260 goto cleanout;
7261 }
7262
7263 /*
7264 * Notify waiters
7265 */
7266 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7267 while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
7268 rc_pg_notify_fire(pnp);
7269 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7270
7271 np->rn_flags |= RC_NODE_OLD;
7272 (void) pthread_mutex_unlock(&np->rn_lock);
7273
7274 rc_notify_remove_node(np);
7275
7276 /*
7277 * replace np with nnp
7278 */
7279 rc_node_relink_child(pp, np, nnp);
7280
7281 /*
7282 * all done -- clear the transaction.
7283 */
7284 rc_node_clear(txp, 0);
7285 generate_property_events(tx_data, pg_fmri, auth_string,
7286 auth_status, auth_ret_value);
7287
7288 rc = REP_PROTOCOL_SUCCESS;
7289
7290 cleanout:
7291 free(auth_string);
7292 free(pg_fmri);
7293 tx_commit_data_free(tx_data);
7294 return (rc);
7295 }
7296
7297 void
rc_pg_notify_init(rc_node_pg_notify_t * pnp)7298 rc_pg_notify_init(rc_node_pg_notify_t *pnp)
7299 {
7300 uu_list_node_init(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
7301 pnp->rnpn_pg = NULL;
7302 pnp->rnpn_fd = -1;
7303 }
7304
/*
 * Register pnp so that the client behind fd is notified of changes to the
 * property group *npp.  On success, pnp is linked onto the pg's
 * rn_pg_notify_list (under rc_pg_notify_lock).
 *
 * Fails with _BAD_REQUEST if *npp is not a property group, _DELETED if the
 * node disappears while waiting for a transaction to finish, and
 * _NOT_LATEST if the pg has been superseded by a newer generation.
 */
int
rc_pg_notify_setup(rc_node_pg_notify_t *pnp, rc_node_ptr_t *npp, int fd)
{
	rc_node_t *np;

	/* Validates npp and returns np with np->rn_lock held. */
	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);

	/* Only property groups support pg-change notification. */
	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
	}

	/*
	 * wait for any transaction in progress to complete
	 */
	if (!rc_node_wait_flag(np, RC_NODE_IN_TX)) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (REP_PROTOCOL_FAIL_DELETED);
	}

	/* A committed transaction replaces the pg; refuse stale nodes. */
	if (np->rn_flags & RC_NODE_OLD) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (REP_PROTOCOL_FAIL_NOT_LATEST);
	}

	/*
	 * rc_pg_notify_lock is a leaf lock taken while rn_lock is held.
	 * rc_pg_notify_fire(pnp) presumably detaches/fires any previous
	 * registration on pnp before we re-register it — confirm against
	 * its definition.
	 */
	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	rc_pg_notify_fire(pnp);
	pnp->rnpn_pg = np;
	pnp->rnpn_fd = fd;
	(void) uu_list_insert_after(np->rn_pg_notify_list, NULL, pnp);
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);

	(void) pthread_mutex_unlock(&np->rn_lock);
	return (REP_PROTOCOL_SUCCESS);
}
7340
/*
 * Tear down pnp.  The registration must be fired/detached under
 * rc_pg_notify_lock before the list node is finalized, so the fire
 * happens first while the lock is held.
 */
void
rc_pg_notify_fini(rc_node_pg_notify_t *pnp)
{
	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	rc_pg_notify_fire(pnp);
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);

	uu_list_node_fini(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
}
7350
7351 void
rc_notify_info_init(rc_notify_info_t * rnip)7352 rc_notify_info_init(rc_notify_info_t *rnip)
7353 {
7354 int i;
7355
7356 uu_list_node_init(rnip, &rnip->rni_list_node, rc_notify_info_pool);
7357 uu_list_node_init(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
7358 rc_notify_pool);
7359
7360 rnip->rni_notify.rcn_node = NULL;
7361 rnip->rni_notify.rcn_info = rnip;
7362
7363 bzero(rnip->rni_namelist, sizeof (rnip->rni_namelist));
7364 bzero(rnip->rni_typelist, sizeof (rnip->rni_typelist));
7365
7366 (void) pthread_cond_init(&rnip->rni_cv, NULL);
7367
7368 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7369 rnip->rni_namelist[i] = NULL;
7370 rnip->rni_typelist[i] = NULL;
7371 }
7372 }
7373
/*
 * Activate a notification client: mark it ACTIVE, add it to the global
 * client list, and insert its marker node at the head of the global
 * notification list so it will see all subsequently queued notifications.
 * Caller must hold rc_pg_notify_lock, and rnip must not already be active.
 */
static void
rc_notify_info_insert_locked(rc_notify_info_t *rnip)
{
	assert(MUTEX_HELD(&rc_pg_notify_lock));

	assert(!(rnip->rni_flags & RC_NOTIFY_ACTIVE));

	rnip->rni_flags |= RC_NOTIFY_ACTIVE;
	(void) uu_list_insert_after(rc_notify_info_list, NULL, rnip);
	/* insert_before(..., NULL, ...) places the marker at the list head */
	(void) uu_list_insert_before(rc_notify_list, NULL, &rnip->rni_notify);
}
7385
/*
 * Deactivate a notification client.  Caller must hold rc_pg_notify_lock.
 *
 * Protocol: set RC_NOTIFY_DRAIN (which makes rc_notify_info_wait() bail
 * out), wake any waiters, pull the client off the global client list, and
 * remove its marker node from rc_notify_list — cleaning up any orphaned
 * notifications ahead of the marker if this client was responsible for
 * them (i.e. was first on the list).  Finally wait for all waiters inside
 * rc_notify_info_wait() to leave before clearing DRAIN | ACTIVE.
 */
static void
rc_notify_info_remove_locked(rc_notify_info_t *rnip)
{
	rc_notify_t *me = &rnip->rni_notify;
	rc_notify_t *np;

	assert(MUTEX_HELD(&rc_pg_notify_lock));

	assert(rnip->rni_flags & RC_NOTIFY_ACTIVE);

	assert(!(rnip->rni_flags & RC_NOTIFY_DRAIN));
	rnip->rni_flags |= RC_NOTIFY_DRAIN;
	(void) pthread_cond_broadcast(&rnip->rni_cv);

	(void) uu_list_remove(rc_notify_info_list, rnip);

	/*
	 * clean up any notifications at the beginning of the list
	 */
	if (uu_list_first(rc_notify_list) == me) {
		/*
		 * We can't call rc_notify_remove_locked() unless
		 * rc_notify_in_use is 0.
		 */
		while (rc_notify_in_use) {
			(void) pthread_cond_wait(&rc_pg_notify_cv,
			    &rc_pg_notify_lock);
		}
		/*
		 * Remove consumed entries (rcn_info == NULL means a real
		 * notification rather than another client's marker) until
		 * we hit another client's marker or the end of the list.
		 */
		while ((np = uu_list_next(rc_notify_list, me)) != NULL &&
		    np->rcn_info == NULL)
			rc_notify_remove_locked(np);
	}
	(void) uu_list_remove(rc_notify_list, me);

	/* Drain every thread blocked in rc_notify_info_wait() on this rnip. */
	while (rnip->rni_waiters) {
		(void) pthread_cond_broadcast(&rc_pg_notify_cv);
		(void) pthread_cond_broadcast(&rnip->rni_cv);
		(void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
	}

	rnip->rni_flags &= ~(RC_NOTIFY_DRAIN | RC_NOTIFY_ACTIVE);
}
7428
/*
 * Add a watched name to one of rnip's watch arrays (rni_namelist or
 * rni_typelist, passed as arr).  Duplicates are silently accepted.
 * Adding a watch also activates the client if it isn't active yet.
 *
 * Returns _SUCCESS, _NO_RESOURCES (strdup failure or array full), or a
 * name-validation error from rc_check_type_name().
 */
static int
rc_notify_info_add_watch(rc_notify_info_t *rnip, const char **arr,
    const char *name)
{
	int i;
	int rc;
	char *f;

	rc = rc_check_type_name(REP_PROTOCOL_ENTITY_PROPERTYGRP, name);
	if (rc != REP_PROTOCOL_SUCCESS)
		return (rc);

	/* Duplicate before taking the lock; strdup may be slow. */
	f = strdup(name);
	if (f == NULL)
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);

	(void) pthread_mutex_lock(&rc_pg_notify_lock);

	/* rc_notify_info_reset() frees the arrays while EMPTYING is set. */
	while (rnip->rni_flags & RC_NOTIFY_EMPTYING)
		(void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);

	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
		if (arr[i] == NULL)
			break;

		/*
		 * Don't add name if it's already being tracked.
		 */
		if (strcmp(arr[i], f) == 0) {
			free(f);
			/* Still fall through to (re)activate the client. */
			goto out;
		}
	}

	if (i == RC_NOTIFY_MAX_NAMES) {
		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
		free(f);
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
	}

	/* arr takes ownership of f; freed by rc_notify_info_reset(). */
	arr[i] = f;

out:
	if (!(rnip->rni_flags & RC_NOTIFY_ACTIVE))
		rc_notify_info_insert_locked(rnip);

	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
	return (REP_PROTOCOL_SUCCESS);
}
7478
/* Watch property groups whose name matches `name`. */
int
rc_notify_info_add_name(rc_notify_info_t *rnip, const char *name)
{
	return (rc_notify_info_add_watch(rnip, rnip->rni_namelist, name));
}
7484
/* Watch property groups whose type matches `type`. */
int
rc_notify_info_add_type(rc_notify_info_t *rnip, const char *type)
{
	return (rc_notify_info_add_watch(rnip, rnip->rni_typelist, type));
}
7490
7491 /*
7492 * Wait for and report an event of interest to rnip, a notification client
7493 */
7494 int
rc_notify_info_wait(rc_notify_info_t * rnip,rc_node_ptr_t * out,char * outp,size_t sz)7495 rc_notify_info_wait(rc_notify_info_t *rnip, rc_node_ptr_t *out,
7496 char *outp, size_t sz)
7497 {
7498 rc_notify_t *np;
7499 rc_notify_t *me = &rnip->rni_notify;
7500 rc_node_t *nnp;
7501 rc_notify_delete_t *ndp;
7502
7503 int am_first_info;
7504
7505 if (sz > 0)
7506 outp[0] = 0;
7507
7508 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7509
7510 while ((rnip->rni_flags & (RC_NOTIFY_ACTIVE | RC_NOTIFY_DRAIN)) ==
7511 RC_NOTIFY_ACTIVE) {
7512 /*
7513 * If I'm first on the notify list, it is my job to
7514 * clean up any notifications I pass by. I can't do that
7515 * if someone is blocking the list from removals, so I
7516 * have to wait until they have all drained.
7517 */
7518 am_first_info = (uu_list_first(rc_notify_list) == me);
7519 if (am_first_info && rc_notify_in_use) {
7520 rnip->rni_waiters++;
7521 (void) pthread_cond_wait(&rc_pg_notify_cv,
7522 &rc_pg_notify_lock);
7523 rnip->rni_waiters--;
7524 continue;
7525 }
7526
7527 /*
7528 * Search the list for a node of interest.
7529 */
7530 np = uu_list_next(rc_notify_list, me);
7531 while (np != NULL && !rc_notify_info_interested(rnip, np)) {
7532 rc_notify_t *next = uu_list_next(rc_notify_list, np);
7533
7534 if (am_first_info) {
7535 if (np->rcn_info) {
7536 /*
7537 * Passing another client -- stop
7538 * cleaning up notifications
7539 */
7540 am_first_info = 0;
7541 } else {
7542 rc_notify_remove_locked(np);
7543 }
7544 }
7545 np = next;
7546 }
7547
7548 /*
7549 * Nothing of interest -- wait for notification
7550 */
7551 if (np == NULL) {
7552 rnip->rni_waiters++;
7553 (void) pthread_cond_wait(&rnip->rni_cv,
7554 &rc_pg_notify_lock);
7555 rnip->rni_waiters--;
7556 continue;
7557 }
7558
7559 /*
7560 * found something to report -- move myself after the
7561 * notification and process it.
7562 */
7563 (void) uu_list_remove(rc_notify_list, me);
7564 (void) uu_list_insert_after(rc_notify_list, np, me);
7565
7566 if ((ndp = np->rcn_delete) != NULL) {
7567 (void) strlcpy(outp, ndp->rnd_fmri, sz);
7568 if (am_first_info)
7569 rc_notify_remove_locked(np);
7570 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7571 rc_node_clear(out, 0);
7572 return (REP_PROTOCOL_SUCCESS);
7573 }
7574
7575 nnp = np->rcn_node;
7576 assert(nnp != NULL);
7577
7578 /*
7579 * We can't bump nnp's reference count without grabbing its
7580 * lock, and rc_pg_notify_lock is a leaf lock. So we
7581 * temporarily block all removals to keep nnp from
7582 * disappearing.
7583 */
7584 rc_notify_in_use++;
7585 assert(rc_notify_in_use > 0);
7586 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7587
7588 rc_node_assign(out, nnp);
7589
7590 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7591 assert(rc_notify_in_use > 0);
7592 rc_notify_in_use--;
7593
7594 if (am_first_info) {
7595 /*
7596 * While we had the lock dropped, another thread
7597 * may have also incremented rc_notify_in_use. We
7598 * need to make sure that we're back to 0 before
7599 * removing the node.
7600 */
7601 while (rc_notify_in_use) {
7602 (void) pthread_cond_wait(&rc_pg_notify_cv,
7603 &rc_pg_notify_lock);
7604 }
7605 rc_notify_remove_locked(np);
7606 }
7607 if (rc_notify_in_use == 0)
7608 (void) pthread_cond_broadcast(&rc_pg_notify_cv);
7609 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7610
7611 return (REP_PROTOCOL_SUCCESS);
7612 }
7613 /*
7614 * If we're the last one out, let people know it's clear.
7615 */
7616 if (rnip->rni_waiters == 0)
7617 (void) pthread_cond_broadcast(&rnip->rni_cv);
7618 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7619 return (REP_PROTOCOL_DONE);
7620 }
7621
/*
 * Deactivate rnip (if active) and free its watch lists.  The EMPTYING
 * flag lets us drop rc_pg_notify_lock while freeing the strings;
 * rc_notify_info_add_watch() blocks on EMPTYING so it never races with
 * the frees below.
 */
static void
rc_notify_info_reset(rc_notify_info_t *rnip)
{
	int i;

	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	if (rnip->rni_flags & RC_NOTIFY_ACTIVE)
		rc_notify_info_remove_locked(rnip);
	assert(!(rnip->rni_flags & (RC_NOTIFY_DRAIN | RC_NOTIFY_EMPTYING)));
	rnip->rni_flags |= RC_NOTIFY_EMPTYING;
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);

	/* Free the watched names/types without holding the leaf lock. */
	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
		if (rnip->rni_namelist[i] != NULL) {
			free((void *)rnip->rni_namelist[i]);
			rnip->rni_namelist[i] = NULL;
		}
		if (rnip->rni_typelist[i] != NULL) {
			free((void *)rnip->rni_typelist[i]);
			rnip->rni_typelist[i] = NULL;
		}
	}

	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	rnip->rni_flags &= ~RC_NOTIFY_EMPTYING;
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
}
7649
/*
 * Destroy a notification client: deactivate it and release its watch
 * lists (rc_notify_info_reset()), then finalize both of its uu_list
 * nodes.  Reset must come first — the list nodes may still be linked
 * into the global lists until the client is removed.
 */
void
rc_notify_info_fini(rc_notify_info_t *rnip)
{
	rc_notify_info_reset(rnip);

	uu_list_node_fini(rnip, &rnip->rni_list_node, rc_notify_info_pool);
	uu_list_node_fini(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
	    rc_notify_pool);
}
7659