1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
25 * Copyright (c) 2016 by Delphix. All rights reserved.
26 */
27
28 /*
29 * rc_node.c - In-memory SCF object management
30 *
31 * This layer manages the in-memory cache (the Repository Cache) of SCF
32 * data. Read requests are usually satisfied from here, but may require
33 * load calls to the "object" layer. Modify requests always write-through
34 * to the object layer.
35 *
36 * SCF data comprises scopes, services, instances, snapshots, snaplevels,
37 * property groups, properties, and property values. All but the last are
38 * known here as "entities" and are represented by rc_node_t data
39 * structures. (Property values are kept in the rn_values member of the
40 * respective property, not as separate objects.) All entities besides
41 * the "localhost" scope have some entity as a parent, and therefore form
42 * a tree.
43 *
44 * The entity tree is rooted at rc_scope, which rc_node_init() initializes to
45 * the "localhost" scope. The tree is filled in from the database on-demand
46 * by rc_node_fill_children().
47 *
48 * rc_node_t's are also placed in the cache_hash[] hash table, for rapid
49 * lookup.
50 *
51 * Multiple threads may service client requests, so access to each
52 * rc_node_t is synchronized by its rn_lock member. Some fields are
53 * protected by bits in the rn_flags field instead, to support operations
54 * which need to drop rn_lock, for example to respect locking order. Such
55 * flags should be manipulated with the rc_node_{hold,rele}_flag()
56 * functions.
57 *
58 * We track references to nodes to tell when they can be free()d. rn_refs
59 * should be incremented with rc_node_hold() on the creation of client
60 * references (rc_node_ptr_t's and rc_iter_t's). rn_erefs ("ephemeral
61 * references") should be incremented when a pointer is read into a local
62 * variable of a thread, with rc_node_hold_ephemeral_locked(). This
63 * hasn't been fully implemented, however, so rc_node_rele() tolerates
64 * rn_erefs being 0. Some code which predates rn_erefs counts ephemeral
65 * references in rn_refs. Other references are tracked by the
66 * rn_other_refs field and the RC_NODE_DEAD, RC_NODE_IN_PARENT,
67 * RC_NODE_OLD, and RC_NODE_ON_FORMER flags.
68 *
69 * Locking rules: To dereference an rc_node_t * (usually to lock it), you must
70 * have a hold (rc_node_hold()) on it or otherwise be sure that it hasn't been
71 * rc_node_destroy()ed (hold a lock on its parent or child, hold a flag,
72 * etc.). Once you have locked an rc_node_t you must check its rn_flags for
73 * RC_NODE_DEAD before you can use it. This is usually done with the
74 * rc_node_{wait,hold}_flag() functions (often via the rc_node_check_*()
75 * functions & RC_NODE_*() macros), which fail if the object has died.
76 *
77 * When a transactional node (property group or snapshot) is updated,
78 * a new node takes the place of the old node in the global hash and the
79 * old node is hung off of the rn_former list of the new node. At the
80 * same time, all of its children have their rn_parent_ref pointer set,
81 * and any holds they have are reflected in the old node's rn_other_refs
82 * count. This is automatically kept up to date until the final reference
83 * to the subgraph is dropped, at which point the node is unrefed and
84 * destroyed, along with all of its children.
85 *
86 * Because name service lookups may take a long time and, more importantly,
87 * may trigger additional accesses to the repository, perm_granted() must be
88 * called without holding any locks.
89 *
90 * An ITER_START for a non-ENTITY_VALUE induces an rc_node_fill_children()
91 * call via rc_node_setup_iter() to populate the rn_children uu_list of the
92 * rc_node_t * in question and a call to uu_list_walk_start() on that list. For
93 * ITER_READ, rc_iter_next() uses uu_list_walk_next() to find the next
94 * appropriate child.
95 *
96 * An ITER_START for an ENTITY_VALUE makes sure the node has its values
97 * filled, and sets up the iterator. An ITER_READ_VALUE just copies out
98 * the proper values and updates the offset information.
99 *
100 * To allow aliases, snapshots are implemented with a level of indirection.
101 * A snapshot rc_node_t has a snapid which refers to an rc_snapshot_t in
102 * snapshot.c which contains the authoritative snaplevel information. The
103 * snapid is "assigned" by rc_attach_snapshot().
104 *
105 * We provide the client layer with rc_node_ptr_t's to reference objects.
106 * Objects referred to by them are automatically held & released by
107 * rc_node_assign() & rc_node_clear(). The RC_NODE_PTR_*() macros are used at
108 * client.c entry points to read the pointers. They fetch the pointer to the
109 * object, return (from the function) if it is dead, and lock, hold, or hold
110 * a flag of the object.
111 */
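
/*
 * A minimal sketch of that dereference discipline (illustrative only, not
 * part of the build; error handling and the surrounding context are
 * elided, and REP_PROTOCOL_FAIL_DELETED is just one plausible error for a
 * caller to propagate):
 *
 *	(void) pthread_mutex_lock(&np->rn_lock);
 *	if (np->rn_flags & RC_NODE_DEAD) {
 *		(void) pthread_mutex_unlock(&np->rn_lock);
 *		return (REP_PROTOCOL_FAIL_DELETED);
 *	}
 *	rc_node_hold_locked(np);
 *	(void) pthread_mutex_unlock(&np->rn_lock);
 *	...
 *	rc_node_rele(np);
 */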
112
113 /*
114 * Permission checking is authorization-based: some operations may only
115 * proceed if the user has been assigned at least one of a set of
116 * authorization strings. The set of enabling authorizations depends on the
117 * operation and the target object. The set of authorizations assigned to
118 * a user is determined by an algorithm defined in libsecdb.
119 *
120 * The fastest way to decide whether the two sets intersect is by entering the
121 * strings into a hash table and detecting collisions, which takes linear time
122 * in the total size of the sets. The exception is the authorization
123 * patterns which may be assigned to users: without advanced pattern-matching
124 * algorithms, each such pattern costs O(n) in the number of enabling
125 * authorizations.
126 *
127 * We can achieve some practical speed-ups by noting that if we enter all of
128 * the authorizations from one of the sets into the hash table we can merely
129 * check the elements of the second set for existence without adding them.
130 * This reduces memory requirements and hash table clutter. The enabling set
131 * is well suited for this because it is internal to configd (for now, at
132 * least). Combine this with short-circuiting and we can even minimize the
133 * number of queries to the security databases (user_attr & prof_attr).
134 *
135 * To force this usage onto clients we provide functions for adding
136 * authorizations to the enabling set of a permission context structure
137 * (perm_add_*()) and one to decide whether the user associated with the
138 * current door call client possesses any of them (perm_granted()).
139 *
140 * At some point, a generic version of this should move to libsecdb.
141 *
142 * While entering the enabling strings into the hash table, we keep track
143 * of which is the most specific for use in generating auditing events.
144 * See the "Collecting the Authorization String" section of the "SMF Audit
145 * Events" block comment below.
146 */
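
/*
 * A minimal usage sketch of that interface (illustrative only, not part
 * of the build; real callers appear later in this file and typically add
 * several enabling authorizations, including ones read from the target's
 * property groups):
 *
 *	permcheck_t *pcp = pc_create();
 *	if (pcp == NULL)
 *		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
 *	rc = perm_add_enabling(pcp, AUTH_MODIFY);
 *	if (rc == REP_PROTOCOL_SUCCESS)
 *		granted = perm_granted(pcp);
 *	pc_free(pcp);
 */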
147
148 /*
149 * Composition is the combination of sets of properties. The sets are ordered
150 * and properties in higher sets obscure properties of the same name in lower
151 * sets. Here we present a composed view of an instance's properties as the
152 * union of its properties and its service's properties. Similarly the
153 * properties of snaplevels are combined to form a composed view of the
154 * properties of a snapshot (which should match the composed view of the
155 * properties of the instance when the snapshot was taken).
156 *
157 * In terms of the client interface, the client may request that a property
158 * group iterator for an instance or snapshot be composed. Property groups
159 * traversed by such an iterator may not have the target entity as a parent.
160 * Similarly, the properties traversed by a property iterator for those
161 * property groups may not have the property groups iterated as parents.
162 *
163 * Implementation requires that iterators for instances and snapshots be
164 * composition-savvy, and that we have a "composed property group" entity
165 * which represents the composition of a number of property groups. Iteration
166 * over "composed property groups" yields properties which may have different
167 * parents, but for all other operations a composed property group behaves
168 * like the top-most property group it represents.
169 *
170 * The implementation is based on the rn_cchain[] array of rc_node_t pointers
171 * in rc_node_t. For instances, the pointers point to the instance and its
172 * parent service. For snapshots they point to the child snaplevels, and for
173 * composed property groups they point to property groups. A composed
174 * iterator carries an index into rn_cchain[]. Thus most of the magic ends up
175 * in the rc_iter_*() code.
176 */
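
/*
 * For example (illustrative), for an instance I of service S the chain is
 *
 *	rn_cchain[0] = I	(most specific; wins name collisions)
 *	rn_cchain[1] = S
 *
 * so a composed property group iterator yields every property group of I
 * plus any property group of S whose name is not also present in I.
 */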
177 /*
178 * SMF Audit Events:
179 * ================
180 *
181 * To maintain security, SMF generates audit events whenever
182 * privileged operations are attempted. See the System Administration
183 * Guide: Security Services answerbook for a discussion of the Solaris
184 * audit system.
185 *
186 * The SMF audit event codes are defined in adt_event.h by symbols
187 * starting with ADT_smf_ and are described in audit_event.txt. The
188 * audit record structures are defined in the SMF section of adt.xml.
189 * adt.xml is used to automatically generate adt_event.h which
190 * contains the definitions that we code to in this file. For the
191 * most part the audit events map closely to actions that you would
192 * perform with svcadm or svccfg, but there are some special cases
193 * which we'll discuss later.
194 *
195 * The software associated with SMF audit events falls into three
196 * categories:
197 * - collecting information to be written to the audit
198 * records
199 * - using the adt_* functions in
200 * usr/src/lib/libbsm/common/adt.c to generate the audit
201 * records.
202 * - handling special cases
203 *
204 * Collecting Information:
205 * ----------------------
206 *
207 * Almost all of the audit events require the FMRI of the affected
208 * object and the authorization string that was used. The one
209 * exception is ADT_smf_annotation which we'll talk about later.
210 *
211 * Collecting the FMRI:
212 *
213 * The rc_node structure has a member called rn_fmri which points to
214 * its FMRI. This is initialized by a call to rc_node_build_fmri()
215 * when the node's parent is established. The reason for doing it
216 * at this time is that a node's FMRI is basically the concatenation
217 * of the parent's FMRI and the node's name with the appropriate
218 * decoration. rc_node_build_fmri() does this concatenation and
219 * decorating. It is called from rc_node_link_child() and
220 * rc_node_relink_child() where a node is linked to its parent.
221 *
222 * rc_node_get_fmri_or_fragment() is called to retrieve a node's FMRI
223 * when it is needed. It returns rn_fmri if it is set. If the node
224 * is at the top level, however, rn_fmri won't be set because it was
225 * never linked to a parent. In this case,
226 * rc_node_get_fmri_or_fragment() constructs an FMRI fragment based on
227 * its node type and its name, rn_name.
228 *
229 * Collecting the Authorization String:
230 *
231 * Naturally, the authorization string is captured during the
232 * authorization checking process. Acceptable authorization strings
233 * are added to a permcheck_t hash table as noted in the section on
234 * permission checking above. Once all entries have been added to the
235 * hash table, perm_granted() is called. If the client is authorized,
236 * perm_granted() returns with pc_auth_string of the permcheck_t
237 * structure pointing to the authorization string.
238 *
239 * This works fine if the client is authorized, but what happens if
240 * the client is not authorized? We need to report the required
241 * authorization string. This is the authorization that would have
242 * been used if permission had been granted. perm_granted() will
243 * find no match, so it needs to decide which string in the hash
244 * table to use as the required authorization string. It needs to do
245 * this, because configd is still going to generate an event. A
246 * design decision was made to use the most specific authorization
247 * in the hash table. The pc_auth_type enum designates the
248 * specificity of an authorization string. For example, an
249 * authorization string that is declared in an instance PG is more
250 * specific than one that is declared in a service PG.
251 *
252 * The pc_add() function keeps track of the most specific
253 * authorization in the hash table. It does this using the
254 * pc_specific and pc_specific_type members of the permcheck
255 * structure. pc_add() updates these members whenever a more
256 * specific authorization string is added to the hash table. Thus, if
257 * an authorization match is not found, perm_granted() will return
258 * with pc_auth_string in the permcheck_t pointing to the string that
259 * is referenced by pc_specific.
260 *
261 * Generating the Audit Events:
262 * ===========================
263 *
264 * As the functions in this file process requests for clients of
265 * configd, they gather the information that is required for an audit
266 * event. Eventually, the request processing gets to the point where
267 * the authorization is rejected or to the point where the requested
268 * action was attempted. At these two points smf_audit_event() is
269 * called.
270 *
271 * smf_audit_event() takes 4 parameters:
272 * - the event ID which is one of the ADT_smf_* symbols from
273 * adt_event.h.
274 * - status to pass to adt_put_event()
275 * - return value to pass to adt_put_event()
276 * - the event data (see audit_event_data structure)
277 *
278 * All interactions with the auditing software require an audit
279 * session. We use one audit session per configd client. We keep
280 * track of the audit session in the repcache_client structure.
281 * smf_audit_event() calls get_audit_session() to get the session
282 * pointer.
283 *
284 * smf_audit_event() then calls adt_alloc_event() to allocate an
285 * adt_event_data union which is defined in adt_event.h, copies the
286 * data into the appropriate members of the union and calls
287 * adt_put_event() to generate the event.
288 *
289 * Special Cases:
290 * =============
291 *
292 * There are three major types of special cases:
293 *
294 * - gathering event information for each action in a
295 * transaction
296 * - Higher level events represented by special property
297 * group/property name combinations. Many of these are
298 * restarter actions.
299 * - ADT_smf_annotation event
300 *
301 * Processing Transaction Actions:
302 * ------------------------------
303 *
304 * A transaction can contain multiple actions to modify, create or
305 * delete one or more properties. We need to capture information so
306 * that we can generate an event for each property action. The
307 * transaction information is stored in a tx_commit_data_t, and
308 * object.c provides accessor functions to retrieve data from this
309 * structure. rc_tx_commit() obtains a tx_commit_data_t by calling
310 * tx_commit_data_new() and passes this to object_tx_commit() to
311 * commit the transaction. Then we call generate_property_events() to
312 * generate an audit event for each property action.
313 *
314 * Special Properties:
315 * ------------------
316 *
317 * There are combinations of property group/property name that are special.
318 * They are special because they have specific meaning to startd. startd
319 * interprets them in a service-independent fashion.
320 * restarter_actions/refresh and general/enabled are two examples of these.
321 * A special event is generated for these properties in addition to the
322 * regular property event described in the previous section. The special
323 * properties are declared as an array of audit_special_prop_item
324 * structures at special_props_list in rc_node.c.
325 *
326 * In the previous section, we mentioned the
327 * generate_property_events() function that generates an event for
328 * every property action. Before generating the event,
329 * generate_property_events() calls special_property_event().
330 * special_property_event() checks to see if the action involves a
331 * special property. If it does, it generates a special audit
332 * event.
333 *
334 * ADT_smf_annotation event:
335 * ------------------------
336 *
337 * This is a special event unlike any other. It allows the svccfg
338 * program to store an annotation in the event log before a series
339 * of transactions is processed. It is used with the import and
340 * apply svccfg commands. svccfg uses the rep_protocol_annotation
341 * message to pass the operation (import or apply) and the file name
342 * to configd. The set_annotation() function in client.c stores
343 * these away in the repcache_client structure. The address of
344 * this structure is saved in the thread_info structure.
345 *
346 * Before it generates any events, smf_audit_event() calls
347 * smf_annotation_event(). smf_annotation_event() calls
348 * client_annotation_needed() which is defined in client.c. If an
349 * annotation is needed client_annotation_needed() returns the
350 * operation and filename strings that were saved from the
351 * rep_protocol_annotation message. smf_annotation_event() then
352 * generates the ADT_smf_annotation event.
353 */
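
/*
 * A condensed sketch of that flow (illustrative only, not part of the
 * build; the event ID, status, and return value shown here are just
 * examples, and the permission/FMRI collection is elided):
 *
 *	audit_event_data_t audit_data;
 *
 *	audit_data.ed_fmri = fmri;
 *	audit_data.ed_auth = auth_string;
 *	if (granted)
 *		smf_audit_event(ADT_smf_enable, ADT_SUCCESS, ADT_SUCCESS,
 *		    &audit_data);
 *	else
 *		smf_audit_event(ADT_smf_enable, ADT_FAILURE,
 *		    ADT_FAIL_VALUE_AUTH, &audit_data);
 */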
354
355 #include <assert.h>
356 #include <atomic.h>
357 #include <bsm/adt_event.h>
358 #include <errno.h>
359 #include <libuutil.h>
360 #include <libscf.h>
361 #include <libscf_priv.h>
362 #include <pthread.h>
363 #include <pwd.h>
364 #include <stdio.h>
365 #include <stdlib.h>
366 #include <strings.h>
367 #include <sys/types.h>
368 #include <syslog.h>
369 #include <unistd.h>
370 #include <secdb.h>
371
372 #include "configd.h"
373
374 #define AUTH_PREFIX "solaris.smf."
375 #define AUTH_MANAGE AUTH_PREFIX "manage"
376 #define AUTH_MODIFY AUTH_PREFIX "modify"
377 #define AUTH_MODIFY_PREFIX AUTH_MODIFY "."
378 #define AUTH_PG_ACTIONS SCF_PG_RESTARTER_ACTIONS
379 #define AUTH_PG_ACTIONS_TYPE SCF_PG_RESTARTER_ACTIONS_TYPE
380 #define AUTH_PG_GENERAL SCF_PG_GENERAL
381 #define AUTH_PG_GENERAL_TYPE SCF_PG_GENERAL_TYPE
382 #define AUTH_PG_GENERAL_OVR SCF_PG_GENERAL_OVR
383 #define AUTH_PG_GENERAL_OVR_TYPE SCF_PG_GENERAL_OVR_TYPE
384 #define AUTH_PROP_ACTION "action_authorization"
385 #define AUTH_PROP_ENABLED "enabled"
386 #define AUTH_PROP_MODIFY "modify_authorization"
387 #define AUTH_PROP_VALUE "value_authorization"
388 #define AUTH_PROP_READ "read_authorization"
389
390 #define MAX_VALID_CHILDREN 3
391
392 typedef struct rc_type_info {
393 uint32_t rt_type; /* matches array index */
394 uint32_t rt_num_ids;
395 uint32_t rt_name_flags;
396 uint32_t rt_valid_children[MAX_VALID_CHILDREN];
397 } rc_type_info_t;
398
399 #define RT_NO_NAME -1U
400
401 static rc_type_info_t rc_types[] = {
402 {REP_PROTOCOL_ENTITY_NONE, 0, RT_NO_NAME},
403 {REP_PROTOCOL_ENTITY_SCOPE, 0, 0,
404 {REP_PROTOCOL_ENTITY_SERVICE, REP_PROTOCOL_ENTITY_SCOPE}},
405 {REP_PROTOCOL_ENTITY_SERVICE, 0, UU_NAME_DOMAIN | UU_NAME_PATH,
406 {REP_PROTOCOL_ENTITY_INSTANCE, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
407 {REP_PROTOCOL_ENTITY_INSTANCE, 1, UU_NAME_DOMAIN,
408 {REP_PROTOCOL_ENTITY_SNAPSHOT, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
409 {REP_PROTOCOL_ENTITY_SNAPSHOT, 2, UU_NAME_DOMAIN,
410 {REP_PROTOCOL_ENTITY_SNAPLEVEL, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
411 {REP_PROTOCOL_ENTITY_SNAPLEVEL, 4, RT_NO_NAME,
412 {REP_PROTOCOL_ENTITY_PROPERTYGRP}},
413 {REP_PROTOCOL_ENTITY_PROPERTYGRP, 5, UU_NAME_DOMAIN,
414 {REP_PROTOCOL_ENTITY_PROPERTY}},
415 {REP_PROTOCOL_ENTITY_CPROPERTYGRP, 0, UU_NAME_DOMAIN,
416 {REP_PROTOCOL_ENTITY_PROPERTY}},
417 {REP_PROTOCOL_ENTITY_PROPERTY, 7, UU_NAME_DOMAIN},
418 {-1UL}
419 };
420 #define NUM_TYPES ((sizeof (rc_types) / sizeof (*rc_types)))
421
422 /* Element of a permcheck_t hash table. */
423 struct pc_elt {
424 struct pc_elt *pce_next;
425 char pce_auth[1];
426 };
427
428 /*
429 * If an authorization fails, we must decide which of the elements in the
430 * permcheck hash table to use in the audit event. That is to say of all
431 * the strings in the hash table, we must choose one and use it in the audit
432 * event. It is desirable to use the most specific string in the audit
433 * event.
434 *
435 * The pc_auth_type specifies the types (sources) of authorization
436 * strings. The enum is ordered in increasing specificity.
437 */
438 typedef enum pc_auth_type {
439 PC_AUTH_NONE = 0, /* no auth string available. */
440 PC_AUTH_SMF, /* strings coded into SMF. */
441 PC_AUTH_SVC, /* strings specified in PG of a service. */
442 PC_AUTH_INST /* strings specified in PG of an instance. */
443 } pc_auth_type_t;
444
445 /*
446 * The following enum is used to represent the results of the checks to see
447 * if the client has the appropriate permissions to perform an action.
448 */
449 typedef enum perm_status {
450 PERM_DENIED = 0, /* Permission denied. */
451 PERM_GRANTED, /* Client has authorizations. */
452 PERM_GONE, /* Door client went away. */
453 PERM_FAIL /* Generic failure. e.g. resources */
454 } perm_status_t;
455
456 /* An authorization set hash table. */
457 typedef struct {
458 struct pc_elt **pc_buckets;
459 uint_t pc_bnum; /* number of buckets */
460 uint_t pc_enum; /* number of elements */
461 struct pc_elt *pc_specific; /* most specific element */
462 pc_auth_type_t pc_specific_type; /* type of pc_specific */
463 char *pc_auth_string; /* authorization string */
464 /* for audit events */
465 } permcheck_t;
466
467 /*
468 * Structure for holding audit event data. Not all events use all members
469 * of the structure.
470 */
471 typedef struct audit_event_data {
472 char *ed_auth; /* authorization string. */
473 char *ed_fmri; /* affected FMRI. */
474 char *ed_snapname; /* name of snapshot. */
475 char *ed_old_fmri; /* old fmri in attach case. */
476 char *ed_old_name; /* old snapshot in attach case. */
477 char *ed_type; /* prop. group or prop. type. */
478 char *ed_prop_value; /* property value. */
479 } audit_event_data_t;
480
481 /*
482 * Pointer to function to do special processing to get audit event ID.
483 * Audit event IDs are defined in /usr/include/bsm/adt_event.h. Function
484 * returns 0 if ID successfully retrieved. Otherwise it returns -1.
485 */
486 typedef int (*spc_getid_fn_t)(tx_commit_data_t *, size_t, const char *,
487 au_event_t *);
488 static int general_enable_id(tx_commit_data_t *, size_t, const char *,
489 au_event_t *);
490
491 static uu_list_pool_t *rc_children_pool;
492 static uu_list_pool_t *rc_pg_notify_pool;
493 static uu_list_pool_t *rc_notify_pool;
494 static uu_list_pool_t *rc_notify_info_pool;
495
496 static rc_node_t *rc_scope;
497
498 static pthread_mutex_t rc_pg_notify_lock = PTHREAD_MUTEX_INITIALIZER;
499 static pthread_cond_t rc_pg_notify_cv = PTHREAD_COND_INITIALIZER;
500 static uint_t rc_notify_in_use; /* blocks removals */
501
502 /*
503 * Some combinations of property group/property name require a special
504 * audit event to be generated when there is a change.
505 * audit_special_prop_item_t is used to specify these special cases. The
506 * special_props_list array defines a list of these special properties.
507 */
508 typedef struct audit_special_prop_item {
509 const char *api_pg_name; /* property group name. */
510 const char *api_prop_name; /* property name. */
511 au_event_t api_event_id; /* event id or 0. */
512 spc_getid_fn_t api_event_func; /* function to get event id. */
513 } audit_special_prop_item_t;
514
515 /*
516 * Native builds are done using the build machine's standard include
517 * files. These files may not yet have the definitions for the ADT_smf_*
518 * symbols. Thus, we do not compile this table when doing native builds.
519 */
520 #ifndef NATIVE_BUILD
521 /*
522 * The following special_props_list array specifies property group/property
523 * name combinations that have specific meaning to startd. A special event
524 * is generated for these combinations in addition to the regular property
525 * event.
526 *
527 * At run time this array gets sorted. See the call to qsort(3C) in
528 * rc_node_init(). The array is sorted, so that bsearch(3C) can be used
529 * to do lookups.
530 */
531 static audit_special_prop_item_t special_props_list[] = {
532 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADED, ADT_smf_degrade,
533 NULL},
534 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADE_IMMEDIATE,
535 ADT_smf_immediate_degrade, NULL},
536 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_OFF, ADT_smf_clear, NULL},
537 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON,
538 ADT_smf_maintenance, NULL},
539 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMEDIATE,
540 ADT_smf_immediate_maintenance, NULL},
541 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMTEMP,
542 ADT_smf_immtmp_maintenance, NULL},
543 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_TEMPORARY,
544 ADT_smf_tmp_maintenance, NULL},
545 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_REFRESH, ADT_smf_refresh, NULL},
546 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTART, ADT_smf_restart, NULL},
547 {SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTORE, ADT_smf_clear, NULL},
548 {SCF_PG_OPTIONS, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
549 {SCF_PG_OPTIONS_OVR, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
550 {SCF_PG_GENERAL, SCF_PROPERTY_ENABLED, 0, general_enable_id},
551 {SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 0, general_enable_id}
552 };
553 #define SPECIAL_PROP_COUNT (sizeof (special_props_list) /\
554 sizeof (audit_special_prop_item_t))
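
/*
 * A sketch of the sorted-array lookup described above (illustrative only;
 * the name of the comparison function used with qsort(3C)/bsearch(3C) is
 * omitted here):
 *
 *	audit_special_prop_item_t key, *item;
 *
 *	key.api_pg_name = pg_name;
 *	key.api_prop_name = prop_name;
 *	item = bsearch(&key, special_props_list, SPECIAL_PROP_COUNT,
 *	    sizeof (audit_special_prop_item_t), <comparison function>);
 *	if (item != NULL)
 *		... generate the special event for item ...
 */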
555 #endif /* NATIVE_BUILD */
556
557 /*
558 * We support an arbitrary number of clients interested in events for certain
559 * types of changes. Each client is represented by an rc_notify_info_t, and
560 * all clients are chained onto the rc_notify_info_list.
561 *
562 * The rc_notify_list is the global notification list. Each entry is of
563 * type rc_notify_t, which is embedded in one of three other structures:
564 *
565 * rc_node_t property group update notification
566 * rc_notify_delete_t object deletion notification
567 * rc_notify_info_t notification clients
568 *
569 * Which type of object is determined by which pointer in the rc_notify_t is
570 * non-NULL.
571 *
572 * New notifications and clients are added to the end of the list.
573 * Notifications no-one is interested in are never added to the list.
574 *
575 * Clients use their position in the list to track which notifications they
576 * have not yet reported. As they process notifications, they move forward
577 * in the list past them. There is always a client at the beginning of the
578 * list -- as it moves past notifications, it removes them from the list and
579 * cleans them up.
580 *
581 * The rc_pg_notify_lock protects all notification state. The rc_pg_notify_cv
582 * is used for global signalling, and each client has a cv which it waits for
583 * events of interest on.
584 *
585 * rc_notify_in_use is used to protect rc_notify_list from deletions when
586 * the rc_pg_notify_lock is dropped. Specifically, rc_notify_info_wait()
587 * must drop the lock to call rc_node_assign(), and then it reacquires the
588 * lock. Deletions from rc_notify_list during this period are not
589 * allowed. Insertions do not matter, because they are always done at the
590 * end of the list.
591 */
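
/*
 * In other words, for a given rc_notify_t *np on rc_notify_list exactly
 * one of the following holds (illustrative summary):
 *
 *	np->rcn_node != NULL	embedded in an rc_node_t (pg update)
 *	np->rcn_delete != NULL	embedded in an rc_notify_delete_t (deletion)
 *	np->rcn_info != NULL	embedded in an rc_notify_info_t (client)
 */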
592 static uu_list_t *rc_notify_info_list;
593 static uu_list_t *rc_notify_list;
594
595 #define HASH_SIZE 512
596 #define HASH_MASK (HASH_SIZE - 1)
597
598 #pragma align 64(cache_hash)
599 static cache_bucket_t cache_hash[HASH_SIZE];
600
601 #define CACHE_BUCKET(h) (&cache_hash[(h) & HASH_MASK])
602
603
604 static void rc_node_no_client_refs(rc_node_t *np);
605
606
607 static uint32_t
608 rc_node_hash(rc_node_lookup_t *lp)
609 {
610 uint32_t type = lp->rl_type;
611 uint32_t backend = lp->rl_backend;
612 uint32_t mainid = lp->rl_main_id;
613 uint32_t *ids = lp->rl_ids;
614
615 rc_type_info_t *tp = &rc_types[type];
616 uint32_t num_ids;
617 uint32_t left;
618 uint32_t hash;
619
620 assert(backend == BACKEND_TYPE_NORMAL ||
621 backend == BACKEND_TYPE_NONPERSIST);
622
623 assert(type > 0 && type < NUM_TYPES);
624 num_ids = tp->rt_num_ids;
625
626 left = MAX_IDS - num_ids;
627 assert(num_ids <= MAX_IDS);
628
629 hash = type * 7 + mainid * 5 + backend;
630
631 while (num_ids-- > 0)
632 hash = hash * 11 + *ids++ * 7;
633
634 /*
635 * the rest should be zeroed
636 */
637 while (left-- > 0)
638 assert(*ids++ == 0);
639
640 return (hash);
641 }
642
643 static int
644 rc_node_match(rc_node_t *np, rc_node_lookup_t *l)
645 {
646 rc_node_lookup_t *r = &np->rn_id;
647 rc_type_info_t *tp;
648 uint32_t type;
649 uint32_t num_ids;
650
651 if (r->rl_main_id != l->rl_main_id)
652 return (0);
653
654 type = r->rl_type;
655 if (type != l->rl_type)
656 return (0);
657
658 assert(type > 0 && type < NUM_TYPES);
659
660 tp = &rc_types[r->rl_type];
661 num_ids = tp->rt_num_ids;
662
663 assert(num_ids <= MAX_IDS);
664 while (num_ids-- > 0)
665 if (r->rl_ids[num_ids] != l->rl_ids[num_ids])
666 return (0);
667
668 return (1);
669 }
670
671 /*
672 * Register an ephemeral reference to np. This should be done while both
673 * the persistent reference from which the np pointer was read is locked
674 * and np itself is locked. This guarantees that another thread which
675 * thinks it has the last reference will yield without destroying the
676 * node.
677 */
678 static void
679 rc_node_hold_ephemeral_locked(rc_node_t *np)
680 {
681 assert(MUTEX_HELD(&np->rn_lock));
682
683 ++np->rn_erefs;
684 }
685
686 /*
687 * the "other" references on a node are maintained in an atomically
688 * updated refcount, rn_other_refs. This can be bumped from arbitrary
689 * context, and tracks references to a possibly out-of-date node's children.
690 *
691 * To prevent the node from disappearing between the final drop of
692 * rn_other_refs and the unref handling, rn_other_refs_held is bumped on
693 * 0->1 transitions and decremented (with the node lock held) on 1->0
694 * transitions.
695 */
696 static void
697 rc_node_hold_other(rc_node_t *np)
698 {
699 if (atomic_add_32_nv(&np->rn_other_refs, 1) == 1) {
700 atomic_add_32(&np->rn_other_refs_held, 1);
701 assert(np->rn_other_refs_held > 0);
702 }
703 assert(np->rn_other_refs > 0);
704 }
705
706 /*
707 * No node locks may be held
708 */
709 static void
710 rc_node_rele_other(rc_node_t *np)
711 {
712 assert(np->rn_other_refs > 0);
713 if (atomic_add_32_nv(&np->rn_other_refs, -1) == 0) {
714 (void) pthread_mutex_lock(&np->rn_lock);
715 assert(np->rn_other_refs_held > 0);
716 if (atomic_add_32_nv(&np->rn_other_refs_held, -1) == 0 &&
717 np->rn_refs == 0 && (np->rn_flags & RC_NODE_OLD)) {
718 /*
719 * This was the last client reference. Destroy
720 * any other references and free() the node.
721 */
722 rc_node_no_client_refs(np);
723 } else {
724 (void) pthread_mutex_unlock(&np->rn_lock);
725 }
726 }
727 }
728
729 static void
730 rc_node_hold_locked(rc_node_t *np)
731 {
732 assert(MUTEX_HELD(&np->rn_lock));
733
734 if (np->rn_refs == 0 && (np->rn_flags & RC_NODE_PARENT_REF))
735 rc_node_hold_other(np->rn_parent_ref);
736 np->rn_refs++;
737 assert(np->rn_refs > 0);
738 }
739
740 static void
741 rc_node_hold(rc_node_t *np)
742 {
743 (void) pthread_mutex_lock(&np->rn_lock);
744 rc_node_hold_locked(np);
745 (void) pthread_mutex_unlock(&np->rn_lock);
746 }
747
748 static void
749 rc_node_rele_locked(rc_node_t *np)
750 {
751 int unref = 0;
752 rc_node_t *par_ref = NULL;
753
754 assert(MUTEX_HELD(&np->rn_lock));
755 assert(np->rn_refs > 0);
756
757 if (--np->rn_refs == 0) {
758 if (np->rn_flags & RC_NODE_PARENT_REF)
759 par_ref = np->rn_parent_ref;
760
761 /*
762 * Composed property groups are only as good as their
763 * references.
764 */
765 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
766 np->rn_flags |= RC_NODE_DEAD;
767
768 if ((np->rn_flags & (RC_NODE_DEAD|RC_NODE_OLD)) &&
769 np->rn_other_refs == 0 && np->rn_other_refs_held == 0)
770 unref = 1;
771 }
772
773 if (unref) {
774 /*
775 * This was the last client reference. Destroy any other
776 * references and free() the node.
777 */
778 rc_node_no_client_refs(np);
779 } else {
780 /*
781 * rn_erefs can be 0 if we acquired the reference in
782 * a path which hasn't been updated to increment rn_erefs.
783 * When all paths which end here are updated, we should
784 * assert rn_erefs > 0 and always decrement it.
785 */
786 if (np->rn_erefs > 0)
787 --np->rn_erefs;
788 (void) pthread_mutex_unlock(&np->rn_lock);
789 }
790
791 if (par_ref != NULL)
792 rc_node_rele_other(par_ref);
793 }
794
795 void
796 rc_node_rele(rc_node_t *np)
797 {
798 (void) pthread_mutex_lock(&np->rn_lock);
799 rc_node_rele_locked(np);
800 }
801
802 static cache_bucket_t *
803 cache_hold(uint32_t h)
804 {
805 cache_bucket_t *bp = CACHE_BUCKET(h);
806 (void) pthread_mutex_lock(&bp->cb_lock);
807 return (bp);
808 }
809
810 static void
811 cache_release(cache_bucket_t *bp)
812 {
813 (void) pthread_mutex_unlock(&bp->cb_lock);
814 }
815
816 static rc_node_t *
817 cache_lookup_unlocked(cache_bucket_t *bp, rc_node_lookup_t *lp)
818 {
819 uint32_t h = rc_node_hash(lp);
820 rc_node_t *np;
821
822 assert(MUTEX_HELD(&bp->cb_lock));
823 assert(bp == CACHE_BUCKET(h));
824
825 for (np = bp->cb_head; np != NULL; np = np->rn_hash_next) {
826 if (np->rn_hash == h && rc_node_match(np, lp)) {
827 rc_node_hold(np);
828 return (np);
829 }
830 }
831
832 return (NULL);
833 }
834
835 static rc_node_t *
836 cache_lookup(rc_node_lookup_t *lp)
837 {
838 uint32_t h;
839 cache_bucket_t *bp;
840 rc_node_t *np;
841
842 h = rc_node_hash(lp);
843 bp = cache_hold(h);
844
845 np = cache_lookup_unlocked(bp, lp);
846
847 cache_release(bp);
848
849 return (np);
850 }
851
852 static void
853 cache_insert_unlocked(cache_bucket_t *bp, rc_node_t *np)
854 {
855 assert(MUTEX_HELD(&bp->cb_lock));
856 assert(np->rn_hash == rc_node_hash(&np->rn_id));
857 assert(bp == CACHE_BUCKET(np->rn_hash));
858
859 assert(np->rn_hash_next == NULL);
860
861 np->rn_hash_next = bp->cb_head;
862 bp->cb_head = np;
863 }
864
865 static void
866 cache_remove_unlocked(cache_bucket_t *bp, rc_node_t *np)
867 {
868 rc_node_t **npp;
869
870 assert(MUTEX_HELD(&bp->cb_lock));
871 assert(np->rn_hash == rc_node_hash(&np->rn_id));
872 assert(bp == CACHE_BUCKET(np->rn_hash));
873
874 for (npp = &bp->cb_head; *npp != NULL; npp = &(*npp)->rn_hash_next)
875 if (*npp == np)
876 break;
877
878 assert(*npp == np);
879 *npp = np->rn_hash_next;
880 np->rn_hash_next = NULL;
881 }
882
883 /*
884 * verify that the 'parent' type can have a child typed 'child'
885 * Fails with
886 * _INVALID_TYPE - argument is invalid
887 * _TYPE_MISMATCH - parent type cannot have children of type child
888 */
889 static int
890 rc_check_parent_child(uint32_t parent, uint32_t child)
891 {
892 int idx;
893 uint32_t type;
894
895 if (parent == 0 || parent >= NUM_TYPES ||
896 child == 0 || child >= NUM_TYPES)
897 return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
898
899 for (idx = 0; idx < MAX_VALID_CHILDREN; idx++) {
900 type = rc_types[parent].rt_valid_children[idx];
901 if (type == child)
902 return (REP_PROTOCOL_SUCCESS);
903 }
904
905 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
906 }
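
/*
 * For example (using the rc_types table above):
 *
 *	rc_check_parent_child(REP_PROTOCOL_ENTITY_SERVICE,
 *	    REP_PROTOCOL_ENTITY_INSTANCE)	returns _SUCCESS
 *	rc_check_parent_child(REP_PROTOCOL_ENTITY_PROPERTY,
 *	    REP_PROTOCOL_ENTITY_SERVICE)	returns _TYPE_MISMATCH
 */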
907
908 /*
909 * Fails with
910 * _INVALID_TYPE - type is invalid
911 * _BAD_REQUEST - name is an invalid name for a node of type type
912 */
913 int
914 rc_check_type_name(uint32_t type, const char *name)
915 {
916 if (type == 0 || type >= NUM_TYPES)
917 return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
918
919 if (uu_check_name(name, rc_types[type].rt_name_flags) == -1)
920 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
921
922 return (REP_PROTOCOL_SUCCESS);
923 }
924
925 static int
926 rc_check_pgtype_name(const char *name)
927 {
928 if (uu_check_name(name, UU_NAME_DOMAIN) == -1)
929 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
930
931 return (REP_PROTOCOL_SUCCESS);
932 }
933
934 /*
935 * rc_node_free_fmri should be called whenever a node loses its parent.
936 * The reason is that the node's fmri string is built up by concatenating
937 * its name to the parent's fmri. Thus, when the node no longer has a
938 * parent, its fmri is no longer valid.
939 */
940 static void
941 rc_node_free_fmri(rc_node_t *np)
942 {
943 if (np->rn_fmri != NULL) {
944 free((void *)np->rn_fmri);
945 np->rn_fmri = NULL;
946 }
947 }
948
949 /*
950 * Concatenate the appropriate separator and the FMRI element to the base
951 * FMRI string at fmri.
952 *
953 * Fails with
954 * _TRUNCATED Not enough room in buffer at fmri.
955 */
956 static int
957 rc_concat_fmri_element(
958 char *fmri, /* base fmri */
959 size_t bufsize, /* size of buf at fmri */
960 size_t *sz_out, /* receives result size. */
961 const char *element, /* element name to concat */
962 rep_protocol_entity_t type) /* type of element */
963 {
964 size_t actual;
965 const char *name = element;
966 int rc;
967 const char *separator;
968
969 if (bufsize > 0)
970 *sz_out = strlen(fmri);
971 else
972 *sz_out = 0;
973
974 switch (type) {
975 case REP_PROTOCOL_ENTITY_SCOPE:
976 if (strcmp(element, SCF_FMRI_LOCAL_SCOPE) == 0) {
977 /*
978 * No need to display scope information if we are
979 * in the local scope.
980 */
981 separator = SCF_FMRI_SVC_PREFIX;
982 name = NULL;
983 } else {
984 /*
985 * Need to display scope information, because it is
986 * not the local scope.
987 */
988 separator = SCF_FMRI_SVC_PREFIX SCF_FMRI_SCOPE_PREFIX;
989 }
990 break;
991 case REP_PROTOCOL_ENTITY_SERVICE:
992 separator = SCF_FMRI_SERVICE_PREFIX;
993 break;
994 case REP_PROTOCOL_ENTITY_INSTANCE:
995 separator = SCF_FMRI_INSTANCE_PREFIX;
996 break;
997 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
998 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
999 separator = SCF_FMRI_PROPERTYGRP_PREFIX;
1000 break;
1001 case REP_PROTOCOL_ENTITY_PROPERTY:
1002 separator = SCF_FMRI_PROPERTY_PREFIX;
1003 break;
1004 case REP_PROTOCOL_ENTITY_VALUE:
1005 /*
1006 * A value does not have a separate FMRI from its property,
1007 * so there is nothing to concat.
1008 */
1009 return (REP_PROTOCOL_SUCCESS);
1010 case REP_PROTOCOL_ENTITY_SNAPSHOT:
1011 case REP_PROTOCOL_ENTITY_SNAPLEVEL:
1012 /* Snapshots do not have FMRIs, so there is nothing to do. */
1013 return (REP_PROTOCOL_SUCCESS);
1014 default:
1015 (void) fprintf(stderr, "%s:%d: Unknown protocol type %d.\n",
1016 __FILE__, __LINE__, type);
1017 abort(); /* Missing a case in switch if we get here. */
1018 }
1019
1020 /* Concatenate separator and element to the fmri buffer. */
1021
1022 actual = strlcat(fmri, separator, bufsize);
1023 if (name != NULL) {
1024 if (actual < bufsize) {
1025 actual = strlcat(fmri, name, bufsize);
1026 } else {
1027 actual += strlen(name);
1028 }
1029 }
1030 if (actual < bufsize) {
1031 rc = REP_PROTOCOL_SUCCESS;
1032 } else {
1033 rc = REP_PROTOCOL_FAIL_TRUNCATED;
1034 }
1035 *sz_out = actual;
1036 return (rc);
1037 }
1038
1039 /*
1040 * Get the FMRI for the node at np. The fmri will be placed in buf. On
1041 * success sz_out will be set to the size of the fmri in buf. If
1042 * REP_PROTOCOL_FAIL_TRUNCATED is returned, sz_out will be set to the size
1043 * of the buffer that would be required to avoid truncation.
1044 *
1045 * Fails with
1046 * _TRUNCATED not enough room in buf for the FMRI.
1047 */
1048 static int
1049 rc_node_get_fmri_or_fragment(rc_node_t *np, char *buf, size_t bufsize,
1050 size_t *sz_out)
1051 {
1052 size_t fmri_len = 0;
1053 int r;
1054
1055 if (bufsize > 0)
1056 *buf = 0;
1057 *sz_out = 0;
1058
1059 if (np->rn_fmri == NULL) {
1060 /*
1061 * A NULL rn_fmri implies that this is a top level scope.
1062 * Child nodes will always have an rn_fmri established
1063 * because both rc_node_link_child() and
1064 * rc_node_relink_child() call rc_node_build_fmri(). In
1065 * this case, we'll just return our name preceded by the
1066 * appropriate FMRI decorations.
1067 */
1068 assert(np->rn_parent == NULL);
1069 r = rc_concat_fmri_element(buf, bufsize, &fmri_len, np->rn_name,
1070 np->rn_id.rl_type);
1071 if (r != REP_PROTOCOL_SUCCESS)
1072 return (r);
1073 } else {
1074 /* We have an fmri, so return it. */
1075 fmri_len = strlcpy(buf, np->rn_fmri, bufsize);
1076 }
1077
1078 *sz_out = fmri_len;
1079
1080 if (fmri_len >= bufsize)
1081 return (REP_PROTOCOL_FAIL_TRUNCATED);
1082
1083 return (REP_PROTOCOL_SUCCESS);
1084 }
1085
1086 /*
1087 * Build an FMRI string for this node and save it in rn_fmri.
1088 *
1089 * The basic strategy here is to get the fmri of our parent and then
1090 * concatenate the appropriate separator followed by our name. If our name
1091 * is null, the resulting fmri will just be a copy of the parent fmri.
1092 * rc_node_build_fmri() should be called with the RC_NODE_USING_PARENT flag
1093 * set. Also the rn_lock for this node should be held.
1094 *
1095 * Fails with
1096 * _NO_RESOURCES Could not allocate memory.
1097 */
1098 static int
1099 rc_node_build_fmri(rc_node_t *np)
1100 {
1101 size_t actual;
1102 char fmri[REP_PROTOCOL_FMRI_LEN];
1103 int rc;
1104 size_t sz = REP_PROTOCOL_FMRI_LEN;
1105
1106 assert(MUTEX_HELD(&np->rn_lock));
1107 assert(np->rn_flags & RC_NODE_USING_PARENT);
1108
1109 rc_node_free_fmri(np);
1110
1111 rc = rc_node_get_fmri_or_fragment(np->rn_parent, fmri, sz, &actual);
1112 assert(rc == REP_PROTOCOL_SUCCESS);
1113
1114 if (np->rn_name != NULL) {
1115 rc = rc_concat_fmri_element(fmri, sz, &actual, np->rn_name,
1116 np->rn_id.rl_type);
1117 assert(rc == REP_PROTOCOL_SUCCESS);
1118 np->rn_fmri = strdup(fmri);
1119 } else {
1120 np->rn_fmri = strdup(fmri);
1121 }
1122 if (np->rn_fmri == NULL) {
1123 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1124 } else {
1125 rc = REP_PROTOCOL_SUCCESS;
1126 }
1127
1128 return (rc);
1129 }
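
/*
 * For example (illustrative), the FMRI of property "start" in property
 * group "startd" of instance "default" of service "system/svc/restarter"
 * is built up parent-by-parent as
 *
 *	scope:		svc:
 *	service:	svc:/system/svc/restarter
 *	instance:	svc:/system/svc/restarter:default
 *	pg:		svc:/system/svc/restarter:default/:properties/startd
 *	property:	svc:/system/svc/restarter:default/:properties/startd/start
 */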
1130
1131 /*
1132 * Get the FMRI of the node at np placing the result in fmri. Then
1133 * concatenate the additional element to fmri. The type variable indicates
1134 * the type of element, so that the appropriate separator can be
1135 * generated. size is the number of bytes in the buffer at fmri, and
1136 * sz_out receives the size of the generated string. If the result is
1137 * truncated, sz_out will receive the size of the buffer that would be
1138 * required to avoid truncation.
1139 *
1140 * Fails with
1141 * _TRUNCATED Not enough room in buffer at fmri.
1142 */
1143 static int
1144 rc_get_fmri_and_concat(rc_node_t *np, char *fmri, size_t size, size_t *sz_out,
1145 const char *element, rep_protocol_entity_t type)
1146 {
1147 int rc;
1148
1149 if ((rc = rc_node_get_fmri_or_fragment(np, fmri, size, sz_out)) !=
1150 REP_PROTOCOL_SUCCESS) {
1151 return (rc);
1152 }
1153 if ((rc = rc_concat_fmri_element(fmri, size, sz_out, element, type)) !=
1154 REP_PROTOCOL_SUCCESS) {
1155 return (rc);
1156 }
1157
1158 return (REP_PROTOCOL_SUCCESS);
1159 }
1160
1161 static int
1162 rc_notify_info_interested(rc_notify_info_t *rnip, rc_notify_t *np)
1163 {
1164 rc_node_t *nnp = np->rcn_node;
1165 int i;
1166
1167 assert(MUTEX_HELD(&rc_pg_notify_lock));
1168
1169 if (np->rcn_delete != NULL) {
1170 assert(np->rcn_info == NULL && np->rcn_node == NULL);
1171 return (1); /* everyone likes deletes */
1172 }
1173 if (np->rcn_node == NULL) {
1174 assert(np->rcn_info != NULL || np->rcn_delete != NULL);
1175 return (0);
1176 }
1177 assert(np->rcn_info == NULL);
1178
1179 for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
1180 if (rnip->rni_namelist[i] != NULL) {
1181 if (strcmp(nnp->rn_name, rnip->rni_namelist[i]) == 0)
1182 return (1);
1183 }
1184 if (rnip->rni_typelist[i] != NULL) {
1185 if (strcmp(nnp->rn_type, rnip->rni_typelist[i]) == 0)
1186 return (1);
1187 }
1188 }
1189 return (0);
1190 }
1191
1192 static void
1193 rc_notify_insert_node(rc_node_t *nnp)
1194 {
1195 rc_notify_t *np = &nnp->rn_notify;
1196 rc_notify_info_t *nip;
1197 int found = 0;
1198
1199 assert(np->rcn_info == NULL);
1200
1201 if (nnp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
1202 return;
1203
1204 (void) pthread_mutex_lock(&rc_pg_notify_lock);
1205 np->rcn_node = nnp;
1206 for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
1207 nip = uu_list_next(rc_notify_info_list, nip)) {
1208 if (rc_notify_info_interested(nip, np)) {
1209 (void) pthread_cond_broadcast(&nip->rni_cv);
1210 found++;
1211 }
1212 }
1213 if (found)
1214 (void) uu_list_insert_before(rc_notify_list, NULL, np);
1215 else
1216 np->rcn_node = NULL;
1217
1218 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
1219 }
1220
1221 static void
1222 rc_notify_deletion(rc_notify_delete_t *ndp, const char *service,
1223 const char *instance, const char *pg)
1224 {
1225 rc_notify_info_t *nip;
1226
1227 uu_list_node_init(&ndp->rnd_notify, &ndp->rnd_notify.rcn_list_node,
1228 rc_notify_pool);
1229 ndp->rnd_notify.rcn_delete = ndp;
1230
1231 (void) snprintf(ndp->rnd_fmri, sizeof (ndp->rnd_fmri),
1232 "svc:/%s%s%s%s%s", service,
1233 (instance != NULL)? ":" : "", (instance != NULL)? instance : "",
1234 (pg != NULL)? "/:properties/" : "", (pg != NULL)? pg : "");
1235
1236 /*
1237 * add to notification list, notify watchers
1238 */
1239 (void) pthread_mutex_lock(&rc_pg_notify_lock);
1240 for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
1241 nip = uu_list_next(rc_notify_info_list, nip))
1242 (void) pthread_cond_broadcast(&nip->rni_cv);
1243 (void) uu_list_insert_before(rc_notify_list, NULL, ndp);
1244 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
1245 }
1246
1247 static void
1248 rc_notify_remove_node(rc_node_t *nnp)
1249 {
1250 rc_notify_t *np = &nnp->rn_notify;
1251
1252 assert(np->rcn_info == NULL);
1253 assert(!MUTEX_HELD(&nnp->rn_lock));
1254
1255 (void) pthread_mutex_lock(&rc_pg_notify_lock);
1256 while (np->rcn_node != NULL) {
1257 if (rc_notify_in_use) {
1258 (void) pthread_cond_wait(&rc_pg_notify_cv,
1259 &rc_pg_notify_lock);
1260 continue;
1261 }
1262 (void) uu_list_remove(rc_notify_list, np);
1263 np->rcn_node = NULL;
1264 break;
1265 }
1266 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
1267 }
1268
1269 static void
1270 rc_notify_remove_locked(rc_notify_t *np)
1271 {
1272 assert(MUTEX_HELD(&rc_pg_notify_lock));
1273 assert(rc_notify_in_use == 0);
1274
1275 (void) uu_list_remove(rc_notify_list, np);
1276 if (np->rcn_node) {
1277 np->rcn_node = NULL;
1278 } else if (np->rcn_delete) {
1279 uu_free(np->rcn_delete);
1280 } else {
1281 assert(0); /* CAN'T HAPPEN */
1282 }
1283 }
1284
1285 /*
1286 * Permission checking functions. See comment atop this file.
1287 */
1288 #ifndef NATIVE_BUILD
1289 static permcheck_t *
1290 pc_create()
1291 {
1292 permcheck_t *p;
1293
1294 p = uu_zalloc(sizeof (*p));
1295 if (p == NULL)
1296 return (NULL);
1297 p->pc_bnum = 8; /* Normal case will only have 2 elts. */
1298 p->pc_buckets = uu_zalloc(sizeof (*p->pc_buckets) * p->pc_bnum);
1299 if (p->pc_buckets == NULL) {
1300 uu_free(p);
1301 return (NULL);
1302 }
1303
1304 p->pc_enum = 0;
1305 return (p);
1306 }
1307
1308 static void
1309 pc_free(permcheck_t *pcp)
1310 {
1311 uint_t i;
1312 struct pc_elt *ep, *next;
1313
1314 for (i = 0; i < pcp->pc_bnum; ++i) {
1315 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1316 next = ep->pce_next;
1317 free(ep);
1318 }
1319 }
1320
1321 free(pcp->pc_buckets);
1322 free(pcp);
1323 }
1324
1325 static uint32_t
1326 pc_hash(const char *auth)
1327 {
1328 uint32_t h = 0, g;
1329 const char *p;
1330
1331 /*
1332 * Generic hash function from uts/common/os/modhash.c.
1333 */
1334 for (p = auth; *p != '\0'; ++p) {
1335 h = (h << 4) + *p;
1336 g = (h & 0xf0000000);
1337 if (g != 0) {
1338 h ^= (g >> 24);
1339 h ^= g;
1340 }
1341 }
1342
1343 return (h);
1344 }
1345
1346 static perm_status_t
1347 pc_exists(permcheck_t *pcp, const char *auth)
1348 {
1349 uint32_t h;
1350 struct pc_elt *ep;
1351
1352 h = pc_hash(auth);
1353 for (ep = pcp->pc_buckets[h & (pcp->pc_bnum - 1)];
1354 ep != NULL;
1355 ep = ep->pce_next) {
1356 if (strcmp(auth, ep->pce_auth) == 0) {
1357 pcp->pc_auth_string = ep->pce_auth;
1358 return (PERM_GRANTED);
1359 }
1360 }
1361
1362 return (PERM_DENIED);
1363 }
1364
1365 static perm_status_t
1366 pc_match(permcheck_t *pcp, const char *pattern)
1367 {
1368 uint_t i;
1369 struct pc_elt *ep;
1370
1371 for (i = 0; i < pcp->pc_bnum; ++i) {
1372 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = ep->pce_next) {
1373 if (_auth_match(pattern, ep->pce_auth)) {
1374 pcp->pc_auth_string = ep->pce_auth;
1375 return (PERM_GRANTED);
1376 }
1377 }
1378 }
1379
1380 return (PERM_DENIED);
1381 }
1382
1383 static int
1384 pc_grow(permcheck_t *pcp)
1385 {
1386 uint_t new_bnum, i, j;
1387 struct pc_elt **new_buckets;
1388 struct pc_elt *ep, *next;
1389
1390 new_bnum = pcp->pc_bnum * 2;
1391 if (new_bnum < pcp->pc_bnum)
1392 /* Homey don't play that. */
1393 return (-1);
1394
1395 new_buckets = uu_zalloc(sizeof (*new_buckets) * new_bnum);
1396 if (new_buckets == NULL)
1397 return (-1);
1398
1399 for (i = 0; i < pcp->pc_bnum; ++i) {
1400 for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1401 next = ep->pce_next;
1402 j = pc_hash(ep->pce_auth) & (new_bnum - 1);
1403 ep->pce_next = new_buckets[j];
1404 new_buckets[j] = ep;
1405 }
1406 }
1407
1408 uu_free(pcp->pc_buckets);
1409 pcp->pc_buckets = new_buckets;
1410 pcp->pc_bnum = new_bnum;
1411
1412 return (0);
1413 }
1414
1415 static int
1416 pc_add(permcheck_t *pcp, const char *auth, pc_auth_type_t auth_type)
1417 {
1418 struct pc_elt *ep;
1419 uint_t i;
1420
1421 ep = uu_zalloc(offsetof(struct pc_elt, pce_auth) + strlen(auth) + 1);
1422 if (ep == NULL)
1423 return (-1);
1424
1425 /* Grow if pc_enum / pc_bnum > 3/4. */
1426 if (pcp->pc_enum * 4 > 3 * pcp->pc_bnum)
1427 /* Failure is not a stopper; we'll try again next time. */
1428 (void) pc_grow(pcp);
1429
1430 (void) strcpy(ep->pce_auth, auth);
1431
1432 i = pc_hash(auth) & (pcp->pc_bnum - 1);
1433 ep->pce_next = pcp->pc_buckets[i];
1434 pcp->pc_buckets[i] = ep;
1435
1436 if (auth_type > pcp->pc_specific_type) {
1437 pcp->pc_specific_type = auth_type;
1438 pcp->pc_specific = ep;
1439 }
1440
1441 ++pcp->pc_enum;
1442
1443 return (0);
1444 }
1445
1446 /*
1447 * For the type of a property group, return the authorization which may be
1448 * used to modify it.
1449 */
1450 static const char *
1451 perm_auth_for_pgtype(const char *pgtype)
1452 {
1453 if (strcmp(pgtype, SCF_GROUP_METHOD) == 0)
1454 return (AUTH_MODIFY_PREFIX "method");
1455 else if (strcmp(pgtype, SCF_GROUP_DEPENDENCY) == 0)
1456 return (AUTH_MODIFY_PREFIX "dependency");
1457 else if (strcmp(pgtype, SCF_GROUP_APPLICATION) == 0)
1458 return (AUTH_MODIFY_PREFIX "application");
1459 else if (strcmp(pgtype, SCF_GROUP_FRAMEWORK) == 0)
1460 return (AUTH_MODIFY_PREFIX "framework");
1461 else
1462 return (NULL);
1463 }
1464
1465 /*
1466 * Fails with
1467 * _NO_RESOURCES - out of memory
1468 */
1469 static int
1470 perm_add_enabling_type(permcheck_t *pcp, const char *auth,
1471 pc_auth_type_t auth_type)
1472 {
1473 return (pc_add(pcp, auth, auth_type) == 0 ? REP_PROTOCOL_SUCCESS :
1474 REP_PROTOCOL_FAIL_NO_RESOURCES);
1475 }
1476
1477 /*
1478 * Fails with
1479 * _NO_RESOURCES - out of memory
1480 */
1481 static int
1482 perm_add_enabling(permcheck_t *pcp, const char *auth)
1483 {
1484 return (perm_add_enabling_type(pcp, auth, PC_AUTH_SMF));
1485 }
1486
1487 /* Note that perm_add_enabling_values() is defined below. */
1488
1489 /*
1490 * perm_granted() returns PERM_GRANTED if the current door caller has one of
1491 * the enabling authorizations in pcp, PERM_DENIED if it doesn't, PERM_GONE if
1492 * the door client went away and PERM_FAIL if an error (usually lack of
1493 * memory) occurs. auth_cb() checks each authorization enumerated by
1494 * _enum_auths(). When we find a result other than PERM_DENIED, we
1495 * short-circuit the enumeration and return non-zero.
1496 */
1497
1498 static int
1499 auth_cb(const char *auth, void *ctxt, void *vres)
1500 {
1501 permcheck_t *pcp = ctxt;
1502 int *pret = vres;
1503
1504 if (strchr(auth, KV_WILDCHAR) == NULL)
1505 *pret = pc_exists(pcp, auth);
1506 else
1507 *pret = pc_match(pcp, auth);
1508
1509 if (*pret != PERM_DENIED)
1510 return (1);
1511 /*
1512 * If we failed, choose the most specific auth string for use in
1513 * the audit event.
1514 */
1515 assert(pcp->pc_specific != NULL);
1516 pcp->pc_auth_string = pcp->pc_specific->pce_auth;
1517
1518 return (0); /* Tells that we need to continue */
1519 }
1520
1521 static perm_status_t
1522 perm_granted(permcheck_t *pcp)
1523 {
1524 ucred_t *uc;
1525
1526 perm_status_t ret = PERM_DENIED;
1527 uid_t uid;
1528 struct passwd pw;
1529 char pwbuf[1024]; /* XXX should be NSS_BUFLEN_PASSWD */
1530
1531 /* Get the uid */
1532 if ((uc = get_ucred()) == NULL) {
1533 if (errno == EINVAL) {
1534 /*
1535 * Client is no longer waiting for our response (e.g.,
1536 * it received a signal & resumed with EINTR).
1537 * Punting with door_return() would be nice but we
1538 * need to release all of the locks & references we
1539 * hold. And we must report failure to the client
1540 * layer to keep it from ignoring retries as
1541 * already-done (idempotency & all that). None of the
1542 * error codes fit very well, so we might as well
1543 * force the return of _PERMISSION_DENIED since we
1544 * couldn't determine the user.
1545 */
1546 return (PERM_GONE);
1547 }
1548 assert(0);
1549 abort();
1550 }
1551
1552 uid = ucred_geteuid(uc);
1553 assert(uid != (uid_t)-1);
1554
1555 if (getpwuid_r(uid, &pw, pwbuf, sizeof (pwbuf)) == NULL) {
1556 return (PERM_FAIL);
1557 }
1558
1559 /*
1560 * Enumerate all the auths defined for the user and return the
1561 * result in ret.
1562 */
1563 if (_enum_auths(pw.pw_name, auth_cb, pcp, &ret) < 0)
1564 return (PERM_FAIL);
1565
1566 return (ret);
1567 }
1568
1569 static int
1570 map_granted_status(perm_status_t status, permcheck_t *pcp,
1571 char **match_auth)
1572 {
1573 int rc;
1574
1575 *match_auth = NULL;
1576 switch (status) {
1577 case PERM_DENIED:
1578 *match_auth = strdup(pcp->pc_auth_string);
1579 if (*match_auth == NULL)
1580 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1581 else
1582 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1583 break;
1584 case PERM_GRANTED:
1585 *match_auth = strdup(pcp->pc_auth_string);
1586 if (*match_auth == NULL)
1587 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1588 else
1589 rc = REP_PROTOCOL_SUCCESS;
1590 break;
1591 case PERM_GONE:
1592 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1593 break;
1594 case PERM_FAIL:
1595 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1596 break;
1597 }
1598 return (rc);
1599 }
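
/*
 * Illustrative sketch (not part of the original source): the way the
 * permission helpers above are typically strung together by a modification
 * entry point.  "pcp", "auths" and "rc" are hypothetical locals; compare
 * rc_node_modify_permission_check() further below.
 *
 *	permcheck_t *pcp = pc_create();
 *	char *auths = NULL;
 *	int rc;
 *
 *	if (pcp == NULL)
 *		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
 *	rc = perm_add_enabling(pcp, AUTH_MODIFY);
 *	if (rc == REP_PROTOCOL_SUCCESS)
 *		rc = map_granted_status(perm_granted(pcp), pcp, &auths);
 *	pc_free(pcp);
 *	...use auths (the matched authorization, if any) for auditing...
 *	free(auths);
 *	return (rc);
 */
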
1600 #endif /* NATIVE_BUILD */
1601
1602 /*
1603 * flags in RC_NODE_WAITING_FLAGS are broadcast when unset, and are used to
1604 * serialize certain actions, and to wait for certain operations to complete
1605 *
1606 * The waiting flags are:
1607 * RC_NODE_CHILDREN_CHANGING
1608 * The child list is being built or changed (due to creation
1609 * or deletion). All iterators pause.
1610 *
1611 * RC_NODE_USING_PARENT
1612 * Someone is actively using the parent pointer, so we can't
1613 * be removed from the parent list.
1614 *
1615 * RC_NODE_CREATING_CHILD
1616 * A child is being created -- locks out other creations, to
1617 * prevent insert-insert races.
1618 *
1619 * RC_NODE_IN_TX
1620 * This object is running a transaction.
1621 *
1622 * RC_NODE_DYING
1623 * This node might be dying. Always set as a group, using
1624 * RC_NODE_DYING_FLAGS (which is everything but
1625 * RC_NODE_USING_PARENT)
1626 */
1627 static int
1628 rc_node_hold_flag(rc_node_t *np, uint32_t flag)
1629 {
1630 assert(MUTEX_HELD(&np->rn_lock));
1631 assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1632
1633 while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag)) {
1634 (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1635 }
1636 if (np->rn_flags & RC_NODE_DEAD)
1637 return (0);
1638
1639 np->rn_flags |= flag;
1640 return (1);
1641 }
1642
1643 static void
1644 rc_node_rele_flag(rc_node_t *np, uint32_t flag)
1645 {
1646 assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1647 assert(MUTEX_HELD(&np->rn_lock));
1648 assert((np->rn_flags & flag) == flag);
1649 np->rn_flags &= ~flag;
1650 (void) pthread_cond_broadcast(&np->rn_cv);
1651 }
1652
1653 /*
1654 * wait until a particular flag has cleared. Fails if the object dies.
1655 */
1656 static int
1657 rc_node_wait_flag(rc_node_t *np, uint32_t flag)
1658 {
1659 assert(MUTEX_HELD(&np->rn_lock));
1660 while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag))
1661 (void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1662
1663 return (!(np->rn_flags & RC_NODE_DEAD));
1664 }
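
/*
 * Usage sketch (illustrative only): serializing a child-list change with
 * the waiting flags above.  "np" is a held node; compare
 * rc_node_fill_children() below, which enters with rn_lock already held.
 *
 *	(void) pthread_mutex_lock(&np->rn_lock);
 *	if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING)) {
 *		(void) pthread_mutex_unlock(&np->rn_lock);
 *		return (REP_PROTOCOL_FAIL_DELETED);	(the node died)
 *	}
 *	...manipulate np->rn_children, possibly dropping rn_lock...
 *	rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
 *	(void) pthread_mutex_unlock(&np->rn_lock);
 *
 * rc_node_wait_flag() is the passive variant: it waits for the flag to
 * clear (or for the node to die) without taking ownership of it.
 */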
1665
1666 /*
1667 * On entry, np's lock must be held, and this thread must be holding
1668 * RC_NODE_USING_PARENT. On return, both of them are released.
1669 *
1670 * If the return value is NULL, np either does not have a parent, or
1671 * the parent has been marked DEAD.
1672 *
1673 * If the return value is non-NULL, it is the parent of np, and both
1674 * its lock and the requested flags are held.
1675 */
1676 static rc_node_t *
1677 rc_node_hold_parent_flag(rc_node_t *np, uint32_t flag)
1678 {
1679 rc_node_t *pp;
1680
1681 assert(MUTEX_HELD(&np->rn_lock));
1682 assert(np->rn_flags & RC_NODE_USING_PARENT);
1683
1684 if ((pp = np->rn_parent) == NULL) {
1685 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1686 (void) pthread_mutex_unlock(&np->rn_lock);
1687 return (NULL);
1688 }
1689 (void) pthread_mutex_unlock(&np->rn_lock);
1690
1691 (void) pthread_mutex_lock(&pp->rn_lock);
1692 (void) pthread_mutex_lock(&np->rn_lock);
1693 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1694 (void) pthread_mutex_unlock(&np->rn_lock);
1695
1696 if (!rc_node_hold_flag(pp, flag)) {
1697 (void) pthread_mutex_unlock(&pp->rn_lock);
1698 return (NULL);
1699 }
1700 return (pp);
1701 }
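
/*
 * Caller sketch (hypothetical; the real callers appear later in the file):
 * walking to a node's parent while keeping the parent's child list stable.
 *
 *	(void) pthread_mutex_lock(&np->rn_lock);
 *	if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
 *		(void) pthread_mutex_unlock(&np->rn_lock);
 *		return (REP_PROTOCOL_FAIL_DELETED);
 *	}
 *	pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
 *	(np->rn_lock and RC_NODE_USING_PARENT are released either way)
 *	if (pp == NULL)
 *		return (REP_PROTOCOL_FAIL_DELETED);	(no parent, or dead)
 *	...pp->rn_lock and RC_NODE_CHILDREN_CHANGING are held...
 *	rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
 *	(void) pthread_mutex_unlock(&pp->rn_lock);
 */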
1702
1703 rc_node_t *
1704 rc_node_alloc(void)
1705 {
1706 rc_node_t *np = uu_zalloc(sizeof (*np));
1707
1708 if (np == NULL)
1709 return (NULL);
1710
1711 (void) pthread_mutex_init(&np->rn_lock, NULL);
1712 (void) pthread_cond_init(&np->rn_cv, NULL);
1713
1714 np->rn_children = uu_list_create(rc_children_pool, np, 0);
1715 np->rn_pg_notify_list = uu_list_create(rc_pg_notify_pool, np, 0);
1716
1717 uu_list_node_init(np, &np->rn_sibling_node, rc_children_pool);
1718
1719 uu_list_node_init(&np->rn_notify, &np->rn_notify.rcn_list_node,
1720 rc_notify_pool);
1721
1722 return (np);
1723 }
1724
1725 void
1726 rc_node_destroy(rc_node_t *np)
1727 {
1728 int i;
1729
1730 if (np->rn_flags & RC_NODE_UNREFED)
1731 return; /* being handled elsewhere */
1732
1733 assert(np->rn_refs == 0 && np->rn_other_refs == 0);
1734 assert(np->rn_former == NULL);
1735
1736 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
1737 /* Release the holds from rc_iter_next(). */
1738 for (i = 0; i < COMPOSITION_DEPTH; ++i) {
1739 /* rn_cchain[i] may be NULL for empty snapshots. */
1740 if (np->rn_cchain[i] != NULL)
1741 rc_node_rele(np->rn_cchain[i]);
1742 }
1743 }
1744
1745 if (np->rn_name != NULL)
1746 free((void *)np->rn_name);
1747 np->rn_name = NULL;
1748 if (np->rn_type != NULL)
1749 free((void *)np->rn_type);
1750 np->rn_type = NULL;
1751 if (np->rn_values != NULL)
1752 object_free_values(np->rn_values, np->rn_valtype,
1753 np->rn_values_count, np->rn_values_size);
1754 np->rn_values = NULL;
1755 rc_node_free_fmri(np);
1756
1757 if (np->rn_snaplevel != NULL)
1758 rc_snaplevel_rele(np->rn_snaplevel);
1759 np->rn_snaplevel = NULL;
1760
1761 uu_list_node_fini(np, &np->rn_sibling_node, rc_children_pool);
1762
1763 uu_list_node_fini(&np->rn_notify, &np->rn_notify.rcn_list_node,
1764 rc_notify_pool);
1765
1766 assert(uu_list_first(np->rn_children) == NULL);
1767 uu_list_destroy(np->rn_children);
1768 uu_list_destroy(np->rn_pg_notify_list);
1769
1770 (void) pthread_mutex_destroy(&np->rn_lock);
1771 (void) pthread_cond_destroy(&np->rn_cv);
1772
1773 uu_free(np);
1774 }
1775
1776 /*
1777 * Link in a child node.
1778 *
1779 * Because of the lock ordering, cp has to already be in the hash table with
1780 * its lock dropped before we get it. To prevent anyone from noticing that
1781 * it is parentless, the creation code sets the RC_NODE_USING_PARENT flag.
1782 * Once we've linked it in, we release the flag.
1783 */
1784 static void
1785 rc_node_link_child(rc_node_t *np, rc_node_t *cp)
1786 {
1787 assert(!MUTEX_HELD(&np->rn_lock));
1788 assert(!MUTEX_HELD(&cp->rn_lock));
1789
1790 (void) pthread_mutex_lock(&np->rn_lock);
1791 (void) pthread_mutex_lock(&cp->rn_lock);
1792 assert(!(cp->rn_flags & RC_NODE_IN_PARENT) &&
1793 (cp->rn_flags & RC_NODE_USING_PARENT));
1794
1795 assert(rc_check_parent_child(np->rn_id.rl_type, cp->rn_id.rl_type) ==
1796 REP_PROTOCOL_SUCCESS);
1797
1798 cp->rn_parent = np;
1799 cp->rn_flags |= RC_NODE_IN_PARENT;
1800 (void) uu_list_insert_before(np->rn_children, NULL, cp);
1801 (void) rc_node_build_fmri(cp);
1802
1803 (void) pthread_mutex_unlock(&np->rn_lock);
1804
1805 rc_node_rele_flag(cp, RC_NODE_USING_PARENT);
1806 (void) pthread_mutex_unlock(&cp->rn_lock);
1807 }
1808
1809 /*
1810 * Sets the rn_parent_ref field of all the children of np to pp -- always
1811 * initially invoked as rc_node_setup_parent_ref(np, np), we then recurse.
1812 *
1813 * This is used when we mark a node RC_NODE_OLD, so that when the object and
1814 * its children are no longer referenced, they will all be deleted as a unit.
1815 */
1816 static void
1817 rc_node_setup_parent_ref(rc_node_t *np, rc_node_t *pp)
1818 {
1819 rc_node_t *cp;
1820
1821 assert(MUTEX_HELD(&np->rn_lock));
1822
1823 for (cp = uu_list_first(np->rn_children); cp != NULL;
1824 cp = uu_list_next(np->rn_children, cp)) {
1825 (void) pthread_mutex_lock(&cp->rn_lock);
1826 if (cp->rn_flags & RC_NODE_PARENT_REF) {
1827 assert(cp->rn_parent_ref == pp);
1828 } else {
1829 assert(cp->rn_parent_ref == NULL);
1830
1831 cp->rn_flags |= RC_NODE_PARENT_REF;
1832 cp->rn_parent_ref = pp;
1833 if (cp->rn_refs != 0)
1834 rc_node_hold_other(pp);
1835 }
1836 rc_node_setup_parent_ref(cp, pp); /* recurse */
1837 (void) pthread_mutex_unlock(&cp->rn_lock);
1838 }
1839 }
1840
1841 /*
1842 * Atomically replace 'np' with 'newp', with a parent of 'pp'.
1843 *
1844 * Requirements:
1845 * *no* node locks may be held.
1846 * pp must be held with RC_NODE_CHILDREN_CHANGING
1847 * newp and np must be held with RC_NODE_IN_TX
1848 * np must be marked RC_NODE_IN_PARENT, newp must not be
1849 * np must be marked RC_NODE_OLD
1850 *
1851 * Afterwards:
1852 * pp's RC_NODE_CHILDREN_CHANGING is dropped
1853 * newp and np's RC_NODE_IN_TX is dropped
1854 * newp->rn_former = np;
1855 * newp is RC_NODE_IN_PARENT, np is not.
1856 * interested notify subscribers have been notified of newp's new status.
1857 */
1858 static void
1859 rc_node_relink_child(rc_node_t *pp, rc_node_t *np, rc_node_t *newp)
1860 {
1861 cache_bucket_t *bp;
1862 /*
1863 * First, swap np and newp in the cache. newp's RC_NODE_IN_TX flag
1864 * keeps rc_node_update() from seeing it until we are done.
1865 */
1866 bp = cache_hold(newp->rn_hash);
1867 cache_remove_unlocked(bp, np);
1868 cache_insert_unlocked(bp, newp);
1869 cache_release(bp);
1870
1871 /*
1872 * replace np with newp in pp's list, and attach it to newp's rn_former
1873 * link.
1874 */
1875 (void) pthread_mutex_lock(&pp->rn_lock);
1876 assert(pp->rn_flags & RC_NODE_CHILDREN_CHANGING);
1877
1878 (void) pthread_mutex_lock(&newp->rn_lock);
1879 assert(!(newp->rn_flags & RC_NODE_IN_PARENT));
1880 assert(newp->rn_flags & RC_NODE_IN_TX);
1881
1882 (void) pthread_mutex_lock(&np->rn_lock);
1883 assert(np->rn_flags & RC_NODE_IN_PARENT);
1884 assert(np->rn_flags & RC_NODE_OLD);
1885 assert(np->rn_flags & RC_NODE_IN_TX);
1886
1887 newp->rn_parent = pp;
1888 newp->rn_flags |= RC_NODE_IN_PARENT;
1889
1890 /*
1891 * Note that we carefully add newp before removing np -- this
1892 * keeps iterators on the list from missing us.
1893 */
1894 (void) uu_list_insert_after(pp->rn_children, np, newp);
1895 (void) rc_node_build_fmri(newp);
1896 (void) uu_list_remove(pp->rn_children, np);
1897
1898 /*
1899 * re-set np
1900 */
1901 newp->rn_former = np;
1902 np->rn_parent = NULL;
1903 np->rn_flags &= ~RC_NODE_IN_PARENT;
1904 np->rn_flags |= RC_NODE_ON_FORMER;
1905
1906 rc_notify_insert_node(newp);
1907
1908 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
1909 (void) pthread_mutex_unlock(&pp->rn_lock);
1910 rc_node_rele_flag(newp, RC_NODE_USING_PARENT | RC_NODE_IN_TX);
1911 (void) pthread_mutex_unlock(&newp->rn_lock);
1912 rc_node_setup_parent_ref(np, np);
1913 rc_node_rele_flag(np, RC_NODE_IN_TX);
1914 (void) pthread_mutex_unlock(&np->rn_lock);
1915 }
1916
1917 /*
1918 * makes sure a node with lookup 'nip', name 'name', and parent 'pp' exists.
1919 * 'cp' is used (and returned) if the node does not yet exist. If it does
1920 * exist, 'cp' is freed, and the existent node is returned instead.
1921 */
1922 rc_node_t *
1923 rc_node_setup(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
1924 rc_node_t *pp)
1925 {
1926 rc_node_t *np;
1927 cache_bucket_t *bp;
1928 uint32_t h = rc_node_hash(nip);
1929
1930 assert(cp->rn_refs == 0);
1931
1932 bp = cache_hold(h);
1933 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
1934 cache_release(bp);
1935
1936 /*
1937 * make sure it matches our expectations
1938 */
1939 (void) pthread_mutex_lock(&np->rn_lock);
1940 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
1941 assert(np->rn_parent == pp);
1942 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
1943 assert(strcmp(np->rn_name, name) == 0);
1944 assert(np->rn_type == NULL);
1945 assert(np->rn_flags & RC_NODE_IN_PARENT);
1946 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1947 }
1948 (void) pthread_mutex_unlock(&np->rn_lock);
1949
1950 rc_node_destroy(cp);
1951 return (np);
1952 }
1953
1954 /*
1955 * No one is there -- setup & install the new node.
1956 */
1957 np = cp;
1958 rc_node_hold(np);
1959 np->rn_id = *nip;
1960 np->rn_hash = h;
1961 np->rn_name = strdup(name);
1962
1963 np->rn_flags |= RC_NODE_USING_PARENT;
1964
1965 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE) {
1966 #if COMPOSITION_DEPTH == 2
1967 np->rn_cchain[0] = np;
1968 np->rn_cchain[1] = pp;
1969 #else
1970 #error This code must be updated.
1971 #endif
1972 }
1973
1974 cache_insert_unlocked(bp, np);
1975 cache_release(bp); /* we are now visible */
1976
1977 rc_node_link_child(pp, np);
1978
1979 return (np);
1980 }
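
/*
 * A hedged sketch of the expected call sequence (the real callers are the
 * fill callbacks driven by the object layer; "id", "name" and "parent" are
 * hypothetical):
 *
 *	rc_node_t *cp, *np;
 *
 *	if ((cp = rc_node_alloc()) == NULL)
 *		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
 *	...fill in the lookup id and name from the backend row...
 *	np = rc_node_setup(cp, &id, name, parent);
 *	(np is either cp, now cached and linked under parent, or a
 *	 pre-existing cached node; in the latter case cp was destroyed)
 */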
1981
1982 /*
1983 * makes sure a snapshot with lookup 'nip', name 'name', and parent 'pp' exists.
1984 * 'cp' is used (and returned) if the node does not yet exist. If it does
1985 * exist, 'cp' is freed, and the existent node is returned instead.
1986 */
1987 rc_node_t *
1988 rc_node_setup_snapshot(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
1989 uint32_t snap_id, rc_node_t *pp)
1990 {
1991 rc_node_t *np;
1992 cache_bucket_t *bp;
1993 uint32_t h = rc_node_hash(nip);
1994
1995 assert(cp->rn_refs == 0);
1996
1997 bp = cache_hold(h);
1998 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
1999 cache_release(bp);
2000
2001 /*
2002 * make sure it matches our expectations
2003 */
2004 (void) pthread_mutex_lock(&np->rn_lock);
2005 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2006 assert(np->rn_parent == pp);
2007 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2008 assert(strcmp(np->rn_name, name) == 0);
2009 assert(np->rn_type == NULL);
2010 assert(np->rn_flags & RC_NODE_IN_PARENT);
2011 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2012 }
2013 (void) pthread_mutex_unlock(&np->rn_lock);
2014
2015 rc_node_destroy(cp);
2016 return (np);
2017 }
2018
2019 /*
2020 * No one is there -- create a new node.
2021 */
2022 np = cp;
2023 rc_node_hold(np);
2024 np->rn_id = *nip;
2025 np->rn_hash = h;
2026 np->rn_name = strdup(name);
2027 np->rn_snapshot_id = snap_id;
2028
2029 np->rn_flags |= RC_NODE_USING_PARENT;
2030
2031 cache_insert_unlocked(bp, np);
2032 cache_release(bp); /* we are now visible */
2033
2034 rc_node_link_child(pp, np);
2035
2036 return (np);
2037 }
2038
2039 /*
2040 * makes sure a snaplevel with lookup 'nip' and parent 'pp' exists. 'cp' is
2041 * used (and returned) if the node does not yet exist. If it does exist, 'cp'
2042 * is freed, and the existent node is returned instead.
2043 */
2044 rc_node_t *
2045 rc_node_setup_snaplevel(rc_node_t *cp, rc_node_lookup_t *nip,
2046 rc_snaplevel_t *lvl, rc_node_t *pp)
2047 {
2048 rc_node_t *np;
2049 cache_bucket_t *bp;
2050 uint32_t h = rc_node_hash(nip);
2051
2052 assert(cp->rn_refs == 0);
2053
2054 bp = cache_hold(h);
2055 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2056 cache_release(bp);
2057
2058 /*
2059 * make sure it matches our expectations
2060 */
2061 (void) pthread_mutex_lock(&np->rn_lock);
2062 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2063 assert(np->rn_parent == pp);
2064 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2065 assert(np->rn_name == NULL);
2066 assert(np->rn_type == NULL);
2067 assert(np->rn_flags & RC_NODE_IN_PARENT);
2068 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2069 }
2070 (void) pthread_mutex_unlock(&np->rn_lock);
2071
2072 rc_node_destroy(cp);
2073 return (np);
2074 }
2075
2076 /*
2077 * No one is there -- create a new node.
2078 */
2079 np = cp;
2080 rc_node_hold(np); /* released in snapshot_fill_children() */
2081 np->rn_id = *nip;
2082 np->rn_hash = h;
2083
2084 rc_snaplevel_hold(lvl);
2085 np->rn_snaplevel = lvl;
2086
2087 np->rn_flags |= RC_NODE_USING_PARENT;
2088
2089 cache_insert_unlocked(bp, np);
2090 cache_release(bp); /* we are now visible */
2091
2092 /* Add this snaplevel to the snapshot's composition chain. */
2093 assert(pp->rn_cchain[lvl->rsl_level_num - 1] == NULL);
2094 pp->rn_cchain[lvl->rsl_level_num - 1] = np;
2095
2096 rc_node_link_child(pp, np);
2097
2098 return (np);
2099 }
2100
2101 /*
2102 * Returns NULL if strdup() fails.
2103 */
2104 rc_node_t *
2105 rc_node_setup_pg(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
2106 const char *type, uint32_t flags, uint32_t gen_id, rc_node_t *pp)
2107 {
2108 rc_node_t *np;
2109 cache_bucket_t *bp;
2110
2111 uint32_t h = rc_node_hash(nip);
2112 bp = cache_hold(h);
2113 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2114 cache_release(bp);
2115
2116 /*
2117 * make sure it matches our expectations (don't check
2118 * the generation number or parent, since someone could
2119 * have gotten a transaction through while we weren't
2120 * looking)
2121 */
2122 (void) pthread_mutex_lock(&np->rn_lock);
2123 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2124 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2125 assert(strcmp(np->rn_name, name) == 0);
2126 assert(strcmp(np->rn_type, type) == 0);
2127 assert(np->rn_pgflags == flags);
2128 assert(np->rn_flags & RC_NODE_IN_PARENT);
2129 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2130 }
2131 (void) pthread_mutex_unlock(&np->rn_lock);
2132
2133 rc_node_destroy(cp);
2134 return (np);
2135 }
2136
2137 np = cp;
2138 rc_node_hold(np); /* released in fill_pg_callback() */
2139 np->rn_id = *nip;
2140 np->rn_hash = h;
2141 np->rn_name = strdup(name);
2142 if (np->rn_name == NULL) {
2143 rc_node_rele(np);
2144 return (NULL);
2145 }
2146 np->rn_type = strdup(type);
2147 if (np->rn_type == NULL) {
2148 free((void *)np->rn_name);
2149 rc_node_rele(np);
2150 return (NULL);
2151 }
2152 np->rn_pgflags = flags;
2153 np->rn_gen_id = gen_id;
2154
2155 np->rn_flags |= RC_NODE_USING_PARENT;
2156
2157 cache_insert_unlocked(bp, np);
2158 cache_release(bp); /* we are now visible */
2159
2160 rc_node_link_child(pp, np);
2161
2162 return (np);
2163 }
2164
2165 #if COMPOSITION_DEPTH == 2
2166 /*
2167 * Initialize a "composed property group" which represents the composition of
2168 * property groups pg1 & pg2. It is ephemeral: once created & returned for an
2169 * ITER_READ request, keeping it out of cache_hash and any child lists
2170 * prevents it from being looked up. Operations besides iteration are passed
2171 * through to pg1.
2172 *
2173 * pg1 & pg2 should be held before entering this function. They will be
2174 * released in rc_node_destroy().
2175 */
2176 static int
2177 rc_node_setup_cpg(rc_node_t *cpg, rc_node_t *pg1, rc_node_t *pg2)
2178 {
2179 if (strcmp(pg1->rn_type, pg2->rn_type) != 0)
2180 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2181
2182 cpg->rn_id.rl_type = REP_PROTOCOL_ENTITY_CPROPERTYGRP;
2183 cpg->rn_name = strdup(pg1->rn_name);
2184 if (cpg->rn_name == NULL)
2185 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2186
2187 cpg->rn_cchain[0] = pg1;
2188 cpg->rn_cchain[1] = pg2;
2189
2190 return (REP_PROTOCOL_SUCCESS);
2191 }
2192 #else
2193 #error This code must be updated.
2194 #endif
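
/*
 * Minimal sketch of composing two property groups for an ITER_READ request
 * (hypothetical caller; error handling omitted):
 *
 *	rc_node_t *cpg = rc_node_alloc();
 *
 *	rc_node_hold(pg1);	(both released in rc_node_destroy())
 *	rc_node_hold(pg2);
 *	rc = rc_node_setup_cpg(cpg, pg1, pg2);
 *	(the composed node is never inserted into cache_hash, so it goes
 *	 away once its client references are released)
 */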
2195
2196 /*
2197 * Fails with _NO_RESOURCES.
2198 */
2199 int
2200 rc_node_create_property(rc_node_t *pp, rc_node_lookup_t *nip,
2201 const char *name, rep_protocol_value_type_t type,
2202 const char *vals, size_t count, size_t size)
2203 {
2204 rc_node_t *np;
2205 cache_bucket_t *bp;
2206
2207 uint32_t h = rc_node_hash(nip);
2208 bp = cache_hold(h);
2209 if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2210 cache_release(bp);
2211 /*
2212 * make sure it matches our expectations
2213 */
2214 (void) pthread_mutex_lock(&np->rn_lock);
2215 if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2216 assert(np->rn_parent == pp);
2217 assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2218 assert(strcmp(np->rn_name, name) == 0);
2219 assert(np->rn_valtype == type);
2220 assert(np->rn_values_count == count);
2221 assert(np->rn_values_size == size);
2222 assert(vals == NULL ||
2223 memcmp(np->rn_values, vals, size) == 0);
2224 assert(np->rn_flags & RC_NODE_IN_PARENT);
2225 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2226 }
2227 rc_node_rele_locked(np);
2228 object_free_values(vals, type, count, size);
2229 return (REP_PROTOCOL_SUCCESS);
2230 }
2231
2232 /*
2233 * No one is there -- create a new node.
2234 */
2235 np = rc_node_alloc();
2236 if (np == NULL) {
2237 cache_release(bp);
2238 object_free_values(vals, type, count, size);
2239 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2240 }
2241 np->rn_id = *nip;
2242 np->rn_hash = h;
2243 np->rn_name = strdup(name);
2244 if (np->rn_name == NULL) {
2245 cache_release(bp);
2246 object_free_values(vals, type, count, size);
2247 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2248 }
2249
2250 np->rn_valtype = type;
2251 np->rn_values = vals;
2252 np->rn_values_count = count;
2253 np->rn_values_size = size;
2254
2255 np->rn_flags |= RC_NODE_USING_PARENT;
2256
2257 cache_insert_unlocked(bp, np);
2258 cache_release(bp); /* we are now visible */
2259
2260 rc_node_link_child(pp, np);
2261
2262 return (REP_PROTOCOL_SUCCESS);
2263 }
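
/*
 * Caller note, restated as a sketch (hypothetical names): ownership of
 * 'vals' passes to rc_node_create_property().  On every path it is either
 * stored in the new node or released with object_free_values(), so the
 * caller must not free it afterwards:
 *
 *	rc = rc_node_create_property(pg, &id, propname, type,
 *	    vals, count, size);
 *	(do not free vals here, regardless of rc)
 */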
2264
2265 /*
2266 * This function implements a decision table to determine the event ID for
2267 * changes to the enabled (SCF_PROPERTY_ENABLED) property. The event ID is
2268 * determined by the value of the first property in the command specified
2269 * by cmd_no and the name of the property group. Here is the decision
2270 * table:
2271 *
2272 * Property Group Name
2273 * Property ------------------------------------------
2274 * Value SCF_PG_GENERAL SCF_PG_GENERAL_OVR
2275 * -------- -------------- ------------------
2276 * "0" ADT_smf_disable ADT_smf_tmp_disable
2277 * "1" ADT_smf_enable ADT_smf_tmp_enable
2278 *
2279 * This function is called by special_property_event through a function
2280 * pointer in the special_props_list array.
2281 *
2282 * Since the ADT_smf_* symbols may not be defined in the build machine's
2283 * include files, this function is not compiled when doing native builds.
2284 */
2285 #ifndef NATIVE_BUILD
2286 static int
2287 general_enable_id(tx_commit_data_t *tx_data, size_t cmd_no, const char *pg,
2288 au_event_t *event_id)
2289 {
2290 const char *value;
2291 uint32_t nvalues;
2292 int enable;
2293
2294 /*
2295 * First, check property value.
2296 */
2297 if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
2298 return (-1);
2299 if (nvalues == 0)
2300 return (-1);
2301 if (tx_cmd_value(tx_data, cmd_no, 0, &value) != REP_PROTOCOL_SUCCESS)
2302 return (-1);
2303 if (strcmp(value, "0") == 0) {
2304 enable = 0;
2305 } else if (strcmp(value, "1") == 0) {
2306 enable = 1;
2307 } else {
2308 return (-1);
2309 }
2310
2311 /*
2312 * Now check property group name.
2313 */
2314 if (strcmp(pg, SCF_PG_GENERAL) == 0) {
2315 *event_id = enable ? ADT_smf_enable : ADT_smf_disable;
2316 return (0);
2317 } else if (strcmp(pg, SCF_PG_GENERAL_OVR) == 0) {
2318 *event_id = enable ? ADT_smf_tmp_enable : ADT_smf_tmp_disable;
2319 return (0);
2320 }
2321 return (-1);
2322 }
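
/*
 * Worked example of the decision table above (a sketch; "id" is a
 * hypothetical local): a first property value of "0" against
 * SCF_PG_GENERAL_OVR selects ADT_smf_tmp_disable, while the same value
 * against SCF_PG_GENERAL selects ADT_smf_disable.
 *
 *	au_event_t id;
 *
 *	if (general_enable_id(tx_data, cmd_no, SCF_PG_GENERAL_OVR, &id) == 0)
 *		...id == ADT_smf_tmp_disable when the value was "0"...
 */
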
2323 #endif /* NATIVE_BUILD */
2324
2325 /*
2326 * This function compares two audit_special_prop_item_t structures
2327 * represented by item1 and item2. It returns an integer greater than 0 if
2328 * item1 is greater than item2. It returns 0 if they are equal and an
2329 * integer less than 0 if item1 is less than item2. api_prop_name and
2330 * api_pg_name are the key fields for sorting.
2331 *
2332 * This function is suitable for calls to bsearch(3C) and qsort(3C).
2333 */
2334 static int
2335 special_prop_compare(const void *item1, const void *item2)
2336 {
2337 const audit_special_prop_item_t *a = (audit_special_prop_item_t *)item1;
2338 const audit_special_prop_item_t *b = (audit_special_prop_item_t *)item2;
2339 int r;
2340
2341 r = strcmp(a->api_prop_name, b->api_prop_name);
2342 if (r == 0) {
2343 /*
2344 * Primary keys are the same, so check the secondary key.
2345 */
2346 r = strcmp(a->api_pg_name, b->api_pg_name);
2347 }
2348 return (r);
2349 }
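
/*
 * Sketch of the intended lookup (the array is sorted once in rc_node_init()
 * below; "key", "pgname" and "propname" are hypothetical locals):
 *
 *	audit_special_prop_item_t key, *ap;
 *
 *	key.api_prop_name = propname;
 *	key.api_pg_name = pgname;
 *	ap = bsearch(&key, special_props_list, SPECIAL_PROP_COUNT,
 *	    sizeof (special_props_list[0]), special_prop_compare);
 *	(ap is NULL when the property is not one of the special ones)
 */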
2350
2351 int
2352 rc_node_init(void)
2353 {
2354 rc_node_t *np;
2355 cache_bucket_t *bp;
2356
2357 rc_children_pool = uu_list_pool_create("rc_children_pool",
2358 sizeof (rc_node_t), offsetof(rc_node_t, rn_sibling_node),
2359 NULL, UU_LIST_POOL_DEBUG);
2360
2361 rc_pg_notify_pool = uu_list_pool_create("rc_pg_notify_pool",
2362 sizeof (rc_node_pg_notify_t),
2363 offsetof(rc_node_pg_notify_t, rnpn_node),
2364 NULL, UU_LIST_POOL_DEBUG);
2365
2366 rc_notify_pool = uu_list_pool_create("rc_notify_pool",
2367 sizeof (rc_notify_t), offsetof(rc_notify_t, rcn_list_node),
2368 NULL, UU_LIST_POOL_DEBUG);
2369
2370 rc_notify_info_pool = uu_list_pool_create("rc_notify_info_pool",
2371 sizeof (rc_notify_info_t),
2372 offsetof(rc_notify_info_t, rni_list_node),
2373 NULL, UU_LIST_POOL_DEBUG);
2374
2375 if (rc_children_pool == NULL || rc_pg_notify_pool == NULL ||
2376 rc_notify_pool == NULL || rc_notify_info_pool == NULL)
2377 uu_die("out of memory");
2378
2379 rc_notify_list = uu_list_create(rc_notify_pool,
2380 &rc_notify_list, 0);
2381
2382 rc_notify_info_list = uu_list_create(rc_notify_info_pool,
2383 &rc_notify_info_list, 0);
2384
2385 if (rc_notify_list == NULL || rc_notify_info_list == NULL)
2386 uu_die("out of memory");
2387
2388 /*
2389 * Sort the special_props_list array so that it can be searched
2390 * with bsearch(3C).
2391 *
2392 * The special_props_list array is not compiled into the native
2393 * build code, so there is no need to call qsort if NATIVE_BUILD is
2394 * defined.
2395 */
2396 #ifndef NATIVE_BUILD
2397 qsort(special_props_list, SPECIAL_PROP_COUNT,
2398 sizeof (special_props_list[0]), special_prop_compare);
2399 #endif /* NATIVE_BUILD */
2400
2401 if ((np = rc_node_alloc()) == NULL)
2402 uu_die("out of memory");
2403
2404 rc_node_hold(np);
2405 np->rn_id.rl_type = REP_PROTOCOL_ENTITY_SCOPE;
2406 np->rn_id.rl_backend = BACKEND_TYPE_NORMAL;
2407 np->rn_hash = rc_node_hash(&np->rn_id);
2408 np->rn_name = "localhost";
2409
2410 bp = cache_hold(np->rn_hash);
2411 cache_insert_unlocked(bp, np);
2412 cache_release(bp);
2413
2414 rc_scope = np;
2415 return (1);
2416 }
2417
2418 /*
2419 * Fails with
2420 * _INVALID_TYPE - type is invalid
2421 * _TYPE_MISMATCH - np doesn't carry children of type type
2422 * _DELETED - np has been deleted
2423 * _NO_RESOURCES
2424 */
2425 static int
2426 rc_node_fill_children(rc_node_t *np, uint32_t type)
2427 {
2428 int rc;
2429
2430 assert(MUTEX_HELD(&np->rn_lock));
2431
2432 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
2433 REP_PROTOCOL_SUCCESS)
2434 return (rc);
2435
2436 if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING))
2437 return (REP_PROTOCOL_FAIL_DELETED);
2438
2439 if (np->rn_flags & RC_NODE_HAS_CHILDREN) {
2440 rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
2441 return (REP_PROTOCOL_SUCCESS);
2442 }
2443
2444 (void) pthread_mutex_unlock(&np->rn_lock);
2445 rc = object_fill_children(np);
2446 (void) pthread_mutex_lock(&np->rn_lock);
2447
2448 if (rc == REP_PROTOCOL_SUCCESS) {
2449 np->rn_flags |= RC_NODE_HAS_CHILDREN;
2450 }
2451 rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
2452
2453 return (rc);
2454 }
2455
2456 /*
2457 * Returns
2458 * _INVALID_TYPE - type is invalid
2459 * _TYPE_MISMATCH - np doesn't carry children of type type
2460 * _DELETED - np has been deleted
2461 * _NO_RESOURCES
2462 * _SUCCESS - if *cpp is not NULL, it is held
2463 */
2464 static int
2465 rc_node_find_named_child(rc_node_t *np, const char *name, uint32_t type,
2466 rc_node_t **cpp)
2467 {
2468 int ret;
2469 rc_node_t *cp;
2470
2471 assert(MUTEX_HELD(&np->rn_lock));
2472 assert(np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP);
2473
2474 ret = rc_node_fill_children(np, type);
2475 if (ret != REP_PROTOCOL_SUCCESS)
2476 return (ret);
2477
2478 for (cp = uu_list_first(np->rn_children);
2479 cp != NULL;
2480 cp = uu_list_next(np->rn_children, cp)) {
2481 if (cp->rn_id.rl_type == type && strcmp(cp->rn_name, name) == 0)
2482 break;
2483 }
2484
2485 if (cp != NULL)
2486 rc_node_hold(cp);
2487 *cpp = cp;
2488
2489 return (REP_PROTOCOL_SUCCESS);
2490 }
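
/*
 * Typical caller pattern (a sketch; compare perm_add_pg_prop_values()
 * below): the parent is locked across the call, and a non-NULL result
 * carries a hold that the caller must release.
 *
 *	(void) pthread_mutex_lock(&np->rn_lock);
 *	r = rc_node_find_named_child(np, name, type, &cp);
 *	(void) pthread_mutex_unlock(&np->rn_lock);
 *	if (r != REP_PROTOCOL_SUCCESS)
 *		return (r);
 *	if (cp == NULL)
 *		return (REP_PROTOCOL_FAIL_NOT_FOUND);
 *	...use cp...
 *	rc_node_rele(cp);
 */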
2491
2492 static int rc_node_parent(rc_node_t *, rc_node_t **);
2493
2494 /*
2495 * Returns
2496 * _INVALID_TYPE - type is invalid
2497 * _DELETED - np or an ancestor has been deleted
2498 * _NOT_FOUND - no ancestor of specified type exists
2499 * _SUCCESS - *app is held
2500 */
2501 static int
2502 rc_node_find_ancestor(rc_node_t *np, uint32_t type, rc_node_t **app)
2503 {
2504 int ret;
2505 rc_node_t *parent, *np_orig;
2506
2507 if (type >= REP_PROTOCOL_ENTITY_MAX)
2508 return (REP_PROTOCOL_FAIL_INVALID_TYPE);
2509
2510 np_orig = np;
2511
2512 while (np->rn_id.rl_type > type) {
2513 ret = rc_node_parent(np, &parent);
2514 if (np != np_orig)
2515 rc_node_rele(np);
2516 if (ret != REP_PROTOCOL_SUCCESS)
2517 return (ret);
2518 np = parent;
2519 }
2520
2521 if (np->rn_id.rl_type == type) {
2522 *app = parent;
2523 return (REP_PROTOCOL_SUCCESS);
2524 }
2525
2526 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2527 }
2528
2529 #ifndef NATIVE_BUILD
2530 /*
2531 * If the propname property exists in pg, and it is of type string, add its
2532 * values as authorizations to pcp. pg must not be locked on entry, and it is
2533 * returned unlocked. Returns
2534 * _DELETED - pg was deleted
2535 * _NO_RESOURCES
2536 * _NOT_FOUND - pg has no property named propname
2537 * _SUCCESS
2538 */
2539 static int
2540 perm_add_pg_prop_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
2541 {
2542 rc_node_t *prop;
2543 int result;
2544
2545 uint_t count;
2546 const char *cp;
2547
2548 assert(!MUTEX_HELD(&pg->rn_lock));
2549 assert(pg->rn_id.rl_type == REP_PROTOCOL_ENTITY_PROPERTYGRP);
2550
2551 (void) pthread_mutex_lock(&pg->rn_lock);
2552 result = rc_node_find_named_child(pg, propname,
2553 REP_PROTOCOL_ENTITY_PROPERTY, &prop);
2554 (void) pthread_mutex_unlock(&pg->rn_lock);
2555 if (result != REP_PROTOCOL_SUCCESS) {
2556 switch (result) {
2557 case REP_PROTOCOL_FAIL_DELETED:
2558 case REP_PROTOCOL_FAIL_NO_RESOURCES:
2559 return (result);
2560
2561 case REP_PROTOCOL_FAIL_INVALID_TYPE:
2562 case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
2563 default:
2564 bad_error("rc_node_find_named_child", result);
2565 }
2566 }
2567
2568 if (prop == NULL)
2569 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2570
2571 /* rn_valtype is immutable, so no locking. */
2572 if (prop->rn_valtype != REP_PROTOCOL_TYPE_STRING) {
2573 rc_node_rele(prop);
2574 return (REP_PROTOCOL_SUCCESS);
2575 }
2576
2577 (void) pthread_mutex_lock(&prop->rn_lock);
2578 for (count = prop->rn_values_count, cp = prop->rn_values;
2579 count > 0;
2580 --count) {
2581 result = perm_add_enabling_type(pcp, cp,
2582 (pg->rn_id.rl_ids[ID_INSTANCE]) ? PC_AUTH_INST :
2583 PC_AUTH_SVC);
2584 if (result != REP_PROTOCOL_SUCCESS)
2585 break;
2586
2587 cp = strchr(cp, '\0') + 1;
2588 }
2589
2590 rc_node_rele_locked(prop);
2591
2592 return (result);
2593 }
2594
2595 /*
2596 * Assuming that ent is a service or instance node, if the pgname property
2597 * group has type pgtype, and it has a propname property with string type, add
2598 * its values as authorizations to pcp. If pgtype is NULL, it is not checked.
2599 * Returns
2600 * _SUCCESS
2601 * _DELETED - ent was deleted
2602 * _NO_RESOURCES - no resources
2603 * _NOT_FOUND - ent does not have pgname pg or propname property
2604 */
2605 static int
2606 perm_add_ent_prop_values(permcheck_t *pcp, rc_node_t *ent, const char *pgname,
2607 const char *pgtype, const char *propname)
2608 {
2609 int r;
2610 rc_node_t *pg;
2611
2612 assert(!MUTEX_HELD(&ent->rn_lock));
2613
2614 (void) pthread_mutex_lock(&ent->rn_lock);
2615 r = rc_node_find_named_child(ent, pgname,
2616 REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
2617 (void) pthread_mutex_unlock(&ent->rn_lock);
2618
2619 switch (r) {
2620 case REP_PROTOCOL_SUCCESS:
2621 break;
2622
2623 case REP_PROTOCOL_FAIL_DELETED:
2624 case REP_PROTOCOL_FAIL_NO_RESOURCES:
2625 return (r);
2626
2627 default:
2628 bad_error("rc_node_find_named_child", r);
2629 }
2630
2631 if (pg == NULL)
2632 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2633
2634 if (pgtype == NULL || strcmp(pg->rn_type, pgtype) == 0) {
2635 r = perm_add_pg_prop_values(pcp, pg, propname);
2636 switch (r) {
2637 case REP_PROTOCOL_FAIL_DELETED:
2638 r = REP_PROTOCOL_FAIL_NOT_FOUND;
2639 break;
2640
2641 case REP_PROTOCOL_FAIL_NO_RESOURCES:
2642 case REP_PROTOCOL_SUCCESS:
2643 case REP_PROTOCOL_FAIL_NOT_FOUND:
2644 break;
2645
2646 default:
2647 bad_error("perm_add_pg_prop_values", r);
2648 }
2649 }
2650
2651 rc_node_rele(pg);
2652
2653 return (r);
2654 }
2655
2656 /*
2657 * If pg has a string-typed property named propname, add its values as
2658 * authorizations to pcp. If pg has no such property, and its parent is an
2659 * instance, walk up to the service and try doing the same with the property
2660 * of the same name from the property group of the same name. Returns
2661 * _SUCCESS
2662 * _NO_RESOURCES
2663 * _DELETED - pg (or an ancestor) was deleted
2664 */
2665 static int
2666 perm_add_enabling_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
2667 {
2668 int r;
2669 char pgname[REP_PROTOCOL_NAME_LEN + 1];
2670 rc_node_t *svc;
2671 size_t sz;
2672
2673 r = perm_add_pg_prop_values(pcp, pg, propname);
2674
2675 if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2676 return (r);
2677
2678 assert(!MUTEX_HELD(&pg->rn_lock));
2679
2680 if (pg->rn_id.rl_ids[ID_INSTANCE] == 0)
2681 return (REP_PROTOCOL_SUCCESS);
2682
2683 sz = strlcpy(pgname, pg->rn_name, sizeof (pgname));
2684 assert(sz < sizeof (pgname));
2685
2686 /*
2687 * If pg is a child of an instance or snapshot, we want to compose the
2688 * authorization property with the service's (if it exists). The
2689 * snapshot case applies only to read_authorization. In all other
2690 * cases, the pg's parent will be the instance.
2691 */
2692 r = rc_node_find_ancestor(pg, REP_PROTOCOL_ENTITY_SERVICE, &svc);
2693 if (r != REP_PROTOCOL_SUCCESS) {
2694 assert(r == REP_PROTOCOL_FAIL_DELETED);
2695 return (r);
2696 }
2697 assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);
2698
2699 r = perm_add_ent_prop_values(pcp, svc, pgname, NULL, propname);
2700
2701 rc_node_rele(svc);
2702
2703 if (r == REP_PROTOCOL_FAIL_NOT_FOUND)
2704 r = REP_PROTOCOL_SUCCESS;
2705
2706 return (r);
2707 }
2708
2709 /*
2710 * Call perm_add_enabling_values() for the "action_authorization" property of
2711 * the "general" property group of inst. Returns
2712 * _DELETED - inst (or an ancestor) was deleted
2713 * _NO_RESOURCES
2714 * _SUCCESS
2715 */
2716 static int
2717 perm_add_inst_action_auth(permcheck_t *pcp, rc_node_t *inst)
2718 {
2719 int r;
2720 rc_node_t *svc;
2721
2722 assert(inst->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
2723
2724 r = perm_add_ent_prop_values(pcp, inst, AUTH_PG_GENERAL,
2725 AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2726
2727 if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2728 return (r);
2729
2730 r = rc_node_parent(inst, &svc);
2731 if (r != REP_PROTOCOL_SUCCESS) {
2732 assert(r == REP_PROTOCOL_FAIL_DELETED);
2733 return (r);
2734 }
2735
2736 r = perm_add_ent_prop_values(pcp, svc, AUTH_PG_GENERAL,
2737 AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2738
2739 return (r == REP_PROTOCOL_FAIL_NOT_FOUND ? REP_PROTOCOL_SUCCESS : r);
2740 }
2741 #endif /* NATIVE_BUILD */
2742
2743 void
2744 rc_node_ptr_init(rc_node_ptr_t *out)
2745 {
2746 out->rnp_node = NULL;
2747 out->rnp_auth_string = NULL;
2748 out->rnp_authorized = RC_AUTH_UNKNOWN;
2749 out->rnp_deleted = 0;
2750 }
2751
2752 void
2753 rc_node_ptr_free_mem(rc_node_ptr_t *npp)
2754 {
2755 if (npp->rnp_auth_string != NULL) {
2756 free((void *)npp->rnp_auth_string);
2757 npp->rnp_auth_string = NULL;
2758 }
2759 }
2760
2761 static void
2762 rc_node_assign(rc_node_ptr_t *out, rc_node_t *val)
2763 {
2764 rc_node_t *cur = out->rnp_node;
2765 if (val != NULL)
2766 rc_node_hold(val);
2767 out->rnp_node = val;
2768 if (cur != NULL) {
2769 NODE_LOCK(cur);
2770
2771 /*
2772 * Register the ephemeral reference created by reading
2773 * out->rnp_node into cur. Note that the persistent
2774 * reference we're destroying is locked by the client
2775 * layer.
2776 */
2777 rc_node_hold_ephemeral_locked(cur);
2778
2779 rc_node_rele_locked(cur);
2780 }
2781 out->rnp_authorized = RC_AUTH_UNKNOWN;
2782 rc_node_ptr_free_mem(out);
2783 out->rnp_deleted = 0;
2784 }
2785
2786 void
2787 rc_node_clear(rc_node_ptr_t *out, int deleted)
2788 {
2789 rc_node_assign(out, NULL);
2790 out->rnp_deleted = deleted;
2791 }
2792
2793 void
2794 rc_node_ptr_assign(rc_node_ptr_t *out, const rc_node_ptr_t *val)
2795 {
2796 rc_node_assign(out, val->rnp_node);
2797 }
2798
2799 /*
2800 * rc_node_check()/RC_NODE_CHECK()
2801 * generic "entry" checks, run before the use of an rc_node pointer.
2802 *
2803 * Fails with
2804 * _NOT_SET
2805 * _DELETED
2806 */
2807 static int
2808 rc_node_check_and_lock(rc_node_t *np)
2809 {
2810 int result = REP_PROTOCOL_SUCCESS;
2811 if (np == NULL)
2812 return (REP_PROTOCOL_FAIL_NOT_SET);
2813
2814 (void) pthread_mutex_lock(&np->rn_lock);
2815 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2816 result = REP_PROTOCOL_FAIL_DELETED;
2817 (void) pthread_mutex_unlock(&np->rn_lock);
2818 }
2819
2820 return (result);
2821 }
2822
2823 /*
2824 * Fails with
2825 * _NOT_SET - ptr is reset
2826 * _DELETED - node has been deleted
2827 */
2828 static rc_node_t *
2829 rc_node_ptr_check_and_lock(rc_node_ptr_t *npp, int *res)
2830 {
2831 rc_node_t *np = npp->rnp_node;
2832 if (np == NULL) {
2833 if (npp->rnp_deleted)
2834 *res = REP_PROTOCOL_FAIL_DELETED;
2835 else
2836 *res = REP_PROTOCOL_FAIL_NOT_SET;
2837 return (NULL);
2838 }
2839
2840 (void) pthread_mutex_lock(&np->rn_lock);
2841 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2842 (void) pthread_mutex_unlock(&np->rn_lock);
2843 rc_node_clear(npp, 1);
2844 *res = REP_PROTOCOL_FAIL_DELETED;
2845 return (NULL);
2846 }
2847 return (np);
2848 }
2849
2850 #define RC_NODE_CHECK_AND_LOCK(n) { \
2851 int rc__res; \
2852 if ((rc__res = rc_node_check_and_lock(n)) != REP_PROTOCOL_SUCCESS) \
2853 return (rc__res); \
2854 }
2855
2856 #define RC_NODE_CHECK(n) { \
2857 RC_NODE_CHECK_AND_LOCK(n); \
2858 (void) pthread_mutex_unlock(&(n)->rn_lock); \
2859 }
2860
2861 #define RC_NODE_CHECK_AND_HOLD(n) { \
2862 RC_NODE_CHECK_AND_LOCK(n); \
2863 rc_node_hold_locked(n); \
2864 (void) pthread_mutex_unlock(&(n)->rn_lock); \
2865 }
2866
2867 #define RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp) { \
2868 int rc__res; \
2869 if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == NULL) \
2870 return (rc__res); \
2871 }
2872
2873 #define RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, mem) { \
2874 int rc__res; \
2875 if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == \
2876 NULL) { \
2877 if ((mem) != NULL) \
2878 free((mem)); \
2879 return (rc__res); \
2880 } \
2881 }
2882
2883 #define RC_NODE_PTR_GET_CHECK(np, npp) { \
2884 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \
2885 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2886 }
2887
2888 #define RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp) { \
2889 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp); \
2890 rc_node_hold_locked(np); \
2891 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2892 }
2893
2894 #define HOLD_FLAG_OR_RETURN(np, flag) { \
2895 assert(MUTEX_HELD(&(np)->rn_lock)); \
2896 assert(!((np)->rn_flags & RC_NODE_DEAD)); \
2897 if (!rc_node_hold_flag((np), flag)) { \
2898 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2899 return (REP_PROTOCOL_FAIL_DELETED); \
2900 } \
2901 }
2902
2903 #define HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, flag, mem) { \
2904 assert(MUTEX_HELD(&(np)->rn_lock)); \
2905 if (!rc_node_hold_flag((np), flag)) { \
2906 (void) pthread_mutex_unlock(&(np)->rn_lock); \
2907 assert((np) == (npp)->rnp_node); \
2908 rc_node_clear(npp, 1); \
2909 if ((mem) != NULL) \
2910 free((mem)); \
2911 return (REP_PROTOCOL_FAIL_DELETED); \
2912 } \
2913 }
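
/*
 * Illustrative use of the entry-check macros (a hypothetical protocol
 * entry point; the real ones follow below):
 *
 *	int
 *	rc_node_do_something(rc_node_ptr_t *npp)
 *	{
 *		rc_node_t *np;
 *
 *		RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
 *		...np->rn_lock is held; np is neither unset nor dead...
 *		(void) pthread_mutex_unlock(&np->rn_lock);
 *		return (REP_PROTOCOL_SUCCESS);
 *	}
 */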
2914
2915 int
2916 rc_local_scope(uint32_t type, rc_node_ptr_t *out)
2917 {
2918 if (type != REP_PROTOCOL_ENTITY_SCOPE) {
2919 rc_node_clear(out, 0);
2920 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2921 }
2922
2923 /*
2924 * the main scope never gets destroyed
2925 */
2926 rc_node_assign(out, rc_scope);
2927
2928 return (REP_PROTOCOL_SUCCESS);
2929 }
2930
2931 /*
2932 * Fails with
2933 * _NOT_SET - npp is not set
2934 * _DELETED - the node npp pointed at has been deleted
2935 * _TYPE_MISMATCH - type is not _SCOPE
2936 * _NOT_FOUND - scope has no parent
2937 */
2938 static int
2939 rc_scope_parent_scope(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
2940 {
2941 rc_node_t *np;
2942
2943 rc_node_clear(out, 0);
2944
2945 RC_NODE_PTR_GET_CHECK(np, npp);
2946
2947 if (type != REP_PROTOCOL_ENTITY_SCOPE)
2948 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2949
2950 return (REP_PROTOCOL_FAIL_NOT_FOUND);
2951 }
2952
2953 static int rc_node_pg_check_read_protect(rc_node_t *);
2954
2955 /*
2956 * Fails with
2957 * _NOT_SET
2958 * _DELETED
2959 * _NOT_APPLICABLE
2960 * _NOT_FOUND
2961 * _BAD_REQUEST
2962 * _TRUNCATED
2963 * _NO_RESOURCES
2964 */
2965 int
2966 rc_node_name(rc_node_ptr_t *npp, char *buf, size_t sz, uint32_t answertype,
2967 size_t *sz_out)
2968 {
2969 size_t actual;
2970 rc_node_t *np;
2971
2972 assert(sz == *sz_out);
2973
2974 RC_NODE_PTR_GET_CHECK(np, npp);
2975
2976 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
2977 np = np->rn_cchain[0];
2978 RC_NODE_CHECK(np);
2979 }
2980
2981 switch (answertype) {
2982 case RP_ENTITY_NAME_NAME:
2983 if (np->rn_name == NULL)
2984 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
2985 actual = strlcpy(buf, np->rn_name, sz);
2986 break;
2987 case RP_ENTITY_NAME_PGTYPE:
2988 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
2989 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
2990 actual = strlcpy(buf, np->rn_type, sz);
2991 break;
2992 case RP_ENTITY_NAME_PGFLAGS:
2993 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
2994 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
2995 actual = snprintf(buf, sz, "%d", np->rn_pgflags);
2996 break;
2997 case RP_ENTITY_NAME_SNAPLEVEL_SCOPE:
2998 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
2999 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3000 actual = strlcpy(buf, np->rn_snaplevel->rsl_scope, sz);
3001 break;
3002 case RP_ENTITY_NAME_SNAPLEVEL_SERVICE:
3003 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3004 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3005 actual = strlcpy(buf, np->rn_snaplevel->rsl_service, sz);
3006 break;
3007 case RP_ENTITY_NAME_SNAPLEVEL_INSTANCE:
3008 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3009 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3010 if (np->rn_snaplevel->rsl_instance == NULL)
3011 return (REP_PROTOCOL_FAIL_NOT_FOUND);
3012 actual = strlcpy(buf, np->rn_snaplevel->rsl_instance, sz);
3013 break;
3014 case RP_ENTITY_NAME_PGREADPROT:
3015 {
3016 int ret;
3017
3018 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
3019 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3020 ret = rc_node_pg_check_read_protect(np);
3021 assert(ret != REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3022 switch (ret) {
3023 case REP_PROTOCOL_FAIL_PERMISSION_DENIED:
3024 actual = snprintf(buf, sz, "1");
3025 break;
3026 case REP_PROTOCOL_SUCCESS:
3027 actual = snprintf(buf, sz, "0");
3028 break;
3029 default:
3030 return (ret);
3031 }
3032 break;
3033 }
3034 default:
3035 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
3036 }
3037 if (actual >= sz)
3038 return (REP_PROTOCOL_FAIL_TRUNCATED);
3039
3040 *sz_out = actual;
3041 return (REP_PROTOCOL_SUCCESS);
3042 }
3043
3044 int
3045 rc_node_get_property_type(rc_node_ptr_t *npp, rep_protocol_value_type_t *out)
3046 {
3047 rc_node_t *np;
3048
3049 RC_NODE_PTR_GET_CHECK(np, npp);
3050
3051 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
3052 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3053
3054 *out = np->rn_valtype;
3055
3056 return (REP_PROTOCOL_SUCCESS);
3057 }
3058
3059 /*
3060 * Get np's parent. If np is deleted, returns _DELETED. Otherwise puts a hold
3061 * on the parent, returns a pointer to it in *out, and returns _SUCCESS.
3062 */
3063 static int
3064 rc_node_parent(rc_node_t *np, rc_node_t **out)
3065 {
3066 rc_node_t *pnp;
3067 rc_node_t *np_orig;
3068
3069 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3070 RC_NODE_CHECK_AND_LOCK(np);
3071 } else {
3072 np = np->rn_cchain[0];
3073 RC_NODE_CHECK_AND_LOCK(np);
3074 }
3075
3076 np_orig = np;
3077 rc_node_hold_locked(np); /* simplifies the remainder */
3078
3079 for (;;) {
3080 if (!rc_node_wait_flag(np,
3081 RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
3082 rc_node_rele_locked(np);
3083 return (REP_PROTOCOL_FAIL_DELETED);
3084 }
3085
3086 if (!(np->rn_flags & RC_NODE_OLD))
3087 break;
3088
3089 rc_node_rele_locked(np);
3090 np = cache_lookup(&np_orig->rn_id);
3091 assert(np != np_orig);
3092
3093 if (np == NULL)
3094 goto deleted;
3095 (void) pthread_mutex_lock(&np->rn_lock);
3096 }
3097
3098 /* guaranteed to succeed without dropping the lock */
3099 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
3100 (void) pthread_mutex_unlock(&np->rn_lock);
3101 *out = NULL;
3102 rc_node_rele(np);
3103 return (REP_PROTOCOL_FAIL_DELETED);
3104 }
3105
3106 assert(np->rn_parent != NULL);
3107 pnp = np->rn_parent;
3108 (void) pthread_mutex_unlock(&np->rn_lock);
3109
3110 (void) pthread_mutex_lock(&pnp->rn_lock);
3111 (void) pthread_mutex_lock(&np->rn_lock);
3112 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
3113 (void) pthread_mutex_unlock(&np->rn_lock);
3114
3115 rc_node_hold_locked(pnp);
3116
3117 (void) pthread_mutex_unlock(&pnp->rn_lock);
3118
3119 rc_node_rele(np);
3120 *out = pnp;
3121 return (REP_PROTOCOL_SUCCESS);
3122
3123 deleted:
3124 rc_node_rele(np);
3125 return (REP_PROTOCOL_FAIL_DELETED);
3126 }
3127
3128 /*
3129 * Fails with
3130 * _NOT_SET
3131 * _DELETED
3132 */
3133 static int
3134 rc_node_ptr_parent(rc_node_ptr_t *npp, rc_node_t **out)
3135 {
3136 rc_node_t *np;
3137
3138 RC_NODE_PTR_GET_CHECK(np, npp);
3139
3140 return (rc_node_parent(np, out));
3141 }
3142
3143 /*
3144 * Fails with
3145 * _NOT_SET - npp is not set
3146 * _DELETED - the node npp pointed at has been deleted
3147 * _TYPE_MISMATCH - npp's node's parent is not of type type
3148 *
3149 * If npp points to a scope, can also fail with
3150 * _NOT_FOUND - scope has no parent
3151 */
3152 int
3153 rc_node_get_parent(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
3154 {
3155 rc_node_t *pnp;
3156 int rc;
3157
3158 if (npp->rnp_node != NULL &&
3159 npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE)
3160 return (rc_scope_parent_scope(npp, type, out));
3161
3162 if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS) {
3163 rc_node_clear(out, 0);
3164 return (rc);
3165 }
3166
3167 if (type != pnp->rn_id.rl_type) {
3168 rc_node_rele(pnp);
3169 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3170 }
3171
3172 rc_node_assign(out, pnp);
3173 rc_node_rele(pnp);
3174
3175 return (REP_PROTOCOL_SUCCESS);
3176 }
3177
3178 int
3179 rc_node_parent_type(rc_node_ptr_t *npp, uint32_t *type_out)
3180 {
3181 rc_node_t *pnp;
3182 int rc;
3183
3184 if (npp->rnp_node != NULL &&
3185 npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE) {
3186 *type_out = REP_PROTOCOL_ENTITY_SCOPE;
3187 return (REP_PROTOCOL_SUCCESS);
3188 }
3189
3190 if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS)
3191 return (rc);
3192
3193 *type_out = pnp->rn_id.rl_type;
3194
3195 rc_node_rele(pnp);
3196
3197 return (REP_PROTOCOL_SUCCESS);
3198 }
3199
3200 /*
3201 * Fails with
3202 * _INVALID_TYPE - type is invalid
3203 * _TYPE_MISMATCH - np doesn't carry children of type type
3204 * _DELETED - np has been deleted
3205 * _NOT_FOUND - no child with that name/type combo found
3206 * _NO_RESOURCES
3207 * _BACKEND_ACCESS
3208 */
3209 int
3210 rc_node_get_child(rc_node_ptr_t *npp, const char *name, uint32_t type,
3211 rc_node_ptr_t *outp)
3212 {
3213 rc_node_t *np, *cp;
3214 rc_node_t *child = NULL;
3215 int ret, idx;
3216
3217 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
3218 if ((ret = rc_check_type_name(type, name)) == REP_PROTOCOL_SUCCESS) {
3219 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3220 ret = rc_node_find_named_child(np, name, type, &child);
3221 } else {
3222 (void) pthread_mutex_unlock(&np->rn_lock);
3223 ret = REP_PROTOCOL_SUCCESS;
3224 for (idx = 0; idx < COMPOSITION_DEPTH; idx++) {
3225 cp = np->rn_cchain[idx];
3226 if (cp == NULL)
3227 break;
3228 RC_NODE_CHECK_AND_LOCK(cp);
3229 ret = rc_node_find_named_child(cp, name, type,
3230 &child);
3231 (void) pthread_mutex_unlock(&cp->rn_lock);
3232 /*
3233 * loop only if we succeeded, but no child of
3234 * the correct name was found.
3235 */
3236 if (ret != REP_PROTOCOL_SUCCESS ||
3237 child != NULL)
3238 break;
3239 }
3240 (void) pthread_mutex_lock(&np->rn_lock);
3241 }
3242 }
3243 (void) pthread_mutex_unlock(&np->rn_lock);
3244
3245 if (ret == REP_PROTOCOL_SUCCESS) {
3246 rc_node_assign(outp, child);
3247 if (child != NULL)
3248 rc_node_rele(child);
3249 else
3250 ret = REP_PROTOCOL_FAIL_NOT_FOUND;
3251 } else {
3252 rc_node_assign(outp, NULL);
3253 }
3254 return (ret);
3255 }
3256
3257 int
3258 rc_node_update(rc_node_ptr_t *npp)
3259 {
3260 cache_bucket_t *bp;
3261 rc_node_t *np = npp->rnp_node;
3262 rc_node_t *nnp;
3263 rc_node_t *cpg = NULL;
3264
3265 if (np != NULL &&
3266 np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3267 /*
3268 * If we're updating a composed property group, actually
3269 * update the top-level property group & return the
3270 * appropriate value. But leave *npp pointing at us.
3271 */
3272 cpg = np;
3273 np = np->rn_cchain[0];
3274 }
3275
3276 RC_NODE_CHECK(np);
3277
3278 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP &&
3279 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT)
3280 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
3281
3282 for (;;) {
3283 bp = cache_hold(np->rn_hash);
3284 nnp = cache_lookup_unlocked(bp, &np->rn_id);
3285 if (nnp == NULL) {
3286 cache_release(bp);
3287 rc_node_clear(npp, 1);
3288 return (REP_PROTOCOL_FAIL_DELETED);
3289 }
3290 /*
3291 * grab the lock before dropping the cache bucket, so
3292 * that no one else can sneak in
3293 */
3294 (void) pthread_mutex_lock(&nnp->rn_lock);
3295 cache_release(bp);
3296
3297 if (!(nnp->rn_flags & RC_NODE_IN_TX) ||
3298 !rc_node_wait_flag(nnp, RC_NODE_IN_TX))
3299 break;
3300
3301 rc_node_rele_locked(nnp);
3302 }
3303
3304 /*
3305 * If it is dead, we want to update it so that it will continue to
3306 * report being dead.
3307 */
3308 if (nnp->rn_flags & RC_NODE_DEAD) {
3309 (void) pthread_mutex_unlock(&nnp->rn_lock);
3310 if (nnp != np && cpg == NULL)
3311 rc_node_assign(npp, nnp); /* updated */
3312 rc_node_rele(nnp);
3313 return (REP_PROTOCOL_FAIL_DELETED);
3314 }
3315
3316 assert(!(nnp->rn_flags & RC_NODE_OLD));
3317 (void) pthread_mutex_unlock(&nnp->rn_lock);
3318
3319 if (nnp != np && cpg == NULL)
3320 rc_node_assign(npp, nnp); /* updated */
3321
3322 rc_node_rele(nnp);
3323
3324 return ((nnp == np)? REP_PROTOCOL_SUCCESS : REP_PROTOCOL_DONE);
3325 }
3326
3327 /*
3328 * does a generic modification check, for creation, deletion, and snapshot
3329 * management only. Property group transactions have different checks.
3330 *
3331 * The string returned to *match_auth must be freed.
3332 */
3333 static perm_status_t
3334 rc_node_modify_permission_check(char **match_auth)
3335 {
3336 permcheck_t *pcp;
3337 perm_status_t granted = PERM_GRANTED;
3338 int rc;
3339
3340 *match_auth = NULL;
3341 #ifdef NATIVE_BUILD
3342 if (!client_is_privileged()) {
3343 granted = PERM_DENIED;
3344 }
3345 return (granted);
3346 #else
3347 if (is_main_repository == 0)
3348 return (PERM_GRANTED);
3349 pcp = pc_create();
3350 if (pcp != NULL) {
3351 rc = perm_add_enabling(pcp, AUTH_MODIFY);
3352
3353 if (rc == REP_PROTOCOL_SUCCESS) {
3354 granted = perm_granted(pcp);
3355
3356 if ((granted == PERM_GRANTED) ||
3357 (granted == PERM_DENIED)) {
3358 /*
3359 * Copy off the authorization
3360 * string before freeing pcp.
3361 */
3362 *match_auth =
3363 strdup(pcp->pc_auth_string);
3364 if (*match_auth == NULL)
3365 granted = PERM_FAIL;
3366 }
3367 } else {
3368 granted = PERM_FAIL;
3369 }
3370
3371 pc_free(pcp);
3372 } else {
3373 granted = PERM_FAIL;
3374 }
3375
3376 return (granted);
3377 #endif /* NATIVE_BUILD */
3378 }
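
/*
 * Caller sketch (hypothetical; the create, delete and snapshot entry
 * points below follow this shape).  The returned string must be freed by
 * the caller, per the comment above.
 *
 *	char *auths = NULL;
 *
 *	switch (rc_node_modify_permission_check(&auths)) {
 *	case PERM_GRANTED:
 *		...proceed, auditing with auths as the auth_used string...
 *		break;
 *	case PERM_DENIED:
 *		...audit the denial; fail with _PERMISSION_DENIED...
 *		break;
 *	case PERM_GONE:
 *		...door client went away; fail with _PERMISSION_DENIED...
 *		break;
 *	case PERM_FAIL:
 *		...fail with _NO_RESOURCES...
 *		break;
 *	}
 *	free(auths);
 */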
3379
3380 /*
3381 * Native builds are done to create svc.configd-native. This program runs
3382 * only on the Solaris build machines to create the seed repository, and it
3383 * is compiled against the build machine's header files. The ADT_smf_*
3384 * symbols may not be defined in these header files. For this reason
3385 * smf_annotation_event(), smf_audit_event() and special_property_event()
3386 * are not compiled for native builds.
3387 */
3388 #ifndef NATIVE_BUILD
3389
3390 /*
3391 * This function generates an annotation audit event if one has been setup.
3392 * Annotation events should only be generated immediately before the audit
3393 * record from the first attempt to modify the repository from a client
3394 * which has requested an annotation.
3395 */
3396 static void
3397 smf_annotation_event(int status, int return_val)
3398 {
3399 adt_session_data_t *session;
3400 adt_event_data_t *event = NULL;
3401 char file[MAXPATHLEN];
3402 char operation[REP_PROTOCOL_NAME_LEN];
3403
3404 /* Don't audit if we're using an alternate repository. */
3405 if (is_main_repository == 0)
3406 return;
3407
3408 if (client_annotation_needed(operation, sizeof (operation), file,
3409 sizeof (file)) == 0) {
3410 return;
3411 }
3412 if (file[0] == 0) {
3413 (void) strlcpy(file, "NO FILE", sizeof (file));
3414 }
3415 if (operation[0] == 0) {
3416 (void) strlcpy(operation, "NO OPERATION",
3417 sizeof (operation));
3418 }
3419 if ((session = get_audit_session()) == NULL)
3420 return;
3421 if ((event = adt_alloc_event(session, ADT_smf_annotation)) == NULL) {
3422 uu_warn("smf_annotation_event cannot allocate event "
3423 "data. %s\n", strerror(errno));
3424 return;
3425 }
3426 event->adt_smf_annotation.operation = operation;
3427 event->adt_smf_annotation.file = file;
3428 if (adt_put_event(event, status, return_val) == 0) {
3429 client_annotation_finished();
3430 } else {
3431 uu_warn("smf_annotation_event failed to put event. "
3432 "%s\n", strerror(errno));
3433 }
3434 adt_free_event(event);
3435 }
3436 #endif
3437
3438 /*
3439 * smf_audit_event interacts with the security auditing system to generate
3440 * an audit event structure. It establishes an audit session and allocates
3441 * an audit event. The event is filled in from the audit data, and
3442 * adt_put_event is called to generate the event.
3443 */
3444 static void
3445 smf_audit_event(au_event_t event_id, int status, int return_val,
3446 audit_event_data_t *data)
3447 {
3448 #ifndef NATIVE_BUILD
3449 char *auth_used;
3450 char *fmri;
3451 char *prop_value;
3452 adt_session_data_t *session;
3453 adt_event_data_t *event = NULL;
3454
3455 /* Don't audit if we're using an alternate repository */
3456 if (is_main_repository == 0)
3457 return;
3458
3459 smf_annotation_event(status, return_val);
3460 if ((session = get_audit_session()) == NULL)
3461 return;
3462 if ((event = adt_alloc_event(session, event_id)) == NULL) {
3463 uu_warn("smf_audit_event cannot allocate event "
3464 "data. %s\n", strerror(errno));
3465 return;
3466 }
3467
3468 /*
3469 * Handle possibility of NULL authorization strings, FMRIs and
3470 * property values.
3471 */
3472 if (data->ed_auth == NULL) {
3473 auth_used = "PRIVILEGED";
3474 } else {
3475 auth_used = data->ed_auth;
3476 }
3477 if (data->ed_fmri == NULL) {
3478 syslog(LOG_WARNING, "smf_audit_event called with "
3479 "empty FMRI string");
3480 fmri = "UNKNOWN FMRI";
3481 } else {
3482 fmri = data->ed_fmri;
3483 }
3484 if (data->ed_prop_value == NULL) {
3485 prop_value = "";
3486 } else {
3487 prop_value = data->ed_prop_value;
3488 }
3489
3490 /* Fill in the event data. */
3491 switch (event_id) {
3492 case ADT_smf_attach_snap:
3493 event->adt_smf_attach_snap.auth_used = auth_used;
3494 event->adt_smf_attach_snap.old_fmri = data->ed_old_fmri;
3495 event->adt_smf_attach_snap.old_name = data->ed_old_name;
3496 event->adt_smf_attach_snap.new_fmri = fmri;
3497 event->adt_smf_attach_snap.new_name = data->ed_snapname;
3498 break;
3499 case ADT_smf_change_prop:
3500 event->adt_smf_change_prop.auth_used = auth_used;
3501 event->adt_smf_change_prop.fmri = fmri;
3502 event->adt_smf_change_prop.type = data->ed_type;
3503 event->adt_smf_change_prop.value = prop_value;
3504 break;
3505 case ADT_smf_clear:
3506 event->adt_smf_clear.auth_used = auth_used;
3507 event->adt_smf_clear.fmri = fmri;
3508 break;
3509 case ADT_smf_create:
3510 event->adt_smf_create.fmri = fmri;
3511 event->adt_smf_create.auth_used = auth_used;
3512 break;
3513 case ADT_smf_create_npg:
3514 event->adt_smf_create_npg.auth_used = auth_used;
3515 event->adt_smf_create_npg.fmri = fmri;
3516 event->adt_smf_create_npg.type = data->ed_type;
3517 break;
3518 case ADT_smf_create_pg:
3519 event->adt_smf_create_pg.auth_used = auth_used;
3520 event->adt_smf_create_pg.fmri = fmri;
3521 event->adt_smf_create_pg.type = data->ed_type;
3522 break;
3523 case ADT_smf_create_prop:
3524 event->adt_smf_create_prop.auth_used = auth_used;
3525 event->adt_smf_create_prop.fmri = fmri;
3526 event->adt_smf_create_prop.type = data->ed_type;
3527 event->adt_smf_create_prop.value = prop_value;
3528 break;
3529 case ADT_smf_create_snap:
3530 event->adt_smf_create_snap.auth_used = auth_used;
3531 event->adt_smf_create_snap.fmri = fmri;
3532 event->adt_smf_create_snap.name = data->ed_snapname;
3533 break;
3534 case ADT_smf_degrade:
3535 event->adt_smf_degrade.auth_used = auth_used;
3536 event->adt_smf_degrade.fmri = fmri;
3537 break;
3538 case ADT_smf_delete:
3539 event->adt_smf_delete.fmri = fmri;
3540 event->adt_smf_delete.auth_used = auth_used;
3541 break;
3542 case ADT_smf_delete_npg:
3543 event->adt_smf_delete_npg.auth_used = auth_used;
3544 event->adt_smf_delete_npg.fmri = fmri;
3545 event->adt_smf_delete_npg.type = data->ed_type;
3546 break;
3547 case ADT_smf_delete_pg:
3548 event->adt_smf_delete_pg.auth_used = auth_used;
3549 event->adt_smf_delete_pg.fmri = fmri;
3550 event->adt_smf_delete_pg.type = data->ed_type;
3551 break;
3552 case ADT_smf_delete_prop:
3553 event->adt_smf_delete_prop.auth_used = auth_used;
3554 event->adt_smf_delete_prop.fmri = fmri;
3555 break;
3556 case ADT_smf_delete_snap:
3557 event->adt_smf_delete_snap.auth_used = auth_used;
3558 event->adt_smf_delete_snap.fmri = fmri;
3559 event->adt_smf_delete_snap.name = data->ed_snapname;
3560 break;
3561 case ADT_smf_disable:
3562 event->adt_smf_disable.auth_used = auth_used;
3563 event->adt_smf_disable.fmri = fmri;
3564 break;
3565 case ADT_smf_enable:
3566 event->adt_smf_enable.auth_used = auth_used;
3567 event->adt_smf_enable.fmri = fmri;
3568 break;
3569 case ADT_smf_immediate_degrade:
3570 event->adt_smf_immediate_degrade.auth_used = auth_used;
3571 event->adt_smf_immediate_degrade.fmri = fmri;
3572 break;
3573 case ADT_smf_immediate_maintenance:
3574 event->adt_smf_immediate_maintenance.auth_used = auth_used;
3575 event->adt_smf_immediate_maintenance.fmri = fmri;
3576 break;
3577 case ADT_smf_immtmp_maintenance:
3578 event->adt_smf_immtmp_maintenance.auth_used = auth_used;
3579 event->adt_smf_immtmp_maintenance.fmri = fmri;
3580 break;
3581 case ADT_smf_maintenance:
3582 event->adt_smf_maintenance.auth_used = auth_used;
3583 event->adt_smf_maintenance.fmri = fmri;
3584 break;
3585 case ADT_smf_milestone:
3586 event->adt_smf_milestone.auth_used = auth_used;
3587 event->adt_smf_milestone.fmri = fmri;
3588 break;
3589 case ADT_smf_read_prop:
3590 event->adt_smf_read_prop.auth_used = auth_used;
3591 event->adt_smf_read_prop.fmri = fmri;
3592 break;
3593 case ADT_smf_refresh:
3594 event->adt_smf_refresh.auth_used = auth_used;
3595 event->adt_smf_refresh.fmri = fmri;
3596 break;
3597 case ADT_smf_restart:
3598 event->adt_smf_restart.auth_used = auth_used;
3599 event->adt_smf_restart.fmri = fmri;
3600 break;
3601 case ADT_smf_tmp_disable:
3602 event->adt_smf_tmp_disable.auth_used = auth_used;
3603 event->adt_smf_tmp_disable.fmri = fmri;
3604 break;
3605 case ADT_smf_tmp_enable:
3606 event->adt_smf_tmp_enable.auth_used = auth_used;
3607 event->adt_smf_tmp_enable.fmri = fmri;
3608 break;
3609 case ADT_smf_tmp_maintenance:
3610 event->adt_smf_tmp_maintenance.auth_used = auth_used;
3611 event->adt_smf_tmp_maintenance.fmri = fmri;
3612 break;
3613 default:
3614 abort(); /* Need to cover all SMF event IDs */
3615 }
3616
3617 if (adt_put_event(event, status, return_val) != 0) {
3618 uu_warn("smf_audit_event failed to put event. %s\n",
3619 strerror(errno));
3620 }
3621 adt_free_event(event);
3622 #endif
3623 }
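/*
 * Illustrative caller-side sketch (added for clarity; not part of the
 * original file).  Callers in this file fill in an audit_event_data_t and
 * invoke smf_audit_event() either on an authorization failure or on
 * success, along these lines (variable names abbreviated):
 *
 *	audit_event_data_t audit_data;
 *
 *	audit_data.ed_auth = auth_string;	// NULL maps to "PRIVILEGED"
 *	audit_data.ed_fmri = fmri;
 *	if (denied) {
 *		smf_audit_event(ADT_smf_create, ADT_FAILURE,
 *		    ADT_FAIL_VALUE_AUTH, &audit_data);
 *	} else if (rc == REP_PROTOCOL_SUCCESS) {
 *		smf_audit_event(ADT_smf_create, ADT_SUCCESS, ADT_SUCCESS,
 *		    &audit_data);
 *	}
 *
 * See rc_node_create_child() below for a real instance of this pattern.
 */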
3624
3625 #ifndef NATIVE_BUILD
3626 /*
3627 * Determine if the combination of the property group at pg_name and the
3628 * property at prop_name are in the set of special startd properties. If
3629 * they are, a special audit event will be generated.
3630 */
3631 static void
3632 special_property_event(audit_event_data_t *evdp, const char *prop_name,
3633 char *pg_name, int status, int return_val, tx_commit_data_t *tx_data,
3634 size_t cmd_no)
3635 {
3636 au_event_t event_id;
3637 audit_special_prop_item_t search_key;
3638 audit_special_prop_item_t *found;
3639
3640 /* Use bsearch to find the special property information. */
3641 search_key.api_prop_name = prop_name;
3642 search_key.api_pg_name = pg_name;
3643 found = (audit_special_prop_item_t *)bsearch(&search_key,
3644 special_props_list, SPECIAL_PROP_COUNT,
3645 sizeof (special_props_list[0]), special_prop_compare);
3646 if (found == NULL) {
3647 /* Not a special property. */
3648 return;
3649 }
3650
3651 /* Get the event id */
3652 if (found->api_event_func == NULL) {
3653 event_id = found->api_event_id;
3654 } else {
3655 if ((*found->api_event_func)(tx_data, cmd_no,
3656 found->api_pg_name, &event_id) < 0)
3657 return;
3658 }
3659
3660 /* Generate the event. */
3661 smf_audit_event(event_id, status, return_val, evdp);
3662 }
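/*
 * Illustrative sketch only: the real special_prop_compare() and
 * special_props_list[] are defined elsewhere in this file.  A bsearch()
 * comparator for a two-key table like this one would typically compare
 * the property group name first and the property name second, e.g.:
 *
 *	static int
 *	example_special_prop_compare(const void *l, const void *r)
 *	{
 *		const audit_special_prop_item_t *lp = l;
 *		const audit_special_prop_item_t *rp = r;
 *		int cmp = strcmp(lp->api_pg_name, rp->api_pg_name);
 *
 *		return (cmp != 0 ? cmp :
 *		    strcmp(lp->api_prop_name, rp->api_prop_name));
 *	}
 *
 * Whatever key order special_prop_compare() actually uses must match the
 * sort order of special_props_list[]; treat the above as a shape, not as
 * the actual implementation.
 */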
3663 #endif /* NATIVE_BUILD */
3664
3665 /*
3666 * Return a pointer to a string containing all the values of the command
3667 * specified by cmd_no with each value enclosed in quotes. It is up to the
3668 * caller to free the memory at the returned pointer.
3669 */
3670 static char *
3671 generate_value_list(tx_commit_data_t *tx_data, size_t cmd_no)
3672 {
3673 const char *cp;
3674 const char *cur_value;
3675 size_t byte_count = 0;
3676 uint32_t i;
3677 uint32_t nvalues;
3678 size_t str_size = 0;
3679 char *values = NULL;
3680 char *vp;
3681
3682 if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
3683 return (NULL);
3684 /*
3685 * First determine the size of the buffer that we will need. We
3686 * will represent each property value surrounded by quotes with a
3687 * space separating the values. Thus, we need to find the total
3688 * size of all the value strings and add 3 for each value.
3689 *
3690 * There is one catch, though. We need to escape any internal
3691 * quote marks in the values. So for each quote in the value we
3692 * need to add another byte to the buffer size.
3693 */
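	/*
	 * Worked example (added for illustration): for the two values
	 * ab and c"d, the first pass counts 2 + 3 bytes for ab and
	 * 4 + 3 bytes for c"d (the embedded quote is counted twice to
	 * leave room for its backslash), plus 1 for the terminator:
	 * a 13-byte buffer.  The assembled result, "ab" "c\"d", is 11
	 * characters once the trailing space is stripped, so it fits.
	 */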
3694 for (i = 0; i < nvalues; i++) {
3695 if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
3696 REP_PROTOCOL_SUCCESS)
3697 return (NULL);
3698 for (cp = cur_value; *cp != 0; cp++) {
3699 byte_count += (*cp == '"') ? 2 : 1;
3700 }
3701 byte_count += 3; /* surrounding quotes & space */
3702 }
3703 byte_count++; /* nul terminator */
3704 values = malloc(byte_count);
3705 if (values == NULL)
3706 return (NULL);
3707 *values = 0;
3708
3709 /* Now build up the string of values. */
3710 for (i = 0; i < nvalues; i++) {
3711 if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
3712 REP_PROTOCOL_SUCCESS) {
3713 free(values);
3714 return (NULL);
3715 }
3716 (void) strlcat(values, "\"", byte_count);
3717 for (cp = cur_value, vp = values + strlen(values);
3718 *cp != 0; cp++) {
3719 if (*cp == '"') {
3720 *vp++ = '\\';
3721 *vp++ = '"';
3722 } else {
3723 *vp++ = *cp;
3724 }
3725 }
3726 *vp = 0;
3727 str_size = strlcat(values, "\" ", byte_count);
3728 assert(str_size < byte_count);
3729 }
3730 if (str_size > 0)
3731 values[str_size - 1] = 0; /* get rid of trailing space */
3732 return (values);
3733 }
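/*
 * Illustrative usage sketch (added for clarity; not part of the original
 * file).  The caller owns the returned buffer and must free() it, as
 * generate_property_events() below does:
 *
 *	char *vals = generate_value_list(tx_data, cmd_no);
 *
 *	if (vals != NULL) {
 *		audit_data.ed_prop_value = vals;
 *		smf_audit_event(event_id, status, ret, &audit_data);
 *		free(vals);
 *	}
 *
 * A NULL return means either an out-of-memory condition or a protocol
 * failure while reading the values; in that case the audit record is
 * simply generated with an empty value string (see smf_audit_event()).
 */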
3734
3735 /*
3736 * generate_property_events takes the transaction commit data at tx_data
3737 * and generates an audit event for each command.
3738 *
3739 * Native builds are done to create svc.configd-native. This program runs
3740 * only on the Solaris build machines to create the seed repository. Thus,
3741 * no audit events should be generated when running svc.configd-native.
3742 */
3743 static void
3744 generate_property_events(
3745 tx_commit_data_t *tx_data,
3746 char *pg_fmri, /* FMRI of property group */
3747 char *auth_string,
3748 int auth_status,
3749 int auth_ret_value)
3750 {
3751 #ifndef NATIVE_BUILD
3752 enum rep_protocol_transaction_action action;
3753 audit_event_data_t audit_data;
3754 size_t count;
3755 size_t cmd_no;
3756 char *cp;
3757 au_event_t event_id;
3758 char fmri[REP_PROTOCOL_FMRI_LEN];
3759 char pg_name[REP_PROTOCOL_NAME_LEN];
3760 char *pg_end; /* End of prop. group fmri */
3761 const char *prop_name;
3762 uint32_t ptype;
3763 char prop_type[3];
3764 enum rep_protocol_responseid rc;
3765 size_t sz_out;
3766
3767 /* Make sure we have something to do. */
3768 if (tx_data == NULL)
3769 return;
3770 if ((count = tx_cmd_count(tx_data)) == 0)
3771 return;
3772
3773 /* Copy the property group fmri */
3774 pg_end = fmri;
3775 pg_end += strlcpy(fmri, pg_fmri, sizeof (fmri));
3776
3777 /*
3778 * Get the property group name. It is the first component after
3779 	 * the last occurrence of SCF_FMRI_PROPERTYGRP_PREFIX in the fmri.
3780 */
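	/*
	 * For example (hypothetical FMRI, added for illustration): given
	 * a property group FMRI such as
	 * svc:/system/example:default/:properties/general, everything
	 * after the last SCF_FMRI_PROPERTYGRP_PREFIX is "general", and
	 * that is what ends up in pg_name below.
	 */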
3781 cp = strstr(pg_fmri, SCF_FMRI_PROPERTYGRP_PREFIX);
3782 if (cp == NULL) {
3783 pg_name[0] = 0;
3784 } else {
3785 cp += strlen(SCF_FMRI_PROPERTYGRP_PREFIX);
3786 (void) strlcpy(pg_name, cp, sizeof (pg_name));
3787 }
3788
3789 audit_data.ed_auth = auth_string;
3790 audit_data.ed_fmri = fmri;
3791 audit_data.ed_type = prop_type;
3792
3793 /*
3794 * Property type is two characters (see
3795 * rep_protocol_value_type_t), so terminate the string.
3796 */
3797 prop_type[2] = 0;
3798
3799 for (cmd_no = 0; cmd_no < count; cmd_no++) {
3800 /* Construct FMRI of the property */
3801 *pg_end = 0;
3802 if (tx_cmd_prop(tx_data, cmd_no, &prop_name) !=
3803 REP_PROTOCOL_SUCCESS) {
3804 continue;
3805 }
3806 rc = rc_concat_fmri_element(fmri, sizeof (fmri), &sz_out,
3807 prop_name, REP_PROTOCOL_ENTITY_PROPERTY);
3808 if (rc != REP_PROTOCOL_SUCCESS) {
3809 /*
3810 * If we can't get the FMRI, we'll abandon this
3811 * command
3812 */
3813 continue;
3814 }
3815
3816 /* Generate special property event if necessary. */
3817 special_property_event(&audit_data, prop_name, pg_name,
3818 auth_status, auth_ret_value, tx_data, cmd_no);
3819
3820 /* Capture rest of audit data. */
3821 if (tx_cmd_prop_type(tx_data, cmd_no, &ptype) !=
3822 REP_PROTOCOL_SUCCESS) {
3823 continue;
3824 }
3825 prop_type[0] = REP_PROTOCOL_BASE_TYPE(ptype);
3826 prop_type[1] = REP_PROTOCOL_SUBTYPE(ptype);
3827 audit_data.ed_prop_value = generate_value_list(tx_data, cmd_no);
3828
3829 /* Determine the event type. */
3830 if (tx_cmd_action(tx_data, cmd_no, &action) !=
3831 REP_PROTOCOL_SUCCESS) {
3832 free(audit_data.ed_prop_value);
3833 continue;
3834 }
3835 switch (action) {
3836 case REP_PROTOCOL_TX_ENTRY_NEW:
3837 event_id = ADT_smf_create_prop;
3838 break;
3839 case REP_PROTOCOL_TX_ENTRY_CLEAR:
3840 event_id = ADT_smf_change_prop;
3841 break;
3842 case REP_PROTOCOL_TX_ENTRY_REPLACE:
3843 event_id = ADT_smf_change_prop;
3844 break;
3845 case REP_PROTOCOL_TX_ENTRY_DELETE:
3846 event_id = ADT_smf_delete_prop;
3847 break;
3848 default:
3849 assert(0); /* Missing a case */
3850 free(audit_data.ed_prop_value);
3851 continue;
3852 }
3853
3854 /* Generate the event. */
3855 smf_audit_event(event_id, auth_status, auth_ret_value,
3856 &audit_data);
3857 free(audit_data.ed_prop_value);
3858 }
3859 #endif /* NATIVE_BUILD */
3860 }
3861
3862 /*
3863 * Fails with
3864 * _DELETED - node has been deleted
3865 * _NOT_SET - npp is reset
3866 * _NOT_APPLICABLE - type is _PROPERTYGRP
3867 * _INVALID_TYPE - node is corrupt or type is invalid
3868 * _TYPE_MISMATCH - node cannot have children of type type
3869 * _BAD_REQUEST - name is invalid
3870 * cannot create children for this type of node
3871 * _NO_RESOURCES - out of memory, or could not allocate new id
3872 * _PERMISSION_DENIED
3873 * _BACKEND_ACCESS
3874 * _BACKEND_READONLY
3875 * _EXISTS - child already exists
3876 * _TRUNCATED - truncated FMRI for the audit record
3877 */
3878 int
3879 rc_node_create_child(rc_node_ptr_t *npp, uint32_t type, const char *name,
3880 rc_node_ptr_t *cpp)
3881 {
3882 rc_node_t *np;
3883 rc_node_t *cp = NULL;
3884 int rc;
3885 perm_status_t perm_rc;
3886 size_t sz_out;
3887 char fmri[REP_PROTOCOL_FMRI_LEN];
3888 audit_event_data_t audit_data;
3889
3890 rc_node_clear(cpp, 0);
3891
3892 /*
3893 * rc_node_modify_permission_check() must be called before the node
3894 * is locked. This is because the library functions that check
3895 * authorizations can trigger calls back into configd.
3896 */
3897 perm_rc = rc_node_modify_permission_check(&audit_data.ed_auth);
3898 switch (perm_rc) {
3899 case PERM_DENIED:
3900 /*
3901 * We continue in this case, so that an audit event can be
3902 * generated later in the function.
3903 */
3904 break;
3905 case PERM_GRANTED:
3906 break;
3907 case PERM_GONE:
3908 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
3909 case PERM_FAIL:
3910 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
3911 default:
3912 bad_error(rc_node_modify_permission_check, perm_rc);
3913 }
3914
3915 RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, audit_data.ed_auth);
3916
3917 audit_data.ed_fmri = fmri;
3918
3919 /*
3920 * there is a separate interface for creating property groups
3921 */
3922 if (type == REP_PROTOCOL_ENTITY_PROPERTYGRP) {
3923 (void) pthread_mutex_unlock(&np->rn_lock);
3924 free(audit_data.ed_auth);
3925 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3926 }
3927
3928 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3929 (void) pthread_mutex_unlock(&np->rn_lock);
3930 np = np->rn_cchain[0];
3931 if ((rc = rc_node_check_and_lock(np)) != REP_PROTOCOL_SUCCESS) {
3932 free(audit_data.ed_auth);
3933 return (rc);
3934 }
3935 }
3936
3937 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
3938 REP_PROTOCOL_SUCCESS) {
3939 (void) pthread_mutex_unlock(&np->rn_lock);
3940 free(audit_data.ed_auth);
3941 return (rc);
3942 }
3943 if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS) {
3944 (void) pthread_mutex_unlock(&np->rn_lock);
3945 free(audit_data.ed_auth);
3946 return (rc);
3947 }
3948
3949 if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
3950 name, type)) != REP_PROTOCOL_SUCCESS) {
3951 (void) pthread_mutex_unlock(&np->rn_lock);
3952 free(audit_data.ed_auth);
3953 return (rc);
3954 }
3955 if (perm_rc == PERM_DENIED) {
3956 (void) pthread_mutex_unlock(&np->rn_lock);
3957 smf_audit_event(ADT_smf_create, ADT_FAILURE,
3958 ADT_FAIL_VALUE_AUTH, &audit_data);
3959 free(audit_data.ed_auth);
3960 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
3961 }
3962
3963 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
3964 audit_data.ed_auth);
3965 (void) pthread_mutex_unlock(&np->rn_lock);
3966
3967 rc = object_create(np, type, name, &cp);
3968 assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3969
3970 if (rc == REP_PROTOCOL_SUCCESS) {
3971 rc_node_assign(cpp, cp);
3972 rc_node_rele(cp);
3973 }
3974
3975 (void) pthread_mutex_lock(&np->rn_lock);
3976 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
3977 (void) pthread_mutex_unlock(&np->rn_lock);
3978
3979 if (rc == REP_PROTOCOL_SUCCESS) {
3980 smf_audit_event(ADT_smf_create, ADT_SUCCESS, ADT_SUCCESS,
3981 &audit_data);
3982 }
3983
3984 free(audit_data.ed_auth);
3985
3986 return (rc);
3987 }
3988
3989 int
3990 rc_node_create_child_pg(rc_node_ptr_t *npp, uint32_t type, const char *name,
3991 const char *pgtype, uint32_t flags, rc_node_ptr_t *cpp)
3992 {
3993 rc_node_t *np;
3994 rc_node_t *cp;
3995 int rc;
3996 permcheck_t *pcp;
3997 perm_status_t granted;
3998 char fmri[REP_PROTOCOL_FMRI_LEN];
3999 audit_event_data_t audit_data;
4000 au_event_t event_id;
4001 size_t sz_out;
4002
4003 audit_data.ed_auth = NULL;
4004 audit_data.ed_fmri = fmri;
4005 audit_data.ed_type = (char *)pgtype;
4006
4007 rc_node_clear(cpp, 0);
4008
4009 /* verify flags is valid */
4010 if (flags & ~SCF_PG_FLAG_NONPERSISTENT)
4011 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4012
4013 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
4014
4015 if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4016 rc_node_rele(np);
4017 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
4018 }
4019
4020 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
4021 REP_PROTOCOL_SUCCESS) {
4022 rc_node_rele(np);
4023 return (rc);
4024 }
4025 if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS ||
4026 (rc = rc_check_pgtype_name(pgtype)) != REP_PROTOCOL_SUCCESS) {
4027 rc_node_rele(np);
4028 return (rc);
4029 }
4030
4031 #ifdef NATIVE_BUILD
4032 if (!client_is_privileged()) {
4033 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4034 }
4035 #else
4036 if (flags & SCF_PG_FLAG_NONPERSISTENT) {
4037 event_id = ADT_smf_create_npg;
4038 } else {
4039 event_id = ADT_smf_create_pg;
4040 }
4041 if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
4042 name, REP_PROTOCOL_ENTITY_PROPERTYGRP)) != REP_PROTOCOL_SUCCESS) {
4043 rc_node_rele(np);
4044 return (rc);
4045 }
4046
4047 if (is_main_repository) {
4048 /* Must have .smf.modify or smf.modify.<type> authorization */
4049 pcp = pc_create();
4050 if (pcp != NULL) {
4051 rc = perm_add_enabling(pcp, AUTH_MODIFY);
4052
4053 if (rc == REP_PROTOCOL_SUCCESS) {
4054 const char * const auth =
4055 perm_auth_for_pgtype(pgtype);
4056
4057 if (auth != NULL)
4058 rc = perm_add_enabling(pcp, auth);
4059 }
4060
4061 /*
4062 * .manage or $action_authorization can be used to
4063 * create the actions pg and the general_ovr pg.
4064 */
4065 if (rc == REP_PROTOCOL_SUCCESS &&
4066 (flags & SCF_PG_FLAG_NONPERSISTENT) != 0 &&
4067 np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE &&
4068 ((strcmp(name, AUTH_PG_ACTIONS) == 0 &&
4069 strcmp(pgtype, AUTH_PG_ACTIONS_TYPE) == 0) ||
4070 (strcmp(name, AUTH_PG_GENERAL_OVR) == 0 &&
4071 strcmp(pgtype, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
4072 rc = perm_add_enabling(pcp, AUTH_MANAGE);
4073
4074 if (rc == REP_PROTOCOL_SUCCESS)
4075 rc = perm_add_inst_action_auth(pcp, np);
4076 }
4077
4078 if (rc == REP_PROTOCOL_SUCCESS) {
4079 granted = perm_granted(pcp);
4080
4081 rc = map_granted_status(granted, pcp,
4082 &audit_data.ed_auth);
4083 if (granted == PERM_GONE) {
4084 /* No auditing if client gone. */
4085 pc_free(pcp);
4086 rc_node_rele(np);
4087 return (rc);
4088 }
4089 }
4090
4091 pc_free(pcp);
4092 } else {
4093 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4094 }
4095
4096 } else {
4097 rc = REP_PROTOCOL_SUCCESS;
4098 }
4099 #endif /* NATIVE_BUILD */
4100
4101
4102 if (rc != REP_PROTOCOL_SUCCESS) {
4103 rc_node_rele(np);
4104 if (rc != REP_PROTOCOL_FAIL_NO_RESOURCES) {
4105 smf_audit_event(event_id, ADT_FAILURE,
4106 ADT_FAIL_VALUE_AUTH, &audit_data);
4107 }
4108 if (audit_data.ed_auth != NULL)
4109 free(audit_data.ed_auth);
4110 return (rc);
4111 }
4112
4113 (void) pthread_mutex_lock(&np->rn_lock);
4114 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
4115 audit_data.ed_auth);
4116 (void) pthread_mutex_unlock(&np->rn_lock);
4117
4118 rc = object_create_pg(np, type, name, pgtype, flags, &cp);
4119
4120 if (rc == REP_PROTOCOL_SUCCESS) {
4121 rc_node_assign(cpp, cp);
4122 rc_node_rele(cp);
4123 }
4124
4125 (void) pthread_mutex_lock(&np->rn_lock);
4126 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
4127 (void) pthread_mutex_unlock(&np->rn_lock);
4128
4129 if (rc == REP_PROTOCOL_SUCCESS) {
4130 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
4131 &audit_data);
4132 }
4133 if (audit_data.ed_auth != NULL)
4134 free(audit_data.ed_auth);
4135
4136 return (rc);
4137 }
4138
4139 static void
4140 rc_pg_notify_fire(rc_node_pg_notify_t *pnp)
4141 {
4142 assert(MUTEX_HELD(&rc_pg_notify_lock));
4143
4144 if (pnp->rnpn_pg != NULL) {
4145 uu_list_remove(pnp->rnpn_pg->rn_pg_notify_list, pnp);
4146 (void) close(pnp->rnpn_fd);
4147
4148 pnp->rnpn_pg = NULL;
4149 pnp->rnpn_fd = -1;
4150 } else {
4151 assert(pnp->rnpn_fd == -1);
4152 }
4153 }
4154
4155 static void
4156 rc_notify_node_delete(rc_notify_delete_t *ndp, rc_node_t *np_arg)
4157 {
4158 rc_node_t *svc = NULL;
4159 rc_node_t *inst = NULL;
4160 rc_node_t *pg = NULL;
4161 rc_node_t *np = np_arg;
4162 rc_node_t *nnp;
4163
4164 while (svc == NULL) {
4165 (void) pthread_mutex_lock(&np->rn_lock);
4166 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4167 (void) pthread_mutex_unlock(&np->rn_lock);
4168 goto cleanup;
4169 }
4170 nnp = np->rn_parent;
4171 rc_node_hold_locked(np); /* hold it in place */
4172
4173 switch (np->rn_id.rl_type) {
4174 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4175 assert(pg == NULL);
4176 pg = np;
4177 break;
4178 case REP_PROTOCOL_ENTITY_INSTANCE:
4179 assert(inst == NULL);
4180 inst = np;
4181 break;
4182 case REP_PROTOCOL_ENTITY_SERVICE:
4183 assert(svc == NULL);
4184 svc = np;
4185 break;
4186 default:
4187 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
4188 rc_node_rele_locked(np);
4189 goto cleanup;
4190 }
4191
4192 (void) pthread_mutex_unlock(&np->rn_lock);
4193
4194 np = nnp;
4195 if (np == NULL)
4196 goto cleanup;
4197 }
4198
4199 rc_notify_deletion(ndp,
4200 svc->rn_name,
4201 inst != NULL ? inst->rn_name : NULL,
4202 pg != NULL ? pg->rn_name : NULL);
4203
4204 ndp = NULL;
4205
4206 cleanup:
4207 if (ndp != NULL)
4208 uu_free(ndp);
4209
4210 for (;;) {
4211 if (svc != NULL) {
4212 np = svc;
4213 svc = NULL;
4214 } else if (inst != NULL) {
4215 np = inst;
4216 inst = NULL;
4217 } else if (pg != NULL) {
4218 np = pg;
4219 pg = NULL;
4220 } else
4221 break;
4222
4223 (void) pthread_mutex_lock(&np->rn_lock);
4224 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
4225 rc_node_rele_locked(np);
4226 }
4227 }
4228
4229 /*
4230 * Hold RC_NODE_DYING_FLAGS on np's descendents. If andformer is true, do
4231 * the same down the rn_former chain.
4232 */
4233 static void
4234 rc_node_delete_hold(rc_node_t *np, int andformer)
4235 {
4236 rc_node_t *cp;
4237
4238 again:
4239 assert(MUTEX_HELD(&np->rn_lock));
4240 assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);
4241
4242 for (cp = uu_list_first(np->rn_children); cp != NULL;
4243 cp = uu_list_next(np->rn_children, cp)) {
4244 (void) pthread_mutex_lock(&cp->rn_lock);
4245 (void) pthread_mutex_unlock(&np->rn_lock);
4246 if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS)) {
4247 /*
4248 * already marked as dead -- can't happen, since that
4249 * would require setting RC_NODE_CHILDREN_CHANGING
4250 * in np, and we're holding that...
4251 */
4252 abort();
4253 }
4254 rc_node_delete_hold(cp, andformer); /* recurse, drop lock */
4255
4256 (void) pthread_mutex_lock(&np->rn_lock);
4257 }
4258 if (andformer && (cp = np->rn_former) != NULL) {
4259 (void) pthread_mutex_lock(&cp->rn_lock);
4260 (void) pthread_mutex_unlock(&np->rn_lock);
4261 if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS))
4262 abort(); /* can't happen, see above */
4263 np = cp;
4264 goto again; /* tail-recurse down rn_former */
4265 }
4266 (void) pthread_mutex_unlock(&np->rn_lock);
4267 }
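/*
 * The traversal above uses hand-over-hand locking: each child's lock is
 * acquired before the parent's lock is released, so there is never a
 * window in which neither lock is held.  A minimal sketch of the pattern
 * (illustration only; "parent" and "child" are hypothetical names):
 *
 *	(void) pthread_mutex_lock(&child->rn_lock);
 *	(void) pthread_mutex_unlock(&parent->rn_lock);
 *	... operate on child, possibly recursing ...
 *	(void) pthread_mutex_lock(&parent->rn_lock);
 *
 * rc_node_delete_rele() below walks the same structures with the same
 * discipline.
 */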
4268
4269 /*
4270 * N.B.: this function drops np->rn_lock on the way out.
4271 */
4272 static void
4273 rc_node_delete_rele(rc_node_t *np, int andformer)
4274 {
4275 rc_node_t *cp;
4276
4277 again:
4278 assert(MUTEX_HELD(&np->rn_lock));
4279 assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);
4280
4281 for (cp = uu_list_first(np->rn_children); cp != NULL;
4282 cp = uu_list_next(np->rn_children, cp)) {
4283 (void) pthread_mutex_lock(&cp->rn_lock);
4284 (void) pthread_mutex_unlock(&np->rn_lock);
4285 rc_node_delete_rele(cp, andformer); /* recurse, drop lock */
4286 (void) pthread_mutex_lock(&np->rn_lock);
4287 }
4288 if (andformer && (cp = np->rn_former) != NULL) {
4289 (void) pthread_mutex_lock(&cp->rn_lock);
4290 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4291 (void) pthread_mutex_unlock(&np->rn_lock);
4292
4293 np = cp;
4294 goto again; /* tail-recurse down rn_former */
4295 }
4296 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4297 (void) pthread_mutex_unlock(&np->rn_lock);
4298 }
4299
4300 static void
4301 rc_node_finish_delete(rc_node_t *cp)
4302 {
4303 cache_bucket_t *bp;
4304 rc_node_pg_notify_t *pnp;
4305
4306 assert(MUTEX_HELD(&cp->rn_lock));
4307
4308 if (!(cp->rn_flags & RC_NODE_OLD)) {
4309 assert(cp->rn_flags & RC_NODE_IN_PARENT);
4310 if (!rc_node_wait_flag(cp, RC_NODE_USING_PARENT)) {
4311 abort(); /* can't happen, see above */
4312 }
4313 cp->rn_flags &= ~RC_NODE_IN_PARENT;
4314 cp->rn_parent = NULL;
4315 rc_node_free_fmri(cp);
4316 }
4317
4318 cp->rn_flags |= RC_NODE_DEAD;
4319
4320 /*
4321 * If this node is not out-dated, we need to remove it from
4322 * the notify list and cache hash table.
4323 */
4324 if (!(cp->rn_flags & RC_NODE_OLD)) {
4325 assert(cp->rn_refs > 0); /* can't go away yet */
4326 (void) pthread_mutex_unlock(&cp->rn_lock);
4327
4328 (void) pthread_mutex_lock(&rc_pg_notify_lock);
4329 while ((pnp = uu_list_first(cp->rn_pg_notify_list)) != NULL)
4330 rc_pg_notify_fire(pnp);
4331 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
4332 rc_notify_remove_node(cp);
4333
4334 bp = cache_hold(cp->rn_hash);
4335 (void) pthread_mutex_lock(&cp->rn_lock);
4336 cache_remove_unlocked(bp, cp);
4337 cache_release(bp);
4338 }
4339 }
4340
4341 /*
4342 * For each child, call rc_node_finish_delete() and recurse. If andformer
4343 * is set, also recurse down rn_former. Finally release np, which might
4344 * free it.
4345 */
4346 static void
4347 rc_node_delete_children(rc_node_t *np, int andformer)
4348 {
4349 rc_node_t *cp;
4350
4351 again:
4352 assert(np->rn_refs > 0);
4353 assert(MUTEX_HELD(&np->rn_lock));
4354 assert(np->rn_flags & RC_NODE_DEAD);
4355
4356 while ((cp = uu_list_first(np->rn_children)) != NULL) {
4357 uu_list_remove(np->rn_children, cp);
4358 (void) pthread_mutex_lock(&cp->rn_lock);
4359 (void) pthread_mutex_unlock(&np->rn_lock);
4360 rc_node_hold_locked(cp); /* hold while we recurse */
4361 rc_node_finish_delete(cp);
4362 rc_node_delete_children(cp, andformer); /* drops lock + ref */
4363 (void) pthread_mutex_lock(&np->rn_lock);
4364 }
4365
4366 /*
4367 * When we drop cp's lock, all the children will be gone, so we
4368 * can release DYING_FLAGS.
4369 */
4370 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4371 if (andformer && (cp = np->rn_former) != NULL) {
4372 np->rn_former = NULL; /* unlink */
4373 (void) pthread_mutex_lock(&cp->rn_lock);
4374
4375 /*
4376 * Register the ephemeral reference created by reading
4377 * np->rn_former into cp. Note that the persistent
4378 * reference (np->rn_former) is locked because we haven't
4379 * dropped np's lock since we dropped its RC_NODE_IN_TX
4380 * (via RC_NODE_DYING_FLAGS).
4381 */
4382 rc_node_hold_ephemeral_locked(cp);
4383
4384 (void) pthread_mutex_unlock(&np->rn_lock);
4385 cp->rn_flags &= ~RC_NODE_ON_FORMER;
4386
4387 rc_node_hold_locked(cp); /* hold while we loop */
4388
4389 rc_node_finish_delete(cp);
4390
4391 rc_node_rele(np); /* drop the old reference */
4392
4393 np = cp;
4394 goto again; /* tail-recurse down rn_former */
4395 }
4396 rc_node_rele_locked(np);
4397 }
4398
4399 /*
4400 * The last client or child reference to np, which must be either
4401 * RC_NODE_OLD or RC_NODE_DEAD, has been destroyed. We'll destroy any
4402 * remaining references (e.g., rn_former) and call rc_node_destroy() to
4403 * free np.
4404 */
4405 static void
4406 rc_node_no_client_refs(rc_node_t *np)
4407 {
4408 int unrefed;
4409 rc_node_t *current, *cur;
4410
4411 assert(MUTEX_HELD(&np->rn_lock));
4412 assert(np->rn_refs == 0);
4413 assert(np->rn_other_refs == 0);
4414 assert(np->rn_other_refs_held == 0);
4415
4416 if (np->rn_flags & RC_NODE_DEAD) {
4417 /*
4418 * The node is DEAD, so the deletion code should have
4419 * destroyed all rn_children or rn_former references.
4420 * Since the last client or child reference has been
4421 * destroyed, we're free to destroy np. Unless another
4422 * thread has an ephemeral reference, in which case we'll
4423 * pass the buck.
4424 */
4425 if (np->rn_erefs > 1) {
4426 --np->rn_erefs;
4427 NODE_UNLOCK(np);
4428 return;
4429 }
4430
4431 (void) pthread_mutex_unlock(&np->rn_lock);
4432 rc_node_destroy(np);
4433 return;
4434 }
4435
4436 /* We only collect DEAD and OLD nodes, thank you. */
4437 assert(np->rn_flags & RC_NODE_OLD);
4438
4439 /*
4440 * RC_NODE_UNREFED keeps multiple threads from processing OLD
4441 * nodes. But it's vulnerable to unfriendly scheduling, so full
4442 * use of rn_erefs should supersede it someday.
4443 */
4444 if (np->rn_flags & RC_NODE_UNREFED) {
4445 (void) pthread_mutex_unlock(&np->rn_lock);
4446 return;
4447 }
4448 np->rn_flags |= RC_NODE_UNREFED;
4449
4450 /*
4451 * Now we'll remove the node from the rn_former chain and take its
4452 * DYING_FLAGS.
4453 */
4454
4455 /*
4456 * Since this node is OLD, it should be on an rn_former chain. To
4457 * remove it, we must find the current in-hash object and grab its
4458 * RC_NODE_IN_TX flag to protect the entire rn_former chain.
4459 */
4460
4461 (void) pthread_mutex_unlock(&np->rn_lock);
4462
4463 for (;;) {
4464 current = cache_lookup(&np->rn_id);
4465
4466 if (current == NULL) {
4467 (void) pthread_mutex_lock(&np->rn_lock);
4468
4469 if (np->rn_flags & RC_NODE_DEAD)
4470 goto died;
4471
4472 /*
4473 * We are trying to unreference this node, but the
4474 * owner of the former list does not exist. It must
4475 * be the case that another thread is deleting this
4476 * entire sub-branch, but has not yet reached us.
4477 * We will in short order be deleted.
4478 */
4479 np->rn_flags &= ~RC_NODE_UNREFED;
4480 (void) pthread_mutex_unlock(&np->rn_lock);
4481 return;
4482 }
4483
4484 if (current == np) {
4485 /*
4486 * no longer unreferenced
4487 */
4488 (void) pthread_mutex_lock(&np->rn_lock);
4489 np->rn_flags &= ~RC_NODE_UNREFED;
4490 /* held in cache_lookup() */
4491 rc_node_rele_locked(np);
4492 return;
4493 }
4494
4495 		(void) pthread_mutex_lock(&current->rn_lock);
4496 if (current->rn_flags & RC_NODE_OLD) {
4497 /*
4498 * current has been replaced since we looked it
4499 * up. Try again.
4500 */
4501 /* held in cache_lookup() */
4502 rc_node_rele_locked(current);
4503 continue;
4504 }
4505
4506 if (!rc_node_hold_flag(current, RC_NODE_IN_TX)) {
4507 /*
4508 * current has been deleted since we looked it up. Try
4509 * again.
4510 */
4511 /* held in cache_lookup() */
4512 rc_node_rele_locked(current);
4513 continue;
4514 }
4515
4516 /*
4517 * rc_node_hold_flag() might have dropped current's lock, so
4518 * check OLD again.
4519 */
4520 if (!(current->rn_flags & RC_NODE_OLD)) {
4521 /* Not old. Stop looping. */
4522 			(void) pthread_mutex_unlock(&current->rn_lock);
4523 break;
4524 }
4525
4526 rc_node_rele_flag(current, RC_NODE_IN_TX);
4527 rc_node_rele_locked(current);
4528 }
4529
4530 /* To take np's RC_NODE_DYING_FLAGS, we need its lock. */
4531 (void) pthread_mutex_lock(&np->rn_lock);
4532
4533 /*
4534 * While we didn't have the lock, a thread may have added
4535 * a reference or changed the flags.
4536 */
4537 if (!(np->rn_flags & (RC_NODE_OLD | RC_NODE_DEAD)) ||
4538 np->rn_refs != 0 || np->rn_other_refs != 0 ||
4539 np->rn_other_refs_held != 0) {
4540 np->rn_flags &= ~RC_NODE_UNREFED;
4541
4542 		(void) pthread_mutex_lock(&current->rn_lock);
4543 rc_node_rele_flag(current, RC_NODE_IN_TX);
4544 /* held by cache_lookup() */
4545 rc_node_rele_locked(current);
4546 return;
4547 }
4548
4549 if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4550 /*
4551 * Someone deleted the node while we were waiting for
4552 * DYING_FLAGS. Undo the modifications to current.
4553 */
4554 (void) pthread_mutex_unlock(&np->rn_lock);
4555
4556 rc_node_rele_flag(current, RC_NODE_IN_TX);
4557 /* held by cache_lookup() */
4558 rc_node_rele_locked(current);
4559
4560 (void) pthread_mutex_lock(&np->rn_lock);
4561 goto died;
4562 }
4563
4564 /* Take RC_NODE_DYING_FLAGS on np's descendents. */
4565 rc_node_delete_hold(np, 0); /* drops np->rn_lock */
4566
4567 /* Mark np DEAD. This requires the lock. */
4568 (void) pthread_mutex_lock(&np->rn_lock);
4569
4570 /* Recheck for new references. */
4571 if (!(np->rn_flags & RC_NODE_OLD) ||
4572 np->rn_refs != 0 || np->rn_other_refs != 0 ||
4573 np->rn_other_refs_held != 0) {
4574 np->rn_flags &= ~RC_NODE_UNREFED;
4575 rc_node_delete_rele(np, 0); /* drops np's lock */
4576
4577 		(void) pthread_mutex_lock(&current->rn_lock);
4578 rc_node_rele_flag(current, RC_NODE_IN_TX);
4579 /* held by cache_lookup() */
4580 rc_node_rele_locked(current);
4581 return;
4582 }
4583
4584 np->rn_flags |= RC_NODE_DEAD;
4585
4586 /*
4587 * Delete the children. This calls rc_node_rele_locked() on np at
4588 * the end, so add a reference to keep the count from going
4589 * negative. It will recurse with RC_NODE_DEAD set, so we'll call
4590 * rc_node_destroy() above, but RC_NODE_UNREFED is also set, so it
4591 * shouldn't actually free() np.
4592 */
4593 rc_node_hold_locked(np);
4594 rc_node_delete_children(np, 0); /* unlocks np */
4595
4596 /* Remove np from current's rn_former chain. */
4597 	(void) pthread_mutex_lock(&current->rn_lock);
4598 for (cur = current; cur != NULL && cur->rn_former != np;
4599 cur = cur->rn_former)
4600 ;
4601 assert(cur != NULL && cur != np);
4602
4603 cur->rn_former = np->rn_former;
4604 np->rn_former = NULL;
4605
4606 rc_node_rele_flag(current, RC_NODE_IN_TX);
4607 /* held by cache_lookup() */
4608 rc_node_rele_locked(current);
4609
4610 /* Clear ON_FORMER and UNREFED, and destroy. */
4611 (void) pthread_mutex_lock(&np->rn_lock);
4612 assert(np->rn_flags & RC_NODE_ON_FORMER);
4613 np->rn_flags &= ~(RC_NODE_UNREFED | RC_NODE_ON_FORMER);
4614
4615 if (np->rn_erefs > 1) {
4616 /* Still referenced. Stay execution. */
4617 --np->rn_erefs;
4618 NODE_UNLOCK(np);
4619 return;
4620 }
4621
4622 (void) pthread_mutex_unlock(&np->rn_lock);
4623 rc_node_destroy(np);
4624 return;
4625
4626 died:
4627 /*
4628 * Another thread marked np DEAD. If there still aren't any
4629 * persistent references, destroy the node.
4630 */
4631 np->rn_flags &= ~RC_NODE_UNREFED;
4632
4633 unrefed = (np->rn_refs == 0 && np->rn_other_refs == 0 &&
4634 np->rn_other_refs_held == 0);
4635
4636 if (np->rn_erefs > 0)
4637 --np->rn_erefs;
4638
4639 if (unrefed && np->rn_erefs > 0) {
4640 NODE_UNLOCK(np);
4641 return;
4642 }
4643
4644 (void) pthread_mutex_unlock(&np->rn_lock);
4645
4646 if (unrefed)
4647 rc_node_destroy(np);
4648 }
4649
4650 static au_event_t
4651 get_delete_event_id(rep_protocol_entity_t entity, uint32_t pgflags)
4652 {
4653 au_event_t id = 0;
4654
4655 #ifndef NATIVE_BUILD
4656 switch (entity) {
4657 case REP_PROTOCOL_ENTITY_SERVICE:
4658 case REP_PROTOCOL_ENTITY_INSTANCE:
4659 id = ADT_smf_delete;
4660 break;
4661 case REP_PROTOCOL_ENTITY_SNAPSHOT:
4662 id = ADT_smf_delete_snap;
4663 break;
4664 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4665 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4666 if (pgflags & SCF_PG_FLAG_NONPERSISTENT) {
4667 id = ADT_smf_delete_npg;
4668 } else {
4669 id = ADT_smf_delete_pg;
4670 }
4671 break;
4672 default:
4673 abort();
4674 }
4675 #endif /* NATIVE_BUILD */
4676 return (id);
4677 }
4678
4679 /*
4680 * Fails with
4681 * _NOT_SET
4682 * _DELETED
4683 * _BAD_REQUEST
4684 * _PERMISSION_DENIED
4685 * _NO_RESOURCES
4686 * _TRUNCATED
4687 * and whatever object_delete() fails with.
4688 */
4689 int
4690 rc_node_delete(rc_node_ptr_t *npp)
4691 {
4692 rc_node_t *np, *np_orig;
4693 rc_node_t *pp = NULL;
4694 int rc;
4695 rc_node_pg_notify_t *pnp;
4696 cache_bucket_t *bp;
4697 rc_notify_delete_t *ndp;
4698 permcheck_t *pcp;
4699 int granted;
4700 au_event_t event_id = 0;
4701 size_t sz_out;
4702 audit_event_data_t audit_data;
4703 int audit_failure = 0;
4704
4705 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
4706
4707 audit_data.ed_fmri = NULL;
4708 audit_data.ed_auth = NULL;
4709 audit_data.ed_snapname = NULL;
4710 audit_data.ed_type = NULL;
4711
4712 switch (np->rn_id.rl_type) {
4713 case REP_PROTOCOL_ENTITY_SERVICE:
4714 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SERVICE,
4715 np->rn_pgflags);
4716 break;
4717 case REP_PROTOCOL_ENTITY_INSTANCE:
4718 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_INSTANCE,
4719 np->rn_pgflags);
4720 break;
4721 case REP_PROTOCOL_ENTITY_SNAPSHOT:
4722 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SNAPSHOT,
4723 np->rn_pgflags);
4724 audit_data.ed_snapname = strdup(np->rn_name);
4725 if (audit_data.ed_snapname == NULL) {
4726 (void) pthread_mutex_unlock(&np->rn_lock);
4727 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4728 }
4729 break; /* deletable */
4730
4731 case REP_PROTOCOL_ENTITY_SCOPE:
4732 case REP_PROTOCOL_ENTITY_SNAPLEVEL:
4733 /* Scopes and snaplevels are indelible. */
4734 (void) pthread_mutex_unlock(&np->rn_lock);
4735 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4736
4737 case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4738 (void) pthread_mutex_unlock(&np->rn_lock);
4739 np = np->rn_cchain[0];
4740 RC_NODE_CHECK_AND_LOCK(np);
4741 event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_CPROPERTYGRP,
4742 np->rn_pgflags);
4743 break;
4744
4745 case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4746 if (np->rn_id.rl_ids[ID_SNAPSHOT] == 0) {
4747 event_id =
4748 get_delete_event_id(REP_PROTOCOL_ENTITY_PROPERTYGRP,
4749 np->rn_pgflags);
4750 audit_data.ed_type = strdup(np->rn_type);
4751 if (audit_data.ed_type == NULL) {
4752 (void) pthread_mutex_unlock(&np->rn_lock);
4753 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4754 }
4755 break;
4756 }
4757
4758 /* Snapshot property groups are indelible. */
4759 (void) pthread_mutex_unlock(&np->rn_lock);
4760 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
4761
4762 case REP_PROTOCOL_ENTITY_PROPERTY:
4763 (void) pthread_mutex_unlock(&np->rn_lock);
4764 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4765
4766 default:
4767 assert(0);
4768 abort();
4769 break;
4770 }
4771
4772 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
4773 if (audit_data.ed_fmri == NULL) {
4774 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4775 goto cleanout;
4776 }
4777 np_orig = np;
4778 rc_node_hold_locked(np); /* simplifies rest of the code */
4779
4780 /*
4781 * The following loop is to deal with the fact that snapshots and
4782 * property groups are moving targets -- changes to them result
4783 * in a new "child" node. Since we can only delete from the top node,
4784 * we have to loop until we have a non-RC_NODE_OLD version.
4785 */
4786 for (;;) {
4787 if (!rc_node_wait_flag(np,
4788 RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
4789 rc_node_rele_locked(np);
4790 rc = REP_PROTOCOL_FAIL_DELETED;
4791 goto cleanout;
4792 }
4793
4794 if (np->rn_flags & RC_NODE_OLD) {
4795 rc_node_rele_locked(np);
4796 np = cache_lookup(&np_orig->rn_id);
4797 assert(np != np_orig);
4798
4799 if (np == NULL) {
4800 rc = REP_PROTOCOL_FAIL_DELETED;
4801 goto fail;
4802 }
4803 (void) pthread_mutex_lock(&np->rn_lock);
4804 continue;
4805 }
4806
4807 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4808 rc_node_rele_locked(np);
4809 rc_node_clear(npp, 1);
4810 			rc = REP_PROTOCOL_FAIL_DELETED;
			goto cleanout;
4811 		}
4812
4813 /*
4814 		 * Mark our parent as children changing. This call drops our
4815 * lock and the RC_NODE_USING_PARENT flag, and returns with
4816 * pp's lock held
4817 */
4818 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
4819 if (pp == NULL) {
4820 /* our parent is gone, we're going next... */
4821 rc_node_rele(np);
4822
4823 rc_node_clear(npp, 1);
4824 rc = REP_PROTOCOL_FAIL_DELETED;
4825 goto cleanout;
4826 }
4827
4828 rc_node_hold_locked(pp); /* hold for later */
4829 (void) pthread_mutex_unlock(&pp->rn_lock);
4830
4831 (void) pthread_mutex_lock(&np->rn_lock);
4832 if (!(np->rn_flags & RC_NODE_OLD))
4833 break; /* not old -- we're done */
4834
4835 (void) pthread_mutex_unlock(&np->rn_lock);
4836 (void) pthread_mutex_lock(&pp->rn_lock);
4837 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4838 rc_node_rele_locked(pp);
4839 (void) pthread_mutex_lock(&np->rn_lock);
4840 continue; /* loop around and try again */
4841 }
4842 /*
4843 * Everyone out of the pool -- we grab everything but
4844 * RC_NODE_USING_PARENT (including RC_NODE_DYING) to keep
4845 * any changes from occurring while we are attempting to
4846 * delete the node.
4847 */
4848 if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4849 (void) pthread_mutex_unlock(&np->rn_lock);
4850 rc = REP_PROTOCOL_FAIL_DELETED;
4851 goto fail;
4852 }
4853
4854 assert(!(np->rn_flags & RC_NODE_OLD));
4855
4856 if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
4857 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
4858 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4859 (void) pthread_mutex_unlock(&np->rn_lock);
4860 goto fail;
4861 }
4862
4863 #ifdef NATIVE_BUILD
4864 if (!client_is_privileged()) {
4865 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4866 }
4867 #else
4868 if (is_main_repository) {
4869 /* permission check */
4870 (void) pthread_mutex_unlock(&np->rn_lock);
4871 pcp = pc_create();
4872 if (pcp != NULL) {
4873 rc = perm_add_enabling(pcp, AUTH_MODIFY);
4874
4875 /* add .smf.modify.<type> for pgs. */
4876 if (rc == REP_PROTOCOL_SUCCESS && np->rn_id.rl_type ==
4877 REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4878 const char * const auth =
4879 perm_auth_for_pgtype(np->rn_type);
4880
4881 if (auth != NULL)
4882 rc = perm_add_enabling(pcp, auth);
4883 }
4884
4885 if (rc == REP_PROTOCOL_SUCCESS) {
4886 granted = perm_granted(pcp);
4887
4888 rc = map_granted_status(granted, pcp,
4889 &audit_data.ed_auth);
4890 if (granted == PERM_GONE) {
4891 /* No need to audit if client gone. */
4892 pc_free(pcp);
4893 rc_node_rele_flag(np,
4894 RC_NODE_DYING_FLAGS);
4895 return (rc);
4896 }
4897 if (granted == PERM_DENIED)
4898 audit_failure = 1;
4899 }
4900
4901 pc_free(pcp);
4902 } else {
4903 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4904 }
4905
4906 (void) pthread_mutex_lock(&np->rn_lock);
4907 } else {
4908 rc = REP_PROTOCOL_SUCCESS;
4909 }
4910 #endif /* NATIVE_BUILD */
4911
4912 if (rc != REP_PROTOCOL_SUCCESS) {
4913 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4914 (void) pthread_mutex_unlock(&np->rn_lock);
4915 goto fail;
4916 }
4917
4918 ndp = uu_zalloc(sizeof (*ndp));
4919 if (ndp == NULL) {
4920 rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4921 (void) pthread_mutex_unlock(&np->rn_lock);
4922 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4923 goto fail;
4924 }
4925
4926 rc_node_delete_hold(np, 1); /* hold entire subgraph, drop lock */
4927
4928 rc = object_delete(np);
4929
4930 if (rc != REP_PROTOCOL_SUCCESS) {
4931 (void) pthread_mutex_lock(&np->rn_lock);
4932 rc_node_delete_rele(np, 1); /* drops lock */
4933 uu_free(ndp);
4934 goto fail;
4935 }
4936
4937 /*
4938 * Now, delicately unlink and delete the object.
4939 *
4940 * Create the delete notification, atomically remove
4941 * from the hash table and set the NODE_DEAD flag, and
4942 * remove from the parent's children list.
4943 */
4944 rc_notify_node_delete(ndp, np); /* frees or uses ndp */
4945
4946 bp = cache_hold(np->rn_hash);
4947
4948 (void) pthread_mutex_lock(&np->rn_lock);
4949 cache_remove_unlocked(bp, np);
4950 cache_release(bp);
4951
4952 np->rn_flags |= RC_NODE_DEAD;
4953
4954 if (pp != NULL) {
4955 /*
4956 * Remove from pp's rn_children. This requires pp's lock,
4957 * so we must drop np's lock to respect lock order.
4958 */
4959 (void) pthread_mutex_unlock(&np->rn_lock);
4960 (void) pthread_mutex_lock(&pp->rn_lock);
4961 (void) pthread_mutex_lock(&np->rn_lock);
4962
4963 uu_list_remove(pp->rn_children, np);
4964
4965 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4966
4967 (void) pthread_mutex_unlock(&pp->rn_lock);
4968
4969 np->rn_flags &= ~RC_NODE_IN_PARENT;
4970 }
4971
4972 /*
4973 * finally, propagate death to our children (including marking
4974 * them DEAD), handle notifications, and release our hold.
4975 */
4976 rc_node_hold_locked(np); /* hold for delete */
4977 rc_node_delete_children(np, 1); /* drops DYING_FLAGS, lock, ref */
4978
4979 rc_node_clear(npp, 1);
4980
4981 (void) pthread_mutex_lock(&rc_pg_notify_lock);
4982 while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
4983 rc_pg_notify_fire(pnp);
4984 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
4985 rc_notify_remove_node(np);
4986
4987 rc_node_rele(np);
4988
4989 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
4990 &audit_data);
4991 free(audit_data.ed_auth);
4992 free(audit_data.ed_snapname);
4993 free(audit_data.ed_type);
4994 free(audit_data.ed_fmri);
4995 return (rc);
4996
4997 fail:
4998 rc_node_rele(np);
4999 if (rc == REP_PROTOCOL_FAIL_DELETED)
5000 rc_node_clear(npp, 1);
5001 if (pp != NULL) {
5002 (void) pthread_mutex_lock(&pp->rn_lock);
5003 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5004 rc_node_rele_locked(pp); /* drop ref and lock */
5005 }
5006 if (audit_failure) {
5007 smf_audit_event(event_id, ADT_FAILURE,
5008 ADT_FAIL_VALUE_AUTH, &audit_data);
5009 }
5010 cleanout:
5011 free(audit_data.ed_auth);
5012 free(audit_data.ed_snapname);
5013 free(audit_data.ed_type);
5014 free(audit_data.ed_fmri);
5015 return (rc);
5016 }
5017
5018 int
5019 rc_node_next_snaplevel(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
5020 {
5021 rc_node_t *np;
5022 rc_node_t *cp, *pp;
5023 int res;
5024
5025 rc_node_clear(cpp, 0);
5026
5027 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5028
5029 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT &&
5030 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) {
5031 (void) pthread_mutex_unlock(&np->rn_lock);
5032 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
5033 }
5034
5035 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
5036 if ((res = rc_node_fill_children(np,
5037 REP_PROTOCOL_ENTITY_SNAPLEVEL)) != REP_PROTOCOL_SUCCESS) {
5038 (void) pthread_mutex_unlock(&np->rn_lock);
5039 return (res);
5040 }
5041
5042 for (cp = uu_list_first(np->rn_children);
5043 cp != NULL;
5044 cp = uu_list_next(np->rn_children, cp)) {
5045 if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
5046 continue;
5047 rc_node_hold(cp);
5048 break;
5049 }
5050
5051 (void) pthread_mutex_unlock(&np->rn_lock);
5052 } else {
5053 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
5054 (void) pthread_mutex_unlock(&np->rn_lock);
5055 rc_node_clear(npp, 1);
5056 return (REP_PROTOCOL_FAIL_DELETED);
5057 }
5058
5059 /*
5060 		 * Mark our parent as children changing. This call drops our
5061 * lock and the RC_NODE_USING_PARENT flag, and returns with
5062 * pp's lock held
5063 */
5064 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
5065 if (pp == NULL) {
5066 /* our parent is gone, we're going next... */
5067
5068 rc_node_clear(npp, 1);
5069 return (REP_PROTOCOL_FAIL_DELETED);
5070 }
5071
5072 /*
5073 * find the next snaplevel
5074 */
5075 cp = np;
5076 while ((cp = uu_list_next(pp->rn_children, cp)) != NULL &&
5077 cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
5078 ;
5079
5080 /* it must match the snaplevel list */
5081 assert((cp == NULL && np->rn_snaplevel->rsl_next == NULL) ||
5082 (cp != NULL && np->rn_snaplevel->rsl_next ==
5083 cp->rn_snaplevel));
5084
5085 if (cp != NULL)
5086 rc_node_hold(cp);
5087
5088 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5089
5090 (void) pthread_mutex_unlock(&pp->rn_lock);
5091 }
5092
5093 rc_node_assign(cpp, cp);
5094 if (cp != NULL) {
5095 rc_node_rele(cp);
5096
5097 return (REP_PROTOCOL_SUCCESS);
5098 }
5099 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5100 }
5101
5102 /*
5103 * This call takes a snapshot (np) and either:
5104 * an existing snapid (to be associated with np), or
5105 * a non-NULL parentp (from which a new snapshot is taken, and associated
5106 * with np)
5107 *
5108 * To do the association, np is duplicated, the duplicate is made to
5109 * represent the new snapid, and np is replaced with the new rc_node_t on
5110 * np's parent's child list. np is placed on the new node's rn_former list,
5111 * and replaces np in cache_hash (so rc_node_update() will find the new one).
5112 *
5113  * old_fmri and old_name point to the original snapshot's FMRI and name.
5114 * These values are used when generating audit events.
5115 *
5116 * Fails with
5117 * _BAD_REQUEST
5118 * _BACKEND_READONLY
5119 * _DELETED
5120 * _NO_RESOURCES
5121 * _TRUNCATED
5122 * _TYPE_MISMATCH
5123 */
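/*
 * Illustration of the relinking described above (added for clarity; not
 * part of the original comment).  Before the call:
 *
 *	pp->rn_children:  ... -> np          (current snapshot node)
 *	cache_hash:       np
 *
 * After a successful rc_attach_snapshot():
 *
 *	pp->rn_children:  ... -> nnp         (carries the new snapid)
 *	nnp->rn_former:   np                 (np is marked RC_NODE_OLD)
 *	cache_hash:       nnp                (rc_node_update() finds nnp)
 */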
5124 static int
5125 rc_attach_snapshot(
5126 rc_node_t *np,
5127 uint32_t snapid,
5128 rc_node_t *parentp,
5129 char *old_fmri,
5130 char *old_name)
5131 {
5132 rc_node_t *np_orig;
5133 rc_node_t *nnp, *prev;
5134 rc_node_t *pp;
5135 int rc;
5136 size_t sz_out;
5137 perm_status_t granted;
5138 au_event_t event_id;
5139 audit_event_data_t audit_data;
5140
5141 if (parentp == NULL) {
5142 assert(old_fmri != NULL);
5143 } else {
5144 assert(snapid == 0);
5145 }
5146 assert(MUTEX_HELD(&np->rn_lock));
5147
5148 /* Gather the audit data. */
5149 /*
5150 * ADT_smf_* symbols may not be defined in the /usr/include header
5151 * files on the build machine. Thus, the following if-else will
5152 * not be compiled when doing native builds.
5153 */
5154 #ifndef NATIVE_BUILD
5155 if (parentp == NULL) {
5156 event_id = ADT_smf_attach_snap;
5157 } else {
5158 event_id = ADT_smf_create_snap;
5159 }
5160 #endif /* NATIVE_BUILD */
5161 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
5162 audit_data.ed_snapname = malloc(REP_PROTOCOL_NAME_LEN);
5163 if ((audit_data.ed_fmri == NULL) || (audit_data.ed_snapname == NULL)) {
5164 (void) pthread_mutex_unlock(&np->rn_lock);
5165 free(audit_data.ed_fmri);
5166 free(audit_data.ed_snapname);
5167 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5168 }
5169 audit_data.ed_auth = NULL;
5170 if (strlcpy(audit_data.ed_snapname, np->rn_name,
5171 REP_PROTOCOL_NAME_LEN) >= REP_PROTOCOL_NAME_LEN) {
5172 abort();
5173 }
5174 audit_data.ed_old_fmri = old_fmri;
5175 audit_data.ed_old_name = old_name ? old_name : "NO NAME";
5176
5177 if (parentp == NULL) {
5178 /*
5179 * In the attach case, get the instance FMRIs of the
5180 * snapshots.
5181 */
5182 if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
5183 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
5184 (void) pthread_mutex_unlock(&np->rn_lock);
5185 free(audit_data.ed_fmri);
5186 free(audit_data.ed_snapname);
5187 return (rc);
5188 }
5189 } else {
5190 /*
5191 * Capture the FMRI of the parent if we're actually going
5192 * to take the snapshot.
5193 */
5194 if ((rc = rc_node_get_fmri_or_fragment(parentp,
5195 audit_data.ed_fmri, REP_PROTOCOL_FMRI_LEN, &sz_out)) !=
5196 REP_PROTOCOL_SUCCESS) {
5197 (void) pthread_mutex_unlock(&np->rn_lock);
5198 free(audit_data.ed_fmri);
5199 free(audit_data.ed_snapname);
5200 return (rc);
5201 }
5202 }
5203
5204 np_orig = np;
5205 rc_node_hold_locked(np); /* simplifies the remainder */
5206
5207 (void) pthread_mutex_unlock(&np->rn_lock);
5208 granted = rc_node_modify_permission_check(&audit_data.ed_auth);
5209 switch (granted) {
5210 case PERM_DENIED:
5211 smf_audit_event(event_id, ADT_FAILURE, ADT_FAIL_VALUE_AUTH,
5212 &audit_data);
5213 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5214 rc_node_rele(np);
5215 goto cleanout;
5216 case PERM_GRANTED:
5217 break;
5218 case PERM_GONE:
5219 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5220 rc_node_rele(np);
5221 goto cleanout;
5222 case PERM_FAIL:
5223 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5224 rc_node_rele(np);
5225 goto cleanout;
5226 default:
5227 bad_error(rc_node_modify_permission_check, granted);
5228 }
5229 (void) pthread_mutex_lock(&np->rn_lock);
5230
5231 /*
5232 * get the latest node, holding RC_NODE_IN_TX to keep the rn_former
5233 * list from changing.
5234 */
5235 for (;;) {
5236 if (!(np->rn_flags & RC_NODE_OLD)) {
5237 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
5238 goto again;
5239 }
5240 pp = rc_node_hold_parent_flag(np,
5241 RC_NODE_CHILDREN_CHANGING);
5242
5243 (void) pthread_mutex_lock(&np->rn_lock);
5244 if (pp == NULL) {
5245 goto again;
5246 }
5247 if (np->rn_flags & RC_NODE_OLD) {
5248 rc_node_rele_flag(pp,
5249 RC_NODE_CHILDREN_CHANGING);
5250 (void) pthread_mutex_unlock(&pp->rn_lock);
5251 goto again;
5252 }
5253 (void) pthread_mutex_unlock(&pp->rn_lock);
5254
5255 if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
5256 /*
5257 * Can't happen, since we're holding our
5258 * parent's CHILDREN_CHANGING flag...
5259 */
5260 abort();
5261 }
5262 break; /* everything's ready */
5263 }
5264 again:
5265 rc_node_rele_locked(np);
5266 np = cache_lookup(&np_orig->rn_id);
5267
5268 if (np == NULL) {
5269 rc = REP_PROTOCOL_FAIL_DELETED;
5270 goto cleanout;
5271 }
5272
5273 (void) pthread_mutex_lock(&np->rn_lock);
5274 }
5275
5276 if (parentp != NULL) {
5277 if (pp != parentp) {
5278 rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
5279 goto fail;
5280 }
5281 nnp = NULL;
5282 } else {
5283 /*
5284 * look for a former node with the snapid we need.
5285 */
5286 if (np->rn_snapshot_id == snapid) {
5287 rc_node_rele_flag(np, RC_NODE_IN_TX);
5288 rc_node_rele_locked(np);
5289
5290 (void) pthread_mutex_lock(&pp->rn_lock);
5291 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5292 (void) pthread_mutex_unlock(&pp->rn_lock);
5293 rc = REP_PROTOCOL_SUCCESS; /* nothing to do */
5294 goto cleanout;
5295 }
5296
5297 prev = np;
5298 while ((nnp = prev->rn_former) != NULL) {
5299 if (nnp->rn_snapshot_id == snapid) {
5300 rc_node_hold(nnp);
5301 break; /* existing node with that id */
5302 }
5303 prev = nnp;
5304 }
5305 }
5306
5307 if (nnp == NULL) {
5308 prev = NULL;
5309 nnp = rc_node_alloc();
5310 if (nnp == NULL) {
5311 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5312 goto fail;
5313 }
5314
5315 nnp->rn_id = np->rn_id; /* structure assignment */
5316 nnp->rn_hash = np->rn_hash;
5317 nnp->rn_name = strdup(np->rn_name);
5318 nnp->rn_snapshot_id = snapid;
5319 nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
5320
5321 if (nnp->rn_name == NULL) {
5322 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5323 goto fail;
5324 }
5325 }
5326
5327 (void) pthread_mutex_unlock(&np->rn_lock);
5328
5329 rc = object_snapshot_attach(&np->rn_id, &snapid, (parentp != NULL));
5330
5331 if (parentp != NULL)
5332 nnp->rn_snapshot_id = snapid; /* fill in new snapid */
5333 else
5334 assert(nnp->rn_snapshot_id == snapid);
5335
5336 (void) pthread_mutex_lock(&np->rn_lock);
5337 if (rc != REP_PROTOCOL_SUCCESS)
5338 goto fail;
5339
5340 /*
5341 * fix up the former chain
5342 */
5343 if (prev != NULL) {
5344 prev->rn_former = nnp->rn_former;
5345 (void) pthread_mutex_lock(&nnp->rn_lock);
5346 nnp->rn_flags &= ~RC_NODE_ON_FORMER;
5347 nnp->rn_former = NULL;
5348 (void) pthread_mutex_unlock(&nnp->rn_lock);
5349 }
5350 np->rn_flags |= RC_NODE_OLD;
5351 (void) pthread_mutex_unlock(&np->rn_lock);
5352
5353 /*
5354 * replace np with nnp
5355 */
5356 rc_node_relink_child(pp, np, nnp);
5357
5358 rc_node_rele(np);
5359 smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS, &audit_data);
5360 rc = REP_PROTOCOL_SUCCESS;
5361
5362 cleanout:
5363 free(audit_data.ed_auth);
5364 free(audit_data.ed_fmri);
5365 free(audit_data.ed_snapname);
5366 return (rc);
5367
5368 fail:
5369 rc_node_rele_flag(np, RC_NODE_IN_TX);
5370 rc_node_rele_locked(np);
5371 (void) pthread_mutex_lock(&pp->rn_lock);
5372 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5373 (void) pthread_mutex_unlock(&pp->rn_lock);
5374
5375 if (nnp != NULL) {
5376 if (prev == NULL)
5377 rc_node_destroy(nnp);
5378 else
5379 rc_node_rele(nnp);
5380 }
5381
5382 free(audit_data.ed_auth);
5383 free(audit_data.ed_fmri);
5384 free(audit_data.ed_snapname);
5385 return (rc);
5386 }
5387
5388 int
5389 rc_snapshot_take_new(rc_node_ptr_t *npp, const char *svcname,
5390 const char *instname, const char *name, rc_node_ptr_t *outpp)
5391 {
5392 perm_status_t granted;
5393 rc_node_t *np;
5394 rc_node_t *outp = NULL;
5395 int rc, perm_rc;
5396 char fmri[REP_PROTOCOL_FMRI_LEN];
5397 audit_event_data_t audit_data;
5398 size_t sz_out;
5399
5400 rc_node_clear(outpp, 0);
5401
5402 /*
5403 * rc_node_modify_permission_check() must be called before the node
5404 * is locked. This is because the library functions that check
5405 * authorizations can trigger calls back into configd.
5406 */
5407 granted = rc_node_modify_permission_check(&audit_data.ed_auth);
5408 switch (granted) {
5409 case PERM_DENIED:
5410 /*
5411 * We continue in this case, so that we can generate an
5412 * audit event later in this function.
5413 */
5414 perm_rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5415 break;
5416 case PERM_GRANTED:
5417 perm_rc = REP_PROTOCOL_SUCCESS;
5418 break;
5419 case PERM_GONE:
5420 /* No need to produce audit event if client is gone. */
5421 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5422 case PERM_FAIL:
5423 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5424 default:
5425 bad_error("rc_node_modify_permission_check", granted);
5426 break;
5427 }
5428
5429 RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, audit_data.ed_auth);
5430 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5431 (void) pthread_mutex_unlock(&np->rn_lock);
5432 free(audit_data.ed_auth);
5433 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5434 }
5435
5436 rc = rc_check_type_name(REP_PROTOCOL_ENTITY_SNAPSHOT, name);
5437 if (rc != REP_PROTOCOL_SUCCESS) {
5438 (void) pthread_mutex_unlock(&np->rn_lock);
5439 free(audit_data.ed_auth);
5440 return (rc);
5441 }
5442
5443 if (svcname != NULL && (rc =
5444 rc_check_type_name(REP_PROTOCOL_ENTITY_SERVICE, svcname)) !=
5445 REP_PROTOCOL_SUCCESS) {
5446 (void) pthread_mutex_unlock(&np->rn_lock);
5447 free(audit_data.ed_auth);
5448 return (rc);
5449 }
5450
5451 if (instname != NULL && (rc =
5452 rc_check_type_name(REP_PROTOCOL_ENTITY_INSTANCE, instname)) !=
5453 REP_PROTOCOL_SUCCESS) {
5454 (void) pthread_mutex_unlock(&np->rn_lock);
5455 free(audit_data.ed_auth);
5456 return (rc);
5457 }
5458
5459 audit_data.ed_fmri = fmri;
5460 audit_data.ed_snapname = (char *)name;
5461
5462 if ((rc = rc_node_get_fmri_or_fragment(np, fmri, sizeof (fmri),
5463 &sz_out)) != REP_PROTOCOL_SUCCESS) {
5464 (void) pthread_mutex_unlock(&np->rn_lock);
5465 free(audit_data.ed_auth);
5466 return (rc);
5467 }
5468 if (perm_rc != REP_PROTOCOL_SUCCESS) {
5469 (void) pthread_mutex_unlock(&np->rn_lock);
5470 smf_audit_event(ADT_smf_create_snap, ADT_FAILURE,
5471 ADT_FAIL_VALUE_AUTH, &audit_data);
5472 free(audit_data.ed_auth);
5473 return (perm_rc);
5474 }
5475
5476 HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
5477 audit_data.ed_auth);
5478 (void) pthread_mutex_unlock(&np->rn_lock);
5479
5480 rc = object_snapshot_take_new(np, svcname, instname, name, &outp);
5481
5482 if (rc == REP_PROTOCOL_SUCCESS) {
5483 rc_node_assign(outpp, outp);
5484 rc_node_rele(outp);
5485 }
5486
5487 (void) pthread_mutex_lock(&np->rn_lock);
5488 rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
5489 (void) pthread_mutex_unlock(&np->rn_lock);
5490
5491 if (rc == REP_PROTOCOL_SUCCESS) {
5492 smf_audit_event(ADT_smf_create_snap, ADT_SUCCESS, ADT_SUCCESS,
5493 &audit_data);
5494 }
5495 if (audit_data.ed_auth != NULL)
5496 free(audit_data.ed_auth);
5497 return (rc);
5498 }
5499
5500 int
5501 rc_snapshot_take_attach(rc_node_ptr_t *npp, rc_node_ptr_t *outpp)
5502 {
5503 rc_node_t *np, *outp;
5504
5505 RC_NODE_PTR_GET_CHECK(np, npp);
5506 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5507 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5508 }
5509
5510 RC_NODE_PTR_GET_CHECK_AND_LOCK(outp, outpp);
5511 if (outp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5512 (void) pthread_mutex_unlock(&outp->rn_lock);
5513 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5514 }
5515
5516 return (rc_attach_snapshot(outp, 0, np, NULL,
5517 NULL)); /* drops outp's lock */
5518 }
5519
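/*
 * Attach the snapshot referenced by npp to the snapshot referenced by
 * cpp: npp's snapshot id (along with its name and FMRI) is captured and
 * handed to rc_attach_snapshot(), so that cpp's snapshot ends up
 * referring to the same underlying snapshot id.  Contrast with
 * rc_snapshot_take_attach() above, where no snapid is supplied and the
 * object layer fills one in.
 */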
5520 int
5521 rc_snapshot_attach(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
5522 {
5523 rc_node_t *np;
5524 rc_node_t *cp;
5525 uint32_t snapid;
5526 char old_name[REP_PROTOCOL_NAME_LEN];
5527 int rc;
5528 size_t sz_out;
5529 char old_fmri[REP_PROTOCOL_FMRI_LEN];
5530
5531 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5532 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5533 (void) pthread_mutex_unlock(&np->rn_lock);
5534 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5535 }
5536 snapid = np->rn_snapshot_id;
5537 rc = rc_node_get_fmri_or_fragment(np, old_fmri, sizeof (old_fmri),
5538 &sz_out);
5539 (void) pthread_mutex_unlock(&np->rn_lock);
5540 if (rc != REP_PROTOCOL_SUCCESS)
5541 return (rc);
5542 if (np->rn_name != NULL) {
5543 if (strlcpy(old_name, np->rn_name, sizeof (old_name)) >=
5544 sizeof (old_name)) {
5545 return (REP_PROTOCOL_FAIL_TRUNCATED);
5546 }
5547 }
5548
5549 RC_NODE_PTR_GET_CHECK_AND_LOCK(cp, cpp);
5550 if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5551 (void) pthread_mutex_unlock(&cp->rn_lock);
5552 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5553 }
5554
5555 rc = rc_attach_snapshot(cp, snapid, NULL,
5556 old_fmri, old_name); /* drops cp's lock */
5557 return (rc);
5558 }
5559
5560 /*
5561 * If the pgname property group under ent has type pgtype, and it has a
5562 * propname property with type ptype, return _SUCCESS. If pgtype is NULL,
5563 * it is not checked. If ent is not a service node, we will return _SUCCESS if
5564 * a property meeting the requirements exists in either ent itself or its
5565 * parent service.
5566 *
5567 * Returns
5568 * _SUCCESS - see above
5569 * _DELETED - ent or one of its ancestors was deleted
5570 * _NO_RESOURCES - no resources
5571 * _NOT_FOUND - no matching property was found
5572 */
5573 static int
5574 rc_svc_prop_exists(rc_node_t *ent, const char *pgname, const char *pgtype,
5575 const char *propname, rep_protocol_value_type_t ptype)
5576 {
5577 int ret;
5578 rc_node_t *pg = NULL, *spg = NULL, *svc, *prop;
5579
5580 assert(!MUTEX_HELD(&ent->rn_lock));
5581
5582 (void) pthread_mutex_lock(&ent->rn_lock);
5583 ret = rc_node_find_named_child(ent, pgname,
5584 REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
5585 (void) pthread_mutex_unlock(&ent->rn_lock);
5586
5587 switch (ret) {
5588 case REP_PROTOCOL_SUCCESS:
5589 break;
5590
5591 case REP_PROTOCOL_FAIL_DELETED:
5592 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5593 return (ret);
5594
5595 default:
5596 bad_error("rc_node_find_named_child", ret);
5597 }
5598
5599 if (ent->rn_id.rl_type != REP_PROTOCOL_ENTITY_SERVICE) {
5600 ret = rc_node_find_ancestor(ent, REP_PROTOCOL_ENTITY_SERVICE,
5601 &svc);
5602 if (ret != REP_PROTOCOL_SUCCESS) {
5603 assert(ret == REP_PROTOCOL_FAIL_DELETED);
5604 if (pg != NULL)
5605 rc_node_rele(pg);
5606 return (ret);
5607 }
5608 assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);
5609
5610 (void) pthread_mutex_lock(&svc->rn_lock);
5611 ret = rc_node_find_named_child(svc, pgname,
5612 REP_PROTOCOL_ENTITY_PROPERTYGRP, &spg);
5613 (void) pthread_mutex_unlock(&svc->rn_lock);
5614
5615 rc_node_rele(svc);
5616
5617 switch (ret) {
5618 case REP_PROTOCOL_SUCCESS:
5619 break;
5620
5621 case REP_PROTOCOL_FAIL_DELETED:
5622 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5623 if (pg != NULL)
5624 rc_node_rele(pg);
5625 return (ret);
5626
5627 default:
5628 bad_error("rc_node_find_named_child", ret);
5629 }
5630 }
5631
5632 if (pg != NULL &&
5633 pgtype != NULL && strcmp(pg->rn_type, pgtype) != 0) {
5634 rc_node_rele(pg);
5635 pg = NULL;
5636 }
5637
5638 if (spg != NULL &&
5639 pgtype != NULL && strcmp(spg->rn_type, pgtype) != 0) {
5640 rc_node_rele(spg);
5641 spg = NULL;
5642 }
5643
5644 if (pg == NULL) {
5645 if (spg == NULL)
5646 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5647 pg = spg;
5648 spg = NULL;
5649 }
5650
5651 /*
5652 * At this point, pg is non-NULL, and is a property group node of the
5653 * correct type. spg, if non-NULL, is also a property group node of
5654 * the correct type. Check for the property in pg first, then spg
5655 * (if applicable).
5656 */
5657 (void) pthread_mutex_lock(&pg->rn_lock);
5658 ret = rc_node_find_named_child(pg, propname,
5659 REP_PROTOCOL_ENTITY_PROPERTY, &prop);
5660 (void) pthread_mutex_unlock(&pg->rn_lock);
5661 rc_node_rele(pg);
5662 switch (ret) {
5663 case REP_PROTOCOL_SUCCESS:
5664 if (prop != NULL) {
5665 if (prop->rn_valtype == ptype) {
5666 rc_node_rele(prop);
5667 if (spg != NULL)
5668 rc_node_rele(spg);
5669 return (REP_PROTOCOL_SUCCESS);
5670 }
5671 rc_node_rele(prop);
5672 }
5673 break;
5674
5675 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5676 if (spg != NULL)
5677 rc_node_rele(spg);
5678 return (ret);
5679
5680 case REP_PROTOCOL_FAIL_DELETED:
5681 break;
5682
5683 default:
5684 bad_error("rc_node_find_named_child", ret);
5685 }
5686
5687 if (spg == NULL)
5688 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5689
5690 pg = spg;
5691
5692 (void) pthread_mutex_lock(&pg->rn_lock);
5693 ret = rc_node_find_named_child(pg, propname,
5694 REP_PROTOCOL_ENTITY_PROPERTY, &prop);
5695 (void) pthread_mutex_unlock(&pg->rn_lock);
5696 rc_node_rele(pg);
5697 switch (ret) {
5698 case REP_PROTOCOL_SUCCESS:
5699 if (prop != NULL) {
5700 if (prop->rn_valtype == ptype) {
5701 rc_node_rele(prop);
5702 return (REP_PROTOCOL_SUCCESS);
5703 }
5704 rc_node_rele(prop);
5705 }
5706 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5707
5708 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5709 return (ret);
5710
5711 case REP_PROTOCOL_FAIL_DELETED:
5712 return (REP_PROTOCOL_FAIL_NOT_FOUND);
5713
5714 default:
5715 bad_error("rc_node_find_named_child", ret);
5716 }
5717
5718 return (REP_PROTOCOL_SUCCESS);
5719 }
5720
5721 /*
5722 * Given a property group node, returns _SUCCESS if the property group may
5723 * be read without any special authorization.
5724 *
5725 * Fails with:
5726 * _DELETED - np or an ancestor node was deleted
5727 * _TYPE_MISMATCH - np does not refer to a property group
5728 * _NO_RESOURCES - no resources
5729 * _PERMISSION_DENIED - authorization is required
5730 */
5731 static int
5732 rc_node_pg_check_read_protect(rc_node_t *np)
5733 {
5734 int ret;
5735 rc_node_t *ent;
5736
5737 assert(!MUTEX_HELD(&np->rn_lock));
5738
5739 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
5740 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5741
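/*
 * framework, dependency, and method property groups are always readable
 * without special authorization.  Any other pg type is considered
 * read-protected exactly when an AUTH_PROP_READ string property exists
 * for it, which is what the rc_svc_prop_exists() call below detects.
 */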
5742 if (strcmp(np->rn_type, SCF_GROUP_FRAMEWORK) == 0 ||
5743 strcmp(np->rn_type, SCF_GROUP_DEPENDENCY) == 0 ||
5744 strcmp(np->rn_type, SCF_GROUP_METHOD) == 0)
5745 return (REP_PROTOCOL_SUCCESS);
5746
5747 ret = rc_node_parent(np, &ent);
5748
5749 if (ret != REP_PROTOCOL_SUCCESS)
5750 return (ret);
5751
5752 ret = rc_svc_prop_exists(ent, np->rn_name, np->rn_type,
5753 AUTH_PROP_READ, REP_PROTOCOL_TYPE_STRING);
5754
5755 rc_node_rele(ent);
5756
5757 switch (ret) {
5758 case REP_PROTOCOL_FAIL_NOT_FOUND:
5759 return (REP_PROTOCOL_SUCCESS);
5760 case REP_PROTOCOL_SUCCESS:
5761 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5762 case REP_PROTOCOL_FAIL_DELETED:
5763 case REP_PROTOCOL_FAIL_NO_RESOURCES:
5764 return (ret);
5765 default:
5766 bad_error("rc_svc_prop_exists", ret);
5767 }
5768
5769 return (REP_PROTOCOL_SUCCESS);
5770 }
5771
5772 /*
5773 * Fails with
5774 * _DELETED - np's node or parent has been deleted
5775 * _TYPE_MISMATCH - np's node is not a property
5776 * _NO_RESOURCES - out of memory
5777 * _PERMISSION_DENIED - no authorization to read this property's value(s)
5778 * _BAD_REQUEST - np's parent is not a property group
5779 */
5780 static int
5781 rc_node_property_may_read(rc_node_t *np)
5782 {
5783 int ret;
5784 perm_status_t granted = PERM_DENIED;
5785 rc_node_t *pgp;
5786 permcheck_t *pcp;
5787 audit_event_data_t audit_data;
5788 size_t sz_out;
5789
5790 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
5791 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5792
5793 if (client_is_privileged())
5794 return (REP_PROTOCOL_SUCCESS);
5795
5796 #ifdef NATIVE_BUILD
5797 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5798 #else
5799 ret = rc_node_parent(np, &pgp);
5800
5801 if (ret != REP_PROTOCOL_SUCCESS)
5802 return (ret);
5803
5804 if (pgp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
5805 rc_node_rele(pgp);
5806 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5807 }
5808
5809 ret = rc_node_pg_check_read_protect(pgp);
5810
5811 if (ret != REP_PROTOCOL_FAIL_PERMISSION_DENIED) {
5812 rc_node_rele(pgp);
5813 return (ret);
5814 }
5815
5816 pcp = pc_create();
5817
5818 if (pcp == NULL) {
5819 rc_node_rele(pgp);
5820 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5821 }
5822
5823 ret = perm_add_enabling(pcp, AUTH_MODIFY);
5824
5825 if (ret == REP_PROTOCOL_SUCCESS) {
5826 const char * const auth =
5827 perm_auth_for_pgtype(pgp->rn_type);
5828
5829 if (auth != NULL)
5830 ret = perm_add_enabling(pcp, auth);
5831 }
5832
5833 /*
5834 * If you are permitted to modify the value, you may also
5835 * read it. This means that both the MODIFY and VALUE
5836 * authorizations are acceptable. We don't allow requests
5837 * for AUTH_PROP_MODIFY if all you have is $AUTH_PROP_VALUE,
5838 * however, to avoid leaking possibly valuable information
5839 * since such a user can't change the property anyway.
5840 */
5841 if (ret == REP_PROTOCOL_SUCCESS)
5842 ret = perm_add_enabling_values(pcp, pgp,
5843 AUTH_PROP_MODIFY);
5844
5845 if (ret == REP_PROTOCOL_SUCCESS &&
5846 strcmp(np->rn_name, AUTH_PROP_MODIFY) != 0)
5847 ret = perm_add_enabling_values(pcp, pgp,
5848 AUTH_PROP_VALUE);
5849
5850 if (ret == REP_PROTOCOL_SUCCESS)
5851 ret = perm_add_enabling_values(pcp, pgp,
5852 AUTH_PROP_READ);
5853
5854 rc_node_rele(pgp);
5855
5856 if (ret == REP_PROTOCOL_SUCCESS) {
5857 granted = perm_granted(pcp);
5858 if (granted == PERM_FAIL)
5859 ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5860 if (granted == PERM_GONE)
5861 ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5862 }
5863
5864 if (ret == REP_PROTOCOL_SUCCESS) {
5865 /* Generate a read_prop audit event. */
5866 audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
5867 if (audit_data.ed_fmri == NULL)
5868 ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5869 }
5870 if (ret == REP_PROTOCOL_SUCCESS) {
5871 ret = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
5872 REP_PROTOCOL_FMRI_LEN, &sz_out);
5873 }
5874 if (ret == REP_PROTOCOL_SUCCESS) {
5875 int status;
5876 int ret_value;
5877
5878 if (granted == PERM_DENIED) {
5879 status = ADT_FAILURE;
5880 ret_value = ADT_FAIL_VALUE_AUTH;
5881 } else {
5882 status = ADT_SUCCESS;
5883 ret_value = ADT_SUCCESS;
5884 }
5885 audit_data.ed_auth = pcp->pc_auth_string;
5886 smf_audit_event(ADT_smf_read_prop,
5887 status, ret_value, &audit_data);
5888 }
5889 free(audit_data.ed_fmri);
5890
5891 pc_free(pcp);
5892
5893 if ((ret == REP_PROTOCOL_SUCCESS) && (granted == PERM_DENIED))
5894 ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5895
5896 return (ret);
5897 #endif /* NATIVE_BUILD */
5898 }
5899
5900 /*
5901 * Iteration
5902 */
5903 static int
5904 rc_iter_filter_name(rc_node_t *np, void *s)
5905 {
5906 const char *name = s;
5907
5908 return (strcmp(np->rn_name, name) == 0);
5909 }
5910
5911 static int
5912 rc_iter_filter_type(rc_node_t *np, void *s)
5913 {
5914 const char *type = s;
5915
5916 return (np->rn_type != NULL && strcmp(np->rn_type, type) == 0);
5917 }
5918
5919 /*ARGSUSED*/
5920 static int
5921 rc_iter_null_filter(rc_node_t *np, void *s)
5922 {
5923 return (1);
5924 }
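/*
 * The filters above are selected by rc_node_setup_iter():
 * RP_ITER_START_EXACT uses rc_iter_filter_name (match on rn_name),
 * RP_ITER_START_PGTYPE uses rc_iter_filter_type (match on rn_type), and
 * RP_ITER_START_ALL passes NULL, which rc_iter_create() replaces with
 * rc_iter_null_filter.
 */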
5925
5926 /*
5927 * Allocate & initialize an rc_node_iter_t structure. Essentially, ensure
5928 * np->rn_children is populated and call uu_list_walk_start(np->rn_children).
5929 * If successful, leaves a hold on np & increments np->rn_other_refs
5930 *
5931 * If composed is true, then set up for iteration across the top level of np's
5932 * composition chain. If successful, leaves a hold on np and increments
5933 * rn_other_refs for the top level of np's composition chain.
5934 *
5935 * Fails with
5936 * _NO_RESOURCES
5937 * _INVALID_TYPE
5938 * _TYPE_MISMATCH - np cannot carry type children
5939 * _DELETED
5940 */
5941 static int
5942 rc_iter_create(rc_node_iter_t **resp, rc_node_t *np, uint32_t type,
5943 rc_iter_filter_func *filter, void *arg, boolean_t composed)
5944 {
5945 rc_node_iter_t *nip;
5946 int res;
5947
5948 assert(*resp == NULL);
5949
5950 nip = uu_zalloc(sizeof (*nip));
5951 if (nip == NULL)
5952 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5953
5954 /* np is held by the client's rc_node_ptr_t */
5955 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
5956 composed = 1;
5957
5958 if (!composed) {
5959 (void) pthread_mutex_lock(&np->rn_lock);
5960
5961 if ((res = rc_node_fill_children(np, type)) !=
5962 REP_PROTOCOL_SUCCESS) {
5963 (void) pthread_mutex_unlock(&np->rn_lock);
5964 uu_free(nip);
5965 return (res);
5966 }
5967
5968 nip->rni_clevel = -1;
5969
5970 nip->rni_iter = uu_list_walk_start(np->rn_children,
5971 UU_WALK_ROBUST);
5972 if (nip->rni_iter != NULL) {
5973 nip->rni_iter_node = np;
5974 rc_node_hold_other(np);
5975 } else {
5976 (void) pthread_mutex_unlock(&np->rn_lock);
5977 uu_free(nip);
5978 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5979 }
5980 (void) pthread_mutex_unlock(&np->rn_lock);
5981 } else {
5982 rc_node_t *ent;
5983
5984 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
5985 /* rn_cchain isn't valid until children are loaded. */
5986 (void) pthread_mutex_lock(&np->rn_lock);
5987 res = rc_node_fill_children(np,
5988 REP_PROTOCOL_ENTITY_SNAPLEVEL);
5989 (void) pthread_mutex_unlock(&np->rn_lock);
5990 if (res != REP_PROTOCOL_SUCCESS) {
5991 uu_free(nip);
5992 return (res);
5993 }
5994
5995 /* Check for an empty snapshot. */
5996 if (np->rn_cchain[0] == NULL)
5997 goto empty;
5998 }
5999
6000 /* Start at the top of the composition chain. */
6001 for (nip->rni_clevel = 0; ; ++nip->rni_clevel) {
6002 if (nip->rni_clevel >= COMPOSITION_DEPTH) {
6003 /* Empty composition chain. */
6004 empty:
6005 nip->rni_clevel = -1;
6006 nip->rni_iter = NULL;
6007 /* It's ok, iter_next() will return _DONE. */
6008 goto out;
6009 }
6010
6011 ent = np->rn_cchain[nip->rni_clevel];
6012 assert(ent != NULL);
6013
6014 if (rc_node_check_and_lock(ent) == REP_PROTOCOL_SUCCESS)
6015 break;
6016
6017 /* Someone deleted it, so try the next one. */
6018 }
6019
6020 res = rc_node_fill_children(ent, type);
6021
6022 if (res == REP_PROTOCOL_SUCCESS) {
6023 nip->rni_iter = uu_list_walk_start(ent->rn_children,
6024 UU_WALK_ROBUST);
6025
6026 if (nip->rni_iter == NULL)
6027 res = REP_PROTOCOL_FAIL_NO_RESOURCES;
6028 else {
6029 nip->rni_iter_node = ent;
6030 rc_node_hold_other(ent);
6031 }
6032 }
6033
6034 if (res != REP_PROTOCOL_SUCCESS) {
6035 (void) pthread_mutex_unlock(&ent->rn_lock);
6036 uu_free(nip);
6037 return (res);
6038 }
6039
6040 (void) pthread_mutex_unlock(&ent->rn_lock);
6041 }
6042
6043 out:
6044 rc_node_hold(np); /* released by rc_iter_end() */
6045 nip->rni_parent = np;
6046 nip->rni_type = type;
6047 nip->rni_filter = (filter != NULL)? filter : rc_iter_null_filter;
6048 nip->rni_filter_arg = arg;
6049 *resp = nip;
6050 return (REP_PROTOCOL_SUCCESS);
6051 }
6052
6053 static void
6054 rc_iter_end(rc_node_iter_t *iter)
6055 {
6056 rc_node_t *np = iter->rni_parent;
6057
6058 if (iter->rni_clevel >= 0)
6059 np = np->rn_cchain[iter->rni_clevel];
6060
6061 assert(MUTEX_HELD(&np->rn_lock));
6062 if (iter->rni_iter != NULL)
6063 uu_list_walk_end(iter->rni_iter);
6064 iter->rni_iter = NULL;
6065
6066 (void) pthread_mutex_unlock(&np->rn_lock);
6067 rc_node_rele(iter->rni_parent);
6068 if (iter->rni_iter_node != NULL)
6069 rc_node_rele_other(iter->rni_iter_node);
6070 }
6071
6072 /*
6073 * Fails with
6074 * _NOT_SET - npp is reset
6075 * _DELETED - npp's node has been deleted
6076 * _NOT_APPLICABLE - npp's node is not a property
6077 * _NO_RESOURCES - out of memory
6078 */
6079 static int
6080 rc_node_setup_value_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp)
6081 {
6082 rc_node_t *np;
6083
6084 rc_node_iter_t *nip;
6085
6086 assert(*iterp == NULL);
6087
6088 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
6089
6090 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
6091 (void) pthread_mutex_unlock(&np->rn_lock);
6092 return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6093 }
6094
6095 nip = uu_zalloc(sizeof (*nip));
6096 if (nip == NULL) {
6097 (void) pthread_mutex_unlock(&np->rn_lock);
6098 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6099 }
6100
6101 nip->rni_parent = np;
6102 nip->rni_iter = NULL;
6103 nip->rni_clevel = -1;
6104 nip->rni_type = REP_PROTOCOL_ENTITY_VALUE;
6105 nip->rni_offset = 0;
6106 nip->rni_last_offset = 0;
6107
6108 rc_node_hold_locked(np);
6109
6110 *iterp = nip;
6111 (void) pthread_mutex_unlock(&np->rn_lock);
6112
6113 return (REP_PROTOCOL_SUCCESS);
6114 }
6115
6116 /*
6117 * Returns:
6118 * _NO_RESOURCES - out of memory
6119 * _NOT_SET - npp is reset
6120 * _DELETED - npp's node has been deleted
6121 * _TYPE_MISMATCH - npp's node is not a property
6122 * _NOT_FOUND - property has no values
6123 * _TRUNCATED - property has >1 values (first is written into out)
6124 * _SUCCESS - property has 1 value (which is written into out)
6125 * _PERMISSION_DENIED - no authorization to read property value(s)
6126 *
6127 * We shorten *sz_out to not include anything after the final '\0'.
6128 */
6129 int
6130 rc_node_get_property_value(rc_node_ptr_t *npp,
6131 struct rep_protocol_value_response *out, size_t *sz_out)
6132 {
6133 rc_node_t *np;
6134 size_t w;
6135 int ret;
6136
6137 assert(*sz_out == sizeof (*out));
6138
6139 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
6140 ret = rc_node_property_may_read(np);
6141 rc_node_rele(np);
6142
6143 if (ret != REP_PROTOCOL_SUCCESS)
6144 return (ret);
6145
6146 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
6147
6148 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
6149 (void) pthread_mutex_unlock(&np->rn_lock);
6150 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6151 }
6152
6153 if (np->rn_values_size == 0) {
6154 (void) pthread_mutex_unlock(&np->rn_lock);
6155 return (REP_PROTOCOL_FAIL_NOT_FOUND);
6156 }
6157 out->rpr_type = np->rn_valtype;
6158 w = strlcpy(out->rpr_value, &np->rn_values[0],
6159 sizeof (out->rpr_value));
6160
6161 if (w >= sizeof (out->rpr_value))
6162 backend_panic("value too large");
6163
6164 *sz_out = offsetof(struct rep_protocol_value_response,
6165 rpr_value[w + 1]);
6166
6167 ret = (np->rn_values_count != 1)? REP_PROTOCOL_FAIL_TRUNCATED :
6168 REP_PROTOCOL_SUCCESS;
6169 (void) pthread_mutex_unlock(&np->rn_lock);
6170 return (ret);
6171 }
6172
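/*
 * Property values live in a single packed buffer: rn_values holds the
 * values back to back, each terminated by '\0', rn_values_size is the
 * total size in bytes and rn_values_count the number of values.  The
 * value iterator below just walks byte offsets into that buffer.  A
 * minimal sketch, assuming a property with the two values "a" and "bc":
 *
 *	rn_values	= "a\0bc\0"
 *	rn_values_size	= 5
 *	rn_values_count	= 2
 *	offsets visited	= 0, then 2, then 5 (>= size, so _DONE)
 */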
6173 int
6174 rc_iter_next_value(rc_node_iter_t *iter,
6175 struct rep_protocol_value_response *out, size_t *sz_out, int repeat)
6176 {
6177 rc_node_t *np = iter->rni_parent;
6178 const char *vals;
6179 size_t len;
6180
6181 size_t start;
6182 size_t w;
6183 int ret;
6184
6185 rep_protocol_responseid_t result;
6186
6187 assert(*sz_out == sizeof (*out));
6188
6189 (void) memset(out, '\0', *sz_out);
6190
6191 if (iter->rni_type != REP_PROTOCOL_ENTITY_VALUE)
6192 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6193
6194 RC_NODE_CHECK(np);
6195 ret = rc_node_property_may_read(np);
6196
6197 if (ret != REP_PROTOCOL_SUCCESS)
6198 return (ret);
6199
6200 RC_NODE_CHECK_AND_LOCK(np);
6201
6202 vals = np->rn_values;
6203 len = np->rn_values_size;
6204
6205 out->rpr_type = np->rn_valtype;
6206
6207 start = (repeat)? iter->rni_last_offset : iter->rni_offset;
6208
6209 if (len == 0 || start >= len) {
6210 result = REP_PROTOCOL_DONE;
6211 *sz_out -= sizeof (out->rpr_value);
6212 } else {
6213 w = strlcpy(out->rpr_value, &vals[start],
6214 sizeof (out->rpr_value));
6215
6216 if (w >= sizeof (out->rpr_value))
6217 backend_panic("value too large");
6218
6219 *sz_out = offsetof(struct rep_protocol_value_response,
6220 rpr_value[w + 1]);
6221
6222 /*
6223 * update the offsets if we're not repeating
6224 */
6225 if (!repeat) {
6226 iter->rni_last_offset = iter->rni_offset;
6227 iter->rni_offset += (w + 1);
6228 }
6229
6230 result = REP_PROTOCOL_SUCCESS;
6231 }
6232
6233 (void) pthread_mutex_unlock(&np->rn_lock);
6234 return (result);
6235 }
6236
6237 /*
6238 * Entry point for ITER_START from client.c. Validate the arguments & call
6239 * rc_iter_create().
6240 *
6241 * Fails with
6242 * _NOT_SET
6243 * _DELETED
6244 * _TYPE_MISMATCH - np cannot carry type children
6245 * _BAD_REQUEST - flags is invalid
6246 * pattern is invalid
6247 * _NO_RESOURCES
6248 * _INVALID_TYPE
6249 * _TYPE_MISMATCH - *npp cannot have children of type
6250 * _BACKEND_ACCESS
6251 */
6252 int
6253 rc_node_setup_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp,
6254 uint32_t type, uint32_t flags, const char *pattern)
6255 {
6256 rc_node_t *np;
6257 rc_iter_filter_func *f = NULL;
6258 int rc;
6259
6260 RC_NODE_PTR_GET_CHECK(np, npp);
6261
6262 if (pattern != NULL && pattern[0] == '\0')
6263 pattern = NULL;
6264
6265 if (type == REP_PROTOCOL_ENTITY_VALUE) {
6266 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
6267 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6268 if (flags != RP_ITER_START_ALL || pattern != NULL)
6269 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6270
6271 rc = rc_node_setup_value_iter(npp, iterp);
6272 assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6273 return (rc);
6274 }
6275
6276 if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
6277 REP_PROTOCOL_SUCCESS)
6278 return (rc);
6279
6280 if (((flags & RP_ITER_START_FILT_MASK) == RP_ITER_START_ALL) ^
6281 (pattern == NULL))
6282 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6283
6284 /* Composition only works for instances & snapshots. */
6285 if ((flags & RP_ITER_START_COMPOSED) &&
6286 (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE &&
6287 np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT))
6288 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6289
6290 if (pattern != NULL) {
6291 if ((rc = rc_check_type_name(type, pattern)) !=
6292 REP_PROTOCOL_SUCCESS)
6293 return (rc);
6294 pattern = strdup(pattern);
6295 if (pattern == NULL)
6296 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6297 }
6298
6299 switch (flags & RP_ITER_START_FILT_MASK) {
6300 case RP_ITER_START_ALL:
6301 f = NULL;
6302 break;
6303 case RP_ITER_START_EXACT:
6304 f = rc_iter_filter_name;
6305 break;
6306 case RP_ITER_START_PGTYPE:
6307 if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
6308 free((void *)pattern);
6309 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6310 }
6311 f = rc_iter_filter_type;
6312 break;
6313 default:
6314 free((void *)pattern);
6315 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6316 }
6317
6318 rc = rc_iter_create(iterp, np, type, f, (void *)pattern,
6319 flags & RP_ITER_START_COMPOSED);
6320 if (rc != REP_PROTOCOL_SUCCESS && pattern != NULL)
6321 free((void *)pattern);
6322
6323 return (rc);
6324 }
6325
6326 /*
6327 * Do uu_list_walk_next(iter->rni_iter) until we find a child which matches
6328 * the filter.
6329 * For composed iterators, also check whether there is an overlapping entity
6330 * (see embedded comments). If we reach the end of the list, start over at
6331 * the next level.
6332 *
6333 * Returns
6334 * _BAD_REQUEST - iter walks values
6335 * _TYPE_MISMATCH - iter does not walk type entities
6336 * _DELETED - parent was deleted
6337 * _NO_RESOURCES
6338 * _INVALID_TYPE - type is invalid
6339 * _DONE
6340 * _SUCCESS
6341 *
6342 * For composed property group iterators, can also return
6343 * _TYPE_MISMATCH - parent cannot have type children
6344 */
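/*
 * A rough example of the composed behaviour (COMPOSITION_DEPTH == 2,
 * assuming rn_cchain[0] is the instance and rn_cchain[1] its service):
 * a pg defined only on the service is returned as-is; a pg defined at
 * both levels is returned once, as a composed pg built while walking
 * level 0, and the service-level copy is skipped when the walk reaches
 * level 1.
 */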
6345 int
6346 rc_iter_next(rc_node_iter_t *iter, rc_node_ptr_t *out, uint32_t type)
6347 {
6348 rc_node_t *np = iter->rni_parent;
6349 rc_node_t *res;
6350 int rc;
6351
6352 if (iter->rni_type == REP_PROTOCOL_ENTITY_VALUE)
6353 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6354
6355 if (iter->rni_iter == NULL) {
6356 rc_node_clear(out, 0);
6357 return (REP_PROTOCOL_DONE);
6358 }
6359
6360 if (iter->rni_type != type) {
6361 rc_node_clear(out, 0);
6362 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6363 }
6364
6365 (void) pthread_mutex_lock(&np->rn_lock); /* held by _iter_create() */
6366
6367 if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
6368 (void) pthread_mutex_unlock(&np->rn_lock);
6369 rc_node_clear(out, 1);
6370 return (REP_PROTOCOL_FAIL_DELETED);
6371 }
6372
6373 if (iter->rni_clevel >= 0) {
6374 /* Composed iterator. Iterate over appropriate level. */
6375 (void) pthread_mutex_unlock(&np->rn_lock);
6376 np = np->rn_cchain[iter->rni_clevel];
6377 /*
6378 * If iter->rni_parent is an instance or a snapshot, np must
6379 * be valid since iter holds iter->rni_parent & possible
6380 * levels (service, instance, snaplevel) cannot be destroyed
6381 * while rni_parent is held. If iter->rni_parent is
6382 * a composed property group then rc_node_setup_cpg() put
6383 * a hold on np.
6384 */
6385
6386 (void) pthread_mutex_lock(&np->rn_lock);
6387
6388 if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
6389 (void) pthread_mutex_unlock(&np->rn_lock);
6390 rc_node_clear(out, 1);
6391 return (REP_PROTOCOL_FAIL_DELETED);
6392 }
6393 }
6394
6395 assert(np->rn_flags & RC_NODE_HAS_CHILDREN);
6396
6397 for (;;) {
6398 res = uu_list_walk_next(iter->rni_iter);
6399 if (res == NULL) {
6400 rc_node_t *parent = iter->rni_parent;
6401
6402 #if COMPOSITION_DEPTH == 2
6403 if (iter->rni_clevel < 0 || iter->rni_clevel == 1) {
6404 /* release walker and lock */
6405 rc_iter_end(iter);
6406 break;
6407 }
6408
6409 /* Stop walking current level. */
6410 uu_list_walk_end(iter->rni_iter);
6411 iter->rni_iter = NULL;
6412 (void) pthread_mutex_unlock(&np->rn_lock);
6413 rc_node_rele_other(iter->rni_iter_node);
6414 iter->rni_iter_node = NULL;
6415
6416 /* Start walking next level. */
6417 ++iter->rni_clevel;
6418 np = parent->rn_cchain[iter->rni_clevel];
6419 assert(np != NULL);
6420 #else
6421 #error This code must be updated.
6422 #endif
6423
6424 (void) pthread_mutex_lock(&np->rn_lock);
6425
6426 rc = rc_node_fill_children(np, iter->rni_type);
6427
6428 if (rc == REP_PROTOCOL_SUCCESS) {
6429 iter->rni_iter =
6430 uu_list_walk_start(np->rn_children,
6431 UU_WALK_ROBUST);
6432
6433 if (iter->rni_iter == NULL)
6434 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
6435 else {
6436 iter->rni_iter_node = np;
6437 rc_node_hold_other(np);
6438 }
6439 }
6440
6441 if (rc != REP_PROTOCOL_SUCCESS) {
6442 (void) pthread_mutex_unlock(&np->rn_lock);
6443 rc_node_clear(out, 0);
6444 return (rc);
6445 }
6446
6447 continue;
6448 }
6449
6450 if (res->rn_id.rl_type != type ||
6451 !iter->rni_filter(res, iter->rni_filter_arg))
6452 continue;
6453
6454 /*
6455 * If we're composed and not at the top level, check to see if
6456 * there's an entity at a higher level with the same name. If
6457 * so, skip this one.
6458 */
6459 if (iter->rni_clevel > 0) {
6460 rc_node_t *ent = iter->rni_parent->rn_cchain[0];
6461 rc_node_t *pg;
6462
6463 #if COMPOSITION_DEPTH == 2
6464 assert(iter->rni_clevel == 1);
6465
6466 (void) pthread_mutex_unlock(&np->rn_lock);
6467 (void) pthread_mutex_lock(&ent->rn_lock);
6468 rc = rc_node_find_named_child(ent, res->rn_name, type,
6469 &pg);
6470 if (rc == REP_PROTOCOL_SUCCESS && pg != NULL)
6471 rc_node_rele(pg);
6472 (void) pthread_mutex_unlock(&ent->rn_lock);
6473 if (rc != REP_PROTOCOL_SUCCESS) {
6474 rc_node_clear(out, 0);
6475 return (rc);
6476 }
6477 (void) pthread_mutex_lock(&np->rn_lock);
6478
6479 /* Make sure np isn't being deleted all of a sudden. */
6480 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6481 (void) pthread_mutex_unlock(&np->rn_lock);
6482 rc_node_clear(out, 1);
6483 return (REP_PROTOCOL_FAIL_DELETED);
6484 }
6485
6486 if (pg != NULL)
6487 /* Keep going. */
6488 continue;
6489 #else
6490 #error This code must be updated.
6491 #endif
6492 }
6493
6494 /*
6495 * If we're composed, iterating over property groups, and not
6496 * at the bottom level, check to see if there's a pg at lower
6497 * level with the same name. If so, return a cpg.
6498 */
6499 if (iter->rni_clevel >= 0 &&
6500 type == REP_PROTOCOL_ENTITY_PROPERTYGRP &&
6501 iter->rni_clevel < COMPOSITION_DEPTH - 1) {
6502 #if COMPOSITION_DEPTH == 2
6503 rc_node_t *pg;
6504 rc_node_t *ent = iter->rni_parent->rn_cchain[1];
6505
6506 rc_node_hold(res); /* While we drop np->rn_lock */
6507
6508 (void) pthread_mutex_unlock(&np->rn_lock);
6509 (void) pthread_mutex_lock(&ent->rn_lock);
6510 rc = rc_node_find_named_child(ent, res->rn_name, type,
6511 &pg);
6512 /* holds pg if not NULL */
6513 (void) pthread_mutex_unlock(&ent->rn_lock);
6514 if (rc != REP_PROTOCOL_SUCCESS) {
6515 rc_node_rele(res);
6516 rc_node_clear(out, 0);
6517 return (rc);
6518 }
6519
6520 (void) pthread_mutex_lock(&np->rn_lock);
6521 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6522 (void) pthread_mutex_unlock(&np->rn_lock);
6523 rc_node_rele(res);
6524 if (pg != NULL)
6525 rc_node_rele(pg);
6526 rc_node_clear(out, 1);
6527 return (REP_PROTOCOL_FAIL_DELETED);
6528 }
6529
6530 if (pg == NULL) {
6531 (void) pthread_mutex_unlock(&np->rn_lock);
6532 rc_node_rele(res);
6533 (void) pthread_mutex_lock(&np->rn_lock);
6534 if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6535 (void) pthread_mutex_unlock(&np->
6536 rn_lock);
6537 rc_node_clear(out, 1);
6538 return (REP_PROTOCOL_FAIL_DELETED);
6539 }
6540 } else {
6541 rc_node_t *cpg;
6542
6543 /* Keep res held for rc_node_setup_cpg(). */
6544
6545 cpg = rc_node_alloc();
6546 if (cpg == NULL) {
6547 (void) pthread_mutex_unlock(
6548 &np->rn_lock);
6549 rc_node_rele(res);
6550 rc_node_rele(pg);
6551 rc_node_clear(out, 0);
6552 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6553 }
6554
6555 switch (rc_node_setup_cpg(cpg, res, pg)) {
6556 case REP_PROTOCOL_SUCCESS:
6557 res = cpg;
6558 break;
6559
6560 case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
6561 /* Nevermind. */
6562 (void) pthread_mutex_unlock(&np->
6563 rn_lock);
6564 rc_node_destroy(cpg);
6565 rc_node_rele(pg);
6566 rc_node_rele(res);
6567 (void) pthread_mutex_lock(&np->
6568 rn_lock);
6569 if (!rc_node_wait_flag(np,
6570 RC_NODE_DYING)) {
6571 (void) pthread_mutex_unlock(&
6572 np->rn_lock);
6573 rc_node_clear(out, 1);
6574 return
6575 (REP_PROTOCOL_FAIL_DELETED);
6576 }
6577 break;
6578
6579 case REP_PROTOCOL_FAIL_NO_RESOURCES:
6580 rc_node_destroy(cpg);
6581 (void) pthread_mutex_unlock(
6582 &np->rn_lock);
6583 rc_node_rele(res);
6584 rc_node_rele(pg);
6585 rc_node_clear(out, 0);
6586 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6587
6588 default:
6589 assert(0);
6590 abort();
6591 }
6592 }
6593 #else
6594 #error This code must be updated.
6595 #endif
6596 }
6597
6598 rc_node_hold(res);
6599 (void) pthread_mutex_unlock(&np->rn_lock);
6600 break;
6601 }
6602 rc_node_assign(out, res);
6603
6604 if (res == NULL)
6605 return (REP_PROTOCOL_DONE);
6606 rc_node_rele(res);
6607 return (REP_PROTOCOL_SUCCESS);
6608 }
6609
6610 void
6611 rc_iter_destroy(rc_node_iter_t **nipp)
6612 {
6613 rc_node_iter_t *nip = *nipp;
6614 rc_node_t *np;
6615
6616 if (nip == NULL)
6617 return; /* already freed */
6618
6619 np = nip->rni_parent;
6620
6621 if (nip->rni_filter_arg != NULL)
6622 free(nip->rni_filter_arg);
6623 nip->rni_filter_arg = NULL;
6624
6625 if (nip->rni_type == REP_PROTOCOL_ENTITY_VALUE ||
6626 nip->rni_iter != NULL) {
6627 if (nip->rni_clevel < 0)
6628 (void) pthread_mutex_lock(&np->rn_lock);
6629 else
6630 (void) pthread_mutex_lock(
6631 &np->rn_cchain[nip->rni_clevel]->rn_lock);
6632 rc_iter_end(nip); /* release walker and lock */
6633 }
6634 nip->rni_parent = NULL;
6635
6636 uu_free(nip);
6637 *nipp = NULL;
6638 }
6639
6640 int
6641 rc_node_setup_tx(rc_node_ptr_t *npp, rc_node_ptr_t *txp)
6642 {
6643 rc_node_t *np;
6644 permcheck_t *pcp;
6645 int ret;
6646 perm_status_t granted;
6647 rc_auth_state_t authorized = RC_AUTH_UNKNOWN;
6648 char *auth_string = NULL;
6649
6650 RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
6651
6652 if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
6653 rc_node_rele(np);
6654 np = np->rn_cchain[0];
6655 RC_NODE_CHECK_AND_HOLD(np);
6656 }
6657
6658 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
6659 rc_node_rele(np);
6660 return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6661 }
6662
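/*
 * Property groups that live under a snapshot are read-only; refuse to
 * set up a transaction on them.
 */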
6663 if (np->rn_id.rl_ids[ID_SNAPSHOT] != 0) {
6664 rc_node_rele(np);
6665 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6666 }
6667
6668 #ifdef NATIVE_BUILD
6669 if (client_is_privileged())
6670 goto skip_checks;
6671 rc_node_rele(np);
6672 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6673 #else
6674 if (is_main_repository == 0)
6675 goto skip_checks;
6676
6677 /* permission check */
6678 pcp = pc_create();
6679 if (pcp == NULL) {
6680 rc_node_rele(np);
6681 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6682 }
6683
6684 if (np->rn_id.rl_ids[ID_INSTANCE] != 0 && /* instance pg */
6685 ((strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0 &&
6686 strcmp(np->rn_type, AUTH_PG_ACTIONS_TYPE) == 0) ||
6687 (strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
6688 strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
6689 rc_node_t *instn;
6690
6691 /* solaris.smf.modify can be used */
6692 ret = perm_add_enabling(pcp, AUTH_MODIFY);
6693 if (ret != REP_PROTOCOL_SUCCESS) {
6694 pc_free(pcp);
6695 rc_node_rele(np);
6696 return (ret);
6697 }
6698
6699 /* solaris.smf.manage can be used. */
6700 ret = perm_add_enabling(pcp, AUTH_MANAGE);
6701
6702 if (ret != REP_PROTOCOL_SUCCESS) {
6703 pc_free(pcp);
6704 rc_node_rele(np);
6705 return (ret);
6706 }
6707
6708 /* general/action_authorization values can be used. */
6709 ret = rc_node_parent(np, &instn);
6710 if (ret != REP_PROTOCOL_SUCCESS) {
6711 assert(ret == REP_PROTOCOL_FAIL_DELETED);
6712 rc_node_rele(np);
6713 pc_free(pcp);
6714 return (REP_PROTOCOL_FAIL_DELETED);
6715 }
6716
6717 assert(instn->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
6718
6719 ret = perm_add_inst_action_auth(pcp, instn);
6720 rc_node_rele(instn);
6721 switch (ret) {
6722 case REP_PROTOCOL_SUCCESS:
6723 break;
6724
6725 case REP_PROTOCOL_FAIL_DELETED:
6726 case REP_PROTOCOL_FAIL_NO_RESOURCES:
6727 rc_node_rele(np);
6728 pc_free(pcp);
6729 return (ret);
6730
6731 default:
6732 bad_error("perm_add_inst_action_auth", ret);
6733 }
6734
6735 if (strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0)
6736 authorized = RC_AUTH_PASSED; /* No check on commit. */
6737 } else {
6738 ret = perm_add_enabling(pcp, AUTH_MODIFY);
6739
6740 if (ret == REP_PROTOCOL_SUCCESS) {
6741 /* propertygroup-type-specific authorization */
6742 /* no locking because rn_type won't change anyway */
6743 const char * const auth =
6744 perm_auth_for_pgtype(np->rn_type);
6745
6746 if (auth != NULL)
6747 ret = perm_add_enabling(pcp, auth);
6748 }
6749
6750 if (ret == REP_PROTOCOL_SUCCESS)
6751 /* propertygroup/transaction-type-specific auths */
6752 ret =
6753 perm_add_enabling_values(pcp, np, AUTH_PROP_VALUE);
6754
6755 if (ret == REP_PROTOCOL_SUCCESS)
6756 ret =
6757 perm_add_enabling_values(pcp, np, AUTH_PROP_MODIFY);
6758
6759 /* AUTH_MANAGE can manipulate general/AUTH_PROP_ACTION */
6760 if (ret == REP_PROTOCOL_SUCCESS &&
6761 strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
6762 strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0)
6763 ret = perm_add_enabling(pcp, AUTH_MANAGE);
6764
6765 if (ret != REP_PROTOCOL_SUCCESS) {
6766 pc_free(pcp);
6767 rc_node_rele(np);
6768 return (ret);
6769 }
6770 }
6771
6772 granted = perm_granted(pcp);
6773 ret = map_granted_status(granted, pcp, &auth_string);
6774 pc_free(pcp);
6775
6776 if ((granted == PERM_GONE) || (granted == PERM_FAIL) ||
6777 (ret == REP_PROTOCOL_FAIL_NO_RESOURCES)) {
6778 free(auth_string);
6779 rc_node_rele(np);
6780 return (ret);
6781 }
6782
6783 if (granted == PERM_DENIED) {
6784 /*
6785 * If we get here, the authorization failed.
6786 * Unfortunately, we don't have enough information at this
6787 * point to generate the security audit events. We'll only
6788 * get that information when the client tries to commit the
6789 * event. Thus, we'll remember the failed authorization,
6790 * so that we can generate the audit events later.
6791 */
6792 authorized = RC_AUTH_FAILED;
6793 }
6794 #endif /* NATIVE_BUILD */
6795
6796 skip_checks:
6797 rc_node_assign(txp, np);
6798 txp->rnp_authorized = authorized;
6799 if (authorized != RC_AUTH_UNKNOWN) {
6800 /* Save the authorization string. */
6801 if (txp->rnp_auth_string != NULL)
6802 free((void *)txp->rnp_auth_string);
6803 txp->rnp_auth_string = auth_string;
6804 auth_string = NULL; /* Don't free until done with txp. */
6805 }
6806
6807 rc_node_rele(np);
6808 if (auth_string != NULL)
6809 free(auth_string);
6810 return (REP_PROTOCOL_SUCCESS);
6811 }
6812
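/*
 * The three tx_* helpers below walk the raw command buffer supplied by
 * the client.  Each entry is a struct rep_protocol_transaction_cmd whose
 * rptc_size (rounded up with TX_SIZE()) gives the distance to the next
 * entry, and whose rptc_data begins with the name of the property the
 * command operates on.  A minimal sketch of the shared walk:
 *
 *	loc = (uintptr_t)cmds_arg;
 *	while (cmds_sz > 0) {
 *		cmd = (struct rep_protocol_transaction_cmd *)loc;
 *		(validate cmd->rptc_size against cmds_sz)
 *		(inspect cmd->rptc_action / rptc_type / rptc_data)
 *		loc += TX_SIZE(cmd->rptc_size);
 *		cmds_sz -= TX_SIZE(cmd->rptc_size);
 *	}
 */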
6813 /*
6814 * Return 1 if the given transaction commands only modify the values of
6815 * properties other than "modify_authorization". Return -1 if any of the
6816 * commands are invalid, and 0 otherwise.
6817 */
6818 static int
6819 tx_allow_value(const void *cmds_arg, size_t cmds_sz, rc_node_t *pg)
6820 {
6821 const struct rep_protocol_transaction_cmd *cmds;
6822 uintptr_t loc;
6823 uint32_t sz;
6824 rc_node_t *prop;
6825 boolean_t ok;
6826
6827 assert(!MUTEX_HELD(&pg->rn_lock));
6828
6829 loc = (uintptr_t)cmds_arg;
6830
6831 while (cmds_sz > 0) {
6832 cmds = (struct rep_protocol_transaction_cmd *)loc;
6833
6834 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6835 return (-1);
6836
6837 sz = cmds->rptc_size;
6838 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6839 return (-1);
6840
6841 sz = TX_SIZE(sz);
6842 if (sz > cmds_sz)
6843 return (-1);
6844
6845 switch (cmds[0].rptc_action) {
6846 case REP_PROTOCOL_TX_ENTRY_CLEAR:
6847 break;
6848
6849 case REP_PROTOCOL_TX_ENTRY_REPLACE:
6850 /* Check type */
6851 (void) pthread_mutex_lock(&pg->rn_lock);
6852 ok = B_FALSE;
6853 if (rc_node_find_named_child(pg,
6854 (const char *)cmds[0].rptc_data,
6855 REP_PROTOCOL_ENTITY_PROPERTY, &prop) ==
6856 REP_PROTOCOL_SUCCESS) {
6857 if (prop != NULL) {
6858 ok = prop->rn_valtype ==
6859 cmds[0].rptc_type;
6860 /*
6861 * rc_node_find_named_child()
6862 * places a hold on prop which we
6863 * do not need to hang on to.
6864 */
6865 rc_node_rele(prop);
6866 }
6867 }
6868 (void) pthread_mutex_unlock(&pg->rn_lock);
6869 if (ok)
6870 break;
6871 return (0);
6872
6873 default:
6874 return (0);
6875 }
6876
6877 if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_MODIFY)
6878 == 0)
6879 return (0);
6880
6881 loc += sz;
6882 cmds_sz -= sz;
6883 }
6884
6885 return (1);
6886 }
6887
6888 /*
6889 * Return 1 if any of the given transaction commands affect
6890 * "action_authorization". Return -1 if any of the commands are invalid and
6891 * 0 in all other cases.
6892 */
6893 static int
6894 tx_modifies_action(const void *cmds_arg, size_t cmds_sz)
6895 {
6896 const struct rep_protocol_transaction_cmd *cmds;
6897 uintptr_t loc;
6898 uint32_t sz;
6899
6900 loc = (uintptr_t)cmds_arg;
6901
6902 while (cmds_sz > 0) {
6903 cmds = (struct rep_protocol_transaction_cmd *)loc;
6904
6905 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6906 return (-1);
6907
6908 sz = cmds->rptc_size;
6909 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6910 return (-1);
6911
6912 sz = TX_SIZE(sz);
6913 if (sz > cmds_sz)
6914 return (-1);
6915
6916 if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_ACTION)
6917 == 0)
6918 return (1);
6919
6920 loc += sz;
6921 cmds_sz -= sz;
6922 }
6923
6924 return (0);
6925 }
6926
6927 /*
6928 * Returns 1 if the transaction commands only modify properties named
6929 * 'enabled'.
6930 */
6931 static int
6932 tx_only_enabled(const void *cmds_arg, size_t cmds_sz)
6933 {
6934 const struct rep_protocol_transaction_cmd *cmd;
6935 uintptr_t loc;
6936 uint32_t sz;
6937
6938 loc = (uintptr_t)cmds_arg;
6939
6940 while (cmds_sz > 0) {
6941 cmd = (struct rep_protocol_transaction_cmd *)loc;
6942
6943 if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6944 return (-1);
6945
6946 sz = cmd->rptc_size;
6947 if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6948 return (-1);
6949
6950 sz = TX_SIZE(sz);
6951 if (sz > cmds_sz)
6952 return (-1);
6953
6954 if (strcmp((const char *)cmd->rptc_data, AUTH_PROP_ENABLED)
6955 != 0)
6956 return (0);
6957
6958 loc += sz;
6959 cmds_sz -= sz;
6960 }
6961
6962 return (1);
6963 }
6964
6965 int
6966 rc_tx_commit(rc_node_ptr_t *txp, const void *cmds, size_t cmds_sz)
6967 {
6968 rc_node_t *np = txp->rnp_node;
6969 rc_node_t *pp;
6970 rc_node_t *nnp;
6971 rc_node_pg_notify_t *pnp;
6972 int rc;
6973 permcheck_t *pcp;
6974 perm_status_t granted;
6975 int normal;
6976 char *pg_fmri = NULL;
6977 char *auth_string = NULL;
6978 int auth_status = ADT_SUCCESS;
6979 int auth_ret_value = ADT_SUCCESS;
6980 size_t sz_out;
6981 int tx_flag = 1;
6982 tx_commit_data_t *tx_data = NULL;
6983
6984 RC_NODE_CHECK(np);
6985
6986 if ((txp->rnp_authorized != RC_AUTH_UNKNOWN) &&
6987 (txp->rnp_auth_string != NULL)) {
6988 auth_string = strdup(txp->rnp_auth_string);
6989 if (auth_string == NULL)
6990 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6991 }
6992
6993 if ((txp->rnp_authorized == RC_AUTH_UNKNOWN) &&
6994 is_main_repository) {
6995 #ifdef NATIVE_BUILD
6996 if (!client_is_privileged()) {
6997 return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6998 }
6999 #else
7000 /* permission check: depends on contents of transaction */
7001 pcp = pc_create();
7002 if (pcp == NULL)
7003 return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7004
7005 /* If normal is cleared, we won't do the normal checks. */
7006 normal = 1;
7007 rc = REP_PROTOCOL_SUCCESS;
7008
7009 if (strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
7010 strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0) {
7011 /* Touching general[framework]/action_authorization? */
7012 rc = tx_modifies_action(cmds, cmds_sz);
7013 if (rc == -1) {
7014 pc_free(pcp);
7015 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7016 }
7017
7018 if (rc) {
7019 /*
7020 * Yes: only AUTH_MODIFY and AUTH_MANAGE
7021 * can be used.
7022 */
7023 rc = perm_add_enabling(pcp, AUTH_MODIFY);
7024
7025 if (rc == REP_PROTOCOL_SUCCESS)
7026 rc = perm_add_enabling(pcp,
7027 AUTH_MANAGE);
7028
7029 normal = 0;
7030 } else {
7031 rc = REP_PROTOCOL_SUCCESS;
7032 }
7033 } else if (np->rn_id.rl_ids[ID_INSTANCE] != 0 &&
7034 strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
7035 strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0) {
7036 rc_node_t *instn;
7037
7038 rc = tx_only_enabled(cmds, cmds_sz);
7039 if (rc == -1) {
7040 pc_free(pcp);
7041 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7042 }
7043
7044 if (rc) {
7045 rc = rc_node_parent(np, &instn);
7046 if (rc != REP_PROTOCOL_SUCCESS) {
7047 assert(rc == REP_PROTOCOL_FAIL_DELETED);
7048 pc_free(pcp);
7049 return (rc);
7050 }
7051
7052 assert(instn->rn_id.rl_type ==
7053 REP_PROTOCOL_ENTITY_INSTANCE);
7054
7055 rc = perm_add_inst_action_auth(pcp, instn);
7056 rc_node_rele(instn);
7057 switch (rc) {
7058 case REP_PROTOCOL_SUCCESS:
7059 break;
7060
7061 case REP_PROTOCOL_FAIL_DELETED:
7062 case REP_PROTOCOL_FAIL_NO_RESOURCES:
7063 pc_free(pcp);
7064 return (rc);
7065
7066 default:
7067 bad_error("perm_add_inst_action_auth",
7068 rc);
7069 }
7070 } else {
7071 rc = REP_PROTOCOL_SUCCESS;
7072 }
7073 }
7074
7075 if (rc == REP_PROTOCOL_SUCCESS && normal) {
7076 rc = perm_add_enabling(pcp, AUTH_MODIFY);
7077
7078 if (rc == REP_PROTOCOL_SUCCESS) {
7079 /* Add pgtype-specific authorization. */
7080 const char * const auth =
7081 perm_auth_for_pgtype(np->rn_type);
7082
7083 if (auth != NULL)
7084 rc = perm_add_enabling(pcp, auth);
7085 }
7086
7087 /* Add pg-specific modify_authorization auths. */
7088 if (rc == REP_PROTOCOL_SUCCESS)
7089 rc = perm_add_enabling_values(pcp, np,
7090 AUTH_PROP_MODIFY);
7091
7092 /* If value_authorization values are ok, add them. */
7093 if (rc == REP_PROTOCOL_SUCCESS) {
7094 rc = tx_allow_value(cmds, cmds_sz, np);
7095 if (rc == -1)
7096 rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
7097 else if (rc)
7098 rc = perm_add_enabling_values(pcp, np,
7099 AUTH_PROP_VALUE);
7100 }
7101 }
7102
7103 if (rc == REP_PROTOCOL_SUCCESS) {
7104 granted = perm_granted(pcp);
7105 rc = map_granted_status(granted, pcp, &auth_string);
7106 if ((granted == PERM_DENIED) && auth_string) {
7107 /*
7108 * _PERMISSION_DENIED should not cause us
7109 * to exit at this point, because we still
7110 * want to generate an audit event.
7111 */
7112 rc = REP_PROTOCOL_SUCCESS;
7113 }
7114 }
7115
7116 pc_free(pcp);
7117
7118 if (rc != REP_PROTOCOL_SUCCESS)
7119 goto cleanout;
7120
7121 if (granted == PERM_DENIED) {
7122 auth_status = ADT_FAILURE;
7123 auth_ret_value = ADT_FAIL_VALUE_AUTH;
7124 tx_flag = 0;
7125 }
7126 #endif /* NATIVE_BUILD */
7127 } else if (txp->rnp_authorized == RC_AUTH_FAILED) {
7128 auth_status = ADT_FAILURE;
7129 auth_ret_value = ADT_FAIL_VALUE_AUTH;
7130 tx_flag = 0;
7131 }
7132
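/*
 * Whether or not the permission check passed, build the pg's FMRI and
 * parse the command buffer so that per-property audit events can be
 * generated; if tx_flag was cleared above we stop after auditing and
 * return _PERMISSION_DENIED without touching the repository.
 */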
7133 pg_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
7134 if (pg_fmri == NULL) {
7135 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7136 goto cleanout;
7137 }
7138 if ((rc = rc_node_get_fmri_or_fragment(np, pg_fmri,
7139 REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
7140 goto cleanout;
7141 }
7142
7143 /*
7144 * Parse the transaction commands into a useful form.
7145 */
7146 if ((rc = tx_commit_data_new(cmds, cmds_sz, &tx_data)) !=
7147 REP_PROTOCOL_SUCCESS) {
7148 goto cleanout;
7149 }
7150
7151 if (tx_flag == 0) {
7152 /* Authorization failed. Generate audit events. */
7153 generate_property_events(tx_data, pg_fmri, auth_string,
7154 auth_status, auth_ret_value);
7155 rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
7156 goto cleanout;
7157 }
7158
7159 nnp = rc_node_alloc();
7160 if (nnp == NULL) {
7161 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7162 goto cleanout;
7163 }
7164
7165 nnp->rn_id = np->rn_id; /* structure assignment */
7166 nnp->rn_hash = np->rn_hash;
7167 nnp->rn_name = strdup(np->rn_name);
7168 nnp->rn_type = strdup(np->rn_type);
7169 nnp->rn_pgflags = np->rn_pgflags;
7170
7171 nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
7172
7173 if (nnp->rn_name == NULL || nnp->rn_type == NULL) {
7174 rc_node_destroy(nnp);
7175 rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7176 goto cleanout;
7177 }
7178
7179 (void) pthread_mutex_lock(&np->rn_lock);
7180
7181 /*
7182 * We must have all of the old properties in the cache, or the
7183 * database deletions could cause inconsistencies.
7184 */
7185 if ((rc = rc_node_fill_children(np, REP_PROTOCOL_ENTITY_PROPERTY)) !=
7186 REP_PROTOCOL_SUCCESS) {
7187 (void) pthread_mutex_unlock(&np->rn_lock);
7188 rc_node_destroy(nnp);
7189 goto cleanout;
7190 }
7191
7192 if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
7193 (void) pthread_mutex_unlock(&np->rn_lock);
7194 rc_node_destroy(nnp);
7195 rc = REP_PROTOCOL_FAIL_DELETED;
7196 goto cleanout;
7197 }
7198
7199 if (np->rn_flags & RC_NODE_OLD) {
7200 rc_node_rele_flag(np, RC_NODE_USING_PARENT);
7201 (void) pthread_mutex_unlock(&np->rn_lock);
7202 rc_node_destroy(nnp);
7203 rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7204 goto cleanout;
7205 }
7206
7207 pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
7208 if (pp == NULL) {
7209 /* our parent is gone, we're going next... */
7210 rc_node_destroy(nnp);
7211 (void) pthread_mutex_lock(&np->rn_lock);
7212 if (np->rn_flags & RC_NODE_OLD) {
7213 (void) pthread_mutex_unlock(&np->rn_lock);
7214 rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7215 goto cleanout;
7216 }
7217 (void) pthread_mutex_unlock(&np->rn_lock);
7218 rc = REP_PROTOCOL_FAIL_DELETED;
7219 goto cleanout;
7220 }
7221 (void) pthread_mutex_unlock(&pp->rn_lock);
7222
7223 /*
7224 * prepare for the transaction
7225 */
7226 (void) pthread_mutex_lock(&np->rn_lock);
7227 if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
7228 (void) pthread_mutex_unlock(&np->rn_lock);
7229 (void) pthread_mutex_lock(&pp->rn_lock);
7230 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7231 (void) pthread_mutex_unlock(&pp->rn_lock);
7232 rc_node_destroy(nnp);
7233 rc = REP_PROTOCOL_FAIL_DELETED;
7234 goto cleanout;
7235 }
7236 nnp->rn_gen_id = np->rn_gen_id;
7237 (void) pthread_mutex_unlock(&np->rn_lock);
7238
7239 /* Sets nnp->rn_gen_id on success. */
7240 rc = object_tx_commit(&np->rn_id, tx_data, &nnp->rn_gen_id);
7241
7242 (void) pthread_mutex_lock(&np->rn_lock);
7243 if (rc != REP_PROTOCOL_SUCCESS) {
7244 rc_node_rele_flag(np, RC_NODE_IN_TX);
7245 (void) pthread_mutex_unlock(&np->rn_lock);
7246 (void) pthread_mutex_lock(&pp->rn_lock);
7247 rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7248 (void) pthread_mutex_unlock(&pp->rn_lock);
7249 rc_node_destroy(nnp);
7250 rc_node_clear(txp, 0);
7251 if (rc == REP_PROTOCOL_DONE)
7252 rc = REP_PROTOCOL_SUCCESS; /* successful empty tx */
7253 goto cleanout;
7254 }
7255
7256 /*
7257 * Notify waiters
7258 */
7259 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7260 while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
7261 rc_pg_notify_fire(pnp);
7262 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7263
7264 np->rn_flags |= RC_NODE_OLD;
7265 (void) pthread_mutex_unlock(&np->rn_lock);
7266
7267 rc_notify_remove_node(np);
7268
7269 /*
7270 * replace np with nnp
7271 */
7272 rc_node_relink_child(pp, np, nnp);
7273
7274 /*
7275 * all done -- clear the transaction.
7276 */
7277 rc_node_clear(txp, 0);
7278 generate_property_events(tx_data, pg_fmri, auth_string,
7279 auth_status, auth_ret_value);
7280
7281 rc = REP_PROTOCOL_SUCCESS;
7282
7283 cleanout:
7284 free(auth_string);
7285 free(pg_fmri);
7286 tx_commit_data_free(tx_data);
7287 return (rc);
7288 }
7289
7290 void
7291 rc_pg_notify_init(rc_node_pg_notify_t *pnp)
7292 {
7293 uu_list_node_init(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
7294 pnp->rnpn_pg = NULL;
7295 pnp->rnpn_fd = -1;
7296 }
7297
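/*
 * Register fd for notification of changes to the property group
 * referenced by npp.  rc_tx_commit() fires every entry on the pg's
 * rn_pg_notify_list (via rc_pg_notify_fire()) once a transaction on
 * that pg commits.
 */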
7298 int
7299 rc_pg_notify_setup(rc_node_pg_notify_t *pnp, rc_node_ptr_t *npp, int fd)
7300 {
7301 rc_node_t *np;
7302
7303 RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
7304
7305 if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
7306 (void) pthread_mutex_unlock(&np->rn_lock);
7307 return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7308 }
7309
7310 /*
7311 * wait for any transaction in progress to complete
7312 */
7313 if (!rc_node_wait_flag(np, RC_NODE_IN_TX)) {
7314 (void) pthread_mutex_unlock(&np->rn_lock);
7315 return (REP_PROTOCOL_FAIL_DELETED);
7316 }
7317
7318 if (np->rn_flags & RC_NODE_OLD) {
7319 (void) pthread_mutex_unlock(&np->rn_lock);
7320 return (REP_PROTOCOL_FAIL_NOT_LATEST);
7321 }
7322
7323 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7324 rc_pg_notify_fire(pnp);
7325 pnp->rnpn_pg = np;
7326 pnp->rnpn_fd = fd;
7327 (void) uu_list_insert_after(np->rn_pg_notify_list, NULL, pnp);
7328 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7329
7330 (void) pthread_mutex_unlock(&np->rn_lock);
7331 return (REP_PROTOCOL_SUCCESS);
7332 }
7333
7334 void
7335 rc_pg_notify_fini(rc_node_pg_notify_t *pnp)
7336 {
7337 (void) pthread_mutex_lock(&rc_pg_notify_lock);
7338 rc_pg_notify_fire(pnp);
7339 (void) pthread_mutex_unlock(&rc_pg_notify_lock);
7340
7341 uu_list_node_fini(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
7342 }

void
rc_notify_info_init(rc_notify_info_t *rnip)
{
	int i;

	uu_list_node_init(rnip, &rnip->rni_list_node, rc_notify_info_pool);
	uu_list_node_init(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
	    rc_notify_pool);

	rnip->rni_notify.rcn_node = NULL;
	rnip->rni_notify.rcn_info = rnip;

	bzero(rnip->rni_namelist, sizeof (rnip->rni_namelist));
	bzero(rnip->rni_typelist, sizeof (rnip->rni_typelist));

	(void) pthread_cond_init(&rnip->rni_cv, NULL);

	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
		rnip->rni_namelist[i] = NULL;
		rnip->rni_typelist[i] = NULL;
	}
}
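
/*
 * A freshly initialized rc_notify_info_t is not yet on the active client
 * lists; it is linked in by rc_notify_info_insert_locked() the first time
 * a name or type watch is added via rc_notify_info_add_watch() below.
 */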

static void
rc_notify_info_insert_locked(rc_notify_info_t *rnip)
{
	assert(MUTEX_HELD(&rc_pg_notify_lock));

	assert(!(rnip->rni_flags & RC_NOTIFY_ACTIVE));

	rnip->rni_flags |= RC_NOTIFY_ACTIVE;
	(void) uu_list_insert_after(rc_notify_info_list, NULL, rnip);
	(void) uu_list_insert_before(rc_notify_list, NULL, &rnip->rni_notify);
}
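
/*
 * The client's marker node (&rnip->rni_notify) is appended at the tail of
 * rc_notify_list, so rc_notify_info_wait() only sees notifications queued
 * after the client registered; the info itself is placed on
 * rc_notify_info_list, the list of active notification clients.
 */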

static void
rc_notify_info_remove_locked(rc_notify_info_t *rnip)
{
	rc_notify_t *me = &rnip->rni_notify;
	rc_notify_t *np;

	assert(MUTEX_HELD(&rc_pg_notify_lock));

	assert(rnip->rni_flags & RC_NOTIFY_ACTIVE);

	assert(!(rnip->rni_flags & RC_NOTIFY_DRAIN));
	rnip->rni_flags |= RC_NOTIFY_DRAIN;
	(void) pthread_cond_broadcast(&rnip->rni_cv);

	(void) uu_list_remove(rc_notify_info_list, rnip);

	/*
	 * clean up any notifications at the beginning of the list
	 */
	if (uu_list_first(rc_notify_list) == me) {
		/*
		 * We can't call rc_notify_remove_locked() unless
		 * rc_notify_in_use is 0.
		 */
		while (rc_notify_in_use) {
			(void) pthread_cond_wait(&rc_pg_notify_cv,
			    &rc_pg_notify_lock);
		}
		while ((np = uu_list_next(rc_notify_list, me)) != NULL &&
		    np->rcn_info == NULL)
			rc_notify_remove_locked(np);
	}
	(void) uu_list_remove(rc_notify_list, me);

	while (rnip->rni_waiters) {
		(void) pthread_cond_broadcast(&rc_pg_notify_cv);
		(void) pthread_cond_broadcast(&rnip->rni_cv);
		(void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
	}

	rnip->rni_flags &= ~(RC_NOTIFY_DRAIN | RC_NOTIFY_ACTIVE);
}
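
/*
 * Teardown handshake: RC_NOTIFY_DRAIN kicks any threads blocked in
 * rc_notify_info_wait() out of their loop, the marker is pulled off
 * rc_notify_list (after cleaning up any leading notifications this client
 * was responsible for), and we then wait for rni_waiters to drop to zero
 * before clearing the DRAIN and ACTIVE flags.
 */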

static int
rc_notify_info_add_watch(rc_notify_info_t *rnip, const char **arr,
    const char *name)
{
	int i;
	int rc;
	char *f;

	rc = rc_check_type_name(REP_PROTOCOL_ENTITY_PROPERTYGRP, name);
	if (rc != REP_PROTOCOL_SUCCESS)
		return (rc);

	f = strdup(name);
	if (f == NULL)
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);

	(void) pthread_mutex_lock(&rc_pg_notify_lock);

	while (rnip->rni_flags & RC_NOTIFY_EMPTYING)
		(void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);

	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
		if (arr[i] == NULL)
			break;

		/*
		 * Don't add name if it's already being tracked.
		 */
		if (strcmp(arr[i], f) == 0) {
			free(f);
			goto out;
		}
	}

	if (i == RC_NOTIFY_MAX_NAMES) {
		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
		free(f);
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
	}

	arr[i] = f;

out:
	if (!(rnip->rni_flags & RC_NOTIFY_ACTIVE))
		rc_notify_info_insert_locked(rnip);

	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
	return (REP_PROTOCOL_SUCCESS);
}
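
/*
 * A name or type that is already being watched is accepted silently, each
 * list holds at most RC_NOTIFY_MAX_NAMES entries, and the first successful
 * watch is what makes the client active (and therefore visible on
 * rc_notify_list and rc_notify_info_list).
 */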

int
rc_notify_info_add_name(rc_notify_info_t *rnip, const char *name)
{
	return (rc_notify_info_add_watch(rnip, rnip->rni_namelist, name));
}

int
rc_notify_info_add_type(rc_notify_info_t *rnip, const char *type)
{
	return (rc_notify_info_add_watch(rnip, rnip->rni_typelist, type));
}
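
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * consumer typically registers the property group names and/or types it
 * cares about, then blocks in rc_notify_info_wait():
 *
 *	rc_notify_info_t rni;
 *
 *	rc_notify_info_init(&rni);
 *	(void) rc_notify_info_add_name(&rni, "general");
 *	(void) rc_notify_info_add_type(&rni, "framework");
 *	...loop on rc_notify_info_wait(); see the sketch below...
 *	rc_notify_info_fini(&rni);
 */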

/*
 * Wait for and report an event of interest to rnip, a notification client
 */
int
rc_notify_info_wait(rc_notify_info_t *rnip, rc_node_ptr_t *out,
    char *outp, size_t sz)
{
	rc_notify_t *np;
	rc_notify_t *me = &rnip->rni_notify;
	rc_node_t *nnp;
	rc_notify_delete_t *ndp;

	int am_first_info;

	if (sz > 0)
		outp[0] = 0;

	(void) pthread_mutex_lock(&rc_pg_notify_lock);

	while ((rnip->rni_flags & (RC_NOTIFY_ACTIVE | RC_NOTIFY_DRAIN)) ==
	    RC_NOTIFY_ACTIVE) {
		/*
		 * If I'm first on the notify list, it is my job to
		 * clean up any notifications I pass by.  I can't do that
		 * if someone is blocking the list from removals, so I
		 * have to wait until they have all drained.
		 */
		am_first_info = (uu_list_first(rc_notify_list) == me);
		if (am_first_info && rc_notify_in_use) {
			rnip->rni_waiters++;
			(void) pthread_cond_wait(&rc_pg_notify_cv,
			    &rc_pg_notify_lock);
			rnip->rni_waiters--;
			continue;
		}

		/*
		 * Search the list for a node of interest.
		 */
		np = uu_list_next(rc_notify_list, me);
		while (np != NULL && !rc_notify_info_interested(rnip, np)) {
			rc_notify_t *next = uu_list_next(rc_notify_list, np);

			if (am_first_info) {
				if (np->rcn_info) {
					/*
					 * Passing another client -- stop
					 * cleaning up notifications
					 */
					am_first_info = 0;
				} else {
					rc_notify_remove_locked(np);
				}
			}
			np = next;
		}

		/*
		 * Nothing of interest -- wait for notification
		 */
		if (np == NULL) {
			rnip->rni_waiters++;
			(void) pthread_cond_wait(&rnip->rni_cv,
			    &rc_pg_notify_lock);
			rnip->rni_waiters--;
			continue;
		}

		/*
		 * found something to report -- move myself after the
		 * notification and process it.
		 */
		(void) uu_list_remove(rc_notify_list, me);
		(void) uu_list_insert_after(rc_notify_list, np, me);

		if ((ndp = np->rcn_delete) != NULL) {
			(void) strlcpy(outp, ndp->rnd_fmri, sz);
			if (am_first_info)
				rc_notify_remove_locked(np);
			(void) pthread_mutex_unlock(&rc_pg_notify_lock);
			rc_node_clear(out, 0);
			return (REP_PROTOCOL_SUCCESS);
		}

		nnp = np->rcn_node;
		assert(nnp != NULL);

		/*
		 * We can't bump nnp's reference count without grabbing its
		 * lock, and rc_pg_notify_lock is a leaf lock.  So we
		 * temporarily block all removals to keep nnp from
		 * disappearing.
		 */
		rc_notify_in_use++;
		assert(rc_notify_in_use > 0);
		(void) pthread_mutex_unlock(&rc_pg_notify_lock);

		rc_node_assign(out, nnp);

		(void) pthread_mutex_lock(&rc_pg_notify_lock);
		assert(rc_notify_in_use > 0);
		rc_notify_in_use--;

		if (am_first_info) {
			/*
			 * While we had the lock dropped, another thread
			 * may have also incremented rc_notify_in_use.  We
			 * need to make sure that we're back to 0 before
			 * removing the node.
			 */
			while (rc_notify_in_use) {
				(void) pthread_cond_wait(&rc_pg_notify_cv,
				    &rc_pg_notify_lock);
			}
			rc_notify_remove_locked(np);
		}
		if (rc_notify_in_use == 0)
			(void) pthread_cond_broadcast(&rc_pg_notify_cv);
		(void) pthread_mutex_unlock(&rc_pg_notify_lock);

		return (REP_PROTOCOL_SUCCESS);
	}
	/*
	 * If we're the last one out, let people know it's clear.
	 */
	if (rnip->rni_waiters == 0)
		(void) pthread_cond_broadcast(&rnip->rni_cv);
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
	return (REP_PROTOCOL_DONE);
}
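
/*
 * Illustrative sketch of the wait loop (hypothetical caller; the buffer
 * size is chosen arbitrarily and "out" is assumed to be an initialized
 * node pointer).  On REP_PROTOCOL_SUCCESS, either "fmri" is non-empty and
 * names a deleted entity, or "out" has been assigned to the changed
 * property group; REP_PROTOCOL_DONE means the client was torn down.
 *
 *	char fmri[1024];
 *	rc_node_ptr_t out;
 *
 *	while (rc_notify_info_wait(&rni, &out, fmri, sizeof (fmri)) ==
 *	    REP_PROTOCOL_SUCCESS) {
 *		if (fmri[0] != '\0')
 *			...the entity named by fmri was deleted...
 *		else
 *			...the property group referenced by out changed...
 *	}
 */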

static void
rc_notify_info_reset(rc_notify_info_t *rnip)
{
	int i;

	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	if (rnip->rni_flags & RC_NOTIFY_ACTIVE)
		rc_notify_info_remove_locked(rnip);
	assert(!(rnip->rni_flags & (RC_NOTIFY_DRAIN | RC_NOTIFY_EMPTYING)));
	rnip->rni_flags |= RC_NOTIFY_EMPTYING;
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);

	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
		if (rnip->rni_namelist[i] != NULL) {
			free((void *)rnip->rni_namelist[i]);
			rnip->rni_namelist[i] = NULL;
		}
		if (rnip->rni_typelist[i] != NULL) {
			free((void *)rnip->rni_typelist[i]);
			rnip->rni_typelist[i] = NULL;
		}
	}

	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	rnip->rni_flags &= ~RC_NOTIFY_EMPTYING;
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
}
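
/*
 * rc_notify_info_reset() leaves the structure reusable: the client is
 * removed from the active lists and all of its name and type watches are
 * freed, with RC_NOTIFY_EMPTYING blocking any concurrent
 * rc_notify_info_add_watch() until the lists have been emptied.
 */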

void
rc_notify_info_fini(rc_notify_info_t *rnip)
{
	rc_notify_info_reset(rnip);

	uu_list_node_fini(rnip, &rnip->rni_list_node, rc_notify_info_pool);
	uu_list_node_fini(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
	    rc_notify_pool);
}