xref: /titanic_41/usr/src/cmd/svc/configd/rc_node.c (revision 7a17cfad7ff3427e1ce7ecdbf566e442a7025ec9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * rc_node.c - In-memory SCF object management
29  *
30  * This layer manages the in-memory cache (the Repository Cache) of SCF
31  * data.  Read requests are usually satisfied from here, but may require
32  * load calls to the "object" layer.  Modify requests always write-through
33  * to the object layer.
34  *
35  * SCF data comprises scopes, services, instances, snapshots, snaplevels,
36  * property groups, properties, and property values.  All but the last are
37  * known here as "entities" and are represented by rc_node_t data
38  * structures.  (Property values are kept in the rn_values member of the
39  * respective property, not as separate objects.)  All entities besides
40  * the "localhost" scope have some entity as a parent, and therefore form
41  * a tree.
42  *
43  * The entity tree is rooted at rc_scope, which rc_node_init() initializes to
44  * the "localhost" scope.  The tree is filled in from the database on-demand
45  * by rc_node_fill_children().
46  *
47  * rc_node_t's are also placed in the cache_hash[] hash table, for rapid
48  * lookup.
49  *
50  * Multiple threads may service client requests, so access to each
51  * rc_node_t is synchronized by its rn_lock member.  Some fields are
52  * protected by bits in the rn_flags field instead, to support operations
53  * which need to drop rn_lock, for example to respect locking order.  Such
54  * flags should be manipulated with the rc_node_{hold,rele}_flag()
55  * functions.
56  *
57  * We track references to nodes to tell when they can be free()d.  rn_refs
58  * should be incremented with rc_node_hold() on the creation of client
59  * references (rc_node_ptr_t's and rc_iter_t's).  rn_erefs ("ephemeral
60  * references") should be incremented when a pointer is read into a local
61  * variable of a thread, with rc_node_hold_ephemeral_locked().  This
62  * hasn't been fully implemented, however, so rc_node_rele() tolerates
63  * rn_erefs being 0.  Some code which predates rn_erefs counts ephemeral
64  * references in rn_refs.  Other references are tracked by the
65  * rn_other_refs field and the RC_NODE_DEAD, RC_NODE_IN_PARENT,
66  * RC_NODE_OLD, and RC_NODE_ON_FORMER flags.
67  *
68  * Locking rules: To dereference an rc_node_t * (usually to lock it), you must
69  * have a hold (rc_node_hold()) on it or otherwise be sure that it hasn't been
70  * rc_node_destroy()ed (hold a lock on its parent or child, hold a flag,
71  * etc.).  Once you have locked an rc_node_t you must check its rn_flags for
72  * RC_NODE_DEAD before you can use it.  This is usually done with the
73  * rc_node_{wait,hold}_flag() functions (often via the rc_node_check_*()
74  * functions & RC_NODE_*() macros), which fail if the object has died.
75  *
76  * When a transactional node (property group or snapshot) is updated,
77  * a new node takes the place of the old node in the global hash and the
78  * old node is hung off of the rn_former list of the new node.  At the
79  * same time, all of its children have their rn_parent_ref pointer set,
80  * and any holds they have are reflected in the old node's rn_other_refs
81  * count.  This is automatically kept up to date until the final reference
82  * to the subgraph is dropped, at which point the node is unrefed and
83  * destroyed, along with all of its children.
84  *
85  * Because name service lookups may take a long time and, more importantly,
86  * may trigger additional accesses to the repository, perm_granted() must be
87  * called without holding any locks.
88  *
89  * An ITER_START for a non-ENTITY_VALUE induces an rc_node_fill_children()
90  * call via rc_node_setup_iter() to populate the rn_children uu_list of the
91  * rc_node_t * in question and a call to uu_list_walk_start() on that list.  For
92  * ITER_READ, rc_iter_next() uses uu_list_walk_next() to find the next
93  * appropriate child.
94  *
95  * An ITER_START for an ENTITY_VALUE makes sure the node has its values
96  * filled, and sets up the iterator.  An ITER_READ_VALUE just copies out
97  * the proper values and updates the offset information.
98  *
99  * To allow aliases, snapshots are implemented with a level of indirection.
100  * A snapshot rc_node_t has a snapid which refers to an rc_snapshot_t in
101  * snapshot.c which contains the authoritative snaplevel information.  The
102  * snapid is "assigned" by rc_attach_snapshot().
103  *
104  * We provide the client layer with rc_node_ptr_t's to reference objects.
105  * Objects referred to by them are automatically held & released by
106  * rc_node_assign() & rc_node_clear().  The RC_NODE_PTR_*() macros are used at
107  * client.c entry points to read the pointers.  They fetch the pointer to the
108  * object, return (from the function) if it is dead, and lock, hold, or hold
109  * a flag of the object.
110  */
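
/*
 * Illustrative sketch (not part of the original source): the locking rules
 * above translate into a common pattern at the entry points.  Assuming "np"
 * was read from a held rc_node_ptr_t, a caller typically does something like:
 *
 *	(void) pthread_mutex_lock(&np->rn_lock);
 *	if (np->rn_flags & RC_NODE_DEAD) {
 *		(void) pthread_mutex_unlock(&np->rn_lock);
 *		return (REP_PROTOCOL_FAIL_DELETED);
 *	}
 *	rc_node_hold_locked(np);		(keep np alive while in use)
 *	(void) pthread_mutex_unlock(&np->rn_lock);
 *	... use np ...
 *	rc_node_rele(np);			(re-acquires rn_lock itself)
 *
 * The RC_NODE_PTR_*() macros used at the client.c entry points wrap this
 * pattern up for the protocol handlers.
 */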
111 
112 /*
113  * Permission checking is authorization-based: some operations may only
114  * proceed if the user has been assigned at least one of a set of
115  * authorization strings.  The set of enabling authorizations depends on the
116  * operation and the target object.  The set of authorizations assigned to
117  * a user is determined by reading /etc/security/policy.conf, querying the
118  * user_attr database, and possibly querying the prof_attr database, as per
119  * chkauthattr() in libsecdb.
120  *
121  * The fastest way to decide whether the two sets intersect is by entering the
122  * strings into a hash table and detecting collisions, which takes linear time
123  * in the total size of the sets.  The exception is the authorization
124  * patterns which may be assigned to users: without advanced pattern-matching
125  * algorithms, checking each such pattern takes O(n) time in the number of
126  * enabling authorizations.
127  *
128  * We can achieve some practical speed-ups by noting that if we enter all of
129  * the authorizations from one of the sets into the hash table we can merely
130  * check the elements of the second set for existence without adding them.
131  * This reduces memory requirements and hash table clutter.  The enabling set
132  * is well suited for this because it is internal to configd (for now, at
133  * least).  Combine this with short-circuiting and we can even minimize the
134  * number of queries to the security databases (user_attr & prof_attr).
135  *
136  * To force this usage onto clients we provide functions for adding
137  * authorizations to the enabling set of a permission context structure
138  * (perm_add_*()) and one to decide whether the user associated with the
139  * current door call client possesses any of them (perm_granted()).
140  *
141  * At some point, a generic version of this should move to libsecdb.
142  *
143  * While entering the enabling strings into the hash table, we keep track
144  * of which is the most specific for use in generating auditing events.
145  * See the "Collecting the Authorization String" section of the "SMF Audit
146  * Events" block comment below.
147  */
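
/*
 * Illustrative sketch (not part of the original source): a permission check
 * in this file generally has roughly the following shape, building the
 * enabling set first and consulting the security databases only once:
 *
 *	permcheck_t *pcp;
 *	perm_status_t granted;
 *	char *auth_string = NULL;
 *	int rc;
 *
 *	if ((pcp = pc_create()) == NULL)
 *		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
 *	rc = perm_add_enabling(pcp, AUTH_MODIFY);
 *	... further perm_add_*() calls for more specific authorizations ...
 *	if (rc == REP_PROTOCOL_SUCCESS) {
 *		granted = perm_granted(pcp);
 *		rc = map_granted_status(granted, pcp, &auth_string);
 *	}
 *	pc_free(pcp);
 *
 * perm_granted() is the step which reads policy.conf, user_attr and
 * prof_attr, which is why it must be called without holding node locks.
 */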
148 
149 /*
150  * Composition is the combination of sets of properties.  The sets are ordered
151  * and properties in higher sets obscure properties of the same name in lower
152  * sets.  Here we present a composed view of an instance's properties as the
153  * union of its properties and its service's properties.  Similarly the
154  * properties of snaplevels are combined to form a composed view of the
155  * properties of a snapshot (which should match the composed view of the
156  * properties of the instance when the snapshot was taken).
157  *
158  * In terms of the client interface, the client may request that a property
159  * group iterator for an instance or snapshot be composed.  Property groups
160  * traversed by such an iterator may not have the target entity as a parent.
161  * Similarly, the properties traversed by a property iterator for those
162  * property groups may not have the property groups iterated as parents.
163  *
164  * Implementation requires that iterators for instances and snapshots be
165  * composition-savvy, and that we have a "composed property group" entity
166  * which represents the composition of a number of property groups.  Iteration
167  * over "composed property groups" yields properties which may have different
168  * parents, but for all other operations a composed property group behaves
169  * like the top-most property group it represents.
170  *
171  * The implementation is based on the rn_cchain[] array of rc_node_t pointers
172  * in rc_node_t.  For instances, the pointers point to the instance and its
173  * parent service.  For snapshots they point to the child snaplevels, and for
174  * composed property groups they point to property groups.  A composed
175  * iterator carries an index into rn_cchain[].  Thus most of the magic ends up
176  * in the rc_iter_*() code.
177  */
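/*
 * Illustrative sketch (not part of the original source): a composed lookup
 * by name can be pictured as walking rn_cchain[] in order and returning the
 * first match, so that entries in higher (earlier) sets obscure same-named
 * entries in lower ones.  In pseudocode, with a hypothetical entry count
 * "cchain_len" and lookup helper "find_named_child":
 *
 *	for (i = 0; i < cchain_len; i++) {
 *		found = find_named_child(np->rn_cchain[i], name);
 *		if (found != NULL)
 *			return (found);
 *	}
 *	return (NULL);
 *
 * A composed iterator works similarly, remembering its current index into
 * rn_cchain[] so that it can resume in the next set once one is exhausted.
 */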
178 /*
179  * SMF Audit Events:
180  * ================
181  *
182  * To maintain security, SMF generates audit events whenever
183  * privileged operations are attempted.  See the System Administration
184  * Guide: Security Services answerbook for a discussion of the Solaris
185  * audit system.
186  *
187  * The SMF audit event codes are defined in adt_event.h by symbols
188  * starting with ADT_smf_ and are described in audit_event.txt.  The
189  * audit record structures are defined in the SMF section of adt.xml.
190  * adt.xml is used to automatically generate adt_event.h which
191  * contains the definitions that we code to in this file.  For the
192  * most part the audit events map closely to actions that you would
193  * perform with svcadm or svccfg, but there are some special cases
194  * which we'll discuss later.
195  *
196  * The software associated with SMF audit events falls into three
197  * categories:
198  * 	- collecting information to be written to the audit
199  *	  records
200  *	- using the adt_* functions in
201  *	  usr/src/lib/libbsm/common/adt.c to generate the audit
202  *	  records.
203  * 	- handling special cases
204  *
205  * Collecting Information:
206  * ----------------------
207  *
208  * Almost all of the audit events require the FMRI of the affected
209  * object and the authorization string that was used.  The one
210  * exception is ADT_smf_annotation which we'll talk about later.
211  *
212  * Collecting the FMRI:
213  *
214  * The rc_node structure has a member called rn_fmri which points to
215  * its FMRI.  This is initialized by a call to rc_node_build_fmri()
216  * when the node's parent is established.  The reason for doing it
217  * at this time is that a node's FMRI is basically the concatenation
218  * of the parent's FMRI and the node's name with the appropriate
219  * decoration.  rc_node_build_fmri() does this concatenation and
220  * decorating.  It is called from rc_node_link_child() and
221  * rc_node_relink_child() where a node is linked to its parent.
222  *
223  * rc_node_get_fmri_or_fragment() is called to retrieve a node's FMRI
224  * when it is needed.  It returns rn_fmri if it is set.  If the node
225  * is at the top level, however, rn_fmri won't be set because it was
226  * never linked to a parent.  In this case,
227  * rc_node_get_fmri_or_fragment() constructs an FMRI fragment based on
228  * its node type and its name, rn_name.
229  *
230  * Collecting the Authorization String:
231  *
232  * Naturally, the authorization string is captured during the
233  * authorization checking process.  Acceptable authorization strings
234  * are added to a permcheck_t hash table as noted in the section on
235  * permission checking above.  Once all entries have been added to the
236  * hash table, perm_granted() is called.  If the client is authorized,
237  * perm_granted() returns with pc_auth_string of the permcheck_t
238  * structure pointing to the authorization string.
239  *
240  * This works fine if the client is authorized, but what happens if
241  * the client is not authorized?  We need to report the required
242  * authorization string.  This is the authorization that would have
243  * been used if permission had been granted.  perm_granted() will
244  * find no match, so it needs to decide which string in the hash
245  * table to use as the required authorization string.  It needs to do
246  * this, because configd is still going to generate an event.  A
247  * design decision was made to use the most specific authorization
248  * in the hash table.  The pc_auth_type enum designates the
249  * specificity of an authorization string.  For example, an
250  * authorization string that is declared in an instance PG is more
251  * specific than one that is declared in a service PG.
252  *
253  * The pc_add() function keeps track of the most specific
254  * authorization in the hash table.  It does this using the
255  * pc_specific and pc_specific_type members of the permcheck
256  * structure.  pc_add() updates these members whenever a more
257  * specific authorization string is added to the hash table.  Thus, if
258  * an authorization match is not found, perm_granted() will return
259  * with pc_auth_string in the permcheck_t pointing to the string that
260  * is referenced by pc_specific.
261  *
262  * Generating the Audit Events:
263  * ===========================
264  *
265  * As the functions in this file process requests for clients of
266  * configd, they gather the information that is required for an audit
267  * event.  Eventually, the request processing gets to the point where
268  * the authorization is rejected or to the point where the requested
269  * action was attempted.  At these two points smf_audit_event() is
270  * called.
271  *
272  * smf_audit_event() takes 4 parameters:
273  * 	- the event ID which is one of the ADT_smf_* symbols from
274  *	  adt_event.h.
275  * 	- status to pass to adt_put_event()
276  * 	- return value to pass to adt_put_event()
277  * 	- the event data (see audit_event_data structure)
278  *
279  * All interactions with the auditing software require an audit
280  * session.  We use one audit session per configd client.  We keep
281  * track of the audit session in the repcache_client structure.
282  * smf_audit_event() calls get_audit_session() to get the session
283  * pointer.
284  *
285  * smf_audit_event() then calls adt_alloc_event() to allocate an
286  * adt_event_data union which is defined in adt_event.h, copies the
287  * data into the appropriate members of the union and calls
288  * adt_put_event() to generate the event.
289  *
290  * Special Cases:
291  * =============
292  *
293  * There are three major types of special cases:
294  *
295  * 	- gathering event information for each action in a
296  *	  transaction
297  * 	- Higher level events represented by special property
298  *	  group/property name combinations.  Many of these are
299  *	  restarter actions.
300  * 	- ADT_smf_annotation event
301  *
302  * Processing Transaction Actions:
303  * ------------------------------
304  *
305  * A transaction can contain multiple actions to modify, create or
306  * delete one or more properties.  We need to capture information so
307  * that we can generate an event for each property action.  The
308  * transaction information is stored in a tx_commit_data_t, and
309  * object.c provides accessor functions to retrieve data from this
310  * structure.  rc_tx_commit() obtains a tx_commit_data_t by calling
311  * tx_commit_data_new() and passes this to object_tx_commit() to
312  * commit the transaction.  Then we call generate_property_events() to
313  * generate an audit event for each property action.
314  *
315  * Special Properties:
316  * ------------------
317  *
318  * There are combinations of property group/property name that are special.
319  * They are special because they have specific meaning to startd.  startd
320  * interprets them in a service-independent fashion.
321  * restarter_actions/refresh and general/enabled are two examples of these.
322  * A special event is generated for these properties in addition to the
323  * regular property event described in the previous section.  The special
324  * properties are declared as an array of audit_special_prop_item
325  * structures at special_props_list in rc_node.c.
326  *
327  * In the previous section, we mentioned the
328  * generate_property_events() function that generates an event for
329  * every property action.  Before generating each event,
330  * generate_property_events() calls special_property_event().
331  * special_property_event() checks to see if the action involves a
332  * special property.  If it does, it generates a special audit
333  * event.
334  *
335  * ADT_smf_annotation event:
336  * ------------------------
337  *
338  * This is a special event unlike any other.  It allows the svccfg
339  * program to store an annotation in the event log before a series
340  * of transactions is processed.  It is used with the import and
341  * apply svccfg commands.  svccfg uses the rep_protocol_annotation
342  * message to pass the operation (import or apply) and the file name
343  * to configd.  The set_annotation() function in client.c stores
344  * these away in the repcache_client structure.  The address of
345  * this structure is saved in the thread_info structure.
346  *
347  * Before it generates any events, smf_audit_event() calls
348  * smf_annotation_event().  smf_annotation_event() calls
349  * client_annotation_needed() which is defined in client.c.  If an
350  * annotation is needed client_annotation_needed() returns the
351  * operation and filename strings that were saved from the
352  * rep_protocol_annotation message.  smf_annotation_event() then
353  * generates the ADT_smf_annotation event.
354  */
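
/*
 * Illustrative sketch (not part of the original source): putting the pieces
 * above together, a request handler typically accumulates the event data as
 * it goes and reports the outcome once, along the lines of:
 *
 *	audit_event_data_t audit_data;
 *
 *	audit_data.ed_fmri = fmri_buf;		(rc_node_get_fmri_or_fragment())
 *	audit_data.ed_auth = auth_string;	(from the permcheck_t)
 *	...
 *	if (granted)
 *		smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
 *		    &audit_data);
 *	else
 *		smf_audit_event(event_id, ADT_FAILURE, ADT_FAIL_VALUE_AUTH,
 *		    &audit_data);
 *
 * where event_id is one of the ADT_smf_* codes from adt_event.h.  On native
 * builds smf_audit_event() compiles to nothing (see the macro below).
 */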
355 
356 #include <assert.h>
357 #include <atomic.h>
358 #include <bsm/adt_event.h>
359 #include <errno.h>
360 #include <libuutil.h>
361 #include <libscf.h>
362 #include <libscf_priv.h>
363 #include <prof_attr.h>
364 #include <pthread.h>
365 #include <pwd.h>
366 #include <stdio.h>
367 #include <stdlib.h>
368 #include <strings.h>
369 #include <sys/types.h>
370 #include <syslog.h>
371 #include <unistd.h>
372 #include <user_attr.h>
373 
374 #include "configd.h"
375 
376 #define	AUTH_PREFIX		"solaris.smf."
377 #define	AUTH_MANAGE		AUTH_PREFIX "manage"
378 #define	AUTH_MODIFY		AUTH_PREFIX "modify"
379 #define	AUTH_MODIFY_PREFIX	AUTH_MODIFY "."
380 #define	AUTH_PG_ACTIONS		SCF_PG_RESTARTER_ACTIONS
381 #define	AUTH_PG_ACTIONS_TYPE	SCF_PG_RESTARTER_ACTIONS_TYPE
382 #define	AUTH_PG_GENERAL		SCF_PG_GENERAL
383 #define	AUTH_PG_GENERAL_TYPE	SCF_PG_GENERAL_TYPE
384 #define	AUTH_PG_GENERAL_OVR	SCF_PG_GENERAL_OVR
385 #define	AUTH_PG_GENERAL_OVR_TYPE  SCF_PG_GENERAL_OVR_TYPE
386 #define	AUTH_PROP_ACTION	"action_authorization"
387 #define	AUTH_PROP_ENABLED	"enabled"
388 #define	AUTH_PROP_MODIFY	"modify_authorization"
389 #define	AUTH_PROP_VALUE		"value_authorization"
390 #define	AUTH_PROP_READ		"read_authorization"
391 /* libsecdb should take care of this. */
392 #define	RBAC_AUTH_SEP		","
393 
394 #define	MAX_VALID_CHILDREN 3
395 
396 /*
397  * The ADT_smf_* symbols may not be defined on the build machine.  Because
398  * of this, we do not want to compile the _smf_audit_event() function when
399  * doing native builds.
400  */
401 #ifdef	NATIVE_BUILD
402 #define	smf_audit_event(i, s, r, d)
403 #else
404 #define	smf_audit_event(i, s, r, d)	_smf_audit_event(i, s, r, d)
405 #endif	/* NATIVE_BUILD */
406 
407 typedef struct rc_type_info {
408 	uint32_t	rt_type;		/* matches array index */
409 	uint32_t	rt_num_ids;
410 	uint32_t	rt_name_flags;
411 	uint32_t	rt_valid_children[MAX_VALID_CHILDREN];
412 } rc_type_info_t;
413 
414 #define	RT_NO_NAME	-1U
415 
416 static rc_type_info_t rc_types[] = {
417 	{REP_PROTOCOL_ENTITY_NONE, 0, RT_NO_NAME},
418 	{REP_PROTOCOL_ENTITY_SCOPE, 0, 0,
419 	    {REP_PROTOCOL_ENTITY_SERVICE, REP_PROTOCOL_ENTITY_SCOPE}},
420 	{REP_PROTOCOL_ENTITY_SERVICE, 0, UU_NAME_DOMAIN | UU_NAME_PATH,
421 	    {REP_PROTOCOL_ENTITY_INSTANCE, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
422 	{REP_PROTOCOL_ENTITY_INSTANCE, 1, UU_NAME_DOMAIN,
423 	    {REP_PROTOCOL_ENTITY_SNAPSHOT, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
424 	{REP_PROTOCOL_ENTITY_SNAPSHOT, 2, UU_NAME_DOMAIN,
425 	    {REP_PROTOCOL_ENTITY_SNAPLEVEL, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
426 	{REP_PROTOCOL_ENTITY_SNAPLEVEL, 4, RT_NO_NAME,
427 	    {REP_PROTOCOL_ENTITY_PROPERTYGRP}},
428 	{REP_PROTOCOL_ENTITY_PROPERTYGRP, 5, UU_NAME_DOMAIN,
429 	    {REP_PROTOCOL_ENTITY_PROPERTY}},
430 	{REP_PROTOCOL_ENTITY_CPROPERTYGRP, 0, UU_NAME_DOMAIN,
431 	    {REP_PROTOCOL_ENTITY_PROPERTY}},
432 	{REP_PROTOCOL_ENTITY_PROPERTY, 7, UU_NAME_DOMAIN},
433 	{-1UL}
434 };
435 #define	NUM_TYPES	((sizeof (rc_types) / sizeof (*rc_types)))
436 
437 /* Element of a permcheck_t hash table. */
438 struct pc_elt {
439 	struct pc_elt	*pce_next;
440 	char		pce_auth[1];
441 };
442 
443 /*
444  * If an authorization check fails, we must decide which of the strings in
445  * the permcheck hash table to use in the audit event.  That is, of all the
446  * strings in the hash table, we must choose one and report it in the audit
447  * event.  It is desirable to use the most specific string available for
448  * this purpose.
449  *
450  * The pc_auth_type specifies the types (sources) of authorization
451  * strings.  The enum is ordered in increasing specificity.
452  */
453 typedef enum pc_auth_type {
454 	PC_AUTH_NONE = 0,	/* no auth string available. */
455 	PC_AUTH_SMF,		/* strings coded into SMF. */
456 	PC_AUTH_SVC,		/* strings specified in PG of a service. */
457 	PC_AUTH_INST		/* strings specified in PG of an instance. */
458 } pc_auth_type_t;
459 
460 /*
461  * The following enum is used to represent the results of the checks to see
462  * if the client has the appropriate permissions to perform an action.
463  */
464 typedef enum perm_status {
465 	PERM_DENIED = 0,	/* Permission denied. */
466 	PERM_GRANTED,		/* Client has authorizations. */
467 	PERM_GONE,		/* Door client went away. */
468 	PERM_FAIL		/* Generic failure. e.g. resources */
469 } perm_status_t;
470 
471 /* An authorization set hash table. */
472 typedef struct {
473 	struct pc_elt	**pc_buckets;
474 	uint_t		pc_bnum;		/* number of buckets */
475 	uint_t		pc_enum;		/* number of elements */
476 	struct pc_elt	*pc_specific;		/* most specific element */
477 	pc_auth_type_t	pc_specific_type;	/* type of pc_specific */
478 	char		*pc_auth_string;	/* authorization string */
479 						/* for audit events */
480 } permcheck_t;
481 
482 /*
483  * Structure for holding audit event data.  Not all events use all members
484  * of the structure.
485  */
486 typedef struct audit_event_data {
487 	char		*ed_auth;	/* authorization string. */
488 	char		*ed_fmri;	/* affected FMRI. */
489 	char		*ed_snapname;	/* name of snapshot. */
490 	char		*ed_old_fmri;	/* old fmri in attach case. */
491 	char		*ed_old_name;	/* old snapshot in attach case. */
492 	char		*ed_type;	/* prop. group or prop. type. */
493 	char		*ed_prop_value;	/* property value. */
494 } audit_event_data_t;
495 
496 /*
497  * Pointer to function to do special processing to get audit event ID.
498  * Audit event IDs are defined in /usr/include/bsm/adt_event.h.  Function
499  * returns 0 if ID successfully retrieved.  Otherwise it returns -1.
500  */
501 typedef int (*spc_getid_fn_t)(tx_commit_data_t *, size_t, const char *,
502     au_event_t *);
503 static int general_enable_id(tx_commit_data_t *, size_t, const char *,
504     au_event_t *);
505 
506 static uu_list_pool_t *rc_children_pool;
507 static uu_list_pool_t *rc_pg_notify_pool;
508 static uu_list_pool_t *rc_notify_pool;
509 static uu_list_pool_t *rc_notify_info_pool;
510 
511 static rc_node_t *rc_scope;
512 
513 static pthread_mutex_t	rc_pg_notify_lock = PTHREAD_MUTEX_INITIALIZER;
514 static pthread_cond_t	rc_pg_notify_cv = PTHREAD_COND_INITIALIZER;
515 static uint_t		rc_notify_in_use;	/* blocks removals */
516 
517 /*
518  * Some combinations of property group/property name require a special
519  * audit event to be generated when there is a change.
520  * audit_special_prop_item_t is used to specify these special cases.  The
521  * special_props_list array defines a list of these special properties.
522  */
523 typedef struct audit_special_prop_item {
524 	const char	*api_pg_name;	/* property group name. */
525 	const char	*api_prop_name;	/* property name. */
526 	au_event_t	api_event_id;	/* event id or 0. */
527 	spc_getid_fn_t	api_event_func; /* function to get event id. */
528 } audit_special_prop_item_t;
529 
530 /*
531  * Native builds are done using the build machine's standard include
532  * files.  These files may not yet have the definitions for the ADT_smf_*
533  * symbols.  Thus, we do not compile this table when doing native builds.
534  */
535 #ifndef	NATIVE_BUILD
536 /*
537  * The following special_props_list array specifies property group/property
538  * name combinations that have specific meaning to startd.  A special event
539  * is generated for these combinations in addition to the regular property
540  * event.
541  *
542  * At run time this array gets sorted.  See the call to qsort(3C) in
543  * rc_node_init().  The array is sorted, so that bsearch(3C) can be used
544  * to do lookups.
545  */
546 static audit_special_prop_item_t special_props_list[] = {
547 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADED, ADT_smf_degrade,
548 	    NULL},
549 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADE_IMMEDIATE,
550 	    ADT_smf_immediate_degrade, NULL},
551 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_OFF, ADT_smf_clear, NULL},
552 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON,
553 	    ADT_smf_maintenance, NULL},
554 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMEDIATE,
555 	    ADT_smf_immediate_maintenance, NULL},
556 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMTEMP,
557 	    ADT_smf_immtmp_maintenance, NULL},
558 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_TEMPORARY,
559 	    ADT_smf_tmp_maintenance, NULL},
560 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_REFRESH, ADT_smf_refresh, NULL},
561 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTART, ADT_smf_restart, NULL},
562 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTORE, ADT_smf_clear, NULL},
563 	{SCF_PG_OPTIONS, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
564 	{SCF_PG_OPTIONS_OVR, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
565 	{SCF_PG_GENERAL, SCF_PROPERTY_ENABLED, 0, general_enable_id},
566 	{SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 0, general_enable_id}
567 };
568 #define	SPECIAL_PROP_COUNT	(sizeof (special_props_list) /\
569 	sizeof (audit_special_prop_item_t))
570 #endif	/* NATIVE_BUILD */
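
/*
 * Illustrative sketch (not part of the original source): the qsort(3C) and
 * bsearch(3C) calls mentioned above need a comparator over the two name
 * keys of an audit_special_prop_item_t.  A comparator of roughly this shape
 * works, as long as the sort and the lookups agree on the key order:
 *
 *	static int
 *	sketch_special_prop_compare(const void *i1, const void *i2)
 *	{
 *		const audit_special_prop_item_t *a = i1;
 *		const audit_special_prop_item_t *b = i2;
 *		int rc = strcmp(a->api_pg_name, b->api_pg_name);
 *
 *		if (rc == 0)
 *			rc = strcmp(a->api_prop_name, b->api_prop_name);
 *		return (rc);
 *	}
 *
 * special_property_event() can then bsearch(3C) special_props_list with a
 * key built from the property group and property names of each action.
 */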
571 
572 /*
573  * We support an arbitrary number of clients interested in events for certain
574  * types of changes.  Each client is represented by an rc_notify_info_t, and
575  * all clients are chained onto the rc_notify_info_list.
576  *
577  * The rc_notify_list is the global notification list.  Each entry is of
578  * type rc_notify_t, which is embedded in one of three other structures:
579  *
580  *	rc_node_t		property group update notification
581  *	rc_notify_delete_t	object deletion notification
582  *	rc_notify_info_t	notification clients
583  *
584  * Which type of object is determined by which pointer in the rc_notify_t is
585  * non-NULL.
586  *
587  * New notifications and clients are added to the end of the list.
588  * Notifications no-one is interested in are never added to the list.
589  *
590  * Clients use their position in the list to track which notifications they
591  * have not yet reported.  As they process notifications, they move forward
592  * in the list past them.  There is always a client at the beginning of the
593  * list -- as he moves past notifications, he removes them from the list and
594  * cleans them up.
595  *
596  * The rc_pg_notify_lock protects all notification state.  The rc_pg_notify_cv
597  * is used for global signalling, and each client has a cv on which he
598  * waits for events of interest.
599  */
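
/*
 * Illustrative sketch (not part of the original source): code walking
 * rc_notify_list tells the three cases apart by which back-pointer in the
 * rc_notify_t is non-NULL, roughly:
 *
 *	if (np->rcn_node != NULL)
 *		... property group update notification ...
 *	else if (np->rcn_delete != NULL)
 *		... object deletion notification ...
 *	else
 *		... np->rcn_info: a client's position marker ...
 *
 * rc_notify_info_interested() below asserts these invariants before
 * deciding whether a client cares about a given notification.
 */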
600 static uu_list_t	*rc_notify_info_list;
601 static uu_list_t	*rc_notify_list;
602 
603 #define	HASH_SIZE	512
604 #define	HASH_MASK	(HASH_SIZE - 1)
605 
606 #pragma align 64(cache_hash)
607 static cache_bucket_t cache_hash[HASH_SIZE];
608 
609 #define	CACHE_BUCKET(h)		(&cache_hash[(h) & HASH_MASK])
610 
611 
612 static void rc_node_no_client_refs(rc_node_t *np);
613 
614 
615 static uint32_t
616 rc_node_hash(rc_node_lookup_t *lp)
617 {
618 	uint32_t type = lp->rl_type;
619 	uint32_t backend = lp->rl_backend;
620 	uint32_t mainid = lp->rl_main_id;
621 	uint32_t *ids = lp->rl_ids;
622 
623 	rc_type_info_t *tp = &rc_types[type];
624 	uint32_t num_ids;
625 	uint32_t left;
626 	uint32_t hash;
627 
628 	assert(backend == BACKEND_TYPE_NORMAL ||
629 	    backend == BACKEND_TYPE_NONPERSIST);
630 
631 	assert(type > 0 && type < NUM_TYPES);
632 	num_ids = tp->rt_num_ids;
633 
634 	left = MAX_IDS - num_ids;
635 	assert(num_ids <= MAX_IDS);
636 
637 	hash = type * 7 + mainid * 5 + backend;
638 
639 	while (num_ids-- > 0)
640 		hash = hash * 11 + *ids++ * 7;
641 
642 	/*
643 	 * the rest should be zeroed
644 	 */
645 	while (left-- > 0)
646 		assert(*ids++ == 0);
647 
648 	return (hash);
649 }
650 
651 static int
652 rc_node_match(rc_node_t *np, rc_node_lookup_t *l)
653 {
654 	rc_node_lookup_t *r = &np->rn_id;
655 	rc_type_info_t *tp;
656 	uint32_t type;
657 	uint32_t num_ids;
658 
659 	if (r->rl_main_id != l->rl_main_id)
660 		return (0);
661 
662 	type = r->rl_type;
663 	if (type != l->rl_type)
664 		return (0);
665 
666 	assert(type > 0 && type < NUM_TYPES);
667 
668 	tp = &rc_types[r->rl_type];
669 	num_ids = tp->rt_num_ids;
670 
671 	assert(num_ids <= MAX_IDS);
672 	while (num_ids-- > 0)
673 		if (r->rl_ids[num_ids] != l->rl_ids[num_ids])
674 			return (0);
675 
676 	return (1);
677 }
678 
679 /*
680  * Register an ephemeral reference to np.  This should be done while both
681  * the persistent reference from which the np pointer was read is locked
682  * and np itself is locked.  This guarantees that another thread which
683  * thinks it has the last reference will yield without destroying the
684  * node.
685  */
686 static void
687 rc_node_hold_ephemeral_locked(rc_node_t *np)
688 {
689 	assert(MUTEX_HELD(&np->rn_lock));
690 
691 	++np->rn_erefs;
692 }
693 
694 /*
695  * the "other" references on a node are maintained in an atomically
696  * updated refcount, rn_other_refs.  This can be bumped from arbitrary
697  * context, and tracks references to a possibly out-of-date node's children.
698  *
699  * To prevent the node from disappearing between the final drop of
700  * rn_other_refs and the unref handling, rn_other_refs_held is bumped on
701  * 0->1 transitions and decremented (with the node lock held) on 1->0
702  * transitions.
703  */
704 static void
705 rc_node_hold_other(rc_node_t *np)
706 {
707 	if (atomic_add_32_nv(&np->rn_other_refs, 1) == 1) {
708 		atomic_add_32(&np->rn_other_refs_held, 1);
709 		assert(np->rn_other_refs_held > 0);
710 	}
711 	assert(np->rn_other_refs > 0);
712 }
713 
714 /*
715  * No node locks may be held
716  */
717 static void
718 rc_node_rele_other(rc_node_t *np)
719 {
720 	assert(np->rn_other_refs > 0);
721 	if (atomic_add_32_nv(&np->rn_other_refs, -1) == 0) {
722 		(void) pthread_mutex_lock(&np->rn_lock);
723 		assert(np->rn_other_refs_held > 0);
724 		if (atomic_add_32_nv(&np->rn_other_refs_held, -1) == 0 &&
725 		    np->rn_refs == 0 && (np->rn_flags & RC_NODE_OLD)) {
726 			/*
727 			 * This was the last client reference.  Destroy
728 			 * any other references and free() the node.
729 			 */
730 			rc_node_no_client_refs(np);
731 		} else {
732 			(void) pthread_mutex_unlock(&np->rn_lock);
733 		}
734 	}
735 }
736 
737 static void
738 rc_node_hold_locked(rc_node_t *np)
739 {
740 	assert(MUTEX_HELD(&np->rn_lock));
741 
742 	if (np->rn_refs == 0 && (np->rn_flags & RC_NODE_PARENT_REF))
743 		rc_node_hold_other(np->rn_parent_ref);
744 	np->rn_refs++;
745 	assert(np->rn_refs > 0);
746 }
747 
748 static void
749 rc_node_hold(rc_node_t *np)
750 {
751 	(void) pthread_mutex_lock(&np->rn_lock);
752 	rc_node_hold_locked(np);
753 	(void) pthread_mutex_unlock(&np->rn_lock);
754 }
755 
756 static void
757 rc_node_rele_locked(rc_node_t *np)
758 {
759 	int unref = 0;
760 	rc_node_t *par_ref = NULL;
761 
762 	assert(MUTEX_HELD(&np->rn_lock));
763 	assert(np->rn_refs > 0);
764 
765 	if (--np->rn_refs == 0) {
766 		if (np->rn_flags & RC_NODE_PARENT_REF)
767 			par_ref = np->rn_parent_ref;
768 
769 		/*
770 		 * Composed property groups are only as good as their
771 		 * references.
772 		 */
773 		if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
774 			np->rn_flags |= RC_NODE_DEAD;
775 
776 		if ((np->rn_flags & (RC_NODE_DEAD|RC_NODE_OLD)) &&
777 		    np->rn_other_refs == 0 && np->rn_other_refs_held == 0)
778 			unref = 1;
779 	}
780 
781 	if (unref) {
782 		/*
783 		 * This was the last client reference.  Destroy any other
784 		 * references and free() the node.
785 		 */
786 		rc_node_no_client_refs(np);
787 	} else {
788 		/*
789 		 * rn_erefs can be 0 if we acquired the reference in
790 		 * a path which hasn't been updated to increment rn_erefs.
791 		 * When all paths which end here are updated, we should
792 		 * assert rn_erefs > 0 and always decrement it.
793 		 */
794 		if (np->rn_erefs > 0)
795 			--np->rn_erefs;
796 		(void) pthread_mutex_unlock(&np->rn_lock);
797 	}
798 
799 	if (par_ref != NULL)
800 		rc_node_rele_other(par_ref);
801 }
802 
803 void
804 rc_node_rele(rc_node_t *np)
805 {
806 	(void) pthread_mutex_lock(&np->rn_lock);
807 	rc_node_rele_locked(np);
808 }
809 
810 static cache_bucket_t *
811 cache_hold(uint32_t h)
812 {
813 	cache_bucket_t *bp = CACHE_BUCKET(h);
814 	(void) pthread_mutex_lock(&bp->cb_lock);
815 	return (bp);
816 }
817 
818 static void
819 cache_release(cache_bucket_t *bp)
820 {
821 	(void) pthread_mutex_unlock(&bp->cb_lock);
822 }
823 
824 static rc_node_t *
825 cache_lookup_unlocked(cache_bucket_t *bp, rc_node_lookup_t *lp)
826 {
827 	uint32_t h = rc_node_hash(lp);
828 	rc_node_t *np;
829 
830 	assert(MUTEX_HELD(&bp->cb_lock));
831 	assert(bp == CACHE_BUCKET(h));
832 
833 	for (np = bp->cb_head; np != NULL; np = np->rn_hash_next) {
834 		if (np->rn_hash == h && rc_node_match(np, lp)) {
835 			rc_node_hold(np);
836 			return (np);
837 		}
838 	}
839 
840 	return (NULL);
841 }
842 
843 static rc_node_t *
844 cache_lookup(rc_node_lookup_t *lp)
845 {
846 	uint32_t h;
847 	cache_bucket_t *bp;
848 	rc_node_t *np;
849 
850 	h = rc_node_hash(lp);
851 	bp = cache_hold(h);
852 
853 	np = cache_lookup_unlocked(bp, lp);
854 
855 	cache_release(bp);
856 
857 	return (np);
858 }
859 
860 static void
861 cache_insert_unlocked(cache_bucket_t *bp, rc_node_t *np)
862 {
863 	assert(MUTEX_HELD(&bp->cb_lock));
864 	assert(np->rn_hash == rc_node_hash(&np->rn_id));
865 	assert(bp == CACHE_BUCKET(np->rn_hash));
866 
867 	assert(np->rn_hash_next == NULL);
868 
869 	np->rn_hash_next = bp->cb_head;
870 	bp->cb_head = np;
871 }
872 
873 static void
874 cache_remove_unlocked(cache_bucket_t *bp, rc_node_t *np)
875 {
876 	rc_node_t **npp;
877 
878 	assert(MUTEX_HELD(&bp->cb_lock));
879 	assert(np->rn_hash == rc_node_hash(&np->rn_id));
880 	assert(bp == CACHE_BUCKET(np->rn_hash));
881 
882 	for (npp = &bp->cb_head; *npp != NULL; npp = &(*npp)->rn_hash_next)
883 		if (*npp == np)
884 			break;
885 
886 	assert(*npp == np);
887 	*npp = np->rn_hash_next;
888 	np->rn_hash_next = NULL;
889 }
890 
891 /*
892  * verify that the 'parent' type can have a child typed 'child'
893  * Fails with
894  *   _INVALID_TYPE - argument is invalid
895  *   _TYPE_MISMATCH - parent type cannot have children of type child
896  */
897 static int
898 rc_check_parent_child(uint32_t parent, uint32_t child)
899 {
900 	int idx;
901 	uint32_t type;
902 
903 	if (parent == 0 || parent >= NUM_TYPES ||
904 	    child == 0 || child >= NUM_TYPES)
905 		return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
906 
907 	for (idx = 0; idx < MAX_VALID_CHILDREN; idx++) {
908 		type = rc_types[parent].rt_valid_children[idx];
909 		if (type == child)
910 			return (REP_PROTOCOL_SUCCESS);
911 	}
912 
913 	return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
914 }
915 
916 /*
917  * Fails with
918  *   _INVALID_TYPE - type is invalid
919  *   _BAD_REQUEST - name is an invalid name for a node of type type
920  */
921 int
922 rc_check_type_name(uint32_t type, const char *name)
923 {
924 	if (type == 0 || type >= NUM_TYPES)
925 		return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
926 
927 	if (uu_check_name(name, rc_types[type].rt_name_flags) == -1)
928 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
929 
930 	return (REP_PROTOCOL_SUCCESS);
931 }
932 
933 static int
934 rc_check_pgtype_name(const char *name)
935 {
936 	if (uu_check_name(name, UU_NAME_DOMAIN) == -1)
937 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
938 
939 	return (REP_PROTOCOL_SUCCESS);
940 }
941 
942 /*
943  * rc_node_free_fmri should be called whenever a node loses its parent.
944  * The reason is that the node's fmri string is built up by concatenating
945  * its name to the parent's fmri.  Thus, when the node no longer has a
946  * parent, its fmri is no longer valid.
947  */
948 static void
949 rc_node_free_fmri(rc_node_t *np)
950 {
951 	if (np->rn_fmri != NULL) {
952 		free((void *)np->rn_fmri);
953 		np->rn_fmri = NULL;
954 	}
955 }
956 
957 /*
958  * Concatenate the appropriate separator and the FMRI element to the base
959  * FMRI string at fmri.
960  *
961  * Fails with
962  *	_TRUNCATED	Not enough room in buffer at fmri.
963  */
964 static int
965 rc_concat_fmri_element(
966 	char *fmri,			/* base fmri */
967 	size_t bufsize,			/* size of buf at fmri */
968 	size_t *sz_out,			/* receives result size. */
969 	const char *element,		/* element name to concat */
970 	rep_protocol_entity_t type)	/* type of element */
971 {
972 	size_t actual;
973 	const char *name = element;
974 	int rc;
975 	const char *separator;
976 
977 	if (bufsize > 0)
978 		*sz_out = strlen(fmri);
979 	else
980 		*sz_out = 0;
981 
982 	switch (type) {
983 	case REP_PROTOCOL_ENTITY_SCOPE:
984 		if (strcmp(element, SCF_FMRI_LOCAL_SCOPE) == 0) {
985 			/*
986 			 * No need to display scope information if we are
987 			 * in the local scope.
988 			 */
989 			separator = SCF_FMRI_SVC_PREFIX;
990 			name = NULL;
991 		} else {
992 			/*
993 			 * Need to display scope information, because it is
994 			 * not the local scope.
995 			 */
996 			separator = SCF_FMRI_SVC_PREFIX SCF_FMRI_SCOPE_PREFIX;
997 		}
998 		break;
999 	case REP_PROTOCOL_ENTITY_SERVICE:
1000 		separator = SCF_FMRI_SERVICE_PREFIX;
1001 		break;
1002 	case REP_PROTOCOL_ENTITY_INSTANCE:
1003 		separator = SCF_FMRI_INSTANCE_PREFIX;
1004 		break;
1005 	case REP_PROTOCOL_ENTITY_PROPERTYGRP:
1006 	case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
1007 		separator = SCF_FMRI_PROPERTYGRP_PREFIX;
1008 		break;
1009 	case REP_PROTOCOL_ENTITY_PROPERTY:
1010 		separator = SCF_FMRI_PROPERTY_PREFIX;
1011 		break;
1012 	case REP_PROTOCOL_ENTITY_VALUE:
1013 		/*
1014 		 * A value does not have a separate FMRI from its property,
1015 		 * so there is nothing to concat.
1016 		 */
1017 		return (REP_PROTOCOL_SUCCESS);
1018 	case REP_PROTOCOL_ENTITY_SNAPSHOT:
1019 	case REP_PROTOCOL_ENTITY_SNAPLEVEL:
1020 		/* Snapshots do not have FMRIs, so there is nothing to do. */
1021 		return (REP_PROTOCOL_SUCCESS);
1022 	default:
1023 		(void) fprintf(stderr, "%s:%d: Unknown protocol type %d.\n",
1024 		    __FILE__, __LINE__, type);
1025 		abort();	/* Missing a case in switch if we get here. */
1026 	}
1027 
1028 	/* Concatenate separator and element to the fmri buffer. */
1029 
1030 	actual = strlcat(fmri, separator, bufsize);
1031 	if (name != NULL) {
1032 		if (actual < bufsize) {
1033 			actual = strlcat(fmri, name, bufsize);
1034 		} else {
1035 			actual += strlen(name);
1036 		}
1037 	}
1038 	if (actual < bufsize) {
1039 		rc = REP_PROTOCOL_SUCCESS;
1040 	} else {
1041 		rc = REP_PROTOCOL_FAIL_TRUNCATED;
1042 	}
1043 	*sz_out = actual;
1044 	return (rc);
1045 }
1046 
1047 /*
1048  * Get the FMRI for the node at np.  The fmri will be placed in buf.  On
1049  * success sz_out will be set to the size of the fmri in buf.  If
1050  * REP_PROTOCOL_FAIL_TRUNCATED is returned, sz_out will be set to the size
1051  * of the buffer that would be required to avoid truncation.
1052  *
1053  * Fails with
1054  *	_TRUNCATED	not enough room in buf for the FMRI.
1055  */
1056 static int
1057 rc_node_get_fmri_or_fragment(rc_node_t *np, char *buf, size_t bufsize,
1058     size_t *sz_out)
1059 {
1060 	size_t fmri_len = 0;
1061 	int r;
1062 
1063 	if (bufsize > 0)
1064 		*buf = 0;
1065 	*sz_out = 0;
1066 
1067 	if (np->rn_fmri == NULL) {
1068 		/*
1069 		 * A NULL rn_fmri implies that this is a top level scope.
1070 		 * Child nodes will always have an rn_fmri established
1071 		 * because both rc_node_link_child() and
1072 		 * rc_node_relink_child() call rc_node_build_fmri().  In
1073 		 * this case, we'll just return our name preceded by the
1074 		 * appropriate FMRI decorations.
1075 		 */
1076 		assert(np->rn_parent == NULL);
1077 		r = rc_concat_fmri_element(buf, bufsize, &fmri_len, np->rn_name,
1078 		    np->rn_id.rl_type);
1079 		if (r != REP_PROTOCOL_SUCCESS)
1080 			return (r);
1081 	} else {
1082 		/* We have an fmri, so return it. */
1083 		fmri_len = strlcpy(buf, np->rn_fmri, bufsize);
1084 	}
1085 
1086 	*sz_out = fmri_len;
1087 
1088 	if (fmri_len >= bufsize)
1089 		return (REP_PROTOCOL_FAIL_TRUNCATED);
1090 
1091 	return (REP_PROTOCOL_SUCCESS);
1092 }
1093 
1094 /*
1095  * Build an FMRI string for this node and save it in rn_fmri.
1096  *
1097  * The basic strategy here is to get the fmri of our parent and then
1098  * concatenate the appropriate separator followed by our name.  If our name
1099  * is null, the resulting fmri will just be a copy of the parent fmri.
1100  * rc_node_build_fmri() should be called with the RC_NODE_USING_PARENT flag
1101  * set.  Also the rn_lock for this node should be held.
1102  *
1103  * Fails with
1104  *	_NO_RESOURCES	Could not allocate memory.
1105  */
1106 static int
1107 rc_node_build_fmri(rc_node_t *np)
1108 {
1109 	size_t actual;
1110 	char fmri[REP_PROTOCOL_FMRI_LEN];
1111 	int rc;
1112 	size_t	sz = REP_PROTOCOL_FMRI_LEN;
1113 
1114 	assert(MUTEX_HELD(&np->rn_lock));
1115 	assert(np->rn_flags & RC_NODE_USING_PARENT);
1116 
1117 	rc_node_free_fmri(np);
1118 
1119 	rc = rc_node_get_fmri_or_fragment(np->rn_parent, fmri, sz, &actual);
1120 	assert(rc == REP_PROTOCOL_SUCCESS);
1121 
1122 	if (np->rn_name != NULL) {
1123 		rc = rc_concat_fmri_element(fmri, sz, &actual, np->rn_name,
1124 		    np->rn_id.rl_type);
1125 		assert(rc == REP_PROTOCOL_SUCCESS);
1126 		np->rn_fmri = strdup(fmri);
1127 	} else {
1128 		np->rn_fmri = strdup(fmri);
1129 	}
1130 	if (np->rn_fmri == NULL) {
1131 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1132 	} else {
1133 		rc = REP_PROTOCOL_SUCCESS;
1134 	}
1135 
1136 	return (rc);
1137 }
1138 
1139 /*
1140  * Get the FMRI of the node at np placing the result in fmri.  Then
1141  * concatenate the additional element to fmri.  The type variable indicates
1142  * the type of element, so that the appropriate separator can be
1143  * generated.  size is the number of bytes in the buffer at fmri, and
1144  * sz_out receives the size of the generated string.  If the result is
1145  * truncated, sz_out will receive the size of the buffer that would be
1146  * required to avoid truncation.
1147  *
1148  * Fails with
1149  *	_TRUNCATED	Not enough room in buffer at fmri.
1150  */
1151 static int
1152 rc_get_fmri_and_concat(rc_node_t *np, char *fmri, size_t size, size_t *sz_out,
1153     const char *element, rep_protocol_entity_t type)
1154 {
1155 	int rc;
1156 
1157 	if ((rc = rc_node_get_fmri_or_fragment(np, fmri, size, sz_out)) !=
1158 	    REP_PROTOCOL_SUCCESS) {
1159 		return (rc);
1160 	}
1161 	if ((rc = rc_concat_fmri_element(fmri, size, sz_out, element, type)) !=
1162 	    REP_PROTOCOL_SUCCESS) {
1163 		return (rc);
1164 	}
1165 
1166 	return (REP_PROTOCOL_SUCCESS);
1167 }
1168 
1169 static int
1170 rc_notify_info_interested(rc_notify_info_t *rnip, rc_notify_t *np)
1171 {
1172 	rc_node_t *nnp = np->rcn_node;
1173 	int i;
1174 
1175 	assert(MUTEX_HELD(&rc_pg_notify_lock));
1176 
1177 	if (np->rcn_delete != NULL) {
1178 		assert(np->rcn_info == NULL && np->rcn_node == NULL);
1179 		return (1);		/* everyone likes deletes */
1180 	}
1181 	if (np->rcn_node == NULL) {
1182 		assert(np->rcn_info != NULL || np->rcn_delete != NULL);
1183 		return (0);
1184 	}
1185 	assert(np->rcn_info == NULL);
1186 
1187 	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
1188 		if (rnip->rni_namelist[i] != NULL) {
1189 			if (strcmp(nnp->rn_name, rnip->rni_namelist[i]) == 0)
1190 				return (1);
1191 		}
1192 		if (rnip->rni_typelist[i] != NULL) {
1193 			if (strcmp(nnp->rn_type, rnip->rni_typelist[i]) == 0)
1194 				return (1);
1195 		}
1196 	}
1197 	return (0);
1198 }
1199 
1200 static void
1201 rc_notify_insert_node(rc_node_t *nnp)
1202 {
1203 	rc_notify_t *np = &nnp->rn_notify;
1204 	rc_notify_info_t *nip;
1205 	int found = 0;
1206 
1207 	assert(np->rcn_info == NULL);
1208 
1209 	if (nnp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
1210 		return;
1211 
1212 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
1213 	np->rcn_node = nnp;
1214 	for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
1215 	    nip = uu_list_next(rc_notify_info_list, nip)) {
1216 		if (rc_notify_info_interested(nip, np)) {
1217 			(void) pthread_cond_broadcast(&nip->rni_cv);
1218 			found++;
1219 		}
1220 	}
1221 	if (found)
1222 		(void) uu_list_insert_before(rc_notify_list, NULL, np);
1223 	else
1224 		np->rcn_node = NULL;
1225 
1226 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
1227 }
1228 
1229 static void
1230 rc_notify_deletion(rc_notify_delete_t *ndp, const char *service,
1231     const char *instance, const char *pg)
1232 {
1233 	rc_notify_info_t *nip;
1234 
1235 	uu_list_node_init(&ndp->rnd_notify, &ndp->rnd_notify.rcn_list_node,
1236 	    rc_notify_pool);
1237 	ndp->rnd_notify.rcn_delete = ndp;
1238 
1239 	(void) snprintf(ndp->rnd_fmri, sizeof (ndp->rnd_fmri),
1240 	    "svc:/%s%s%s%s%s", service,
1241 	    (instance != NULL)? ":" : "", (instance != NULL)? instance : "",
1242 	    (pg != NULL)? "/:properties/" : "", (pg != NULL)? pg : "");
1243 
1244 	/*
1245 	 * add to notification list, notify watchers
1246 	 */
1247 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
1248 	for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
1249 	    nip = uu_list_next(rc_notify_info_list, nip))
1250 		(void) pthread_cond_broadcast(&nip->rni_cv);
1251 	(void) uu_list_insert_before(rc_notify_list, NULL, ndp);
1252 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
1253 }
1254 
1255 static void
1256 rc_notify_remove_node(rc_node_t *nnp)
1257 {
1258 	rc_notify_t *np = &nnp->rn_notify;
1259 
1260 	assert(np->rcn_info == NULL);
1261 	assert(!MUTEX_HELD(&nnp->rn_lock));
1262 
1263 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
1264 	while (np->rcn_node != NULL) {
1265 		if (rc_notify_in_use) {
1266 			(void) pthread_cond_wait(&rc_pg_notify_cv,
1267 			    &rc_pg_notify_lock);
1268 			continue;
1269 		}
1270 		(void) uu_list_remove(rc_notify_list, np);
1271 		np->rcn_node = NULL;
1272 		break;
1273 	}
1274 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
1275 }
1276 
1277 static void
1278 rc_notify_remove_locked(rc_notify_t *np)
1279 {
1280 	assert(MUTEX_HELD(&rc_pg_notify_lock));
1281 	assert(rc_notify_in_use == 0);
1282 
1283 	(void) uu_list_remove(rc_notify_list, np);
1284 	if (np->rcn_node) {
1285 		np->rcn_node = NULL;
1286 	} else if (np->rcn_delete) {
1287 		uu_free(np->rcn_delete);
1288 	} else {
1289 		assert(0);	/* CAN'T HAPPEN */
1290 	}
1291 }
1292 
1293 /*
1294  * Permission checking functions.  See comment atop this file.
1295  */
1296 #ifndef NATIVE_BUILD
1297 static permcheck_t *
1298 pc_create()
1299 {
1300 	permcheck_t *p;
1301 
1302 	p = uu_zalloc(sizeof (*p));
1303 	if (p == NULL)
1304 		return (NULL);
1305 	p->pc_bnum = 8;			/* Normal case will only have 2 elts. */
1306 	p->pc_buckets = uu_zalloc(sizeof (*p->pc_buckets) * p->pc_bnum);
1307 	if (p->pc_buckets == NULL) {
1308 		uu_free(p);
1309 		return (NULL);
1310 	}
1311 
1312 	p->pc_enum = 0;
1313 	return (p);
1314 }
1315 
1316 static void
1317 pc_free(permcheck_t *pcp)
1318 {
1319 	uint_t i;
1320 	struct pc_elt *ep, *next;
1321 
1322 	for (i = 0; i < pcp->pc_bnum; ++i) {
1323 		for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1324 			next = ep->pce_next;
1325 			free(ep);
1326 		}
1327 	}
1328 
1329 	free(pcp->pc_buckets);
1330 	free(pcp);
1331 }
1332 
1333 static uint32_t
1334 pc_hash(const char *auth)
1335 {
1336 	uint32_t h = 0, g;
1337 	const char *p;
1338 
1339 	/*
1340 	 * Generic hash function from uts/common/os/modhash.c.
1341 	 */
1342 	for (p = auth; *p != '\0'; ++p) {
1343 		h = (h << 4) + *p;
1344 		g = (h & 0xf0000000);
1345 		if (g != 0) {
1346 			h ^= (g >> 24);
1347 			h ^= g;
1348 		}
1349 	}
1350 
1351 	return (h);
1352 }
1353 
1354 static perm_status_t
1355 pc_exists(permcheck_t *pcp, const char *auth)
1356 {
1357 	uint32_t h;
1358 	struct pc_elt *ep;
1359 
1360 	h = pc_hash(auth);
1361 	for (ep = pcp->pc_buckets[h & (pcp->pc_bnum - 1)];
1362 	    ep != NULL;
1363 	    ep = ep->pce_next) {
1364 		if (strcmp(auth, ep->pce_auth) == 0) {
1365 			pcp->pc_auth_string = ep->pce_auth;
1366 			return (PERM_GRANTED);
1367 		}
1368 	}
1369 
1370 	return (PERM_DENIED);
1371 }
1372 
1373 static perm_status_t
1374 pc_match(permcheck_t *pcp, const char *pattern)
1375 {
1376 	uint_t i;
1377 	struct pc_elt *ep;
1378 
1379 	for (i = 0; i < pcp->pc_bnum; ++i) {
1380 		for (ep = pcp->pc_buckets[i]; ep != NULL; ep = ep->pce_next) {
1381 			if (_auth_match(pattern, ep->pce_auth)) {
1382 				pcp->pc_auth_string = ep->pce_auth;
1383 				return (PERM_GRANTED);
1384 			}
1385 		}
1386 	}
1387 
1388 	return (PERM_DENIED);
1389 }
1390 
1391 static int
1392 pc_grow(permcheck_t *pcp)
1393 {
1394 	uint_t new_bnum, i, j;
1395 	struct pc_elt **new_buckets;
1396 	struct pc_elt *ep, *next;
1397 
1398 	new_bnum = pcp->pc_bnum * 2;
1399 	if (new_bnum < pcp->pc_bnum)
1400 		/* Homey don't play that. */
1401 		return (-1);
1402 
1403 	new_buckets = uu_zalloc(sizeof (*new_buckets) * new_bnum);
1404 	if (new_buckets == NULL)
1405 		return (-1);
1406 
1407 	for (i = 0; i < pcp->pc_bnum; ++i) {
1408 		for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1409 			next = ep->pce_next;
1410 			j = pc_hash(ep->pce_auth) & (new_bnum - 1);
1411 			ep->pce_next = new_buckets[j];
1412 			new_buckets[j] = ep;
1413 		}
1414 	}
1415 
1416 	uu_free(pcp->pc_buckets);
1417 	pcp->pc_buckets = new_buckets;
1418 	pcp->pc_bnum = new_bnum;
1419 
1420 	return (0);
1421 }
1422 
1423 static int
1424 pc_add(permcheck_t *pcp, const char *auth, pc_auth_type_t auth_type)
1425 {
1426 	struct pc_elt *ep;
1427 	uint_t i;
1428 
1429 	ep = uu_zalloc(offsetof(struct pc_elt, pce_auth) + strlen(auth) + 1);
1430 	if (ep == NULL)
1431 		return (-1);
1432 
1433 	/* Grow if pc_enum / pc_bnum > 3/4. */
1434 	if (pcp->pc_enum * 4 > 3 * pcp->pc_bnum)
1435 		/* Failure is not a stopper; we'll try again next time. */
1436 		(void) pc_grow(pcp);
1437 
1438 	(void) strcpy(ep->pce_auth, auth);
1439 
1440 	i = pc_hash(auth) & (pcp->pc_bnum - 1);
1441 	ep->pce_next = pcp->pc_buckets[i];
1442 	pcp->pc_buckets[i] = ep;
1443 
1444 	if (auth_type > pcp->pc_specific_type) {
1445 		pcp->pc_specific_type = auth_type;
1446 		pcp->pc_specific = ep;
1447 	}
1448 
1449 	++pcp->pc_enum;
1450 
1451 	return (0);
1452 }
1453 
1454 /*
1455  * For the type of a property group, return the authorization which may be
1456  * used to modify it.
1457  */
1458 static const char *
1459 perm_auth_for_pgtype(const char *pgtype)
1460 {
1461 	if (strcmp(pgtype, SCF_GROUP_METHOD) == 0)
1462 		return (AUTH_MODIFY_PREFIX "method");
1463 	else if (strcmp(pgtype, SCF_GROUP_DEPENDENCY) == 0)
1464 		return (AUTH_MODIFY_PREFIX "dependency");
1465 	else if (strcmp(pgtype, SCF_GROUP_APPLICATION) == 0)
1466 		return (AUTH_MODIFY_PREFIX "application");
1467 	else if (strcmp(pgtype, SCF_GROUP_FRAMEWORK) == 0)
1468 		return (AUTH_MODIFY_PREFIX "framework");
1469 	else
1470 		return (NULL);
1471 }
1472 
1473 /*
1474  * Fails with
1475  *   _NO_RESOURCES - out of memory
1476  */
1477 static int
1478 perm_add_enabling_type(permcheck_t *pcp, const char *auth,
1479     pc_auth_type_t auth_type)
1480 {
1481 	return (pc_add(pcp, auth, auth_type) == 0 ? REP_PROTOCOL_SUCCESS :
1482 	    REP_PROTOCOL_FAIL_NO_RESOURCES);
1483 }
1484 
1485 /*
1486  * Fails with
1487  *   _NO_RESOURCES - out of memory
1488  */
1489 static int
1490 perm_add_enabling(permcheck_t *pcp, const char *auth)
1491 {
1492 	return (perm_add_enabling_type(pcp, auth, PC_AUTH_SMF));
1493 }
1494 
1495 /* Note that perm_add_enabling_values() is defined below. */
1496 
1497 /*
1498  * perm_granted() returns PERM_GRANTED if the current door caller has one of
1499  * the enabling authorizations in pcp, PERM_DENIED if it doesn't, PERM_GONE if
1500  * the door client went away and PERM_FAIL if an error (usually lack of
1501  * memory) occurs.  check_auth_list() checks an RBAC_AUTH_SEP-separated
1502  * list of authorizations for existence in pcp, and check_prof_list()
1503  * checks the authorizations granted to an RBAC_AUTH_SEP-separated list of
1504  * profiles.
1505  */
1506 static perm_status_t
1507 check_auth_list(permcheck_t *pcp, char *authlist)
1508 {
1509 	char *auth, *lasts;
1510 	perm_status_t ret;
1511 
1512 	for (auth = (char *)strtok_r(authlist, RBAC_AUTH_SEP, &lasts);
1513 	    auth != NULL;
1514 	    auth = (char *)strtok_r(NULL, RBAC_AUTH_SEP, &lasts)) {
1515 		if (strchr(auth, KV_WILDCHAR) == NULL)
1516 			ret = pc_exists(pcp, auth);
1517 		else
1518 			ret = pc_match(pcp, auth);
1519 
1520 		if (ret != PERM_DENIED)
1521 			return (ret);
1522 	}
1523 
1524 	/*
1525 	 * If we failed, choose the most specific auth string for use in
1526 	 * the audit event.
1527 	 */
1528 	assert(pcp->pc_specific != NULL);
1529 	pcp->pc_auth_string = pcp->pc_specific->pce_auth;
1530 
1531 	return (PERM_DENIED);
1532 }
1533 
1534 static perm_status_t
1535 check_prof_list(permcheck_t *pcp, char *proflist)
1536 {
1537 	char *prof, *lasts, *authlist, *subproflist;
1538 	profattr_t *pap;
1539 	perm_status_t ret = PERM_DENIED;
1540 
1541 	for (prof = strtok_r(proflist, RBAC_AUTH_SEP, &lasts);
1542 	    prof != NULL;
1543 	    prof = strtok_r(NULL, RBAC_AUTH_SEP, &lasts)) {
1544 		pap = getprofnam(prof);
1545 		if (pap == NULL)
1546 			continue;
1547 
1548 		authlist = kva_match(pap->attr, PROFATTR_AUTHS_KW);
1549 		if (authlist != NULL)
1550 			ret = check_auth_list(pcp, authlist);
1551 
1552 		if (ret == PERM_DENIED) {
1553 			subproflist = kva_match(pap->attr, PROFATTR_PROFS_KW);
1554 			if (subproflist != NULL)
1555 				/* depth check to avoid infinite recursion? */
1556 				ret = check_prof_list(pcp, subproflist);
1557 		}
1558 
1559 		free_profattr(pap);
1560 		if (ret != PERM_DENIED)
1561 			return (ret);
1562 	}
1563 
1564 	return (ret);
1565 }
1566 
1567 static perm_status_t
1568 perm_granted(permcheck_t *pcp)
1569 {
1570 	ucred_t *uc;
1571 
1572 	perm_status_t ret = PERM_DENIED;
1573 	int rv;
1574 	uid_t uid;
1575 	userattr_t *uap;
1576 	char *authlist, *userattr_authlist, *proflist, *def_prof = NULL;
1577 	struct passwd pw;
1578 	char pwbuf[1024];	/* XXX should be NSS_BUFLEN_PASSWD */
1579 
1580 	/* Get the uid */
1581 	if ((uc = get_ucred()) == NULL) {
1582 		if (errno == EINVAL) {
1583 			/*
1584 			 * Client is no longer waiting for our response (e.g.,
1585 			 * it received a signal & resumed with EINTR).
1586 			 * Punting with door_return() would be nice but we
1587 			 * need to release all of the locks & references we
1588 			 * hold.  And we must report failure to the client
1589 			 * layer to keep it from ignoring retries as
1590 			 * already-done (idempotency & all that).  None of the
1591 			 * error codes fit very well, so we might as well
1592 			 * force the return of _PERMISSION_DENIED since we
1593 			 * couldn't determine the user.
1594 			 */
1595 			return (PERM_GONE);
1596 		}
1597 		assert(0);
1598 		abort();
1599 	}
1600 
1601 	uid = ucred_geteuid(uc);
1602 	assert(uid != (uid_t)-1);
1603 
1604 	if (getpwuid_r(uid, &pw, pwbuf, sizeof (pwbuf)) == NULL) {
1605 		return (PERM_FAIL);
1606 	}
1607 
1608 	/*
1609 	 * Get user's default authorizations from policy.conf
1610 	 */
1611 	rv = _get_user_defs(pw.pw_name, &authlist, &def_prof);
1612 
1613 	if (rv != 0)
1614 		return (PERM_FAIL);
1615 
1616 	if (authlist != NULL) {
1617 		ret = check_auth_list(pcp, authlist);
1618 
1619 		if (ret != PERM_DENIED) {
1620 			_free_user_defs(authlist, def_prof);
1621 			return (ret);
1622 		}
1623 	}
1624 
1625 	/*
1626 	 * Defer checking def_prof until later in an attempt to consolidate
1627 	 * prof_attr accesses.
1628 	 */
1629 
1630 	uap = getusernam(pw.pw_name);
1631 	if (uap != NULL) {
1632 		/* Get the authorizations from user_attr. */
1633 		userattr_authlist = kva_match(uap->attr, USERATTR_AUTHS_KW);
1634 		if (userattr_authlist != NULL) {
1635 			ret = check_auth_list(pcp, userattr_authlist);
1636 		}
1637 	}
1638 
1639 	if ((ret == PERM_DENIED) && (def_prof != NULL)) {
1640 		/* Check generic profiles. */
1641 		ret = check_prof_list(pcp, def_prof);
1642 	}
1643 
1644 	if ((ret == PERM_DENIED) && (uap != NULL)) {
1645 		proflist = kva_match(uap->attr, USERATTR_PROFILES_KW);
1646 		if (proflist != NULL)
1647 			ret = check_prof_list(pcp, proflist);
1648 	}
1649 
1650 	_free_user_defs(authlist, def_prof);
1651 	if (uap != NULL)
1652 		free_userattr(uap);
1653 
1654 	return (ret);
1655 }
1656 
1657 static int
1658 map_granted_status(perm_status_t status, permcheck_t *pcp,
1659     char **match_auth)
1660 {
1661 	int rc;
1662 
1663 	*match_auth = NULL;
1664 	switch (status) {
1665 	case PERM_DENIED:
1666 		*match_auth = strdup(pcp->pc_auth_string);
1667 		if (*match_auth == NULL)
1668 			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1669 		else
1670 			rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1671 		break;
1672 	case PERM_GRANTED:
1673 		*match_auth = strdup(pcp->pc_auth_string);
1674 		if (*match_auth == NULL)
1675 			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1676 		else
1677 			rc = REP_PROTOCOL_SUCCESS;
1678 		break;
1679 	case PERM_GONE:
1680 		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1681 		break;
1682 	case PERM_FAIL:
1683 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1684 		break;
1685 	}
1686 	return (rc);
1687 }
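
/*
 * A minimal sketch (illustrative only) of how the pieces above combine for a
 * permission check; the real pattern appears later in this file, e.g. in
 * rc_node_modify_permission_check():
 *
 *	permcheck_t *pcp;
 *	char *auth = NULL;
 *	int rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
 *
 *	if ((pcp = pc_create()) != NULL) {
 *		if (perm_add_enabling(pcp, AUTH_MODIFY) ==
 *		    REP_PROTOCOL_SUCCESS)
 *			rc = map_granted_status(perm_granted(pcp), pcp,
 *			    &auth);
 *		pc_free(pcp);
 *	}
 *	free(auth);
 */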
1688 #endif /* NATIVE_BUILD */
1689 
1690 /*
1691  * Flags in RC_NODE_WAITING_FLAGS are broadcast on rn_cv when cleared; they
1692  * serialize certain actions and let threads wait for operations to complete.
1693  *
1694  * The waiting flags are:
1695  *	RC_NODE_CHILDREN_CHANGING
1696  *		The child list is being built or changed (due to creation
1697  *		or deletion).  All iterators pause.
1698  *
1699  *	RC_NODE_USING_PARENT
1700  *		Someone is actively using the parent pointer, so we can't
1701  *		be removed from the parent list.
1702  *
1703  *	RC_NODE_CREATING_CHILD
1704  *		A child is being created -- locks out other creations, to
1705  *		prevent insert-insert races.
1706  *
1707  *	RC_NODE_IN_TX
1708  *		This object is running a transaction.
1709  *
1710  *	RC_NODE_DYING
1711  *		This node might be dying.  Always set as a group, using
1712  *		RC_NODE_DYING_FLAGS (which is every waiting flag except
1713  *		RC_NODE_USING_PARENT).
1714  */
1715 static int
1716 rc_node_hold_flag(rc_node_t *np, uint32_t flag)
1717 {
1718 	assert(MUTEX_HELD(&np->rn_lock));
1719 	assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1720 
1721 	while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag)) {
1722 		(void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1723 	}
1724 	if (np->rn_flags & RC_NODE_DEAD)
1725 		return (0);
1726 
1727 	np->rn_flags |= flag;
1728 	return (1);
1729 }
1730 
1731 static void
1732 rc_node_rele_flag(rc_node_t *np, uint32_t flag)
1733 {
1734 	assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1735 	assert(MUTEX_HELD(&np->rn_lock));
1736 	assert((np->rn_flags & flag) == flag);
1737 	np->rn_flags &= ~flag;
1738 	(void) pthread_cond_broadcast(&np->rn_cv);
1739 }
1740 
1741 /*
1742  * wait until a particular flag has cleared.  Fails if the object dies.
1743  */
1744 static int
1745 rc_node_wait_flag(rc_node_t *np, uint32_t flag)
1746 {
1747 	assert(MUTEX_HELD(&np->rn_lock));
1748 	while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag))
1749 		(void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1750 
1751 	return (!(np->rn_flags & RC_NODE_DEAD));
1752 }
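
/*
 * A sketch of the typical hold/release pattern for a waiting flag (compare
 * rc_node_fill_children(), below).  The flag keeps other threads out of the
 * critical section while rn_lock itself is dropped for slow work, and
 * rc_node_hold_flag() fails if the node dies while we wait:
 *
 *	(void) pthread_mutex_lock(&np->rn_lock);
 *	if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING)) {
 *		(void) pthread_mutex_unlock(&np->rn_lock);
 *		return (REP_PROTOCOL_FAIL_DELETED);
 *	}
 *	(void) pthread_mutex_unlock(&np->rn_lock);
 *
 *	... slow work which may block ...
 *
 *	(void) pthread_mutex_lock(&np->rn_lock);
 *	rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
 *	(void) pthread_mutex_unlock(&np->rn_lock);
 */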
1753 
1754 /*
1755  * On entry, np's lock must be held, and this thread must be holding
1756  * RC_NODE_USING_PARENT.  On return, both of them are released.
1757  *
1758  * If the return value is NULL, np either does not have a parent, or
1759  * the parent has been marked DEAD.
1760  *
1761  * If the return value is non-NULL, it is the parent of np, and both
1762  * its lock and the requested flags are held.
1763  */
1764 static rc_node_t *
1765 rc_node_hold_parent_flag(rc_node_t *np, uint32_t flag)
1766 {
1767 	rc_node_t *pp;
1768 
1769 	assert(MUTEX_HELD(&np->rn_lock));
1770 	assert(np->rn_flags & RC_NODE_USING_PARENT);
1771 
1772 	if ((pp = np->rn_parent) == NULL) {
1773 		rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1774 		(void) pthread_mutex_unlock(&np->rn_lock);
1775 		return (NULL);
1776 	}
1777 	(void) pthread_mutex_unlock(&np->rn_lock);
1778 
1779 	(void) pthread_mutex_lock(&pp->rn_lock);
1780 	(void) pthread_mutex_lock(&np->rn_lock);
1781 	rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1782 	(void) pthread_mutex_unlock(&np->rn_lock);
1783 
1784 	if (!rc_node_hold_flag(pp, flag)) {
1785 		(void) pthread_mutex_unlock(&pp->rn_lock);
1786 		return (NULL);
1787 	}
1788 	return (pp);
1789 }
1790 
1791 rc_node_t *
1792 rc_node_alloc(void)
1793 {
1794 	rc_node_t *np = uu_zalloc(sizeof (*np));
1795 
1796 	if (np == NULL)
1797 		return (NULL);
1798 
1799 	(void) pthread_mutex_init(&np->rn_lock, NULL);
1800 	(void) pthread_cond_init(&np->rn_cv, NULL);
1801 
1802 	np->rn_children = uu_list_create(rc_children_pool, np, 0);
1803 	np->rn_pg_notify_list = uu_list_create(rc_pg_notify_pool, np, 0);
1804 
1805 	uu_list_node_init(np, &np->rn_sibling_node, rc_children_pool);
1806 
1807 	uu_list_node_init(&np->rn_notify, &np->rn_notify.rcn_list_node,
1808 	    rc_notify_pool);
1809 
1810 	return (np);
1811 }
1812 
1813 void
1814 rc_node_destroy(rc_node_t *np)
1815 {
1816 	int i;
1817 
1818 	if (np->rn_flags & RC_NODE_UNREFED)
1819 		return;				/* being handled elsewhere */
1820 
1821 	assert(np->rn_refs == 0 && np->rn_other_refs == 0);
1822 	assert(np->rn_former == NULL);
1823 
1824 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
1825 		/* Release the holds from rc_iter_next(). */
1826 		for (i = 0; i < COMPOSITION_DEPTH; ++i) {
1827 			/* rn_cchain[i] may be NULL for empty snapshots. */
1828 			if (np->rn_cchain[i] != NULL)
1829 				rc_node_rele(np->rn_cchain[i]);
1830 		}
1831 	}
1832 
1833 	if (np->rn_name != NULL)
1834 		free((void *)np->rn_name);
1835 	np->rn_name = NULL;
1836 	if (np->rn_type != NULL)
1837 		free((void *)np->rn_type);
1838 	np->rn_type = NULL;
1839 	if (np->rn_values != NULL)
1840 		object_free_values(np->rn_values, np->rn_valtype,
1841 		    np->rn_values_count, np->rn_values_size);
1842 	np->rn_values = NULL;
1843 	rc_node_free_fmri(np);
1844 
1845 	if (np->rn_snaplevel != NULL)
1846 		rc_snaplevel_rele(np->rn_snaplevel);
1847 	np->rn_snaplevel = NULL;
1848 
1849 	uu_list_node_fini(np, &np->rn_sibling_node, rc_children_pool);
1850 
1851 	uu_list_node_fini(&np->rn_notify, &np->rn_notify.rcn_list_node,
1852 	    rc_notify_pool);
1853 
1854 	assert(uu_list_first(np->rn_children) == NULL);
1855 	uu_list_destroy(np->rn_children);
1856 	uu_list_destroy(np->rn_pg_notify_list);
1857 
1858 	(void) pthread_mutex_destroy(&np->rn_lock);
1859 	(void) pthread_cond_destroy(&np->rn_cv);
1860 
1861 	uu_free(np);
1862 }
1863 
1864 /*
1865  * Link in a child node.
1866  *
1867  * Because of the lock ordering, cp has to already be in the hash table with
1868  * its lock dropped before we get it.  To prevent anyone from noticing that
1869  * it is parentless, the creation code sets the RC_NODE_USING_PARENT flag.  Once
1870  * we've linked it in, we release the flag.
1871  */
1872 static void
1873 rc_node_link_child(rc_node_t *np, rc_node_t *cp)
1874 {
1875 	assert(!MUTEX_HELD(&np->rn_lock));
1876 	assert(!MUTEX_HELD(&cp->rn_lock));
1877 
1878 	(void) pthread_mutex_lock(&np->rn_lock);
1879 	(void) pthread_mutex_lock(&cp->rn_lock);
1880 	assert(!(cp->rn_flags & RC_NODE_IN_PARENT) &&
1881 	    (cp->rn_flags & RC_NODE_USING_PARENT));
1882 
1883 	assert(rc_check_parent_child(np->rn_id.rl_type, cp->rn_id.rl_type) ==
1884 	    REP_PROTOCOL_SUCCESS);
1885 
1886 	cp->rn_parent = np;
1887 	cp->rn_flags |= RC_NODE_IN_PARENT;
1888 	(void) uu_list_insert_before(np->rn_children, NULL, cp);
1889 	(void) rc_node_build_fmri(cp);
1890 
1891 	(void) pthread_mutex_unlock(&np->rn_lock);
1892 
1893 	rc_node_rele_flag(cp, RC_NODE_USING_PARENT);
1894 	(void) pthread_mutex_unlock(&cp->rn_lock);
1895 }
1896 
1897 /*
1898  * Sets the rn_parent_ref field of all the children of np to pp.  It is
1899  * initially invoked as rc_node_setup_parent_ref(np, np) and then recurses.
1900  *
1901  * This is used when we mark a node RC_NODE_OLD, so that when the object and
1902  * its children are no longer referenced, they will all be deleted as a unit.
1903  */
1904 static void
1905 rc_node_setup_parent_ref(rc_node_t *np, rc_node_t *pp)
1906 {
1907 	rc_node_t *cp;
1908 
1909 	assert(MUTEX_HELD(&np->rn_lock));
1910 
1911 	for (cp = uu_list_first(np->rn_children); cp != NULL;
1912 	    cp = uu_list_next(np->rn_children, cp)) {
1913 		(void) pthread_mutex_lock(&cp->rn_lock);
1914 		if (cp->rn_flags & RC_NODE_PARENT_REF) {
1915 			assert(cp->rn_parent_ref == pp);
1916 		} else {
1917 			assert(cp->rn_parent_ref == NULL);
1918 
1919 			cp->rn_flags |= RC_NODE_PARENT_REF;
1920 			cp->rn_parent_ref = pp;
1921 			if (cp->rn_refs != 0)
1922 				rc_node_hold_other(pp);
1923 		}
1924 		rc_node_setup_parent_ref(cp, pp);		/* recurse */
1925 		(void) pthread_mutex_unlock(&cp->rn_lock);
1926 	}
1927 }
1928 
1929 /*
1930  * Atomically replace 'np' with 'newp', with a parent of 'pp'.
1931  *
1932  * Requirements:
1933  *	*no* node locks may be held.
1934  *	pp must be held with RC_NODE_CHILDREN_CHANGING
1935  *	newp and np must be held with RC_NODE_IN_TX
1936  *	np must be marked RC_NODE_IN_PARENT, newp must not be
1937  *	np must be marked RC_NODE_OLD
1938  *
1939  * Afterwards:
1940  *	pp's RC_NODE_CHILDREN_CHANGING is dropped
1941  *	newp and np's RC_NODE_IN_TX is dropped
1942  *	newp->rn_former = np;
1943  *	newp is RC_NODE_IN_PARENT, np is not.
1944  *	interested notify subscribers have been notified of newp's new status.
1945  */
1946 static void
1947 rc_node_relink_child(rc_node_t *pp, rc_node_t *np, rc_node_t *newp)
1948 {
1949 	cache_bucket_t *bp;
1950 	/*
1951 	 * First, swap np and newp in the cache.  newp's RC_NODE_IN_TX flag
1952 	 * keeps rc_node_update() from seeing it until we are done.
1953 	 */
1954 	bp = cache_hold(newp->rn_hash);
1955 	cache_remove_unlocked(bp, np);
1956 	cache_insert_unlocked(bp, newp);
1957 	cache_release(bp);
1958 
1959 	/*
1960 	 * replace np with newp in pp's list, and attach it to newp's rn_former
1961 	 * link.
1962 	 */
1963 	(void) pthread_mutex_lock(&pp->rn_lock);
1964 	assert(pp->rn_flags & RC_NODE_CHILDREN_CHANGING);
1965 
1966 	(void) pthread_mutex_lock(&newp->rn_lock);
1967 	assert(!(newp->rn_flags & RC_NODE_IN_PARENT));
1968 	assert(newp->rn_flags & RC_NODE_IN_TX);
1969 
1970 	(void) pthread_mutex_lock(&np->rn_lock);
1971 	assert(np->rn_flags & RC_NODE_IN_PARENT);
1972 	assert(np->rn_flags & RC_NODE_OLD);
1973 	assert(np->rn_flags & RC_NODE_IN_TX);
1974 
1975 	newp->rn_parent = pp;
1976 	newp->rn_flags |= RC_NODE_IN_PARENT;
1977 
1978 	/*
1979 	 * Note that we carefully add newp before removing np -- this
1980 	 * keeps iterators on the list from missing us.
1981 	 */
1982 	(void) uu_list_insert_after(pp->rn_children, np, newp);
1983 	(void) rc_node_build_fmri(newp);
1984 	(void) uu_list_remove(pp->rn_children, np);
1985 
1986 	/*
1987 	 * re-set np
1988 	 */
1989 	newp->rn_former = np;
1990 	np->rn_parent = NULL;
1991 	np->rn_flags &= ~RC_NODE_IN_PARENT;
1992 	np->rn_flags |= RC_NODE_ON_FORMER;
1993 
1994 	rc_notify_insert_node(newp);
1995 
1996 	rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
1997 	(void) pthread_mutex_unlock(&pp->rn_lock);
1998 	rc_node_rele_flag(newp, RC_NODE_USING_PARENT | RC_NODE_IN_TX);
1999 	(void) pthread_mutex_unlock(&newp->rn_lock);
2000 	rc_node_setup_parent_ref(np, np);
2001 	rc_node_rele_flag(np, RC_NODE_IN_TX);
2002 	(void) pthread_mutex_unlock(&np->rn_lock);
2003 }
2004 
2005 /*
2006  * makes sure a node with lookup 'nip', name 'name', and parent 'pp' exists.
2007  * 'cp' is used (and returned) if the node does not yet exist.  If it does
2008  * exist, 'cp' is freed, and the existent node is returned instead.
2009  */
2010 rc_node_t *
2011 rc_node_setup(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
2012     rc_node_t *pp)
2013 {
2014 	rc_node_t *np;
2015 	cache_bucket_t *bp;
2016 	uint32_t h = rc_node_hash(nip);
2017 
2018 	assert(cp->rn_refs == 0);
2019 
2020 	bp = cache_hold(h);
2021 	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2022 		cache_release(bp);
2023 
2024 		/*
2025 		 * make sure it matches our expectations
2026 		 */
2027 		(void) pthread_mutex_lock(&np->rn_lock);
2028 		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2029 			assert(np->rn_parent == pp);
2030 			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2031 			assert(strcmp(np->rn_name, name) == 0);
2032 			assert(np->rn_type == NULL);
2033 			assert(np->rn_flags & RC_NODE_IN_PARENT);
2034 			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2035 		}
2036 		(void) pthread_mutex_unlock(&np->rn_lock);
2037 
2038 		rc_node_destroy(cp);
2039 		return (np);
2040 	}
2041 
2042 	/*
2043 	 * No one is there -- set up & install the new node.
2044 	 */
2045 	np = cp;
2046 	rc_node_hold(np);
2047 	np->rn_id = *nip;
2048 	np->rn_hash = h;
2049 	np->rn_name = strdup(name);
2050 
2051 	np->rn_flags |= RC_NODE_USING_PARENT;
2052 
2053 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE) {
2054 #if COMPOSITION_DEPTH == 2
2055 		np->rn_cchain[0] = np;
2056 		np->rn_cchain[1] = pp;
2057 #else
2058 #error This code must be updated.
2059 #endif
2060 	}
2061 
2062 	cache_insert_unlocked(bp, np);
2063 	cache_release(bp);		/* we are now visible */
2064 
2065 	rc_node_link_child(pp, np);
2066 
2067 	return (np);
2068 }
2069 
2070 /*
2071  * makes sure a snapshot with lookup 'nip', name 'name', and parent 'pp' exists.
2072  * 'cp' is used (and returned) if the node does not yet exist.  If it does
2073  * exist, 'cp' is freed, and the existent node is returned instead.
2074  */
2075 rc_node_t *
2076 rc_node_setup_snapshot(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
2077     uint32_t snap_id, rc_node_t *pp)
2078 {
2079 	rc_node_t *np;
2080 	cache_bucket_t *bp;
2081 	uint32_t h = rc_node_hash(nip);
2082 
2083 	assert(cp->rn_refs == 0);
2084 
2085 	bp = cache_hold(h);
2086 	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2087 		cache_release(bp);
2088 
2089 		/*
2090 		 * make sure it matches our expectations
2091 		 */
2092 		(void) pthread_mutex_lock(&np->rn_lock);
2093 		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2094 			assert(np->rn_parent == pp);
2095 			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2096 			assert(strcmp(np->rn_name, name) == 0);
2097 			assert(np->rn_type == NULL);
2098 			assert(np->rn_flags & RC_NODE_IN_PARENT);
2099 			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2100 		}
2101 		(void) pthread_mutex_unlock(&np->rn_lock);
2102 
2103 		rc_node_destroy(cp);
2104 		return (np);
2105 	}
2106 
2107 	/*
2108 	 * No one is there -- create a new node.
2109 	 */
2110 	np = cp;
2111 	rc_node_hold(np);
2112 	np->rn_id = *nip;
2113 	np->rn_hash = h;
2114 	np->rn_name = strdup(name);
2115 	np->rn_snapshot_id = snap_id;
2116 
2117 	np->rn_flags |= RC_NODE_USING_PARENT;
2118 
2119 	cache_insert_unlocked(bp, np);
2120 	cache_release(bp);		/* we are now visible */
2121 
2122 	rc_node_link_child(pp, np);
2123 
2124 	return (np);
2125 }
2126 
2127 /*
2128  * makes sure a snaplevel with lookup 'nip' and parent 'pp' exists.  'cp' is
2129  * used (and returned) if the node does not yet exist.  If it does exist, 'cp'
2130  * is freed, and the existent node is returned instead.
2131  */
2132 rc_node_t *
2133 rc_node_setup_snaplevel(rc_node_t *cp, rc_node_lookup_t *nip,
2134     rc_snaplevel_t *lvl, rc_node_t *pp)
2135 {
2136 	rc_node_t *np;
2137 	cache_bucket_t *bp;
2138 	uint32_t h = rc_node_hash(nip);
2139 
2140 	assert(cp->rn_refs == 0);
2141 
2142 	bp = cache_hold(h);
2143 	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2144 		cache_release(bp);
2145 
2146 		/*
2147 		 * make sure it matches our expectations
2148 		 */
2149 		(void) pthread_mutex_lock(&np->rn_lock);
2150 		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2151 			assert(np->rn_parent == pp);
2152 			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2153 			assert(np->rn_name == NULL);
2154 			assert(np->rn_type == NULL);
2155 			assert(np->rn_flags & RC_NODE_IN_PARENT);
2156 			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2157 		}
2158 		(void) pthread_mutex_unlock(&np->rn_lock);
2159 
2160 		rc_node_destroy(cp);
2161 		return (np);
2162 	}
2163 
2164 	/*
2165 	 * No one is there -- create a new node.
2166 	 */
2167 	np = cp;
2168 	rc_node_hold(np);	/* released in snapshot_fill_children() */
2169 	np->rn_id = *nip;
2170 	np->rn_hash = h;
2171 
2172 	rc_snaplevel_hold(lvl);
2173 	np->rn_snaplevel = lvl;
2174 
2175 	np->rn_flags |= RC_NODE_USING_PARENT;
2176 
2177 	cache_insert_unlocked(bp, np);
2178 	cache_release(bp);		/* we are now visible */
2179 
2180 	/* Add this snaplevel to the snapshot's composition chain. */
2181 	assert(pp->rn_cchain[lvl->rsl_level_num - 1] == NULL);
2182 	pp->rn_cchain[lvl->rsl_level_num - 1] = np;
2183 
2184 	rc_node_link_child(pp, np);
2185 
2186 	return (np);
2187 }
2188 
2189 /*
2190  * Returns NULL if strdup() fails.
2191  */
2192 rc_node_t *
2193 rc_node_setup_pg(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
2194     const char *type, uint32_t flags, uint32_t gen_id, rc_node_t *pp)
2195 {
2196 	rc_node_t *np;
2197 	cache_bucket_t *bp;
2198 
2199 	uint32_t h = rc_node_hash(nip);
2200 	bp = cache_hold(h);
2201 	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2202 		cache_release(bp);
2203 
2204 		/*
2205 		 * make sure it matches our expectations (don't check
2206 		 * the generation number or parent, since someone could
2207 		 * have gotten a transaction through while we weren't
2208 		 * looking)
2209 		 */
2210 		(void) pthread_mutex_lock(&np->rn_lock);
2211 		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2212 			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2213 			assert(strcmp(np->rn_name, name) == 0);
2214 			assert(strcmp(np->rn_type, type) == 0);
2215 			assert(np->rn_pgflags == flags);
2216 			assert(np->rn_flags & RC_NODE_IN_PARENT);
2217 			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2218 		}
2219 		(void) pthread_mutex_unlock(&np->rn_lock);
2220 
2221 		rc_node_destroy(cp);
2222 		return (np);
2223 	}
2224 
2225 	np = cp;
2226 	rc_node_hold(np);		/* released in fill_pg_callback() */
2227 	np->rn_id = *nip;
2228 	np->rn_hash = h;
2229 	np->rn_name = strdup(name);
2230 	if (np->rn_name == NULL) {
2231 		rc_node_rele(np);
2232 		return (NULL);
2233 	}
2234 	np->rn_type = strdup(type);
2235 	if (np->rn_type == NULL) {
2236 		free((void *)np->rn_name);
2237 		rc_node_rele(np);
2238 		return (NULL);
2239 	}
2240 	np->rn_pgflags = flags;
2241 	np->rn_gen_id = gen_id;
2242 
2243 	np->rn_flags |= RC_NODE_USING_PARENT;
2244 
2245 	cache_insert_unlocked(bp, np);
2246 	cache_release(bp);		/* we are now visible */
2247 
2248 	rc_node_link_child(pp, np);
2249 
2250 	return (np);
2251 }
2252 
2253 #if COMPOSITION_DEPTH == 2
2254 /*
2255  * Initialize a "composed property group" which represents the composition of
2256  * property groups pg1 & pg2.  It is ephemeral: once created & returned for an
2257  * property groups pg1 & pg2.  It is ephemeral: it is created and returned
2258  * for a single ITER_READ request, and keeping it out of cache_hash and any
2259  * child lists prevents it from being looked up again.  Operations besides
2260  * iteration are passed through to pg1.
2261  * pg1 & pg2 should be held before entering this function.  They will be
2262  * released in rc_node_destroy().
2263  */
2264 static int
2265 rc_node_setup_cpg(rc_node_t *cpg, rc_node_t *pg1, rc_node_t *pg2)
2266 {
2267 	if (strcmp(pg1->rn_type, pg2->rn_type) != 0)
2268 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2269 
2270 	cpg->rn_id.rl_type = REP_PROTOCOL_ENTITY_CPROPERTYGRP;
2271 	cpg->rn_name = strdup(pg1->rn_name);
2272 	if (cpg->rn_name == NULL)
2273 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2274 
2275 	cpg->rn_cchain[0] = pg1;
2276 	cpg->rn_cchain[1] = pg2;
2277 
2278 	return (REP_PROTOCOL_SUCCESS);
2279 }
2280 #else
2281 #error This code must be updated.
2282 #endif
2283 
2284 /*
2285  * Fails with _NO_RESOURCES.
2286  */
2287 int
2288 rc_node_create_property(rc_node_t *pp, rc_node_lookup_t *nip,
2289     const char *name, rep_protocol_value_type_t type,
2290     const char *vals, size_t count, size_t size)
2291 {
2292 	rc_node_t *np;
2293 	cache_bucket_t *bp;
2294 
2295 	uint32_t h = rc_node_hash(nip);
2296 	bp = cache_hold(h);
2297 	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2298 		cache_release(bp);
2299 		/*
2300 		 * make sure it matches our expectations
2301 		 */
2302 		(void) pthread_mutex_lock(&np->rn_lock);
2303 		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2304 			assert(np->rn_parent == pp);
2305 			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2306 			assert(strcmp(np->rn_name, name) == 0);
2307 			assert(np->rn_valtype == type);
2308 			assert(np->rn_values_count == count);
2309 			assert(np->rn_values_size == size);
2310 			assert(vals == NULL ||
2311 			    memcmp(np->rn_values, vals, size) == 0);
2312 			assert(np->rn_flags & RC_NODE_IN_PARENT);
2313 			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2314 		}
2315 		rc_node_rele_locked(np);
2316 		object_free_values(vals, type, count, size);
2317 		return (REP_PROTOCOL_SUCCESS);
2318 	}
2319 
2320 	/*
2321 	 * No one is there -- create a new node.
2322 	 */
2323 	np = rc_node_alloc();
2324 	if (np == NULL) {
2325 		cache_release(bp);
2326 		object_free_values(vals, type, count, size);
2327 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2328 	}
2329 	np->rn_id = *nip;
2330 	np->rn_hash = h;
2331 	np->rn_name = strdup(name);
2332 	if (np->rn_name == NULL) {
2333 		cache_release(bp);
2334 		object_free_values(vals, type, count, size);
2335 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2336 	}
2337 
2338 	np->rn_valtype = type;
2339 	np->rn_values = vals;
2340 	np->rn_values_count = count;
2341 	np->rn_values_size = size;
2342 
2343 	np->rn_flags |= RC_NODE_USING_PARENT;
2344 
2345 	cache_insert_unlocked(bp, np);
2346 	cache_release(bp);		/* we are now visible */
2347 
2348 	rc_node_link_child(pp, np);
2349 
2350 	return (REP_PROTOCOL_SUCCESS);
2351 }
2352 
2353 /*
2354  * This function implements a decision table to determine the event ID for
2355  * changes to the enabled (SCF_PROPERTY_ENABLED) property.  The event ID is
2356  * determined by the value of the first property in the command specified
2357  * by cmd_no and the name of the property group.  Here is the decision
2358  * table:
2359  *
2360  *				Property Group Name
2361  *	Property	------------------------------------------
2362  *	Value		SCF_PG_GENERAL		SCF_PG_GENERAL_OVR
2363  *	--------	--------------		------------------
2364  *	"0"		ADT_smf_disable		ADT_smf_tmp_disable
2365  *	"1"		ADT_smf_enable		ADT_smf_tmp_enable
2366  *
2367  * This function is called by special_property_event through a function
2368  * pointer in the special_props_list array.
2369  *
2370  * Since the ADT_smf_* symbols may not be defined in the build machine's
2371  * include files, this function is not compiled when doing native builds.
2372  */
2373 #ifndef NATIVE_BUILD
2374 static int
2375 general_enable_id(tx_commit_data_t *tx_data, size_t cmd_no, const char *pg,
2376     au_event_t *event_id)
2377 {
2378 	const char *value;
2379 	uint32_t nvalues;
2380 	int enable;
2381 
2382 	/*
2383 	 * First, check property value.
2384 	 */
2385 	if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
2386 		return (-1);
2387 	if (nvalues == 0)
2388 		return (-1);
2389 	if (tx_cmd_value(tx_data, cmd_no, 0, &value) != REP_PROTOCOL_SUCCESS)
2390 		return (-1);
2391 	if (strcmp(value, "0") == 0) {
2392 		enable = 0;
2393 	} else if (strcmp(value, "1") == 0) {
2394 		enable = 1;
2395 	} else {
2396 		return (-1);
2397 	}
2398 
2399 	/*
2400 	 * Now check property group name.
2401 	 */
2402 	if (strcmp(pg, SCF_PG_GENERAL) == 0) {
2403 		*event_id = enable ? ADT_smf_enable : ADT_smf_disable;
2404 		return (0);
2405 	} else if (strcmp(pg, SCF_PG_GENERAL_OVR) == 0) {
2406 		*event_id = enable ? ADT_smf_tmp_enable : ADT_smf_tmp_disable;
2407 		return (0);
2408 	}
2409 	return (-1);
2410 }
2411 #endif	/* NATIVE_BUILD */
2412 
2413 /*
2414  * This function compares two audit_special_prop_item_t structures
2415  * represented by item1 and item2.  It returns an integer greater than 0 if
2416  * item1 is greater than item2.  It returns 0 if they are equal and an
2417  * integer less than 0 if item1 is less than item2.  api_prop_name and
2418  * api_pg_name are the key fields for sorting.
2419  *
2420  * This function is suitable for calls to bsearch(3C) and qsort(3C).
2421  */
2422 static int
2423 special_prop_compare(const void *item1, const void *item2)
2424 {
2425 	const audit_special_prop_item_t *a = (audit_special_prop_item_t *)item1;
2426 	const audit_special_prop_item_t *b = (audit_special_prop_item_t *)item2;
2427 	int r;
2428 
2429 	r = strcmp(a->api_prop_name, b->api_prop_name);
2430 	if (r == 0) {
2431 		/*
2432 		 * Primary keys are the same, so check the secondary key.
2433 		 */
2434 		r = strcmp(a->api_pg_name, b->api_pg_name);
2435 	}
2436 	return (r);
2437 }
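
/*
 * An illustrative lookup with this comparator; special_props_list and
 * SPECIAL_PROP_COUNT are the sorted table and its size (see rc_node_init()),
 * and prop_name/pg_name stand in for the caller's data:
 *
 *	audit_special_prop_item_t key, *item;
 *
 *	key.api_prop_name = prop_name;
 *	key.api_pg_name = pg_name;
 *	item = bsearch(&key, special_props_list, SPECIAL_PROP_COUNT,
 *	    sizeof (special_props_list[0]), special_prop_compare);
 */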
2438 
2439 int
2440 rc_node_init(void)
2441 {
2442 	rc_node_t *np;
2443 	cache_bucket_t *bp;
2444 
2445 	rc_children_pool = uu_list_pool_create("rc_children_pool",
2446 	    sizeof (rc_node_t), offsetof(rc_node_t, rn_sibling_node),
2447 	    NULL, UU_LIST_POOL_DEBUG);
2448 
2449 	rc_pg_notify_pool = uu_list_pool_create("rc_pg_notify_pool",
2450 	    sizeof (rc_node_pg_notify_t),
2451 	    offsetof(rc_node_pg_notify_t, rnpn_node),
2452 	    NULL, UU_LIST_POOL_DEBUG);
2453 
2454 	rc_notify_pool = uu_list_pool_create("rc_notify_pool",
2455 	    sizeof (rc_notify_t), offsetof(rc_notify_t, rcn_list_node),
2456 	    NULL, UU_LIST_POOL_DEBUG);
2457 
2458 	rc_notify_info_pool = uu_list_pool_create("rc_notify_info_pool",
2459 	    sizeof (rc_notify_info_t),
2460 	    offsetof(rc_notify_info_t, rni_list_node),
2461 	    NULL, UU_LIST_POOL_DEBUG);
2462 
2463 	if (rc_children_pool == NULL || rc_pg_notify_pool == NULL ||
2464 	    rc_notify_pool == NULL || rc_notify_info_pool == NULL)
2465 		uu_die("out of memory");
2466 
2467 	rc_notify_list = uu_list_create(rc_notify_pool,
2468 	    &rc_notify_list, 0);
2469 
2470 	rc_notify_info_list = uu_list_create(rc_notify_info_pool,
2471 	    &rc_notify_info_list, 0);
2472 
2473 	if (rc_notify_list == NULL || rc_notify_info_list == NULL)
2474 		uu_die("out of memory");
2475 
2476 	/*
2477 	 * Sort the special_props_list array so that it can be searched
2478 	 * with bsearch(3C).
2479 	 *
2480 	 * The special_props_list array is not compiled into the native
2481 	 * build code, so there is no need to call qsort if NATIVE_BUILD is
2482 	 * defined.
2483 	 */
2484 #ifndef	NATIVE_BUILD
2485 	qsort(special_props_list, SPECIAL_PROP_COUNT,
2486 	    sizeof (special_props_list[0]), special_prop_compare);
2487 #endif	/* NATIVE_BUILD */
2488 
2489 	if ((np = rc_node_alloc()) == NULL)
2490 		uu_die("out of memory");
2491 
2492 	rc_node_hold(np);
2493 	np->rn_id.rl_type = REP_PROTOCOL_ENTITY_SCOPE;
2494 	np->rn_id.rl_backend = BACKEND_TYPE_NORMAL;
2495 	np->rn_hash = rc_node_hash(&np->rn_id);
2496 	np->rn_name = "localhost";
2497 
2498 	bp = cache_hold(np->rn_hash);
2499 	cache_insert_unlocked(bp, np);
2500 	cache_release(bp);
2501 
2502 	rc_scope = np;
2503 	return (1);
2504 }
2505 
2506 /*
2507  * Fails with
2508  *   _INVALID_TYPE - type is invalid
2509  *   _TYPE_MISMATCH - np doesn't carry children of type type
2510  *   _DELETED - np has been deleted
2511  *   _NO_RESOURCES
2512  */
2513 static int
2514 rc_node_fill_children(rc_node_t *np, uint32_t type)
2515 {
2516 	int rc;
2517 
2518 	assert(MUTEX_HELD(&np->rn_lock));
2519 
2520 	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
2521 	    REP_PROTOCOL_SUCCESS)
2522 		return (rc);
2523 
2524 	if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING))
2525 		return (REP_PROTOCOL_FAIL_DELETED);
2526 
2527 	if (np->rn_flags & RC_NODE_HAS_CHILDREN) {
2528 		rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
2529 		return (REP_PROTOCOL_SUCCESS);
2530 	}
2531 
2532 	(void) pthread_mutex_unlock(&np->rn_lock);
2533 	rc = object_fill_children(np);
2534 	(void) pthread_mutex_lock(&np->rn_lock);
2535 
2536 	if (rc == REP_PROTOCOL_SUCCESS) {
2537 		np->rn_flags |= RC_NODE_HAS_CHILDREN;
2538 	}
2539 	rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
2540 
2541 	return (rc);
2542 }
2543 
2544 /*
2545  * Returns
2546  *   _INVALID_TYPE - type is invalid
2547  *   _TYPE_MISMATCH - np doesn't carry children of type type
2548  *   _DELETED - np has been deleted
2549  *   _NO_RESOURCES
2550  *   _SUCCESS - if *cpp is not NULL, it is held
2551  */
2552 static int
2553 rc_node_find_named_child(rc_node_t *np, const char *name, uint32_t type,
2554     rc_node_t **cpp)
2555 {
2556 	int ret;
2557 	rc_node_t *cp;
2558 
2559 	assert(MUTEX_HELD(&np->rn_lock));
2560 	assert(np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP);
2561 
2562 	ret = rc_node_fill_children(np, type);
2563 	if (ret != REP_PROTOCOL_SUCCESS)
2564 		return (ret);
2565 
2566 	for (cp = uu_list_first(np->rn_children);
2567 	    cp != NULL;
2568 	    cp = uu_list_next(np->rn_children, cp)) {
2569 		if (cp->rn_id.rl_type == type && strcmp(cp->rn_name, name) == 0)
2570 			break;
2571 	}
2572 
2573 	if (cp != NULL)
2574 		rc_node_hold(cp);
2575 	*cpp = cp;
2576 
2577 	return (REP_PROTOCOL_SUCCESS);
2578 }
2579 
2580 static int rc_node_parent(rc_node_t *, rc_node_t **);
2581 
2582 /*
2583  * Returns
2584  *   _INVALID_TYPE - type is invalid
2585  *   _DELETED - np or an ancestor has been deleted
2586  *   _NOT_FOUND - no ancestor of specified type exists
2587  *   _SUCCESS - *app is held
2588  */
2589 static int
2590 rc_node_find_ancestor(rc_node_t *np, uint32_t type, rc_node_t **app)
2591 {
2592 	int ret;
2593 	rc_node_t *parent, *np_orig;
2594 
2595 	if (type >= REP_PROTOCOL_ENTITY_MAX)
2596 		return (REP_PROTOCOL_FAIL_INVALID_TYPE);
2597 
2598 	np_orig = np;
2599 
2600 	while (np->rn_id.rl_type > type) {
2601 		ret = rc_node_parent(np, &parent);
2602 		if (np != np_orig)
2603 			rc_node_rele(np);
2604 		if (ret != REP_PROTOCOL_SUCCESS)
2605 			return (ret);
2606 		np = parent;
2607 	}
2608 
2609 	if (np->rn_id.rl_type == type) {
2610 		*app = parent;
2611 		return (REP_PROTOCOL_SUCCESS);
2612 	}
2613 
2614 	return (REP_PROTOCOL_FAIL_NOT_FOUND);
2615 }
2616 
2617 #ifndef NATIVE_BUILD
2618 /*
2619  * If the propname property exists in pg, and it is of type string, add its
2620  * values as authorizations to pcp.  pg must not be locked on entry, and it is
2621  * returned unlocked.  Returns
2622  *   _DELETED - pg was deleted
2623  *   _NO_RESOURCES
2624  *   _NOT_FOUND - pg has no property named propname
2625  *   _SUCCESS
2626  */
2627 static int
2628 perm_add_pg_prop_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
2629 {
2630 	rc_node_t *prop;
2631 	int result;
2632 
2633 	uint_t count;
2634 	const char *cp;
2635 
2636 	assert(!MUTEX_HELD(&pg->rn_lock));
2637 	assert(pg->rn_id.rl_type == REP_PROTOCOL_ENTITY_PROPERTYGRP);
2638 
2639 	(void) pthread_mutex_lock(&pg->rn_lock);
2640 	result = rc_node_find_named_child(pg, propname,
2641 	    REP_PROTOCOL_ENTITY_PROPERTY, &prop);
2642 	(void) pthread_mutex_unlock(&pg->rn_lock);
2643 	if (result != REP_PROTOCOL_SUCCESS) {
2644 		switch (result) {
2645 		case REP_PROTOCOL_FAIL_DELETED:
2646 		case REP_PROTOCOL_FAIL_NO_RESOURCES:
2647 			return (result);
2648 
2649 		case REP_PROTOCOL_FAIL_INVALID_TYPE:
2650 		case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
2651 		default:
2652 			bad_error("rc_node_find_named_child", result);
2653 		}
2654 	}
2655 
2656 	if (prop == NULL)
2657 		return (REP_PROTOCOL_FAIL_NOT_FOUND);
2658 
2659 	/* rn_valtype is immutable, so no locking. */
2660 	if (prop->rn_valtype != REP_PROTOCOL_TYPE_STRING) {
2661 		rc_node_rele(prop);
2662 		return (REP_PROTOCOL_SUCCESS);
2663 	}
2664 
2665 	(void) pthread_mutex_lock(&prop->rn_lock);
2666 	for (count = prop->rn_values_count, cp = prop->rn_values;
2667 	    count > 0;
2668 	    --count) {
2669 		result = perm_add_enabling_type(pcp, cp,
2670 		    (pg->rn_id.rl_ids[ID_INSTANCE]) ? PC_AUTH_INST :
2671 		    PC_AUTH_SVC);
2672 		if (result != REP_PROTOCOL_SUCCESS)
2673 			break;
2674 
2675 		cp = strchr(cp, '\0') + 1;
2676 	}
2677 
2678 	rc_node_rele_locked(prop);
2679 
2680 	return (result);
2681 }
2682 
2683 /*
2684  * Assuming that ent is a service or instance node, if the pgname property
2685  * group has type pgtype, and it has a propname property with string type, add
2686  * its values as authorizations to pcp.  If pgtype is NULL, it is not checked.
2687  * Returns
2688  *   _SUCCESS
2689  *   _DELETED - ent was deleted
2690  *   _NO_RESOURCES - no resources
2691  *   _NOT_FOUND - ent does not have pgname pg or propname property
2692  */
2693 static int
2694 perm_add_ent_prop_values(permcheck_t *pcp, rc_node_t *ent, const char *pgname,
2695     const char *pgtype, const char *propname)
2696 {
2697 	int r;
2698 	rc_node_t *pg;
2699 
2700 	assert(!MUTEX_HELD(&ent->rn_lock));
2701 
2702 	(void) pthread_mutex_lock(&ent->rn_lock);
2703 	r = rc_node_find_named_child(ent, pgname,
2704 	    REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
2705 	(void) pthread_mutex_unlock(&ent->rn_lock);
2706 
2707 	switch (r) {
2708 	case REP_PROTOCOL_SUCCESS:
2709 		break;
2710 
2711 	case REP_PROTOCOL_FAIL_DELETED:
2712 	case REP_PROTOCOL_FAIL_NO_RESOURCES:
2713 		return (r);
2714 
2715 	default:
2716 		bad_error("rc_node_find_named_child", r);
2717 	}
2718 
2719 	if (pg == NULL)
2720 		return (REP_PROTOCOL_FAIL_NOT_FOUND);
2721 
2722 	if (pgtype == NULL || strcmp(pg->rn_type, pgtype) == 0) {
2723 		r = perm_add_pg_prop_values(pcp, pg, propname);
2724 		switch (r) {
2725 		case REP_PROTOCOL_FAIL_DELETED:
2726 			r = REP_PROTOCOL_FAIL_NOT_FOUND;
2727 			break;
2728 
2729 		case REP_PROTOCOL_FAIL_NO_RESOURCES:
2730 		case REP_PROTOCOL_SUCCESS:
2731 		case REP_PROTOCOL_FAIL_NOT_FOUND:
2732 			break;
2733 
2734 		default:
2735 			bad_error("perm_add_pg_prop_values", r);
2736 		}
2737 	}
2738 
2739 	rc_node_rele(pg);
2740 
2741 	return (r);
2742 }
2743 
2744 /*
2745  * If pg has a string-typed property named propname, add its values as
2746  * authorizations to pcp.  If pg has no such property, and its parent is an
2747  * instance, walk up to the service and try doing the same with the property
2748  * of the same name from the property group of the same name.  Returns
2749  *   _SUCCESS
2750  *   _NO_RESOURCES
2751  *   _DELETED - pg (or an ancestor) was deleted
2752  */
2753 static int
2754 perm_add_enabling_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
2755 {
2756 	int r;
2757 	char pgname[REP_PROTOCOL_NAME_LEN + 1];
2758 	rc_node_t *svc;
2759 	size_t sz;
2760 
2761 	r = perm_add_pg_prop_values(pcp, pg, propname);
2762 
2763 	if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2764 		return (r);
2765 
2766 	assert(!MUTEX_HELD(&pg->rn_lock));
2767 
2768 	if (pg->rn_id.rl_ids[ID_INSTANCE] == 0)
2769 		return (REP_PROTOCOL_SUCCESS);
2770 
2771 	sz = strlcpy(pgname, pg->rn_name, sizeof (pgname));
2772 	assert(sz < sizeof (pgname));
2773 
2774 	/*
2775 	 * If pg is a child of an instance or snapshot, we want to compose the
2776 	 * authorization property with the service's (if it exists).  The
2777 	 * snapshot case applies only to read_authorization.  In all other
2778 	 * cases, the pg's parent will be the instance.
2779 	 */
2780 	r = rc_node_find_ancestor(pg, REP_PROTOCOL_ENTITY_SERVICE, &svc);
2781 	if (r != REP_PROTOCOL_SUCCESS) {
2782 		assert(r == REP_PROTOCOL_FAIL_DELETED);
2783 		return (r);
2784 	}
2785 	assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);
2786 
2787 	r = perm_add_ent_prop_values(pcp, svc, pgname, NULL, propname);
2788 
2789 	rc_node_rele(svc);
2790 
2791 	if (r == REP_PROTOCOL_FAIL_NOT_FOUND)
2792 		r = REP_PROTOCOL_SUCCESS;
2793 
2794 	return (r);
2795 }
2796 
2797 /*
2798  * Call perm_add_enabling_values() for the "action_authorization" property of
2799  * the "general" property group of inst.  Returns
2800  *   _DELETED - inst (or an ancestor) was deleted
2801  *   _NO_RESOURCES
2802  *   _SUCCESS
2803  */
2804 static int
2805 perm_add_inst_action_auth(permcheck_t *pcp, rc_node_t *inst)
2806 {
2807 	int r;
2808 	rc_node_t *svc;
2809 
2810 	assert(inst->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
2811 
2812 	r = perm_add_ent_prop_values(pcp, inst, AUTH_PG_GENERAL,
2813 	    AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2814 
2815 	if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2816 		return (r);
2817 
2818 	r = rc_node_parent(inst, &svc);
2819 	if (r != REP_PROTOCOL_SUCCESS) {
2820 		assert(r == REP_PROTOCOL_FAIL_DELETED);
2821 		return (r);
2822 	}
2823 
2824 	r = perm_add_ent_prop_values(pcp, svc, AUTH_PG_GENERAL,
2825 	    AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2826 
2827 	return (r == REP_PROTOCOL_FAIL_NOT_FOUND ? REP_PROTOCOL_SUCCESS : r);
2828 }
2829 #endif /* NATIVE_BUILD */
2830 
2831 void
2832 rc_node_ptr_init(rc_node_ptr_t *out)
2833 {
2834 	out->rnp_node = NULL;
2835 	out->rnp_auth_string = NULL;
2836 	out->rnp_authorized = RC_AUTH_UNKNOWN;
2837 	out->rnp_deleted = 0;
2838 }
2839 
2840 void
2841 rc_node_ptr_free_mem(rc_node_ptr_t *npp)
2842 {
2843 	if (npp->rnp_auth_string != NULL) {
2844 		free((void *)npp->rnp_auth_string);
2845 		npp->rnp_auth_string = NULL;
2846 	}
2847 }
2848 
2849 static void
2850 rc_node_assign(rc_node_ptr_t *out, rc_node_t *val)
2851 {
2852 	rc_node_t *cur = out->rnp_node;
2853 	if (val != NULL)
2854 		rc_node_hold(val);
2855 	out->rnp_node = val;
2856 	if (cur != NULL) {
2857 		NODE_LOCK(cur);
2858 
2859 		/*
2860 		 * Register the ephemeral reference created by reading
2861 		 * out->rnp_node into cur.  Note that the persistent
2862 		 * reference we're destroying is locked by the client
2863 		 * layer.
2864 		 */
2865 		rc_node_hold_ephemeral_locked(cur);
2866 
2867 		rc_node_rele_locked(cur);
2868 	}
2869 	out->rnp_authorized = RC_AUTH_UNKNOWN;
2870 	rc_node_ptr_free_mem(out);
2871 	out->rnp_deleted = 0;
2872 }
2873 
2874 void
2875 rc_node_clear(rc_node_ptr_t *out, int deleted)
2876 {
2877 	rc_node_assign(out, NULL);
2878 	out->rnp_deleted = deleted;
2879 }
2880 
2881 void
2882 rc_node_ptr_assign(rc_node_ptr_t *out, const rc_node_ptr_t *val)
2883 {
2884 	rc_node_assign(out, val->rnp_node);
2885 }
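
/*
 * Sketch of the client-pointer lifecycle (the call sites are hypothetical;
 * the real callers live in the client layer).  rc_local_scope() assigns and
 * holds the scope node, rc_node_clear() drops the hold, and
 * rc_node_ptr_free_mem() frees any cached authorization string:
 *
 *	rc_node_ptr_t ptr;
 *
 *	rc_node_ptr_init(&ptr);
 *	(void) rc_local_scope(REP_PROTOCOL_ENTITY_SCOPE, &ptr);
 *	... use ptr via the RC_NODE_PTR_* macros below ...
 *	rc_node_clear(&ptr, 0);
 *	rc_node_ptr_free_mem(&ptr);
 */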
2886 
2887 /*
2888  * rc_node_check()/RC_NODE_CHECK()
2889  * rc_node_check_and_lock()/RC_NODE_CHECK*()
2890  *
2891  * Fails with
2892  *   _NOT_SET
2893  *   _DELETED
2894  */
2895 static int
2896 rc_node_check_and_lock(rc_node_t *np)
2897 {
2898 	int result = REP_PROTOCOL_SUCCESS;
2899 	if (np == NULL)
2900 		return (REP_PROTOCOL_FAIL_NOT_SET);
2901 
2902 	(void) pthread_mutex_lock(&np->rn_lock);
2903 	if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2904 		result = REP_PROTOCOL_FAIL_DELETED;
2905 		(void) pthread_mutex_unlock(&np->rn_lock);
2906 	}
2907 
2908 	return (result);
2909 }
2910 
2911 /*
2912  * Fails with
2913  *   _NOT_SET - ptr is reset
2914  *   _DELETED - node has been deleted
2915  */
2916 static rc_node_t *
2917 rc_node_ptr_check_and_lock(rc_node_ptr_t *npp, int *res)
2918 {
2919 	rc_node_t *np = npp->rnp_node;
2920 	if (np == NULL) {
2921 		if (npp->rnp_deleted)
2922 			*res = REP_PROTOCOL_FAIL_DELETED;
2923 		else
2924 			*res = REP_PROTOCOL_FAIL_NOT_SET;
2925 		return (NULL);
2926 	}
2927 
2928 	(void) pthread_mutex_lock(&np->rn_lock);
2929 	if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2930 		(void) pthread_mutex_unlock(&np->rn_lock);
2931 		rc_node_clear(npp, 1);
2932 		*res = REP_PROTOCOL_FAIL_DELETED;
2933 		return (NULL);
2934 	}
2935 	return (np);
2936 }
2937 
2938 #define	RC_NODE_CHECK_AND_LOCK(n) {					\
2939 	int rc__res;							\
2940 	if ((rc__res = rc_node_check_and_lock(n)) != REP_PROTOCOL_SUCCESS) \
2941 		return (rc__res);					\
2942 }
2943 
2944 #define	RC_NODE_CHECK(n) {						\
2945 	RC_NODE_CHECK_AND_LOCK(n);					\
2946 	(void) pthread_mutex_unlock(&(n)->rn_lock);			\
2947 }
2948 
2949 #define	RC_NODE_CHECK_AND_HOLD(n) {					\
2950 	RC_NODE_CHECK_AND_LOCK(n);					\
2951 	rc_node_hold_locked(n);						\
2952 	(void) pthread_mutex_unlock(&(n)->rn_lock);			\
2953 }
2954 
2955 #define	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp) {			\
2956 	int rc__res;							\
2957 	if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == NULL)	\
2958 		return (rc__res);					\
2959 }
2960 
2961 #define	RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, mem) {		\
2962 	int rc__res;							\
2963 	if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == 	\
2964 	    NULL) {							\
2965 		if ((mem) != NULL)					\
2966 			free((mem));					\
2967 		return (rc__res);					\
2968 	}								\
2969 }
2970 
2971 #define	RC_NODE_PTR_GET_CHECK(np, npp) {				\
2972 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);			\
2973 	(void) pthread_mutex_unlock(&(np)->rn_lock);			\
2974 }
2975 
2976 #define	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp) {			\
2977 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);			\
2978 	rc_node_hold_locked(np);					\
2979 	(void) pthread_mutex_unlock(&(np)->rn_lock);			\
2980 }
2981 
2982 #define	HOLD_FLAG_OR_RETURN(np, flag) {					\
2983 	assert(MUTEX_HELD(&(np)->rn_lock));				\
2984 	assert(!((np)->rn_flags & RC_NODE_DEAD));			\
2985 	if (!rc_node_hold_flag((np), flag)) {				\
2986 		(void) pthread_mutex_unlock(&(np)->rn_lock);		\
2987 		return (REP_PROTOCOL_FAIL_DELETED);			\
2988 	}								\
2989 }
2990 
2991 #define	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, flag, mem) {		\
2992 	assert(MUTEX_HELD(&(np)->rn_lock));				\
2993 	if (!rc_node_hold_flag((np), flag)) {				\
2994 		(void) pthread_mutex_unlock(&(np)->rn_lock);		\
2995 		assert((np) == (npp)->rnp_node);			\
2996 		rc_node_clear(npp, 1);					\
2997 		if ((mem) != NULL)					\
2998 			free((mem));					\
2999 		return (REP_PROTOCOL_FAIL_DELETED);			\
3000 	}								\
3001 }
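
/*
 * The macros above expand to code which can return from the function that
 * uses them.  A sketch (example_op() is hypothetical; the pattern recurs
 * throughout the remainder of this file):
 *
 *	int
 *	example_op(rc_node_ptr_t *npp)
 *	{
 *		rc_node_t *np;
 *
 *		RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
 *		... np is set and locked here; _NOT_SET or _DELETED has
 *		    already been returned to our caller otherwise ...
 *		(void) pthread_mutex_unlock(&np->rn_lock);
 *		return (REP_PROTOCOL_SUCCESS);
 *	}
 */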
3002 
3003 int
3004 rc_local_scope(uint32_t type, rc_node_ptr_t *out)
3005 {
3006 	if (type != REP_PROTOCOL_ENTITY_SCOPE) {
3007 		rc_node_clear(out, 0);
3008 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3009 	}
3010 
3011 	/*
3012 	 * the main scope never gets destroyed
3013 	 */
3014 	rc_node_assign(out, rc_scope);
3015 
3016 	return (REP_PROTOCOL_SUCCESS);
3017 }
3018 
3019 /*
3020  * Fails with
3021  *   _NOT_SET - npp is not set
3022  *   _DELETED - the node npp pointed at has been deleted
3023  *   _TYPE_MISMATCH - type is not _SCOPE
3024  *   _NOT_FOUND - scope has no parent
3025  */
3026 static int
3027 rc_scope_parent_scope(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
3028 {
3029 	rc_node_t *np;
3030 
3031 	rc_node_clear(out, 0);
3032 
3033 	RC_NODE_PTR_GET_CHECK(np, npp);
3034 
3035 	if (type != REP_PROTOCOL_ENTITY_SCOPE)
3036 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3037 
3038 	return (REP_PROTOCOL_FAIL_NOT_FOUND);
3039 }
3040 
3041 static int rc_node_pg_check_read_protect(rc_node_t *);
3042 
3043 /*
3044  * Fails with
3045  *   _NOT_SET
3046  *   _DELETED
3047  *   _NOT_APPLICABLE
3048  *   _NOT_FOUND
3049  *   _BAD_REQUEST
3050  *   _TRUNCATED
3051  *   _NO_RESOURCES
3052  */
3053 int
3054 rc_node_name(rc_node_ptr_t *npp, char *buf, size_t sz, uint32_t answertype,
3055     size_t *sz_out)
3056 {
3057 	size_t actual;
3058 	rc_node_t *np;
3059 
3060 	assert(sz == *sz_out);
3061 
3062 	RC_NODE_PTR_GET_CHECK(np, npp);
3063 
3064 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3065 		np = np->rn_cchain[0];
3066 		RC_NODE_CHECK(np);
3067 	}
3068 
3069 	switch (answertype) {
3070 	case RP_ENTITY_NAME_NAME:
3071 		if (np->rn_name == NULL)
3072 			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3073 		actual = strlcpy(buf, np->rn_name, sz);
3074 		break;
3075 	case RP_ENTITY_NAME_PGTYPE:
3076 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
3077 			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3078 		actual = strlcpy(buf, np->rn_type, sz);
3079 		break;
3080 	case RP_ENTITY_NAME_PGFLAGS:
3081 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
3082 			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3083 		actual = snprintf(buf, sz, "%d", np->rn_pgflags);
3084 		break;
3085 	case RP_ENTITY_NAME_SNAPLEVEL_SCOPE:
3086 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3087 			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3088 		actual = strlcpy(buf, np->rn_snaplevel->rsl_scope, sz);
3089 		break;
3090 	case RP_ENTITY_NAME_SNAPLEVEL_SERVICE:
3091 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3092 			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3093 		actual = strlcpy(buf, np->rn_snaplevel->rsl_service, sz);
3094 		break;
3095 	case RP_ENTITY_NAME_SNAPLEVEL_INSTANCE:
3096 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3097 			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3098 		if (np->rn_snaplevel->rsl_instance == NULL)
3099 			return (REP_PROTOCOL_FAIL_NOT_FOUND);
3100 		actual = strlcpy(buf, np->rn_snaplevel->rsl_instance, sz);
3101 		break;
3102 	case RP_ENTITY_NAME_PGREADPROT:
3103 	{
3104 		int ret;
3105 
3106 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
3107 			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3108 		ret = rc_node_pg_check_read_protect(np);
3109 		assert(ret != REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3110 		switch (ret) {
3111 		case REP_PROTOCOL_FAIL_PERMISSION_DENIED:
3112 			actual = snprintf(buf, sz, "1");
3113 			break;
3114 		case REP_PROTOCOL_SUCCESS:
3115 			actual = snprintf(buf, sz, "0");
3116 			break;
3117 		default:
3118 			return (ret);
3119 		}
3120 		break;
3121 	}
3122 	default:
3123 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
3124 	}
3125 	if (actual >= sz)
3126 		return (REP_PROTOCOL_FAIL_TRUNCATED);
3127 
3128 	*sz_out = actual;
3129 	return (REP_PROTOCOL_SUCCESS);
3130 }
3131 
3132 int
3133 rc_node_get_property_type(rc_node_ptr_t *npp, rep_protocol_value_type_t *out)
3134 {
3135 	rc_node_t *np;
3136 
3137 	RC_NODE_PTR_GET_CHECK(np, npp);
3138 
3139 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
3140 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3141 
3142 	*out = np->rn_valtype;
3143 
3144 	return (REP_PROTOCOL_SUCCESS);
3145 }
3146 
3147 /*
3148  * Get np's parent.  If np is deleted, returns _DELETED.  Otherwise puts a hold
3149  * on the parent, returns a pointer to it in *out, and returns _SUCCESS.
3150  */
3151 static int
3152 rc_node_parent(rc_node_t *np, rc_node_t **out)
3153 {
3154 	rc_node_t *pnp;
3155 	rc_node_t *np_orig;
3156 
3157 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3158 		RC_NODE_CHECK_AND_LOCK(np);
3159 	} else {
3160 		np = np->rn_cchain[0];
3161 		RC_NODE_CHECK_AND_LOCK(np);
3162 	}
3163 
3164 	np_orig = np;
3165 	rc_node_hold_locked(np);		/* simplifies the remainder */
3166 
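	/*
	 * If np has been superseded (RC_NODE_OLD), chase the current version
	 * of the node through the cache before following the parent pointer.
	 */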
3167 	for (;;) {
3168 		if (!rc_node_wait_flag(np,
3169 		    RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
3170 			rc_node_rele_locked(np);
3171 			return (REP_PROTOCOL_FAIL_DELETED);
3172 		}
3173 
3174 		if (!(np->rn_flags & RC_NODE_OLD))
3175 			break;
3176 
3177 		rc_node_rele_locked(np);
3178 		np = cache_lookup(&np_orig->rn_id);
3179 		assert(np != np_orig);
3180 
3181 		if (np == NULL)
3182 			goto deleted;
3183 		(void) pthread_mutex_lock(&np->rn_lock);
3184 	}
3185 
3186 	/* guaranteed to succeed without dropping the lock */
3187 	if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
3188 		(void) pthread_mutex_unlock(&np->rn_lock);
3189 		*out = NULL;
3190 		rc_node_rele(np);
3191 		return (REP_PROTOCOL_FAIL_DELETED);
3192 	}
3193 
3194 	assert(np->rn_parent != NULL);
3195 	pnp = np->rn_parent;
3196 	(void) pthread_mutex_unlock(&np->rn_lock);
3197 
3198 	(void) pthread_mutex_lock(&pnp->rn_lock);
3199 	(void) pthread_mutex_lock(&np->rn_lock);
3200 	rc_node_rele_flag(np, RC_NODE_USING_PARENT);
3201 	(void) pthread_mutex_unlock(&np->rn_lock);
3202 
3203 	rc_node_hold_locked(pnp);
3204 
3205 	(void) pthread_mutex_unlock(&pnp->rn_lock);
3206 
3207 	rc_node_rele(np);
3208 	*out = pnp;
3209 	return (REP_PROTOCOL_SUCCESS);
3210 
3211 deleted:
3212 	rc_node_rele(np);
3213 	return (REP_PROTOCOL_FAIL_DELETED);
3214 }
3215 
3216 /*
3217  * Fails with
3218  *   _NOT_SET
3219  *   _DELETED
3220  */
3221 static int
3222 rc_node_ptr_parent(rc_node_ptr_t *npp, rc_node_t **out)
3223 {
3224 	rc_node_t *np;
3225 
3226 	RC_NODE_PTR_GET_CHECK(np, npp);
3227 
3228 	return (rc_node_parent(np, out));
3229 }
3230 
3231 /*
3232  * Fails with
3233  *   _NOT_SET - npp is not set
3234  *   _DELETED - the node npp pointed at has been deleted
3235  *   _TYPE_MISMATCH - npp's node's parent is not of type type
3236  *
3237  * If npp points to a scope, can also fail with
3238  *   _NOT_FOUND - scope has no parent
3239  */
3240 int
3241 rc_node_get_parent(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
3242 {
3243 	rc_node_t *pnp;
3244 	int rc;
3245 
3246 	if (npp->rnp_node != NULL &&
3247 	    npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE)
3248 		return (rc_scope_parent_scope(npp, type, out));
3249 
3250 	if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS) {
3251 		rc_node_clear(out, 0);
3252 		return (rc);
3253 	}
3254 
3255 	if (type != pnp->rn_id.rl_type) {
3256 		rc_node_rele(pnp);
3257 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3258 	}
3259 
3260 	rc_node_assign(out, pnp);
3261 	rc_node_rele(pnp);
3262 
3263 	return (REP_PROTOCOL_SUCCESS);
3264 }
3265 
3266 int
3267 rc_node_parent_type(rc_node_ptr_t *npp, uint32_t *type_out)
3268 {
3269 	rc_node_t *pnp;
3270 	int rc;
3271 
3272 	if (npp->rnp_node != NULL &&
3273 	    npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE) {
3274 		*type_out = REP_PROTOCOL_ENTITY_SCOPE;
3275 		return (REP_PROTOCOL_SUCCESS);
3276 	}
3277 
3278 	if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS)
3279 		return (rc);
3280 
3281 	*type_out = pnp->rn_id.rl_type;
3282 
3283 	rc_node_rele(pnp);
3284 
3285 	return (REP_PROTOCOL_SUCCESS);
3286 }
3287 
3288 /*
3289  * Fails with
3290  *   _INVALID_TYPE - type is invalid
3291  *   _TYPE_MISMATCH - np doesn't carry children of type type
3292  *   _DELETED - np has been deleted
3293  *   _NOT_FOUND - no child with that name/type combo found
3294  *   _NO_RESOURCES
3295  *   _BACKEND_ACCESS
3296  */
3297 int
3298 rc_node_get_child(rc_node_ptr_t *npp, const char *name, uint32_t type,
3299     rc_node_ptr_t *outp)
3300 {
3301 	rc_node_t *np, *cp;
3302 	rc_node_t *child = NULL;
3303 	int ret, idx;
3304 
3305 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
3306 	if ((ret = rc_check_type_name(type, name)) == REP_PROTOCOL_SUCCESS) {
3307 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3308 			ret = rc_node_find_named_child(np, name, type, &child);
3309 		} else {
3310 			(void) pthread_mutex_unlock(&np->rn_lock);
3311 			ret = REP_PROTOCOL_SUCCESS;
3312 			for (idx = 0; idx < COMPOSITION_DEPTH; idx++) {
3313 				cp = np->rn_cchain[idx];
3314 				if (cp == NULL)
3315 					break;
3316 				RC_NODE_CHECK_AND_LOCK(cp);
3317 				ret = rc_node_find_named_child(cp, name, type,
3318 				    &child);
3319 				(void) pthread_mutex_unlock(&cp->rn_lock);
3320 				/*
3321 				 * loop only if we succeeded, but no child of
3322 				 * the correct name was found.
3323 				 */
3324 				if (ret != REP_PROTOCOL_SUCCESS ||
3325 				    child != NULL)
3326 					break;
3327 			}
3328 			(void) pthread_mutex_lock(&np->rn_lock);
3329 		}
3330 	}
3331 	(void) pthread_mutex_unlock(&np->rn_lock);
3332 
3333 	if (ret == REP_PROTOCOL_SUCCESS) {
3334 		rc_node_assign(outp, child);
3335 		if (child != NULL)
3336 			rc_node_rele(child);
3337 		else
3338 			ret = REP_PROTOCOL_FAIL_NOT_FOUND;
3339 	} else {
3340 		rc_node_assign(outp, NULL);
3341 	}
3342 	return (ret);
3343 }
3344 
3345 int
3346 rc_node_update(rc_node_ptr_t *npp)
3347 {
3348 	cache_bucket_t *bp;
3349 	rc_node_t *np = npp->rnp_node;
3350 	rc_node_t *nnp;
3351 	rc_node_t *cpg = NULL;
3352 
3353 	if (np != NULL &&
3354 	    np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3355 		/*
3356 		 * If we're updating a composed property group, actually
3357 		 * update the top-level property group & return the
3358 		 * appropriate value.  But leave *npp pointing at us.
3359 		 */
3360 		cpg = np;
3361 		np = np->rn_cchain[0];
3362 	}
3363 
3364 	RC_NODE_CHECK(np);
3365 
3366 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP &&
3367 	    np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT)
3368 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
3369 
3370 	for (;;) {
3371 		bp = cache_hold(np->rn_hash);
3372 		nnp = cache_lookup_unlocked(bp, &np->rn_id);
3373 		if (nnp == NULL) {
3374 			cache_release(bp);
3375 			rc_node_clear(npp, 1);
3376 			return (REP_PROTOCOL_FAIL_DELETED);
3377 		}
3378 		/*
3379 		 * grab the lock before dropping the cache bucket, so
3380 		 * that no one else can sneak in
3381 		 */
3382 		(void) pthread_mutex_lock(&nnp->rn_lock);
3383 		cache_release(bp);
3384 
3385 		if (!(nnp->rn_flags & RC_NODE_IN_TX) ||
3386 		    !rc_node_wait_flag(nnp, RC_NODE_IN_TX))
3387 			break;
3388 
3389 		rc_node_rele_locked(nnp);
3390 	}
3391 
3392 	/*
3393 	 * If it is dead, we want to update it so that it will continue to
3394 	 * report being dead.
3395 	 */
3396 	if (nnp->rn_flags & RC_NODE_DEAD) {
3397 		(void) pthread_mutex_unlock(&nnp->rn_lock);
3398 		if (nnp != np && cpg == NULL)
3399 			rc_node_assign(npp, nnp);	/* updated */
3400 		rc_node_rele(nnp);
3401 		return (REP_PROTOCOL_FAIL_DELETED);
3402 	}
3403 
3404 	assert(!(nnp->rn_flags & RC_NODE_OLD));
3405 	(void) pthread_mutex_unlock(&nnp->rn_lock);
3406 
3407 	if (nnp != np && cpg == NULL)
3408 		rc_node_assign(npp, nnp);		/* updated */
3409 
3410 	rc_node_rele(nnp);
3411 
3412 	return ((nnp == np)? REP_PROTOCOL_SUCCESS : REP_PROTOCOL_DONE);
3413 }
3414 
3415 /*
3416  * does a generic modification check, for creation, deletion, and snapshot
3417  * management only.  Property group transactions have different checks.
3418  *
3419  * The string returned to *match_auth must be freed.
3420  */
3421 static perm_status_t
3422 rc_node_modify_permission_check(char **match_auth)
3423 {
3424 	permcheck_t *pcp;
3425 	perm_status_t granted = PERM_GRANTED;
3426 	int rc;
3427 
3428 	*match_auth = NULL;
3429 #ifdef NATIVE_BUILD
3430 	if (!client_is_privileged()) {
3431 		granted = PERM_DENIED;
3432 	}
3433 	return (granted);
3434 #else
3435 	if (is_main_repository == 0)
3436 		return (PERM_GRANTED);
3437 	pcp = pc_create();
3438 	if (pcp != NULL) {
3439 		rc = perm_add_enabling(pcp, AUTH_MODIFY);
3440 
3441 		if (rc == REP_PROTOCOL_SUCCESS) {
3442 			granted = perm_granted(pcp);
3443 
3444 			if ((granted == PERM_GRANTED) ||
3445 			    (granted == PERM_DENIED)) {
3446 				/*
3447 				 * Copy off the authorization
3448 				 * string before freeing pcp.
3449 				 */
3450 				*match_auth =
3451 				    strdup(pcp->pc_auth_string);
3452 				if (*match_auth == NULL)
3453 					granted = PERM_FAIL;
3454 			}
3455 		} else {
3456 			granted = PERM_FAIL;
3457 		}
3458 
3459 		pc_free(pcp);
3460 	} else {
3461 		granted = PERM_FAIL;
3462 	}
3463 
3464 	return (granted);
3465 #endif /* NATIVE_BUILD */
3466 }
3467 
3468 /*
3469  * Native builds are done to create svc.configd-native.  This program runs
3470  * only on the Solaris build machines to create the seed repository, and it
3471  * is compiled against the build machine's header files.  The ADT_smf_*
3472  * symbols may not be defined in these header files.  For this reason
3473  * smf_annotation_event(), _smf_audit_event() and special_property_event()
3474  * are not compiled for native builds.
3475  */
3476 #ifndef	NATIVE_BUILD
3477 
3478 /*
3479  * This function generates an annotation audit event if one has been set up.
3480  * Annotation events should only be generated immediately before the audit
3481  * record from the first attempt to modify the repository from a client
3482  * which has requested an annotation.
3483  */
3484 static void
3485 smf_annotation_event(int status, int return_val)
3486 {
3487 	adt_session_data_t *session;
3488 	adt_event_data_t *event = NULL;
3489 	char file[MAXPATHLEN];
3490 	char operation[REP_PROTOCOL_NAME_LEN];
3491 
3492 	/* Don't audit if we're using an alternate repository. */
3493 	if (is_main_repository == 0)
3494 		return;
3495 
3496 	if (client_annotation_needed(operation, sizeof (operation), file,
3497 	    sizeof (file)) == 0) {
3498 		return;
3499 	}
3500 	if (file[0] == 0) {
3501 		(void) strlcpy(file, "NO FILE", sizeof (file));
3502 	}
3503 	if (operation[0] == 0) {
3504 		(void) strlcpy(operation, "NO OPERATION",
3505 		    sizeof (operation));
3506 	}
3507 	if ((session = get_audit_session()) == NULL)
3508 		return;
3509 	if ((event = adt_alloc_event(session, ADT_smf_annotation)) == NULL) {
3510 		uu_warn("smf_annotation_event cannot allocate event "
3511 		    "data.  %s\n", strerror(errno));
3512 		return;
3513 	}
3514 	event->adt_smf_annotation.operation = operation;
3515 	event->adt_smf_annotation.file = file;
3516 	if (adt_put_event(event, status, return_val) == 0) {
3517 		client_annotation_finished();
3518 	} else {
3519 		uu_warn("smf_annotation_event failed to put event.  "
3520 		    "%s\n", strerror(errno));
3521 	}
3522 	adt_free_event(event);
3523 }
3524 
3525 /*
3526  * _smf_audit_event interacts with the security auditing system to generate
3527  * an audit event structure.  It establishes an audit session and allocates
3528  * an audit event.  The event is filled in from the audit data, and
3529  * adt_put_event is called to generate the event.
3530  */
3531 static void
3532 _smf_audit_event(au_event_t event_id, int status, int return_val,
3533     audit_event_data_t *data)
3534 {
3535 	char *auth_used;
3536 	char *fmri;
3537 	char *prop_value;
3538 	adt_session_data_t *session;
3539 	adt_event_data_t *event = NULL;
3540 
3541 	/* Don't audit if we're using an alternate repository */
3542 	if (is_main_repository == 0)
3543 		return;
3544 
3545 	smf_annotation_event(status, return_val);
3546 	if ((session = get_audit_session()) == NULL)
3547 		return;
3548 	if ((event = adt_alloc_event(session, event_id)) == NULL) {
3549 		uu_warn("_smf_audit_event cannot allocate event "
3550 		    "data.  %s\n", strerror(errno));
3551 		return;
3552 	}
3553 
3554 	/*
3555 	 * Handle possibility of NULL authorization strings, FMRIs and
3556 	 * property values.
3557 	 */
3558 	if (data->ed_auth == NULL) {
3559 		auth_used = "PRIVILEGED";
3560 	} else {
3561 		auth_used = data->ed_auth;
3562 	}
3563 	if (data->ed_fmri == NULL) {
3564 		syslog(LOG_WARNING, "_smf_audit_event called with "
3565 		    "empty FMRI string");
3566 		fmri = "UNKNOWN FMRI";
3567 	} else {
3568 		fmri = data->ed_fmri;
3569 	}
3570 	if (data->ed_prop_value == NULL) {
3571 		prop_value = "";
3572 	} else {
3573 		prop_value = data->ed_prop_value;
3574 	}
3575 
3576 	/* Fill in the event data. */
3577 	switch (event_id) {
3578 	case ADT_smf_attach_snap:
3579 		event->adt_smf_attach_snap.auth_used = auth_used;
3580 		event->adt_smf_attach_snap.old_fmri = data->ed_old_fmri;
3581 		event->adt_smf_attach_snap.old_name = data->ed_old_name;
3582 		event->adt_smf_attach_snap.new_fmri = fmri;
3583 		event->adt_smf_attach_snap.new_name = data->ed_snapname;
3584 		break;
3585 	case ADT_smf_change_prop:
3586 		event->adt_smf_change_prop.auth_used = auth_used;
3587 		event->adt_smf_change_prop.fmri = fmri;
3588 		event->adt_smf_change_prop.type = data->ed_type;
3589 		event->adt_smf_change_prop.value = prop_value;
3590 		break;
3591 	case ADT_smf_clear:
3592 		event->adt_smf_clear.auth_used = auth_used;
3593 		event->adt_smf_clear.fmri = fmri;
3594 		break;
3595 	case ADT_smf_create:
3596 		event->adt_smf_create.fmri = fmri;
3597 		event->adt_smf_create.auth_used = auth_used;
3598 		break;
3599 	case ADT_smf_create_npg:
3600 		event->adt_smf_create_npg.auth_used = auth_used;
3601 		event->adt_smf_create_npg.fmri = fmri;
3602 		event->adt_smf_create_npg.type = data->ed_type;
3603 		break;
3604 	case ADT_smf_create_pg:
3605 		event->adt_smf_create_pg.auth_used = auth_used;
3606 		event->adt_smf_create_pg.fmri = fmri;
3607 		event->adt_smf_create_pg.type = data->ed_type;
3608 		break;
3609 	case ADT_smf_create_prop:
3610 		event->adt_smf_create_prop.auth_used = auth_used;
3611 		event->adt_smf_create_prop.fmri = fmri;
3612 		event->adt_smf_create_prop.type = data->ed_type;
3613 		event->adt_smf_create_prop.value = prop_value;
3614 		break;
3615 	case ADT_smf_create_snap:
3616 		event->adt_smf_create_snap.auth_used = auth_used;
3617 		event->adt_smf_create_snap.fmri = fmri;
3618 		event->adt_smf_create_snap.name = data->ed_snapname;
3619 		break;
3620 	case ADT_smf_degrade:
3621 		event->adt_smf_degrade.auth_used = auth_used;
3622 		event->adt_smf_degrade.fmri = fmri;
3623 		break;
3624 	case ADT_smf_delete:
3625 		event->adt_smf_delete.fmri = fmri;
3626 		event->adt_smf_delete.auth_used = auth_used;
3627 		break;
3628 	case ADT_smf_delete_npg:
3629 		event->adt_smf_delete_npg.auth_used = auth_used;
3630 		event->adt_smf_delete_npg.fmri = fmri;
3631 		event->adt_smf_delete_npg.type = data->ed_type;
3632 		break;
3633 	case ADT_smf_delete_pg:
3634 		event->adt_smf_delete_pg.auth_used = auth_used;
3635 		event->adt_smf_delete_pg.fmri = fmri;
3636 		event->adt_smf_delete_pg.type = data->ed_type;
3637 		break;
3638 	case ADT_smf_delete_prop:
3639 		event->adt_smf_delete_prop.auth_used = auth_used;
3640 		event->adt_smf_delete_prop.fmri = fmri;
3641 		break;
3642 	case ADT_smf_delete_snap:
3643 		event->adt_smf_delete_snap.auth_used = auth_used;
3644 		event->adt_smf_delete_snap.fmri = fmri;
3645 		event->adt_smf_delete_snap.name = data->ed_snapname;
3646 		break;
3647 	case ADT_smf_disable:
3648 		event->adt_smf_disable.auth_used = auth_used;
3649 		event->adt_smf_disable.fmri = fmri;
3650 		break;
3651 	case ADT_smf_enable:
3652 		event->adt_smf_enable.auth_used = auth_used;
3653 		event->adt_smf_enable.fmri = fmri;
3654 		break;
3655 	case ADT_smf_immediate_degrade:
3656 		event->adt_smf_immediate_degrade.auth_used = auth_used;
3657 		event->adt_smf_immediate_degrade.fmri = fmri;
3658 		break;
3659 	case ADT_smf_immediate_maintenance:
3660 		event->adt_smf_immediate_maintenance.auth_used = auth_used;
3661 		event->adt_smf_immediate_maintenance.fmri = fmri;
3662 		break;
3663 	case ADT_smf_immtmp_maintenance:
3664 		event->adt_smf_immtmp_maintenance.auth_used = auth_used;
3665 		event->adt_smf_immtmp_maintenance.fmri = fmri;
3666 		break;
3667 	case ADT_smf_maintenance:
3668 		event->adt_smf_maintenance.auth_used = auth_used;
3669 		event->adt_smf_maintenance.fmri = fmri;
3670 		break;
3671 	case ADT_smf_milestone:
3672 		event->adt_smf_milestone.auth_used = auth_used;
3673 		event->adt_smf_milestone.fmri = fmri;
3674 		break;
3675 	case ADT_smf_read_prop:
3676 		event->adt_smf_read_prop.auth_used = auth_used;
3677 		event->adt_smf_read_prop.fmri = fmri;
3678 		break;
3679 	case ADT_smf_refresh:
3680 		event->adt_smf_refresh.auth_used = auth_used;
3681 		event->adt_smf_refresh.fmri = fmri;
3682 		break;
3683 	case ADT_smf_restart:
3684 		event->adt_smf_restart.auth_used = auth_used;
3685 		event->adt_smf_restart.fmri = fmri;
3686 		break;
3687 	case ADT_smf_tmp_disable:
3688 		event->adt_smf_tmp_disable.auth_used = auth_used;
3689 		event->adt_smf_tmp_disable.fmri = fmri;
3690 		break;
3691 	case ADT_smf_tmp_enable:
3692 		event->adt_smf_tmp_enable.auth_used = auth_used;
3693 		event->adt_smf_tmp_enable.fmri = fmri;
3694 		break;
3695 	case ADT_smf_tmp_maintenance:
3696 		event->adt_smf_tmp_maintenance.auth_used = auth_used;
3697 		event->adt_smf_tmp_maintenance.fmri = fmri;
3698 		break;
3699 	default:
3700 		abort();	/* Need to cover all SMF event IDs */
3701 	}
3702 
3703 	if (adt_put_event(event, status, return_val) != 0) {
3704 		uu_warn("_smf_audit_event failed to put event.  %s\n",
3705 		    strerror(errno));
3706 	}
3707 	adt_free_event(event);
3708 }
3709 
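/*
 * Editor's note: an illustrative, hypothetical use of the audit support
 * above, guarded by #if 0 and never compiled.  Real callers in this file go
 * through smf_audit_event() (see generate_property_events() and
 * rc_node_create_child() below); the sketch only shows which
 * audit_event_data_t fields an ADT_smf_change_prop event consumes.  The
 * FMRI, type code, and value strings are made up for illustration.
 */
#if 0
static void
example_audit_property_change(void)
{
	audit_event_data_t audit_data;

	audit_data.ed_auth = NULL;	/* NULL is reported as "PRIVILEGED" */
	audit_data.ed_fmri =
	    "svc:/system/example:default/:properties/start/exec";
	audit_data.ed_type = "??";	/* hypothetical 2-character type code */
	audit_data.ed_prop_value = "\"true\"";

	smf_audit_event(ADT_smf_change_prop, ADT_SUCCESS, ADT_SUCCESS,
	    &audit_data);
}
#endif
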
3710 /*
3711  * Determine if the combination of the property group at pg_name and the
3712  * property at prop_name are in the set of special startd properties.  If
3713  * they are, a special audit event will be generated.
3714  */
3715 static void
3716 special_property_event(audit_event_data_t *evdp, const char *prop_name,
3717     char *pg_name, int status, int return_val, tx_commit_data_t *tx_data,
3718     size_t cmd_no)
3719 {
3720 	au_event_t event_id;
3721 	audit_special_prop_item_t search_key;
3722 	audit_special_prop_item_t *found;
3723 
3724 	/* Use bsearch to find the special property information. */
3725 	search_key.api_prop_name = prop_name;
3726 	search_key.api_pg_name = pg_name;
3727 	found = (audit_special_prop_item_t *)bsearch(&search_key,
3728 	    special_props_list, SPECIAL_PROP_COUNT,
3729 	    sizeof (special_props_list[0]), special_prop_compare);
3730 	if (found == NULL) {
3731 		/* Not a special property. */
3732 		return;
3733 	}
3734 
3735 	/* Get the event id */
3736 	if (found->api_event_func == NULL) {
3737 		event_id = found->api_event_id;
3738 	} else {
3739 		if ((*found->api_event_func)(tx_data, cmd_no,
3740 		    found->api_pg_name, &event_id) < 0)
3741 			return;
3742 	}
3743 
3744 	/* Generate the event. */
3745 	smf_audit_event(event_id, status, return_val, evdp);
3746 }
3747 #endif	/* NATIVE_BUILD */
3748 
3749 /*
3750  * Return a pointer to a string containing all the values of the command
3751  * specified by cmd_no with each value enclosed in quotes.  It is up to the
3752  * caller to free the memory at the returned pointer.
3753  */
3754 static char *
3755 generate_value_list(tx_commit_data_t *tx_data, size_t cmd_no)
3756 {
3757 	const char *cp;
3758 	const char *cur_value;
3759 	size_t byte_count = 0;
3760 	uint32_t i;
3761 	uint32_t nvalues;
3762 	size_t str_size = 0;
3763 	char *values = NULL;
3764 	char *vp;
3765 
3766 	if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
3767 		return (NULL);
3768 	/*
3769 	 * First determine the size of the buffer that we will need.  We
3770 	 * will represent each property value surrounded by quotes with a
3771 	 * space separating the values.  Thus, we need to find the total
3772 	 * size of all the value strings and add 3 for each value.
3773 	 *
3774 	 * There is one catch, though.  We need to escape any internal
3775 	 * quote marks in the values.  So for each quote in the value we
3776 	 * need to add another byte to the buffer size.
3777 	 */
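	/*
	 * Editor's illustrative example: for the single value a"b the loop
	 * below counts 4 bytes (three characters plus one for escaping the
	 * embedded quote) plus 3 for the surrounding quotes and separating
	 * space, so byte_count becomes 7; the nul terminator then brings
	 * the allocation to 8.  The text emitted for that value is "a\"b"
	 * followed by a space.
	 */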
3778 	for (i = 0; i < nvalues; i++) {
3779 		if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
3780 		    REP_PROTOCOL_SUCCESS)
3781 			return (NULL);
3782 		for (cp = cur_value; *cp != 0; cp++) {
3783 			byte_count += (*cp == '"') ? 2 : 1;
3784 		}
3785 		byte_count += 3;	/* surrounding quotes & space */
3786 	}
3787 	byte_count++;		/* nul terminator */
3788 	values = malloc(byte_count);
3789 	if (values == NULL)
3790 		return (NULL);
3791 	*values = 0;
3792 
3793 	/* Now build up the string of values. */
3794 	for (i = 0; i < nvalues; i++) {
3795 		if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
3796 		    REP_PROTOCOL_SUCCESS) {
3797 			free(values);
3798 			return (NULL);
3799 		}
3800 		(void) strlcat(values, "\"", byte_count);
3801 		for (cp = cur_value, vp = values + strlen(values);
3802 		    *cp != 0; cp++) {
3803 			if (*cp == '"') {
3804 				*vp++ = '\\';
3805 				*vp++ = '"';
3806 			} else {
3807 				*vp++ = *cp;
3808 			}
3809 		}
3810 		*vp = 0;
3811 		str_size = strlcat(values, "\" ", byte_count);
3812 		assert(str_size < byte_count);
3813 	}
3814 	if (str_size > 0)
3815 		values[str_size - 1] = 0;	/* get rid of trailing space */
3816 	return (values);
3817 }
3818 
3819 /*
3820  * generate_property_events takes the transaction commit data at tx_data
3821  * and generates an audit event for each command.
3822  *
3823  * Native builds are done to create svc.configd-native.  This program runs
3824  * only on the Solaris build machines to create the seed repository.  Thus,
3825  * no audit events should be generated when running svc.configd-native.
3826  */
3827 static void
3828 generate_property_events(
3829 	tx_commit_data_t *tx_data,
3830 	char *pg_fmri,		/* FMRI of property group */
3831 	char *auth_string,
3832 	int auth_status,
3833 	int auth_ret_value)
3834 {
3835 #ifndef	NATIVE_BUILD
3836 	enum rep_protocol_transaction_action action;
3837 	audit_event_data_t audit_data;
3838 	size_t count;
3839 	size_t cmd_no;
3840 	char *cp;
3841 	au_event_t event_id;
3842 	char fmri[REP_PROTOCOL_FMRI_LEN];
3843 	char pg_name[REP_PROTOCOL_NAME_LEN];
3844 	char *pg_end;		/* End of prop. group fmri */
3845 	const char *prop_name;
3846 	uint32_t ptype;
3847 	char prop_type[3];
3848 	enum rep_protocol_responseid rc;
3849 	size_t sz_out;
3850 
3851 	/* Make sure we have something to do. */
3852 	if (tx_data == NULL)
3853 		return;
3854 	if ((count = tx_cmd_count(tx_data)) == 0)
3855 		return;
3856 
3857 	/* Copy the property group fmri */
3858 	pg_end = fmri;
3859 	pg_end += strlcpy(fmri, pg_fmri, sizeof (fmri));
3860 
3861 	/*
3862 	 * Get the property group name.  It is the first component after
3863 	 * the last occurrence of SCF_FMRI_PROPERTYGRP_PREFIX in the fmri.
3864 	 */
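	/*
	 * Editor's illustrative example (the FMRI is hypothetical, assuming
	 * the conventional SMF layout): for a pg_fmri such as
	 * svc:/system/example:default/:properties/start, the prefix is the
	 * /:properties/ separator, so pg_name ends up as "start".
	 */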
3865 	cp = strstr(pg_fmri, SCF_FMRI_PROPERTYGRP_PREFIX);
3866 	if (cp == NULL) {
3867 		pg_name[0] = 0;
3868 	} else {
3869 		cp += strlen(SCF_FMRI_PROPERTYGRP_PREFIX);
3870 		(void) strlcpy(pg_name, cp, sizeof (pg_name));
3871 	}
3872 
3873 	audit_data.ed_auth = auth_string;
3874 	audit_data.ed_fmri = fmri;
3875 	audit_data.ed_type = prop_type;
3876 
3877 	/*
3878 	 * Property type is two characters (see
3879 	 * rep_protocol_value_type_t), so terminate the string.
3880 	 */
3881 	prop_type[2] = 0;
3882 
3883 	for (cmd_no = 0; cmd_no < count; cmd_no++) {
3884 		/* Construct FMRI of the property */
3885 		*pg_end = 0;
3886 		if (tx_cmd_prop(tx_data, cmd_no, &prop_name) !=
3887 		    REP_PROTOCOL_SUCCESS) {
3888 			continue;
3889 		}
3890 		rc = rc_concat_fmri_element(fmri, sizeof (fmri), &sz_out,
3891 		    prop_name, REP_PROTOCOL_ENTITY_PROPERTY);
3892 		if (rc != REP_PROTOCOL_SUCCESS) {
3893 			/*
3894 			 * If we can't get the FMRI, we'll abandon this
3895 			 * command
3896 			 */
3897 			continue;
3898 		}
3899 
3900 		/* Generate special property event if necessary. */
3901 		special_property_event(&audit_data, prop_name, pg_name,
3902 		    auth_status, auth_ret_value, tx_data, cmd_no);
3903 
3904 		/* Capture rest of audit data. */
3905 		if (tx_cmd_prop_type(tx_data, cmd_no, &ptype) !=
3906 		    REP_PROTOCOL_SUCCESS) {
3907 			continue;
3908 		}
3909 		prop_type[0] = REP_PROTOCOL_BASE_TYPE(ptype);
3910 		prop_type[1] = REP_PROTOCOL_SUBTYPE(ptype);
3911 		audit_data.ed_prop_value = generate_value_list(tx_data, cmd_no);
3912 
3913 		/* Determine the event type. */
3914 		if (tx_cmd_action(tx_data, cmd_no, &action) !=
3915 		    REP_PROTOCOL_SUCCESS) {
3916 			free(audit_data.ed_prop_value);
3917 			continue;
3918 		}
3919 		switch (action) {
3920 		case REP_PROTOCOL_TX_ENTRY_NEW:
3921 			event_id = ADT_smf_create_prop;
3922 			break;
3923 		case REP_PROTOCOL_TX_ENTRY_CLEAR:
3924 			event_id = ADT_smf_change_prop;
3925 			break;
3926 		case REP_PROTOCOL_TX_ENTRY_REPLACE:
3927 			event_id = ADT_smf_change_prop;
3928 			break;
3929 		case REP_PROTOCOL_TX_ENTRY_DELETE:
3930 			event_id = ADT_smf_delete_prop;
3931 			break;
3932 		default:
3933 			assert(0);	/* Missing a case */
3934 			free(audit_data.ed_prop_value);
3935 			continue;
3936 		}
3937 
3938 		/* Generate the event. */
3939 		smf_audit_event(event_id, auth_status, auth_ret_value,
3940 		    &audit_data);
3941 		free(audit_data.ed_prop_value);
3942 	}
3943 #endif /* NATIVE_BUILD */
3944 }
3945 
3946 /*
3947  * Fails with
3948  *   _DELETED - node has been deleted
3949  *   _NOT_SET - npp is reset
3950  *   _NOT_APPLICABLE - type is _PROPERTYGRP
3951  *   _INVALID_TYPE - node is corrupt or type is invalid
3952  *   _TYPE_MISMATCH - node cannot have children of type type
3953  *   _BAD_REQUEST - name is invalid
3954  *		    cannot create children for this type of node
3955  *   _NO_RESOURCES - out of memory, or could not allocate new id
3956  *   _PERMISSION_DENIED
3957  *   _BACKEND_ACCESS
3958  *   _BACKEND_READONLY
3959  *   _EXISTS - child already exists
3960  *   _TRUNCATED - truncated FMRI for the audit record
3961  */
3962 int
3963 rc_node_create_child(rc_node_ptr_t *npp, uint32_t type, const char *name,
3964     rc_node_ptr_t *cpp)
3965 {
3966 	rc_node_t *np;
3967 	rc_node_t *cp = NULL;
3968 	int rc;
3969 	perm_status_t perm_rc;
3970 	size_t sz_out;
3971 	char fmri[REP_PROTOCOL_FMRI_LEN];
3972 	audit_event_data_t audit_data;
3973 
3974 	rc_node_clear(cpp, 0);
3975 
3976 	/*
3977 	 * rc_node_modify_permission_check() must be called before the node
3978 	 * is locked.  This is because the library functions that check
3979 	 * authorizations can trigger calls back into configd.
3980 	 */
3981 	perm_rc = rc_node_modify_permission_check(&audit_data.ed_auth);
3982 	switch (perm_rc) {
3983 	case PERM_DENIED:
3984 		/*
3985 		 * We continue in this case, so that an audit event can be
3986 		 * generated later in the function.
3987 		 */
3988 		break;
3989 	case PERM_GRANTED:
3990 		break;
3991 	case PERM_GONE:
3992 		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
3993 	case PERM_FAIL:
3994 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
3995 	default:
3996 		bad_error(rc_node_modify_permission_check, perm_rc);
3997 	}
3998 
3999 	RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, audit_data.ed_auth);
4000 
4001 	audit_data.ed_fmri = fmri;
4002 
4003 	/*
4004 	 * there is a separate interface for creating property groups
4005 	 */
4006 	if (type == REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4007 		(void) pthread_mutex_unlock(&np->rn_lock);
4008 		free(audit_data.ed_auth);
4009 		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
4010 	}
4011 
4012 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
4013 		(void) pthread_mutex_unlock(&np->rn_lock);
4014 		np = np->rn_cchain[0];
4015 		if ((rc = rc_node_check_and_lock(np)) != REP_PROTOCOL_SUCCESS) {
4016 			free(audit_data.ed_auth);
4017 			return (rc);
4018 		}
4019 	}
4020 
4021 	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
4022 	    REP_PROTOCOL_SUCCESS) {
4023 		(void) pthread_mutex_unlock(&np->rn_lock);
4024 		free(audit_data.ed_auth);
4025 		return (rc);
4026 	}
4027 	if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS) {
4028 		(void) pthread_mutex_unlock(&np->rn_lock);
4029 		free(audit_data.ed_auth);
4030 		return (rc);
4031 	}
4032 
4033 	if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
4034 	    name, type)) != REP_PROTOCOL_SUCCESS) {
4035 		(void) pthread_mutex_unlock(&np->rn_lock);
4036 		free(audit_data.ed_auth);
4037 		return (rc);
4038 	}
4039 	if (perm_rc == PERM_DENIED) {
4040 		(void) pthread_mutex_unlock(&np->rn_lock);
4041 		smf_audit_event(ADT_smf_create, ADT_FAILURE,
4042 		    ADT_FAIL_VALUE_AUTH, &audit_data);
4043 		free(audit_data.ed_auth);
4044 		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
4045 	}
4046 
4047 	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
4048 	    audit_data.ed_auth);
4049 	(void) pthread_mutex_unlock(&np->rn_lock);
4050 
4051 	rc = object_create(np, type, name, &cp);
4052 	assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
4053 
4054 	if (rc == REP_PROTOCOL_SUCCESS) {
4055 		rc_node_assign(cpp, cp);
4056 		rc_node_rele(cp);
4057 	}
4058 
4059 	(void) pthread_mutex_lock(&np->rn_lock);
4060 	rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
4061 	(void) pthread_mutex_unlock(&np->rn_lock);
4062 
4063 	if (rc == REP_PROTOCOL_SUCCESS) {
4064 		smf_audit_event(ADT_smf_create, ADT_SUCCESS, ADT_SUCCESS,
4065 		    &audit_data);
4066 	}
4067 
4068 	free(audit_data.ed_auth);
4069 
4070 	return (rc);
4071 }
4072 
4073 int
4074 rc_node_create_child_pg(rc_node_ptr_t *npp, uint32_t type, const char *name,
4075     const char *pgtype, uint32_t flags, rc_node_ptr_t *cpp)
4076 {
4077 	rc_node_t *np;
4078 	rc_node_t *cp;
4079 	int rc;
4080 	permcheck_t *pcp;
4081 	perm_status_t granted;
4082 	char fmri[REP_PROTOCOL_FMRI_LEN];
4083 	audit_event_data_t audit_data;
4084 	au_event_t event_id;
4085 	size_t sz_out;
4086 
4087 	audit_data.ed_auth = NULL;
4088 	audit_data.ed_fmri = fmri;
4089 	audit_data.ed_type = (char *)pgtype;
4090 
4091 	rc_node_clear(cpp, 0);
4092 
4093 	/* verify flags is valid */
4094 	if (flags & ~SCF_PG_FLAG_NONPERSISTENT)
4095 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4096 
4097 	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
4098 
4099 	if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4100 		rc_node_rele(np);
4101 		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
4102 	}
4103 
4104 	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
4105 	    REP_PROTOCOL_SUCCESS) {
4106 		rc_node_rele(np);
4107 		return (rc);
4108 	}
4109 	if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS ||
4110 	    (rc = rc_check_pgtype_name(pgtype)) != REP_PROTOCOL_SUCCESS) {
4111 		rc_node_rele(np);
4112 		return (rc);
4113 	}
4114 
4115 #ifdef NATIVE_BUILD
4116 	if (!client_is_privileged()) {
4117 		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4118 	}
4119 #else
4120 	if (flags & SCF_PG_FLAG_NONPERSISTENT) {
4121 		event_id = ADT_smf_create_npg;
4122 	} else {
4123 		event_id = ADT_smf_create_pg;
4124 	}
4125 	if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
4126 	    name, REP_PROTOCOL_ENTITY_PROPERTYGRP)) != REP_PROTOCOL_SUCCESS) {
4127 		rc_node_rele(np);
4128 		return (rc);
4129 	}
4130 
4131 	if (is_main_repository) {
4132 		/* Must have .smf.modify or smf.modify.<type> authorization */
4133 		pcp = pc_create();
4134 		if (pcp != NULL) {
4135 			rc = perm_add_enabling(pcp, AUTH_MODIFY);
4136 
4137 			if (rc == REP_PROTOCOL_SUCCESS) {
4138 				const char * const auth =
4139 				    perm_auth_for_pgtype(pgtype);
4140 
4141 				if (auth != NULL)
4142 					rc = perm_add_enabling(pcp, auth);
4143 			}
4144 
4145 			/*
4146 			 * .manage or $action_authorization can be used to
4147 			 * create the actions pg and the general_ovr pg.
4148 			 */
4149 			if (rc == REP_PROTOCOL_SUCCESS &&
4150 			    (flags & SCF_PG_FLAG_NONPERSISTENT) != 0 &&
4151 			    np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE &&
4152 			    ((strcmp(name, AUTH_PG_ACTIONS) == 0 &&
4153 			    strcmp(pgtype, AUTH_PG_ACTIONS_TYPE) == 0) ||
4154 			    (strcmp(name, AUTH_PG_GENERAL_OVR) == 0 &&
4155 			    strcmp(pgtype, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
4156 				rc = perm_add_enabling(pcp, AUTH_MANAGE);
4157 
4158 				if (rc == REP_PROTOCOL_SUCCESS)
4159 					rc = perm_add_inst_action_auth(pcp, np);
4160 			}
4161 
4162 			if (rc == REP_PROTOCOL_SUCCESS) {
4163 				granted = perm_granted(pcp);
4164 
4165 				rc = map_granted_status(granted, pcp,
4166 				    &audit_data.ed_auth);
4167 				if (granted == PERM_GONE) {
4168 					/* No auditing if client gone. */
4169 					pc_free(pcp);
4170 					rc_node_rele(np);
4171 					return (rc);
4172 				}
4173 			}
4174 
4175 			pc_free(pcp);
4176 		} else {
4177 			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4178 		}
4179 
4180 	} else {
4181 		rc = REP_PROTOCOL_SUCCESS;
4182 	}
4183 #endif /* NATIVE_BUILD */
4184 
4185 
4186 	if (rc != REP_PROTOCOL_SUCCESS) {
4187 		rc_node_rele(np);
4188 		if (rc != REP_PROTOCOL_FAIL_NO_RESOURCES) {
4189 			smf_audit_event(event_id, ADT_FAILURE,
4190 			    ADT_FAIL_VALUE_AUTH, &audit_data);
4191 		}
4192 		if (audit_data.ed_auth != NULL)
4193 			free(audit_data.ed_auth);
4194 		return (rc);
4195 	}
4196 
4197 	(void) pthread_mutex_lock(&np->rn_lock);
4198 	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
4199 	    audit_data.ed_auth);
4200 	(void) pthread_mutex_unlock(&np->rn_lock);
4201 
4202 	rc = object_create_pg(np, type, name, pgtype, flags, &cp);
4203 
4204 	if (rc == REP_PROTOCOL_SUCCESS) {
4205 		rc_node_assign(cpp, cp);
4206 		rc_node_rele(cp);
4207 	}
4208 
4209 	(void) pthread_mutex_lock(&np->rn_lock);
4210 	rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
4211 	(void) pthread_mutex_unlock(&np->rn_lock);
4212 
4213 	if (rc == REP_PROTOCOL_SUCCESS) {
4214 		smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
4215 		    &audit_data);
4216 	}
4217 	if (audit_data.ed_auth != NULL)
4218 		free(audit_data.ed_auth);
4219 
4220 	return (rc);
4221 }
4222 
4223 static void
4224 rc_pg_notify_fire(rc_node_pg_notify_t *pnp)
4225 {
4226 	assert(MUTEX_HELD(&rc_pg_notify_lock));
4227 
4228 	if (pnp->rnpn_pg != NULL) {
4229 		uu_list_remove(pnp->rnpn_pg->rn_pg_notify_list, pnp);
4230 		(void) close(pnp->rnpn_fd);
4231 
4232 		pnp->rnpn_pg = NULL;
4233 		pnp->rnpn_fd = -1;
4234 	} else {
4235 		assert(pnp->rnpn_fd == -1);
4236 	}
4237 }
4238 
4239 static void
4240 rc_notify_node_delete(rc_notify_delete_t *ndp, rc_node_t *np_arg)
4241 {
4242 	rc_node_t *svc = NULL;
4243 	rc_node_t *inst = NULL;
4244 	rc_node_t *pg = NULL;
4245 	rc_node_t *np = np_arg;
4246 	rc_node_t *nnp;
4247 
4248 	while (svc == NULL) {
4249 		(void) pthread_mutex_lock(&np->rn_lock);
4250 		if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4251 			(void) pthread_mutex_unlock(&np->rn_lock);
4252 			goto cleanup;
4253 		}
4254 		nnp = np->rn_parent;
4255 		rc_node_hold_locked(np);	/* hold it in place */
4256 
4257 		switch (np->rn_id.rl_type) {
4258 		case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4259 			assert(pg == NULL);
4260 			pg = np;
4261 			break;
4262 		case REP_PROTOCOL_ENTITY_INSTANCE:
4263 			assert(inst == NULL);
4264 			inst = np;
4265 			break;
4266 		case REP_PROTOCOL_ENTITY_SERVICE:
4267 			assert(svc == NULL);
4268 			svc = np;
4269 			break;
4270 		default:
4271 			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
4272 			rc_node_rele_locked(np);
4273 			goto cleanup;
4274 		}
4275 
4276 		(void) pthread_mutex_unlock(&np->rn_lock);
4277 
4278 		np = nnp;
4279 		if (np == NULL)
4280 			goto cleanup;
4281 	}
4282 
4283 	rc_notify_deletion(ndp,
4284 	    svc->rn_name,
4285 	    inst != NULL ? inst->rn_name : NULL,
4286 	    pg != NULL ? pg->rn_name : NULL);
4287 
4288 	ndp = NULL;
4289 
4290 cleanup:
4291 	if (ndp != NULL)
4292 		uu_free(ndp);
4293 
4294 	for (;;) {
4295 		if (svc != NULL) {
4296 			np = svc;
4297 			svc = NULL;
4298 		} else if (inst != NULL) {
4299 			np = inst;
4300 			inst = NULL;
4301 		} else if (pg != NULL) {
4302 			np = pg;
4303 			pg = NULL;
4304 		} else
4305 			break;
4306 
4307 		(void) pthread_mutex_lock(&np->rn_lock);
4308 		rc_node_rele_flag(np, RC_NODE_USING_PARENT);
4309 		rc_node_rele_locked(np);
4310 	}
4311 }
4312 
4313 /*
4314  * Hold RC_NODE_DYING_FLAGS on np's descendents.  If andformer is true, do
4315  * the same down the rn_former chain.
4316  */
4317 static void
4318 rc_node_delete_hold(rc_node_t *np, int andformer)
4319 {
4320 	rc_node_t *cp;
4321 
4322 again:
4323 	assert(MUTEX_HELD(&np->rn_lock));
4324 	assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);
4325 
4326 	for (cp = uu_list_first(np->rn_children); cp != NULL;
4327 	    cp = uu_list_next(np->rn_children, cp)) {
4328 		(void) pthread_mutex_lock(&cp->rn_lock);
4329 		(void) pthread_mutex_unlock(&np->rn_lock);
4330 		if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS)) {
4331 			/*
4332 			 * already marked as dead -- can't happen, since that
4333 			 * would require setting RC_NODE_CHILDREN_CHANGING
4334 			 * in np, and we're holding that...
4335 			 */
4336 			abort();
4337 		}
4338 		rc_node_delete_hold(cp, andformer);	/* recurse, drop lock */
4339 
4340 		(void) pthread_mutex_lock(&np->rn_lock);
4341 	}
4342 	if (andformer && (cp = np->rn_former) != NULL) {
4343 		(void) pthread_mutex_lock(&cp->rn_lock);
4344 		(void) pthread_mutex_unlock(&np->rn_lock);
4345 		if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS))
4346 			abort();		/* can't happen, see above */
4347 		np = cp;
4348 		goto again;		/* tail-recurse down rn_former */
4349 	}
4350 	(void) pthread_mutex_unlock(&np->rn_lock);
4351 }
4352 
4353 /*
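/*
 * Editor's note (illustrative commentary, not part of the original source):
 * rc_node_delete_hold() above and rc_node_delete_rele() below both walk the
 * tree with hand-over-hand locking -- each child is locked before the
 * parent's lock is dropped -- so one end of the edge being traversed is
 * always held while the RC_NODE_DYING_FLAGS hold is extended (or released)
 * down the children and along the rn_former chains.
 */
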
4354  * N.B.:  this function drops np->rn_lock on the way out.
4355  */
4356 static void
4357 rc_node_delete_rele(rc_node_t *np, int andformer)
4358 {
4359 	rc_node_t *cp;
4360 
4361 again:
4362 	assert(MUTEX_HELD(&np->rn_lock));
4363 	assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);
4364 
4365 	for (cp = uu_list_first(np->rn_children); cp != NULL;
4366 	    cp = uu_list_next(np->rn_children, cp)) {
4367 		(void) pthread_mutex_lock(&cp->rn_lock);
4368 		(void) pthread_mutex_unlock(&np->rn_lock);
4369 		rc_node_delete_rele(cp, andformer);	/* recurse, drop lock */
4370 		(void) pthread_mutex_lock(&np->rn_lock);
4371 	}
4372 	if (andformer && (cp = np->rn_former) != NULL) {
4373 		(void) pthread_mutex_lock(&cp->rn_lock);
4374 		rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4375 		(void) pthread_mutex_unlock(&np->rn_lock);
4376 
4377 		np = cp;
4378 		goto again;		/* tail-recurse down rn_former */
4379 	}
4380 	rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4381 	(void) pthread_mutex_unlock(&np->rn_lock);
4382 }
4383 
4384 static void
4385 rc_node_finish_delete(rc_node_t *cp)
4386 {
4387 	cache_bucket_t *bp;
4388 	rc_node_pg_notify_t *pnp;
4389 
4390 	assert(MUTEX_HELD(&cp->rn_lock));
4391 
4392 	if (!(cp->rn_flags & RC_NODE_OLD)) {
4393 		assert(cp->rn_flags & RC_NODE_IN_PARENT);
4394 		if (!rc_node_wait_flag(cp, RC_NODE_USING_PARENT)) {
4395 			abort();		/* can't happen, see above */
4396 		}
4397 		cp->rn_flags &= ~RC_NODE_IN_PARENT;
4398 		cp->rn_parent = NULL;
4399 		rc_node_free_fmri(cp);
4400 	}
4401 
4402 	cp->rn_flags |= RC_NODE_DEAD;
4403 
4404 	/*
4405 	 * If this node is not out-dated, we need to remove it from
4406 	 * the notify list and cache hash table.
4407 	 */
4408 	if (!(cp->rn_flags & RC_NODE_OLD)) {
4409 		assert(cp->rn_refs > 0);	/* can't go away yet */
4410 		(void) pthread_mutex_unlock(&cp->rn_lock);
4411 
4412 		(void) pthread_mutex_lock(&rc_pg_notify_lock);
4413 		while ((pnp = uu_list_first(cp->rn_pg_notify_list)) != NULL)
4414 			rc_pg_notify_fire(pnp);
4415 		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
4416 		rc_notify_remove_node(cp);
4417 
4418 		bp = cache_hold(cp->rn_hash);
4419 		(void) pthread_mutex_lock(&cp->rn_lock);
4420 		cache_remove_unlocked(bp, cp);
4421 		cache_release(bp);
4422 	}
4423 }
4424 
4425 /*
4426  * For each child, call rc_node_finish_delete() and recurse.  If andformer
4427  * is set, also recurse down rn_former.  Finally release np, which might
4428  * free it.
4429  */
4430 static void
4431 rc_node_delete_children(rc_node_t *np, int andformer)
4432 {
4433 	rc_node_t *cp;
4434 
4435 again:
4436 	assert(np->rn_refs > 0);
4437 	assert(MUTEX_HELD(&np->rn_lock));
4438 	assert(np->rn_flags & RC_NODE_DEAD);
4439 
4440 	while ((cp = uu_list_first(np->rn_children)) != NULL) {
4441 		uu_list_remove(np->rn_children, cp);
4442 		(void) pthread_mutex_lock(&cp->rn_lock);
4443 		(void) pthread_mutex_unlock(&np->rn_lock);
4444 		rc_node_hold_locked(cp);	/* hold while we recurse */
4445 		rc_node_finish_delete(cp);
4446 		rc_node_delete_children(cp, andformer);	/* drops lock + ref */
4447 		(void) pthread_mutex_lock(&np->rn_lock);
4448 	}
4449 
4450 	/*
4451 	 * When we drop cp's lock, all the children will be gone, so we
4452 	 * can release DYING_FLAGS.
4453 	 */
4454 	rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4455 	if (andformer && (cp = np->rn_former) != NULL) {
4456 		np->rn_former = NULL;		/* unlink */
4457 		(void) pthread_mutex_lock(&cp->rn_lock);
4458 
4459 		/*
4460 		 * Register the ephemeral reference created by reading
4461 		 * np->rn_former into cp.  Note that the persistent
4462 		 * reference (np->rn_former) is locked because we haven't
4463 		 * dropped np's lock since we dropped its RC_NODE_IN_TX
4464 		 * (via RC_NODE_DYING_FLAGS).
4465 		 */
4466 		rc_node_hold_ephemeral_locked(cp);
4467 
4468 		(void) pthread_mutex_unlock(&np->rn_lock);
4469 		cp->rn_flags &= ~RC_NODE_ON_FORMER;
4470 
4471 		rc_node_hold_locked(cp);	/* hold while we loop */
4472 
4473 		rc_node_finish_delete(cp);
4474 
4475 		rc_node_rele(np);		/* drop the old reference */
4476 
4477 		np = cp;
4478 		goto again;		/* tail-recurse down rn_former */
4479 	}
4480 	rc_node_rele_locked(np);
4481 }
4482 
4483 /*
4484  * The last client or child reference to np, which must be either
4485  * RC_NODE_OLD or RC_NODE_DEAD, has been destroyed.  We'll destroy any
4486  * remaining references (e.g., rn_former) and call rc_node_destroy() to
4487  * free np.
4488  */
4489 static void
4490 rc_node_no_client_refs(rc_node_t *np)
4491 {
4492 	int unrefed;
4493 	rc_node_t *current, *cur;
4494 
4495 	assert(MUTEX_HELD(&np->rn_lock));
4496 	assert(np->rn_refs == 0);
4497 	assert(np->rn_other_refs == 0);
4498 	assert(np->rn_other_refs_held == 0);
4499 
4500 	if (np->rn_flags & RC_NODE_DEAD) {
4501 		/*
4502 		 * The node is DEAD, so the deletion code should have
4503 		 * destroyed all rn_children or rn_former references.
4504 		 * Since the last client or child reference has been
4505 		 * destroyed, we're free to destroy np.  Unless another
4506 		 * thread has an ephemeral reference, in which case we'll
4507 		 * pass the buck.
4508 		 */
4509 		if (np->rn_erefs > 1) {
4510 			--np->rn_erefs;
4511 			NODE_UNLOCK(np);
4512 			return;
4513 		}
4514 
4515 		(void) pthread_mutex_unlock(&np->rn_lock);
4516 		rc_node_destroy(np);
4517 		return;
4518 	}
4519 
4520 	/* We only collect DEAD and OLD nodes, thank you. */
4521 	assert(np->rn_flags & RC_NODE_OLD);
4522 
4523 	/*
4524 	 * RC_NODE_UNREFED keeps multiple threads from processing OLD
4525 	 * nodes.  But it's vulnerable to unfriendly scheduling, so full
4526 	 * use of rn_erefs should supersede it someday.
4527 	 */
4528 	if (np->rn_flags & RC_NODE_UNREFED) {
4529 		(void) pthread_mutex_unlock(&np->rn_lock);
4530 		return;
4531 	}
4532 	np->rn_flags |= RC_NODE_UNREFED;
4533 
4534 	/*
4535 	 * Now we'll remove the node from the rn_former chain and take its
4536 	 * DYING_FLAGS.
4537 	 */
4538 
4539 	/*
4540 	 * Since this node is OLD, it should be on an rn_former chain.  To
4541 	 * remove it, we must find the current in-hash object and grab its
4542 	 * RC_NODE_IN_TX flag to protect the entire rn_former chain.
4543 	 */
4544 
4545 	(void) pthread_mutex_unlock(&np->rn_lock);
4546 
4547 	for (;;) {
4548 		current = cache_lookup(&np->rn_id);
4549 
4550 		if (current == NULL) {
4551 			(void) pthread_mutex_lock(&np->rn_lock);
4552 
4553 			if (np->rn_flags & RC_NODE_DEAD)
4554 				goto died;
4555 
4556 			/*
4557 			 * We are trying to unreference this node, but the
4558 			 * owner of the former list does not exist.  It must
4559 			 * be the case that another thread is deleting this
4560 			 * entire sub-branch, but has not yet reached us.
4561 			 * We will in short order be deleted.
4562 			 */
4563 			np->rn_flags &= ~RC_NODE_UNREFED;
4564 			(void) pthread_mutex_unlock(&np->rn_lock);
4565 			return;
4566 		}
4567 
4568 		if (current == np) {
4569 			/*
4570 			 * no longer unreferenced
4571 			 */
4572 			(void) pthread_mutex_lock(&np->rn_lock);
4573 			np->rn_flags &= ~RC_NODE_UNREFED;
4574 			/* held in cache_lookup() */
4575 			rc_node_rele_locked(np);
4576 			return;
4577 		}
4578 
4579 		(void) pthread_mutex_lock(&current->rn_lock);
4580 		if (current->rn_flags & RC_NODE_OLD) {
4581 			/*
4582 			 * current has been replaced since we looked it
4583 			 * up.  Try again.
4584 			 */
4585 			/* held in cache_lookup() */
4586 			rc_node_rele_locked(current);
4587 			continue;
4588 		}
4589 
4590 		if (!rc_node_hold_flag(current, RC_NODE_IN_TX)) {
4591 			/*
4592 			 * current has been deleted since we looked it up.  Try
4593 			 * again.
4594 			 */
4595 			/* held in cache_lookup() */
4596 			rc_node_rele_locked(current);
4597 			continue;
4598 		}
4599 
4600 		/*
4601 		 * rc_node_hold_flag() might have dropped current's lock, so
4602 		 * check OLD again.
4603 		 */
4604 		if (!(current->rn_flags & RC_NODE_OLD)) {
4605 			/* Not old.  Stop looping. */
4606 			(void) pthread_mutex_unlock(&current->rn_lock);
4607 			break;
4608 		}
4609 
4610 		rc_node_rele_flag(current, RC_NODE_IN_TX);
4611 		rc_node_rele_locked(current);
4612 	}
4613 
4614 	/* To take np's RC_NODE_DYING_FLAGS, we need its lock. */
4615 	(void) pthread_mutex_lock(&np->rn_lock);
4616 
4617 	/*
4618 	 * While we didn't have the lock, a thread may have added
4619 	 * a reference or changed the flags.
4620 	 */
4621 	if (!(np->rn_flags & (RC_NODE_OLD | RC_NODE_DEAD)) ||
4622 	    np->rn_refs != 0 || np->rn_other_refs != 0 ||
4623 	    np->rn_other_refs_held != 0) {
4624 		np->rn_flags &= ~RC_NODE_UNREFED;
4625 
4626 		(void) pthread_mutex_lock(&current->rn_lock);
4627 		rc_node_rele_flag(current, RC_NODE_IN_TX);
4628 		/* held by cache_lookup() */
4629 		rc_node_rele_locked(current);
4630 		return;
4631 	}
4632 
4633 	if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4634 		/*
4635 		 * Someone deleted the node while we were waiting for
4636 		 * DYING_FLAGS.  Undo the modifications to current.
4637 		 */
4638 		(void) pthread_mutex_unlock(&np->rn_lock);
4639 
4640 		rc_node_rele_flag(current, RC_NODE_IN_TX);
4641 		/* held by cache_lookup() */
4642 		rc_node_rele_locked(current);
4643 
4644 		(void) pthread_mutex_lock(&np->rn_lock);
4645 		goto died;
4646 	}
4647 
4648 	/* Take RC_NODE_DYING_FLAGS on np's descendents. */
4649 	rc_node_delete_hold(np, 0);		/* drops np->rn_lock */
4650 
4651 	/* Mark np DEAD.  This requires the lock. */
4652 	(void) pthread_mutex_lock(&np->rn_lock);
4653 
4654 	/* Recheck for new references. */
4655 	if (!(np->rn_flags & RC_NODE_OLD) ||
4656 	    np->rn_refs != 0 || np->rn_other_refs != 0 ||
4657 	    np->rn_other_refs_held != 0) {
4658 		np->rn_flags &= ~RC_NODE_UNREFED;
4659 		rc_node_delete_rele(np, 0);	/* drops np's lock */
4660 
4661 		(void) pthread_mutex_lock(&current->rn_lock);
4662 		rc_node_rele_flag(current, RC_NODE_IN_TX);
4663 		/* held by cache_lookup() */
4664 		rc_node_rele_locked(current);
4665 		return;
4666 	}
4667 
4668 	np->rn_flags |= RC_NODE_DEAD;
4669 
4670 	/*
4671 	 * Delete the children.  This calls rc_node_rele_locked() on np at
4672 	 * the end, so add a reference to keep the count from going
4673 	 * negative.  It will recurse with RC_NODE_DEAD set, so we'll call
4674 	 * rc_node_destroy() above, but RC_NODE_UNREFED is also set, so it
4675 	 * shouldn't actually free() np.
4676 	 */
4677 	rc_node_hold_locked(np);
4678 	rc_node_delete_children(np, 0);		/* unlocks np */
4679 
4680 	/* Remove np from current's rn_former chain. */
4681 	(void) pthread_mutex_lock(&current->rn_lock);
4682 	for (cur = current; cur != NULL && cur->rn_former != np;
4683 	    cur = cur->rn_former)
4684 		;
4685 	assert(cur != NULL && cur != np);
4686 
4687 	cur->rn_former = np->rn_former;
4688 	np->rn_former = NULL;
4689 
4690 	rc_node_rele_flag(current, RC_NODE_IN_TX);
4691 	/* held by cache_lookup() */
4692 	rc_node_rele_locked(current);
4693 
4694 	/* Clear ON_FORMER and UNREFED, and destroy. */
4695 	(void) pthread_mutex_lock(&np->rn_lock);
4696 	assert(np->rn_flags & RC_NODE_ON_FORMER);
4697 	np->rn_flags &= ~(RC_NODE_UNREFED | RC_NODE_ON_FORMER);
4698 
4699 	if (np->rn_erefs > 1) {
4700 		/* Still referenced.  Stay execution. */
4701 		--np->rn_erefs;
4702 		NODE_UNLOCK(np);
4703 		return;
4704 	}
4705 
4706 	(void) pthread_mutex_unlock(&np->rn_lock);
4707 	rc_node_destroy(np);
4708 	return;
4709 
4710 died:
4711 	/*
4712 	 * Another thread marked np DEAD.  If there still aren't any
4713 	 * persistent references, destroy the node.
4714 	 */
4715 	np->rn_flags &= ~RC_NODE_UNREFED;
4716 
4717 	unrefed = (np->rn_refs == 0 && np->rn_other_refs == 0 &&
4718 	    np->rn_other_refs_held == 0);
4719 
4720 	if (np->rn_erefs > 0)
4721 		--np->rn_erefs;
4722 
4723 	if (unrefed && np->rn_erefs > 0) {
4724 		NODE_UNLOCK(np);
4725 		return;
4726 	}
4727 
4728 	(void) pthread_mutex_unlock(&np->rn_lock);
4729 
4730 	if (unrefed)
4731 		rc_node_destroy(np);
4732 }
4733 
4734 static au_event_t
4735 get_delete_event_id(rep_protocol_entity_t entity, uint32_t pgflags)
4736 {
4737 	au_event_t	id = 0;
4738 
4739 #ifndef NATIVE_BUILD
4740 	switch (entity) {
4741 	case REP_PROTOCOL_ENTITY_SERVICE:
4742 	case REP_PROTOCOL_ENTITY_INSTANCE:
4743 		id = ADT_smf_delete;
4744 		break;
4745 	case REP_PROTOCOL_ENTITY_SNAPSHOT:
4746 		id = ADT_smf_delete_snap;
4747 		break;
4748 	case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4749 	case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4750 		if (pgflags & SCF_PG_FLAG_NONPERSISTENT) {
4751 			id = ADT_smf_delete_npg;
4752 		} else {
4753 			id = ADT_smf_delete_pg;
4754 		}
4755 		break;
4756 	default:
4757 		abort();
4758 	}
4759 #endif	/* NATIVE_BUILD */
4760 	return (id);
4761 }
4762 
4763 /*
4764  * Fails with
4765  *   _NOT_SET
4766  *   _DELETED
4767  *   _BAD_REQUEST
4768  *   _PERMISSION_DENIED
4769  *   _NO_RESOURCES
4770  *   _TRUNCATED
4771  * and whatever object_delete() fails with.
4772  */
4773 int
4774 rc_node_delete(rc_node_ptr_t *npp)
4775 {
4776 	rc_node_t *np, *np_orig;
4777 	rc_node_t *pp = NULL;
4778 	int rc;
4779 	rc_node_pg_notify_t *pnp;
4780 	cache_bucket_t *bp;
4781 	rc_notify_delete_t *ndp;
4782 	permcheck_t *pcp;
4783 	int granted;
4784 	au_event_t event_id = 0;
4785 	size_t sz_out;
4786 	audit_event_data_t audit_data;
4787 	int audit_failure = 0;
4788 
4789 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
4790 
4791 	audit_data.ed_fmri = NULL;
4792 	audit_data.ed_auth = NULL;
4793 	audit_data.ed_snapname = NULL;
4794 	audit_data.ed_type = NULL;
4795 
4796 	switch (np->rn_id.rl_type) {
4797 	case REP_PROTOCOL_ENTITY_SERVICE:
4798 		event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SERVICE,
4799 		    np->rn_pgflags);
4800 		break;
4801 	case REP_PROTOCOL_ENTITY_INSTANCE:
4802 		event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_INSTANCE,
4803 		    np->rn_pgflags);
4804 		break;
4805 	case REP_PROTOCOL_ENTITY_SNAPSHOT:
4806 		event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SNAPSHOT,
4807 		    np->rn_pgflags);
4808 		audit_data.ed_snapname = strdup(np->rn_name);
4809 		if (audit_data.ed_snapname == NULL) {
4810 			(void) pthread_mutex_unlock(&np->rn_lock);
4811 			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4812 		}
4813 		break;			/* deletable */
4814 
4815 	case REP_PROTOCOL_ENTITY_SCOPE:
4816 	case REP_PROTOCOL_ENTITY_SNAPLEVEL:
4817 		/* Scopes and snaplevels are indelible. */
4818 		(void) pthread_mutex_unlock(&np->rn_lock);
4819 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4820 
4821 	case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4822 		(void) pthread_mutex_unlock(&np->rn_lock);
4823 		np = np->rn_cchain[0];
4824 		RC_NODE_CHECK_AND_LOCK(np);
4825 		event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_CPROPERTYGRP,
4826 		    np->rn_pgflags);
4827 		break;
4828 
4829 	case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4830 		if (np->rn_id.rl_ids[ID_SNAPSHOT] == 0) {
4831 			event_id =
4832 			    get_delete_event_id(REP_PROTOCOL_ENTITY_PROPERTYGRP,
4833 			    np->rn_pgflags);
4834 			audit_data.ed_type = strdup(np->rn_type);
4835 			if (audit_data.ed_type == NULL) {
4836 				(void) pthread_mutex_unlock(&np->rn_lock);
4837 				return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4838 			}
4839 			break;
4840 		}
4841 
4842 		/* Snapshot property groups are indelible. */
4843 		(void) pthread_mutex_unlock(&np->rn_lock);
4844 		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
4845 
4846 	case REP_PROTOCOL_ENTITY_PROPERTY:
4847 		(void) pthread_mutex_unlock(&np->rn_lock);
4848 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4849 
4850 	default:
4851 		assert(0);
4852 		abort();
4853 		break;
4854 	}
4855 
4856 	audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
4857 	if (audit_data.ed_fmri == NULL) {
4858 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4859 		goto cleanout;
4860 	}
4861 	np_orig = np;
4862 	rc_node_hold_locked(np);	/* simplifies rest of the code */
4863 
4864 again:
4865 	/*
4866 	 * The following loop is to deal with the fact that snapshots and
4867 	 * property groups are moving targets -- changes to them result
4868 	 * in a new "child" node.  Since we can only delete from the top node,
4869 	 * we have to loop until we have a non-RC_NODE_OLD version.
4870 	 */
4871 	for (;;) {
4872 		if (!rc_node_wait_flag(np,
4873 		    RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
4874 			rc_node_rele_locked(np);
4875 			rc = REP_PROTOCOL_FAIL_DELETED;
4876 			goto cleanout;
4877 		}
4878 
4879 		if (np->rn_flags & RC_NODE_OLD) {
4880 			rc_node_rele_locked(np);
4881 			np = cache_lookup(&np_orig->rn_id);
4882 			assert(np != np_orig);
4883 
4884 			if (np == NULL) {
4885 				rc = REP_PROTOCOL_FAIL_DELETED;
4886 				goto fail;
4887 			}
4888 			(void) pthread_mutex_lock(&np->rn_lock);
4889 			continue;
4890 		}
4891 
4892 		if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4893 			rc_node_rele_locked(np);
4894 			rc_node_clear(npp, 1);
4895 			rc = REP_PROTOCOL_FAIL_DELETED;
			goto cleanout;	/* np already released; don't fall through */
4896 		}
4897 
4898 		/*
4899 		 * Mark our parent as children changing.  This call drops our
4900 		 * lock and the RC_NODE_USING_PARENT flag, and returns with
4901 		 * pp's lock held
4902 		 */
4903 		pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
4904 		if (pp == NULL) {
4905 			/* our parent is gone, we're going next... */
4906 			rc_node_rele(np);
4907 
4908 			rc_node_clear(npp, 1);
4909 			rc = REP_PROTOCOL_FAIL_DELETED;
4910 			goto cleanout;
4911 		}
4912 
4913 		rc_node_hold_locked(pp);		/* hold for later */
4914 		(void) pthread_mutex_unlock(&pp->rn_lock);
4915 
4916 		(void) pthread_mutex_lock(&np->rn_lock);
4917 		if (!(np->rn_flags & RC_NODE_OLD))
4918 			break;			/* not old -- we're done */
4919 
4920 		(void) pthread_mutex_unlock(&np->rn_lock);
4921 		(void) pthread_mutex_lock(&pp->rn_lock);
4922 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4923 		rc_node_rele_locked(pp);
4924 		(void) pthread_mutex_lock(&np->rn_lock);
4925 		continue;			/* loop around and try again */
4926 	}
4927 	/*
4928 	 * Everyone out of the pool -- we grab everything but
4929 	 * RC_NODE_USING_PARENT (including RC_NODE_DYING) to keep
4930 	 * any changes from occurring while we are attempting to
4931 	 * delete the node.
4932 	 */
4933 	if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4934 		(void) pthread_mutex_unlock(&np->rn_lock);
4935 		rc = REP_PROTOCOL_FAIL_DELETED;
4936 		goto fail;
4937 	}
4938 
4939 	assert(!(np->rn_flags & RC_NODE_OLD));
4940 
4941 	if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
4942 	    REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
4943 		rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4944 		(void) pthread_mutex_unlock(&np->rn_lock);
4945 		goto fail;
4946 	}
4947 
4948 #ifdef NATIVE_BUILD
4949 	if (!client_is_privileged()) {
4950 		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4951 	}
4952 #else
4953 	if (is_main_repository) {
4954 		/* permission check */
4955 		(void) pthread_mutex_unlock(&np->rn_lock);
4956 		pcp = pc_create();
4957 		if (pcp != NULL) {
4958 			rc = perm_add_enabling(pcp, AUTH_MODIFY);
4959 
4960 			/* add .smf.modify.<type> for pgs. */
4961 			if (rc == REP_PROTOCOL_SUCCESS && np->rn_id.rl_type ==
4962 			    REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4963 				const char * const auth =
4964 				    perm_auth_for_pgtype(np->rn_type);
4965 
4966 				if (auth != NULL)
4967 					rc = perm_add_enabling(pcp, auth);
4968 			}
4969 
4970 			if (rc == REP_PROTOCOL_SUCCESS) {
4971 				granted = perm_granted(pcp);
4972 
4973 				rc = map_granted_status(granted, pcp,
4974 				    &audit_data.ed_auth);
4975 				if (granted == PERM_GONE) {
4976 					/* No need to audit if client gone. */
4977 					pc_free(pcp);
4978 					rc_node_rele_flag(np,
4979 					    RC_NODE_DYING_FLAGS);
4980 					return (rc);
4981 				}
4982 				if (granted == PERM_DENIED)
4983 					audit_failure = 1;
4984 			}
4985 
4986 			pc_free(pcp);
4987 		} else {
4988 			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4989 		}
4990 
4991 		(void) pthread_mutex_lock(&np->rn_lock);
4992 	} else {
4993 		rc = REP_PROTOCOL_SUCCESS;
4994 	}
4995 #endif /* NATIVE_BUILD */
4996 
4997 	if (rc != REP_PROTOCOL_SUCCESS) {
4998 		rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4999 		(void) pthread_mutex_unlock(&np->rn_lock);
5000 		goto fail;
5001 	}
5002 
5003 	ndp = uu_zalloc(sizeof (*ndp));
5004 	if (ndp == NULL) {
5005 		rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
5006 		(void) pthread_mutex_unlock(&np->rn_lock);
5007 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5008 		goto fail;
5009 	}
5010 
5011 	rc_node_delete_hold(np, 1);	/* hold entire subgraph, drop lock */
5012 
5013 	rc = object_delete(np);
5014 
5015 	if (rc != REP_PROTOCOL_SUCCESS) {
5016 		(void) pthread_mutex_lock(&np->rn_lock);
5017 		rc_node_delete_rele(np, 1);		/* drops lock */
5018 		uu_free(ndp);
5019 		goto fail;
5020 	}
5021 
5022 	/*
5023 	 * Now, delicately unlink and delete the object.
5024 	 *
5025 	 * Create the delete notification, atomically remove
5026 	 * from the hash table and set the NODE_DEAD flag, and
5027 	 * remove from the parent's children list.
5028 	 */
5029 	rc_notify_node_delete(ndp, np); /* frees or uses ndp */
5030 
5031 	bp = cache_hold(np->rn_hash);
5032 
5033 	(void) pthread_mutex_lock(&np->rn_lock);
5034 	cache_remove_unlocked(bp, np);
5035 	cache_release(bp);
5036 
5037 	np->rn_flags |= RC_NODE_DEAD;
5038 
5039 	if (pp != NULL) {
5040 		/*
5041 		 * Remove from pp's rn_children.  This requires pp's lock,
5042 		 * so we must drop np's lock to respect lock order.
5043 		 */
5044 		(void) pthread_mutex_unlock(&np->rn_lock);
5045 		(void) pthread_mutex_lock(&pp->rn_lock);
5046 		(void) pthread_mutex_lock(&np->rn_lock);
5047 
5048 		uu_list_remove(pp->rn_children, np);
5049 
5050 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5051 
5052 		(void) pthread_mutex_unlock(&pp->rn_lock);
5053 
5054 		np->rn_flags &= ~RC_NODE_IN_PARENT;
5055 	}
5056 
5057 	/*
5058 	 * finally, propagate death to our children (including marking
5059 	 * them DEAD), handle notifications, and release our hold.
5060 	 */
5061 	rc_node_hold_locked(np);	/* hold for delete */
5062 	rc_node_delete_children(np, 1);	/* drops DYING_FLAGS, lock, ref */
5063 
5064 	rc_node_clear(npp, 1);
5065 
5066 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
5067 	while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
5068 		rc_pg_notify_fire(pnp);
5069 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
5070 	rc_notify_remove_node(np);
5071 
5072 	rc_node_rele(np);
5073 
5074 	smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
5075 	    &audit_data);
5076 	free(audit_data.ed_auth);
5077 	free(audit_data.ed_snapname);
5078 	free(audit_data.ed_type);
5079 	free(audit_data.ed_fmri);
5080 	return (rc);
5081 
5082 fail:
5083 	rc_node_rele(np);
5084 	if (rc == REP_PROTOCOL_FAIL_DELETED)
5085 		rc_node_clear(npp, 1);
5086 	if (pp != NULL) {
5087 		(void) pthread_mutex_lock(&pp->rn_lock);
5088 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5089 		rc_node_rele_locked(pp);	/* drop ref and lock */
5090 	}
5091 	if (audit_failure) {
5092 		smf_audit_event(event_id, ADT_FAILURE,
5093 		    ADT_FAIL_VALUE_AUTH, &audit_data);
5094 	}
5095 cleanout:
5096 	free(audit_data.ed_auth);
5097 	free(audit_data.ed_snapname);
5098 	free(audit_data.ed_type);
5099 	free(audit_data.ed_fmri);
5100 	return (rc);
5101 }
5102 
5103 int
5104 rc_node_next_snaplevel(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
5105 {
5106 	rc_node_t *np;
5107 	rc_node_t *cp, *pp;
5108 	int res;
5109 
5110 	rc_node_clear(cpp, 0);
5111 
5112 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5113 
5114 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT &&
5115 	    np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) {
5116 		(void) pthread_mutex_unlock(&np->rn_lock);
5117 		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
5118 	}
5119 
5120 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
5121 		if ((res = rc_node_fill_children(np,
5122 		    REP_PROTOCOL_ENTITY_SNAPLEVEL)) != REP_PROTOCOL_SUCCESS) {
5123 			(void) pthread_mutex_unlock(&np->rn_lock);
5124 			return (res);
5125 		}
5126 
5127 		for (cp = uu_list_first(np->rn_children);
5128 		    cp != NULL;
5129 		    cp = uu_list_next(np->rn_children, cp)) {
5130 			if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
5131 				continue;
5132 			rc_node_hold(cp);
5133 			break;
5134 		}
5135 
5136 		(void) pthread_mutex_unlock(&np->rn_lock);
5137 	} else {
5138 		if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
5139 			(void) pthread_mutex_unlock(&np->rn_lock);
5140 			rc_node_clear(npp, 1);
5141 			return (REP_PROTOCOL_FAIL_DELETED);
5142 		}
5143 
5144 		/*
5145 		 * mark our parent as children changing.  This call drops our
5146 		 * lock and the RC_NODE_USING_PARENT flag, and returns with
5147 		 * pp's lock held
5148 		 */
5149 		pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
5150 		if (pp == NULL) {
5151 			/* our parent is gone, we're going next... */
5152 
5153 			rc_node_clear(npp, 1);
5154 			return (REP_PROTOCOL_FAIL_DELETED);
5155 		}
5156 
5157 		/*
5158 		 * find the next snaplevel
5159 		 */
5160 		cp = np;
5161 		while ((cp = uu_list_next(pp->rn_children, cp)) != NULL &&
5162 		    cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
5163 			;
5164 
5165 		/* it must match the snaplevel list */
5166 		assert((cp == NULL && np->rn_snaplevel->rsl_next == NULL) ||
5167 		    (cp != NULL && np->rn_snaplevel->rsl_next ==
5168 		    cp->rn_snaplevel));
5169 
5170 		if (cp != NULL)
5171 			rc_node_hold(cp);
5172 
5173 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5174 
5175 		(void) pthread_mutex_unlock(&pp->rn_lock);
5176 	}
5177 
5178 	rc_node_assign(cpp, cp);
5179 	if (cp != NULL) {
5180 		rc_node_rele(cp);
5181 
5182 		return (REP_PROTOCOL_SUCCESS);
5183 	}
5184 	return (REP_PROTOCOL_FAIL_NOT_FOUND);
5185 }
5186 
5187 /*
5188  * This call takes a snapshot (np) and either:
5189  *	an existing snapid (to be associated with np), or
5190  *	a non-NULL parentp (from which a new snapshot is taken, and associated
5191  *	    with np)
5192  *
5193  * To do the association, np is duplicated, the duplicate is made to
5194  * represent the new snapid, and np is replaced with the new rc_node_t on
5195  * np's parent's child list.  np is placed on the new node's rn_former list,
5196  * and the new node replaces np in cache_hash (so rc_node_update() finds it).
5197  *
5198  * old_fmri and old_name point to the original snapshot's FMRI and name.
5199  * These values are used when generating audit events.
5200  *
5201  * Fails with
5202  *	_BAD_REQUEST
5203  *	_BACKEND_READONLY
5204  *	_DELETED
5205  *	_NO_RESOURCES
5206  *	_TRUNCATED
5207  *	_TYPE_MISMATCH
5208  */
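/*
 * Editor's sketch (illustrative, not part of the original source) of the
 * relinking described above, where nnp is the duplicate that represents the
 * new snapid:
 *
 *	before:	parent ---> np		cache_hash ---> np
 *
 *	after:	parent ---> nnp		cache_hash ---> nnp
 *				  \
 *				   rn_former ---> np ---> (older copies ...)
 */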
5209 static int
5210 rc_attach_snapshot(
5211 	rc_node_t *np,
5212 	uint32_t snapid,
5213 	rc_node_t *parentp,
5214 	char *old_fmri,
5215 	char *old_name)
5216 {
5217 	rc_node_t *np_orig;
5218 	rc_node_t *nnp, *prev;
5219 	rc_node_t *pp;
5220 	int rc;
5221 	size_t sz_out;
5222 	perm_status_t granted;
5223 	au_event_t event_id;
5224 	audit_event_data_t audit_data;
5225 
5226 	if (parentp == NULL) {
5227 		assert(old_fmri != NULL);
5228 	} else {
5229 		assert(snapid == 0);
5230 	}
5231 	assert(MUTEX_HELD(&np->rn_lock));
5232 
5233 	/* Gather the audit data. */
5234 	/*
5235 	 * ADT_smf_* symbols may not be defined in the /usr/include header
5236 	 * files on the build machine.  Thus, the following if-else will
5237 	 * not be compiled when doing native builds.
5238 	 */
5239 #ifndef	NATIVE_BUILD
5240 	if (parentp == NULL) {
5241 		event_id = ADT_smf_attach_snap;
5242 	} else {
5243 		event_id = ADT_smf_create_snap;
5244 	}
5245 #endif	/* NATIVE_BUILD */
5246 	audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
5247 	audit_data.ed_snapname = malloc(REP_PROTOCOL_NAME_LEN);
5248 	if ((audit_data.ed_fmri == NULL) || (audit_data.ed_snapname == NULL)) {
5249 		(void) pthread_mutex_unlock(&np->rn_lock);
5250 		free(audit_data.ed_fmri);
5251 		free(audit_data.ed_snapname);
5252 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5253 	}
5254 	audit_data.ed_auth = NULL;
5255 	if (strlcpy(audit_data.ed_snapname, np->rn_name,
5256 	    REP_PROTOCOL_NAME_LEN) >= REP_PROTOCOL_NAME_LEN) {
5257 		abort();
5258 	}
5259 	audit_data.ed_old_fmri = old_fmri;
5260 	audit_data.ed_old_name = old_name ? old_name : "NO NAME";
5261 
5262 	if (parentp == NULL) {
5263 		/*
5264 		 * In the attach case, get the instance FMRIs of the
5265 		 * snapshots.
5266 		 */
5267 		if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
5268 		    REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
5269 			(void) pthread_mutex_unlock(&np->rn_lock);
5270 			free(audit_data.ed_fmri);
5271 			free(audit_data.ed_snapname);
5272 			return (rc);
5273 		}
5274 	} else {
5275 		/*
5276 		 * Capture the FMRI of the parent if we're actually going
5277 		 * to take the snapshot.
5278 		 */
5279 		if ((rc = rc_node_get_fmri_or_fragment(parentp,
5280 		    audit_data.ed_fmri, REP_PROTOCOL_FMRI_LEN, &sz_out)) !=
5281 		    REP_PROTOCOL_SUCCESS) {
5282 			(void) pthread_mutex_unlock(&np->rn_lock);
5283 			free(audit_data.ed_fmri);
5284 			free(audit_data.ed_snapname);
5285 			return (rc);
5286 		}
5287 	}
5288 
5289 	np_orig = np;
5290 	rc_node_hold_locked(np);		/* simplifies the remainder */
5291 
5292 	(void) pthread_mutex_unlock(&np->rn_lock);
5293 	granted = rc_node_modify_permission_check(&audit_data.ed_auth);
5294 	switch (granted) {
5295 	case PERM_DENIED:
5296 		smf_audit_event(event_id, ADT_FAILURE, ADT_FAIL_VALUE_AUTH,
5297 		    &audit_data);
5298 		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5299 		rc_node_rele(np);
5300 		goto cleanout;
5301 	case PERM_GRANTED:
5302 		break;
5303 	case PERM_GONE:
5304 		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5305 		rc_node_rele(np);
5306 		goto cleanout;
5307 	case PERM_FAIL:
5308 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5309 		rc_node_rele(np);
5310 		goto cleanout;
5311 	default:
5312 		bad_error("rc_node_modify_permission_check", granted);
5313 	}
5314 	(void) pthread_mutex_lock(&np->rn_lock);
5315 
5316 	/*
5317 	 * get the latest node, holding RC_NODE_IN_TX to keep the rn_former
5318 	 * list from changing.
5319 	 */
5320 	for (;;) {
5321 		if (!(np->rn_flags & RC_NODE_OLD)) {
5322 			if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
5323 				goto again;
5324 			}
5325 			pp = rc_node_hold_parent_flag(np,
5326 			    RC_NODE_CHILDREN_CHANGING);
5327 
5328 			(void) pthread_mutex_lock(&np->rn_lock);
5329 			if (pp == NULL) {
5330 				goto again;
5331 			}
5332 			if (np->rn_flags & RC_NODE_OLD) {
5333 				rc_node_rele_flag(pp,
5334 				    RC_NODE_CHILDREN_CHANGING);
5335 				(void) pthread_mutex_unlock(&pp->rn_lock);
5336 				goto again;
5337 			}
5338 			(void) pthread_mutex_unlock(&pp->rn_lock);
5339 
5340 			if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
5341 				/*
5342 				 * Can't happen, since we're holding our
5343 				 * parent's CHILDREN_CHANGING flag...
5344 				 */
5345 				abort();
5346 			}
5347 			break;			/* everything's ready */
5348 		}
5349 again:
5350 		rc_node_rele_locked(np);
5351 		np = cache_lookup(&np_orig->rn_id);
5352 
5353 		if (np == NULL) {
5354 			rc = REP_PROTOCOL_FAIL_DELETED;
5355 			goto cleanout;
5356 		}
5357 
5358 		(void) pthread_mutex_lock(&np->rn_lock);
5359 	}
5360 
5361 	if (parentp != NULL) {
5362 		if (pp != parentp) {
5363 			rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
5364 			goto fail;
5365 		}
5366 		nnp = NULL;
5367 	} else {
5368 		/*
5369 		 * look for a former node with the snapid we need.
5370 		 */
5371 		if (np->rn_snapshot_id == snapid) {
5372 			rc_node_rele_flag(np, RC_NODE_IN_TX);
5373 			rc_node_rele_locked(np);
5374 
5375 			(void) pthread_mutex_lock(&pp->rn_lock);
5376 			rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5377 			(void) pthread_mutex_unlock(&pp->rn_lock);
5378 			rc = REP_PROTOCOL_SUCCESS;	/* nothing to do */
5379 			goto cleanout;
5380 		}
5381 
5382 		prev = np;
5383 		while ((nnp = prev->rn_former) != NULL) {
5384 			if (nnp->rn_snapshot_id == snapid) {
5385 				rc_node_hold(nnp);
5386 				break;		/* existing node with that id */
5387 			}
5388 			prev = nnp;
5389 		}
5390 	}
5391 
5392 	if (nnp == NULL) {
5393 		prev = NULL;
5394 		nnp = rc_node_alloc();
5395 		if (nnp == NULL) {
5396 			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5397 			goto fail;
5398 		}
5399 
5400 		nnp->rn_id = np->rn_id;		/* structure assignment */
5401 		nnp->rn_hash = np->rn_hash;
5402 		nnp->rn_name = strdup(np->rn_name);
5403 		nnp->rn_snapshot_id = snapid;
5404 		nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
5405 
5406 		if (nnp->rn_name == NULL) {
5407 			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5408 			goto fail;
5409 		}
5410 	}
5411 
5412 	(void) pthread_mutex_unlock(&np->rn_lock);
5413 
5414 	rc = object_snapshot_attach(&np->rn_id, &snapid, (parentp != NULL));
5415 
5416 	if (parentp != NULL)
5417 		nnp->rn_snapshot_id = snapid;	/* fill in new snapid */
5418 	else
5419 		assert(nnp->rn_snapshot_id == snapid);
5420 
5421 	(void) pthread_mutex_lock(&np->rn_lock);
5422 	if (rc != REP_PROTOCOL_SUCCESS)
5423 		goto fail;
5424 
5425 	/*
5426 	 * fix up the former chain
5427 	 */
5428 	if (prev != NULL) {
5429 		prev->rn_former = nnp->rn_former;
5430 		(void) pthread_mutex_lock(&nnp->rn_lock);
5431 		nnp->rn_flags &= ~RC_NODE_ON_FORMER;
5432 		nnp->rn_former = NULL;
5433 		(void) pthread_mutex_unlock(&nnp->rn_lock);
5434 	}
5435 	np->rn_flags |= RC_NODE_OLD;
5436 	(void) pthread_mutex_unlock(&np->rn_lock);
5437 
5438 	/*
5439 	 * replace np with nnp
5440 	 */
5441 	rc_node_relink_child(pp, np, nnp);
5442 
5443 	rc_node_rele(np);
5444 	smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS, &audit_data);
5445 	rc = REP_PROTOCOL_SUCCESS;
5446 
5447 cleanout:
5448 	free(audit_data.ed_auth);
5449 	free(audit_data.ed_fmri);
5450 	free(audit_data.ed_snapname);
5451 	return (rc);
5452 
5453 fail:
5454 	rc_node_rele_flag(np, RC_NODE_IN_TX);
5455 	rc_node_rele_locked(np);
5456 	(void) pthread_mutex_lock(&pp->rn_lock);
5457 	rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5458 	(void) pthread_mutex_unlock(&pp->rn_lock);
5459 
5460 	if (nnp != NULL) {
5461 		if (prev == NULL)
5462 			rc_node_destroy(nnp);
5463 		else
5464 			rc_node_rele(nnp);
5465 	}
5466 
5467 	free(audit_data.ed_auth);
5468 	free(audit_data.ed_fmri);
5469 	free(audit_data.ed_snapname);
5470 	return (rc);
5471 }
5472 
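/*
 * Take a new snapshot, named 'name', of the instance in *npp and assign the
 * result to *outpp.  svcname and instname, when non-NULL, are validated as
 * service/instance names and passed through to object_snapshot_take_new().
 * Generates an ADT_smf_create_snap audit event on success and on
 * authorization failure.
 */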
5473 int
5474 rc_snapshot_take_new(rc_node_ptr_t *npp, const char *svcname,
5475     const char *instname, const char *name, rc_node_ptr_t *outpp)
5476 {
5477 	perm_status_t granted;
5478 	rc_node_t *np;
5479 	rc_node_t *outp = NULL;
5480 	int rc, perm_rc;
5481 	char fmri[REP_PROTOCOL_FMRI_LEN];
5482 	audit_event_data_t audit_data;
5483 	size_t sz_out;
5484 
5485 	rc_node_clear(outpp, 0);
5486 
5487 	/*
5488 	 * rc_node_modify_permission_check() must be called before the node
5489 	 * is locked.  This is because the library functions that check
5490 	 * authorizations can trigger calls back into configd.
5491 	 */
5492 	granted = rc_node_modify_permission_check(&audit_data.ed_auth);
5493 	switch (granted) {
5494 	case PERM_DENIED:
5495 		/*
5496 		 * We continue in this case, so that we can generate an
5497 		 * audit event later in this function.
5498 		 */
5499 		perm_rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5500 		break;
5501 	case PERM_GRANTED:
5502 		perm_rc = REP_PROTOCOL_SUCCESS;
5503 		break;
5504 	case PERM_GONE:
5505 		/* No need to produce audit event if client is gone. */
5506 		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5507 	case PERM_FAIL:
5508 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5509 	default:
5510 		bad_error("rc_node_modify_permission_check", granted);
5511 		break;
5512 	}
5513 
5514 	RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, audit_data.ed_auth);
5515 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5516 		(void) pthread_mutex_unlock(&np->rn_lock);
5517 		free(audit_data.ed_auth);
5518 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5519 	}
5520 
5521 	rc = rc_check_type_name(REP_PROTOCOL_ENTITY_SNAPSHOT, name);
5522 	if (rc != REP_PROTOCOL_SUCCESS) {
5523 		(void) pthread_mutex_unlock(&np->rn_lock);
5524 		free(audit_data.ed_auth);
5525 		return (rc);
5526 	}
5527 
5528 	if (svcname != NULL && (rc =
5529 	    rc_check_type_name(REP_PROTOCOL_ENTITY_SERVICE, svcname)) !=
5530 	    REP_PROTOCOL_SUCCESS) {
5531 		(void) pthread_mutex_unlock(&np->rn_lock);
5532 		free(audit_data.ed_auth);
5533 		return (rc);
5534 	}
5535 
5536 	if (instname != NULL && (rc =
5537 	    rc_check_type_name(REP_PROTOCOL_ENTITY_INSTANCE, instname)) !=
5538 	    REP_PROTOCOL_SUCCESS) {
5539 		(void) pthread_mutex_unlock(&np->rn_lock);
5540 		free(audit_data.ed_auth);
5541 		return (rc);
5542 	}
5543 
5544 	audit_data.ed_fmri = fmri;
5545 	audit_data.ed_snapname = (char *)name;
5546 
5547 	if ((rc = rc_node_get_fmri_or_fragment(np, fmri, sizeof (fmri),
5548 	    &sz_out)) != REP_PROTOCOL_SUCCESS) {
5549 		(void) pthread_mutex_unlock(&np->rn_lock);
5550 		free(audit_data.ed_auth);
5551 		return (rc);
5552 	}
5553 	if (perm_rc != REP_PROTOCOL_SUCCESS) {
5554 		(void) pthread_mutex_unlock(&np->rn_lock);
5555 		smf_audit_event(ADT_smf_create_snap, ADT_FAILURE,
5556 		    ADT_FAIL_VALUE_AUTH, &audit_data);
5557 		free(audit_data.ed_auth);
5558 		return (perm_rc);
5559 	}
5560 
5561 	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
5562 	    audit_data.ed_auth);
5563 	(void) pthread_mutex_unlock(&np->rn_lock);
5564 
5565 	rc = object_snapshot_take_new(np, svcname, instname, name, &outp);
5566 
5567 	if (rc == REP_PROTOCOL_SUCCESS) {
5568 		rc_node_assign(outpp, outp);
5569 		rc_node_rele(outp);
5570 	}
5571 
5572 	(void) pthread_mutex_lock(&np->rn_lock);
5573 	rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
5574 	(void) pthread_mutex_unlock(&np->rn_lock);
5575 
5576 	if (rc == REP_PROTOCOL_SUCCESS) {
5577 		smf_audit_event(ADT_smf_create_snap, ADT_SUCCESS, ADT_SUCCESS,
5578 		    &audit_data);
5579 	}
5580 	if (audit_data.ed_auth != NULL)
5581 		free(audit_data.ed_auth);
5582 	return (rc);
5583 }
5584 
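/*
 * Take a snapshot of the instance in *npp and attach it to the existing
 * snapshot in *outpp (the parentp != NULL case of rc_attach_snapshot()
 * above).
 */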
5585 int
5586 rc_snapshot_take_attach(rc_node_ptr_t *npp, rc_node_ptr_t *outpp)
5587 {
5588 	rc_node_t *np, *outp;
5589 
5590 	RC_NODE_PTR_GET_CHECK(np, npp);
5591 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5592 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5593 	}
5594 
5595 	RC_NODE_PTR_GET_CHECK_AND_LOCK(outp, outpp);
5596 	if (outp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5597 		(void) pthread_mutex_unlock(&outp->rn_lock);
5598 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5599 	}
5600 
5601 	return (rc_attach_snapshot(outp, 0, np, NULL,
5602 	    NULL));					/* drops outp's lock */
5603 }
5604 
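/*
 * Attach the snapid of the snapshot in *npp to the snapshot in *cpp (the
 * parentp == NULL case of rc_attach_snapshot() above).  *npp's FMRI and name
 * are passed along as the "old" values for the ADT_smf_attach_snap audit
 * event.
 */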
5605 int
5606 rc_snapshot_attach(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
5607 {
5608 	rc_node_t *np;
5609 	rc_node_t *cp;
5610 	uint32_t snapid;
5611 	char old_name[REP_PROTOCOL_NAME_LEN];
5612 	int rc;
5613 	size_t sz_out;
5614 	char old_fmri[REP_PROTOCOL_FMRI_LEN];
5615 
5616 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5617 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5618 		(void) pthread_mutex_unlock(&np->rn_lock);
5619 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5620 	}
5621 	snapid = np->rn_snapshot_id;
5622 	rc = rc_node_get_fmri_or_fragment(np, old_fmri, sizeof (old_fmri),
5623 	    &sz_out);
5624 	(void) pthread_mutex_unlock(&np->rn_lock);
5625 	if (rc != REP_PROTOCOL_SUCCESS)
5626 		return (rc);
5627 	if (np->rn_name != NULL) {
5628 		if (strlcpy(old_name, np->rn_name, sizeof (old_name)) >=
5629 		    sizeof (old_name)) {
5630 			return (REP_PROTOCOL_FAIL_TRUNCATED);
5631 		}
5632 	}
5633 
5634 	RC_NODE_PTR_GET_CHECK_AND_LOCK(cp, cpp);
5635 	if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5636 		(void) pthread_mutex_unlock(&cp->rn_lock);
5637 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5638 	}
5639 
5640 	rc = rc_attach_snapshot(cp, snapid, NULL,
5641 	    old_fmri, old_name);			/* drops cp's lock */
5642 	return (rc);
5643 }
5644 
5645 /*
5646  * If the pgname property group under ent has type pgtype, and it has a
5647  * propname property with type ptype, return _SUCCESS.  If pgtype is NULL,
5648  * it is not checked.  If ent is not a service node, we will return _SUCCESS if
5649  * a property meeting the requirements exists in either ent or its ancestor
5650  * service.
5651  *
5652  * Returns
5653  *   _SUCCESS - see above
5654  *   _DELETED - ent or one of its ancestors was deleted
5655  *   _NO_RESOURCES - no resources
5656  *   _NOT_FOUND - no matching property was found
5657  */
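/*
 * For example, rc_node_pg_check_read_protect() below uses this to ask
 * whether a property group (at either the instance or the service level)
 * defines a read_authorization string property:
 *
 *	rc_svc_prop_exists(ent, np->rn_name, np->rn_type,
 *	    AUTH_PROP_READ, REP_PROTOCOL_TYPE_STRING)
 */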
5658 static int
5659 rc_svc_prop_exists(rc_node_t *ent, const char *pgname, const char *pgtype,
5660     const char *propname, rep_protocol_value_type_t ptype)
5661 {
5662 	int ret;
5663 	rc_node_t *pg = NULL, *spg = NULL, *svc, *prop;
5664 
5665 	assert(!MUTEX_HELD(&ent->rn_lock));
5666 
5667 	(void) pthread_mutex_lock(&ent->rn_lock);
5668 	ret = rc_node_find_named_child(ent, pgname,
5669 	    REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
5670 	(void) pthread_mutex_unlock(&ent->rn_lock);
5671 
5672 	switch (ret) {
5673 	case REP_PROTOCOL_SUCCESS:
5674 		break;
5675 
5676 	case REP_PROTOCOL_FAIL_DELETED:
5677 	case REP_PROTOCOL_FAIL_NO_RESOURCES:
5678 		return (ret);
5679 
5680 	default:
5681 		bad_error("rc_node_find_named_child", ret);
5682 	}
5683 
5684 	if (ent->rn_id.rl_type != REP_PROTOCOL_ENTITY_SERVICE) {
5685 		ret = rc_node_find_ancestor(ent, REP_PROTOCOL_ENTITY_SERVICE,
5686 		    &svc);
5687 		if (ret != REP_PROTOCOL_SUCCESS) {
5688 			assert(ret == REP_PROTOCOL_FAIL_DELETED);
5689 			if (pg != NULL)
5690 				rc_node_rele(pg);
5691 			return (ret);
5692 		}
5693 		assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);
5694 
5695 		(void) pthread_mutex_lock(&svc->rn_lock);
5696 		ret = rc_node_find_named_child(svc, pgname,
5697 		    REP_PROTOCOL_ENTITY_PROPERTYGRP, &spg);
5698 		(void) pthread_mutex_unlock(&svc->rn_lock);
5699 
5700 		rc_node_rele(svc);
5701 
5702 		switch (ret) {
5703 		case REP_PROTOCOL_SUCCESS:
5704 			break;
5705 
5706 		case REP_PROTOCOL_FAIL_DELETED:
5707 		case REP_PROTOCOL_FAIL_NO_RESOURCES:
5708 			if (pg != NULL)
5709 				rc_node_rele(pg);
5710 			return (ret);
5711 
5712 		default:
5713 			bad_error("rc_node_find_named_child", ret);
5714 		}
5715 	}
5716 
5717 	if (pg != NULL &&
5718 	    pgtype != NULL && strcmp(pg->rn_type, pgtype) != 0) {
5719 		rc_node_rele(pg);
5720 		pg = NULL;
5721 	}
5722 
5723 	if (spg != NULL &&
5724 	    pgtype != NULL && strcmp(spg->rn_type, pgtype) != 0) {
5725 		rc_node_rele(spg);
5726 		spg = NULL;
5727 	}
5728 
5729 	if (pg == NULL) {
5730 		if (spg == NULL)
5731 			return (REP_PROTOCOL_FAIL_NOT_FOUND);
5732 		pg = spg;
5733 		spg = NULL;
5734 	}
5735 
5736 	/*
5737 	 * At this point, pg is non-NULL, and is a property group node of the
5738 	 * correct type.  spg, if non-NULL, is also a property group node of
5739 	 * the correct type.  Check for the property in pg first, then spg
5740 	 * (if applicable).
5741 	 */
5742 	(void) pthread_mutex_lock(&pg->rn_lock);
5743 	ret = rc_node_find_named_child(pg, propname,
5744 	    REP_PROTOCOL_ENTITY_PROPERTY, &prop);
5745 	(void) pthread_mutex_unlock(&pg->rn_lock);
5746 	rc_node_rele(pg);
5747 	switch (ret) {
5748 	case REP_PROTOCOL_SUCCESS:
5749 		if (prop != NULL) {
5750 			if (prop->rn_valtype == ptype) {
5751 				rc_node_rele(prop);
5752 				if (spg != NULL)
5753 					rc_node_rele(spg);
5754 				return (REP_PROTOCOL_SUCCESS);
5755 			}
5756 			rc_node_rele(prop);
5757 		}
5758 		break;
5759 
5760 	case REP_PROTOCOL_FAIL_NO_RESOURCES:
5761 		if (spg != NULL)
5762 			rc_node_rele(spg);
5763 		return (ret);
5764 
5765 	case REP_PROTOCOL_FAIL_DELETED:
5766 		break;
5767 
5768 	default:
5769 		bad_error("rc_node_find_named_child", ret);
5770 	}
5771 
5772 	if (spg == NULL)
5773 		return (REP_PROTOCOL_FAIL_NOT_FOUND);
5774 
5775 	pg = spg;
5776 
5777 	(void) pthread_mutex_lock(&pg->rn_lock);
5778 	ret = rc_node_find_named_child(pg, propname,
5779 	    REP_PROTOCOL_ENTITY_PROPERTY, &prop);
5780 	(void) pthread_mutex_unlock(&pg->rn_lock);
5781 	rc_node_rele(pg);
5782 	switch (ret) {
5783 	case REP_PROTOCOL_SUCCESS:
5784 		if (prop != NULL) {
5785 			if (prop->rn_valtype == ptype) {
5786 				rc_node_rele(prop);
5787 				return (REP_PROTOCOL_SUCCESS);
5788 			}
5789 			rc_node_rele(prop);
5790 		}
5791 		return (REP_PROTOCOL_FAIL_NOT_FOUND);
5792 
5793 	case REP_PROTOCOL_FAIL_NO_RESOURCES:
5794 		return (ret);
5795 
5796 	case REP_PROTOCOL_FAIL_DELETED:
5797 		return (REP_PROTOCOL_FAIL_NOT_FOUND);
5798 
5799 	default:
5800 		bad_error("rc_node_find_named_child", ret);
5801 	}
5802 
5803 	return (REP_PROTOCOL_SUCCESS);
5804 }
5805 
5806 /*
5807  * Given a property group node, returns _SUCCESS if the property group may
5808  * be read without any special authorization.
5809  *
5810  * Fails with:
5811  *   _DELETED - np or an ancestor node was deleted
5812  *   _TYPE_MISMATCH - np does not refer to a property group
5813  *   _NO_RESOURCES - no resources
5814  *   _PERMISSION_DENIED - authorization is required
5815  */
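/*
 * Property groups of type framework, dependency, or method are always
 * readable.  Any other pg is freely readable only if no read_authorization
 * string property is defined for it (at either the instance or the service
 * level, see rc_svc_prop_exists() above); if one is defined,
 * _PERMISSION_DENIED is returned and rc_node_property_may_read() goes on to
 * check the caller's authorizations.
 */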
5816 static int
5817 rc_node_pg_check_read_protect(rc_node_t *np)
5818 {
5819 	int ret;
5820 	rc_node_t *ent;
5821 
5822 	assert(!MUTEX_HELD(&np->rn_lock));
5823 
5824 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
5825 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5826 
5827 	if (strcmp(np->rn_type, SCF_GROUP_FRAMEWORK) == 0 ||
5828 	    strcmp(np->rn_type, SCF_GROUP_DEPENDENCY) == 0 ||
5829 	    strcmp(np->rn_type, SCF_GROUP_METHOD) == 0)
5830 		return (REP_PROTOCOL_SUCCESS);
5831 
5832 	ret = rc_node_parent(np, &ent);
5833 
5834 	if (ret != REP_PROTOCOL_SUCCESS)
5835 		return (ret);
5836 
5837 	ret = rc_svc_prop_exists(ent, np->rn_name, np->rn_type,
5838 	    AUTH_PROP_READ, REP_PROTOCOL_TYPE_STRING);
5839 
5840 	rc_node_rele(ent);
5841 
5842 	switch (ret) {
5843 	case REP_PROTOCOL_FAIL_NOT_FOUND:
5844 		return (REP_PROTOCOL_SUCCESS);
5845 	case REP_PROTOCOL_SUCCESS:
5846 		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5847 	case REP_PROTOCOL_FAIL_DELETED:
5848 	case REP_PROTOCOL_FAIL_NO_RESOURCES:
5849 		return (ret);
5850 	default:
5851 		bad_error("rc_svc_prop_exists", ret);
5852 	}
5853 
5854 	return (REP_PROTOCOL_SUCCESS);
5855 }
5856 
5857 /*
5858  * Fails with
5859  *   _DELETED - np's node or parent has been deleted
5860  *   _TYPE_MISMATCH - np's node is not a property
5861  *   _NO_RESOURCES - out of memory
5862  *   _PERMISSION_DENIED - no authorization to read this property's value(s)
5863  *   _BAD_REQUEST - np's parent is not a property group
5864  */
5865 static int
5866 rc_node_property_may_read(rc_node_t *np)
5867 {
5868 	int ret;
5869 	perm_status_t granted = PERM_DENIED;
5870 	rc_node_t *pgp;
5871 	permcheck_t *pcp;
5872 	audit_event_data_t audit_data;
5873 	size_t sz_out;
5874 
5875 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
5876 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5877 
5878 	if (client_is_privileged())
5879 		return (REP_PROTOCOL_SUCCESS);
5880 
5881 #ifdef NATIVE_BUILD
5882 	return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5883 #else
5884 	ret = rc_node_parent(np, &pgp);
5885 
5886 	if (ret != REP_PROTOCOL_SUCCESS)
5887 		return (ret);
5888 
5889 	if (pgp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
5890 		rc_node_rele(pgp);
5891 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5892 	}
5893 
5894 	ret = rc_node_pg_check_read_protect(pgp);
5895 
5896 	if (ret != REP_PROTOCOL_FAIL_PERMISSION_DENIED) {
5897 		rc_node_rele(pgp);
5898 		return (ret);
5899 	}
5900 
5901 	pcp = pc_create();
5902 
5903 	if (pcp == NULL) {
5904 		rc_node_rele(pgp);
5905 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5906 	}
5907 
5908 	ret = perm_add_enabling(pcp, AUTH_MODIFY);
5909 
5910 	if (ret == REP_PROTOCOL_SUCCESS) {
5911 		const char * const auth =
5912 		    perm_auth_for_pgtype(pgp->rn_type);
5913 
5914 		if (auth != NULL)
5915 			ret = perm_add_enabling(pcp, auth);
5916 	}
5917 
5918 	/*
5919 	 * If you are permitted to modify the value, you may also
5920 	 * read it.  This means that both the MODIFY and VALUE
5921 	 * authorizations are acceptable.  We don't allow requests
5922 	 * for AUTH_PROP_MODIFY if all you have is AUTH_PROP_VALUE,
5923 	 * however, to avoid leaking possibly valuable information
5924 	 * since such a user can't change the property anyway.
5925 	 */
5926 	if (ret == REP_PROTOCOL_SUCCESS)
5927 		ret = perm_add_enabling_values(pcp, pgp,
5928 		    AUTH_PROP_MODIFY);
5929 
5930 	if (ret == REP_PROTOCOL_SUCCESS &&
5931 	    strcmp(np->rn_name, AUTH_PROP_MODIFY) != 0)
5932 		ret = perm_add_enabling_values(pcp, pgp,
5933 		    AUTH_PROP_VALUE);
5934 
5935 	if (ret == REP_PROTOCOL_SUCCESS)
5936 		ret = perm_add_enabling_values(pcp, pgp,
5937 		    AUTH_PROP_READ);
5938 
5939 	rc_node_rele(pgp);
5940 
5941 	if (ret == REP_PROTOCOL_SUCCESS) {
5942 		granted = perm_granted(pcp);
5943 		if (granted == PERM_FAIL)
5944 			ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5945 		if (granted == PERM_GONE)
5946 			ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5947 	}
5948 
5949 	if (ret == REP_PROTOCOL_SUCCESS) {
5950 		/* Generate a read_prop audit event. */
5951 		audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
5952 		if (audit_data.ed_fmri == NULL)
5953 			ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5954 	}
5955 	if (ret == REP_PROTOCOL_SUCCESS) {
5956 		ret = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
5957 		    REP_PROTOCOL_FMRI_LEN, &sz_out);
5958 	}
5959 	if (ret == REP_PROTOCOL_SUCCESS) {
5960 		int status;
5961 		int ret_value;
5962 
5963 		if (granted == PERM_DENIED) {
5964 			status = ADT_FAILURE;
5965 			ret_value = ADT_FAIL_VALUE_AUTH;
5966 		} else {
5967 			status = ADT_SUCCESS;
5968 			ret_value = ADT_SUCCESS;
5969 		}
5970 		audit_data.ed_auth = pcp->pc_auth_string;
5971 		smf_audit_event(ADT_smf_read_prop,
5972 		    status, ret_value, &audit_data);
5973 	}
5974 	free(audit_data.ed_fmri);
5975 
5976 	pc_free(pcp);
5977 
5978 	if ((ret == REP_PROTOCOL_SUCCESS) && (granted == PERM_DENIED))
5979 		ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5980 
5981 	return (ret);
5982 #endif	/* NATIVE_BUILD */
5983 }
5984 
5985 /*
5986  * Iteration
5987  */
5988 static int
5989 rc_iter_filter_name(rc_node_t *np, void *s)
5990 {
5991 	const char *name = s;
5992 
5993 	return (strcmp(np->rn_name, name) == 0);
5994 }
5995 
5996 static int
5997 rc_iter_filter_type(rc_node_t *np, void *s)
5998 {
5999 	const char *type = s;
6000 
6001 	return (np->rn_type != NULL && strcmp(np->rn_type, type) == 0);
6002 }
6003 
6004 /*ARGSUSED*/
6005 static int
6006 rc_iter_null_filter(rc_node_t *np, void *s)
6007 {
6008 	return (1);
6009 }
6010 
6011 /*
6012  * Allocate & initialize an rc_node_iter_t structure.  Essentially, ensure
6013  * np->rn_children is populated and call uu_list_walk_start(np->rn_children).
6014  * If successful, leaves a hold on np & increments np->rn_other_refs
6015  *
6016  * If composed is true, then set up for iteration across the top level of np's
6017  * composition chain.  If successful, leaves a hold on np and increments
6018  * rn_other_refs for the top level of np's composition chain.
6019  *
6020  * Fails with
6021  *   _NO_RESOURCES
6022  *   _INVALID_TYPE
6023  *   _TYPE_MISMATCH - np cannot carry type children
6024  *   _DELETED
6025  */
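/*
 * The hold on np and the rn_other_refs increment taken here are released by
 * rc_iter_end(), which runs either when the walk completes in rc_iter_next()
 * or when the iterator is torn down by rc_iter_destroy().
 */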
6026 static int
6027 rc_iter_create(rc_node_iter_t **resp, rc_node_t *np, uint32_t type,
6028     rc_iter_filter_func *filter, void *arg, boolean_t composed)
6029 {
6030 	rc_node_iter_t *nip;
6031 	int res;
6032 
6033 	assert(*resp == NULL);
6034 
6035 	nip = uu_zalloc(sizeof (*nip));
6036 	if (nip == NULL)
6037 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6038 
6039 	/* np is held by the client's rc_node_ptr_t */
6040 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
6041 		composed = 1;
6042 
6043 	if (!composed) {
6044 		(void) pthread_mutex_lock(&np->rn_lock);
6045 
6046 		if ((res = rc_node_fill_children(np, type)) !=
6047 		    REP_PROTOCOL_SUCCESS) {
6048 			(void) pthread_mutex_unlock(&np->rn_lock);
6049 			uu_free(nip);
6050 			return (res);
6051 		}
6052 
6053 		nip->rni_clevel = -1;
6054 
6055 		nip->rni_iter = uu_list_walk_start(np->rn_children,
6056 		    UU_WALK_ROBUST);
6057 		if (nip->rni_iter != NULL) {
6058 			nip->rni_iter_node = np;
6059 			rc_node_hold_other(np);
6060 		} else {
6061 			(void) pthread_mutex_unlock(&np->rn_lock);
6062 			uu_free(nip);
6063 			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6064 		}
6065 		(void) pthread_mutex_unlock(&np->rn_lock);
6066 	} else {
6067 		rc_node_t *ent;
6068 
6069 		if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
6070 			/* rn_cchain isn't valid until children are loaded. */
6071 			(void) pthread_mutex_lock(&np->rn_lock);
6072 			res = rc_node_fill_children(np,
6073 			    REP_PROTOCOL_ENTITY_SNAPLEVEL);
6074 			(void) pthread_mutex_unlock(&np->rn_lock);
6075 			if (res != REP_PROTOCOL_SUCCESS) {
6076 				uu_free(nip);
6077 				return (res);
6078 			}
6079 
6080 			/* Check for an empty snapshot. */
6081 			if (np->rn_cchain[0] == NULL)
6082 				goto empty;
6083 		}
6084 
6085 		/* Start at the top of the composition chain. */
6086 		for (nip->rni_clevel = 0; ; ++nip->rni_clevel) {
6087 			if (nip->rni_clevel >= COMPOSITION_DEPTH) {
6088 				/* Empty composition chain. */
6089 empty:
6090 				nip->rni_clevel = -1;
6091 				nip->rni_iter = NULL;
6092 				/* It's ok, iter_next() will return _DONE. */
6093 				goto out;
6094 			}
6095 
6096 			ent = np->rn_cchain[nip->rni_clevel];
6097 			assert(ent != NULL);
6098 
6099 			if (rc_node_check_and_lock(ent) == REP_PROTOCOL_SUCCESS)
6100 				break;
6101 
6102 			/* Someone deleted it, so try the next one. */
6103 		}
6104 
6105 		res = rc_node_fill_children(ent, type);
6106 
6107 		if (res == REP_PROTOCOL_SUCCESS) {
6108 			nip->rni_iter = uu_list_walk_start(ent->rn_children,
6109 			    UU_WALK_ROBUST);
6110 
6111 			if (nip->rni_iter == NULL)
6112 				res = REP_PROTOCOL_FAIL_NO_RESOURCES;
6113 			else {
6114 				nip->rni_iter_node = ent;
6115 				rc_node_hold_other(ent);
6116 			}
6117 		}
6118 
6119 		if (res != REP_PROTOCOL_SUCCESS) {
6120 			(void) pthread_mutex_unlock(&ent->rn_lock);
6121 			uu_free(nip);
6122 			return (res);
6123 		}
6124 
6125 		(void) pthread_mutex_unlock(&ent->rn_lock);
6126 	}
6127 
6128 out:
6129 	rc_node_hold(np);		/* released by rc_iter_end() */
6130 	nip->rni_parent = np;
6131 	nip->rni_type = type;
6132 	nip->rni_filter = (filter != NULL)? filter : rc_iter_null_filter;
6133 	nip->rni_filter_arg = arg;
6134 	*resp = nip;
6135 	return (REP_PROTOCOL_SUCCESS);
6136 }
6137 
6138 static void
6139 rc_iter_end(rc_node_iter_t *iter)
6140 {
6141 	rc_node_t *np = iter->rni_parent;
6142 
6143 	if (iter->rni_clevel >= 0)
6144 		np = np->rn_cchain[iter->rni_clevel];
6145 
6146 	assert(MUTEX_HELD(&np->rn_lock));
6147 	if (iter->rni_iter != NULL)
6148 		uu_list_walk_end(iter->rni_iter);
6149 	iter->rni_iter = NULL;
6150 
6151 	(void) pthread_mutex_unlock(&np->rn_lock);
6152 	rc_node_rele(iter->rni_parent);
6153 	if (iter->rni_iter_node != NULL)
6154 		rc_node_rele_other(iter->rni_iter_node);
6155 }
6156 
6157 /*
6158  * Fails with
6159  *   _NOT_SET - npp is reset
6160  *   _DELETED - npp's node has been deleted
6161  *   _NOT_APPLICABLE - npp's node is not a property
6162  *   _NO_RESOURCES - out of memory
6163  */
6164 static int
6165 rc_node_setup_value_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp)
6166 {
6167 	rc_node_t *np;
6168 
6169 	rc_node_iter_t *nip;
6170 
6171 	assert(*iterp == NULL);
6172 
6173 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
6174 
6175 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
6176 		(void) pthread_mutex_unlock(&np->rn_lock);
6177 		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6178 	}
6179 
6180 	nip = uu_zalloc(sizeof (*nip));
6181 	if (nip == NULL) {
6182 		(void) pthread_mutex_unlock(&np->rn_lock);
6183 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6184 	}
6185 
6186 	nip->rni_parent = np;
6187 	nip->rni_iter = NULL;
6188 	nip->rni_clevel = -1;
6189 	nip->rni_type = REP_PROTOCOL_ENTITY_VALUE;
6190 	nip->rni_offset = 0;
6191 	nip->rni_last_offset = 0;
6192 
6193 	rc_node_hold_locked(np);
6194 
6195 	*iterp = nip;
6196 	(void) pthread_mutex_unlock(&np->rn_lock);
6197 
6198 	return (REP_PROTOCOL_SUCCESS);
6199 }
6200 
6201 /*
6202  * Returns:
6203  *   _NO_RESOURCES - out of memory
6204  *   _NOT_SET - npp is reset
6205  *   _DELETED - npp's node has been deleted
6206  *   _TYPE_MISMATCH - npp's node is not a property
6207  *   _NOT_FOUND - property has no values
6208  *   _TRUNCATED - property has >1 values (first is written into out)
6209  *   _SUCCESS - property has 1 value (which is written into out)
6210  *   _PERMISSION_DENIED - no authorization to read property value(s)
6211  *
6212  * We shorten *sz_out to not include anything after the final '\0'.
6213  */
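/*
 * For example, if the first value is the string "true", w below is 4 and
 * *sz_out is shortened to offsetof(struct rep_protocol_value_response,
 * rpr_value[5]), i.e. just enough to cover the value and its terminating
 * '\0'.
 */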
6214 int
6215 rc_node_get_property_value(rc_node_ptr_t *npp,
6216     struct rep_protocol_value_response *out, size_t *sz_out)
6217 {
6218 	rc_node_t *np;
6219 	size_t w;
6220 	int ret;
6221 
6222 	assert(*sz_out == sizeof (*out));
6223 
6224 	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
6225 	ret = rc_node_property_may_read(np);
6226 	rc_node_rele(np);
6227 
6228 	if (ret != REP_PROTOCOL_SUCCESS)
6229 		return (ret);
6230 
6231 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
6232 
6233 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
6234 		(void) pthread_mutex_unlock(&np->rn_lock);
6235 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6236 	}
6237 
6238 	if (np->rn_values_size == 0) {
6239 		(void) pthread_mutex_unlock(&np->rn_lock);
6240 		return (REP_PROTOCOL_FAIL_NOT_FOUND);
6241 	}
6242 	out->rpr_type = np->rn_valtype;
6243 	w = strlcpy(out->rpr_value, &np->rn_values[0],
6244 	    sizeof (out->rpr_value));
6245 
6246 	if (w >= sizeof (out->rpr_value))
6247 		backend_panic("value too large");
6248 
6249 	*sz_out = offsetof(struct rep_protocol_value_response,
6250 	    rpr_value[w + 1]);
6251 
6252 	ret = (np->rn_values_count != 1)? REP_PROTOCOL_FAIL_TRUNCATED :
6253 	    REP_PROTOCOL_SUCCESS;
6254 	(void) pthread_mutex_unlock(&np->rn_lock);
6255 	return (ret);
6256 }
6257 
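/*
 * Return the next value of the property iterated by iter.  The values are
 * stored back-to-back, each '\0'-terminated, in the property's rn_values
 * buffer (rn_values_size bytes in all); rni_offset is the offset of the next
 * value to return and is advanced past the value and its '\0' each time.  If
 * repeat is nonzero the previously returned value (at rni_last_offset) is
 * returned again and the offsets are left untouched.  Returns _DONE once the
 * offset reaches the end of the buffer, and passes through failures from
 * rc_node_property_may_read().
 */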
6258 int
6259 rc_iter_next_value(rc_node_iter_t *iter,
6260     struct rep_protocol_value_response *out, size_t *sz_out, int repeat)
6261 {
6262 	rc_node_t *np = iter->rni_parent;
6263 	const char *vals;
6264 	size_t len;
6265 
6266 	size_t start;
6267 	size_t w;
6268 	int ret;
6269 
6270 	rep_protocol_responseid_t result;
6271 
6272 	assert(*sz_out == sizeof (*out));
6273 
6274 	(void) memset(out, '\0', *sz_out);
6275 
6276 	if (iter->rni_type != REP_PROTOCOL_ENTITY_VALUE)
6277 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6278 
6279 	RC_NODE_CHECK(np);
6280 	ret = rc_node_property_may_read(np);
6281 
6282 	if (ret != REP_PROTOCOL_SUCCESS)
6283 		return (ret);
6284 
6285 	RC_NODE_CHECK_AND_LOCK(np);
6286 
6287 	vals = np->rn_values;
6288 	len = np->rn_values_size;
6289 
6290 	out->rpr_type = np->rn_valtype;
6291 
6292 	start = (repeat)? iter->rni_last_offset : iter->rni_offset;
6293 
6294 	if (len == 0 || start >= len) {
6295 		result = REP_PROTOCOL_DONE;
6296 		*sz_out -= sizeof (out->rpr_value);
6297 	} else {
6298 		w = strlcpy(out->rpr_value, &vals[start],
6299 		    sizeof (out->rpr_value));
6300 
6301 		if (w >= sizeof (out->rpr_value))
6302 			backend_panic("value too large");
6303 
6304 		*sz_out = offsetof(struct rep_protocol_value_response,
6305 		    rpr_value[w + 1]);
6306 
6307 		/*
6308 		 * update the offsets if we're not repeating
6309 		 */
6310 		if (!repeat) {
6311 			iter->rni_last_offset = iter->rni_offset;
6312 			iter->rni_offset += (w + 1);
6313 		}
6314 
6315 		result = REP_PROTOCOL_SUCCESS;
6316 	}
6317 
6318 	(void) pthread_mutex_unlock(&np->rn_lock);
6319 	return (result);
6320 }
6321 
6322 /*
6323  * Entry point for ITER_START from client.c.  Validate the arguments & call
6324  * rc_iter_create().
6325  *
6326  * Fails with
6327  *   _NOT_SET
6328  *   _DELETED
6329  *   _TYPE_MISMATCH - np cannot carry type children
6330  *   _BAD_REQUEST - flags is invalid
6331  *		    pattern is invalid
6332  *   _NO_RESOURCES
6333  *   _INVALID_TYPE
6334  *   _TYPE_MISMATCH - *npp cannot have children of type
6335  *   _BACKEND_ACCESS
6336  */
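/*
 * A rough sketch of the iterator lifecycle, as a caller in client.c might
 * drive it (the exact client.c plumbing is not shown in this file):
 *
 *	rc_node_iter_t *iter = NULL;
 *	rc_node_ptr_t out;
 *
 *	if (rc_node_setup_iter(npp, &iter, type, flags, pattern) ==
 *	    REP_PROTOCOL_SUCCESS) {
 *		while (rc_iter_next(iter, &out, type) == REP_PROTOCOL_SUCCESS)
 *			...			(use the node in out)
 *		rc_iter_destroy(&iter);
 *	}
 */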
6337 int
6338 rc_node_setup_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp,
6339     uint32_t type, uint32_t flags, const char *pattern)
6340 {
6341 	rc_node_t *np;
6342 	rc_iter_filter_func *f = NULL;
6343 	int rc;
6344 
6345 	RC_NODE_PTR_GET_CHECK(np, npp);
6346 
6347 	if (pattern != NULL && pattern[0] == '\0')
6348 		pattern = NULL;
6349 
6350 	if (type == REP_PROTOCOL_ENTITY_VALUE) {
6351 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
6352 			return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6353 		if (flags != RP_ITER_START_ALL || pattern != NULL)
6354 			return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6355 
6356 		rc = rc_node_setup_value_iter(npp, iterp);
6357 		assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6358 		return (rc);
6359 	}
6360 
6361 	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
6362 	    REP_PROTOCOL_SUCCESS)
6363 		return (rc);
6364 
6365 	if (((flags & RP_ITER_START_FILT_MASK) == RP_ITER_START_ALL) ^
6366 	    (pattern == NULL))
6367 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6368 
6369 	/* Composition only works for instances & snapshots. */
6370 	if ((flags & RP_ITER_START_COMPOSED) &&
6371 	    (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE &&
6372 	    np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT))
6373 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6374 
6375 	if (pattern != NULL) {
6376 		if ((rc = rc_check_type_name(type, pattern)) !=
6377 		    REP_PROTOCOL_SUCCESS)
6378 			return (rc);
6379 		pattern = strdup(pattern);
6380 		if (pattern == NULL)
6381 			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6382 	}
6383 
6384 	switch (flags & RP_ITER_START_FILT_MASK) {
6385 	case RP_ITER_START_ALL:
6386 		f = NULL;
6387 		break;
6388 	case RP_ITER_START_EXACT:
6389 		f = rc_iter_filter_name;
6390 		break;
6391 	case RP_ITER_START_PGTYPE:
6392 		if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
6393 			free((void *)pattern);
6394 			return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6395 		}
6396 		f = rc_iter_filter_type;
6397 		break;
6398 	default:
6399 		free((void *)pattern);
6400 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6401 	}
6402 
6403 	rc = rc_iter_create(iterp, np, type, f, (void *)pattern,
6404 	    flags & RP_ITER_START_COMPOSED);
6405 	if (rc != REP_PROTOCOL_SUCCESS && pattern != NULL)
6406 		free((void *)pattern);
6407 
6408 	return (rc);
6409 }
6410 
6411 /*
6412  * Do uu_list_walk_next(iter->rni_iter) until we find a child which matches
6413  * the filter.
6414  * For composed iterators, also check to see if there's an overlapping entity
6415  * (see embedded comments).  If we reach the end of the list, start over at
6416  * the next level.
6417  *
6418  * Returns
6419  *   _BAD_REQUEST - iter walks values
6420  *   _TYPE_MISMATCH - iter does not walk type entities
6421  *   _DELETED - parent was deleted
6422  *   _NO_RESOURCES
6423  *   _INVALID_TYPE - type is invalid
6424  *   _DONE
6425  *   _SUCCESS
6426  *
6427  * For composed property group iterators, can also return
6428  *   _TYPE_MISMATCH - parent cannot have type children
6429  */
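/*
 * For a composed iterator, rni_clevel indexes the rn_cchain[] entry of
 * rni_parent currently being walked.  When one level is exhausted the walk
 * moves to the next; a child whose name already appeared at a higher level
 * is skipped, and when iterating property groups a pg that also exists at
 * the lower level is returned as a composed pg built by rc_node_setup_cpg().
 */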
6430 int
6431 rc_iter_next(rc_node_iter_t *iter, rc_node_ptr_t *out, uint32_t type)
6432 {
6433 	rc_node_t *np = iter->rni_parent;
6434 	rc_node_t *res;
6435 	int rc;
6436 
6437 	if (iter->rni_type == REP_PROTOCOL_ENTITY_VALUE)
6438 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6439 
6440 	if (iter->rni_iter == NULL) {
6441 		rc_node_clear(out, 0);
6442 		return (REP_PROTOCOL_DONE);
6443 	}
6444 
6445 	if (iter->rni_type != type) {
6446 		rc_node_clear(out, 0);
6447 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6448 	}
6449 
6450 	(void) pthread_mutex_lock(&np->rn_lock);  /* held by _iter_create() */
6451 
6452 	if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
6453 		(void) pthread_mutex_unlock(&np->rn_lock);
6454 		rc_node_clear(out, 1);
6455 		return (REP_PROTOCOL_FAIL_DELETED);
6456 	}
6457 
6458 	if (iter->rni_clevel >= 0) {
6459 		/* Composed iterator.  Iterate over appropriate level. */
6460 		(void) pthread_mutex_unlock(&np->rn_lock);
6461 		np = np->rn_cchain[iter->rni_clevel];
6462 		/*
6463 		 * If iter->rni_parent is an instance or a snapshot, np must
6464 		 * be valid since iter holds iter->rni_parent & possible
6465 		 * levels (service, instance, snaplevel) cannot be destroyed
6466 		 * while rni_parent is held.  If iter->rni_parent is
6467 		 * a composed property group then rc_node_setup_cpg() put
6468 		 * a hold on np.
6469 		 */
6470 
6471 		(void) pthread_mutex_lock(&np->rn_lock);
6472 
6473 		if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
6474 			(void) pthread_mutex_unlock(&np->rn_lock);
6475 			rc_node_clear(out, 1);
6476 			return (REP_PROTOCOL_FAIL_DELETED);
6477 		}
6478 	}
6479 
6480 	assert(np->rn_flags & RC_NODE_HAS_CHILDREN);
6481 
6482 	for (;;) {
6483 		res = uu_list_walk_next(iter->rni_iter);
6484 		if (res == NULL) {
6485 			rc_node_t *parent = iter->rni_parent;
6486 
6487 #if COMPOSITION_DEPTH == 2
6488 			if (iter->rni_clevel < 0 || iter->rni_clevel == 1) {
6489 				/* release walker and lock */
6490 				rc_iter_end(iter);
6491 				break;
6492 			}
6493 
6494 			/* Stop walking current level. */
6495 			uu_list_walk_end(iter->rni_iter);
6496 			iter->rni_iter = NULL;
6497 			(void) pthread_mutex_unlock(&np->rn_lock);
6498 			rc_node_rele_other(iter->rni_iter_node);
6499 			iter->rni_iter_node = NULL;
6500 
6501 			/* Start walking next level. */
6502 			++iter->rni_clevel;
6503 			np = parent->rn_cchain[iter->rni_clevel];
6504 			assert(np != NULL);
6505 #else
6506 #error This code must be updated.
6507 #endif
6508 
6509 			(void) pthread_mutex_lock(&np->rn_lock);
6510 
6511 			rc = rc_node_fill_children(np, iter->rni_type);
6512 
6513 			if (rc == REP_PROTOCOL_SUCCESS) {
6514 				iter->rni_iter =
6515 				    uu_list_walk_start(np->rn_children,
6516 				    UU_WALK_ROBUST);
6517 
6518 				if (iter->rni_iter == NULL)
6519 					rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
6520 				else {
6521 					iter->rni_iter_node = np;
6522 					rc_node_hold_other(np);
6523 				}
6524 			}
6525 
6526 			if (rc != REP_PROTOCOL_SUCCESS) {
6527 				(void) pthread_mutex_unlock(&np->rn_lock);
6528 				rc_node_clear(out, 0);
6529 				return (rc);
6530 			}
6531 
6532 			continue;
6533 		}
6534 
6535 		if (res->rn_id.rl_type != type ||
6536 		    !iter->rni_filter(res, iter->rni_filter_arg))
6537 			continue;
6538 
6539 		/*
6540 		 * If we're composed and not at the top level, check to see if
6541 		 * there's an entity at a higher level with the same name.  If
6542 		 * so, skip this one.
6543 		 */
6544 		if (iter->rni_clevel > 0) {
6545 			rc_node_t *ent = iter->rni_parent->rn_cchain[0];
6546 			rc_node_t *pg;
6547 
6548 #if COMPOSITION_DEPTH == 2
6549 			assert(iter->rni_clevel == 1);
6550 
6551 			(void) pthread_mutex_unlock(&np->rn_lock);
6552 			(void) pthread_mutex_lock(&ent->rn_lock);
6553 			rc = rc_node_find_named_child(ent, res->rn_name, type,
6554 			    &pg);
6555 			if (rc == REP_PROTOCOL_SUCCESS && pg != NULL)
6556 				rc_node_rele(pg);
6557 			(void) pthread_mutex_unlock(&ent->rn_lock);
6558 			if (rc != REP_PROTOCOL_SUCCESS) {
6559 				rc_node_clear(out, 0);
6560 				return (rc);
6561 			}
6562 			(void) pthread_mutex_lock(&np->rn_lock);
6563 
6564 			/* Make sure np isn't being deleted all of a sudden. */
6565 			if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6566 				(void) pthread_mutex_unlock(&np->rn_lock);
6567 				rc_node_clear(out, 1);
6568 				return (REP_PROTOCOL_FAIL_DELETED);
6569 			}
6570 
6571 			if (pg != NULL)
6572 				/* Keep going. */
6573 				continue;
6574 #else
6575 #error This code must be updated.
6576 #endif
6577 		}
6578 
6579 		/*
6580 		 * If we're composed, iterating over property groups, and not
6581 		 * at the bottom level, check to see if there's a pg at lower
6582 		 * level with the same name.  If so, return a cpg.
6583 		 */
6584 		if (iter->rni_clevel >= 0 &&
6585 		    type == REP_PROTOCOL_ENTITY_PROPERTYGRP &&
6586 		    iter->rni_clevel < COMPOSITION_DEPTH - 1) {
6587 #if COMPOSITION_DEPTH == 2
6588 			rc_node_t *pg;
6589 			rc_node_t *ent = iter->rni_parent->rn_cchain[1];
6590 
6591 			rc_node_hold(res);	/* While we drop np->rn_lock */
6592 
6593 			(void) pthread_mutex_unlock(&np->rn_lock);
6594 			(void) pthread_mutex_lock(&ent->rn_lock);
6595 			rc = rc_node_find_named_child(ent, res->rn_name, type,
6596 			    &pg);
6597 			/* holds pg if not NULL */
6598 			(void) pthread_mutex_unlock(&ent->rn_lock);
6599 			if (rc != REP_PROTOCOL_SUCCESS) {
6600 				rc_node_rele(res);
6601 				rc_node_clear(out, 0);
6602 				return (rc);
6603 			}
6604 
6605 			(void) pthread_mutex_lock(&np->rn_lock);
6606 			if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6607 				(void) pthread_mutex_unlock(&np->rn_lock);
6608 				rc_node_rele(res);
6609 				if (pg != NULL)
6610 					rc_node_rele(pg);
6611 				rc_node_clear(out, 1);
6612 				return (REP_PROTOCOL_FAIL_DELETED);
6613 			}
6614 
6615 			if (pg == NULL) {
6616 				rc_node_rele(res);
6617 			} else {
6618 				rc_node_t *cpg;
6619 
6620 				/* Keep res held for rc_node_setup_cpg(). */
6621 
6622 				cpg = rc_node_alloc();
6623 				if (cpg == NULL) {
6624 					(void) pthread_mutex_unlock(
6625 					    &np->rn_lock);
6626 					rc_node_rele(res);
6627 					rc_node_rele(pg);
6628 					rc_node_clear(out, 0);
6629 					return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6630 				}
6631 
6632 				switch (rc_node_setup_cpg(cpg, res, pg)) {
6633 				case REP_PROTOCOL_SUCCESS:
6634 					res = cpg;
6635 					break;
6636 
6637 				case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
6638 					/* Nevermind. */
6639 					rc_node_destroy(cpg);
6640 					rc_node_rele(pg);
6641 					rc_node_rele(res);
6642 					break;
6643 
6644 				case REP_PROTOCOL_FAIL_NO_RESOURCES:
6645 					rc_node_destroy(cpg);
6646 					(void) pthread_mutex_unlock(
6647 					    &np->rn_lock);
6648 					rc_node_rele(res);
6649 					rc_node_rele(pg);
6650 					rc_node_clear(out, 0);
6651 					return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6652 
6653 				default:
6654 					assert(0);
6655 					abort();
6656 				}
6657 			}
6658 #else
6659 #error This code must be updated.
6660 #endif
6661 		}
6662 
6663 		rc_node_hold(res);
6664 		(void) pthread_mutex_unlock(&np->rn_lock);
6665 		break;
6666 	}
6667 	rc_node_assign(out, res);
6668 
6669 	if (res == NULL)
6670 		return (REP_PROTOCOL_DONE);
6671 	rc_node_rele(res);
6672 	return (REP_PROTOCOL_SUCCESS);
6673 }
6674 
6675 void
6676 rc_iter_destroy(rc_node_iter_t **nipp)
6677 {
6678 	rc_node_iter_t *nip = *nipp;
6679 	rc_node_t *np;
6680 
6681 	if (nip == NULL)
6682 		return;				/* already freed */
6683 
6684 	np = nip->rni_parent;
6685 
6686 	if (nip->rni_filter_arg != NULL)
6687 		free(nip->rni_filter_arg);
6688 	nip->rni_filter_arg = NULL;
6689 
6690 	if (nip->rni_type == REP_PROTOCOL_ENTITY_VALUE ||
6691 	    nip->rni_iter != NULL) {
6692 		if (nip->rni_clevel < 0)
6693 			(void) pthread_mutex_lock(&np->rn_lock);
6694 		else
6695 			(void) pthread_mutex_lock(
6696 			    &np->rn_cchain[nip->rni_clevel]->rn_lock);
6697 		rc_iter_end(nip);		/* release walker and lock */
6698 	}
6699 	nip->rni_parent = NULL;
6700 
6701 	uu_free(nip);
6702 	*nipp = NULL;
6703 }
6704 
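/*
 * Set up a transaction on the property group in *npp and assign it to *txp.
 * A composed pg is mapped to its first composition level (rn_cchain[0]), and
 * pgs belonging to a snapshot are never writable.  For the main repository
 * the modify authorizations (solaris.smf.modify, the pg-type specific
 * authorization, the pg's modify_authorization and value_authorization
 * values, and solaris.smf.manage where applicable) are collected and checked
 * here; the outcome is remembered in txp->rnp_authorized so that
 * rc_tx_commit() can generate the audit events later.
 */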
6705 int
6706 rc_node_setup_tx(rc_node_ptr_t *npp, rc_node_ptr_t *txp)
6707 {
6708 	rc_node_t *np;
6709 	permcheck_t *pcp;
6710 	int ret;
6711 	perm_status_t granted;
6712 	rc_auth_state_t authorized = RC_AUTH_UNKNOWN;
6713 	char *auth_string = NULL;
6714 
6715 	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
6716 
6717 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
6718 		rc_node_rele(np);
6719 		np = np->rn_cchain[0];
6720 		RC_NODE_CHECK_AND_HOLD(np);
6721 	}
6722 
6723 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
6724 		rc_node_rele(np);
6725 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6726 	}
6727 
6728 	if (np->rn_id.rl_ids[ID_SNAPSHOT] != 0) {
6729 		rc_node_rele(np);
6730 		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6731 	}
6732 
6733 #ifdef NATIVE_BUILD
6734 	if (client_is_privileged())
6735 		goto skip_checks;
6736 	rc_node_rele(np);
6737 	return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6738 #else
6739 	if (is_main_repository == 0)
6740 		goto skip_checks;
6741 
6742 	/* permission check */
6743 	pcp = pc_create();
6744 	if (pcp == NULL) {
6745 		rc_node_rele(np);
6746 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6747 	}
6748 
6749 	if (np->rn_id.rl_ids[ID_INSTANCE] != 0 &&	/* instance pg */
6750 	    ((strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0 &&
6751 	    strcmp(np->rn_type, AUTH_PG_ACTIONS_TYPE) == 0) ||
6752 	    (strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
6753 	    strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
6754 		rc_node_t *instn;
6755 
6756 		/* solaris.smf.modify can be used */
6757 		ret = perm_add_enabling(pcp, AUTH_MODIFY);
6758 		if (ret != REP_PROTOCOL_SUCCESS) {
6759 			pc_free(pcp);
6760 			rc_node_rele(np);
6761 			return (ret);
6762 		}
6763 
6764 		/* solaris.smf.manage can be used. */
6765 		ret = perm_add_enabling(pcp, AUTH_MANAGE);
6766 
6767 		if (ret != REP_PROTOCOL_SUCCESS) {
6768 			pc_free(pcp);
6769 			rc_node_rele(np);
6770 			return (ret);
6771 		}
6772 
6773 		/* general/action_authorization values can be used. */
6774 		ret = rc_node_parent(np, &instn);
6775 		if (ret != REP_PROTOCOL_SUCCESS) {
6776 			assert(ret == REP_PROTOCOL_FAIL_DELETED);
6777 			rc_node_rele(np);
6778 			pc_free(pcp);
6779 			return (REP_PROTOCOL_FAIL_DELETED);
6780 		}
6781 
6782 		assert(instn->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
6783 
6784 		ret = perm_add_inst_action_auth(pcp, instn);
6785 		rc_node_rele(instn);
6786 		switch (ret) {
6787 		case REP_PROTOCOL_SUCCESS:
6788 			break;
6789 
6790 		case REP_PROTOCOL_FAIL_DELETED:
6791 		case REP_PROTOCOL_FAIL_NO_RESOURCES:
6792 			rc_node_rele(np);
6793 			pc_free(pcp);
6794 			return (ret);
6795 
6796 		default:
6797 			bad_error("perm_add_inst_action_auth", ret);
6798 		}
6799 
6800 		if (strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0)
6801 			authorized = RC_AUTH_PASSED; /* No check on commit. */
6802 	} else {
6803 		ret = perm_add_enabling(pcp, AUTH_MODIFY);
6804 
6805 		if (ret == REP_PROTOCOL_SUCCESS) {
6806 			/* propertygroup-type-specific authorization */
6807 			/* no locking because rn_type won't change anyway */
6808 			const char * const auth =
6809 			    perm_auth_for_pgtype(np->rn_type);
6810 
6811 			if (auth != NULL)
6812 				ret = perm_add_enabling(pcp, auth);
6813 		}
6814 
6815 		if (ret == REP_PROTOCOL_SUCCESS)
6816 			/* propertygroup/transaction-type-specific auths */
6817 			ret =
6818 			    perm_add_enabling_values(pcp, np, AUTH_PROP_VALUE);
6819 
6820 		if (ret == REP_PROTOCOL_SUCCESS)
6821 			ret =
6822 			    perm_add_enabling_values(pcp, np, AUTH_PROP_MODIFY);
6823 
6824 		/* AUTH_MANAGE can manipulate general/AUTH_PROP_ACTION */
6825 		if (ret == REP_PROTOCOL_SUCCESS &&
6826 		    strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
6827 		    strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0)
6828 			ret = perm_add_enabling(pcp, AUTH_MANAGE);
6829 
6830 		if (ret != REP_PROTOCOL_SUCCESS) {
6831 			pc_free(pcp);
6832 			rc_node_rele(np);
6833 			return (ret);
6834 		}
6835 	}
6836 
6837 	granted = perm_granted(pcp);
6838 	ret = map_granted_status(granted, pcp, &auth_string);
6839 	pc_free(pcp);
6840 
6841 	if ((granted == PERM_GONE) || (granted == PERM_FAIL) ||
6842 	    (ret == REP_PROTOCOL_FAIL_NO_RESOURCES)) {
6843 		free(auth_string);
6844 		rc_node_rele(np);
6845 		return (ret);
6846 	}
6847 
6848 	if (granted == PERM_DENIED) {
6849 		/*
6850 		 * If we get here, the authorization failed.
6851 		 * Unfortunately, we don't have enough information at this
6852 		 * point to generate the security audit events.  We'll only
6853 		 * get that information when the client tries to commit the
6854 		 * event.  Thus, we'll remember the failed authorization,
6855 		 * so that we can generate the audit events later.
6856 		 */
6857 		authorized = RC_AUTH_FAILED;
6858 	}
6859 #endif /* NATIVE_BUILD */
6860 
6861 skip_checks:
6862 	rc_node_assign(txp, np);
6863 	txp->rnp_authorized = authorized;
6864 	if (authorized != RC_AUTH_UNKNOWN) {
6865 		/* Save the authorization string. */
6866 		if (txp->rnp_auth_string != NULL)
6867 			free((void *)txp->rnp_auth_string);
6868 		txp->rnp_auth_string = auth_string;
6869 		auth_string = NULL;	/* Don't free until done with txp. */
6870 	}
6871 
6872 	rc_node_rele(np);
6873 	if (auth_string != NULL)
6874 		free(auth_string);
6875 	return (REP_PROTOCOL_SUCCESS);
6876 }
6877 
6878 /*
6879  * Return 1 if the given transaction commands only modify the values of
6880  * properties other than "modify_authorization".  Return -1 if any of the
6881  * commands are invalid, and 0 otherwise.
6882  */
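/*
 * The command buffer inspected here (and by tx_modifies_action() and
 * tx_only_enabled() below) is a packed sequence of
 * rep_protocol_transaction_cmd entries: each entry occupies
 * TX_SIZE(rptc_size) bytes, and rptc_data starts with the '\0'-terminated
 * name of the property the command operates on.
 */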
6883 static int
6884 tx_allow_value(const void *cmds_arg, size_t cmds_sz, rc_node_t *pg)
6885 {
6886 	const struct rep_protocol_transaction_cmd *cmds;
6887 	uintptr_t loc;
6888 	uint32_t sz;
6889 	rc_node_t *prop;
6890 	boolean_t ok;
6891 
6892 	assert(!MUTEX_HELD(&pg->rn_lock));
6893 
6894 	loc = (uintptr_t)cmds_arg;
6895 
6896 	while (cmds_sz > 0) {
6897 		cmds = (struct rep_protocol_transaction_cmd *)loc;
6898 
6899 		if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6900 			return (-1);
6901 
6902 		sz = cmds->rptc_size;
6903 		if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6904 			return (-1);
6905 
6906 		sz = TX_SIZE(sz);
6907 		if (sz > cmds_sz)
6908 			return (-1);
6909 
6910 		switch (cmds[0].rptc_action) {
6911 		case REP_PROTOCOL_TX_ENTRY_CLEAR:
6912 			break;
6913 
6914 		case REP_PROTOCOL_TX_ENTRY_REPLACE:
6915 			/* Check type */
6916 			(void) pthread_mutex_lock(&pg->rn_lock);
6917 			if (rc_node_find_named_child(pg,
6918 			    (const char *)cmds[0].rptc_data,
6919 			    REP_PROTOCOL_ENTITY_PROPERTY, &prop) ==
6920 			    REP_PROTOCOL_SUCCESS) {
6921 				ok = (prop != NULL &&
6922 				    prop->rn_valtype == cmds[0].rptc_type);
6923 			} else {
6924 				/* Return more particular error? */
6925 				ok = B_FALSE;
6926 			}
6927 			(void) pthread_mutex_unlock(&pg->rn_lock);
6928 			if (ok)
6929 				break;
6930 			return (0);
6931 
6932 		default:
6933 			return (0);
6934 		}
6935 
6936 		if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_MODIFY)
6937 		    == 0)
6938 			return (0);
6939 
6940 		loc += sz;
6941 		cmds_sz -= sz;
6942 	}
6943 
6944 	return (1);
6945 }
6946 
6947 /*
6948  * Return 1 if any of the given transaction commands affect
6949  * "action_authorization".  Return -1 if any of the commands are invalid and
6950  * 0 in all other cases.
6951  */
6952 static int
6953 tx_modifies_action(const void *cmds_arg, size_t cmds_sz)
6954 {
6955 	const struct rep_protocol_transaction_cmd *cmds;
6956 	uintptr_t loc;
6957 	uint32_t sz;
6958 
6959 	loc = (uintptr_t)cmds_arg;
6960 
6961 	while (cmds_sz > 0) {
6962 		cmds = (struct rep_protocol_transaction_cmd *)loc;
6963 
6964 		if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6965 			return (-1);
6966 
6967 		sz = cmds->rptc_size;
6968 		if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6969 			return (-1);
6970 
6971 		sz = TX_SIZE(sz);
6972 		if (sz > cmds_sz)
6973 			return (-1);
6974 
6975 		if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_ACTION)
6976 		    == 0)
6977 			return (1);
6978 
6979 		loc += sz;
6980 		cmds_sz -= sz;
6981 	}
6982 
6983 	return (0);
6984 }
6985 
6986 /*
6987  * Returns 1 if the transaction commands only modify properties named
6988  * 'enabled', -1 if any of the commands are invalid, and 0 otherwise.
6989  */
6990 static int
6991 tx_only_enabled(const void *cmds_arg, size_t cmds_sz)
6992 {
6993 	const struct rep_protocol_transaction_cmd *cmd;
6994 	uintptr_t loc;
6995 	uint32_t sz;
6996 
6997 	loc = (uintptr_t)cmds_arg;
6998 
6999 	while (cmds_sz > 0) {
7000 		cmd = (struct rep_protocol_transaction_cmd *)loc;
7001 
7002 		if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
7003 			return (-1);
7004 
7005 		sz = cmd->rptc_size;
7006 		if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
7007 			return (-1);
7008 
7009 		sz = TX_SIZE(sz);
7010 		if (sz > cmds_sz)
7011 			return (-1);
7012 
7013 		if (strcmp((const char *)cmd->rptc_data, AUTH_PROP_ENABLED)
7014 		    != 0)
7015 			return (0);
7016 
7017 		loc += sz;
7018 		cmds_sz -= sz;
7019 	}
7020 
7021 	return (1);
7022 }
7023 
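/*
 * Commit the transaction commands in cmds (cmds_sz bytes) against the
 * property group in *txp.  If rc_node_setup_tx() left the authorization
 * undecided, the permission check happens here, since it depends on the
 * contents of the transaction (general/action_authorization and
 * general_ovr/enabled get special treatment).  If authorization fails,
 * per-property audit events are generated and _PERMISSION_DENIED is
 * returned.  On success the updated pg is represented by a new rc_node_t at
 * the new generation (cf. the node replacement in rc_attach_snapshot()
 * above).
 */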
7024 int
7025 rc_tx_commit(rc_node_ptr_t *txp, const void *cmds, size_t cmds_sz)
7026 {
7027 	rc_node_t *np = txp->rnp_node;
7028 	rc_node_t *pp;
7029 	rc_node_t *nnp;
7030 	rc_node_pg_notify_t *pnp;
7031 	int rc;
7032 	permcheck_t *pcp;
7033 	perm_status_t granted;
7034 	int normal;
7035 	char *pg_fmri = NULL;
7036 	char *auth_string = NULL;
7037 	int auth_status = ADT_SUCCESS;
7038 	int auth_ret_value = ADT_SUCCESS;
7039 	size_t sz_out;
7040 	int tx_flag = 1;
7041 	tx_commit_data_t *tx_data = NULL;
7042 
7043 	RC_NODE_CHECK(np);
7044 
7045 	if ((txp->rnp_authorized != RC_AUTH_UNKNOWN) &&
7046 	    (txp->rnp_auth_string != NULL)) {
7047 		auth_string = strdup(txp->rnp_auth_string);
7048 		if (auth_string == NULL)
7049 			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7050 	}
7051 
7052 	if ((txp->rnp_authorized == RC_AUTH_UNKNOWN) &&
7053 	    is_main_repository) {
7054 #ifdef NATIVE_BUILD
7055 		if (!client_is_privileged()) {
7056 			return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
7057 		}
7058 #else
7059 		/* permission check: depends on contents of transaction */
7060 		pcp = pc_create();
7061 		if (pcp == NULL)
7062 			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7063 
7064 		/* If normal is cleared, we won't do the normal checks. */
7065 		normal = 1;
7066 		rc = REP_PROTOCOL_SUCCESS;
7067 
7068 		if (strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
7069 		    strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0) {
7070 			/* Touching general[framework]/action_authorization? */
7071 			rc = tx_modifies_action(cmds, cmds_sz);
7072 			if (rc == -1) {
7073 				pc_free(pcp);
7074 				return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7075 			}
7076 
7077 			if (rc) {
7078 				/*
7079 				 * Yes: only AUTH_MODIFY and AUTH_MANAGE
7080 				 * can be used.
7081 				 */
7082 				rc = perm_add_enabling(pcp, AUTH_MODIFY);
7083 
7084 				if (rc == REP_PROTOCOL_SUCCESS)
7085 					rc = perm_add_enabling(pcp,
7086 					    AUTH_MANAGE);
7087 
7088 				normal = 0;
7089 			} else {
7090 				rc = REP_PROTOCOL_SUCCESS;
7091 			}
7092 		} else if (np->rn_id.rl_ids[ID_INSTANCE] != 0 &&
7093 		    strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
7094 		    strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0) {
7095 			rc_node_t *instn;
7096 
7097 			rc = tx_only_enabled(cmds, cmds_sz);
7098 			if (rc == -1) {
7099 				pc_free(pcp);
7100 				return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7101 			}
7102 
7103 			if (rc) {
7104 				rc = rc_node_parent(np, &instn);
7105 				if (rc != REP_PROTOCOL_SUCCESS) {
7106 					assert(rc == REP_PROTOCOL_FAIL_DELETED);
7107 					pc_free(pcp);
7108 					return (rc);
7109 				}
7110 
7111 				assert(instn->rn_id.rl_type ==
7112 				    REP_PROTOCOL_ENTITY_INSTANCE);
7113 
7114 				rc = perm_add_inst_action_auth(pcp, instn);
7115 				rc_node_rele(instn);
7116 				switch (rc) {
7117 				case REP_PROTOCOL_SUCCESS:
7118 					break;
7119 
7120 				case REP_PROTOCOL_FAIL_DELETED:
7121 				case REP_PROTOCOL_FAIL_NO_RESOURCES:
7122 					pc_free(pcp);
7123 					return (rc);
7124 
7125 				default:
7126 					bad_error("perm_add_inst_action_auth",
7127 					    rc);
7128 				}
7129 			} else {
7130 				rc = REP_PROTOCOL_SUCCESS;
7131 			}
7132 		}
7133 
7134 		if (rc == REP_PROTOCOL_SUCCESS && normal) {
7135 			rc = perm_add_enabling(pcp, AUTH_MODIFY);
7136 
7137 			if (rc == REP_PROTOCOL_SUCCESS) {
7138 				/* Add pgtype-specific authorization. */
7139 				const char * const auth =
7140 				    perm_auth_for_pgtype(np->rn_type);
7141 
7142 				if (auth != NULL)
7143 					rc = perm_add_enabling(pcp, auth);
7144 			}
7145 
7146 			/* Add pg-specific modify_authorization auths. */
7147 			if (rc == REP_PROTOCOL_SUCCESS)
7148 				rc = perm_add_enabling_values(pcp, np,
7149 				    AUTH_PROP_MODIFY);
7150 
7151 			/* If value_authorization values are ok, add them. */
7152 			if (rc == REP_PROTOCOL_SUCCESS) {
7153 				rc = tx_allow_value(cmds, cmds_sz, np);
7154 				if (rc == -1)
7155 					rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
7156 				else if (rc)
7157 					rc = perm_add_enabling_values(pcp, np,
7158 					    AUTH_PROP_VALUE);
7159 			}
7160 		}
7161 
7162 		if (rc == REP_PROTOCOL_SUCCESS) {
7163 			granted = perm_granted(pcp);
7164 			rc = map_granted_status(granted, pcp, &auth_string);
7165 			if ((granted == PERM_DENIED) && auth_string) {
7166 				/*
7167 				 * _PERMISSION_DENIED should not cause us
7168 				 * to exit at this point, because we still
7169 				 * want to generate an audit event.
7170 				 */
7171 				rc = REP_PROTOCOL_SUCCESS;
7172 			}
7173 		}
7174 
7175 		pc_free(pcp);
7176 
7177 		if (rc != REP_PROTOCOL_SUCCESS)
7178 			goto cleanout;
7179 
7180 		if (granted == PERM_DENIED) {
7181 			auth_status = ADT_FAILURE;
7182 			auth_ret_value = ADT_FAIL_VALUE_AUTH;
7183 			tx_flag = 0;
7184 		}
7185 #endif /* NATIVE_BUILD */
7186 	} else if (txp->rnp_authorized == RC_AUTH_FAILED) {
7187 		auth_status = ADT_FAILURE;
7188 		auth_ret_value = ADT_FAIL_VALUE_AUTH;
7189 		tx_flag = 0;
7190 	}
7191 
7192 	pg_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
7193 	if (pg_fmri == NULL) {
7194 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7195 		goto cleanout;
7196 	}
7197 	if ((rc = rc_node_get_fmri_or_fragment(np, pg_fmri,
7198 	    REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
7199 		goto cleanout;
7200 	}
7201 
7202 	/*
7203 	 * Parse the transaction commands into a useful form.
7204 	 */
7205 	if ((rc = tx_commit_data_new(cmds, cmds_sz, &tx_data)) !=
7206 	    REP_PROTOCOL_SUCCESS) {
7207 		goto cleanout;
7208 	}
7209 
7210 	if (tx_flag == 0) {
7211 		/* Authorization failed.  Generate audit events. */
7212 		generate_property_events(tx_data, pg_fmri, auth_string,
7213 		    auth_status, auth_ret_value);
7214 		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
7215 		goto cleanout;
7216 	}
7217 
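	/*
	 * A successful commit replaces this property group node with a
	 * fresh rc_node_t at the new generation.  Clone the identifying
	 * fields now; rc_node_relink_child() swaps the new node in below.
	 */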
7218 	nnp = rc_node_alloc();
7219 	if (nnp == NULL) {
7220 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7221 		goto cleanout;
7222 	}
7223 
7224 	nnp->rn_id = np->rn_id;			/* structure assignment */
7225 	nnp->rn_hash = np->rn_hash;
7226 	nnp->rn_name = strdup(np->rn_name);
7227 	nnp->rn_type = strdup(np->rn_type);
7228 	nnp->rn_pgflags = np->rn_pgflags;
7229 
7230 	nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
7231 
7232 	if (nnp->rn_name == NULL || nnp->rn_type == NULL) {
7233 		rc_node_destroy(nnp);
7234 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7235 		goto cleanout;
7236 	}
7237 
7238 	(void) pthread_mutex_lock(&np->rn_lock);
7239 
7240 	/*
7241 	 * We must have all of the old properties in the cache, or the
7242 	 * database deletions could cause inconsistencies.
7243 	 */
7244 	if ((rc = rc_node_fill_children(np, REP_PROTOCOL_ENTITY_PROPERTY)) !=
7245 	    REP_PROTOCOL_SUCCESS) {
7246 		(void) pthread_mutex_unlock(&np->rn_lock);
7247 		rc_node_destroy(nnp);
7248 		goto cleanout;
7249 	}
7250 
7251 	if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
7252 		(void) pthread_mutex_unlock(&np->rn_lock);
7253 		rc_node_destroy(nnp);
7254 		rc = REP_PROTOCOL_FAIL_DELETED;
7255 		goto cleanout;
7256 	}
7257 
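	/*
	 * RC_NODE_OLD means this node has already been superseded by a
	 * newer version of the property group (the flag is set further
	 * below once a commit succeeds), so the caller's handle is stale.
	 */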
7258 	if (np->rn_flags & RC_NODE_OLD) {
7259 		rc_node_rele_flag(np, RC_NODE_USING_PARENT);
7260 		(void) pthread_mutex_unlock(&np->rn_lock);
7261 		rc_node_destroy(nnp);
7262 		rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7263 		goto cleanout;
7264 	}
7265 
7266 	pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
7267 	if (pp == NULL) {
7268 		/* our parent is gone; this node will be deleted next */
7269 		rc_node_destroy(nnp);
7270 		(void) pthread_mutex_lock(&np->rn_lock);
7271 		if (np->rn_flags & RC_NODE_OLD) {
7272 			(void) pthread_mutex_unlock(&np->rn_lock);
7273 			rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7274 			goto cleanout;
7275 		}
7276 		(void) pthread_mutex_unlock(&np->rn_lock);
7277 		rc = REP_PROTOCOL_FAIL_DELETED;
7278 		goto cleanout;
7279 	}
7280 	(void) pthread_mutex_unlock(&pp->rn_lock);
7281 
7282 	/*
7283 	 * prepare for the transaction
7284 	 */
7285 	(void) pthread_mutex_lock(&np->rn_lock);
7286 	if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
7287 		(void) pthread_mutex_unlock(&np->rn_lock);
7288 		(void) pthread_mutex_lock(&pp->rn_lock);
7289 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7290 		(void) pthread_mutex_unlock(&pp->rn_lock);
7291 		rc_node_destroy(nnp);
7292 		rc = REP_PROTOCOL_FAIL_DELETED;
7293 		goto cleanout;
7294 	}
7295 	nnp->rn_gen_id = np->rn_gen_id;
7296 	(void) pthread_mutex_unlock(&np->rn_lock);
7297 
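	/*
	 * rn_lock is dropped across the object-layer call; the
	 * RC_NODE_IN_TX flag taken above keeps other transactions (and
	 * pg-notify registrations) off this node until we are done.
	 */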
7298 	/* Sets nnp->rn_gen_id on success. */
7299 	rc = object_tx_commit(&np->rn_id, tx_data, &nnp->rn_gen_id);
7300 
7301 	(void) pthread_mutex_lock(&np->rn_lock);
7302 	if (rc != REP_PROTOCOL_SUCCESS) {
7303 		rc_node_rele_flag(np, RC_NODE_IN_TX);
7304 		(void) pthread_mutex_unlock(&np->rn_lock);
7305 		(void) pthread_mutex_lock(&pp->rn_lock);
7306 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7307 		(void) pthread_mutex_unlock(&pp->rn_lock);
7308 		rc_node_destroy(nnp);
7309 		rc_node_clear(txp, 0);
7310 		if (rc == REP_PROTOCOL_DONE)
7311 			rc = REP_PROTOCOL_SUCCESS; /* successful empty tx */
7312 		goto cleanout;
7313 	}
7314 
7315 	/*
7316 	 * Notify waiters registered on this pg via rc_pg_notify_setup().
7317 	 */
7318 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7319 	while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
7320 		rc_pg_notify_fire(pnp);
7321 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7322 
7323 	np->rn_flags |= RC_NODE_OLD;
7324 	(void) pthread_mutex_unlock(&np->rn_lock);
7325 
7326 	rc_notify_remove_node(np);
7327 
7328 	/*
7329 	 * replace np with nnp
7330 	 */
7331 	rc_node_relink_child(pp, np, nnp);
7332 
7333 	/*
7334 	 * all done -- clear the transaction.
7335 	 */
7336 	rc_node_clear(txp, 0);
7337 	generate_property_events(tx_data, pg_fmri, auth_string,
7338 	    auth_status, auth_ret_value);
7339 
7340 	rc = REP_PROTOCOL_SUCCESS;
7341 
7342 cleanout:
7343 	free(auth_string);
7344 	free(pg_fmri);
7345 	tx_commit_data_free(tx_data);
7346 	return (rc);
7347 }
7348 
7349 void
7350 rc_pg_notify_init(rc_node_pg_notify_t *pnp)
7351 {
7352 	uu_list_node_init(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
7353 	pnp->rnpn_pg = NULL;
7354 	pnp->rnpn_fd = -1;
7355 }
7356 
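/*
 * Register pnp, with file descriptor fd, for notification of changes to
 * the property group referenced by npp.  Any earlier registration on pnp
 * is fired first.  A successful transaction commit on the pg (see the
 * "Notify waiters" loop above) fires, and thereby removes, every entry
 * on the pg's rn_pg_notify_list.
 */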
7357 int
7358 rc_pg_notify_setup(rc_node_pg_notify_t *pnp, rc_node_ptr_t *npp, int fd)
7359 {
7360 	rc_node_t *np;
7361 
7362 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
7363 
7364 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
7365 		(void) pthread_mutex_unlock(&np->rn_lock);
7366 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7367 	}
7368 
7369 	/*
7370 	 * wait for any transaction in progress to complete
7371 	 */
7372 	if (!rc_node_wait_flag(np, RC_NODE_IN_TX)) {
7373 		(void) pthread_mutex_unlock(&np->rn_lock);
7374 		return (REP_PROTOCOL_FAIL_DELETED);
7375 	}
7376 
7377 	if (np->rn_flags & RC_NODE_OLD) {
7378 		(void) pthread_mutex_unlock(&np->rn_lock);
7379 		return (REP_PROTOCOL_FAIL_NOT_LATEST);
7380 	}
7381 
7382 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7383 	rc_pg_notify_fire(pnp);
7384 	pnp->rnpn_pg = np;
7385 	pnp->rnpn_fd = fd;
7386 	(void) uu_list_insert_after(np->rn_pg_notify_list, NULL, pnp);
7387 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7388 
7389 	(void) pthread_mutex_unlock(&np->rn_lock);
7390 	return (REP_PROTOCOL_SUCCESS);
7391 }
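/*
 * Illustrative sketch only, not code from this file: a hypothetical
 * caller registering a pipe so it can detect the next change to a
 * property group.  "npp" is assumed to be an rc_node_ptr_t referring to
 * a property group; how rc_pg_notify_fire() uses the fd is not shown
 * here.
 *
 *	rc_node_pg_notify_t pnp;
 *	int fds[2];
 *	int rc;
 *
 *	if (pipe(fds) < 0)
 *		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
 *
 *	rc_pg_notify_init(&pnp);
 *	rc = rc_pg_notify_setup(&pnp, npp, fds[1]);
 *	if (rc != REP_PROTOCOL_SUCCESS) {
 *		(void) close(fds[0]);
 *		(void) close(fds[1]);
 *		return (rc);
 *	}
 *	... wait for activity on fds[0], then re-read the pg ...
 *	rc_pg_notify_fini(&pnp);
 */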
7392 
7393 void
7394 rc_pg_notify_fini(rc_node_pg_notify_t *pnp)
7395 {
7396 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7397 	rc_pg_notify_fire(pnp);
7398 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7399 
7400 	uu_list_node_fini(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
7401 }
7402 
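/*
 * Initialize a notification client.  The embedded rni_notify element
 * (rcn_node == NULL, rcn_info pointing back at rnip) acts as this
 * client's marker in the global rc_notify_list; rc_notify_info_wait()
 * advances it past each notification the client has consumed.
 */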
7403 void
7404 rc_notify_info_init(rc_notify_info_t *rnip)
7405 {
7406 	int i;
7407 
7408 	uu_list_node_init(rnip, &rnip->rni_list_node, rc_notify_info_pool);
7409 	uu_list_node_init(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
7410 	    rc_notify_pool);
7411 
7412 	rnip->rni_notify.rcn_node = NULL;
7413 	rnip->rni_notify.rcn_info = rnip;
7414 
7415 	bzero(rnip->rni_namelist, sizeof (rnip->rni_namelist));
7416 	bzero(rnip->rni_typelist, sizeof (rnip->rni_typelist));
7417 
7418 	(void) pthread_cond_init(&rnip->rni_cv, NULL);
7419 
7420 	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7421 		rnip->rni_namelist[i] = NULL;
7422 		rnip->rni_typelist[i] = NULL;
7423 	}
7424 }
7425 
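/*
 * Activate a notification client: mark it RC_NOTIFY_ACTIVE, put it on
 * rc_notify_info_list, and append its marker to the tail of
 * rc_notify_list so that it only sees notifications queued from now on.
 */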
7426 static void
7427 rc_notify_info_insert_locked(rc_notify_info_t *rnip)
7428 {
7429 	assert(MUTEX_HELD(&rc_pg_notify_lock));
7430 
7431 	assert(!(rnip->rni_flags & RC_NOTIFY_ACTIVE));
7432 
7433 	rnip->rni_flags |= RC_NOTIFY_ACTIVE;
7434 	(void) uu_list_insert_after(rc_notify_info_list, NULL, rnip);
7435 	(void) uu_list_insert_before(rc_notify_list, NULL, &rnip->rni_notify);
7436 }
7437 
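/*
 * Deactivate a notification client: set RC_NOTIFY_DRAIN, wake anyone
 * sleeping on it, take it off rc_notify_info_list, discard any leading
 * notifications it was responsible for cleaning up, unlink its marker
 * from rc_notify_list, and wait for its remaining waiters to leave
 * before clearing the DRAIN and ACTIVE flags.
 */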
7438 static void
7439 rc_notify_info_remove_locked(rc_notify_info_t *rnip)
7440 {
7441 	rc_notify_t *me = &rnip->rni_notify;
7442 	rc_notify_t *np;
7443 
7444 	assert(MUTEX_HELD(&rc_pg_notify_lock));
7445 
7446 	assert(rnip->rni_flags & RC_NOTIFY_ACTIVE);
7447 
7448 	assert(!(rnip->rni_flags & RC_NOTIFY_DRAIN));
7449 	rnip->rni_flags |= RC_NOTIFY_DRAIN;
7450 	(void) pthread_cond_broadcast(&rnip->rni_cv);
7451 
7452 	(void) uu_list_remove(rc_notify_info_list, rnip);
7453 
7454 	/*
7455 	 * clean up any notifications at the beginning of the list
7456 	 */
7457 	if (uu_list_first(rc_notify_list) == me) {
7458 		while ((np = uu_list_next(rc_notify_list, me)) != NULL &&
7459 		    np->rcn_info == NULL)
7460 			rc_notify_remove_locked(np);
7461 	}
7462 	(void) uu_list_remove(rc_notify_list, me);
7463 
7464 	while (rnip->rni_waiters) {
7465 		(void) pthread_cond_broadcast(&rc_pg_notify_cv);
7466 		(void) pthread_cond_broadcast(&rnip->rni_cv);
7467 		(void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
7468 	}
7469 
7470 	rnip->rni_flags &= ~(RC_NOTIFY_DRAIN | RC_NOTIFY_ACTIVE);
7471 }
7472 
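/*
 * Add a property group name or type to one of rnip's watch lists (arr is
 * either rni_namelist or rni_typelist).  The string is validated with
 * rc_check_type_name() first, duplicates are ignored, at most
 * RC_NOTIFY_MAX_NAMES entries are kept, and the client is activated if
 * it is not active already.
 */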
7473 static int
7474 rc_notify_info_add_watch(rc_notify_info_t *rnip, const char **arr,
7475     const char *name)
7476 {
7477 	int i;
7478 	int rc;
7479 	char *f;
7480 
7481 	rc = rc_check_type_name(REP_PROTOCOL_ENTITY_PROPERTYGRP, name);
7482 	if (rc != REP_PROTOCOL_SUCCESS)
7483 		return (rc);
7484 
7485 	f = strdup(name);
7486 	if (f == NULL)
7487 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7488 
7489 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7490 
7491 	while (rnip->rni_flags & RC_NOTIFY_EMPTYING)
7492 		(void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
7493 
7494 	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7495 		if (arr[i] == NULL)
7496 			break;
7497 
7498 		/*
7499 		 * Don't add name if it's already being tracked.
7500 		 */
7501 		if (strcmp(arr[i], f) == 0) {
7502 			free(f);
7503 			goto out;
7504 		}
7505 	}
7506 
7507 	if (i == RC_NOTIFY_MAX_NAMES) {
7508 		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7509 		free(f);
7510 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7511 	}
7512 
7513 	arr[i] = f;
7514 
7515 out:
7516 	if (!(rnip->rni_flags & RC_NOTIFY_ACTIVE))
7517 		rc_notify_info_insert_locked(rnip);
7518 
7519 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7520 	return (REP_PROTOCOL_SUCCESS);
7521 }
7522 
7523 int
7524 rc_notify_info_add_name(rc_notify_info_t *rnip, const char *name)
7525 {
7526 	return (rc_notify_info_add_watch(rnip, rnip->rni_namelist, name));
7527 }
7528 
7529 int
7530 rc_notify_info_add_type(rc_notify_info_t *rnip, const char *type)
7531 {
7532 	return (rc_notify_info_add_watch(rnip, rnip->rni_typelist, type));
7533 }
7534 
7535 /*
7536  * Wait for and report an event of interest to rnip, a notification client
7537  */
7538 int
7539 rc_notify_info_wait(rc_notify_info_t *rnip, rc_node_ptr_t *out,
7540     char *outp, size_t sz)
7541 {
7542 	rc_notify_t *np;
7543 	rc_notify_t *me = &rnip->rni_notify;
7544 	rc_node_t *nnp;
7545 	rc_notify_delete_t *ndp;
7546 
7547 	int am_first_info;
7548 
7549 	if (sz > 0)
7550 		outp[0] = 0;
7551 
7552 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7553 
7554 	while ((rnip->rni_flags & (RC_NOTIFY_ACTIVE | RC_NOTIFY_DRAIN)) ==
7555 	    RC_NOTIFY_ACTIVE) {
7556 		/*
7557 		 * If I'm first on the notify list, it is my job to
7558 		 * clean up any notifications I pass by.  I can't do that
7559 		 * if someone is blocking the list from removals, so I
7560 		 * have to wait until they have all drained.
7561 		 */
7562 		am_first_info = (uu_list_first(rc_notify_list) == me);
7563 		if (am_first_info && rc_notify_in_use) {
7564 			rnip->rni_waiters++;
7565 			(void) pthread_cond_wait(&rc_pg_notify_cv,
7566 			    &rc_pg_notify_lock);
7567 			rnip->rni_waiters--;
7568 			continue;
7569 		}
7570 
7571 		/*
7572 		 * Search the list for a node of interest.
7573 		 */
7574 		np = uu_list_next(rc_notify_list, me);
7575 		while (np != NULL && !rc_notify_info_interested(rnip, np)) {
7576 			rc_notify_t *next = uu_list_next(rc_notify_list, np);
7577 
7578 			if (am_first_info) {
7579 				if (np->rcn_info) {
7580 					/*
7581 					 * Passing another client -- stop
7582 					 * cleaning up notifications
7583 					 */
7584 					am_first_info = 0;
7585 				} else {
7586 					rc_notify_remove_locked(np);
7587 				}
7588 			}
7589 			np = next;
7590 		}
7591 
7592 		/*
7593 		 * Nothing of interest -- wait for notification
7594 		 */
7595 		if (np == NULL) {
7596 			rnip->rni_waiters++;
7597 			(void) pthread_cond_wait(&rnip->rni_cv,
7598 			    &rc_pg_notify_lock);
7599 			rnip->rni_waiters--;
7600 			continue;
7601 		}
7602 
7603 		/*
7604 		 * found something to report -- move myself after the
7605 		 * notification and process it.
7606 		 */
7607 		(void) uu_list_remove(rc_notify_list, me);
7608 		(void) uu_list_insert_after(rc_notify_list, np, me);
7609 
7610 		if ((ndp = np->rcn_delete) != NULL) {
7611 			(void) strlcpy(outp, ndp->rnd_fmri, sz);
7612 			if (am_first_info)
7613 				rc_notify_remove_locked(np);
7614 			(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7615 			rc_node_clear(out, 0);
7616 			return (REP_PROTOCOL_SUCCESS);
7617 		}
7618 
7619 		nnp = np->rcn_node;
7620 		assert(nnp != NULL);
7621 
7622 		/*
7623 		 * We can't bump nnp's reference count without grabbing its
7624 		 * lock, and rc_pg_notify_lock is a leaf lock.  So we
7625 		 * temporarily block all removals to keep nnp from
7626 		 * disappearing.
7627 		 */
7628 		rc_notify_in_use++;
7629 		assert(rc_notify_in_use > 0);
7630 		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7631 
7632 		rc_node_assign(out, nnp);
7633 
7634 		(void) pthread_mutex_lock(&rc_pg_notify_lock);
7635 		assert(rc_notify_in_use > 0);
7636 		rc_notify_in_use--;
7637 		if (am_first_info)
7638 			rc_notify_remove_locked(np);
7639 		if (rc_notify_in_use == 0)
7640 			(void) pthread_cond_broadcast(&rc_pg_notify_cv);
7641 		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7642 
7643 		return (REP_PROTOCOL_SUCCESS);
7644 	}
7645 	/*
7646 	 * If we're the last one out, let people know it's clear.
7647 	 */
7648 	if (rnip->rni_waiters == 0)
7649 		(void) pthread_cond_broadcast(&rnip->rni_cv);
7650 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7651 	return (REP_PROTOCOL_DONE);
7652 }
7653 
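/*
 * Deactivate rnip and empty its watch lists.  RC_NOTIFY_EMPTYING keeps
 * concurrent rc_notify_info_add_watch() callers out while the lists are
 * freed with rc_pg_notify_lock dropped.
 */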
7654 static void
7655 rc_notify_info_reset(rc_notify_info_t *rnip)
7656 {
7657 	int i;
7658 
7659 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7660 	if (rnip->rni_flags & RC_NOTIFY_ACTIVE)
7661 		rc_notify_info_remove_locked(rnip);
7662 	assert(!(rnip->rni_flags & (RC_NOTIFY_DRAIN | RC_NOTIFY_EMPTYING)));
7663 	rnip->rni_flags |= RC_NOTIFY_EMPTYING;
7664 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7665 
7666 	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7667 		if (rnip->rni_namelist[i] != NULL) {
7668 			free((void *)rnip->rni_namelist[i]);
7669 			rnip->rni_namelist[i] = NULL;
7670 		}
7671 		if (rnip->rni_typelist[i] != NULL) {
7672 			free((void *)rnip->rni_typelist[i]);
7673 			rnip->rni_typelist[i] = NULL;
7674 		}
7675 	}
7676 
7677 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7678 	rnip->rni_flags &= ~RC_NOTIFY_EMPTYING;
7679 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7680 }
7681 
7682 void
7683 rc_notify_info_fini(rc_notify_info_t *rnip)
7684 {
7685 	rc_notify_info_reset(rnip);
7686 
7687 	uu_list_node_fini(rnip, &rnip->rni_list_node, rc_notify_info_pool);
7688 	uu_list_node_fini(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
7689 	    rc_notify_pool);
7690 }
7691 }
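/*
 * Illustrative sketch only, not code from this file: a hypothetical
 * consumer of the rc_notify_info interfaces.  The watch strings and the
 * handling (and initialization) of "out" are assumptions; only the
 * rc_notify_info_* calls themselves come from this file.
 *
 *	rc_notify_info_t rni;
 *	rc_node_ptr_t out;
 *	char fmri[REP_PROTOCOL_FMRI_LEN];
 *	int rc;
 *
 *	rc_notify_info_init(&rni);
 *	(void) rc_notify_info_add_name(&rni, "general");
 *	(void) rc_notify_info_add_type(&rni, "framework");
 *
 *	while ((rc = rc_notify_info_wait(&rni, &out, fmri,
 *	    sizeof (fmri))) == REP_PROTOCOL_SUCCESS) {
 *		if (fmri[0] != '\0')
 *			... the pg named by fmri was deleted ...
 *		else
 *			... the changed pg is now referenced by out ...
 *	}
 *
 *	rc_notify_info_fini(&rni);
 *
 * rc_notify_info_wait() returns REP_PROTOCOL_DONE once the client has
 * been deactivated, which is what ends the loop above.
 */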