1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  * rc_node.c - In-memory SCF object management
28  *
29  * This layer manages the in-memory cache (the Repository Cache) of SCF
30  * data.  Read requests are usually satisfied from here, but may require
31  * load calls to the "object" layer.  Modify requests always write-through
32  * to the object layer.
33  *
34  * SCF data comprises scopes, services, instances, snapshots, snaplevels,
35  * property groups, properties, and property values.  All but the last are
36  * known here as "entities" and are represented by rc_node_t data
37  * structures.  (Property values are kept in the rn_values member of the
38  * respective property, not as separate objects.)  All entities besides
39  * the "localhost" scope have some entity as a parent, and therefore form
40  * a tree.
41  *
42  * The entity tree is rooted at rc_scope, which rc_node_init() initializes to
43  * the "localhost" scope.  The tree is filled in from the database on-demand
44  * by rc_node_fill_children().
45  *
46  * rc_node_t's are also placed in the cache_hash[] hash table, for rapid
47  * lookup.
48  *
49  * Multiple threads may service client requests, so access to each
50  * rc_node_t is synchronized by its rn_lock member.  Some fields are
51  * protected by bits in the rn_flags field instead, to support operations
52  * which need to drop rn_lock, for example to respect locking order.  Such
53  * flags should be manipulated with the rc_node_{hold,rele}_flag()
54  * functions.
55  *
56  * We track references to nodes to tell when they can be free()d.  rn_refs
57  * should be incremented with rc_node_hold() on the creation of client
58  * references (rc_node_ptr_t's and rc_iter_t's).  rn_erefs ("ephemeral
59  * references") should be incremented when a pointer is read into a local
60  * variable of a thread, with rc_node_hold_ephemeral_locked().  This
61  * hasn't been fully implemented, however, so rc_node_rele() tolerates
62  * rn_erefs being 0.  Some code which predates rn_erefs counts ephemeral
63  * references in rn_refs.  Other references are tracked by the
64  * rn_other_refs field and the RC_NODE_DEAD, RC_NODE_IN_PARENT,
65  * RC_NODE_OLD, and RC_NODE_ON_FORMER flags.
66  *
67  * Locking rules: To dereference an rc_node_t * (usually to lock it), you must
68  * have a hold (rc_node_hold()) on it or otherwise be sure that it hasn't been
69  * rc_node_destroy()ed (hold a lock on its parent or child, hold a flag,
70  * etc.).  Once you have locked an rc_node_t you must check its rn_flags for
71  * RC_NODE_DEAD before you can use it.  This is usually done with the
72  * rc_node_{wait,hold}_flag() functions (often via the rc_node_check_*()
73  * functions & RC_NODE_*() macros), which fail if the object has died.
74  *
75  * When a transactional node (property group or snapshot) is updated,
76  * a new node takes the place of the old node in the global hash and the
77  * old node is hung off of the rn_former list of the new node.  At the
78  * same time, all of its children have their rn_parent_ref pointer set,
79  * and any holds they have are reflected in the old node's rn_other_refs
80  * count.  This is automatically kept up to date until the final reference
81  * to the subgraph is dropped, at which point the node is unrefed and
82  * destroyed, along with all of its children.
83  *
84  * Because name service lookups may take a long time and, more importantly,
85  * may trigger additional accesses to the repository, perm_granted() must be
86  * called without holding any locks.
87  *
88  * An ITER_START for a non-ENTITY_VALUE induces an rc_node_fill_children()
89  * call via rc_node_setup_iter() to populate the rn_children uu_list of the
90  * rc_node_t * in question and a call to uu_list_walk_start() on that list.  For
91  * ITER_READ, rc_iter_next() uses uu_list_walk_next() to find the next
92  * appropriate child.
93  *
94  * An ITER_START for an ENTITY_VALUE makes sure the node has its values
95  * filled, and sets up the iterator.  An ITER_READ_VALUE just copies out
96  * the proper values and updates the offset information.
97  *
98  * To allow aliases, snapshots are implemented with a level of indirection.
99  * A snapshot rc_node_t has a snapid which refers to an rc_snapshot_t in
100  * snapshot.c which contains the authoritative snaplevel information.  The
101  * snapid is "assigned" by rc_attach_snapshot().
102  *
103  * We provide the client layer with rc_node_ptr_t's to reference objects.
104  * Objects referred to by them are automatically held & released by
105  * rc_node_assign() & rc_node_clear().  The RC_NODE_PTR_*() macros are used at
106  * client.c entry points to read the pointers.  They fetch the pointer to the
107  * object, return (from the function) if it is dead, and lock, hold, or hold
108  * a flag of the object.
109  */
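/*
 * Illustrative sketch of the dereference discipline described above (not
 * actual code from this file; error handling is simplified, and the choice
 * of _DELETED as the failure code is just an example):
 *
 *	rc_node_t *np = ...;		pointer read under its source's lock
 *
 *	(void) pthread_mutex_lock(&np->rn_lock);
 *	if (np->rn_flags & RC_NODE_DEAD) {
 *		(void) pthread_mutex_unlock(&np->rn_lock);
 *		return (REP_PROTOCOL_FAIL_DELETED);
 *	}
 *	rc_node_hold_locked(np);	keep np alive while we use it
 *	(void) pthread_mutex_unlock(&np->rn_lock);
 *
 *	... use np ...
 *
 *	rc_node_rele(np);		may destroy np if it was the last ref
 */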
110 
111 /*
112  * Permission checking is authorization-based: some operations may only
113  * proceed if the user has been assigned at least one of a set of
114  * authorization strings.  The set of enabling authorizations depends on the
115  * operation and the target object.  The set of authorizations assigned to
116  * a user is determined by an algorithm defined in libsecdb.
117  *
118  * The fastest way to decide whether the two sets intersect is by entering the
119  * strings into a hash table and detecting collisions, which takes linear time
120  * in the total size of the sets.  The exception is the authorization
121  * patterns which may be assigned to users: without advanced
122  * pattern-matching algorithms, checking each such pattern takes O(n) in
123  * the number of enabling authorizations.
124  *
125  * We can achieve some practical speed-ups by noting that if we enter all of
126  * the authorizations from one of the sets into the hash table we can merely
127  * check the elements of the second set for existence without adding them.
128  * This reduces memory requirements and hash table clutter.  The enabling set
129  * is well suited for this because it is internal to configd (for now, at
130  * least).  Combine this with short-circuiting and we can even minimize the
131  * number of queries to the security databases (user_attr & prof_attr).
132  *
133  * To force this usage onto clients we provide functions for adding
134  * authorizations to the enabling set of a permission context structure
135  * (perm_add_*()) and one to decide whether the user associated with the
136  * current door call client possesses any of them (perm_granted()).
137  *
138  * At some point, a generic version of this should move to libsecdb.
139  *
140  * While entering the enabling strings into the hash table, we keep track
141  * of which is the most specific for use in generating auditing events.
142  * See the "Collecting the Authorization String" section of the "SMF Audit
143  * Events" block comment below.
144  */
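/*
 * A minimal usage sketch of the interfaces described above (illustrative
 * only; the real callers, later in this file, also add property-group and
 * instance specific authorizations and map the result with
 * map_granted_status()):
 *
 *	permcheck_t *pcp;
 *	perm_status_t granted;
 *
 *	if ((pcp = pc_create()) == NULL)
 *		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
 *
 *	if (perm_add_enabling(pcp, AUTH_MODIFY) == REP_PROTOCOL_SUCCESS)
 *		granted = perm_granted(pcp);	checks the door caller's auths
 *
 *	pc_free(pcp);
 */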
145 
146 /*
147  * Composition is the combination of sets of properties.  The sets are ordered
148  * and properties in higher sets obscure properties of the same name in lower
149  * sets.  Here we present a composed view of an instance's properties as the
150  * union of its properties and its service's properties.  Similarly the
151  * properties of snaplevels are combined to form a composed view of the
152  * properties of a snapshot (which should match the composed view of the
153  * properties of the instance when the snapshot was taken).
154  *
155  * In terms of the client interface, the client may request that a property
156  * group iterator for an instance or snapshot be composed.  Property groups
157  * traversed by such an iterator may not have the target entity as a parent.
158  * Similarly, the properties traversed by a property iterator for those
159  * property groups may not have the property groups iterated as parents.
160  *
161  * Implementation requires that iterators for instances and snapshots be
162  * composition-savvy, and that we have a "composed property group" entity
163  * which represents the composition of a number of property groups.  Iteration
164  * over "composed property groups" yields properties which may have different
165  * parents, but for all other operations a composed property group behaves
166  * like the top-most property group it represents.
167  *
168  * The implementation is based on the rn_cchain[] array of rc_node_t pointers
169  * in rc_node_t.  For instances, the pointers point to the instance and its
170  * parent service.  For snapshots they point to the child snaplevels, and for
171  * composed property groups they point to property groups.  A composed
172  * iterator carries an index into rn_cchain[].  Thus most of the magic ends up
173  * in the rc_iter_*() code.
174  */
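/*
 * Conceptually, a composed iterator behaves like the following pseudocode
 * (a simplification; the real logic lives in the rc_iter_*() code and must
 * also cope with nodes dying or being replaced mid-walk):
 *
 *	for (i = 0; i < number of entries in np->rn_cchain; i++) {
 *		for each child c of np->rn_cchain[i]:
 *			if no earlier (higher priority) chain member
 *			    already produced a child with c's name:
 *				yield c
 *	}
 */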
175 /*
176  * SMF Audit Events:
177  * ================
178  *
179  * To maintain security, SMF generates audit events whenever
180  * privileged operations are attempted.  See the System Administration
181  * Guide: Security Services answerbook for a discussion of the Solaris
182  * audit system.
183  *
184  * The SMF audit event codes are defined in adt_event.h by symbols
185  * starting with ADT_smf_ and are described in audit_event.txt.  The
186  * audit record structures are defined in the SMF section of adt.xml.
187  * adt.xml is used to automatically generate adt_event.h which
188  * contains the definitions that we code to in this file.  For the
189  * most part the audit events map closely to actions that you would
190  * perform with svcadm or svccfg, but there are some special cases
191  * which we'll discuss later.
192  *
193  * The software associated with SMF audit events falls into three
194  * categories:
195  * 	- collecting information to be written to the audit
196  *	  records
197  *	- using the adt_* functions in
198  *	  usr/src/lib/libbsm/common/adt.c to generate the audit
199  *	  records.
200  * 	- handling special cases
201  *
202  * Collecting Information:
203  * ----------------------
204  *
205  * Almost all of the audit events require the FMRI of the affected
206  * object and the authorization string that was used.  The one
207  * exception is ADT_smf_annotation which we'll talk about later.
208  *
209  * Collecting the FMRI:
210  *
211  * The rc_node structure has a member called rn_fmri which points to
212  * its FMRI.  This is initialized by a call to rc_node_build_fmri()
213  * when the node's parent is established.  The reason for doing it
214  * at this time is that a node's FMRI is basically the concatenation
215  * of the parent's FMRI and the node's name with the appropriate
216  * decoration.  rc_node_build_fmri() does this concatenation and
217  * decorating.  It is called from rc_node_link_child() and
218  * rc_node_relink_child() where a node is linked to its parent.
219  *
220  * rc_node_get_fmri_or_fragment() is called to retrieve a node's FMRI
221  * when it is needed.  It returns rn_fmri if it is set.  If the node
222  * is at the top level, however, rn_fmri won't be set because it was
223  * never linked to a parent.  In this case,
224  * rc_node_get_fmri_or_fragment() constructs an FMRI fragment based on
225  * its node type and its name, rn_name.
226  *
227  * Collecting the Authorization String:
228  *
229  * Naturally, the authorization string is captured during the
230  * authorization checking process.  Acceptable authorization strings
231  * are added to a permcheck_t hash table as noted in the section on
232  * permission checking above.  Once all entries have been added to the
233  * hash table, perm_granted() is called.  If the client is authorized,
234  * perm_granted() returns with pc_auth_string of the permcheck_t
235  * structure pointing to the authorization string.
236  *
237  * This works fine if the client is authorized, but what happens if
238  * the client is not authorized?  We need to report the required
239  * authorization string.  This is the authorization that would have
240  * been used if permission had been granted.  perm_granted() will
241  * find no match, so it needs to decide which string in the hash
242  * table to use as the required authorization string.  It needs to do
243  * this, because configd is still going to generate an event.  A
244  * design decision was made to use the most specific authorization
245  * in the hash table.  The pc_auth_type enum designates the
246  * specificity of an authorization string.  For example, an
247  * authorization string that is declared in an instance PG is more
248  * specific than one that is declared in a service PG.
249  *
250  * The pc_add() function keeps track of the most specific
251  * authorization in the hash table.  It does this using the
252  * pc_specific and pc_specific_type members of the permcheck
253  * structure.  pc_add() updates these members whenever a more
254  * specific authorization string is added to the hash table.  Thus, if
255  * an authorization match is not found, perm_granted() will return
256  * with pc_auth_string in the permcheck_t pointing to the string that
257  * is referenced by pc_specific.
258  *
259  * Generating the Audit Events:
260  * ===========================
261  *
262  * As the functions in this file process requests for clients of
263  * configd, they gather the information that is required for an audit
264  * event.  Eventually, the request processing gets to the point where
265  * the authorization is rejected or to the point where the requested
266  * action was attempted.  At these two points smf_audit_event() is
267  * called.
268  *
269  * smf_audit_event() takes 4 parameters:
270  * 	- the event ID which is one of the ADT_smf_* symbols from
271  *	  adt_event.h.
272  * 	- status to pass to adt_put_event()
273  * 	- return value to pass to adt_put_event()
274  * 	- the event data (see audit_event_data structure)
275  *
276  * All interactions with the auditing software require an audit
277  * session.  We use one audit session per configd client.  We keep
278  * track of the audit session in the repcache_client structure.
279  * smf_audit_event() calls get_audit_session() to get the session
280  * pointer.
281  *
282  * smf_audit_event() then calls adt_alloc_event() to allocate an
283  * adt_event_data union which is defined in adt_event.h, copies the
284  * data into the appropriate members of the union and calls
285  * adt_put_event() to generate the event.
286  *
287  * Special Cases:
288  * =============
289  *
290  * There are three major types of special cases:
291  *
292  * 	- gathering event information for each action in a
293  *	  transaction
294  * 	- Higher level events represented by special property
295  *	  group/property name combinations.  Many of these are
296  *	  restarter actions.
297  * 	- ADT_smf_annotation event
298  *
299  * Processing Transaction Actions:
300  * ------------------------------
301  *
302  * A transaction can contain multiple actions to modify, create or
303  * delete one or more properties.  We need to capture information so
304  * that we can generate an event for each property action.  The
305  * transaction information is stored in a tx_commit_data_t, and
306  * object.c provides accessor functions to retrieve data from this
307  * structure.  rc_tx_commit() obtains a tx_commit_data_t by calling
308  * tx_commit_data_new() and passes this to object_tx_commit() to
309  * commit the transaction.  Then we call generate_property_events() to
310  * generate an audit event for each property action.
311  *
312  * Special Properties:
313  * ------------------
314  *
315  * There are combinations of property group/property name that are special.
316  * They are special because they have specific meaning to startd.  startd
317  * interprets them in a service-independent fashion.
318  * restarter_actions/refresh and general/enabled are two examples of these.
319  * A special event is generated for these properties in addition to the
320  * regular property event described in the previous section.  The special
321  * properties are declared as an array of audit_special_prop_item
322  * structures at special_props_list in rc_node.c.
323  *
324  * In the previous section, we mentioned the
325  * generate_property_events() function that generates an event for
326  * every property action.  Before generating the event,
327  * generate_property_events() calls special_property_event().
328  * special_property_event() checks to see if the action involves a
329  * special property.  If it does, it generates a special audit
330  * event.
331  *
332  * ADT_smf_annotation event:
333  * ------------------------
334  *
335  * This is a special event unlike any other.  It allows the svccfg
336  * program to store an annotation in the event log before a series
337  * of transactions is processed.  It is used with the import and
338  * apply svccfg commands.  svccfg uses the rep_protocol_annotation
339  * message to pass the operation (import or apply) and the file name
340  * to configd.  The set_annotation() function in client.c stores
341  * these away in the repcache_client structure.  The address of
342  * this structure is saved in the thread_info structure.
343  *
344  * Before it generates any events, smf_audit_event() calls
345  * smf_annotation_event().  smf_annotation_event() calls
346  * client_annotation_needed() which is defined in client.c.  If an
347  * annotation is needed client_annotation_needed() returns the
348  * operation and filename strings that were saved from the
349  * rep_protocol_annotation message.  smf_annotation_event() then
350  * generates the ADT_smf_annotation event.
351  */
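/*
 * A hedged sketch of how the pieces above fit together when a request is
 * processed (illustrative only; the event ID, failure value and the set of
 * audit_event_data fields filled in depend on the specific operation):
 *
 *	audit_event_data_t audit_data;
 *
 *	audit_data.ed_fmri = fmri;		affected object
 *	audit_data.ed_auth = auth_string;	auth used, or required
 *
 *	if (perm_rc != REP_PROTOCOL_SUCCESS) {
 *		smf_audit_event(ADT_smf_refresh, ADT_FAILURE,
 *		    ADT_FAIL_VALUE_AUTH, &audit_data);
 *		return (perm_rc);
 *	}
 *
 *	... attempt the operation ...
 *
 *	smf_audit_event(ADT_smf_refresh, ADT_SUCCESS, ADT_SUCCESS,
 *	    &audit_data);
 */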
352 
353 #include <assert.h>
354 #include <atomic.h>
355 #include <bsm/adt_event.h>
356 #include <errno.h>
357 #include <libuutil.h>
358 #include <libscf.h>
359 #include <libscf_priv.h>
360 #include <pthread.h>
361 #include <pwd.h>
362 #include <stdio.h>
363 #include <stdlib.h>
364 #include <strings.h>
365 #include <sys/types.h>
366 #include <syslog.h>
367 #include <unistd.h>
368 #include <secdb.h>
369 
370 #include "configd.h"
371 
372 #define	AUTH_PREFIX		"solaris.smf."
373 #define	AUTH_MANAGE		AUTH_PREFIX "manage"
374 #define	AUTH_MODIFY		AUTH_PREFIX "modify"
375 #define	AUTH_MODIFY_PREFIX	AUTH_MODIFY "."
376 #define	AUTH_PG_ACTIONS		SCF_PG_RESTARTER_ACTIONS
377 #define	AUTH_PG_ACTIONS_TYPE	SCF_PG_RESTARTER_ACTIONS_TYPE
378 #define	AUTH_PG_GENERAL		SCF_PG_GENERAL
379 #define	AUTH_PG_GENERAL_TYPE	SCF_PG_GENERAL_TYPE
380 #define	AUTH_PG_GENERAL_OVR	SCF_PG_GENERAL_OVR
381 #define	AUTH_PG_GENERAL_OVR_TYPE  SCF_PG_GENERAL_OVR_TYPE
382 #define	AUTH_PROP_ACTION	"action_authorization"
383 #define	AUTH_PROP_ENABLED	"enabled"
384 #define	AUTH_PROP_MODIFY	"modify_authorization"
385 #define	AUTH_PROP_VALUE		"value_authorization"
386 #define	AUTH_PROP_READ		"read_authorization"
387 
388 #define	MAX_VALID_CHILDREN 3
389 
390 /*
391  * The ADT_smf_* symbols may not be defined on the build machine.  Because
392  * of this, we do not want to compile the _smf_audit_event() function when
393  * doing native builds.
394  */
395 #ifdef	NATIVE_BUILD
396 #define	smf_audit_event(i, s, r, d)
397 #else
398 #define	smf_audit_event(i, s, r, d)	_smf_audit_event(i, s, r, d)
399 #endif	/* NATIVE_BUILD */
400 
401 typedef struct rc_type_info {
402 	uint32_t	rt_type;		/* matches array index */
403 	uint32_t	rt_num_ids;
404 	uint32_t	rt_name_flags;
405 	uint32_t	rt_valid_children[MAX_VALID_CHILDREN];
406 } rc_type_info_t;
407 
408 #define	RT_NO_NAME	-1U
409 
410 static rc_type_info_t rc_types[] = {
411 	{REP_PROTOCOL_ENTITY_NONE, 0, RT_NO_NAME},
412 	{REP_PROTOCOL_ENTITY_SCOPE, 0, 0,
413 	    {REP_PROTOCOL_ENTITY_SERVICE, REP_PROTOCOL_ENTITY_SCOPE}},
414 	{REP_PROTOCOL_ENTITY_SERVICE, 0, UU_NAME_DOMAIN | UU_NAME_PATH,
415 	    {REP_PROTOCOL_ENTITY_INSTANCE, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
416 	{REP_PROTOCOL_ENTITY_INSTANCE, 1, UU_NAME_DOMAIN,
417 	    {REP_PROTOCOL_ENTITY_SNAPSHOT, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
418 	{REP_PROTOCOL_ENTITY_SNAPSHOT, 2, UU_NAME_DOMAIN,
419 	    {REP_PROTOCOL_ENTITY_SNAPLEVEL, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
420 	{REP_PROTOCOL_ENTITY_SNAPLEVEL, 4, RT_NO_NAME,
421 	    {REP_PROTOCOL_ENTITY_PROPERTYGRP}},
422 	{REP_PROTOCOL_ENTITY_PROPERTYGRP, 5, UU_NAME_DOMAIN,
423 	    {REP_PROTOCOL_ENTITY_PROPERTY}},
424 	{REP_PROTOCOL_ENTITY_CPROPERTYGRP, 0, UU_NAME_DOMAIN,
425 	    {REP_PROTOCOL_ENTITY_PROPERTY}},
426 	{REP_PROTOCOL_ENTITY_PROPERTY, 7, UU_NAME_DOMAIN},
427 	{-1UL}
428 };
429 #define	NUM_TYPES	((sizeof (rc_types) / sizeof (*rc_types)))
430 
431 /* Element of a permcheck_t hash table. */
432 struct pc_elt {
433 	struct pc_elt	*pce_next;
434 	char		pce_auth[1];
435 };
436 
437 /*
438  * If an authorization fails, we must decide which of the elements in the
439  * permcheck hash table to use in the audit event.  That is to say of all
440  * the strings in the hash table, we must choose one and use it in the audit
441  * event.  It is desirable to use the most specific string in the audit
442  * event.
443  *
444  * The pc_auth_type specifies the types (sources) of authorization
445  * strings.  The enum is ordered in increasing specificity.
446  */
447 typedef enum pc_auth_type {
448 	PC_AUTH_NONE = 0,	/* no auth string available. */
449 	PC_AUTH_SMF,		/* strings coded into SMF. */
450 	PC_AUTH_SVC,		/* strings specified in PG of a service. */
451 	PC_AUTH_INST		/* strings specified in PG of an instance. */
452 } pc_auth_type_t;
453 
454 /*
455  * The following enum is used to represent the results of the checks to see
456  * if the client has the appropriate permissions to perform an action.
457  */
458 typedef enum perm_status {
459 	PERM_DENIED = 0,	/* Permission denied. */
460 	PERM_GRANTED,		/* Client has authorizations. */
461 	PERM_GONE,		/* Door client went away. */
462 	PERM_FAIL		/* Generic failure. e.g. resources */
463 } perm_status_t;
464 
465 /* An authorization set hash table. */
466 typedef struct {
467 	struct pc_elt	**pc_buckets;
468 	uint_t		pc_bnum;		/* number of buckets */
469 	uint_t		pc_enum;		/* number of elements */
470 	struct pc_elt	*pc_specific;		/* most specific element */
471 	pc_auth_type_t	pc_specific_type;	/* type of pc_specific */
472 	char		*pc_auth_string;	/* authorization string */
473 						/* for audit events */
474 } permcheck_t;
475 
476 /*
477  * Structure for holding audit event data.  Not all events use all members
478  * of the structure.
479  */
480 typedef struct audit_event_data {
481 	char		*ed_auth;	/* authorization string. */
482 	char		*ed_fmri;	/* affected FMRI. */
483 	char		*ed_snapname;	/* name of snapshot. */
484 	char		*ed_old_fmri;	/* old fmri in attach case. */
485 	char		*ed_old_name;	/* old snapshot in attach case. */
486 	char		*ed_type;	/* prop. group or prop. type. */
487 	char		*ed_prop_value;	/* property value. */
488 } audit_event_data_t;
489 
490 /*
491  * Pointer to function to do special processing to get audit event ID.
492  * Audit event IDs are defined in /usr/include/bsm/adt_event.h.  Function
493  * returns 0 if ID successfully retrieved.  Otherwise it returns -1.
494  */
495 typedef int (*spc_getid_fn_t)(tx_commit_data_t *, size_t, const char *,
496     au_event_t *);
497 static int general_enable_id(tx_commit_data_t *, size_t, const char *,
498     au_event_t *);
499 
500 static uu_list_pool_t *rc_children_pool;
501 static uu_list_pool_t *rc_pg_notify_pool;
502 static uu_list_pool_t *rc_notify_pool;
503 static uu_list_pool_t *rc_notify_info_pool;
504 
505 static rc_node_t *rc_scope;
506 
507 static pthread_mutex_t	rc_pg_notify_lock = PTHREAD_MUTEX_INITIALIZER;
508 static pthread_cond_t	rc_pg_notify_cv = PTHREAD_COND_INITIALIZER;
509 static uint_t		rc_notify_in_use;	/* blocks removals */
510 
511 /*
512  * Some combinations of property group/property name require a special
513  * audit event to be generated when there is a change.
514  * audit_special_prop_item_t is used to specify these special cases.  The
515  * special_props_list array defines a list of these special properties.
516  */
517 typedef struct audit_special_prop_item {
518 	const char	*api_pg_name;	/* property group name. */
519 	const char	*api_prop_name;	/* property name. */
520 	au_event_t	api_event_id;	/* event id or 0. */
521 	spc_getid_fn_t	api_event_func; /* function to get event id. */
522 } audit_special_prop_item_t;
523 
524 /*
525  * Native builds are done using the build machine's standard include
526  * files.  These files may not yet have the definitions for the ADT_smf_*
527  * symbols.  Thus, we do not compile this table when doing native builds.
528  */
529 #ifndef	NATIVE_BUILD
530 /*
531  * The following special_props_list array specifies property group/property
532  * name combinations that have specific meaning to startd.  A special event
533  * is generated for these combinations in addition to the regular property
534  * event.
535  *
536  * At run time this array gets sorted.  See the call to qsort(3C) in
537  * rc_node_init().  The array is sorted, so that bsearch(3C) can be used
538  * to do lookups.
539  */
540 static audit_special_prop_item_t special_props_list[] = {
541 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADED, ADT_smf_degrade,
542 	    NULL},
543 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADE_IMMEDIATE,
544 	    ADT_smf_immediate_degrade, NULL},
545 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_OFF, ADT_smf_clear, NULL},
546 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON,
547 	    ADT_smf_maintenance, NULL},
548 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMEDIATE,
549 	    ADT_smf_immediate_maintenance, NULL},
550 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMTEMP,
551 	    ADT_smf_immtmp_maintenance, NULL},
552 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_TEMPORARY,
553 	    ADT_smf_tmp_maintenance, NULL},
554 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_REFRESH, ADT_smf_refresh, NULL},
555 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTART, ADT_smf_restart, NULL},
556 	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTORE, ADT_smf_clear, NULL},
557 	{SCF_PG_OPTIONS, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
558 	{SCF_PG_OPTIONS_OVR, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
559 	{SCF_PG_GENERAL, SCF_PROPERTY_ENABLED, 0, general_enable_id},
560 	{SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 0, general_enable_id}
561 };
562 #define	SPECIAL_PROP_COUNT	(sizeof (special_props_list) /\
563 	sizeof (audit_special_prop_item_t))
564 #endif	/* NATIVE_BUILD */
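/*
 * One plausible shape for the comparator used with qsort(3C)/bsearch(3C)
 * on special_props_list (a hypothetical sketch; the actual comparator is
 * defined with the lookup code and may differ in detail):
 *
 *	static int
 *	special_prop_compare(const void *l, const void *r)	hypothetical
 *	{
 *		const audit_special_prop_item_t *lp = l;
 *		const audit_special_prop_item_t *rp = r;
 *		int rc;
 *
 *		rc = strcmp(lp->api_pg_name, rp->api_pg_name);
 *		if (rc == 0)
 *			rc = strcmp(lp->api_prop_name, rp->api_prop_name);
 *		return (rc);
 *	}
 */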
565 
566 /*
567  * We support an arbitrary number of clients interested in events for certain
568  * types of changes.  Each client is represented by an rc_notify_info_t, and
569  * all clients are chained onto the rc_notify_info_list.
570  *
571  * The rc_notify_list is the global notification list.  Each entry is of
572  * type rc_notify_t, which is embedded in one of three other structures:
573  *
574  *	rc_node_t		property group update notification
575  *	rc_notify_delete_t	object deletion notification
576  *	rc_notify_info_t	notification clients
577  *
578  * Which type of object is determined by which pointer in the rc_notify_t is
579  * non-NULL.
580  *
581  * New notifications and clients are added to the end of the list.
582  * Notifications no-one is interested in are never added to the list.
583  *
584  * Clients use their position in the list to track which notifications they
585  * have not yet reported.  As they process notifications, they move forward
586  * in the list past them.  There is always a client at the beginning of the
587  * list -- as he moves past notifications, he removes them from the list and
588  * cleans them up.
589  *
590  * The rc_pg_notify_lock protects all notification state.  The rc_pg_notify_cv
591  * is used for global signalling, and each client has a cv which he waits for
592  * events of interest on.
593  */
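/*
 * Sketch of the discrimination rule described above (the real checks appear
 * in rc_notify_info_interested() and rc_notify_remove_locked() below):
 *
 *	if (np->rcn_node != NULL)		property group update
 *	else if (np->rcn_delete != NULL)	deletion notification
 *	else (np->rcn_info != NULL)		a client's own marker
 */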
594 static uu_list_t	*rc_notify_info_list;
595 static uu_list_t	*rc_notify_list;
596 
597 #define	HASH_SIZE	512
598 #define	HASH_MASK	(HASH_SIZE - 1)
599 
600 #pragma align 64(cache_hash)
601 static cache_bucket_t cache_hash[HASH_SIZE];
602 
603 #define	CACHE_BUCKET(h)		(&cache_hash[(h) & HASH_MASK])
604 
605 
606 static void rc_node_no_client_refs(rc_node_t *np);
607 
608 
609 static uint32_t
610 rc_node_hash(rc_node_lookup_t *lp)
611 {
612 	uint32_t type = lp->rl_type;
613 	uint32_t backend = lp->rl_backend;
614 	uint32_t mainid = lp->rl_main_id;
615 	uint32_t *ids = lp->rl_ids;
616 
617 	rc_type_info_t *tp = &rc_types[type];
618 	uint32_t num_ids;
619 	uint32_t left;
620 	uint32_t hash;
621 
622 	assert(backend == BACKEND_TYPE_NORMAL ||
623 	    backend == BACKEND_TYPE_NONPERSIST);
624 
625 	assert(type > 0 && type < NUM_TYPES);
626 	num_ids = tp->rt_num_ids;
627 
628 	left = MAX_IDS - num_ids;
629 	assert(num_ids <= MAX_IDS);
630 
631 	hash = type * 7 + mainid * 5 + backend;
632 
633 	while (num_ids-- > 0)
634 		hash = hash * 11 + *ids++ * 7;
635 
636 	/*
637 	 * the rest should be zeroed
638 	 */
639 	while (left-- > 0)
640 		assert(*ids++ == 0);
641 
642 	return (hash);
643 }
644 
645 static int
646 rc_node_match(rc_node_t *np, rc_node_lookup_t *l)
647 {
648 	rc_node_lookup_t *r = &np->rn_id;
649 	rc_type_info_t *tp;
650 	uint32_t type;
651 	uint32_t num_ids;
652 
653 	if (r->rl_main_id != l->rl_main_id)
654 		return (0);
655 
656 	type = r->rl_type;
657 	if (type != l->rl_type)
658 		return (0);
659 
660 	assert(type > 0 && type < NUM_TYPES);
661 
662 	tp = &rc_types[r->rl_type];
663 	num_ids = tp->rt_num_ids;
664 
665 	assert(num_ids <= MAX_IDS);
666 	while (num_ids-- > 0)
667 		if (r->rl_ids[num_ids] != l->rl_ids[num_ids])
668 			return (0);
669 
670 	return (1);
671 }
672 
673 /*
674  * Register an ephemeral reference to np.  This should be done while both
675  * the persistent reference from which the np pointer was read is locked
676  * and np itself is locked.  This guarantees that another thread which
677  * thinks it has the last reference will yield without destroying the
678  * node.
679  */
680 static void
681 rc_node_hold_ephemeral_locked(rc_node_t *np)
682 {
683 	assert(MUTEX_HELD(&np->rn_lock));
684 
685 	++np->rn_erefs;
686 }
687 
688 /*
689  * the "other" references on a node are maintained in an atomically
690  * updated refcount, rn_other_refs.  This can be bumped from arbitrary
691  * context, and tracks references to a possibly out-of-date node's children.
692  *
693  * To prevent the node from disappearing between the final drop of
694  * rn_other_refs and the unref handling, rn_other_refs_held is bumped on
695  * 0->1 transitions and decremented (with the node lock held) on 1->0
696  * transitions.
697  */
698 static void
699 rc_node_hold_other(rc_node_t *np)
700 {
701 	if (atomic_add_32_nv(&np->rn_other_refs, 1) == 1) {
702 		atomic_add_32(&np->rn_other_refs_held, 1);
703 		assert(np->rn_other_refs_held > 0);
704 	}
705 	assert(np->rn_other_refs > 0);
706 }
707 
708 /*
709  * No node locks may be held
710  */
711 static void
712 rc_node_rele_other(rc_node_t *np)
713 {
714 	assert(np->rn_other_refs > 0);
715 	if (atomic_add_32_nv(&np->rn_other_refs, -1) == 0) {
716 		(void) pthread_mutex_lock(&np->rn_lock);
717 		assert(np->rn_other_refs_held > 0);
718 		if (atomic_add_32_nv(&np->rn_other_refs_held, -1) == 0 &&
719 		    np->rn_refs == 0 && (np->rn_flags & RC_NODE_OLD)) {
720 			/*
721 			 * This was the last client reference.  Destroy
722 			 * any other references and free() the node.
723 			 */
724 			rc_node_no_client_refs(np);
725 		} else {
726 			(void) pthread_mutex_unlock(&np->rn_lock);
727 		}
728 	}
729 }
730 
731 static void
732 rc_node_hold_locked(rc_node_t *np)
733 {
734 	assert(MUTEX_HELD(&np->rn_lock));
735 
736 	if (np->rn_refs == 0 && (np->rn_flags & RC_NODE_PARENT_REF))
737 		rc_node_hold_other(np->rn_parent_ref);
738 	np->rn_refs++;
739 	assert(np->rn_refs > 0);
740 }
741 
742 static void
743 rc_node_hold(rc_node_t *np)
744 {
745 	(void) pthread_mutex_lock(&np->rn_lock);
746 	rc_node_hold_locked(np);
747 	(void) pthread_mutex_unlock(&np->rn_lock);
748 }
749 
750 static void
751 rc_node_rele_locked(rc_node_t *np)
752 {
753 	int unref = 0;
754 	rc_node_t *par_ref = NULL;
755 
756 	assert(MUTEX_HELD(&np->rn_lock));
757 	assert(np->rn_refs > 0);
758 
759 	if (--np->rn_refs == 0) {
760 		if (np->rn_flags & RC_NODE_PARENT_REF)
761 			par_ref = np->rn_parent_ref;
762 
763 		/*
764 		 * Composed property groups are only as good as their
765 		 * references.
766 		 */
767 		if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
768 			np->rn_flags |= RC_NODE_DEAD;
769 
770 		if ((np->rn_flags & (RC_NODE_DEAD|RC_NODE_OLD)) &&
771 		    np->rn_other_refs == 0 && np->rn_other_refs_held == 0)
772 			unref = 1;
773 	}
774 
775 	if (unref) {
776 		/*
777 		 * This was the last client reference.  Destroy any other
778 		 * references and free() the node.
779 		 */
780 		rc_node_no_client_refs(np);
781 	} else {
782 		/*
783 		 * rn_erefs can be 0 if we acquired the reference in
784 		 * a path which hasn't been updated to increment rn_erefs.
785 		 * When all paths which end here are updated, we should
786 		 * assert rn_erefs > 0 and always decrement it.
787 		 */
788 		if (np->rn_erefs > 0)
789 			--np->rn_erefs;
790 		(void) pthread_mutex_unlock(&np->rn_lock);
791 	}
792 
793 	if (par_ref != NULL)
794 		rc_node_rele_other(par_ref);
795 }
796 
797 void
798 rc_node_rele(rc_node_t *np)
799 {
800 	(void) pthread_mutex_lock(&np->rn_lock);
801 	rc_node_rele_locked(np);
802 }
803 
804 static cache_bucket_t *
805 cache_hold(uint32_t h)
806 {
807 	cache_bucket_t *bp = CACHE_BUCKET(h);
808 	(void) pthread_mutex_lock(&bp->cb_lock);
809 	return (bp);
810 }
811 
812 static void
813 cache_release(cache_bucket_t *bp)
814 {
815 	(void) pthread_mutex_unlock(&bp->cb_lock);
816 }
817 
818 static rc_node_t *
819 cache_lookup_unlocked(cache_bucket_t *bp, rc_node_lookup_t *lp)
820 {
821 	uint32_t h = rc_node_hash(lp);
822 	rc_node_t *np;
823 
824 	assert(MUTEX_HELD(&bp->cb_lock));
825 	assert(bp == CACHE_BUCKET(h));
826 
827 	for (np = bp->cb_head; np != NULL; np = np->rn_hash_next) {
828 		if (np->rn_hash == h && rc_node_match(np, lp)) {
829 			rc_node_hold(np);
830 			return (np);
831 		}
832 	}
833 
834 	return (NULL);
835 }
836 
837 static rc_node_t *
838 cache_lookup(rc_node_lookup_t *lp)
839 {
840 	uint32_t h;
841 	cache_bucket_t *bp;
842 	rc_node_t *np;
843 
844 	h = rc_node_hash(lp);
845 	bp = cache_hold(h);
846 
847 	np = cache_lookup_unlocked(bp, lp);
848 
849 	cache_release(bp);
850 
851 	return (np);
852 }
853 
854 static void
855 cache_insert_unlocked(cache_bucket_t *bp, rc_node_t *np)
856 {
857 	assert(MUTEX_HELD(&bp->cb_lock));
858 	assert(np->rn_hash == rc_node_hash(&np->rn_id));
859 	assert(bp == CACHE_BUCKET(np->rn_hash));
860 
861 	assert(np->rn_hash_next == NULL);
862 
863 	np->rn_hash_next = bp->cb_head;
864 	bp->cb_head = np;
865 }
866 
867 static void
868 cache_remove_unlocked(cache_bucket_t *bp, rc_node_t *np)
869 {
870 	rc_node_t **npp;
871 
872 	assert(MUTEX_HELD(&bp->cb_lock));
873 	assert(np->rn_hash == rc_node_hash(&np->rn_id));
874 	assert(bp == CACHE_BUCKET(np->rn_hash));
875 
876 	for (npp = &bp->cb_head; *npp != NULL; npp = &(*npp)->rn_hash_next)
877 		if (*npp == np)
878 			break;
879 
880 	assert(*npp == np);
881 	*npp = np->rn_hash_next;
882 	np->rn_hash_next = NULL;
883 }
884 
885 /*
886  * verify that the 'parent' type can have a child typed 'child'
887  * Fails with
888  *   _INVALID_TYPE - argument is invalid
889  *   _TYPE_MISMATCH - parent type cannot have children of type child
890  */
891 static int
892 rc_check_parent_child(uint32_t parent, uint32_t child)
893 {
894 	int idx;
895 	uint32_t type;
896 
897 	if (parent == 0 || parent >= NUM_TYPES ||
898 	    child == 0 || child >= NUM_TYPES)
899 		return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
900 
901 	for (idx = 0; idx < MAX_VALID_CHILDREN; idx++) {
902 		type = rc_types[parent].rt_valid_children[idx];
903 		if (type == child)
904 			return (REP_PROTOCOL_SUCCESS);
905 	}
906 
907 	return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
908 }
909 
910 /*
911  * Fails with
912  *   _INVALID_TYPE - type is invalid
913  *   _BAD_REQUEST - name is an invalid name for a node of type type
914  */
915 int
916 rc_check_type_name(uint32_t type, const char *name)
917 {
918 	if (type == 0 || type >= NUM_TYPES)
919 		return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
920 
921 	if (uu_check_name(name, rc_types[type].rt_name_flags) == -1)
922 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
923 
924 	return (REP_PROTOCOL_SUCCESS);
925 }
926 
927 static int
928 rc_check_pgtype_name(const char *name)
929 {
930 	if (uu_check_name(name, UU_NAME_DOMAIN) == -1)
931 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
932 
933 	return (REP_PROTOCOL_SUCCESS);
934 }
935 
936 /*
937  * rc_node_free_fmri should be called whenever a node loses its parent.
938  * The reason is that the node's fmri string is built up by concatenating
939  * its name to the parent's fmri.  Thus, when the node no longer has a
940  * parent, its fmri is no longer valid.
941  */
942 static void
943 rc_node_free_fmri(rc_node_t *np)
944 {
945 	if (np->rn_fmri != NULL) {
946 		free((void *)np->rn_fmri);
947 		np->rn_fmri = NULL;
948 	}
949 }
950 
951 /*
952  * Concatenate the appropriate separator and the FMRI element to the base
953  * FMRI string at fmri.
954  *
955  * Fails with
956  *	_TRUNCATED	Not enough room in buffer at fmri.
957  */
958 static int
959 rc_concat_fmri_element(
960 	char *fmri,			/* base fmri */
961 	size_t bufsize,			/* size of buf at fmri */
962 	size_t *sz_out,			/* receives result size. */
963 	const char *element,		/* element name to concat */
964 	rep_protocol_entity_t type)	/* type of element */
965 {
966 	size_t actual;
967 	const char *name = element;
968 	int rc;
969 	const char *separator;
970 
971 	if (bufsize > 0)
972 		*sz_out = strlen(fmri);
973 	else
974 		*sz_out = 0;
975 
976 	switch (type) {
977 	case REP_PROTOCOL_ENTITY_SCOPE:
978 		if (strcmp(element, SCF_FMRI_LOCAL_SCOPE) == 0) {
979 			/*
980 			 * No need to display scope information if we are
981 			 * in the local scope.
982 			 */
983 			separator = SCF_FMRI_SVC_PREFIX;
984 			name = NULL;
985 		} else {
986 			/*
987 			 * Need to display scope information, because it is
988 			 * not the local scope.
989 			 */
990 			separator = SCF_FMRI_SVC_PREFIX SCF_FMRI_SCOPE_PREFIX;
991 		}
992 		break;
993 	case REP_PROTOCOL_ENTITY_SERVICE:
994 		separator = SCF_FMRI_SERVICE_PREFIX;
995 		break;
996 	case REP_PROTOCOL_ENTITY_INSTANCE:
997 		separator = SCF_FMRI_INSTANCE_PREFIX;
998 		break;
999 	case REP_PROTOCOL_ENTITY_PROPERTYGRP:
1000 	case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
1001 		separator = SCF_FMRI_PROPERTYGRP_PREFIX;
1002 		break;
1003 	case REP_PROTOCOL_ENTITY_PROPERTY:
1004 		separator = SCF_FMRI_PROPERTY_PREFIX;
1005 		break;
1006 	case REP_PROTOCOL_ENTITY_VALUE:
1007 		/*
1008 		 * A value does not have a separate FMRI from its property,
1009 		 * so there is nothing to concat.
1010 		 */
1011 		return (REP_PROTOCOL_SUCCESS);
1012 	case REP_PROTOCOL_ENTITY_SNAPSHOT:
1013 	case REP_PROTOCOL_ENTITY_SNAPLEVEL:
1014 		/* Snapshots do not have FMRIs, so there is nothing to do. */
1015 		return (REP_PROTOCOL_SUCCESS);
1016 	default:
1017 		(void) fprintf(stderr, "%s:%d: Unknown protocol type %d.\n",
1018 		    __FILE__, __LINE__, type);
1019 		abort();	/* Missing a case in switch if we get here. */
1020 	}
1021 
1022 	/* Concatenate separator and element to the fmri buffer. */
1023 
1024 	actual = strlcat(fmri, separator, bufsize);
1025 	if (name != NULL) {
1026 		if (actual < bufsize) {
1027 			actual = strlcat(fmri, name, bufsize);
1028 		} else {
1029 			actual += strlen(name);
1030 		}
1031 	}
1032 	if (actual < bufsize) {
1033 		rc = REP_PROTOCOL_SUCCESS;
1034 	} else {
1035 		rc = REP_PROTOCOL_FAIL_TRUNCATED;
1036 	}
1037 	*sz_out = actual;
1038 	return (rc);
1039 }
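/*
 * For example (illustrative values), repeatedly applying the separators
 * chosen above builds up FMRIs of the familiar form:
 *
 *	"svc:"						local scope
 *	"svc:/network/ssh"				+ service
 *	"svc:/network/ssh:default"			+ instance
 *	"svc:/network/ssh:default/:properties/general"	+ property group
 *	"svc:/network/ssh:default/:properties/general/enabled"	+ property
 */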
1040 
1041 /*
1042  * Get the FMRI for the node at np.  The fmri will be placed in buf.  On
1043  * success sz_out will be set to the size of the fmri in buf.  If
1044  * REP_PROTOCOL_FAIL_TRUNCATED is returned, sz_out will be set to the size
1045  * of the buffer that would be required to avoid truncation.
1046  *
1047  * Fails with
1048  *	_TRUNCATED	not enough room in buf for the FMRI.
1049  */
1050 static int
1051 rc_node_get_fmri_or_fragment(rc_node_t *np, char *buf, size_t bufsize,
1052     size_t *sz_out)
1053 {
1054 	size_t fmri_len = 0;
1055 	int r;
1056 
1057 	if (bufsize > 0)
1058 		*buf = 0;
1059 	*sz_out = 0;
1060 
1061 	if (np->rn_fmri == NULL) {
1062 		/*
1063 		 * A NULL rn_fmri implies that this is a top level scope.
1064 		 * Child nodes will always have an rn_fmri established
1065 		 * because both rc_node_link_child() and
1066 		 * rc_node_relink_child() call rc_node_build_fmri().  In
1067 		 * this case, we'll just return our name preceded by the
1068 		 * appropriate FMRI decorations.
1069 		 */
1070 		assert(np->rn_parent == NULL);
1071 		r = rc_concat_fmri_element(buf, bufsize, &fmri_len, np->rn_name,
1072 		    np->rn_id.rl_type);
1073 		if (r != REP_PROTOCOL_SUCCESS)
1074 			return (r);
1075 	} else {
1076 		/* We have an fmri, so return it. */
1077 		fmri_len = strlcpy(buf, np->rn_fmri, bufsize);
1078 	}
1079 
1080 	*sz_out = fmri_len;
1081 
1082 	if (fmri_len >= bufsize)
1083 		return (REP_PROTOCOL_FAIL_TRUNCATED);
1084 
1085 	return (REP_PROTOCOL_SUCCESS);
1086 }
1087 
1088 /*
1089  * Build an FMRI string for this node and save it in rn_fmri.
1090  *
1091  * The basic strategy here is to get the fmri of our parent and then
1092  * concatenate the appropriate separator followed by our name.  If our name
1093  * is null, the resulting fmri will just be a copy of the parent fmri.
1094  * rc_node_build_fmri() should be called with the RC_NODE_USING_PARENT flag
1095  * set.  Also the rn_lock for this node should be held.
1096  *
1097  * Fails with
1098  *	_NO_RESOURCES	Could not allocate memory.
1099  */
1100 static int
1101 rc_node_build_fmri(rc_node_t *np)
1102 {
1103 	size_t actual;
1104 	char fmri[REP_PROTOCOL_FMRI_LEN];
1105 	int rc;
1106 	size_t	sz = REP_PROTOCOL_FMRI_LEN;
1107 
1108 	assert(MUTEX_HELD(&np->rn_lock));
1109 	assert(np->rn_flags & RC_NODE_USING_PARENT);
1110 
1111 	rc_node_free_fmri(np);
1112 
1113 	rc = rc_node_get_fmri_or_fragment(np->rn_parent, fmri, sz, &actual);
1114 	assert(rc == REP_PROTOCOL_SUCCESS);
1115 
1116 	if (np->rn_name != NULL) {
1117 		rc = rc_concat_fmri_element(fmri, sz, &actual, np->rn_name,
1118 		    np->rn_id.rl_type);
1119 		assert(rc == REP_PROTOCOL_SUCCESS);
1120 		np->rn_fmri = strdup(fmri);
1121 	} else {
1122 		np->rn_fmri = strdup(fmri);
1123 	}
1124 	if (np->rn_fmri == NULL) {
1125 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1126 	} else {
1127 		rc = REP_PROTOCOL_SUCCESS;
1128 	}
1129 
1130 	return (rc);
1131 }
1132 
1133 /*
1134  * Get the FMRI of the node at np placing the result in fmri.  Then
1135  * concatenate the additional element to fmri.  The type variable indicates
1136  * the type of element, so that the appropriate separator can be
1137  * generated.  size is the number of bytes in the buffer at fmri, and
1138  * sz_out receives the size of the generated string.  If the result is
1139  * truncated, sz_out will receive the size of the buffer that would be
1140  * required to avoid truncation.
1141  *
1142  * Fails with
1143  *	_TRUNCATED	Not enough room in buffer at fmri.
1144  */
1145 static int
1146 rc_get_fmri_and_concat(rc_node_t *np, char *fmri, size_t size, size_t *sz_out,
1147     const char *element, rep_protocol_entity_t type)
1148 {
1149 	int rc;
1150 
1151 	if ((rc = rc_node_get_fmri_or_fragment(np, fmri, size, sz_out)) !=
1152 	    REP_PROTOCOL_SUCCESS) {
1153 		return (rc);
1154 	}
1155 	if ((rc = rc_concat_fmri_element(fmri, size, sz_out, element, type)) !=
1156 	    REP_PROTOCOL_SUCCESS) {
1157 		return (rc);
1158 	}
1159 
1160 	return (REP_PROTOCOL_SUCCESS);
1161 }
1162 
1163 static int
1164 rc_notify_info_interested(rc_notify_info_t *rnip, rc_notify_t *np)
1165 {
1166 	rc_node_t *nnp = np->rcn_node;
1167 	int i;
1168 
1169 	assert(MUTEX_HELD(&rc_pg_notify_lock));
1170 
1171 	if (np->rcn_delete != NULL) {
1172 		assert(np->rcn_info == NULL && np->rcn_node == NULL);
1173 		return (1);		/* everyone likes deletes */
1174 	}
1175 	if (np->rcn_node == NULL) {
1176 		assert(np->rcn_info != NULL || np->rcn_delete != NULL);
1177 		return (0);
1178 	}
1179 	assert(np->rcn_info == NULL);
1180 
1181 	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
1182 		if (rnip->rni_namelist[i] != NULL) {
1183 			if (strcmp(nnp->rn_name, rnip->rni_namelist[i]) == 0)
1184 				return (1);
1185 		}
1186 		if (rnip->rni_typelist[i] != NULL) {
1187 			if (strcmp(nnp->rn_type, rnip->rni_typelist[i]) == 0)
1188 				return (1);
1189 		}
1190 	}
1191 	return (0);
1192 }
1193 
1194 static void
1195 rc_notify_insert_node(rc_node_t *nnp)
1196 {
1197 	rc_notify_t *np = &nnp->rn_notify;
1198 	rc_notify_info_t *nip;
1199 	int found = 0;
1200 
1201 	assert(np->rcn_info == NULL);
1202 
1203 	if (nnp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
1204 		return;
1205 
1206 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
1207 	np->rcn_node = nnp;
1208 	for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
1209 	    nip = uu_list_next(rc_notify_info_list, nip)) {
1210 		if (rc_notify_info_interested(nip, np)) {
1211 			(void) pthread_cond_broadcast(&nip->rni_cv);
1212 			found++;
1213 		}
1214 	}
1215 	if (found)
1216 		(void) uu_list_insert_before(rc_notify_list, NULL, np);
1217 	else
1218 		np->rcn_node = NULL;
1219 
1220 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
1221 }
1222 
1223 static void
1224 rc_notify_deletion(rc_notify_delete_t *ndp, const char *service,
1225     const char *instance, const char *pg)
1226 {
1227 	rc_notify_info_t *nip;
1228 
1229 	uu_list_node_init(&ndp->rnd_notify, &ndp->rnd_notify.rcn_list_node,
1230 	    rc_notify_pool);
1231 	ndp->rnd_notify.rcn_delete = ndp;
1232 
1233 	(void) snprintf(ndp->rnd_fmri, sizeof (ndp->rnd_fmri),
1234 	    "svc:/%s%s%s%s%s", service,
1235 	    (instance != NULL)? ":" : "", (instance != NULL)? instance : "",
1236 	    (pg != NULL)? "/:properties/" : "", (pg != NULL)? pg : "");
1237 
1238 	/*
1239 	 * add to notification list, notify watchers
1240 	 */
1241 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
1242 	for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
1243 	    nip = uu_list_next(rc_notify_info_list, nip))
1244 		(void) pthread_cond_broadcast(&nip->rni_cv);
1245 	(void) uu_list_insert_before(rc_notify_list, NULL, ndp);
1246 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
1247 }
1248 
1249 static void
1250 rc_notify_remove_node(rc_node_t *nnp)
1251 {
1252 	rc_notify_t *np = &nnp->rn_notify;
1253 
1254 	assert(np->rcn_info == NULL);
1255 	assert(!MUTEX_HELD(&nnp->rn_lock));
1256 
1257 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
1258 	while (np->rcn_node != NULL) {
1259 		if (rc_notify_in_use) {
1260 			(void) pthread_cond_wait(&rc_pg_notify_cv,
1261 			    &rc_pg_notify_lock);
1262 			continue;
1263 		}
1264 		(void) uu_list_remove(rc_notify_list, np);
1265 		np->rcn_node = NULL;
1266 		break;
1267 	}
1268 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
1269 }
1270 
1271 static void
1272 rc_notify_remove_locked(rc_notify_t *np)
1273 {
1274 	assert(MUTEX_HELD(&rc_pg_notify_lock));
1275 	assert(rc_notify_in_use == 0);
1276 
1277 	(void) uu_list_remove(rc_notify_list, np);
1278 	if (np->rcn_node) {
1279 		np->rcn_node = NULL;
1280 	} else if (np->rcn_delete) {
1281 		uu_free(np->rcn_delete);
1282 	} else {
1283 		assert(0);	/* CAN'T HAPPEN */
1284 	}
1285 }
1286 
1287 /*
1288  * Permission checking functions.  See comment atop this file.
1289  */
1290 #ifndef NATIVE_BUILD
1291 static permcheck_t *
1292 pc_create()
1293 {
1294 	permcheck_t *p;
1295 
1296 	p = uu_zalloc(sizeof (*p));
1297 	if (p == NULL)
1298 		return (NULL);
1299 	p->pc_bnum = 8;			/* Normal case will only have 2 elts. */
1300 	p->pc_buckets = uu_zalloc(sizeof (*p->pc_buckets) * p->pc_bnum);
1301 	if (p->pc_buckets == NULL) {
1302 		uu_free(p);
1303 		return (NULL);
1304 	}
1305 
1306 	p->pc_enum = 0;
1307 	return (p);
1308 }
1309 
1310 static void
1311 pc_free(permcheck_t *pcp)
1312 {
1313 	uint_t i;
1314 	struct pc_elt *ep, *next;
1315 
1316 	for (i = 0; i < pcp->pc_bnum; ++i) {
1317 		for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1318 			next = ep->pce_next;
1319 			free(ep);
1320 		}
1321 	}
1322 
1323 	free(pcp->pc_buckets);
1324 	free(pcp);
1325 }
1326 
1327 static uint32_t
1328 pc_hash(const char *auth)
1329 {
1330 	uint32_t h = 0, g;
1331 	const char *p;
1332 
1333 	/*
1334 	 * Generic hash function from uts/common/os/modhash.c.
1335 	 */
1336 	for (p = auth; *p != '\0'; ++p) {
1337 		h = (h << 4) + *p;
1338 		g = (h & 0xf0000000);
1339 		if (g != 0) {
1340 			h ^= (g >> 24);
1341 			h ^= g;
1342 		}
1343 	}
1344 
1345 	return (h);
1346 }
1347 
1348 static perm_status_t
1349 pc_exists(permcheck_t *pcp, const char *auth)
1350 {
1351 	uint32_t h;
1352 	struct pc_elt *ep;
1353 
1354 	h = pc_hash(auth);
1355 	for (ep = pcp->pc_buckets[h & (pcp->pc_bnum - 1)];
1356 	    ep != NULL;
1357 	    ep = ep->pce_next) {
1358 		if (strcmp(auth, ep->pce_auth) == 0) {
1359 			pcp->pc_auth_string = ep->pce_auth;
1360 			return (PERM_GRANTED);
1361 		}
1362 	}
1363 
1364 	return (PERM_DENIED);
1365 }
1366 
1367 static perm_status_t
1368 pc_match(permcheck_t *pcp, const char *pattern)
1369 {
1370 	uint_t i;
1371 	struct pc_elt *ep;
1372 
1373 	for (i = 0; i < pcp->pc_bnum; ++i) {
1374 		for (ep = pcp->pc_buckets[i]; ep != NULL; ep = ep->pce_next) {
1375 			if (_auth_match(pattern, ep->pce_auth)) {
1376 				pcp->pc_auth_string = ep->pce_auth;
1377 				return (PERM_GRANTED);
1378 			}
1379 		}
1380 	}
1381 
1382 	return (PERM_DENIED);
1383 }
1384 
1385 static int
1386 pc_grow(permcheck_t *pcp)
1387 {
1388 	uint_t new_bnum, i, j;
1389 	struct pc_elt **new_buckets;
1390 	struct pc_elt *ep, *next;
1391 
1392 	new_bnum = pcp->pc_bnum * 2;
1393 	if (new_bnum < pcp->pc_bnum)
1394 		/* Homey don't play that. */
1395 		return (-1);
1396 
1397 	new_buckets = uu_zalloc(sizeof (*new_buckets) * new_bnum);
1398 	if (new_buckets == NULL)
1399 		return (-1);
1400 
1401 	for (i = 0; i < pcp->pc_bnum; ++i) {
1402 		for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1403 			next = ep->pce_next;
1404 			j = pc_hash(ep->pce_auth) & (new_bnum - 1);
1405 			ep->pce_next = new_buckets[j];
1406 			new_buckets[j] = ep;
1407 		}
1408 	}
1409 
1410 	uu_free(pcp->pc_buckets);
1411 	pcp->pc_buckets = new_buckets;
1412 	pcp->pc_bnum = new_bnum;
1413 
1414 	return (0);
1415 }
1416 
1417 static int
1418 pc_add(permcheck_t *pcp, const char *auth, pc_auth_type_t auth_type)
1419 {
1420 	struct pc_elt *ep;
1421 	uint_t i;
1422 
1423 	ep = uu_zalloc(offsetof(struct pc_elt, pce_auth) + strlen(auth) + 1);
1424 	if (ep == NULL)
1425 		return (-1);
1426 
1427 	/* Grow if pc_enum / pc_bnum > 3/4. */
1428 	if (pcp->pc_enum * 4 > 3 * pcp->pc_bnum)
1429 		/* Failure is not a stopper; we'll try again next time. */
1430 		(void) pc_grow(pcp);
1431 
1432 	(void) strcpy(ep->pce_auth, auth);
1433 
1434 	i = pc_hash(auth) & (pcp->pc_bnum - 1);
1435 	ep->pce_next = pcp->pc_buckets[i];
1436 	pcp->pc_buckets[i] = ep;
1437 
1438 	if (auth_type > pcp->pc_specific_type) {
1439 		pcp->pc_specific_type = auth_type;
1440 		pcp->pc_specific = ep;
1441 	}
1442 
1443 	++pcp->pc_enum;
1444 
1445 	return (0);
1446 }
1447 
1448 /*
1449  * For the type of a property group, return the authorization which may be
1450  * used to modify it.
1451  */
1452 static const char *
1453 perm_auth_for_pgtype(const char *pgtype)
1454 {
1455 	if (strcmp(pgtype, SCF_GROUP_METHOD) == 0)
1456 		return (AUTH_MODIFY_PREFIX "method");
1457 	else if (strcmp(pgtype, SCF_GROUP_DEPENDENCY) == 0)
1458 		return (AUTH_MODIFY_PREFIX "dependency");
1459 	else if (strcmp(pgtype, SCF_GROUP_APPLICATION) == 0)
1460 		return (AUTH_MODIFY_PREFIX "application");
1461 	else if (strcmp(pgtype, SCF_GROUP_FRAMEWORK) == 0)
1462 		return (AUTH_MODIFY_PREFIX "framework");
1463 	else
1464 		return (NULL);
1465 }
1466 
1467 /*
1468  * Fails with
1469  *   _NO_RESOURCES - out of memory
1470  */
1471 static int
1472 perm_add_enabling_type(permcheck_t *pcp, const char *auth,
1473     pc_auth_type_t auth_type)
1474 {
1475 	return (pc_add(pcp, auth, auth_type) == 0 ? REP_PROTOCOL_SUCCESS :
1476 	    REP_PROTOCOL_FAIL_NO_RESOURCES);
1477 }
1478 
1479 /*
1480  * Fails with
1481  *   _NO_RESOURCES - out of memory
1482  */
1483 static int
1484 perm_add_enabling(permcheck_t *pcp, const char *auth)
1485 {
1486 	return (perm_add_enabling_type(pcp, auth, PC_AUTH_SMF));
1487 }
1488 
1489 /* Note that perm_add_enabling_values() is defined below. */
1490 
1491 /*
1492  * perm_granted() returns PERM_GRANTED if the current door caller has one of
1493  * the enabling authorizations in pcp, PERM_DENIED if it doesn't, PERM_GONE if
1494  * the door client went away and PERM_FAIL if an error (usually lack of
1495  * memory) occurs.  auth_cb() checks each and every authorization as
1496  * enumerated by _enum_auths.  When we find a result other than PERM_DENIED,
1497  * we short-cut the enumeration and return non-zero.
1498  */
1499 
1500 static int
1501 auth_cb(const char *auth, void *ctxt, void *vres)
1502 {
1503 	permcheck_t *pcp = ctxt;
1504 	int *pret = vres;
1505 
1506 	if (strchr(auth, KV_WILDCHAR) == NULL)
1507 		*pret = pc_exists(pcp, auth);
1508 	else
1509 		*pret = pc_match(pcp, auth);
1510 
1511 	if (*pret != PERM_DENIED)
1512 		return (1);
1513 	/*
1514 	 * If we failed, choose the most specific auth string for use in
1515 	 * the audit event.
1516 	 */
1517 	assert(pcp->pc_specific != NULL);
1518 	pcp->pc_auth_string = pcp->pc_specific->pce_auth;
1519 
1520 	return (0);		/* Tell _enum_auths() to continue the enumeration */
1521 }
1522 
1523 static perm_status_t
1524 perm_granted(permcheck_t *pcp)
1525 {
1526 	ucred_t *uc;
1527 
1528 	perm_status_t ret = PERM_DENIED;
1529 	uid_t uid;
1530 	struct passwd pw;
1531 	char pwbuf[1024];	/* XXX should be NSS_BUFLEN_PASSWD */
1532 
1533 	/* Get the uid */
1534 	if ((uc = get_ucred()) == NULL) {
1535 		if (errno == EINVAL) {
1536 			/*
1537 			 * Client is no longer waiting for our response (e.g.,
1538 			 * it received a signal & resumed with EINTR).
1539 			 * Punting with door_return() would be nice but we
1540 			 * need to release all of the locks & references we
1541 			 * hold.  And we must report failure to the client
1542 			 * layer to keep it from ignoring retries as
1543 			 * already-done (idempotency & all that).  None of the
1544 			 * error codes fit very well, so we might as well
1545 			 * force the return of _PERMISSION_DENIED since we
1546 			 * couldn't determine the user.
1547 			 */
1548 			return (PERM_GONE);
1549 		}
1550 		assert(0);
1551 		abort();
1552 	}
1553 
1554 	uid = ucred_geteuid(uc);
1555 	assert(uid != (uid_t)-1);
1556 
1557 	if (getpwuid_r(uid, &pw, pwbuf, sizeof (pwbuf)) == NULL) {
1558 		return (PERM_FAIL);
1559 	}
1560 
1561 	/*
1562 	 * Enumerate all the auths defined for the user and return the
1563 	 * result in ret.
1564 	 */
1565 	if (_enum_auths(pw.pw_name, auth_cb, pcp, &ret) < 0)
1566 		return (PERM_FAIL);
1567 
1568 	return (ret);
1569 }
1570 
1571 static int
1572 map_granted_status(perm_status_t status, permcheck_t *pcp,
1573     char **match_auth)
1574 {
1575 	int rc;
1576 
1577 	*match_auth = NULL;
1578 	switch (status) {
1579 	case PERM_DENIED:
1580 		*match_auth = strdup(pcp->pc_auth_string);
1581 		if (*match_auth == NULL)
1582 			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1583 		else
1584 			rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1585 		break;
1586 	case PERM_GRANTED:
1587 		*match_auth = strdup(pcp->pc_auth_string);
1588 		if (*match_auth == NULL)
1589 			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1590 		else
1591 			rc = REP_PROTOCOL_SUCCESS;
1592 		break;
1593 	case PERM_GONE:
1594 		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
1595 		break;
1596 	case PERM_FAIL:
1597 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1598 		break;
1599 	}
1600 	return (rc);
1601 }
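/*
 * Illustrative only (not part of the original code): a minimal sketch of how
 * the helpers above are typically combined by a caller, in the style of
 * rc_node_modify_permission_check() below.  Error handling is abbreviated,
 * and "auth_used" is a hypothetical local.
 *
 *	permcheck_t *pcp = pc_create();
 *	char *auth_used = NULL;
 *	int rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
 *
 *	if (pcp != NULL) {
 *		if (perm_add_enabling(pcp, AUTH_MODIFY) ==
 *		    REP_PROTOCOL_SUCCESS)
 *			rc = map_granted_status(perm_granted(pcp), pcp,
 *			    &auth_used);
 *		pc_free(pcp);
 *	}
 *	free(auth_used);
 */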
1602 #endif /* NATIVE_BUILD */
1603 
1604 /*
1605  * Flags in RC_NODE_WAITING_FLAGS are broadcast (via rn_cv) when cleared;
1606  * they serialize actions and let threads wait for operations to complete.
1607  *
1608  * The waiting flags are:
1609  *	RC_NODE_CHILDREN_CHANGING
1610  *		The child list is being built or changed (due to creation
1611  *		or deletion).  All iterators pause.
1612  *
1613  *	RC_NODE_USING_PARENT
1614  *		Someone is actively using the parent pointer, so we can't
1615  *		be removed from the parent list.
1616  *
1617  *	RC_NODE_CREATING_CHILD
1618  *		A child is being created -- locks out other creations, to
1619  *		prevent insert-insert races.
1620  *
1621  *	RC_NODE_IN_TX
1622  *		This object is running a transaction.
1623  *
1624  *	RC_NODE_DYING
1625  *		This node might be dying.  Always set as part of
1626  *		RC_NODE_DYING_FLAGS (all of the waiting flags except
1627  *		RC_NODE_USING_PARENT).
1628  */
1629 static int
1630 rc_node_hold_flag(rc_node_t *np, uint32_t flag)
1631 {
1632 	assert(MUTEX_HELD(&np->rn_lock));
1633 	assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1634 
1635 	while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag)) {
1636 		(void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1637 	}
1638 	if (np->rn_flags & RC_NODE_DEAD)
1639 		return (0);
1640 
1641 	np->rn_flags |= flag;
1642 	return (1);
1643 }
1644 
1645 static void
1646 rc_node_rele_flag(rc_node_t *np, uint32_t flag)
1647 {
1648 	assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
1649 	assert(MUTEX_HELD(&np->rn_lock));
1650 	assert((np->rn_flags & flag) == flag);
1651 	np->rn_flags &= ~flag;
1652 	(void) pthread_cond_broadcast(&np->rn_cv);
1653 }
1654 
1655 /*
1656  * wait until a particular flag has cleared.  Fails if the object dies.
1657  */
1658 static int
1659 rc_node_wait_flag(rc_node_t *np, uint32_t flag)
1660 {
1661 	assert(MUTEX_HELD(&np->rn_lock));
1662 	while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag))
1663 		(void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1664 
1665 	return (!(np->rn_flags & RC_NODE_DEAD));
1666 }
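/*
 * Illustrative only (not part of the original code): the usual pattern for
 * the helpers above, mirroring rc_node_fill_children() below.  A thread takes
 * a waiting flag while it holds rn_lock, drops the lock to do work that must
 * not race with others, then re-locks and releases the flag, which broadcasts
 * rn_cv for any waiters.
 *
 *	(void) pthread_mutex_lock(&np->rn_lock);
 *	if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING)) {
 *		(void) pthread_mutex_unlock(&np->rn_lock);
 *		return (REP_PROTOCOL_FAIL_DELETED);	(node is dead)
 *	}
 *	(void) pthread_mutex_unlock(&np->rn_lock);
 *	... work which must not race with other child-list changes ...
 *	(void) pthread_mutex_lock(&np->rn_lock);
 *	rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
 *	(void) pthread_mutex_unlock(&np->rn_lock);
 */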
1667 
1668 /*
1669  * On entry, np's lock must be held, and this thread must be holding
1670  * RC_NODE_USING_PARENT.  On return, both of them are released.
1671  *
1672  * If the return value is NULL, np either does not have a parent, or
1673  * the parent has been marked DEAD.
1674  *
1675  * If the return value is non-NULL, it is the parent of np, and both
1676  * its lock and the requested flags are held.
1677  */
1678 static rc_node_t *
1679 rc_node_hold_parent_flag(rc_node_t *np, uint32_t flag)
1680 {
1681 	rc_node_t *pp;
1682 
1683 	assert(MUTEX_HELD(&np->rn_lock));
1684 	assert(np->rn_flags & RC_NODE_USING_PARENT);
1685 
1686 	if ((pp = np->rn_parent) == NULL) {
1687 		rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1688 		(void) pthread_mutex_unlock(&np->rn_lock);
1689 		return (NULL);
1690 	}
1691 	(void) pthread_mutex_unlock(&np->rn_lock);
1692 
1693 	(void) pthread_mutex_lock(&pp->rn_lock);
1694 	(void) pthread_mutex_lock(&np->rn_lock);
1695 	rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1696 	(void) pthread_mutex_unlock(&np->rn_lock);
1697 
1698 	if (!rc_node_hold_flag(pp, flag)) {
1699 		(void) pthread_mutex_unlock(&pp->rn_lock);
1700 		return (NULL);
1701 	}
1702 	return (pp);
1703 }
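/*
 * Illustrative only (not part of the original code): a typical call sequence
 * for rc_node_hold_parent_flag(), following the contract above.
 *
 *	(void) pthread_mutex_lock(&np->rn_lock);
 *	if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
 *		(void) pthread_mutex_unlock(&np->rn_lock);
 *		return (REP_PROTOCOL_FAIL_DELETED);
 *	}
 *	pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
 *	(np's lock and its RC_NODE_USING_PARENT hold are both gone here)
 *	if (pp == NULL)
 *		return (REP_PROTOCOL_FAIL_DELETED);
 *	... pp->rn_lock is held and RC_NODE_CHILDREN_CHANGING is set ...
 *	rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
 *	(void) pthread_mutex_unlock(&pp->rn_lock);
 */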
1704 
1705 rc_node_t *
1706 rc_node_alloc(void)
1707 {
1708 	rc_node_t *np = uu_zalloc(sizeof (*np));
1709 
1710 	if (np == NULL)
1711 		return (NULL);
1712 
1713 	(void) pthread_mutex_init(&np->rn_lock, NULL);
1714 	(void) pthread_cond_init(&np->rn_cv, NULL);
1715 
1716 	np->rn_children = uu_list_create(rc_children_pool, np, 0);
1717 	np->rn_pg_notify_list = uu_list_create(rc_pg_notify_pool, np, 0);
1718 
1719 	uu_list_node_init(np, &np->rn_sibling_node, rc_children_pool);
1720 
1721 	uu_list_node_init(&np->rn_notify, &np->rn_notify.rcn_list_node,
1722 	    rc_notify_pool);
1723 
1724 	return (np);
1725 }
1726 
1727 void
1728 rc_node_destroy(rc_node_t *np)
1729 {
1730 	int i;
1731 
1732 	if (np->rn_flags & RC_NODE_UNREFED)
1733 		return;				/* being handled elsewhere */
1734 
1735 	assert(np->rn_refs == 0 && np->rn_other_refs == 0);
1736 	assert(np->rn_former == NULL);
1737 
1738 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
1739 		/* Release the holds from rc_iter_next(). */
1740 		for (i = 0; i < COMPOSITION_DEPTH; ++i) {
1741 			/* rn_cchain[i] may be NULL for empty snapshots. */
1742 			if (np->rn_cchain[i] != NULL)
1743 				rc_node_rele(np->rn_cchain[i]);
1744 		}
1745 	}
1746 
1747 	if (np->rn_name != NULL)
1748 		free((void *)np->rn_name);
1749 	np->rn_name = NULL;
1750 	if (np->rn_type != NULL)
1751 		free((void *)np->rn_type);
1752 	np->rn_type = NULL;
1753 	if (np->rn_values != NULL)
1754 		object_free_values(np->rn_values, np->rn_valtype,
1755 		    np->rn_values_count, np->rn_values_size);
1756 	np->rn_values = NULL;
1757 	rc_node_free_fmri(np);
1758 
1759 	if (np->rn_snaplevel != NULL)
1760 		rc_snaplevel_rele(np->rn_snaplevel);
1761 	np->rn_snaplevel = NULL;
1762 
1763 	uu_list_node_fini(np, &np->rn_sibling_node, rc_children_pool);
1764 
1765 	uu_list_node_fini(&np->rn_notify, &np->rn_notify.rcn_list_node,
1766 	    rc_notify_pool);
1767 
1768 	assert(uu_list_first(np->rn_children) == NULL);
1769 	uu_list_destroy(np->rn_children);
1770 	uu_list_destroy(np->rn_pg_notify_list);
1771 
1772 	(void) pthread_mutex_destroy(&np->rn_lock);
1773 	(void) pthread_cond_destroy(&np->rn_cv);
1774 
1775 	uu_free(np);
1776 }
1777 
1778 /*
1779  * Link in a child node.
1780  *
1781  * Because of the lock ordering, cp has to already be in the hash table with
1782  * its lock dropped before we get it.  To prevent anyone from noticing that
1783  * it is parentless, the creation code sets the RC_NODE_USING_PARENT flag.
1784  * Once we've linked it in, we release the flag.
1785  */
1786 static void
1787 rc_node_link_child(rc_node_t *np, rc_node_t *cp)
1788 {
1789 	assert(!MUTEX_HELD(&np->rn_lock));
1790 	assert(!MUTEX_HELD(&cp->rn_lock));
1791 
1792 	(void) pthread_mutex_lock(&np->rn_lock);
1793 	(void) pthread_mutex_lock(&cp->rn_lock);
1794 	assert(!(cp->rn_flags & RC_NODE_IN_PARENT) &&
1795 	    (cp->rn_flags & RC_NODE_USING_PARENT));
1796 
1797 	assert(rc_check_parent_child(np->rn_id.rl_type, cp->rn_id.rl_type) ==
1798 	    REP_PROTOCOL_SUCCESS);
1799 
1800 	cp->rn_parent = np;
1801 	cp->rn_flags |= RC_NODE_IN_PARENT;
1802 	(void) uu_list_insert_before(np->rn_children, NULL, cp);
1803 	(void) rc_node_build_fmri(cp);
1804 
1805 	(void) pthread_mutex_unlock(&np->rn_lock);
1806 
1807 	rc_node_rele_flag(cp, RC_NODE_USING_PARENT);
1808 	(void) pthread_mutex_unlock(&cp->rn_lock);
1809 }
1810 
1811 /*
1812  * Sets the rn_parent_ref field of all the children of np to pp -- always
1813  * initially invoked as rc_node_setup_parent_ref(np, np); it then recurses.
1814  *
1815  * This is used when we mark a node RC_NODE_OLD, so that when the object and
1816  * its children are no longer referenced, they will all be deleted as a unit.
1817  */
1818 static void
1819 rc_node_setup_parent_ref(rc_node_t *np, rc_node_t *pp)
1820 {
1821 	rc_node_t *cp;
1822 
1823 	assert(MUTEX_HELD(&np->rn_lock));
1824 
1825 	for (cp = uu_list_first(np->rn_children); cp != NULL;
1826 	    cp = uu_list_next(np->rn_children, cp)) {
1827 		(void) pthread_mutex_lock(&cp->rn_lock);
1828 		if (cp->rn_flags & RC_NODE_PARENT_REF) {
1829 			assert(cp->rn_parent_ref == pp);
1830 		} else {
1831 			assert(cp->rn_parent_ref == NULL);
1832 
1833 			cp->rn_flags |= RC_NODE_PARENT_REF;
1834 			cp->rn_parent_ref = pp;
1835 			if (cp->rn_refs != 0)
1836 				rc_node_hold_other(pp);
1837 		}
1838 		rc_node_setup_parent_ref(cp, pp);		/* recurse */
1839 		(void) pthread_mutex_unlock(&cp->rn_lock);
1840 	}
1841 }
1842 
1843 /*
1844  * Atomically replace 'np' with 'newp', with a parent of 'pp'.
1845  *
1846  * Requirements:
1847  *	*no* node locks may be held.
1848  *	pp must be held with RC_NODE_CHILDREN_CHANGING
1849  *	newp and np must be held with RC_NODE_IN_TX
 *	newp must also be held with RC_NODE_USING_PARENT (released on return)
1850  *	np must be marked RC_NODE_IN_PARENT, newp must not be
1851  *	np must be marked RC_NODE_OLD
1852  *
1853  * Afterwards:
1854  *	pp's RC_NODE_CHILDREN_CHANGING is dropped
1855  *	newp and np's RC_NODE_IN_TX is dropped
1856  *	newp->rn_former = np;
1857  *	newp is RC_NODE_IN_PARENT, np is not.
1858  *	interested notify subscribers have been notified of newp's new status.
1859  */
1860 static void
1861 rc_node_relink_child(rc_node_t *pp, rc_node_t *np, rc_node_t *newp)
1862 {
1863 	cache_bucket_t *bp;
1864 	/*
1865 	 * First, swap np and newp in the cache.  newp's RC_NODE_IN_TX flag
1866 	 * keeps rc_node_update() from seeing it until we are done.
1867 	 */
1868 	bp = cache_hold(newp->rn_hash);
1869 	cache_remove_unlocked(bp, np);
1870 	cache_insert_unlocked(bp, newp);
1871 	cache_release(bp);
1872 
1873 	/*
1874 	 * replace np with newp in pp's list, and attach it to newp's rn_former
1875 	 * link.
1876 	 */
1877 	(void) pthread_mutex_lock(&pp->rn_lock);
1878 	assert(pp->rn_flags & RC_NODE_CHILDREN_CHANGING);
1879 
1880 	(void) pthread_mutex_lock(&newp->rn_lock);
1881 	assert(!(newp->rn_flags & RC_NODE_IN_PARENT));
1882 	assert(newp->rn_flags & RC_NODE_IN_TX);
1883 
1884 	(void) pthread_mutex_lock(&np->rn_lock);
1885 	assert(np->rn_flags & RC_NODE_IN_PARENT);
1886 	assert(np->rn_flags & RC_NODE_OLD);
1887 	assert(np->rn_flags & RC_NODE_IN_TX);
1888 
1889 	newp->rn_parent = pp;
1890 	newp->rn_flags |= RC_NODE_IN_PARENT;
1891 
1892 	/*
1893 	 * Note that we carefully add newp before removing np -- this
1894 	 * keeps iterators on the list from missing us.
1895 	 */
1896 	(void) uu_list_insert_after(pp->rn_children, np, newp);
1897 	(void) rc_node_build_fmri(newp);
1898 	(void) uu_list_remove(pp->rn_children, np);
1899 
1900 	/*
1901 	 * re-set np
1902 	 */
1903 	newp->rn_former = np;
1904 	np->rn_parent = NULL;
1905 	np->rn_flags &= ~RC_NODE_IN_PARENT;
1906 	np->rn_flags |= RC_NODE_ON_FORMER;
1907 
1908 	rc_notify_insert_node(newp);
1909 
1910 	rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
1911 	(void) pthread_mutex_unlock(&pp->rn_lock);
1912 	rc_node_rele_flag(newp, RC_NODE_USING_PARENT | RC_NODE_IN_TX);
1913 	(void) pthread_mutex_unlock(&newp->rn_lock);
1914 	rc_node_setup_parent_ref(np, np);
1915 	rc_node_rele_flag(np, RC_NODE_IN_TX);
1916 	(void) pthread_mutex_unlock(&np->rn_lock);
1917 }
1918 
1919 /*
1920  * makes sure a node with lookup 'nip', name 'name', and parent 'pp' exists.
1921  * 'cp' is used (and returned) if the node does not yet exist.  If it does
1922  * exist, 'cp' is freed, and the existing node is returned instead.
1923  */
1924 rc_node_t *
1925 rc_node_setup(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
1926     rc_node_t *pp)
1927 {
1928 	rc_node_t *np;
1929 	cache_bucket_t *bp;
1930 	uint32_t h = rc_node_hash(nip);
1931 
1932 	assert(cp->rn_refs == 0);
1933 
1934 	bp = cache_hold(h);
1935 	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
1936 		cache_release(bp);
1937 
1938 		/*
1939 		 * make sure it matches our expectations
1940 		 */
1941 		(void) pthread_mutex_lock(&np->rn_lock);
1942 		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
1943 			assert(np->rn_parent == pp);
1944 			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
1945 			assert(strcmp(np->rn_name, name) == 0);
1946 			assert(np->rn_type == NULL);
1947 			assert(np->rn_flags & RC_NODE_IN_PARENT);
1948 			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
1949 		}
1950 		(void) pthread_mutex_unlock(&np->rn_lock);
1951 
1952 		rc_node_destroy(cp);
1953 		return (np);
1954 	}
1955 
1956 	/*
1957 	 * No one is there -- setup & install the new node.
1958 	 */
1959 	np = cp;
1960 	rc_node_hold(np);
1961 	np->rn_id = *nip;
1962 	np->rn_hash = h;
1963 	np->rn_name = strdup(name);
1964 
1965 	np->rn_flags |= RC_NODE_USING_PARENT;
1966 
1967 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE) {
1968 #if COMPOSITION_DEPTH == 2
1969 		np->rn_cchain[0] = np;
1970 		np->rn_cchain[1] = pp;
1971 #else
1972 #error This code must be updated.
1973 #endif
1974 	}
1975 
1976 	cache_insert_unlocked(bp, np);
1977 	cache_release(bp);		/* we are now visible */
1978 
1979 	rc_node_link_child(pp, np);
1980 
1981 	return (np);
1982 }
1983 
1984 /*
1985  * makes sure a snapshot with lookup 'nip', name 'name', and parent 'pp' exists.
1986  * 'cp' is used (and returned) if the node does not yet exist.  If it does
1987  * exist, 'cp' is freed, and the existing node is returned instead.
1988  */
1989 rc_node_t *
1990 rc_node_setup_snapshot(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
1991     uint32_t snap_id, rc_node_t *pp)
1992 {
1993 	rc_node_t *np;
1994 	cache_bucket_t *bp;
1995 	uint32_t h = rc_node_hash(nip);
1996 
1997 	assert(cp->rn_refs == 0);
1998 
1999 	bp = cache_hold(h);
2000 	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2001 		cache_release(bp);
2002 
2003 		/*
2004 		 * make sure it matches our expectations
2005 		 */
2006 		(void) pthread_mutex_lock(&np->rn_lock);
2007 		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2008 			assert(np->rn_parent == pp);
2009 			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2010 			assert(strcmp(np->rn_name, name) == 0);
2011 			assert(np->rn_type == NULL);
2012 			assert(np->rn_flags & RC_NODE_IN_PARENT);
2013 			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2014 		}
2015 		(void) pthread_mutex_unlock(&np->rn_lock);
2016 
2017 		rc_node_destroy(cp);
2018 		return (np);
2019 	}
2020 
2021 	/*
2022 	 * No one is there -- create a new node.
2023 	 */
2024 	np = cp;
2025 	rc_node_hold(np);
2026 	np->rn_id = *nip;
2027 	np->rn_hash = h;
2028 	np->rn_name = strdup(name);
2029 	np->rn_snapshot_id = snap_id;
2030 
2031 	np->rn_flags |= RC_NODE_USING_PARENT;
2032 
2033 	cache_insert_unlocked(bp, np);
2034 	cache_release(bp);		/* we are now visible */
2035 
2036 	rc_node_link_child(pp, np);
2037 
2038 	return (np);
2039 }
2040 
2041 /*
2042  * makes sure a snaplevel with lookup 'nip' and parent 'pp' exists.  'cp' is
2043  * used (and returned) if the node does not yet exist.  If it does exist, 'cp'
2044  * is freed, and the existing node is returned instead.
2045  */
2046 rc_node_t *
2047 rc_node_setup_snaplevel(rc_node_t *cp, rc_node_lookup_t *nip,
2048     rc_snaplevel_t *lvl, rc_node_t *pp)
2049 {
2050 	rc_node_t *np;
2051 	cache_bucket_t *bp;
2052 	uint32_t h = rc_node_hash(nip);
2053 
2054 	assert(cp->rn_refs == 0);
2055 
2056 	bp = cache_hold(h);
2057 	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2058 		cache_release(bp);
2059 
2060 		/*
2061 		 * make sure it matches our expectations
2062 		 */
2063 		(void) pthread_mutex_lock(&np->rn_lock);
2064 		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2065 			assert(np->rn_parent == pp);
2066 			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2067 			assert(np->rn_name == NULL);
2068 			assert(np->rn_type == NULL);
2069 			assert(np->rn_flags & RC_NODE_IN_PARENT);
2070 			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2071 		}
2072 		(void) pthread_mutex_unlock(&np->rn_lock);
2073 
2074 		rc_node_destroy(cp);
2075 		return (np);
2076 	}
2077 
2078 	/*
2079 	 * No one is there -- create a new node.
2080 	 */
2081 	np = cp;
2082 	rc_node_hold(np);	/* released in snapshot_fill_children() */
2083 	np->rn_id = *nip;
2084 	np->rn_hash = h;
2085 
2086 	rc_snaplevel_hold(lvl);
2087 	np->rn_snaplevel = lvl;
2088 
2089 	np->rn_flags |= RC_NODE_USING_PARENT;
2090 
2091 	cache_insert_unlocked(bp, np);
2092 	cache_release(bp);		/* we are now visible */
2093 
2094 	/* Add this snaplevel to the snapshot's composition chain. */
2095 	assert(pp->rn_cchain[lvl->rsl_level_num - 1] == NULL);
2096 	pp->rn_cchain[lvl->rsl_level_num - 1] = np;
2097 
2098 	rc_node_link_child(pp, np);
2099 
2100 	return (np);
2101 }
2102 
2103 /*
2104  * Returns NULL if strdup() fails.
2105  */
2106 rc_node_t *
2107 rc_node_setup_pg(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
2108     const char *type, uint32_t flags, uint32_t gen_id, rc_node_t *pp)
2109 {
2110 	rc_node_t *np;
2111 	cache_bucket_t *bp;
2112 
2113 	uint32_t h = rc_node_hash(nip);
2114 	bp = cache_hold(h);
2115 	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2116 		cache_release(bp);
2117 
2118 		/*
2119 		 * make sure it matches our expectations (don't check
2120 		 * the generation number or parent, since someone could
2121 		 * have gotten a transaction through while we weren't
2122 		 * looking)
2123 		 */
2124 		(void) pthread_mutex_lock(&np->rn_lock);
2125 		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2126 			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2127 			assert(strcmp(np->rn_name, name) == 0);
2128 			assert(strcmp(np->rn_type, type) == 0);
2129 			assert(np->rn_pgflags == flags);
2130 			assert(np->rn_flags & RC_NODE_IN_PARENT);
2131 			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2132 		}
2133 		(void) pthread_mutex_unlock(&np->rn_lock);
2134 
2135 		rc_node_destroy(cp);
2136 		return (np);
2137 	}
2138 
2139 	np = cp;
2140 	rc_node_hold(np);		/* released in fill_pg_callback() */
2141 	np->rn_id = *nip;
2142 	np->rn_hash = h;
2143 	np->rn_name = strdup(name);
2144 	if (np->rn_name == NULL) {
2145 		rc_node_rele(np);
2146 		return (NULL);
2147 	}
2148 	np->rn_type = strdup(type);
2149 	if (np->rn_type == NULL) {
2150 		free((void *)np->rn_name);
2151 		rc_node_rele(np);
2152 		return (NULL);
2153 	}
2154 	np->rn_pgflags = flags;
2155 	np->rn_gen_id = gen_id;
2156 
2157 	np->rn_flags |= RC_NODE_USING_PARENT;
2158 
2159 	cache_insert_unlocked(bp, np);
2160 	cache_release(bp);		/* we are now visible */
2161 
2162 	rc_node_link_child(pp, np);
2163 
2164 	return (np);
2165 }
2166 
2167 #if COMPOSITION_DEPTH == 2
2168 /*
2169  * Initialize a "composed property group" which represents the composition of
2170  * property groups pg1 & pg2.  It is ephemeral: once created & returned for an
2171  * ITER_READ request, it is kept out of cache_hash and any child lists so
2172  * it cannot be looked up again.  Operations besides iteration are passed
2173  * through to pg1.
2174  *
2175  * pg1 & pg2 should be held before entering this function.  They will be
2176  * released in rc_node_destroy().
2177  */
2178 static int
2179 rc_node_setup_cpg(rc_node_t *cpg, rc_node_t *pg1, rc_node_t *pg2)
2180 {
2181 	if (strcmp(pg1->rn_type, pg2->rn_type) != 0)
2182 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2183 
2184 	cpg->rn_id.rl_type = REP_PROTOCOL_ENTITY_CPROPERTYGRP;
2185 	cpg->rn_name = strdup(pg1->rn_name);
2186 	if (cpg->rn_name == NULL)
2187 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2188 
2189 	cpg->rn_cchain[0] = pg1;
2190 	cpg->rn_cchain[1] = pg2;
2191 
2192 	return (REP_PROTOCOL_SUCCESS);
2193 }
2194 #else
2195 #error This code must be updated.
2196 #endif
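/*
 * Illustrative only (not part of the original code): assembling a composed
 * property group from two property group nodes, pg1 and pg2, which the caller
 * has already placed holds on (see the contract above).  The cpg is never
 * inserted into cache_hash or a child list.
 *
 *	rc_node_t *cpg = rc_node_alloc();
 *
 *	if (cpg == NULL)
 *		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
 *	if (rc_node_setup_cpg(cpg, pg1, pg2) != REP_PROTOCOL_SUCCESS) {
 *		... clean up cpg and drop the holds on pg1 and pg2 ...
 *	}
 */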
2197 
2198 /*
2199  * Fails with _NO_RESOURCES.
2200  */
2201 int
2202 rc_node_create_property(rc_node_t *pp, rc_node_lookup_t *nip,
2203     const char *name, rep_protocol_value_type_t type,
2204     const char *vals, size_t count, size_t size)
2205 {
2206 	rc_node_t *np;
2207 	cache_bucket_t *bp;
2208 
2209 	uint32_t h = rc_node_hash(nip);
2210 	bp = cache_hold(h);
2211 	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2212 		cache_release(bp);
2213 		/*
2214 		 * make sure it matches our expectations
2215 		 */
2216 		(void) pthread_mutex_lock(&np->rn_lock);
2217 		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2218 			assert(np->rn_parent == pp);
2219 			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2220 			assert(strcmp(np->rn_name, name) == 0);
2221 			assert(np->rn_valtype == type);
2222 			assert(np->rn_values_count == count);
2223 			assert(np->rn_values_size == size);
2224 			assert(vals == NULL ||
2225 			    memcmp(np->rn_values, vals, size) == 0);
2226 			assert(np->rn_flags & RC_NODE_IN_PARENT);
2227 			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2228 		}
2229 		rc_node_rele_locked(np);
2230 		object_free_values(vals, type, count, size);
2231 		return (REP_PROTOCOL_SUCCESS);
2232 	}
2233 
2234 	/*
2235 	 * No one is there -- create a new node.
2236 	 */
2237 	np = rc_node_alloc();
2238 	if (np == NULL) {
2239 		cache_release(bp);
2240 		object_free_values(vals, type, count, size);
2241 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2242 	}
2243 	np->rn_id = *nip;
2244 	np->rn_hash = h;
2245 	np->rn_name = strdup(name);
2246 	if (np->rn_name == NULL) {
2247 		cache_release(bp);
2248 		object_free_values(vals, type, count, size);
2249 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2250 	}
2251 
2252 	np->rn_valtype = type;
2253 	np->rn_values = vals;
2254 	np->rn_values_count = count;
2255 	np->rn_values_size = size;
2256 
2257 	np->rn_flags |= RC_NODE_USING_PARENT;
2258 
2259 	cache_insert_unlocked(bp, np);
2260 	cache_release(bp);		/* we are now visible */
2261 
2262 	rc_node_link_child(pp, np);
2263 
2264 	return (REP_PROTOCOL_SUCCESS);
2265 }
2266 
2267 /*
2268  * This function implements a decision table to determine the event ID for
2269  * changes to the enabled (SCF_PROPERTY_ENABLED) property.  The event ID is
2270  * determined by the value of the first property in the command specified
2271  * by cmd_no and the name of the property group.  Here is the decision
2272  * table:
2273  *
2274  *				Property Group Name
2275  *	Property	------------------------------------------
2276  *	Value		SCF_PG_GENERAL		SCF_PG_GENERAL_OVR
2277  *	--------	--------------		------------------
2278  *	"0"		ADT_smf_disable		ADT_smf_tmp_disable
2279  *	"1"		ADT_smf_enable		ADT_smf_tmp_enable
2280  *
2281  * This function is called by special_property_event through a function
2282  * pointer in the special_props_list array.
2283  *
2284  * Since the ADT_smf_* symbols may not be defined in the build machine's
2285  * include files, this function is not compiled when doing native builds.
2286  */
2287 #ifndef NATIVE_BUILD
2288 static int
2289 general_enable_id(tx_commit_data_t *tx_data, size_t cmd_no, const char *pg,
2290     au_event_t *event_id)
2291 {
2292 	const char *value;
2293 	uint32_t nvalues;
2294 	int enable;
2295 
2296 	/*
2297 	 * First, check property value.
2298 	 */
2299 	if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
2300 		return (-1);
2301 	if (nvalues == 0)
2302 		return (-1);
2303 	if (tx_cmd_value(tx_data, cmd_no, 0, &value) != REP_PROTOCOL_SUCCESS)
2304 		return (-1);
2305 	if (strcmp(value, "0") == 0) {
2306 		enable = 0;
2307 	} else if (strcmp(value, "1") == 0) {
2308 		enable = 1;
2309 	} else {
2310 		return (-1);
2311 	}
2312 
2313 	/*
2314 	 * Now check property group name.
2315 	 */
2316 	if (strcmp(pg, SCF_PG_GENERAL) == 0) {
2317 		*event_id = enable ? ADT_smf_enable : ADT_smf_disable;
2318 		return (0);
2319 	} else if (strcmp(pg, SCF_PG_GENERAL_OVR) == 0) {
2320 		*event_id = enable ? ADT_smf_tmp_enable : ADT_smf_tmp_disable;
2321 		return (0);
2322 	}
2323 	return (-1);
2324 }
2325 #endif	/* NATIVE_BUILD */
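/*
 * Illustrative only (not part of the original code): applying the decision
 * table above.  Assuming tx_data and cmd_no identify a command whose first
 * value is "1", a change in SCF_PG_GENERAL_OVR maps to the temporary-enable
 * event:
 *
 *	au_event_t id;
 *
 *	if (general_enable_id(tx_data, cmd_no, SCF_PG_GENERAL_OVR, &id) == 0)
 *		assert(id == ADT_smf_tmp_enable);
 */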
2326 
2327 /*
2328  * This function compares two audit_special_prop_item_t structures
2329  * represented by item1 and item2.  It returns an integer greater than 0 if
2330  * item1 is greater than item2.  It returns 0 if they are equal and an
2331  * integer less than 0 if item1 is less than item2.  api_prop_name and
2332  * api_pg_name are the key fields for sorting.
2333  *
2334  * This function is suitable for calls to bsearch(3C) and qsort(3C).
2335  */
2336 static int
2337 special_prop_compare(const void *item1, const void *item2)
2338 {
2339 	const audit_special_prop_item_t *a = (audit_special_prop_item_t *)item1;
2340 	const audit_special_prop_item_t *b = (audit_special_prop_item_t *)item2;
2341 	int r;
2342 
2343 	r = strcmp(a->api_prop_name, b->api_prop_name);
2344 	if (r == 0) {
2345 		/*
2346 		 * Primary keys are the same, so check the secondary key.
2347 		 */
2348 		r = strcmp(a->api_pg_name, b->api_pg_name);
2349 	}
2350 	return (r);
2351 }
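/*
 * Illustrative only (not part of the original code): because rc_node_init()
 * below sorts special_props_list with this comparator, a lookup can use
 * bsearch(3C).  "prop_name" and "pg_name" are hypothetical locals; only the
 * two key fields need to be filled in.
 *
 *	audit_special_prop_item_t key, *found;
 *
 *	key.api_prop_name = prop_name;
 *	key.api_pg_name = pg_name;
 *	found = bsearch(&key, special_props_list, SPECIAL_PROP_COUNT,
 *	    sizeof (special_props_list[0]), special_prop_compare);
 */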
2352 
2353 int
2354 rc_node_init(void)
2355 {
2356 	rc_node_t *np;
2357 	cache_bucket_t *bp;
2358 
2359 	rc_children_pool = uu_list_pool_create("rc_children_pool",
2360 	    sizeof (rc_node_t), offsetof(rc_node_t, rn_sibling_node),
2361 	    NULL, UU_LIST_POOL_DEBUG);
2362 
2363 	rc_pg_notify_pool = uu_list_pool_create("rc_pg_notify_pool",
2364 	    sizeof (rc_node_pg_notify_t),
2365 	    offsetof(rc_node_pg_notify_t, rnpn_node),
2366 	    NULL, UU_LIST_POOL_DEBUG);
2367 
2368 	rc_notify_pool = uu_list_pool_create("rc_notify_pool",
2369 	    sizeof (rc_notify_t), offsetof(rc_notify_t, rcn_list_node),
2370 	    NULL, UU_LIST_POOL_DEBUG);
2371 
2372 	rc_notify_info_pool = uu_list_pool_create("rc_notify_info_pool",
2373 	    sizeof (rc_notify_info_t),
2374 	    offsetof(rc_notify_info_t, rni_list_node),
2375 	    NULL, UU_LIST_POOL_DEBUG);
2376 
2377 	if (rc_children_pool == NULL || rc_pg_notify_pool == NULL ||
2378 	    rc_notify_pool == NULL || rc_notify_info_pool == NULL)
2379 		uu_die("out of memory");
2380 
2381 	rc_notify_list = uu_list_create(rc_notify_pool,
2382 	    &rc_notify_list, 0);
2383 
2384 	rc_notify_info_list = uu_list_create(rc_notify_info_pool,
2385 	    &rc_notify_info_list, 0);
2386 
2387 	if (rc_notify_list == NULL || rc_notify_info_list == NULL)
2388 		uu_die("out of memory");
2389 
2390 	/*
2391 	 * Sort the special_props_list array so that it can be searched
2392 	 * with bsearch(3C).
2393 	 *
2394 	 * The special_props_list array is not compiled into the native
2395 	 * build code, so there is no need to call qsort if NATIVE_BUILD is
2396 	 * defined.
2397 	 */
2398 #ifndef	NATIVE_BUILD
2399 	qsort(special_props_list, SPECIAL_PROP_COUNT,
2400 	    sizeof (special_props_list[0]), special_prop_compare);
2401 #endif	/* NATIVE_BUILD */
2402 
2403 	if ((np = rc_node_alloc()) == NULL)
2404 		uu_die("out of memory");
2405 
2406 	rc_node_hold(np);
2407 	np->rn_id.rl_type = REP_PROTOCOL_ENTITY_SCOPE;
2408 	np->rn_id.rl_backend = BACKEND_TYPE_NORMAL;
2409 	np->rn_hash = rc_node_hash(&np->rn_id);
2410 	np->rn_name = "localhost";
2411 
2412 	bp = cache_hold(np->rn_hash);
2413 	cache_insert_unlocked(bp, np);
2414 	cache_release(bp);
2415 
2416 	rc_scope = np;
2417 	return (1);
2418 }
2419 
2420 /*
2421  * Fails with
2422  *   _INVALID_TYPE - type is invalid
2423  *   _TYPE_MISMATCH - np doesn't carry children of type type
2424  *   _DELETED - np has been deleted
2425  *   _NO_RESOURCES
2426  */
2427 static int
2428 rc_node_fill_children(rc_node_t *np, uint32_t type)
2429 {
2430 	int rc;
2431 
2432 	assert(MUTEX_HELD(&np->rn_lock));
2433 
2434 	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
2435 	    REP_PROTOCOL_SUCCESS)
2436 		return (rc);
2437 
2438 	if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING))
2439 		return (REP_PROTOCOL_FAIL_DELETED);
2440 
2441 	if (np->rn_flags & RC_NODE_HAS_CHILDREN) {
2442 		rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
2443 		return (REP_PROTOCOL_SUCCESS);
2444 	}
2445 
2446 	(void) pthread_mutex_unlock(&np->rn_lock);
2447 	rc = object_fill_children(np);
2448 	(void) pthread_mutex_lock(&np->rn_lock);
2449 
2450 	if (rc == REP_PROTOCOL_SUCCESS) {
2451 		np->rn_flags |= RC_NODE_HAS_CHILDREN;
2452 	}
2453 	rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
2454 
2455 	return (rc);
2456 }
2457 
2458 /*
2459  * Returns
2460  *   _INVALID_TYPE - type is invalid
2461  *   _TYPE_MISMATCH - np doesn't carry children of type type
2462  *   _DELETED - np has been deleted
2463  *   _NO_RESOURCES
2464  *   _SUCCESS - if *cpp is not NULL, it is held
2465  */
2466 static int
2467 rc_node_find_named_child(rc_node_t *np, const char *name, uint32_t type,
2468     rc_node_t **cpp)
2469 {
2470 	int ret;
2471 	rc_node_t *cp;
2472 
2473 	assert(MUTEX_HELD(&np->rn_lock));
2474 	assert(np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP);
2475 
2476 	ret = rc_node_fill_children(np, type);
2477 	if (ret != REP_PROTOCOL_SUCCESS)
2478 		return (ret);
2479 
2480 	for (cp = uu_list_first(np->rn_children);
2481 	    cp != NULL;
2482 	    cp = uu_list_next(np->rn_children, cp)) {
2483 		if (cp->rn_id.rl_type == type && strcmp(cp->rn_name, name) == 0)
2484 			break;
2485 	}
2486 
2487 	if (cp != NULL)
2488 		rc_node_hold(cp);
2489 	*cpp = cp;
2490 
2491 	return (REP_PROTOCOL_SUCCESS);
2492 }
2493 
2494 static int rc_node_parent(rc_node_t *, rc_node_t **);
2495 
2496 /*
2497  * Returns
2498  *   _INVALID_TYPE - type is invalid
2499  *   _DELETED - np or an ancestor has been deleted
2500  *   _NOT_FOUND - no ancestor of specified type exists
2501  *   _SUCCESS - *app is held
2502  */
2503 static int
2504 rc_node_find_ancestor(rc_node_t *np, uint32_t type, rc_node_t **app)
2505 {
2506 	int ret;
2507 	rc_node_t *parent, *np_orig;
2508 
2509 	if (type >= REP_PROTOCOL_ENTITY_MAX)
2510 		return (REP_PROTOCOL_FAIL_INVALID_TYPE);
2511 
2512 	np_orig = np;
2513 
2514 	while (np->rn_id.rl_type > type) {
2515 		ret = rc_node_parent(np, &parent);
2516 		if (np != np_orig)
2517 			rc_node_rele(np);
2518 		if (ret != REP_PROTOCOL_SUCCESS)
2519 			return (ret);
2520 		np = parent;
2521 	}
2522 
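	/*
	 * Note: 'parent' is only initialized (and held) when the loop above
	 * has run at least once, i.e. when the caller passed a node whose
	 * type is strictly greater than 'type'.
	 */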
2523 	if (np->rn_id.rl_type == type) {
2524 		*app = parent;
2525 		return (REP_PROTOCOL_SUCCESS);
2526 	}
2527 
2528 	return (REP_PROTOCOL_FAIL_NOT_FOUND);
2529 }
2530 
2531 #ifndef NATIVE_BUILD
2532 /*
2533  * If the propname property exists in pg, and it is of type string, add its
2534  * values as authorizations to pcp.  pg must not be locked on entry, and it is
2535  * returned unlocked.  Returns
2536  *   _DELETED - pg was deleted
2537  *   _NO_RESOURCES
2538  *   _NOT_FOUND - pg has no property named propname
2539  *   _SUCCESS
2540  */
2541 static int
2542 perm_add_pg_prop_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
2543 {
2544 	rc_node_t *prop;
2545 	int result;
2546 
2547 	uint_t count;
2548 	const char *cp;
2549 
2550 	assert(!MUTEX_HELD(&pg->rn_lock));
2551 	assert(pg->rn_id.rl_type == REP_PROTOCOL_ENTITY_PROPERTYGRP);
2552 
2553 	(void) pthread_mutex_lock(&pg->rn_lock);
2554 	result = rc_node_find_named_child(pg, propname,
2555 	    REP_PROTOCOL_ENTITY_PROPERTY, &prop);
2556 	(void) pthread_mutex_unlock(&pg->rn_lock);
2557 	if (result != REP_PROTOCOL_SUCCESS) {
2558 		switch (result) {
2559 		case REP_PROTOCOL_FAIL_DELETED:
2560 		case REP_PROTOCOL_FAIL_NO_RESOURCES:
2561 			return (result);
2562 
2563 		case REP_PROTOCOL_FAIL_INVALID_TYPE:
2564 		case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
2565 		default:
2566 			bad_error("rc_node_find_named_child", result);
2567 		}
2568 	}
2569 
2570 	if (prop == NULL)
2571 		return (REP_PROTOCOL_FAIL_NOT_FOUND);
2572 
2573 	/* rn_valtype is immutable, so no locking. */
2574 	if (prop->rn_valtype != REP_PROTOCOL_TYPE_STRING) {
2575 		rc_node_rele(prop);
2576 		return (REP_PROTOCOL_SUCCESS);
2577 	}
2578 
2579 	(void) pthread_mutex_lock(&prop->rn_lock);
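	/*
	 * rn_values holds rn_values_count strings packed back to back, each
	 * NUL-terminated, so stepping past a terminator reaches the next
	 * value.
	 */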
2580 	for (count = prop->rn_values_count, cp = prop->rn_values;
2581 	    count > 0;
2582 	    --count) {
2583 		result = perm_add_enabling_type(pcp, cp,
2584 		    (pg->rn_id.rl_ids[ID_INSTANCE]) ? PC_AUTH_INST :
2585 		    PC_AUTH_SVC);
2586 		if (result != REP_PROTOCOL_SUCCESS)
2587 			break;
2588 
2589 		cp = strchr(cp, '\0') + 1;
2590 	}
2591 
2592 	rc_node_rele_locked(prop);
2593 
2594 	return (result);
2595 }
2596 
2597 /*
2598  * Assuming that ent is a service or instance node, if the pgname property
2599  * group has type pgtype, and it has a propname property with string type, add
2600  * its values as authorizations to pcp.  If pgtype is NULL, it is not checked.
2601  * Returns
2602  *   _SUCCESS
2603  *   _DELETED - ent was deleted
2604  *   _NO_RESOURCES - no resources
2605  *   _NOT_FOUND - ent does not have pgname pg or propname property
2606  */
2607 static int
2608 perm_add_ent_prop_values(permcheck_t *pcp, rc_node_t *ent, const char *pgname,
2609     const char *pgtype, const char *propname)
2610 {
2611 	int r;
2612 	rc_node_t *pg;
2613 
2614 	assert(!MUTEX_HELD(&ent->rn_lock));
2615 
2616 	(void) pthread_mutex_lock(&ent->rn_lock);
2617 	r = rc_node_find_named_child(ent, pgname,
2618 	    REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
2619 	(void) pthread_mutex_unlock(&ent->rn_lock);
2620 
2621 	switch (r) {
2622 	case REP_PROTOCOL_SUCCESS:
2623 		break;
2624 
2625 	case REP_PROTOCOL_FAIL_DELETED:
2626 	case REP_PROTOCOL_FAIL_NO_RESOURCES:
2627 		return (r);
2628 
2629 	default:
2630 		bad_error("rc_node_find_named_child", r);
2631 	}
2632 
2633 	if (pg == NULL)
2634 		return (REP_PROTOCOL_FAIL_NOT_FOUND);
2635 
2636 	if (pgtype == NULL || strcmp(pg->rn_type, pgtype) == 0) {
2637 		r = perm_add_pg_prop_values(pcp, pg, propname);
2638 		switch (r) {
2639 		case REP_PROTOCOL_FAIL_DELETED:
2640 			r = REP_PROTOCOL_FAIL_NOT_FOUND;
2641 			break;
2642 
2643 		case REP_PROTOCOL_FAIL_NO_RESOURCES:
2644 		case REP_PROTOCOL_SUCCESS:
2645 		case REP_PROTOCOL_FAIL_NOT_FOUND:
2646 			break;
2647 
2648 		default:
2649 			bad_error("perm_add_pg_prop_values", r);
2650 		}
2651 	}
2652 
2653 	rc_node_rele(pg);
2654 
2655 	return (r);
2656 }
2657 
2658 /*
2659  * If pg has a string-typed property named propname, add its values as
2660  * authorizations to pcp.  If pg has no such property, and its parent is an
2661  * instance, walk up to the service and try doing the same with the property
2662  * of the same name from the property group of the same name.  Returns
2663  *   _SUCCESS
2664  *   _NO_RESOURCES
2665  *   _DELETED - pg (or an ancestor) was deleted
2666  */
2667 static int
2668 perm_add_enabling_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
2669 {
2670 	int r;
2671 	char pgname[REP_PROTOCOL_NAME_LEN + 1];
2672 	rc_node_t *svc;
2673 	size_t sz;
2674 
2675 	r = perm_add_pg_prop_values(pcp, pg, propname);
2676 
2677 	if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2678 		return (r);
2679 
2680 	assert(!MUTEX_HELD(&pg->rn_lock));
2681 
2682 	if (pg->rn_id.rl_ids[ID_INSTANCE] == 0)
2683 		return (REP_PROTOCOL_SUCCESS);
2684 
2685 	sz = strlcpy(pgname, pg->rn_name, sizeof (pgname));
2686 	assert(sz < sizeof (pgname));
2687 
2688 	/*
2689 	 * If pg is a child of an instance or snapshot, we want to compose the
2690 	 * authorization property with the service's (if it exists).  The
2691 	 * snapshot case applies only to read_authorization.  In all other
2692 	 * cases, the pg's parent will be the instance.
2693 	 */
2694 	r = rc_node_find_ancestor(pg, REP_PROTOCOL_ENTITY_SERVICE, &svc);
2695 	if (r != REP_PROTOCOL_SUCCESS) {
2696 		assert(r == REP_PROTOCOL_FAIL_DELETED);
2697 		return (r);
2698 	}
2699 	assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);
2700 
2701 	r = perm_add_ent_prop_values(pcp, svc, pgname, NULL, propname);
2702 
2703 	rc_node_rele(svc);
2704 
2705 	if (r == REP_PROTOCOL_FAIL_NOT_FOUND)
2706 		r = REP_PROTOCOL_SUCCESS;
2707 
2708 	return (r);
2709 }
2710 
2711 /*
2712  * Call perm_add_enabling_values() for the "action_authorization" property of
2713  * the "general" property group of inst.  Returns
2714  *   _DELETED - inst (or an ancestor) was deleted
2715  *   _NO_RESOURCES
2716  *   _SUCCESS
2717  */
2718 static int
2719 perm_add_inst_action_auth(permcheck_t *pcp, rc_node_t *inst)
2720 {
2721 	int r;
2722 	rc_node_t *svc;
2723 
2724 	assert(inst->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
2725 
2726 	r = perm_add_ent_prop_values(pcp, inst, AUTH_PG_GENERAL,
2727 	    AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2728 
2729 	if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2730 		return (r);
2731 
2732 	r = rc_node_parent(inst, &svc);
2733 	if (r != REP_PROTOCOL_SUCCESS) {
2734 		assert(r == REP_PROTOCOL_FAIL_DELETED);
2735 		return (r);
2736 	}
2737 
2738 	r = perm_add_ent_prop_values(pcp, svc, AUTH_PG_GENERAL,
2739 	    AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2740 
2741 	return (r == REP_PROTOCOL_FAIL_NOT_FOUND ? REP_PROTOCOL_SUCCESS : r);
2742 }
2743 #endif /* NATIVE_BUILD */
2744 
2745 void
2746 rc_node_ptr_init(rc_node_ptr_t *out)
2747 {
2748 	out->rnp_node = NULL;
2749 	out->rnp_auth_string = NULL;
2750 	out->rnp_authorized = RC_AUTH_UNKNOWN;
2751 	out->rnp_deleted = 0;
2752 }
2753 
2754 void
2755 rc_node_ptr_free_mem(rc_node_ptr_t *npp)
2756 {
2757 	if (npp->rnp_auth_string != NULL) {
2758 		free((void *)npp->rnp_auth_string);
2759 		npp->rnp_auth_string = NULL;
2760 	}
2761 }
2762 
2763 static void
2764 rc_node_assign(rc_node_ptr_t *out, rc_node_t *val)
2765 {
2766 	rc_node_t *cur = out->rnp_node;
2767 	if (val != NULL)
2768 		rc_node_hold(val);
2769 	out->rnp_node = val;
2770 	if (cur != NULL) {
2771 		NODE_LOCK(cur);
2772 
2773 		/*
2774 		 * Register the ephemeral reference created by reading
2775 		 * out->rnp_node into cur.  Note that the persistent
2776 		 * reference we're destroying is locked by the client
2777 		 * layer.
2778 		 */
2779 		rc_node_hold_ephemeral_locked(cur);
2780 
2781 		rc_node_rele_locked(cur);
2782 	}
2783 	out->rnp_authorized = RC_AUTH_UNKNOWN;
2784 	rc_node_ptr_free_mem(out);
2785 	out->rnp_deleted = 0;
2786 }
2787 
2788 void
2789 rc_node_clear(rc_node_ptr_t *out, int deleted)
2790 {
2791 	rc_node_assign(out, NULL);
2792 	out->rnp_deleted = deleted;
2793 }
2794 
2795 void
2796 rc_node_ptr_assign(rc_node_ptr_t *out, const rc_node_ptr_t *val)
2797 {
2798 	rc_node_assign(out, val->rnp_node);
2799 }
2800 
2801 /*
2802  * rc_node_check()/RC_NODE_CHECK()
2803  *	generic "entry" checks, run before the use of an rc_node pointer.
2804  *
2805  * Fails with
2806  *   _NOT_SET
2807  *   _DELETED
2808  */
2809 static int
2810 rc_node_check_and_lock(rc_node_t *np)
2811 {
2812 	int result = REP_PROTOCOL_SUCCESS;
2813 	if (np == NULL)
2814 		return (REP_PROTOCOL_FAIL_NOT_SET);
2815 
2816 	(void) pthread_mutex_lock(&np->rn_lock);
2817 	if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2818 		result = REP_PROTOCOL_FAIL_DELETED;
2819 		(void) pthread_mutex_unlock(&np->rn_lock);
2820 	}
2821 
2822 	return (result);
2823 }
2824 
2825 /*
2826  * Fails with
2827  *   _NOT_SET - ptr is reset
2828  *   _DELETED - node has been deleted
2829  */
2830 static rc_node_t *
2831 rc_node_ptr_check_and_lock(rc_node_ptr_t *npp, int *res)
2832 {
2833 	rc_node_t *np = npp->rnp_node;
2834 	if (np == NULL) {
2835 		if (npp->rnp_deleted)
2836 			*res = REP_PROTOCOL_FAIL_DELETED;
2837 		else
2838 			*res = REP_PROTOCOL_FAIL_NOT_SET;
2839 		return (NULL);
2840 	}
2841 
2842 	(void) pthread_mutex_lock(&np->rn_lock);
2843 	if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2844 		(void) pthread_mutex_unlock(&np->rn_lock);
2845 		rc_node_clear(npp, 1);
2846 		*res = REP_PROTOCOL_FAIL_DELETED;
2847 		return (NULL);
2848 	}
2849 	return (np);
2850 }
2851 
2852 #define	RC_NODE_CHECK_AND_LOCK(n) {					\
2853 	int rc__res;							\
2854 	if ((rc__res = rc_node_check_and_lock(n)) != REP_PROTOCOL_SUCCESS) \
2855 		return (rc__res);					\
2856 }
2857 
2858 #define	RC_NODE_CHECK(n) {						\
2859 	RC_NODE_CHECK_AND_LOCK(n);					\
2860 	(void) pthread_mutex_unlock(&(n)->rn_lock);			\
2861 }
2862 
2863 #define	RC_NODE_CHECK_AND_HOLD(n) {					\
2864 	RC_NODE_CHECK_AND_LOCK(n);					\
2865 	rc_node_hold_locked(n);						\
2866 	(void) pthread_mutex_unlock(&(n)->rn_lock);			\
2867 }
2868 
2869 #define	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp) {			\
2870 	int rc__res;							\
2871 	if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == NULL)	\
2872 		return (rc__res);					\
2873 }
2874 
2875 #define	RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, mem) {		\
2876 	int rc__res;							\
2877 	if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == 	\
2878 	    NULL) {							\
2879 		if ((mem) != NULL)					\
2880 			free((mem));					\
2881 		return (rc__res);					\
2882 	}								\
2883 }
2884 
2885 #define	RC_NODE_PTR_GET_CHECK(np, npp) {				\
2886 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);			\
2887 	(void) pthread_mutex_unlock(&(np)->rn_lock);			\
2888 }
2889 
2890 #define	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp) {			\
2891 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);			\
2892 	rc_node_hold_locked(np);					\
2893 	(void) pthread_mutex_unlock(&(np)->rn_lock);			\
2894 }
2895 
2896 #define	HOLD_FLAG_OR_RETURN(np, flag) {					\
2897 	assert(MUTEX_HELD(&(np)->rn_lock));				\
2898 	assert(!((np)->rn_flags & RC_NODE_DEAD));			\
2899 	if (!rc_node_hold_flag((np), flag)) {				\
2900 		(void) pthread_mutex_unlock(&(np)->rn_lock);		\
2901 		return (REP_PROTOCOL_FAIL_DELETED);			\
2902 	}								\
2903 }
2904 
2905 #define	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, flag, mem) {		\
2906 	assert(MUTEX_HELD(&(np)->rn_lock));				\
2907 	if (!rc_node_hold_flag((np), flag)) {				\
2908 		(void) pthread_mutex_unlock(&(np)->rn_lock);		\
2909 		assert((np) == (npp)->rnp_node);			\
2910 		rc_node_clear(npp, 1);					\
2911 		if ((mem) != NULL)					\
2912 			free((mem));					\
2913 		return (REP_PROTOCOL_FAIL_DELETED);			\
2914 	}								\
2915 }
2916 
2917 int
2918 rc_local_scope(uint32_t type, rc_node_ptr_t *out)
2919 {
2920 	if (type != REP_PROTOCOL_ENTITY_SCOPE) {
2921 		rc_node_clear(out, 0);
2922 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2923 	}
2924 
2925 	/*
2926 	 * the main scope never gets destroyed
2927 	 */
2928 	rc_node_assign(out, rc_scope);
2929 
2930 	return (REP_PROTOCOL_SUCCESS);
2931 }
2932 
2933 /*
2934  * Fails with
2935  *   _NOT_SET - npp is not set
2936  *   _DELETED - the node npp pointed at has been deleted
2937  *   _TYPE_MISMATCH - type is not _SCOPE
2938  *   _NOT_FOUND - scope has no parent
2939  */
2940 static int
2941 rc_scope_parent_scope(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
2942 {
2943 	rc_node_t *np;
2944 
2945 	rc_node_clear(out, 0);
2946 
2947 	RC_NODE_PTR_GET_CHECK(np, npp);
2948 
2949 	if (type != REP_PROTOCOL_ENTITY_SCOPE)
2950 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2951 
2952 	return (REP_PROTOCOL_FAIL_NOT_FOUND);
2953 }
2954 
2955 static int rc_node_pg_check_read_protect(rc_node_t *);
2956 
2957 /*
2958  * Fails with
2959  *   _NOT_SET
2960  *   _DELETED
2961  *   _NOT_APPLICABLE
2962  *   _NOT_FOUND
2963  *   _BAD_REQUEST
2964  *   _TRUNCATED
2965  *   _NO_RESOURCES
2966  */
2967 int
2968 rc_node_name(rc_node_ptr_t *npp, char *buf, size_t sz, uint32_t answertype,
2969     size_t *sz_out)
2970 {
2971 	size_t actual;
2972 	rc_node_t *np;
2973 
2974 	assert(sz == *sz_out);
2975 
2976 	RC_NODE_PTR_GET_CHECK(np, npp);
2977 
2978 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
2979 		np = np->rn_cchain[0];
2980 		RC_NODE_CHECK(np);
2981 	}
2982 
2983 	switch (answertype) {
2984 	case RP_ENTITY_NAME_NAME:
2985 		if (np->rn_name == NULL)
2986 			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
2987 		actual = strlcpy(buf, np->rn_name, sz);
2988 		break;
2989 	case RP_ENTITY_NAME_PGTYPE:
2990 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
2991 			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
2992 		actual = strlcpy(buf, np->rn_type, sz);
2993 		break;
2994 	case RP_ENTITY_NAME_PGFLAGS:
2995 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
2996 			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
2997 		actual = snprintf(buf, sz, "%d", np->rn_pgflags);
2998 		break;
2999 	case RP_ENTITY_NAME_SNAPLEVEL_SCOPE:
3000 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3001 			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3002 		actual = strlcpy(buf, np->rn_snaplevel->rsl_scope, sz);
3003 		break;
3004 	case RP_ENTITY_NAME_SNAPLEVEL_SERVICE:
3005 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3006 			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3007 		actual = strlcpy(buf, np->rn_snaplevel->rsl_service, sz);
3008 		break;
3009 	case RP_ENTITY_NAME_SNAPLEVEL_INSTANCE:
3010 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
3011 			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3012 		if (np->rn_snaplevel->rsl_instance == NULL)
3013 			return (REP_PROTOCOL_FAIL_NOT_FOUND);
3014 		actual = strlcpy(buf, np->rn_snaplevel->rsl_instance, sz);
3015 		break;
3016 	case RP_ENTITY_NAME_PGREADPROT:
3017 	{
3018 		int ret;
3019 
3020 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
3021 			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3022 		ret = rc_node_pg_check_read_protect(np);
3023 		assert(ret != REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3024 		switch (ret) {
3025 		case REP_PROTOCOL_FAIL_PERMISSION_DENIED:
3026 			actual = snprintf(buf, sz, "1");
3027 			break;
3028 		case REP_PROTOCOL_SUCCESS:
3029 			actual = snprintf(buf, sz, "0");
3030 			break;
3031 		default:
3032 			return (ret);
3033 		}
3034 		break;
3035 	}
3036 	default:
3037 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
3038 	}
3039 	if (actual >= sz)
3040 		return (REP_PROTOCOL_FAIL_TRUNCATED);
3041 
3042 	*sz_out = actual;
3043 	return (REP_PROTOCOL_SUCCESS);
3044 }
3045 
3046 int
3047 rc_node_get_property_type(rc_node_ptr_t *npp, rep_protocol_value_type_t *out)
3048 {
3049 	rc_node_t *np;
3050 
3051 	RC_NODE_PTR_GET_CHECK(np, npp);
3052 
3053 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
3054 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3055 
3056 	*out = np->rn_valtype;
3057 
3058 	return (REP_PROTOCOL_SUCCESS);
3059 }
3060 
3061 /*
3062  * Get np's parent.  If np is deleted, returns _DELETED.  Otherwise puts a hold
3063  * on the parent, returns a pointer to it in *out, and returns _SUCCESS.
3064  */
3065 static int
3066 rc_node_parent(rc_node_t *np, rc_node_t **out)
3067 {
3068 	rc_node_t *pnp;
3069 	rc_node_t *np_orig;
3070 
3071 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3072 		RC_NODE_CHECK_AND_LOCK(np);
3073 	} else {
3074 		np = np->rn_cchain[0];
3075 		RC_NODE_CHECK_AND_LOCK(np);
3076 	}
3077 
3078 	np_orig = np;
3079 	rc_node_hold_locked(np);		/* simplifies the remainder */
3080 
3081 	for (;;) {
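	/*
	 * If np has been superseded (RC_NODE_OLD), look up the current node
	 * for this id in the cache and retry on that one instead.
	 */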
3082 		if (!rc_node_wait_flag(np,
3083 		    RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
3084 			rc_node_rele_locked(np);
3085 			return (REP_PROTOCOL_FAIL_DELETED);
3086 		}
3087 
3088 		if (!(np->rn_flags & RC_NODE_OLD))
3089 			break;
3090 
3091 		rc_node_rele_locked(np);
3092 		np = cache_lookup(&np_orig->rn_id);
3093 		assert(np != np_orig);
3094 
3095 		if (np == NULL)
3096 			goto deleted;
3097 		(void) pthread_mutex_lock(&np->rn_lock);
3098 	}
3099 
3100 	/* guaranteed to succeed without dropping the lock */
3101 	if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
3102 		(void) pthread_mutex_unlock(&np->rn_lock);
3103 		*out = NULL;
3104 		rc_node_rele(np);
3105 		return (REP_PROTOCOL_FAIL_DELETED);
3106 	}
3107 
3108 	assert(np->rn_parent != NULL);
3109 	pnp = np->rn_parent;
3110 	(void) pthread_mutex_unlock(&np->rn_lock);
3111 
3112 	(void) pthread_mutex_lock(&pnp->rn_lock);
3113 	(void) pthread_mutex_lock(&np->rn_lock);
3114 	rc_node_rele_flag(np, RC_NODE_USING_PARENT);
3115 	(void) pthread_mutex_unlock(&np->rn_lock);
3116 
3117 	rc_node_hold_locked(pnp);
3118 
3119 	(void) pthread_mutex_unlock(&pnp->rn_lock);
3120 
3121 	rc_node_rele(np);
3122 	*out = pnp;
3123 	return (REP_PROTOCOL_SUCCESS);
3124 
3125 deleted:
3126 	rc_node_rele(np);
3127 	return (REP_PROTOCOL_FAIL_DELETED);
3128 }
3129 
3130 /*
3131  * Fails with
3132  *   _NOT_SET
3133  *   _DELETED
3134  */
3135 static int
3136 rc_node_ptr_parent(rc_node_ptr_t *npp, rc_node_t **out)
3137 {
3138 	rc_node_t *np;
3139 
3140 	RC_NODE_PTR_GET_CHECK(np, npp);
3141 
3142 	return (rc_node_parent(np, out));
3143 }
3144 
3145 /*
3146  * Fails with
3147  *   _NOT_SET - npp is not set
3148  *   _DELETED - the node npp pointed at has been deleted
3149  *   _TYPE_MISMATCH - npp's node's parent is not of type type
3150  *
3151  * If npp points to a scope, can also fail with
3152  *   _NOT_FOUND - scope has no parent
3153  */
3154 int
3155 rc_node_get_parent(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
3156 {
3157 	rc_node_t *pnp;
3158 	int rc;
3159 
3160 	if (npp->rnp_node != NULL &&
3161 	    npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE)
3162 		return (rc_scope_parent_scope(npp, type, out));
3163 
3164 	if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS) {
3165 		rc_node_clear(out, 0);
3166 		return (rc);
3167 	}
3168 
3169 	if (type != pnp->rn_id.rl_type) {
3170 		rc_node_rele(pnp);
3171 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3172 	}
3173 
3174 	rc_node_assign(out, pnp);
3175 	rc_node_rele(pnp);
3176 
3177 	return (REP_PROTOCOL_SUCCESS);
3178 }
3179 
3180 int
3181 rc_node_parent_type(rc_node_ptr_t *npp, uint32_t *type_out)
3182 {
3183 	rc_node_t *pnp;
3184 	int rc;
3185 
3186 	if (npp->rnp_node != NULL &&
3187 	    npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE) {
3188 		*type_out = REP_PROTOCOL_ENTITY_SCOPE;
3189 		return (REP_PROTOCOL_SUCCESS);
3190 	}
3191 
3192 	if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS)
3193 		return (rc);
3194 
3195 	*type_out = pnp->rn_id.rl_type;
3196 
3197 	rc_node_rele(pnp);
3198 
3199 	return (REP_PROTOCOL_SUCCESS);
3200 }
3201 
3202 /*
3203  * Fails with
3204  *   _INVALID_TYPE - type is invalid
3205  *   _TYPE_MISMATCH - np doesn't carry children of type type
3206  *   _DELETED - np has been deleted
3207  *   _NOT_FOUND - no child with that name/type combo found
3208  *   _NO_RESOURCES
3209  *   _BACKEND_ACCESS
3210  */
3211 int
3212 rc_node_get_child(rc_node_ptr_t *npp, const char *name, uint32_t type,
3213     rc_node_ptr_t *outp)
3214 {
3215 	rc_node_t *np, *cp;
3216 	rc_node_t *child = NULL;
3217 	int ret, idx;
3218 
3219 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
3220 	if ((ret = rc_check_type_name(type, name)) == REP_PROTOCOL_SUCCESS) {
3221 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3222 			ret = rc_node_find_named_child(np, name, type, &child);
3223 		} else {
3224 			(void) pthread_mutex_unlock(&np->rn_lock);
3225 			ret = REP_PROTOCOL_SUCCESS;
3226 			for (idx = 0; idx < COMPOSITION_DEPTH; idx++) {
3227 				cp = np->rn_cchain[idx];
3228 				if (cp == NULL)
3229 					break;
3230 				RC_NODE_CHECK_AND_LOCK(cp);
3231 				ret = rc_node_find_named_child(cp, name, type,
3232 				    &child);
3233 				(void) pthread_mutex_unlock(&cp->rn_lock);
3234 				/*
3235 				 * loop only if we succeeded, but no child of
3236 				 * the correct name was found.
3237 				 */
3238 				if (ret != REP_PROTOCOL_SUCCESS ||
3239 				    child != NULL)
3240 					break;
3241 			}
3242 			(void) pthread_mutex_lock(&np->rn_lock);
3243 		}
3244 	}
3245 	(void) pthread_mutex_unlock(&np->rn_lock);
3246 
3247 	if (ret == REP_PROTOCOL_SUCCESS) {
3248 		rc_node_assign(outp, child);
3249 		if (child != NULL)
3250 			rc_node_rele(child);
3251 		else
3252 			ret = REP_PROTOCOL_FAIL_NOT_FOUND;
3253 	} else {
3254 		rc_node_assign(outp, NULL);
3255 	}
3256 	return (ret);
3257 }
3258 
3259 int
3260 rc_node_update(rc_node_ptr_t *npp)
3261 {
3262 	cache_bucket_t *bp;
3263 	rc_node_t *np = npp->rnp_node;
3264 	rc_node_t *nnp;
3265 	rc_node_t *cpg = NULL;
3266 
3267 	if (np != NULL &&
3268 	    np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3269 		/*
3270 		 * If we're updating a composed property group, actually
3271 		 * update the top-level property group & return the
3272 		 * appropriate value.  But leave *npp pointing at us.
3273 		 */
3274 		cpg = np;
3275 		np = np->rn_cchain[0];
3276 	}
3277 
3278 	RC_NODE_CHECK(np);
3279 
3280 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP &&
3281 	    np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT)
3282 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
3283 
3284 	for (;;) {
3285 		bp = cache_hold(np->rn_hash);
3286 		nnp = cache_lookup_unlocked(bp, &np->rn_id);
3287 		if (nnp == NULL) {
3288 			cache_release(bp);
3289 			rc_node_clear(npp, 1);
3290 			return (REP_PROTOCOL_FAIL_DELETED);
3291 		}
3292 		/*
3293 		 * grab the lock before dropping the cache bucket, so
3294 		 * that no one else can sneak in
3295 		 */
3296 		(void) pthread_mutex_lock(&nnp->rn_lock);
3297 		cache_release(bp);
3298 
3299 		if (!(nnp->rn_flags & RC_NODE_IN_TX) ||
3300 		    !rc_node_wait_flag(nnp, RC_NODE_IN_TX))
3301 			break;
3302 
3303 		rc_node_rele_locked(nnp);
3304 	}
3305 
3306 	/*
3307 	 * If it is dead, we want to update it so that it will continue to
3308 	 * report being dead.
3309 	 */
3310 	if (nnp->rn_flags & RC_NODE_DEAD) {
3311 		(void) pthread_mutex_unlock(&nnp->rn_lock);
3312 		if (nnp != np && cpg == NULL)
3313 			rc_node_assign(npp, nnp);	/* updated */
3314 		rc_node_rele(nnp);
3315 		return (REP_PROTOCOL_FAIL_DELETED);
3316 	}
3317 
3318 	assert(!(nnp->rn_flags & RC_NODE_OLD));
3319 	(void) pthread_mutex_unlock(&nnp->rn_lock);
3320 
3321 	if (nnp != np && cpg == NULL)
3322 		rc_node_assign(npp, nnp);		/* updated */
3323 
3324 	rc_node_rele(nnp);
3325 
3326 	return ((nnp == np)? REP_PROTOCOL_SUCCESS : REP_PROTOCOL_DONE);
3327 }
3328 
3329 /*
3330  * does a generic modification check, for creation, deletion, and snapshot
3331  * management only.  Property group transactions have different checks.
3332  *
3333  * The string returned to *match_auth must be freed.
3334  */
3335 static perm_status_t
3336 rc_node_modify_permission_check(char **match_auth)
3337 {
3338 	permcheck_t *pcp;
3339 	perm_status_t granted = PERM_GRANTED;
3340 	int rc;
3341 
3342 	*match_auth = NULL;
3343 #ifdef NATIVE_BUILD
3344 	if (!client_is_privileged()) {
3345 		granted = PERM_DENIED;
3346 	}
3347 	return (granted);
3348 #else
3349 	if (is_main_repository == 0)
3350 		return (PERM_GRANTED);
3351 	pcp = pc_create();
3352 	if (pcp != NULL) {
3353 		rc = perm_add_enabling(pcp, AUTH_MODIFY);
3354 
3355 		if (rc == REP_PROTOCOL_SUCCESS) {
3356 			granted = perm_granted(pcp);
3357 
3358 			if ((granted == PERM_GRANTED) ||
3359 			    (granted == PERM_DENIED)) {
3360 				/*
3361 				 * Copy off the authorization
3362 				 * string before freeing pcp.
3363 				 */
3364 				*match_auth =
3365 				    strdup(pcp->pc_auth_string);
3366 				if (*match_auth == NULL)
3367 					granted = PERM_FAIL;
3368 			}
3369 		} else {
3370 			granted = PERM_FAIL;
3371 		}
3372 
3373 		pc_free(pcp);
3374 	} else {
3375 		granted = PERM_FAIL;
3376 	}
3377 
3378 	return (granted);
3379 #endif /* NATIVE_BUILD */
3380 }
3381 
3382 /*
3383  * Native builds are done to create svc.configd-native.  This program runs
3384  * only on the Solaris build machines to create the seed repository, and it
3385  * is compiled against the build machine's header files.  The ADT_smf_*
3386  * symbols may not be defined in these header files.  For this reason
3387  * smf_annotation_event(), _smf_audit_event() and special_property_event()
3388  * are not compiled for native builds.
3389  */
3390 #ifndef	NATIVE_BUILD
3391 
3392 /*
3393  * This function generates an annotation audit event if one has been setup.
3394  * Annotation events should only be generated immediately before the audit
3395  * record from the first attempt to modify the repository from a client
3396  * which has requested an annotation.
3397  */
3398 static void
3399 smf_annotation_event(int status, int return_val)
3400 {
3401 	adt_session_data_t *session;
3402 	adt_event_data_t *event = NULL;
3403 	char file[MAXPATHLEN];
3404 	char operation[REP_PROTOCOL_NAME_LEN];
3405 
3406 	/* Don't audit if we're using an alternate repository. */
3407 	if (is_main_repository == 0)
3408 		return;
3409 
3410 	if (client_annotation_needed(operation, sizeof (operation), file,
3411 	    sizeof (file)) == 0) {
3412 		return;
3413 	}
3414 	if (file[0] == 0) {
3415 		(void) strlcpy(file, "NO FILE", sizeof (file));
3416 	}
3417 	if (operation[0] == 0) {
3418 		(void) strlcpy(operation, "NO OPERATION",
3419 		    sizeof (operation));
3420 	}
3421 	if ((session = get_audit_session()) == NULL)
3422 		return;
3423 	if ((event = adt_alloc_event(session, ADT_smf_annotation)) == NULL) {
3424 		uu_warn("smf_annotation_event cannot allocate event "
3425 		    "data.  %s\n", strerror(errno));
3426 		return;
3427 	}
3428 	event->adt_smf_annotation.operation = operation;
3429 	event->adt_smf_annotation.file = file;
3430 	if (adt_put_event(event, status, return_val) == 0) {
3431 		client_annotation_finished();
3432 	} else {
3433 		uu_warn("smf_annotation_event failed to put event.  "
3434 		    "%s\n", strerror(errno));
3435 	}
3436 	adt_free_event(event);
3437 }
3438 
3439 /*
3440  * _smf_audit_event interacts with the security auditing system to generate
3441  * an audit event structure.  It establishes an audit session and allocates
3442  * an audit event.  The event is filled in from the audit data, and
3443  * adt_put_event is called to generate the event.
3444  */
3445 static void
3446 _smf_audit_event(au_event_t event_id, int status, int return_val,
3447     audit_event_data_t *data)
3448 {
3449 	char *auth_used;
3450 	char *fmri;
3451 	char *prop_value;
3452 	adt_session_data_t *session;
3453 	adt_event_data_t *event = NULL;
3454 
3455 	/* Don't audit if we're using an alternate repository */
3456 	if (is_main_repository == 0)
3457 		return;
3458 
3459 	smf_annotation_event(status, return_val);
3460 	if ((session = get_audit_session()) == NULL)
3461 		return;
3462 	if ((event = adt_alloc_event(session, event_id)) == NULL) {
3463 		uu_warn("_smf_audit_event cannot allocate event "
3464 		    "data.  %s\n", strerror(errno));
3465 		return;
3466 	}
3467 
3468 	/*
3469 	 * Handle possibility of NULL authorization strings, FMRIs and
3470 	 * property values.
3471 	 */
3472 	if (data->ed_auth == NULL) {
3473 		auth_used = "PRIVILEGED";
3474 	} else {
3475 		auth_used = data->ed_auth;
3476 	}
3477 	if (data->ed_fmri == NULL) {
3478 		syslog(LOG_WARNING, "_smf_audit_event called with "
3479 		    "empty FMRI string");
3480 		fmri = "UNKNOWN FMRI";
3481 	} else {
3482 		fmri = data->ed_fmri;
3483 	}
3484 	if (data->ed_prop_value == NULL) {
3485 		prop_value = "";
3486 	} else {
3487 		prop_value = data->ed_prop_value;
3488 	}
3489 
3490 	/* Fill in the event data. */
3491 	switch (event_id) {
3492 	case ADT_smf_attach_snap:
3493 		event->adt_smf_attach_snap.auth_used = auth_used;
3494 		event->adt_smf_attach_snap.old_fmri = data->ed_old_fmri;
3495 		event->adt_smf_attach_snap.old_name = data->ed_old_name;
3496 		event->adt_smf_attach_snap.new_fmri = fmri;
3497 		event->adt_smf_attach_snap.new_name = data->ed_snapname;
3498 		break;
3499 	case ADT_smf_change_prop:
3500 		event->adt_smf_change_prop.auth_used = auth_used;
3501 		event->adt_smf_change_prop.fmri = fmri;
3502 		event->adt_smf_change_prop.type = data->ed_type;
3503 		event->adt_smf_change_prop.value = prop_value;
3504 		break;
3505 	case ADT_smf_clear:
3506 		event->adt_smf_clear.auth_used = auth_used;
3507 		event->adt_smf_clear.fmri = fmri;
3508 		break;
3509 	case ADT_smf_create:
3510 		event->adt_smf_create.fmri = fmri;
3511 		event->adt_smf_create.auth_used = auth_used;
3512 		break;
3513 	case ADT_smf_create_npg:
3514 		event->adt_smf_create_npg.auth_used = auth_used;
3515 		event->adt_smf_create_npg.fmri = fmri;
3516 		event->adt_smf_create_npg.type = data->ed_type;
3517 		break;
3518 	case ADT_smf_create_pg:
3519 		event->adt_smf_create_pg.auth_used = auth_used;
3520 		event->adt_smf_create_pg.fmri = fmri;
3521 		event->adt_smf_create_pg.type = data->ed_type;
3522 		break;
3523 	case ADT_smf_create_prop:
3524 		event->adt_smf_create_prop.auth_used = auth_used;
3525 		event->adt_smf_create_prop.fmri = fmri;
3526 		event->adt_smf_create_prop.type = data->ed_type;
3527 		event->adt_smf_create_prop.value = prop_value;
3528 		break;
3529 	case ADT_smf_create_snap:
3530 		event->adt_smf_create_snap.auth_used = auth_used;
3531 		event->adt_smf_create_snap.fmri = fmri;
3532 		event->adt_smf_create_snap.name = data->ed_snapname;
3533 		break;
3534 	case ADT_smf_degrade:
3535 		event->adt_smf_degrade.auth_used = auth_used;
3536 		event->adt_smf_degrade.fmri = fmri;
3537 		break;
3538 	case ADT_smf_delete:
3539 		event->adt_smf_delete.fmri = fmri;
3540 		event->adt_smf_delete.auth_used = auth_used;
3541 		break;
3542 	case ADT_smf_delete_npg:
3543 		event->adt_smf_delete_npg.auth_used = auth_used;
3544 		event->adt_smf_delete_npg.fmri = fmri;
3545 		event->adt_smf_delete_npg.type = data->ed_type;
3546 		break;
3547 	case ADT_smf_delete_pg:
3548 		event->adt_smf_delete_pg.auth_used = auth_used;
3549 		event->adt_smf_delete_pg.fmri = fmri;
3550 		event->adt_smf_delete_pg.type = data->ed_type;
3551 		break;
3552 	case ADT_smf_delete_prop:
3553 		event->adt_smf_delete_prop.auth_used = auth_used;
3554 		event->adt_smf_delete_prop.fmri = fmri;
3555 		break;
3556 	case ADT_smf_delete_snap:
3557 		event->adt_smf_delete_snap.auth_used = auth_used;
3558 		event->adt_smf_delete_snap.fmri = fmri;
3559 		event->adt_smf_delete_snap.name = data->ed_snapname;
3560 		break;
3561 	case ADT_smf_disable:
3562 		event->adt_smf_disable.auth_used = auth_used;
3563 		event->adt_smf_disable.fmri = fmri;
3564 		break;
3565 	case ADT_smf_enable:
3566 		event->adt_smf_enable.auth_used = auth_used;
3567 		event->adt_smf_enable.fmri = fmri;
3568 		break;
3569 	case ADT_smf_immediate_degrade:
3570 		event->adt_smf_immediate_degrade.auth_used = auth_used;
3571 		event->adt_smf_immediate_degrade.fmri = fmri;
3572 		break;
3573 	case ADT_smf_immediate_maintenance:
3574 		event->adt_smf_immediate_maintenance.auth_used = auth_used;
3575 		event->adt_smf_immediate_maintenance.fmri = fmri;
3576 		break;
3577 	case ADT_smf_immtmp_maintenance:
3578 		event->adt_smf_immtmp_maintenance.auth_used = auth_used;
3579 		event->adt_smf_immtmp_maintenance.fmri = fmri;
3580 		break;
3581 	case ADT_smf_maintenance:
3582 		event->adt_smf_maintenance.auth_used = auth_used;
3583 		event->adt_smf_maintenance.fmri = fmri;
3584 		break;
3585 	case ADT_smf_milestone:
3586 		event->adt_smf_milestone.auth_used = auth_used;
3587 		event->adt_smf_milestone.fmri = fmri;
3588 		break;
3589 	case ADT_smf_read_prop:
3590 		event->adt_smf_read_prop.auth_used = auth_used;
3591 		event->adt_smf_read_prop.fmri = fmri;
3592 		break;
3593 	case ADT_smf_refresh:
3594 		event->adt_smf_refresh.auth_used = auth_used;
3595 		event->adt_smf_refresh.fmri = fmri;
3596 		break;
3597 	case ADT_smf_restart:
3598 		event->adt_smf_restart.auth_used = auth_used;
3599 		event->adt_smf_restart.fmri = fmri;
3600 		break;
3601 	case ADT_smf_tmp_disable:
3602 		event->adt_smf_tmp_disable.auth_used = auth_used;
3603 		event->adt_smf_tmp_disable.fmri = fmri;
3604 		break;
3605 	case ADT_smf_tmp_enable:
3606 		event->adt_smf_tmp_enable.auth_used = auth_used;
3607 		event->adt_smf_tmp_enable.fmri = fmri;
3608 		break;
3609 	case ADT_smf_tmp_maintenance:
3610 		event->adt_smf_tmp_maintenance.auth_used = auth_used;
3611 		event->adt_smf_tmp_maintenance.fmri = fmri;
3612 		break;
3613 	default:
3614 		abort();	/* Need to cover all SMF event IDs */
3615 	}
3616 
3617 	if (adt_put_event(event, status, return_val) != 0) {
3618 		uu_warn("_smf_audit_event failed to put event.  %s\n",
3619 		    strerror(errno));
3620 	}
3621 	adt_free_event(event);
3622 }
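
/*
 * Illustrative sketch (not compiled): this is roughly how callers in this
 * file package an audit_event_data_t and emit an event through
 * smf_audit_event().  The auth and entity_fmri arguments are placeholders,
 * only the fields consumed by ADT_smf_create are filled in, and error
 * handling is elided; see rc_node_create_child() below for a complete
 * example.
 */
#if 0
static void
example_audit_create(char *auth, char *entity_fmri, int granted)
{
	audit_event_data_t audit_data;

	audit_data.ed_auth = auth;	/* NULL is reported as "PRIVILEGED" */
	audit_data.ed_fmri = entity_fmri;

	if (granted) {
		smf_audit_event(ADT_smf_create, ADT_SUCCESS, ADT_SUCCESS,
		    &audit_data);
	} else {
		smf_audit_event(ADT_smf_create, ADT_FAILURE,
		    ADT_FAIL_VALUE_AUTH, &audit_data);
	}
}
#endif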
3623 
3624 /*
3625  * Determine whether the combination of the property group at pg_name and
3626  * the property at prop_name is in the set of special startd properties.
3627  * If it is, a special audit event will be generated.
3628  */
3629 static void
3630 special_property_event(audit_event_data_t *evdp, const char *prop_name,
3631     char *pg_name, int status, int return_val, tx_commit_data_t *tx_data,
3632     size_t cmd_no)
3633 {
3634 	au_event_t event_id;
3635 	audit_special_prop_item_t search_key;
3636 	audit_special_prop_item_t *found;
3637 
3638 	/* Use bsearch to find the special property information. */
3639 	search_key.api_prop_name = prop_name;
3640 	search_key.api_pg_name = pg_name;
3641 	found = (audit_special_prop_item_t *)bsearch(&search_key,
3642 	    special_props_list, SPECIAL_PROP_COUNT,
3643 	    sizeof (special_props_list[0]), special_prop_compare);
3644 	if (found == NULL) {
3645 		/* Not a special property. */
3646 		return;
3647 	}
3648 
3649 	/* Get the event id */
3650 	if (found->api_event_func == NULL) {
3651 		event_id = found->api_event_id;
3652 	} else {
3653 		if ((*found->api_event_func)(tx_data, cmd_no,
3654 		    found->api_pg_name, &event_id) < 0)
3655 			return;
3656 	}
3657 
3658 	/* Generate the event. */
3659 	smf_audit_event(event_id, status, return_val, evdp);
3660 }
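
/*
 * Illustrative sketch (not compiled): the bsearch() above only finds an
 * entry if special_props_list is sorted in the order imposed by
 * special_prop_compare().  The comparator below is a hypothetical stand-in
 * that shows one such ordering (property name first, then property group
 * name); the real comparator and the table ordering must agree with each
 * other.
 */
#if 0
static int
example_special_prop_compare(const void *l, const void *r)
{
	const audit_special_prop_item_t *lp = l;
	const audit_special_prop_item_t *rp = r;
	int cmp;

	cmp = strcmp(lp->api_prop_name, rp->api_prop_name);
	if (cmp == 0)
		cmp = strcmp(lp->api_pg_name, rp->api_pg_name);
	return (cmp);
}
#endif
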
3661 #endif	/* NATIVE_BUILD */
3662 
3663 /*
3664  * Return a pointer to a string containing all the values of the command
3665  * specified by cmd_no with each value enclosed in quotes.  It is up to the
3666  * caller to free the memory at the returned pointer.
3667  */
3668 static char *
3669 generate_value_list(tx_commit_data_t *tx_data, size_t cmd_no)
3670 {
3671 	const char *cp;
3672 	const char *cur_value;
3673 	size_t byte_count = 0;
3674 	uint32_t i;
3675 	uint32_t nvalues;
3676 	size_t str_size = 0;
3677 	char *values = NULL;
3678 	char *vp;
3679 
3680 	if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
3681 		return (NULL);
3682 	/*
3683 	 * First determine the size of the buffer that we will need.  We
3684 	 * will represent each property value surrounded by quotes with a
3685 	 * space separating the values.  Thus, we need to find the total
3686 	 * size of all the value strings and add 3 for each value.
3687 	 *
3688 	 * There is one catch, though.  We need to escape any internal
3689 	 * quote marks in the values.  So for each quote in the value we
3690 	 * need to add another byte to the buffer size.
3691 	 */
3692 	for (i = 0; i < nvalues; i++) {
3693 		if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
3694 		    REP_PROTOCOL_SUCCESS)
3695 			return (NULL);
3696 		for (cp = cur_value; *cp != 0; cp++) {
3697 			byte_count += (*cp == '"') ? 2 : 1;
3698 		}
3699 		byte_count += 3;	/* surrounding quotes & space */
3700 	}
3701 	byte_count++;		/* nul terminator */
3702 	values = malloc(byte_count);
3703 	if (values == NULL)
3704 		return (NULL);
3705 	*values = 0;
3706 
3707 	/* Now build up the string of values. */
3708 	for (i = 0; i < nvalues; i++) {
3709 		if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
3710 		    REP_PROTOCOL_SUCCESS) {
3711 			free(values);
3712 			return (NULL);
3713 		}
3714 		(void) strlcat(values, "\"", byte_count);
3715 		for (cp = cur_value, vp = values + strlen(values);
3716 		    *cp != 0; cp++) {
3717 			if (*cp == '"') {
3718 				*vp++ = '\\';
3719 				*vp++ = '"';
3720 			} else {
3721 				*vp++ = *cp;
3722 			}
3723 		}
3724 		*vp = 0;
3725 		str_size = strlcat(values, "\" ", byte_count);
3726 		assert(str_size < byte_count);
3727 	}
3728 	if (str_size > 0)
3729 		values[str_size - 1] = 0;	/* get rid of trailing space */
3730 	return (values);
3731 }
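
/*
 * Worked example of the sizing rule above (illustrative only): for the two
 * values
 *
 *	on
 *	say "hi"
 *
 * generate_value_list() produces the string
 *
 *	"on" "say \"hi\""
 *
 * The buffer is sized as 2 + 3 bytes for the first value and 8 + 2 + 3
 * bytes for the second (each embedded quote costs one extra byte for its
 * backslash), plus one byte for the nul terminator: 19 bytes in all.  The
 * hypothetical fragment below mirrors the use in generate_property_events();
 * the returned string is malloc()ed and must be free()d by the caller, and
 * a NULL return is tolerated downstream (free(NULL) is a no-op and
 * _smf_audit_event() substitutes an empty value).
 */
#if 0
	audit_data.ed_prop_value = generate_value_list(tx_data, cmd_no);
	smf_audit_event(event_id, auth_status, auth_ret_value, &audit_data);
	free(audit_data.ed_prop_value);
#endif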
3732 
3733 /*
3734  * generate_property_events takes the transaction commit data at tx_data
3735  * and generates an audit event for each command.
3736  *
3737  * Native builds are done to create svc.configd-native.  This program runs
3738  * only on the Solaris build machines to create the seed repository.  Thus,
3739  * no audit events should be generated when running svc.configd-native.
3740  */
3741 static void
3742 generate_property_events(
3743 	tx_commit_data_t *tx_data,
3744 	char *pg_fmri,		/* FMRI of property group */
3745 	char *auth_string,
3746 	int auth_status,
3747 	int auth_ret_value)
3748 {
3749 #ifndef	NATIVE_BUILD
3750 	enum rep_protocol_transaction_action action;
3751 	audit_event_data_t audit_data;
3752 	size_t count;
3753 	size_t cmd_no;
3754 	char *cp;
3755 	au_event_t event_id;
3756 	char fmri[REP_PROTOCOL_FMRI_LEN];
3757 	char pg_name[REP_PROTOCOL_NAME_LEN];
3758 	char *pg_end;		/* End of prop. group fmri */
3759 	const char *prop_name;
3760 	uint32_t ptype;
3761 	char prop_type[3];
3762 	enum rep_protocol_responseid rc;
3763 	size_t sz_out;
3764 
3765 	/* Make sure we have something to do. */
3766 	if (tx_data == NULL)
3767 		return;
3768 	if ((count = tx_cmd_count(tx_data)) == 0)
3769 		return;
3770 
3771 	/* Copy the property group fmri */
3772 	pg_end = fmri;
3773 	pg_end += strlcpy(fmri, pg_fmri, sizeof (fmri));
3774 
3775 	/*
3776 	 * Get the property group name.  It is the first component after
3777 	 * the last occurrence of SCF_FMRI_PROPERTYGRP_PREFIX in the fmri.
3778 	 */
3779 	cp = strstr(pg_fmri, SCF_FMRI_PROPERTYGRP_PREFIX);
3780 	if (cp == NULL) {
3781 		pg_name[0] = 0;
3782 	} else {
3783 		cp += strlen(SCF_FMRI_PROPERTYGRP_PREFIX);
3784 		(void) strlcpy(pg_name, cp, sizeof (pg_name));
3785 	}
3786 
3787 	audit_data.ed_auth = auth_string;
3788 	audit_data.ed_fmri = fmri;
3789 	audit_data.ed_type = prop_type;
3790 
3791 	/*
3792 	 * Property type is two characters (see
3793 	 * rep_protocol_value_type_t), so terminate the string.
3794 	 */
3795 	prop_type[2] = 0;
3796 
3797 	for (cmd_no = 0; cmd_no < count; cmd_no++) {
3798 		/* Construct FMRI of the property */
3799 		*pg_end = 0;
3800 		if (tx_cmd_prop(tx_data, cmd_no, &prop_name) !=
3801 		    REP_PROTOCOL_SUCCESS) {
3802 			continue;
3803 		}
3804 		rc = rc_concat_fmri_element(fmri, sizeof (fmri), &sz_out,
3805 		    prop_name, REP_PROTOCOL_ENTITY_PROPERTY);
3806 		if (rc != REP_PROTOCOL_SUCCESS) {
3807 			/*
3808 			 * If we can't get the FMRI, we'll abandon this
3809 			 * command
3810 			 */
3811 			continue;
3812 		}
3813 
3814 		/* Generate special property event if necessary. */
3815 		special_property_event(&audit_data, prop_name, pg_name,
3816 		    auth_status, auth_ret_value, tx_data, cmd_no);
3817 
3818 		/* Capture rest of audit data. */
3819 		if (tx_cmd_prop_type(tx_data, cmd_no, &ptype) !=
3820 		    REP_PROTOCOL_SUCCESS) {
3821 			continue;
3822 		}
3823 		prop_type[0] = REP_PROTOCOL_BASE_TYPE(ptype);
3824 		prop_type[1] = REP_PROTOCOL_SUBTYPE(ptype);
3825 		audit_data.ed_prop_value = generate_value_list(tx_data, cmd_no);
3826 
3827 		/* Determine the event type. */
3828 		if (tx_cmd_action(tx_data, cmd_no, &action) !=
3829 		    REP_PROTOCOL_SUCCESS) {
3830 			free(audit_data.ed_prop_value);
3831 			continue;
3832 		}
3833 		switch (action) {
3834 		case REP_PROTOCOL_TX_ENTRY_NEW:
3835 			event_id = ADT_smf_create_prop;
3836 			break;
3837 		case REP_PROTOCOL_TX_ENTRY_CLEAR:
3838 			event_id = ADT_smf_change_prop;
3839 			break;
3840 		case REP_PROTOCOL_TX_ENTRY_REPLACE:
3841 			event_id = ADT_smf_change_prop;
3842 			break;
3843 		case REP_PROTOCOL_TX_ENTRY_DELETE:
3844 			event_id = ADT_smf_delete_prop;
3845 			break;
3846 		default:
3847 			assert(0);	/* Missing a case */
3848 			free(audit_data.ed_prop_value);
3849 			continue;
3850 		}
3851 
3852 		/* Generate the event. */
3853 		smf_audit_event(event_id, auth_status, auth_ret_value,
3854 		    &audit_data);
3855 		free(audit_data.ed_prop_value);
3856 	}
3857 #endif /* NATIVE_BUILD */
3858 }
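
/*
 * Illustrative sketch (not compiled): the two-character property type
 * string used by generate_property_events() is built by splitting the
 * protocol value type into its base type and subtype characters.  The
 * helper below is hypothetical and simply restates that encoding.
 */
#if 0
static void
example_encode_prop_type(uint32_t ptype, char buf[3])
{
	buf[0] = REP_PROTOCOL_BASE_TYPE(ptype);
	buf[1] = REP_PROTOCOL_SUBTYPE(ptype);
	buf[2] = 0;	/* two characters plus nul terminator */
}
#endif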
3859 
3860 /*
3861  * Fails with
3862  *   _DELETED - node has been deleted
3863  *   _NOT_SET - npp is reset
3864  *   _NOT_APPLICABLE - type is _PROPERTYGRP
3865  *   _INVALID_TYPE - node is corrupt or type is invalid
3866  *   _TYPE_MISMATCH - node cannot have children of type type
3867  *   _BAD_REQUEST - name is invalid
3868  *		    cannot create children for this type of node
3869  *   _NO_RESOURCES - out of memory, or could not allocate new id
3870  *   _PERMISSION_DENIED
3871  *   _BACKEND_ACCESS
3872  *   _BACKEND_READONLY
3873  *   _EXISTS - child already exists
3874  *   _TRUNCATED - truncated FMRI for the audit record
3875  */
3876 int
3877 rc_node_create_child(rc_node_ptr_t *npp, uint32_t type, const char *name,
3878     rc_node_ptr_t *cpp)
3879 {
3880 	rc_node_t *np;
3881 	rc_node_t *cp = NULL;
3882 	int rc;
3883 	perm_status_t perm_rc;
3884 	size_t sz_out;
3885 	char fmri[REP_PROTOCOL_FMRI_LEN];
3886 	audit_event_data_t audit_data;
3887 
3888 	rc_node_clear(cpp, 0);
3889 
3890 	/*
3891 	 * rc_node_modify_permission_check() must be called before the node
3892 	 * is locked.  This is because the library functions that check
3893 	 * authorizations can trigger calls back into configd.
3894 	 */
3895 	perm_rc = rc_node_modify_permission_check(&audit_data.ed_auth);
3896 	switch (perm_rc) {
3897 	case PERM_DENIED:
3898 		/*
3899 		 * We continue in this case, so that an audit event can be
3900 		 * generated later in the function.
3901 		 */
3902 		break;
3903 	case PERM_GRANTED:
3904 		break;
3905 	case PERM_GONE:
3906 		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
3907 	case PERM_FAIL:
3908 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
3909 	default:
3910 		bad_error(rc_node_modify_permission_check, perm_rc);
3911 	}
3912 
3913 	RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, audit_data.ed_auth);
3914 
3915 	audit_data.ed_fmri = fmri;
3916 
3917 	/*
3918 	 * there is a separate interface for creating property groups
3919 	 */
3920 	if (type == REP_PROTOCOL_ENTITY_PROPERTYGRP) {
3921 		(void) pthread_mutex_unlock(&np->rn_lock);
3922 		free(audit_data.ed_auth);
3923 		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3924 	}
3925 
3926 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3927 		(void) pthread_mutex_unlock(&np->rn_lock);
3928 		np = np->rn_cchain[0];
3929 		if ((rc = rc_node_check_and_lock(np)) != REP_PROTOCOL_SUCCESS) {
3930 			free(audit_data.ed_auth);
3931 			return (rc);
3932 		}
3933 	}
3934 
3935 	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
3936 	    REP_PROTOCOL_SUCCESS) {
3937 		(void) pthread_mutex_unlock(&np->rn_lock);
3938 		free(audit_data.ed_auth);
3939 		return (rc);
3940 	}
3941 	if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS) {
3942 		(void) pthread_mutex_unlock(&np->rn_lock);
3943 		free(audit_data.ed_auth);
3944 		return (rc);
3945 	}
3946 
3947 	if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
3948 	    name, type)) != REP_PROTOCOL_SUCCESS) {
3949 		(void) pthread_mutex_unlock(&np->rn_lock);
3950 		free(audit_data.ed_auth);
3951 		return (rc);
3952 	}
3953 	if (perm_rc == PERM_DENIED) {
3954 		(void) pthread_mutex_unlock(&np->rn_lock);
3955 		smf_audit_event(ADT_smf_create, ADT_FAILURE,
3956 		    ADT_FAIL_VALUE_AUTH, &audit_data);
3957 		free(audit_data.ed_auth);
3958 		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
3959 	}
3960 
3961 	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
3962 	    audit_data.ed_auth);
3963 	(void) pthread_mutex_unlock(&np->rn_lock);
3964 
3965 	rc = object_create(np, type, name, &cp);
3966 	assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3967 
3968 	if (rc == REP_PROTOCOL_SUCCESS) {
3969 		rc_node_assign(cpp, cp);
3970 		rc_node_rele(cp);
3971 	}
3972 
3973 	(void) pthread_mutex_lock(&np->rn_lock);
3974 	rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
3975 	(void) pthread_mutex_unlock(&np->rn_lock);
3976 
3977 	if (rc == REP_PROTOCOL_SUCCESS) {
3978 		smf_audit_event(ADT_smf_create, ADT_SUCCESS, ADT_SUCCESS,
3979 		    &audit_data);
3980 	}
3981 
3982 	free(audit_data.ed_auth);
3983 
3984 	return (rc);
3985 }
3986 
3987 int
3988 rc_node_create_child_pg(rc_node_ptr_t *npp, uint32_t type, const char *name,
3989     const char *pgtype, uint32_t flags, rc_node_ptr_t *cpp)
3990 {
3991 	rc_node_t *np;
3992 	rc_node_t *cp;
3993 	int rc;
3994 	permcheck_t *pcp;
3995 	perm_status_t granted;
3996 	char fmri[REP_PROTOCOL_FMRI_LEN];
3997 	audit_event_data_t audit_data;
3998 	au_event_t event_id;
3999 	size_t sz_out;
4000 
4001 	audit_data.ed_auth = NULL;
4002 	audit_data.ed_fmri = fmri;
4003 	audit_data.ed_type = (char *)pgtype;
4004 
4005 	rc_node_clear(cpp, 0);
4006 
4007 	/* verify flags is valid */
4008 	if (flags & ~SCF_PG_FLAG_NONPERSISTENT)
4009 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4010 
4011 	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
4012 
4013 	if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4014 		rc_node_rele(np);
4015 		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
4016 	}
4017 
4018 	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
4019 	    REP_PROTOCOL_SUCCESS) {
4020 		rc_node_rele(np);
4021 		return (rc);
4022 	}
4023 	if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS ||
4024 	    (rc = rc_check_pgtype_name(pgtype)) != REP_PROTOCOL_SUCCESS) {
4025 		rc_node_rele(np);
4026 		return (rc);
4027 	}
4028 
4029 #ifdef NATIVE_BUILD
4030 	if (!client_is_privileged()) {
4031 		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4032 	}
4033 #else
4034 	if (flags & SCF_PG_FLAG_NONPERSISTENT) {
4035 		event_id = ADT_smf_create_npg;
4036 	} else {
4037 		event_id = ADT_smf_create_pg;
4038 	}
4039 	if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
4040 	    name, REP_PROTOCOL_ENTITY_PROPERTYGRP)) != REP_PROTOCOL_SUCCESS) {
4041 		rc_node_rele(np);
4042 		return (rc);
4043 	}
4044 
4045 	if (is_main_repository) {
4046 		/* Must have .smf.modify or smf.modify.<type> authorization */
4047 		pcp = pc_create();
4048 		if (pcp != NULL) {
4049 			rc = perm_add_enabling(pcp, AUTH_MODIFY);
4050 
4051 			if (rc == REP_PROTOCOL_SUCCESS) {
4052 				const char * const auth =
4053 				    perm_auth_for_pgtype(pgtype);
4054 
4055 				if (auth != NULL)
4056 					rc = perm_add_enabling(pcp, auth);
4057 			}
4058 
4059 			/*
4060 			 * .manage or $action_authorization can be used to
4061 			 * create the actions pg and the general_ovr pg.
4062 			 */
4063 			if (rc == REP_PROTOCOL_SUCCESS &&
4064 			    (flags & SCF_PG_FLAG_NONPERSISTENT) != 0 &&
4065 			    np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE &&
4066 			    ((strcmp(name, AUTH_PG_ACTIONS) == 0 &&
4067 			    strcmp(pgtype, AUTH_PG_ACTIONS_TYPE) == 0) ||
4068 			    (strcmp(name, AUTH_PG_GENERAL_OVR) == 0 &&
4069 			    strcmp(pgtype, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
4070 				rc = perm_add_enabling(pcp, AUTH_MANAGE);
4071 
4072 				if (rc == REP_PROTOCOL_SUCCESS)
4073 					rc = perm_add_inst_action_auth(pcp, np);
4074 			}
4075 
4076 			if (rc == REP_PROTOCOL_SUCCESS) {
4077 				granted = perm_granted(pcp);
4078 
4079 				rc = map_granted_status(granted, pcp,
4080 				    &audit_data.ed_auth);
4081 				if (granted == PERM_GONE) {
4082 					/* No auditing if client gone. */
4083 					pc_free(pcp);
4084 					rc_node_rele(np);
4085 					return (rc);
4086 				}
4087 			}
4088 
4089 			pc_free(pcp);
4090 		} else {
4091 			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4092 		}
4093 
4094 	} else {
4095 		rc = REP_PROTOCOL_SUCCESS;
4096 	}
4097 #endif /* NATIVE_BUILD */
4098 
4099 
4100 	if (rc != REP_PROTOCOL_SUCCESS) {
4101 		rc_node_rele(np);
4102 		if (rc != REP_PROTOCOL_FAIL_NO_RESOURCES) {
4103 			smf_audit_event(event_id, ADT_FAILURE,
4104 			    ADT_FAIL_VALUE_AUTH, &audit_data);
4105 		}
4106 		if (audit_data.ed_auth != NULL)
4107 			free(audit_data.ed_auth);
4108 		return (rc);
4109 	}
4110 
4111 	(void) pthread_mutex_lock(&np->rn_lock);
4112 	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
4113 	    audit_data.ed_auth);
4114 	(void) pthread_mutex_unlock(&np->rn_lock);
4115 
4116 	rc = object_create_pg(np, type, name, pgtype, flags, &cp);
4117 
4118 	if (rc == REP_PROTOCOL_SUCCESS) {
4119 		rc_node_assign(cpp, cp);
4120 		rc_node_rele(cp);
4121 	}
4122 
4123 	(void) pthread_mutex_lock(&np->rn_lock);
4124 	rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
4125 	(void) pthread_mutex_unlock(&np->rn_lock);
4126 
4127 	if (rc == REP_PROTOCOL_SUCCESS) {
4128 		smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
4129 		    &audit_data);
4130 	}
4131 	if (audit_data.ed_auth != NULL)
4132 		free(audit_data.ed_auth);
4133 
4134 	return (rc);
4135 }
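
/*
 * Illustrative sketch (not compiled): in rc_node_create_child_pg() above,
 * the AUTH_MANAGE/action-authorization path applies only when creating a
 * non-persistent property group directly on an instance and the (name,
 * type) pair identifies either the actions property group or the
 * general_ovr property group.  The hypothetical predicate below restates
 * that test.
 */
#if 0
static int
example_is_manageable_npg(const char *name, const char *pgtype)
{
	return ((strcmp(name, AUTH_PG_ACTIONS) == 0 &&
	    strcmp(pgtype, AUTH_PG_ACTIONS_TYPE) == 0) ||
	    (strcmp(name, AUTH_PG_GENERAL_OVR) == 0 &&
	    strcmp(pgtype, AUTH_PG_GENERAL_OVR_TYPE) == 0));
}
#endif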
4136 
4137 static void
4138 rc_pg_notify_fire(rc_node_pg_notify_t *pnp)
4139 {
4140 	assert(MUTEX_HELD(&rc_pg_notify_lock));
4141 
4142 	if (pnp->rnpn_pg != NULL) {
4143 		uu_list_remove(pnp->rnpn_pg->rn_pg_notify_list, pnp);
4144 		(void) close(pnp->rnpn_fd);
4145 
4146 		pnp->rnpn_pg = NULL;
4147 		pnp->rnpn_fd = -1;
4148 	} else {
4149 		assert(pnp->rnpn_fd == -1);
4150 	}
4151 }
4152 
4153 static void
4154 rc_notify_node_delete(rc_notify_delete_t *ndp, rc_node_t *np_arg)
4155 {
4156 	rc_node_t *svc = NULL;
4157 	rc_node_t *inst = NULL;
4158 	rc_node_t *pg = NULL;
4159 	rc_node_t *np = np_arg;
4160 	rc_node_t *nnp;
4161 
4162 	while (svc == NULL) {
4163 		(void) pthread_mutex_lock(&np->rn_lock);
4164 		if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4165 			(void) pthread_mutex_unlock(&np->rn_lock);
4166 			goto cleanup;
4167 		}
4168 		nnp = np->rn_parent;
4169 		rc_node_hold_locked(np);	/* hold it in place */
4170 
4171 		switch (np->rn_id.rl_type) {
4172 		case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4173 			assert(pg == NULL);
4174 			pg = np;
4175 			break;
4176 		case REP_PROTOCOL_ENTITY_INSTANCE:
4177 			assert(inst == NULL);
4178 			inst = np;
4179 			break;
4180 		case REP_PROTOCOL_ENTITY_SERVICE:
4181 			assert(svc == NULL);
4182 			svc = np;
4183 			break;
4184 		default:
4185 			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
4186 			rc_node_rele_locked(np);
4187 			goto cleanup;
4188 		}
4189 
4190 		(void) pthread_mutex_unlock(&np->rn_lock);
4191 
4192 		np = nnp;
4193 		if (np == NULL)
4194 			goto cleanup;
4195 	}
4196 
4197 	rc_notify_deletion(ndp,
4198 	    svc->rn_name,
4199 	    inst != NULL ? inst->rn_name : NULL,
4200 	    pg != NULL ? pg->rn_name : NULL);
4201 
4202 	ndp = NULL;
4203 
4204 cleanup:
4205 	if (ndp != NULL)
4206 		uu_free(ndp);
4207 
4208 	for (;;) {
4209 		if (svc != NULL) {
4210 			np = svc;
4211 			svc = NULL;
4212 		} else if (inst != NULL) {
4213 			np = inst;
4214 			inst = NULL;
4215 		} else if (pg != NULL) {
4216 			np = pg;
4217 			pg = NULL;
4218 		} else
4219 			break;
4220 
4221 		(void) pthread_mutex_lock(&np->rn_lock);
4222 		rc_node_rele_flag(np, RC_NODE_USING_PARENT);
4223 		rc_node_rele_locked(np);
4224 	}
4225 }
4226 
4227 /*
4228  * Hold RC_NODE_DYING_FLAGS on np's descendants.  If andformer is true, do
4229  * the same down the rn_former chain.
4230  */
4231 static void
4232 rc_node_delete_hold(rc_node_t *np, int andformer)
4233 {
4234 	rc_node_t *cp;
4235 
4236 again:
4237 	assert(MUTEX_HELD(&np->rn_lock));
4238 	assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);
4239 
4240 	for (cp = uu_list_first(np->rn_children); cp != NULL;
4241 	    cp = uu_list_next(np->rn_children, cp)) {
4242 		(void) pthread_mutex_lock(&cp->rn_lock);
4243 		(void) pthread_mutex_unlock(&np->rn_lock);
4244 		if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS)) {
4245 			/*
4246 			 * already marked as dead -- can't happen, since that
4247 			 * would require setting RC_NODE_CHILDREN_CHANGING
4248 			 * in np, and we're holding that...
4249 			 */
4250 			abort();
4251 		}
4252 		rc_node_delete_hold(cp, andformer);	/* recurse, drop lock */
4253 
4254 		(void) pthread_mutex_lock(&np->rn_lock);
4255 	}
4256 	if (andformer && (cp = np->rn_former) != NULL) {
4257 		(void) pthread_mutex_lock(&cp->rn_lock);
4258 		(void) pthread_mutex_unlock(&np->rn_lock);
4259 		if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS))
4260 			abort();		/* can't happen, see above */
4261 		np = cp;
4262 		goto again;		/* tail-recurse down rn_former */
4263 	}
4264 	(void) pthread_mutex_unlock(&np->rn_lock);
4265 }
4266 
4267 /*
4268  * N.B.:  this function drops np->rn_lock on the way out.
4269  */
4270 static void
4271 rc_node_delete_rele(rc_node_t *np, int andformer)
4272 {
4273 	rc_node_t *cp;
4274 
4275 again:
4276 	assert(MUTEX_HELD(&np->rn_lock));
4277 	assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);
4278 
4279 	for (cp = uu_list_first(np->rn_children); cp != NULL;
4280 	    cp = uu_list_next(np->rn_children, cp)) {
4281 		(void) pthread_mutex_lock(&cp->rn_lock);
4282 		(void) pthread_mutex_unlock(&np->rn_lock);
4283 		rc_node_delete_rele(cp, andformer);	/* recurse, drop lock */
4284 		(void) pthread_mutex_lock(&np->rn_lock);
4285 	}
4286 	if (andformer && (cp = np->rn_former) != NULL) {
4287 		(void) pthread_mutex_lock(&cp->rn_lock);
4288 		rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4289 		(void) pthread_mutex_unlock(&np->rn_lock);
4290 
4291 		np = cp;
4292 		goto again;		/* tail-recurse down rn_former */
4293 	}
4294 	rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4295 	(void) pthread_mutex_unlock(&np->rn_lock);
4296 }
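
/*
 * Illustrative sketch (not compiled): rc_node_delete_hold() and
 * rc_node_delete_rele() are used as a bracketing pair around the backend
 * delete, as rc_node_delete() does further below.  The fragment assumes
 * np is locked and already holds RC_NODE_DYING_FLAGS.
 */
#if 0
	rc_node_delete_hold(np, 1);	/* hold entire subgraph, drop lock */

	rc = object_delete(np);

	if (rc != REP_PROTOCOL_SUCCESS) {
		(void) pthread_mutex_lock(&np->rn_lock);
		rc_node_delete_rele(np, 1);	/* drops lock */
		/* ... propagate the failure to the caller ... */
	}
#endif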
4297 
4298 static void
4299 rc_node_finish_delete(rc_node_t *cp)
4300 {
4301 	cache_bucket_t *bp;
4302 	rc_node_pg_notify_t *pnp;
4303 
4304 	assert(MUTEX_HELD(&cp->rn_lock));
4305 
4306 	if (!(cp->rn_flags & RC_NODE_OLD)) {
4307 		assert(cp->rn_flags & RC_NODE_IN_PARENT);
4308 		if (!rc_node_wait_flag(cp, RC_NODE_USING_PARENT)) {
4309 			abort();		/* can't happen, see above */
4310 		}
4311 		cp->rn_flags &= ~RC_NODE_IN_PARENT;
4312 		cp->rn_parent = NULL;
4313 		rc_node_free_fmri(cp);
4314 	}
4315 
4316 	cp->rn_flags |= RC_NODE_DEAD;
4317 
4318 	/*
4319 	 * If this node is not outdated, we need to remove it from
4320 	 * the notify list and cache hash table.
4321 	 */
4322 	if (!(cp->rn_flags & RC_NODE_OLD)) {
4323 		assert(cp->rn_refs > 0);	/* can't go away yet */
4324 		(void) pthread_mutex_unlock(&cp->rn_lock);
4325 
4326 		(void) pthread_mutex_lock(&rc_pg_notify_lock);
4327 		while ((pnp = uu_list_first(cp->rn_pg_notify_list)) != NULL)
4328 			rc_pg_notify_fire(pnp);
4329 		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
4330 		rc_notify_remove_node(cp);
4331 
4332 		bp = cache_hold(cp->rn_hash);
4333 		(void) pthread_mutex_lock(&cp->rn_lock);
4334 		cache_remove_unlocked(bp, cp);
4335 		cache_release(bp);
4336 	}
4337 }
4338 
4339 /*
4340  * For each child, call rc_node_finish_delete() and recurse.  If andformer
4341  * is set, also recurse down rn_former.  Finally release np, which might
4342  * free it.
4343  */
4344 static void
4345 rc_node_delete_children(rc_node_t *np, int andformer)
4346 {
4347 	rc_node_t *cp;
4348 
4349 again:
4350 	assert(np->rn_refs > 0);
4351 	assert(MUTEX_HELD(&np->rn_lock));
4352 	assert(np->rn_flags & RC_NODE_DEAD);
4353 
4354 	while ((cp = uu_list_first(np->rn_children)) != NULL) {
4355 		uu_list_remove(np->rn_children, cp);
4356 		(void) pthread_mutex_lock(&cp->rn_lock);
4357 		(void) pthread_mutex_unlock(&np->rn_lock);
4358 		rc_node_hold_locked(cp);	/* hold while we recurse */
4359 		rc_node_finish_delete(cp);
4360 		rc_node_delete_children(cp, andformer);	/* drops lock + ref */
4361 		(void) pthread_mutex_lock(&np->rn_lock);
4362 	}
4363 
4364 	/*
4365 	 * When we drop cp's lock, all the children will be gone, so we
4366 	 * can release DYING_FLAGS.
4367 	 */
4368 	rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4369 	if (andformer && (cp = np->rn_former) != NULL) {
4370 		np->rn_former = NULL;		/* unlink */
4371 		(void) pthread_mutex_lock(&cp->rn_lock);
4372 
4373 		/*
4374 		 * Register the ephemeral reference created by reading
4375 		 * np->rn_former into cp.  Note that the persistent
4376 		 * reference (np->rn_former) is locked because we haven't
4377 		 * dropped np's lock since we dropped its RC_NODE_IN_TX
4378 		 * (via RC_NODE_DYING_FLAGS).
4379 		 */
4380 		rc_node_hold_ephemeral_locked(cp);
4381 
4382 		(void) pthread_mutex_unlock(&np->rn_lock);
4383 		cp->rn_flags &= ~RC_NODE_ON_FORMER;
4384 
4385 		rc_node_hold_locked(cp);	/* hold while we loop */
4386 
4387 		rc_node_finish_delete(cp);
4388 
4389 		rc_node_rele(np);		/* drop the old reference */
4390 
4391 		np = cp;
4392 		goto again;		/* tail-recurse down rn_former */
4393 	}
4394 	rc_node_rele_locked(np);
4395 }
4396 
4397 /*
4398  * The last client or child reference to np, which must be either
4399  * RC_NODE_OLD or RC_NODE_DEAD, has been destroyed.  We'll destroy any
4400  * remaining references (e.g., rn_former) and call rc_node_destroy() to
4401  * free np.
4402  */
4403 static void
4404 rc_node_no_client_refs(rc_node_t *np)
4405 {
4406 	int unrefed;
4407 	rc_node_t *current, *cur;
4408 
4409 	assert(MUTEX_HELD(&np->rn_lock));
4410 	assert(np->rn_refs == 0);
4411 	assert(np->rn_other_refs == 0);
4412 	assert(np->rn_other_refs_held == 0);
4413 
4414 	if (np->rn_flags & RC_NODE_DEAD) {
4415 		/*
4416 		 * The node is DEAD, so the deletion code should have
4417 		 * destroyed all rn_children or rn_former references.
4418 		 * Since the last client or child reference has been
4419 		 * destroyed, we're free to destroy np, unless another
4420 		 * thread has an ephemeral reference, in which case we'll
4421 		 * pass the buck.
4422 		 */
4423 		if (np->rn_erefs > 1) {
4424 			--np->rn_erefs;
4425 			NODE_UNLOCK(np);
4426 			return;
4427 		}
4428 
4429 		(void) pthread_mutex_unlock(&np->rn_lock);
4430 		rc_node_destroy(np);
4431 		return;
4432 	}
4433 
4434 	/* We only collect DEAD and OLD nodes, thank you. */
4435 	assert(np->rn_flags & RC_NODE_OLD);
4436 
4437 	/*
4438 	 * RC_NODE_UNREFED keeps multiple threads from processing OLD
4439 	 * nodes.  But it's vulnerable to unfriendly scheduling, so full
4440 	 * use of rn_erefs should supersede it someday.
4441 	 */
4442 	if (np->rn_flags & RC_NODE_UNREFED) {
4443 		(void) pthread_mutex_unlock(&np->rn_lock);
4444 		return;
4445 	}
4446 	np->rn_flags |= RC_NODE_UNREFED;
4447 
4448 	/*
4449 	 * Now we'll remove the node from the rn_former chain and take its
4450 	 * DYING_FLAGS.
4451 	 */
4452 
4453 	/*
4454 	 * Since this node is OLD, it should be on an rn_former chain.  To
4455 	 * remove it, we must find the current in-hash object and grab its
4456 	 * RC_NODE_IN_TX flag to protect the entire rn_former chain.
4457 	 */
4458 
4459 	(void) pthread_mutex_unlock(&np->rn_lock);
4460 
4461 	for (;;) {
4462 		current = cache_lookup(&np->rn_id);
4463 
4464 		if (current == NULL) {
4465 			(void) pthread_mutex_lock(&np->rn_lock);
4466 
4467 			if (np->rn_flags & RC_NODE_DEAD)
4468 				goto died;
4469 
4470 			/*
4471 			 * We are trying to unreference this node, but the
4472 			 * owner of the former list does not exist.  It must
4473 			 * be the case that another thread is deleting this
4474 			 * entire sub-branch, but has not yet reached us.
4475 			 * We will in short order be deleted.
4476 			 */
4477 			np->rn_flags &= ~RC_NODE_UNREFED;
4478 			(void) pthread_mutex_unlock(&np->rn_lock);
4479 			return;
4480 		}
4481 
4482 		if (current == np) {
4483 			/*
4484 			 * no longer unreferenced
4485 			 */
4486 			(void) pthread_mutex_lock(&np->rn_lock);
4487 			np->rn_flags &= ~RC_NODE_UNREFED;
4488 			/* held in cache_lookup() */
4489 			rc_node_rele_locked(np);
4490 			return;
4491 		}
4492 
4493 		(void) pthread_mutex_lock(&current->rn_lock);
4494 		if (current->rn_flags & RC_NODE_OLD) {
4495 			/*
4496 			 * current has been replaced since we looked it
4497 			 * up.  Try again.
4498 			 */
4499 			/* held in cache_lookup() */
4500 			rc_node_rele_locked(current);
4501 			continue;
4502 		}
4503 
4504 		if (!rc_node_hold_flag(current, RC_NODE_IN_TX)) {
4505 			/*
4506 			 * current has been deleted since we looked it up.  Try
4507 			 * again.
4508 			 */
4509 			/* held in cache_lookup() */
4510 			rc_node_rele_locked(current);
4511 			continue;
4512 		}
4513 
4514 		/*
4515 		 * rc_node_hold_flag() might have dropped current's lock, so
4516 		 * check OLD again.
4517 		 */
4518 		if (!(current->rn_flags & RC_NODE_OLD)) {
4519 			/* Not old.  Stop looping. */
4520 			(void) pthread_mutex_unlock(&current->rn_lock);
4521 			break;
4522 		}
4523 
4524 		rc_node_rele_flag(current, RC_NODE_IN_TX);
4525 		rc_node_rele_locked(current);
4526 	}
4527 
4528 	/* To take np's RC_NODE_DYING_FLAGS, we need its lock. */
4529 	(void) pthread_mutex_lock(&np->rn_lock);
4530 
4531 	/*
4532 	 * While we didn't have the lock, a thread may have added
4533 	 * a reference or changed the flags.
4534 	 */
4535 	if (!(np->rn_flags & (RC_NODE_OLD | RC_NODE_DEAD)) ||
4536 	    np->rn_refs != 0 || np->rn_other_refs != 0 ||
4537 	    np->rn_other_refs_held != 0) {
4538 		np->rn_flags &= ~RC_NODE_UNREFED;
4539 
4540 		(void) pthread_mutex_lock(&current->rn_lock);
4541 		rc_node_rele_flag(current, RC_NODE_IN_TX);
4542 		/* held by cache_lookup() */
4543 		rc_node_rele_locked(current);
4544 		return;
4545 	}
4546 
4547 	if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4548 		/*
4549 		 * Someone deleted the node while we were waiting for
4550 		 * DYING_FLAGS.  Undo the modifications to current.
4551 		 */
4552 		(void) pthread_mutex_unlock(&np->rn_lock);
4553 
4554 		rc_node_rele_flag(current, RC_NODE_IN_TX);
4555 		/* held by cache_lookup() */
4556 		rc_node_rele_locked(current);
4557 
4558 		(void) pthread_mutex_lock(&np->rn_lock);
4559 		goto died;
4560 	}
4561 
4562 	/* Take RC_NODE_DYING_FLAGS on np's descendants. */
4563 	rc_node_delete_hold(np, 0);		/* drops np->rn_lock */
4564 
4565 	/* Mark np DEAD.  This requires the lock. */
4566 	(void) pthread_mutex_lock(&np->rn_lock);
4567 
4568 	/* Recheck for new references. */
4569 	if (!(np->rn_flags & RC_NODE_OLD) ||
4570 	    np->rn_refs != 0 || np->rn_other_refs != 0 ||
4571 	    np->rn_other_refs_held != 0) {
4572 		np->rn_flags &= ~RC_NODE_UNREFED;
4573 		rc_node_delete_rele(np, 0);	/* drops np's lock */
4574 
4575 		(void) pthread_mutex_lock(&current->rn_lock);
4576 		rc_node_rele_flag(current, RC_NODE_IN_TX);
4577 		/* held by cache_lookup() */
4578 		rc_node_rele_locked(current);
4579 		return;
4580 	}
4581 
4582 	np->rn_flags |= RC_NODE_DEAD;
4583 
4584 	/*
4585 	 * Delete the children.  This calls rc_node_rele_locked() on np at
4586 	 * the end, so add a reference to keep the count from going
4587 	 * negative.  It will recurse with RC_NODE_DEAD set, so we'll call
4588 	 * rc_node_destroy() above, but RC_NODE_UNREFED is also set, so it
4589 	 * shouldn't actually free() np.
4590 	 */
4591 	rc_node_hold_locked(np);
4592 	rc_node_delete_children(np, 0);		/* unlocks np */
4593 
4594 	/* Remove np from current's rn_former chain. */
4595 	(void) pthread_mutex_lock(&current->rn_lock);
4596 	for (cur = current; cur != NULL && cur->rn_former != np;
4597 	    cur = cur->rn_former)
4598 		;
4599 	assert(cur != NULL && cur != np);
4600 
4601 	cur->rn_former = np->rn_former;
4602 	np->rn_former = NULL;
4603 
4604 	rc_node_rele_flag(current, RC_NODE_IN_TX);
4605 	/* held by cache_lookup() */
4606 	rc_node_rele_locked(current);
4607 
4608 	/* Clear ON_FORMER and UNREFED, and destroy. */
4609 	(void) pthread_mutex_lock(&np->rn_lock);
4610 	assert(np->rn_flags & RC_NODE_ON_FORMER);
4611 	np->rn_flags &= ~(RC_NODE_UNREFED | RC_NODE_ON_FORMER);
4612 
4613 	if (np->rn_erefs > 1) {
4614 		/* Still referenced.  Stay execution. */
4615 		--np->rn_erefs;
4616 		NODE_UNLOCK(np);
4617 		return;
4618 	}
4619 
4620 	(void) pthread_mutex_unlock(&np->rn_lock);
4621 	rc_node_destroy(np);
4622 	return;
4623 
4624 died:
4625 	/*
4626 	 * Another thread marked np DEAD.  If there still aren't any
4627 	 * persistent references, destroy the node.
4628 	 */
4629 	np->rn_flags &= ~RC_NODE_UNREFED;
4630 
4631 	unrefed = (np->rn_refs == 0 && np->rn_other_refs == 0 &&
4632 	    np->rn_other_refs_held == 0);
4633 
4634 	if (np->rn_erefs > 0)
4635 		--np->rn_erefs;
4636 
4637 	if (unrefed && np->rn_erefs > 0) {
4638 		NODE_UNLOCK(np);
4639 		return;
4640 	}
4641 
4642 	(void) pthread_mutex_unlock(&np->rn_lock);
4643 
4644 	if (unrefed)
4645 		rc_node_destroy(np);
4646 }
4647 
4648 static au_event_t
4649 get_delete_event_id(rep_protocol_entity_t entity, uint32_t pgflags)
4650 {
4651 	au_event_t	id = 0;
4652 
4653 #ifndef NATIVE_BUILD
4654 	switch (entity) {
4655 	case REP_PROTOCOL_ENTITY_SERVICE:
4656 	case REP_PROTOCOL_ENTITY_INSTANCE:
4657 		id = ADT_smf_delete;
4658 		break;
4659 	case REP_PROTOCOL_ENTITY_SNAPSHOT:
4660 		id = ADT_smf_delete_snap;
4661 		break;
4662 	case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4663 	case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4664 		if (pgflags & SCF_PG_FLAG_NONPERSISTENT) {
4665 			id = ADT_smf_delete_npg;
4666 		} else {
4667 			id = ADT_smf_delete_pg;
4668 		}
4669 		break;
4670 	default:
4671 		abort();
4672 	}
4673 #endif	/* NATIVE_BUILD */
4674 	return (id);
4675 }
4676 
4677 /*
4678  * Fails with
4679  *   _NOT_SET
4680  *   _DELETED
4681  *   _BAD_REQUEST
4682  *   _PERMISSION_DENIED
4683  *   _NO_RESOURCES
4684  *   _TRUNCATED
4685  * and whatever object_delete() fails with.
4686  */
4687 int
4688 rc_node_delete(rc_node_ptr_t *npp)
4689 {
4690 	rc_node_t *np, *np_orig;
4691 	rc_node_t *pp = NULL;
4692 	int rc;
4693 	rc_node_pg_notify_t *pnp;
4694 	cache_bucket_t *bp;
4695 	rc_notify_delete_t *ndp;
4696 	permcheck_t *pcp;
4697 	int granted;
4698 	au_event_t event_id = 0;
4699 	size_t sz_out;
4700 	audit_event_data_t audit_data;
4701 	int audit_failure = 0;
4702 
4703 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
4704 
4705 	audit_data.ed_fmri = NULL;
4706 	audit_data.ed_auth = NULL;
4707 	audit_data.ed_snapname = NULL;
4708 	audit_data.ed_type = NULL;
4709 
4710 	switch (np->rn_id.rl_type) {
4711 	case REP_PROTOCOL_ENTITY_SERVICE:
4712 		event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SERVICE,
4713 		    np->rn_pgflags);
4714 		break;
4715 	case REP_PROTOCOL_ENTITY_INSTANCE:
4716 		event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_INSTANCE,
4717 		    np->rn_pgflags);
4718 		break;
4719 	case REP_PROTOCOL_ENTITY_SNAPSHOT:
4720 		event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SNAPSHOT,
4721 		    np->rn_pgflags);
4722 		audit_data.ed_snapname = strdup(np->rn_name);
4723 		if (audit_data.ed_snapname == NULL) {
4724 			(void) pthread_mutex_unlock(&np->rn_lock);
4725 			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4726 		}
4727 		break;			/* deletable */
4728 
4729 	case REP_PROTOCOL_ENTITY_SCOPE:
4730 	case REP_PROTOCOL_ENTITY_SNAPLEVEL:
4731 		/* Scopes and snaplevels are indelible. */
4732 		(void) pthread_mutex_unlock(&np->rn_lock);
4733 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4734 
4735 	case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4736 		(void) pthread_mutex_unlock(&np->rn_lock);
4737 		np = np->rn_cchain[0];
4738 		RC_NODE_CHECK_AND_LOCK(np);
4739 		event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_CPROPERTYGRP,
4740 		    np->rn_pgflags);
4741 		break;
4742 
4743 	case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4744 		if (np->rn_id.rl_ids[ID_SNAPSHOT] == 0) {
4745 			event_id =
4746 			    get_delete_event_id(REP_PROTOCOL_ENTITY_PROPERTYGRP,
4747 			    np->rn_pgflags);
4748 			audit_data.ed_type = strdup(np->rn_type);
4749 			if (audit_data.ed_type == NULL) {
4750 				(void) pthread_mutex_unlock(&np->rn_lock);
4751 				return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4752 			}
4753 			break;
4754 		}
4755 
4756 		/* Snapshot property groups are indelible. */
4757 		(void) pthread_mutex_unlock(&np->rn_lock);
4758 		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
4759 
4760 	case REP_PROTOCOL_ENTITY_PROPERTY:
4761 		(void) pthread_mutex_unlock(&np->rn_lock);
4762 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4763 
4764 	default:
4765 		assert(0);
4766 		abort();
4767 		break;
4768 	}
4769 
4770 	audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
4771 	if (audit_data.ed_fmri == NULL) {
4772 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4773 		goto cleanout;
4774 	}
4775 	np_orig = np;
4776 	rc_node_hold_locked(np);	/* simplifies rest of the code */
4777 
4778 again:
4779 	/*
4780 	 * The following loop is to deal with the fact that snapshots and
4781 	 * property groups are moving targets -- changes to them result
4782 	 * in a new "child" node.  Since we can only delete from the top node,
4783 	 * we have to loop until we have a non-RC_NODE_OLD version.
4784 	 */
4785 	for (;;) {
4786 		if (!rc_node_wait_flag(np,
4787 		    RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
4788 			rc_node_rele_locked(np);
4789 			rc = REP_PROTOCOL_FAIL_DELETED;
4790 			goto cleanout;
4791 		}
4792 
4793 		if (np->rn_flags & RC_NODE_OLD) {
4794 			rc_node_rele_locked(np);
4795 			np = cache_lookup(&np_orig->rn_id);
4796 			assert(np != np_orig);
4797 
4798 			if (np == NULL) {
4799 				rc = REP_PROTOCOL_FAIL_DELETED;
4800 				goto fail;
4801 			}
4802 			(void) pthread_mutex_lock(&np->rn_lock);
4803 			continue;
4804 		}
4805 
4806 		if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4807 			rc_node_rele_locked(np);
4808 			rc_node_clear(npp, 1);
4809 			rc = REP_PROTOCOL_FAIL_DELETED;
			goto cleanout;
4810 		}
4811 
4812 		/*
4813 		 * Mark our parent as children changing.  This call drops our
4814 		 * lock and the RC_NODE_USING_PARENT flag, and returns with
4815 		 * pp's lock held.
4816 		 */
4817 		pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
4818 		if (pp == NULL) {
4819 			/* our parent is gone, we're going next... */
4820 			rc_node_rele(np);
4821 
4822 			rc_node_clear(npp, 1);
4823 			rc = REP_PROTOCOL_FAIL_DELETED;
4824 			goto cleanout;
4825 		}
4826 
4827 		rc_node_hold_locked(pp);		/* hold for later */
4828 		(void) pthread_mutex_unlock(&pp->rn_lock);
4829 
4830 		(void) pthread_mutex_lock(&np->rn_lock);
4831 		if (!(np->rn_flags & RC_NODE_OLD))
4832 			break;			/* not old -- we're done */
4833 
4834 		(void) pthread_mutex_unlock(&np->rn_lock);
4835 		(void) pthread_mutex_lock(&pp->rn_lock);
4836 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4837 		rc_node_rele_locked(pp);
4838 		(void) pthread_mutex_lock(&np->rn_lock);
4839 		continue;			/* loop around and try again */
4840 	}
4841 	/*
4842 	 * Everyone out of the pool -- we grab everything but
4843 	 * RC_NODE_USING_PARENT (including RC_NODE_DYING) to keep
4844 	 * any changes from occurring while we are attempting to
4845 	 * delete the node.
4846 	 */
4847 	if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4848 		(void) pthread_mutex_unlock(&np->rn_lock);
4849 		rc = REP_PROTOCOL_FAIL_DELETED;
4850 		goto fail;
4851 	}
4852 
4853 	assert(!(np->rn_flags & RC_NODE_OLD));
4854 
4855 	if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
4856 	    REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
4857 		rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4858 		(void) pthread_mutex_unlock(&np->rn_lock);
4859 		goto fail;
4860 	}
4861 
4862 #ifdef NATIVE_BUILD
4863 	if (!client_is_privileged()) {
4864 		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4865 	}
4866 #else
4867 	if (is_main_repository) {
4868 		/* permission check */
4869 		(void) pthread_mutex_unlock(&np->rn_lock);
4870 		pcp = pc_create();
4871 		if (pcp != NULL) {
4872 			rc = perm_add_enabling(pcp, AUTH_MODIFY);
4873 
4874 			/* add .smf.modify.<type> for pgs. */
4875 			if (rc == REP_PROTOCOL_SUCCESS && np->rn_id.rl_type ==
4876 			    REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4877 				const char * const auth =
4878 				    perm_auth_for_pgtype(np->rn_type);
4879 
4880 				if (auth != NULL)
4881 					rc = perm_add_enabling(pcp, auth);
4882 			}
4883 
4884 			if (rc == REP_PROTOCOL_SUCCESS) {
4885 				granted = perm_granted(pcp);
4886 
4887 				rc = map_granted_status(granted, pcp,
4888 				    &audit_data.ed_auth);
4889 				if (granted == PERM_GONE) {
4890 					/* No need to audit if client gone. */
4891 					pc_free(pcp);
4892 					rc_node_rele_flag(np,
4893 					    RC_NODE_DYING_FLAGS);
4894 					return (rc);
4895 				}
4896 				if (granted == PERM_DENIED)
4897 					audit_failure = 1;
4898 			}
4899 
4900 			pc_free(pcp);
4901 		} else {
4902 			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4903 		}
4904 
4905 		(void) pthread_mutex_lock(&np->rn_lock);
4906 	} else {
4907 		rc = REP_PROTOCOL_SUCCESS;
4908 	}
4909 #endif /* NATIVE_BUILD */
4910 
4911 	if (rc != REP_PROTOCOL_SUCCESS) {
4912 		rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4913 		(void) pthread_mutex_unlock(&np->rn_lock);
4914 		goto fail;
4915 	}
4916 
4917 	ndp = uu_zalloc(sizeof (*ndp));
4918 	if (ndp == NULL) {
4919 		rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4920 		(void) pthread_mutex_unlock(&np->rn_lock);
4921 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4922 		goto fail;
4923 	}
4924 
4925 	rc_node_delete_hold(np, 1);	/* hold entire subgraph, drop lock */
4926 
4927 	rc = object_delete(np);
4928 
4929 	if (rc != REP_PROTOCOL_SUCCESS) {
4930 		(void) pthread_mutex_lock(&np->rn_lock);
4931 		rc_node_delete_rele(np, 1);		/* drops lock */
4932 		uu_free(ndp);
4933 		goto fail;
4934 	}
4935 
4936 	/*
4937 	 * Now, delicately unlink and delete the object.
4938 	 *
4939 	 * Create the delete notification, atomically remove
4940 	 * from the hash table and set the NODE_DEAD flag, and
4941 	 * remove from the parent's children list.
4942 	 */
4943 	rc_notify_node_delete(ndp, np); /* frees or uses ndp */
4944 
4945 	bp = cache_hold(np->rn_hash);
4946 
4947 	(void) pthread_mutex_lock(&np->rn_lock);
4948 	cache_remove_unlocked(bp, np);
4949 	cache_release(bp);
4950 
4951 	np->rn_flags |= RC_NODE_DEAD;
4952 
4953 	if (pp != NULL) {
4954 		/*
4955 		 * Remove from pp's rn_children.  This requires pp's lock,
4956 		 * so we must drop np's lock to respect lock order.
4957 		 */
4958 		(void) pthread_mutex_unlock(&np->rn_lock);
4959 		(void) pthread_mutex_lock(&pp->rn_lock);
4960 		(void) pthread_mutex_lock(&np->rn_lock);
4961 
4962 		uu_list_remove(pp->rn_children, np);
4963 
4964 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4965 
4966 		(void) pthread_mutex_unlock(&pp->rn_lock);
4967 
4968 		np->rn_flags &= ~RC_NODE_IN_PARENT;
4969 	}
4970 
4971 	/*
4972 	 * finally, propagate death to our children (including marking
4973 	 * them DEAD), handle notifications, and release our hold.
4974 	 */
4975 	rc_node_hold_locked(np);	/* hold for delete */
4976 	rc_node_delete_children(np, 1);	/* drops DYING_FLAGS, lock, ref */
4977 
4978 	rc_node_clear(npp, 1);
4979 
4980 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
4981 	while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
4982 		rc_pg_notify_fire(pnp);
4983 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
4984 	rc_notify_remove_node(np);
4985 
4986 	rc_node_rele(np);
4987 
4988 	smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
4989 	    &audit_data);
4990 	free(audit_data.ed_auth);
4991 	free(audit_data.ed_snapname);
4992 	free(audit_data.ed_type);
4993 	free(audit_data.ed_fmri);
4994 	return (rc);
4995 
4996 fail:
4997 	rc_node_rele(np);
4998 	if (rc == REP_PROTOCOL_FAIL_DELETED)
4999 		rc_node_clear(npp, 1);
5000 	if (pp != NULL) {
5001 		(void) pthread_mutex_lock(&pp->rn_lock);
5002 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5003 		rc_node_rele_locked(pp);	/* drop ref and lock */
5004 	}
5005 	if (audit_failure) {
5006 		smf_audit_event(event_id, ADT_FAILURE,
5007 		    ADT_FAIL_VALUE_AUTH, &audit_data);
5008 	}
5009 cleanout:
5010 	free(audit_data.ed_auth);
5011 	free(audit_data.ed_snapname);
5012 	free(audit_data.ed_type);
5013 	free(audit_data.ed_fmri);
5014 	return (rc);
5015 }
5016 
5017 int
5018 rc_node_next_snaplevel(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
5019 {
5020 	rc_node_t *np;
5021 	rc_node_t *cp, *pp;
5022 	int res;
5023 
5024 	rc_node_clear(cpp, 0);
5025 
5026 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5027 
5028 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT &&
5029 	    np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) {
5030 		(void) pthread_mutex_unlock(&np->rn_lock);
5031 		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
5032 	}
5033 
5034 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
5035 		if ((res = rc_node_fill_children(np,
5036 		    REP_PROTOCOL_ENTITY_SNAPLEVEL)) != REP_PROTOCOL_SUCCESS) {
5037 			(void) pthread_mutex_unlock(&np->rn_lock);
5038 			return (res);
5039 		}
5040 
5041 		for (cp = uu_list_first(np->rn_children);
5042 		    cp != NULL;
5043 		    cp = uu_list_next(np->rn_children, cp)) {
5044 			if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
5045 				continue;
5046 			rc_node_hold(cp);
5047 			break;
5048 		}
5049 
5050 		(void) pthread_mutex_unlock(&np->rn_lock);
5051 	} else {
5052 		if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
5053 			(void) pthread_mutex_unlock(&np->rn_lock);
5054 			rc_node_clear(npp, 1);
5055 			return (REP_PROTOCOL_FAIL_DELETED);
5056 		}
5057 
5058 		/*
5059 		 * Mark our parent as children changing.  This call drops our
5060 		 * lock and the RC_NODE_USING_PARENT flag, and returns with
5061 		 * pp's lock held.
5062 		 */
5063 		pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
5064 		if (pp == NULL) {
5065 			/* our parent is gone, we're going next... */
5066 
5067 			rc_node_clear(npp, 1);
5068 			return (REP_PROTOCOL_FAIL_DELETED);
5069 		}
5070 
5071 		/*
5072 		 * find the next snaplevel
5073 		 */
5074 		cp = np;
5075 		while ((cp = uu_list_next(pp->rn_children, cp)) != NULL &&
5076 		    cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
5077 			;
5078 
5079 		/* it must match the snaplevel list */
5080 		assert((cp == NULL && np->rn_snaplevel->rsl_next == NULL) ||
5081 		    (cp != NULL && np->rn_snaplevel->rsl_next ==
5082 		    cp->rn_snaplevel));
5083 
5084 		if (cp != NULL)
5085 			rc_node_hold(cp);
5086 
5087 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5088 
5089 		(void) pthread_mutex_unlock(&pp->rn_lock);
5090 	}
5091 
5092 	rc_node_assign(cpp, cp);
5093 	if (cp != NULL) {
5094 		rc_node_rele(cp);
5095 
5096 		return (REP_PROTOCOL_SUCCESS);
5097 	}
5098 	return (REP_PROTOCOL_FAIL_NOT_FOUND);
5099 }
5100 
5101 /*
5102  * This call takes a snapshot (np) and either:
5103  *	an existing snapid (to be associated with np), or
5104  *	a non-NULL parentp (from which a new snapshot is taken, and associated
5105  *	    with np)
5106  *
5107  * To do the association, np is duplicated, the duplicate is made to
5108  * represent the new snapid, and np is replaced with the new rc_node_t on
5109  * np's parent's child list.  np is put on the new node's rn_former list,
5110  * and the new node replaces np in cache_hash (so rc_node_update() finds it).
5111  *
5112  * old_fmri and old_name point to the original snapshot's FMRI and name.
5113  * These values are used when generating audit events.
5114  *
5115  * Fails with
5116  *	_BAD_REQUEST
5117  *	_BACKEND_READONLY
5118  *	_DELETED
5119  *	_NO_RESOURCES
5120  *	_TRUNCATED
5121  *	_TYPE_MISMATCH
5122  */
5123 static int
5124 rc_attach_snapshot(
5125 	rc_node_t *np,
5126 	uint32_t snapid,
5127 	rc_node_t *parentp,
5128 	char *old_fmri,
5129 	char *old_name)
5130 {
5131 	rc_node_t *np_orig;
5132 	rc_node_t *nnp, *prev;
5133 	rc_node_t *pp;
5134 	int rc;
5135 	size_t sz_out;
5136 	perm_status_t granted;
5137 	au_event_t event_id;
5138 	audit_event_data_t audit_data;
5139 
5140 	if (parentp == NULL) {
5141 		assert(old_fmri != NULL);
5142 	} else {
5143 		assert(snapid == 0);
5144 	}
5145 	assert(MUTEX_HELD(&np->rn_lock));
5146 
5147 	/* Gather the audit data. */
5148 	/*
5149 	 * ADT_smf_* symbols may not be defined in the /usr/include header
5150 	 * files on the build machine.  Thus, the following if-else will
5151 	 * not be compiled when doing native builds.
5152 	 */
5153 #ifndef	NATIVE_BUILD
5154 	if (parentp == NULL) {
5155 		event_id = ADT_smf_attach_snap;
5156 	} else {
5157 		event_id = ADT_smf_create_snap;
5158 	}
5159 #endif	/* NATIVE_BUILD */
5160 	audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
5161 	audit_data.ed_snapname = malloc(REP_PROTOCOL_NAME_LEN);
5162 	if ((audit_data.ed_fmri == NULL) || (audit_data.ed_snapname == NULL)) {
5163 		(void) pthread_mutex_unlock(&np->rn_lock);
5164 		free(audit_data.ed_fmri);
5165 		free(audit_data.ed_snapname);
5166 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5167 	}
5168 	audit_data.ed_auth = NULL;
5169 	if (strlcpy(audit_data.ed_snapname, np->rn_name,
5170 	    REP_PROTOCOL_NAME_LEN) >= REP_PROTOCOL_NAME_LEN) {
5171 		abort();
5172 	}
5173 	audit_data.ed_old_fmri = old_fmri;
5174 	audit_data.ed_old_name = old_name ? old_name : "NO NAME";
5175 
5176 	if (parentp == NULL) {
5177 		/*
5178 		 * In the attach case, get the instance FMRIs of the
5179 		 * snapshots.
5180 		 */
5181 		if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
5182 		    REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
5183 			(void) pthread_mutex_unlock(&np->rn_lock);
5184 			free(audit_data.ed_fmri);
5185 			free(audit_data.ed_snapname);
5186 			return (rc);
5187 		}
5188 	} else {
5189 		/*
5190 		 * Capture the FMRI of the parent if we're actually going
5191 		 * to take the snapshot.
5192 		 */
5193 		if ((rc = rc_node_get_fmri_or_fragment(parentp,
5194 		    audit_data.ed_fmri, REP_PROTOCOL_FMRI_LEN, &sz_out)) !=
5195 		    REP_PROTOCOL_SUCCESS) {
5196 			(void) pthread_mutex_unlock(&np->rn_lock);
5197 			free(audit_data.ed_fmri);
5198 			free(audit_data.ed_snapname);
5199 			return (rc);
5200 		}
5201 	}
5202 
5203 	np_orig = np;
5204 	rc_node_hold_locked(np);		/* simplifies the remainder */
5205 
5206 	(void) pthread_mutex_unlock(&np->rn_lock);
5207 	granted = rc_node_modify_permission_check(&audit_data.ed_auth);
5208 	switch (granted) {
5209 	case PERM_DENIED:
5210 		smf_audit_event(event_id, ADT_FAILURE, ADT_FAIL_VALUE_AUTH,
5211 		    &audit_data);
5212 		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5213 		rc_node_rele(np);
5214 		goto cleanout;
5215 	case PERM_GRANTED:
5216 		break;
5217 	case PERM_GONE:
5218 		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5219 		rc_node_rele(np);
5220 		goto cleanout;
5221 	case PERM_FAIL:
5222 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5223 		rc_node_rele(np);
5224 		goto cleanout;
5225 	default:
5226 		bad_error(rc_node_modify_permission_check, granted);
5227 	}
5228 	(void) pthread_mutex_lock(&np->rn_lock);
5229 
5230 	/*
5231 	 * get the latest node, holding RC_NODE_IN_TX to keep the rn_former
5232 	 * list from changing.
5233 	 */
5234 	for (;;) {
5235 		if (!(np->rn_flags & RC_NODE_OLD)) {
5236 			if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
5237 				goto again;
5238 			}
5239 			pp = rc_node_hold_parent_flag(np,
5240 			    RC_NODE_CHILDREN_CHANGING);
5241 
5242 			(void) pthread_mutex_lock(&np->rn_lock);
5243 			if (pp == NULL) {
5244 				goto again;
5245 			}
5246 			if (np->rn_flags & RC_NODE_OLD) {
5247 				rc_node_rele_flag(pp,
5248 				    RC_NODE_CHILDREN_CHANGING);
5249 				(void) pthread_mutex_unlock(&pp->rn_lock);
5250 				goto again;
5251 			}
5252 			(void) pthread_mutex_unlock(&pp->rn_lock);
5253 
5254 			if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
5255 				/*
5256 				 * Can't happen, since we're holding our
5257 				 * parent's CHILDREN_CHANGING flag...
5258 				 */
5259 				abort();
5260 			}
5261 			break;			/* everything's ready */
5262 		}
5263 again:
5264 		rc_node_rele_locked(np);
5265 		np = cache_lookup(&np_orig->rn_id);
5266 
5267 		if (np == NULL) {
5268 			rc = REP_PROTOCOL_FAIL_DELETED;
5269 			goto cleanout;
5270 		}
5271 
5272 		(void) pthread_mutex_lock(&np->rn_lock);
5273 	}
5274 
5275 	if (parentp != NULL) {
5276 		if (pp != parentp) {
5277 			rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
5278 			goto fail;
5279 		}
5280 		nnp = NULL;
5281 	} else {
5282 		/*
5283 		 * look for a former node with the snapid we need.
5284 		 */
5285 		if (np->rn_snapshot_id == snapid) {
5286 			rc_node_rele_flag(np, RC_NODE_IN_TX);
5287 			rc_node_rele_locked(np);
5288 
5289 			(void) pthread_mutex_lock(&pp->rn_lock);
5290 			rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5291 			(void) pthread_mutex_unlock(&pp->rn_lock);
5292 			rc = REP_PROTOCOL_SUCCESS;	/* nothing to do */
5293 			goto cleanout;
5294 		}
5295 
5296 		prev = np;
5297 		while ((nnp = prev->rn_former) != NULL) {
5298 			if (nnp->rn_snapshot_id == snapid) {
5299 				rc_node_hold(nnp);
5300 				break;		/* existing node with that id */
5301 			}
5302 			prev = nnp;
5303 		}
5304 	}
5305 
5306 	if (nnp == NULL) {
5307 		prev = NULL;
5308 		nnp = rc_node_alloc();
5309 		if (nnp == NULL) {
5310 			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5311 			goto fail;
5312 		}
5313 
5314 		nnp->rn_id = np->rn_id;		/* structure assignment */
5315 		nnp->rn_hash = np->rn_hash;
5316 		nnp->rn_name = strdup(np->rn_name);
5317 		nnp->rn_snapshot_id = snapid;
5318 		nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
5319 
5320 		if (nnp->rn_name == NULL) {
5321 			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
5322 			goto fail;
5323 		}
5324 	}
5325 
5326 	(void) pthread_mutex_unlock(&np->rn_lock);
5327 
5328 	rc = object_snapshot_attach(&np->rn_id, &snapid, (parentp != NULL));
5329 
5330 	if (parentp != NULL)
5331 		nnp->rn_snapshot_id = snapid;	/* fill in new snapid */
5332 	else
5333 		assert(nnp->rn_snapshot_id == snapid);
5334 
5335 	(void) pthread_mutex_lock(&np->rn_lock);
5336 	if (rc != REP_PROTOCOL_SUCCESS)
5337 		goto fail;
5338 
5339 	/*
5340 	 * fix up the former chain
5341 	 */
5342 	if (prev != NULL) {
5343 		prev->rn_former = nnp->rn_former;
5344 		(void) pthread_mutex_lock(&nnp->rn_lock);
5345 		nnp->rn_flags &= ~RC_NODE_ON_FORMER;
5346 		nnp->rn_former = NULL;
5347 		(void) pthread_mutex_unlock(&nnp->rn_lock);
5348 	}
5349 	np->rn_flags |= RC_NODE_OLD;
5350 	(void) pthread_mutex_unlock(&np->rn_lock);
5351 
5352 	/*
5353 	 * replace np with nnp
5354 	 */
5355 	rc_node_relink_child(pp, np, nnp);
5356 
5357 	rc_node_rele(np);
5358 	smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS, &audit_data);
5359 	rc = REP_PROTOCOL_SUCCESS;
5360 
5361 cleanout:
5362 	free(audit_data.ed_auth);
5363 	free(audit_data.ed_fmri);
5364 	free(audit_data.ed_snapname);
5365 	return (rc);
5366 
5367 fail:
5368 	rc_node_rele_flag(np, RC_NODE_IN_TX);
5369 	rc_node_rele_locked(np);
5370 	(void) pthread_mutex_lock(&pp->rn_lock);
5371 	rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5372 	(void) pthread_mutex_unlock(&pp->rn_lock);
5373 
5374 	if (nnp != NULL) {
5375 		if (prev == NULL)
5376 			rc_node_destroy(nnp);
5377 		else
5378 			rc_node_rele(nnp);
5379 	}
5380 
5381 	free(audit_data.ed_auth);
5382 	free(audit_data.ed_fmri);
5383 	free(audit_data.ed_snapname);
5384 	return (rc);
5385 }
5386 
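/*
 * Take a new snapshot, named 'name', of the instance referenced by npp and
 * return the new snapshot node in *outpp.  svcname and instname, if
 * non-NULL, are validated as service and instance names.  The permission
 * check is done before the node is locked, and an ADT_smf_create_snap
 * audit event is generated on success as well as on an authorization
 * failure.
 */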
5387 int
5388 rc_snapshot_take_new(rc_node_ptr_t *npp, const char *svcname,
5389     const char *instname, const char *name, rc_node_ptr_t *outpp)
5390 {
5391 	perm_status_t granted;
5392 	rc_node_t *np;
5393 	rc_node_t *outp = NULL;
5394 	int rc, perm_rc;
5395 	char fmri[REP_PROTOCOL_FMRI_LEN];
5396 	audit_event_data_t audit_data;
5397 	size_t sz_out;
5398 
5399 	rc_node_clear(outpp, 0);
5400 
5401 	/*
5402 	 * rc_node_modify_permission_check() must be called before the node
5403 	 * is locked.  This is because the library functions that check
5404 	 * authorizations can trigger calls back into configd.
5405 	 */
5406 	granted = rc_node_modify_permission_check(&audit_data.ed_auth);
5407 	switch (granted) {
5408 	case PERM_DENIED:
5409 		/*
5410 		 * We continue in this case, so that we can generate an
5411 		 * audit event later in this function.
5412 		 */
5413 		perm_rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5414 		break;
5415 	case PERM_GRANTED:
5416 		perm_rc = REP_PROTOCOL_SUCCESS;
5417 		break;
5418 	case PERM_GONE:
5419 		/* No need to produce audit event if client is gone. */
5420 		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5421 	case PERM_FAIL:
5422 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5423 	default:
5424 		bad_error("rc_node_modify_permission_check", granted);
5425 		break;
5426 	}
5427 
5428 	RC_NODE_PTR_CHECK_LOCK_OR_FREE_RETURN(np, npp, audit_data.ed_auth);
5429 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5430 		(void) pthread_mutex_unlock(&np->rn_lock);
5431 		free(audit_data.ed_auth);
5432 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5433 	}
5434 
5435 	rc = rc_check_type_name(REP_PROTOCOL_ENTITY_SNAPSHOT, name);
5436 	if (rc != REP_PROTOCOL_SUCCESS) {
5437 		(void) pthread_mutex_unlock(&np->rn_lock);
5438 		free(audit_data.ed_auth);
5439 		return (rc);
5440 	}
5441 
5442 	if (svcname != NULL && (rc =
5443 	    rc_check_type_name(REP_PROTOCOL_ENTITY_SERVICE, svcname)) !=
5444 	    REP_PROTOCOL_SUCCESS) {
5445 		(void) pthread_mutex_unlock(&np->rn_lock);
5446 		free(audit_data.ed_auth);
5447 		return (rc);
5448 	}
5449 
5450 	if (instname != NULL && (rc =
5451 	    rc_check_type_name(REP_PROTOCOL_ENTITY_INSTANCE, instname)) !=
5452 	    REP_PROTOCOL_SUCCESS) {
5453 		(void) pthread_mutex_unlock(&np->rn_lock);
5454 		free(audit_data.ed_auth);
5455 		return (rc);
5456 	}
5457 
5458 	audit_data.ed_fmri = fmri;
5459 	audit_data.ed_snapname = (char *)name;
5460 
5461 	if ((rc = rc_node_get_fmri_or_fragment(np, fmri, sizeof (fmri),
5462 	    &sz_out)) != REP_PROTOCOL_SUCCESS) {
5463 		(void) pthread_mutex_unlock(&np->rn_lock);
5464 		free(audit_data.ed_auth);
5465 		return (rc);
5466 	}
5467 	if (perm_rc != REP_PROTOCOL_SUCCESS) {
5468 		(void) pthread_mutex_unlock(&np->rn_lock);
5469 		smf_audit_event(ADT_smf_create_snap, ADT_FAILURE,
5470 		    ADT_FAIL_VALUE_AUTH, &audit_data);
5471 		free(audit_data.ed_auth);
5472 		return (perm_rc);
5473 	}
5474 
5475 	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
5476 	    audit_data.ed_auth);
5477 	(void) pthread_mutex_unlock(&np->rn_lock);
5478 
5479 	rc = object_snapshot_take_new(np, svcname, instname, name, &outp);
5480 
5481 	if (rc == REP_PROTOCOL_SUCCESS) {
5482 		rc_node_assign(outpp, outp);
5483 		rc_node_rele(outp);
5484 	}
5485 
5486 	(void) pthread_mutex_lock(&np->rn_lock);
5487 	rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
5488 	(void) pthread_mutex_unlock(&np->rn_lock);
5489 
5490 	if (rc == REP_PROTOCOL_SUCCESS) {
5491 		smf_audit_event(ADT_smf_create_snap, ADT_SUCCESS, ADT_SUCCESS,
5492 		    &audit_data);
5493 	}
5494 	if (audit_data.ed_auth != NULL)
5495 		free(audit_data.ed_auth);
5496 	return (rc);
5497 }
5498 
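/*
 * Take a new snapshot of the instance referenced by npp and attach it to
 * the existing snapshot referenced by outpp.  rc_attach_snapshot() drops
 * outp's lock.
 *
 * Fails with
 *   _TYPE_MISMATCH - npp is not an instance
 *   _BAD_REQUEST - outpp is not a snapshot
 * and with whatever rc_attach_snapshot() can fail with.
 */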
5499 int
5500 rc_snapshot_take_attach(rc_node_ptr_t *npp, rc_node_ptr_t *outpp)
5501 {
5502 	rc_node_t *np, *outp;
5503 
5504 	RC_NODE_PTR_GET_CHECK(np, npp);
5505 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5506 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5507 	}
5508 
5509 	RC_NODE_PTR_GET_CHECK_AND_LOCK(outp, outpp);
5510 	if (outp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5511 		(void) pthread_mutex_unlock(&outp->rn_lock);
5512 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5513 	}
5514 
5515 	return (rc_attach_snapshot(outp, 0, np, NULL,
5516 	    NULL));					/* drops outp's lock */
5517 }
5518 
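/*
 * Attach the snapshot contents referenced by npp (identified by its
 * snapshot id) to the snapshot node referenced by cpp.  The source
 * snapshot's FMRI and name are captured first for the audit record;
 * rc_attach_snapshot() drops cp's lock.  Fails with _BAD_REQUEST if either
 * node is not a snapshot.
 */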
5519 int
5520 rc_snapshot_attach(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
5521 {
5522 	rc_node_t *np;
5523 	rc_node_t *cp;
5524 	uint32_t snapid;
5525 	char old_name[REP_PROTOCOL_NAME_LEN];
5526 	int rc;
5527 	size_t sz_out;
5528 	char old_fmri[REP_PROTOCOL_FMRI_LEN];
5529 
5530 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5531 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5532 		(void) pthread_mutex_unlock(&np->rn_lock);
5533 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5534 	}
5535 	snapid = np->rn_snapshot_id;
5536 	rc = rc_node_get_fmri_or_fragment(np, old_fmri, sizeof (old_fmri),
5537 	    &sz_out);
5538 	(void) pthread_mutex_unlock(&np->rn_lock);
5539 	if (rc != REP_PROTOCOL_SUCCESS)
5540 		return (rc);
5541 	if (np->rn_name != NULL) {
5542 		if (strlcpy(old_name, np->rn_name, sizeof (old_name)) >=
5543 		    sizeof (old_name)) {
5544 			return (REP_PROTOCOL_FAIL_TRUNCATED);
5545 		}
5546 	}
5547 
5548 	RC_NODE_PTR_GET_CHECK_AND_LOCK(cp, cpp);
5549 	if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5550 		(void) pthread_mutex_unlock(&cp->rn_lock);
5551 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5552 	}
5553 
5554 	rc = rc_attach_snapshot(cp, snapid, NULL,
5555 	    old_fmri, old_name);			/* drops cp's lock */
5556 	return (rc);
5557 }
5558 
5559 /*
5560  * If the property group named pgname under ent has type pgtype, and it
5561  * has a propname property with type ptype, return _SUCCESS.  If pgtype is
5562  * NULL, it is not checked.  If ent is not a service node, we return
5563  * _SUCCESS if a property meeting the requirements exists in either ent or
5564  * its ancestor service.
5565  *
5566  * Returns
5567  *   _SUCCESS - see above
5568  *   _DELETED - ent or one of its ancestors was deleted
5569  *   _NO_RESOURCES - no resources
5570  *   _NOT_FOUND - no matching property was found
5571  */
5572 static int
5573 rc_svc_prop_exists(rc_node_t *ent, const char *pgname, const char *pgtype,
5574     const char *propname, rep_protocol_value_type_t ptype)
5575 {
5576 	int ret;
5577 	rc_node_t *pg = NULL, *spg = NULL, *svc, *prop;
5578 
5579 	assert(!MUTEX_HELD(&ent->rn_lock));
5580 
5581 	(void) pthread_mutex_lock(&ent->rn_lock);
5582 	ret = rc_node_find_named_child(ent, pgname,
5583 	    REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
5584 	(void) pthread_mutex_unlock(&ent->rn_lock);
5585 
5586 	switch (ret) {
5587 	case REP_PROTOCOL_SUCCESS:
5588 		break;
5589 
5590 	case REP_PROTOCOL_FAIL_DELETED:
5591 	case REP_PROTOCOL_FAIL_NO_RESOURCES:
5592 		return (ret);
5593 
5594 	default:
5595 		bad_error("rc_node_find_named_child", ret);
5596 	}
5597 
5598 	if (ent->rn_id.rl_type != REP_PROTOCOL_ENTITY_SERVICE) {
5599 		ret = rc_node_find_ancestor(ent, REP_PROTOCOL_ENTITY_SERVICE,
5600 		    &svc);
5601 		if (ret != REP_PROTOCOL_SUCCESS) {
5602 			assert(ret == REP_PROTOCOL_FAIL_DELETED);
5603 			if (pg != NULL)
5604 				rc_node_rele(pg);
5605 			return (ret);
5606 		}
5607 		assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);
5608 
5609 		(void) pthread_mutex_lock(&svc->rn_lock);
5610 		ret = rc_node_find_named_child(svc, pgname,
5611 		    REP_PROTOCOL_ENTITY_PROPERTYGRP, &spg);
5612 		(void) pthread_mutex_unlock(&svc->rn_lock);
5613 
5614 		rc_node_rele(svc);
5615 
5616 		switch (ret) {
5617 		case REP_PROTOCOL_SUCCESS:
5618 			break;
5619 
5620 		case REP_PROTOCOL_FAIL_DELETED:
5621 		case REP_PROTOCOL_FAIL_NO_RESOURCES:
5622 			if (pg != NULL)
5623 				rc_node_rele(pg);
5624 			return (ret);
5625 
5626 		default:
5627 			bad_error("rc_node_find_named_child", ret);
5628 		}
5629 	}
5630 
5631 	if (pg != NULL &&
5632 	    pgtype != NULL && strcmp(pg->rn_type, pgtype) != 0) {
5633 		rc_node_rele(pg);
5634 		pg = NULL;
5635 	}
5636 
5637 	if (spg != NULL &&
5638 	    pgtype != NULL && strcmp(spg->rn_type, pgtype) != 0) {
5639 		rc_node_rele(spg);
5640 		spg = NULL;
5641 	}
5642 
5643 	if (pg == NULL) {
5644 		if (spg == NULL)
5645 			return (REP_PROTOCOL_FAIL_NOT_FOUND);
5646 		pg = spg;
5647 		spg = NULL;
5648 	}
5649 
5650 	/*
5651 	 * At this point, pg is non-NULL, and is a property group node of the
5652 	 * correct type.  spg, if non-NULL, is also a property group node of
5653 	 * the correct type.  Check for the property in pg first, then spg
5654 	 * (if applicable).
5655 	 */
5656 	(void) pthread_mutex_lock(&pg->rn_lock);
5657 	ret = rc_node_find_named_child(pg, propname,
5658 	    REP_PROTOCOL_ENTITY_PROPERTY, &prop);
5659 	(void) pthread_mutex_unlock(&pg->rn_lock);
5660 	rc_node_rele(pg);
5661 	switch (ret) {
5662 	case REP_PROTOCOL_SUCCESS:
5663 		if (prop != NULL) {
5664 			if (prop->rn_valtype == ptype) {
5665 				rc_node_rele(prop);
5666 				if (spg != NULL)
5667 					rc_node_rele(spg);
5668 				return (REP_PROTOCOL_SUCCESS);
5669 			}
5670 			rc_node_rele(prop);
5671 		}
5672 		break;
5673 
5674 	case REP_PROTOCOL_FAIL_NO_RESOURCES:
5675 		if (spg != NULL)
5676 			rc_node_rele(spg);
5677 		return (ret);
5678 
5679 	case REP_PROTOCOL_FAIL_DELETED:
5680 		break;
5681 
5682 	default:
5683 		bad_error("rc_node_find_named_child", ret);
5684 	}
5685 
5686 	if (spg == NULL)
5687 		return (REP_PROTOCOL_FAIL_NOT_FOUND);
5688 
5689 	pg = spg;
5690 
5691 	(void) pthread_mutex_lock(&pg->rn_lock);
5692 	ret = rc_node_find_named_child(pg, propname,
5693 	    REP_PROTOCOL_ENTITY_PROPERTY, &prop);
5694 	(void) pthread_mutex_unlock(&pg->rn_lock);
5695 	rc_node_rele(pg);
5696 	switch (ret) {
5697 	case REP_PROTOCOL_SUCCESS:
5698 		if (prop != NULL) {
5699 			if (prop->rn_valtype == ptype) {
5700 				rc_node_rele(prop);
5701 				return (REP_PROTOCOL_SUCCESS);
5702 			}
5703 			rc_node_rele(prop);
5704 		}
5705 		return (REP_PROTOCOL_FAIL_NOT_FOUND);
5706 
5707 	case REP_PROTOCOL_FAIL_NO_RESOURCES:
5708 		return (ret);
5709 
5710 	case REP_PROTOCOL_FAIL_DELETED:
5711 		return (REP_PROTOCOL_FAIL_NOT_FOUND);
5712 
5713 	default:
5714 		bad_error("rc_node_find_named_child", ret);
5715 	}
5716 
5717 	return (REP_PROTOCOL_SUCCESS);
5718 }
5719 
5720 /*
5721  * Given a property group node, returns _SUCCESS if the property group may
5722  * be read without any special authorization.
5723  *
5724  * Fails with:
5725  *   _DELETED - np or an ancestor node was deleted
5726  *   _TYPE_MISMATCH - np does not refer to a property group
5727  *   _NO_RESOURCES - no resources
5728  *   _PERMISSION_DENIED - authorization is required
5729  */
5730 static int
5731 rc_node_pg_check_read_protect(rc_node_t *np)
5732 {
5733 	int ret;
5734 	rc_node_t *ent;
5735 
5736 	assert(!MUTEX_HELD(&np->rn_lock));
5737 
5738 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
5739 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5740 
5741 	if (strcmp(np->rn_type, SCF_GROUP_FRAMEWORK) == 0 ||
5742 	    strcmp(np->rn_type, SCF_GROUP_DEPENDENCY) == 0 ||
5743 	    strcmp(np->rn_type, SCF_GROUP_METHOD) == 0)
5744 		return (REP_PROTOCOL_SUCCESS);
5745 
5746 	ret = rc_node_parent(np, &ent);
5747 
5748 	if (ret != REP_PROTOCOL_SUCCESS)
5749 		return (ret);
5750 
5751 	ret = rc_svc_prop_exists(ent, np->rn_name, np->rn_type,
5752 	    AUTH_PROP_READ, REP_PROTOCOL_TYPE_STRING);
5753 
5754 	rc_node_rele(ent);
5755 
5756 	switch (ret) {
5757 	case REP_PROTOCOL_FAIL_NOT_FOUND:
5758 		return (REP_PROTOCOL_SUCCESS);
5759 	case REP_PROTOCOL_SUCCESS:
5760 		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5761 	case REP_PROTOCOL_FAIL_DELETED:
5762 	case REP_PROTOCOL_FAIL_NO_RESOURCES:
5763 		return (ret);
5764 	default:
5765 		bad_error("rc_svc_prop_exists", ret);
5766 	}
5767 
5768 	return (REP_PROTOCOL_SUCCESS);
5769 }
5770 
5771 /*
5772  * Fails with
5773  *   _DELETED - np's node or parent has been deleted
5774  *   _TYPE_MISMATCH - np's node is not a property
5775  *   _NO_RESOURCES - out of memory
5776  *   _PERMISSION_DENIED - no authorization to read this property's value(s)
5777  *   _BAD_REQUEST - np's parent is not a property group
5778  */
5779 static int
5780 rc_node_property_may_read(rc_node_t *np)
5781 {
5782 	int ret;
5783 	perm_status_t granted = PERM_DENIED;
5784 	rc_node_t *pgp;
5785 	permcheck_t *pcp;
5786 	audit_event_data_t audit_data;
5787 	size_t sz_out;
5788 
5789 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
5790 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5791 
5792 	if (client_is_privileged())
5793 		return (REP_PROTOCOL_SUCCESS);
5794 
5795 #ifdef NATIVE_BUILD
5796 	return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5797 #else
5798 	ret = rc_node_parent(np, &pgp);
5799 
5800 	if (ret != REP_PROTOCOL_SUCCESS)
5801 		return (ret);
5802 
5803 	if (pgp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
5804 		rc_node_rele(pgp);
5805 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5806 	}
5807 
5808 	ret = rc_node_pg_check_read_protect(pgp);
5809 
5810 	if (ret != REP_PROTOCOL_FAIL_PERMISSION_DENIED) {
5811 		rc_node_rele(pgp);
5812 		return (ret);
5813 	}
5814 
5815 	pcp = pc_create();
5816 
5817 	if (pcp == NULL) {
5818 		rc_node_rele(pgp);
5819 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5820 	}
5821 
5822 	ret = perm_add_enabling(pcp, AUTH_MODIFY);
5823 
5824 	if (ret == REP_PROTOCOL_SUCCESS) {
5825 		const char * const auth =
5826 		    perm_auth_for_pgtype(pgp->rn_type);
5827 
5828 		if (auth != NULL)
5829 			ret = perm_add_enabling(pcp, auth);
5830 	}
5831 
5832 	/*
5833 	 * If you are permitted to modify the value, you may also
5834 	 * read it.  This means that both the MODIFY and VALUE
5835 	 * authorizations are acceptable.  We don't allow requests
5836 	 * for AUTH_PROP_MODIFY if all you have is $AUTH_PROP_VALUE,
5837 	 * however, to avoid leaking possibly valuable information
5838 	 * since such a user can't change the property anyway.
5839 	 */
5840 	if (ret == REP_PROTOCOL_SUCCESS)
5841 		ret = perm_add_enabling_values(pcp, pgp,
5842 		    AUTH_PROP_MODIFY);
5843 
5844 	if (ret == REP_PROTOCOL_SUCCESS &&
5845 	    strcmp(np->rn_name, AUTH_PROP_MODIFY) != 0)
5846 		ret = perm_add_enabling_values(pcp, pgp,
5847 		    AUTH_PROP_VALUE);
5848 
5849 	if (ret == REP_PROTOCOL_SUCCESS)
5850 		ret = perm_add_enabling_values(pcp, pgp,
5851 		    AUTH_PROP_READ);
5852 
5853 	rc_node_rele(pgp);
5854 
5855 	if (ret == REP_PROTOCOL_SUCCESS) {
5856 		granted = perm_granted(pcp);
5857 		if (granted == PERM_FAIL)
5858 			ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5859 		if (granted == PERM_GONE)
5860 			ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5861 	}
5862 
5863 	if (ret == REP_PROTOCOL_SUCCESS) {
5864 		/* Generate a read_prop audit event. */
5865 		audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
5866 		if (audit_data.ed_fmri == NULL)
5867 			ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5868 	}
5869 	if (ret == REP_PROTOCOL_SUCCESS) {
5870 		ret = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
5871 		    REP_PROTOCOL_FMRI_LEN, &sz_out);
5872 	}
5873 	if (ret == REP_PROTOCOL_SUCCESS) {
5874 		int status;
5875 		int ret_value;
5876 
5877 		if (granted == PERM_DENIED) {
5878 			status = ADT_FAILURE;
5879 			ret_value = ADT_FAIL_VALUE_AUTH;
5880 		} else {
5881 			status = ADT_SUCCESS;
5882 			ret_value = ADT_SUCCESS;
5883 		}
5884 		audit_data.ed_auth = pcp->pc_auth_string;
5885 		smf_audit_event(ADT_smf_read_prop,
5886 		    status, ret_value, &audit_data);
5887 	}
5888 	free(audit_data.ed_fmri);
5889 
5890 	pc_free(pcp);
5891 
5892 	if ((ret == REP_PROTOCOL_SUCCESS) && (granted == PERM_DENIED))
5893 		ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5894 
5895 	return (ret);
5896 #endif	/* NATIVE_BUILD */
5897 }
5898 
5899 /*
5900  * Iteration
5901  */
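/*
 * Filter callbacks for rc_iter_create().  The filter argument is the name
 * or property group type to match; rc_iter_null_filter accepts everything.
 */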
5902 static int
5903 rc_iter_filter_name(rc_node_t *np, void *s)
5904 {
5905 	const char *name = s;
5906 
5907 	return (strcmp(np->rn_name, name) == 0);
5908 }
5909 
5910 static int
5911 rc_iter_filter_type(rc_node_t *np, void *s)
5912 {
5913 	const char *type = s;
5914 
5915 	return (np->rn_type != NULL && strcmp(np->rn_type, type) == 0);
5916 }
5917 
5918 /*ARGSUSED*/
5919 static int
5920 rc_iter_null_filter(rc_node_t *np, void *s)
5921 {
5922 	return (1);
5923 }
5924 
5925 /*
5926  * Allocate & initialize an rc_node_iter_t structure.  Essentially, ensure
5927  * np->rn_children is populated and call uu_list_walk_start(np->rn_children).
5928  * If successful, leaves a hold on np and increments np->rn_other_refs.
5929  *
5930  * If composed is true, then set up for iteration across the top level of np's
5931  * composition chain.  If successful, leaves a hold on np and increments
5932  * rn_other_refs for the top level of np's composition chain.
5933  *
5934  * Fails with
5935  *   _NO_RESOURCES
5936  *   _INVALID_TYPE
5937  *   _TYPE_MISMATCH - np cannot carry type children
5938  *   _DELETED
5939  */
5940 static int
5941 rc_iter_create(rc_node_iter_t **resp, rc_node_t *np, uint32_t type,
5942     rc_iter_filter_func *filter, void *arg, boolean_t composed)
5943 {
5944 	rc_node_iter_t *nip;
5945 	int res;
5946 
5947 	assert(*resp == NULL);
5948 
5949 	nip = uu_zalloc(sizeof (*nip));
5950 	if (nip == NULL)
5951 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5952 
5953 	/* np is held by the client's rc_node_ptr_t */
5954 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
5955 		composed = 1;
5956 
5957 	if (!composed) {
5958 		(void) pthread_mutex_lock(&np->rn_lock);
5959 
5960 		if ((res = rc_node_fill_children(np, type)) !=
5961 		    REP_PROTOCOL_SUCCESS) {
5962 			(void) pthread_mutex_unlock(&np->rn_lock);
5963 			uu_free(nip);
5964 			return (res);
5965 		}
5966 
5967 		nip->rni_clevel = -1;
5968 
5969 		nip->rni_iter = uu_list_walk_start(np->rn_children,
5970 		    UU_WALK_ROBUST);
5971 		if (nip->rni_iter != NULL) {
5972 			nip->rni_iter_node = np;
5973 			rc_node_hold_other(np);
5974 		} else {
5975 			(void) pthread_mutex_unlock(&np->rn_lock);
5976 			uu_free(nip);
5977 			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5978 		}
5979 		(void) pthread_mutex_unlock(&np->rn_lock);
5980 	} else {
5981 		rc_node_t *ent;
5982 
5983 		if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
5984 			/* rn_cchain isn't valid until children are loaded. */
5985 			(void) pthread_mutex_lock(&np->rn_lock);
5986 			res = rc_node_fill_children(np,
5987 			    REP_PROTOCOL_ENTITY_SNAPLEVEL);
5988 			(void) pthread_mutex_unlock(&np->rn_lock);
5989 			if (res != REP_PROTOCOL_SUCCESS) {
5990 				uu_free(nip);
5991 				return (res);
5992 			}
5993 
5994 			/* Check for an empty snapshot. */
5995 			if (np->rn_cchain[0] == NULL)
5996 				goto empty;
5997 		}
5998 
5999 		/* Start at the top of the composition chain. */
6000 		for (nip->rni_clevel = 0; ; ++nip->rni_clevel) {
6001 			if (nip->rni_clevel >= COMPOSITION_DEPTH) {
6002 				/* Empty composition chain. */
6003 empty:
6004 				nip->rni_clevel = -1;
6005 				nip->rni_iter = NULL;
6006 				/* It's ok, iter_next() will return _DONE. */
6007 				goto out;
6008 			}
6009 
6010 			ent = np->rn_cchain[nip->rni_clevel];
6011 			assert(ent != NULL);
6012 
6013 			if (rc_node_check_and_lock(ent) == REP_PROTOCOL_SUCCESS)
6014 				break;
6015 
6016 			/* Someone deleted it, so try the next one. */
6017 		}
6018 
6019 		res = rc_node_fill_children(ent, type);
6020 
6021 		if (res == REP_PROTOCOL_SUCCESS) {
6022 			nip->rni_iter = uu_list_walk_start(ent->rn_children,
6023 			    UU_WALK_ROBUST);
6024 
6025 			if (nip->rni_iter == NULL)
6026 				res = REP_PROTOCOL_FAIL_NO_RESOURCES;
6027 			else {
6028 				nip->rni_iter_node = ent;
6029 				rc_node_hold_other(ent);
6030 			}
6031 		}
6032 
6033 		if (res != REP_PROTOCOL_SUCCESS) {
6034 			(void) pthread_mutex_unlock(&ent->rn_lock);
6035 			uu_free(nip);
6036 			return (res);
6037 		}
6038 
6039 		(void) pthread_mutex_unlock(&ent->rn_lock);
6040 	}
6041 
6042 out:
6043 	rc_node_hold(np);		/* released by rc_iter_end() */
6044 	nip->rni_parent = np;
6045 	nip->rni_type = type;
6046 	nip->rni_filter = (filter != NULL)? filter : rc_iter_null_filter;
6047 	nip->rni_filter_arg = arg;
6048 	*resp = nip;
6049 	return (REP_PROTOCOL_SUCCESS);
6050 }
6051 
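/*
 * End the walk in progress: release the uu_list walker, drop the lock on
 * the node being walked (the parent or the current composition-chain
 * member), and release the holds taken by rc_iter_create().  The caller
 * must hold that node's lock; it is dropped here.
 */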
6052 static void
6053 rc_iter_end(rc_node_iter_t *iter)
6054 {
6055 	rc_node_t *np = iter->rni_parent;
6056 
6057 	if (iter->rni_clevel >= 0)
6058 		np = np->rn_cchain[iter->rni_clevel];
6059 
6060 	assert(MUTEX_HELD(&np->rn_lock));
6061 	if (iter->rni_iter != NULL)
6062 		uu_list_walk_end(iter->rni_iter);
6063 	iter->rni_iter = NULL;
6064 
6065 	(void) pthread_mutex_unlock(&np->rn_lock);
6066 	rc_node_rele(iter->rni_parent);
6067 	if (iter->rni_iter_node != NULL)
6068 		rc_node_rele_other(iter->rni_iter_node);
6069 }
6070 
6071 /*
6072  * Fails with
6073  *   _NOT_SET - npp is reset
6074  *   _DELETED - npp's node has been deleted
6075  *   _NOT_APPLICABLE - npp's node is not a property
6076  *   _NO_RESOURCES - out of memory
6077  */
6078 static int
6079 rc_node_setup_value_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp)
6080 {
6081 	rc_node_t *np;
6082 
6083 	rc_node_iter_t *nip;
6084 
6085 	assert(*iterp == NULL);
6086 
6087 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
6088 
6089 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
6090 		(void) pthread_mutex_unlock(&np->rn_lock);
6091 		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6092 	}
6093 
6094 	nip = uu_zalloc(sizeof (*nip));
6095 	if (nip == NULL) {
6096 		(void) pthread_mutex_unlock(&np->rn_lock);
6097 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6098 	}
6099 
6100 	nip->rni_parent = np;
6101 	nip->rni_iter = NULL;
6102 	nip->rni_clevel = -1;
6103 	nip->rni_type = REP_PROTOCOL_ENTITY_VALUE;
6104 	nip->rni_offset = 0;
6105 	nip->rni_last_offset = 0;
6106 
6107 	rc_node_hold_locked(np);
6108 
6109 	*iterp = nip;
6110 	(void) pthread_mutex_unlock(&np->rn_lock);
6111 
6112 	return (REP_PROTOCOL_SUCCESS);
6113 }
6114 
6115 /*
6116  * Returns:
6117  *   _NO_RESOURCES - out of memory
6118  *   _NOT_SET - npp is reset
6119  *   _DELETED - npp's node has been deleted
6120  *   _TYPE_MISMATCH - npp's node is not a property
6121  *   _NOT_FOUND - property has no values
6122  *   _TRUNCATED - property has >1 values (first is written into out)
6123  *   _SUCCESS - property has 1 value (which is written into out)
6124  *   _PERMISSION_DENIED - no authorization to read property value(s)
6125  *
6126  * We shorten *sz_out to not include anything after the final '\0'.
6127  */
6128 int
6129 rc_node_get_property_value(rc_node_ptr_t *npp,
6130     struct rep_protocol_value_response *out, size_t *sz_out)
6131 {
6132 	rc_node_t *np;
6133 	size_t w;
6134 	int ret;
6135 
6136 	assert(*sz_out == sizeof (*out));
6137 
6138 	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
6139 	ret = rc_node_property_may_read(np);
6140 	rc_node_rele(np);
6141 
6142 	if (ret != REP_PROTOCOL_SUCCESS)
6143 		return (ret);
6144 
6145 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
6146 
6147 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
6148 		(void) pthread_mutex_unlock(&np->rn_lock);
6149 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6150 	}
6151 
6152 	if (np->rn_values_size == 0) {
6153 		(void) pthread_mutex_unlock(&np->rn_lock);
6154 		return (REP_PROTOCOL_FAIL_NOT_FOUND);
6155 	}
6156 	out->rpr_type = np->rn_valtype;
6157 	w = strlcpy(out->rpr_value, &np->rn_values[0],
6158 	    sizeof (out->rpr_value));
6159 
6160 	if (w >= sizeof (out->rpr_value))
6161 		backend_panic("value too large");
6162 
6163 	*sz_out = offsetof(struct rep_protocol_value_response,
6164 	    rpr_value[w + 1]);
6165 
6166 	ret = (np->rn_values_count != 1)? REP_PROTOCOL_FAIL_TRUNCATED :
6167 	    REP_PROTOCOL_SUCCESS;
6168 	(void) pthread_mutex_unlock(&np->rn_lock);
6169 	return (ret);
6170 }
6171 
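/*
 * Copy the next value of the property being walked by iter into *out,
 * shortening *sz_out to cover only the bytes used.  If repeat is set, the
 * previously returned value is returned again.
 *
 * Returns _DONE when the values are exhausted and _SUCCESS otherwise.
 * Fails with _BAD_REQUEST if iter does not walk values, and with whatever
 * rc_node_property_may_read() can fail with.
 */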
6172 int
6173 rc_iter_next_value(rc_node_iter_t *iter,
6174     struct rep_protocol_value_response *out, size_t *sz_out, int repeat)
6175 {
6176 	rc_node_t *np = iter->rni_parent;
6177 	const char *vals;
6178 	size_t len;
6179 
6180 	size_t start;
6181 	size_t w;
6182 	int ret;
6183 
6184 	rep_protocol_responseid_t result;
6185 
6186 	assert(*sz_out == sizeof (*out));
6187 
6188 	(void) memset(out, '\0', *sz_out);
6189 
6190 	if (iter->rni_type != REP_PROTOCOL_ENTITY_VALUE)
6191 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6192 
6193 	RC_NODE_CHECK(np);
6194 	ret = rc_node_property_may_read(np);
6195 
6196 	if (ret != REP_PROTOCOL_SUCCESS)
6197 		return (ret);
6198 
6199 	RC_NODE_CHECK_AND_LOCK(np);
6200 
6201 	vals = np->rn_values;
6202 	len = np->rn_values_size;
6203 
6204 	out->rpr_type = np->rn_valtype;
6205 
6206 	start = (repeat)? iter->rni_last_offset : iter->rni_offset;
6207 
6208 	if (len == 0 || start >= len) {
6209 		result = REP_PROTOCOL_DONE;
6210 		*sz_out -= sizeof (out->rpr_value);
6211 	} else {
6212 		w = strlcpy(out->rpr_value, &vals[start],
6213 		    sizeof (out->rpr_value));
6214 
6215 		if (w >= sizeof (out->rpr_value))
6216 			backend_panic("value too large");
6217 
6218 		*sz_out = offsetof(struct rep_protocol_value_response,
6219 		    rpr_value[w + 1]);
6220 
6221 		/*
6222 		 * update the offsets if we're not repeating
6223 		 */
6224 		if (!repeat) {
6225 			iter->rni_last_offset = iter->rni_offset;
6226 			iter->rni_offset += (w + 1);
6227 		}
6228 
6229 		result = REP_PROTOCOL_SUCCESS;
6230 	}
6231 
6232 	(void) pthread_mutex_unlock(&np->rn_lock);
6233 	return (result);
6234 }
6235 
6236 /*
6237  * Entry point for ITER_START from client.c.  Validate the arguments & call
6238  * rc_iter_create().
6239  *
6240  * Fails with
6241  *   _NOT_SET
6242  *   _DELETED
6243  *   _TYPE_MISMATCH - np cannot carry type children
6244  *   _BAD_REQUEST - flags is invalid
6245  *		    pattern is invalid
6246  *   _NO_RESOURCES
6247  *   _INVALID_TYPE
6248  *   _TYPE_MISMATCH - *npp cannot have children of type
6249  *   _BACKEND_ACCESS
6250  */
6251 int
6252 rc_node_setup_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp,
6253     uint32_t type, uint32_t flags, const char *pattern)
6254 {
6255 	rc_node_t *np;
6256 	rc_iter_filter_func *f = NULL;
6257 	int rc;
6258 
6259 	RC_NODE_PTR_GET_CHECK(np, npp);
6260 
6261 	if (pattern != NULL && pattern[0] == '\0')
6262 		pattern = NULL;
6263 
6264 	if (type == REP_PROTOCOL_ENTITY_VALUE) {
6265 		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
6266 			return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6267 		if (flags != RP_ITER_START_ALL || pattern != NULL)
6268 			return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6269 
6270 		rc = rc_node_setup_value_iter(npp, iterp);
6271 		assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6272 		return (rc);
6273 	}
6274 
6275 	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
6276 	    REP_PROTOCOL_SUCCESS)
6277 		return (rc);
6278 
6279 	if (((flags & RP_ITER_START_FILT_MASK) == RP_ITER_START_ALL) ^
6280 	    (pattern == NULL))
6281 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6282 
6283 	/* Composition only works for instances & snapshots. */
6284 	if ((flags & RP_ITER_START_COMPOSED) &&
6285 	    (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE &&
6286 	    np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT))
6287 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6288 
6289 	if (pattern != NULL) {
6290 		if ((rc = rc_check_type_name(type, pattern)) !=
6291 		    REP_PROTOCOL_SUCCESS)
6292 			return (rc);
6293 		pattern = strdup(pattern);
6294 		if (pattern == NULL)
6295 			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6296 	}
6297 
6298 	switch (flags & RP_ITER_START_FILT_MASK) {
6299 	case RP_ITER_START_ALL:
6300 		f = NULL;
6301 		break;
6302 	case RP_ITER_START_EXACT:
6303 		f = rc_iter_filter_name;
6304 		break;
6305 	case RP_ITER_START_PGTYPE:
6306 		if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
6307 			free((void *)pattern);
6308 			return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6309 		}
6310 		f = rc_iter_filter_type;
6311 		break;
6312 	default:
6313 		free((void *)pattern);
6314 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6315 	}
6316 
6317 	rc = rc_iter_create(iterp, np, type, f, (void *)pattern,
6318 	    flags & RP_ITER_START_COMPOSED);
6319 	if (rc != REP_PROTOCOL_SUCCESS && pattern != NULL)
6320 		free((void *)pattern);
6321 
6322 	return (rc);
6323 }
6324 
6325 /*
6326  * Do uu_list_walk_next(iter->rni_iter) until we find a child which matches
6327  * the filter.
6328  * For composed iterators, we then check whether there is an overlapping
6329  * entity (see the embedded comments).  If we reach the end of the list, we
6330  * start over at the next level.
6331  *
6332  * Returns
6333  *   _BAD_REQUEST - iter walks values
6334  *   _TYPE_MISMATCH - iter does not walk type entities
6335  *   _DELETED - parent was deleted
6336  *   _NO_RESOURCES
6337  *   _INVALID_TYPE - type is invalid
6338  *   _DONE
6339  *   _SUCCESS
6340  *
6341  * For composed property group iterators, can also return
6342  *   _TYPE_MISMATCH - parent cannot have type children
6343  */
6344 int
6345 rc_iter_next(rc_node_iter_t *iter, rc_node_ptr_t *out, uint32_t type)
6346 {
6347 	rc_node_t *np = iter->rni_parent;
6348 	rc_node_t *res;
6349 	int rc;
6350 
6351 	if (iter->rni_type == REP_PROTOCOL_ENTITY_VALUE)
6352 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6353 
6354 	if (iter->rni_iter == NULL) {
6355 		rc_node_clear(out, 0);
6356 		return (REP_PROTOCOL_DONE);
6357 	}
6358 
6359 	if (iter->rni_type != type) {
6360 		rc_node_clear(out, 0);
6361 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6362 	}
6363 
6364 	(void) pthread_mutex_lock(&np->rn_lock);  /* held by _iter_create() */
6365 
6366 	if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
6367 		(void) pthread_mutex_unlock(&np->rn_lock);
6368 		rc_node_clear(out, 1);
6369 		return (REP_PROTOCOL_FAIL_DELETED);
6370 	}
6371 
6372 	if (iter->rni_clevel >= 0) {
6373 		/* Composed iterator.  Iterate over appropriate level. */
6374 		(void) pthread_mutex_unlock(&np->rn_lock);
6375 		np = np->rn_cchain[iter->rni_clevel];
6376 		/*
6377 		 * If iter->rni_parent is an instance or a snapshot, np must
6378 		 * be valid, since iter holds iter->rni_parent and the
6379 		 * possible levels (service, instance, snaplevel) cannot be
6380 		 * destroyed while rni_parent is held.  If iter->rni_parent
6381 		 * is a composed property group, then rc_node_setup_cpg()
6382 		 * put a hold on np.
6383 		 */
6384 
6385 		(void) pthread_mutex_lock(&np->rn_lock);
6386 
6387 		if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
6388 			(void) pthread_mutex_unlock(&np->rn_lock);
6389 			rc_node_clear(out, 1);
6390 			return (REP_PROTOCOL_FAIL_DELETED);
6391 		}
6392 	}
6393 
6394 	assert(np->rn_flags & RC_NODE_HAS_CHILDREN);
6395 
6396 	for (;;) {
6397 		res = uu_list_walk_next(iter->rni_iter);
6398 		if (res == NULL) {
6399 			rc_node_t *parent = iter->rni_parent;
6400 
6401 #if COMPOSITION_DEPTH == 2
6402 			if (iter->rni_clevel < 0 || iter->rni_clevel == 1) {
6403 				/* release walker and lock */
6404 				rc_iter_end(iter);
6405 				break;
6406 			}
6407 
6408 			/* Stop walking current level. */
6409 			uu_list_walk_end(iter->rni_iter);
6410 			iter->rni_iter = NULL;
6411 			(void) pthread_mutex_unlock(&np->rn_lock);
6412 			rc_node_rele_other(iter->rni_iter_node);
6413 			iter->rni_iter_node = NULL;
6414 
6415 			/* Start walking next level. */
6416 			++iter->rni_clevel;
6417 			np = parent->rn_cchain[iter->rni_clevel];
6418 			assert(np != NULL);
6419 #else
6420 #error This code must be updated.
6421 #endif
6422 
6423 			(void) pthread_mutex_lock(&np->rn_lock);
6424 
6425 			rc = rc_node_fill_children(np, iter->rni_type);
6426 
6427 			if (rc == REP_PROTOCOL_SUCCESS) {
6428 				iter->rni_iter =
6429 				    uu_list_walk_start(np->rn_children,
6430 				    UU_WALK_ROBUST);
6431 
6432 				if (iter->rni_iter == NULL)
6433 					rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
6434 				else {
6435 					iter->rni_iter_node = np;
6436 					rc_node_hold_other(np);
6437 				}
6438 			}
6439 
6440 			if (rc != REP_PROTOCOL_SUCCESS) {
6441 				(void) pthread_mutex_unlock(&np->rn_lock);
6442 				rc_node_clear(out, 0);
6443 				return (rc);
6444 			}
6445 
6446 			continue;
6447 		}
6448 
6449 		if (res->rn_id.rl_type != type ||
6450 		    !iter->rni_filter(res, iter->rni_filter_arg))
6451 			continue;
6452 
6453 		/*
6454 		 * If we're composed and not at the top level, check to see if
6455 		 * there's an entity at a higher level with the same name.  If
6456 		 * so, skip this one.
6457 		 */
6458 		if (iter->rni_clevel > 0) {
6459 			rc_node_t *ent = iter->rni_parent->rn_cchain[0];
6460 			rc_node_t *pg;
6461 
6462 #if COMPOSITION_DEPTH == 2
6463 			assert(iter->rni_clevel == 1);
6464 
6465 			(void) pthread_mutex_unlock(&np->rn_lock);
6466 			(void) pthread_mutex_lock(&ent->rn_lock);
6467 			rc = rc_node_find_named_child(ent, res->rn_name, type,
6468 			    &pg);
6469 			if (rc == REP_PROTOCOL_SUCCESS && pg != NULL)
6470 				rc_node_rele(pg);
6471 			(void) pthread_mutex_unlock(&ent->rn_lock);
6472 			if (rc != REP_PROTOCOL_SUCCESS) {
6473 				rc_node_clear(out, 0);
6474 				return (rc);
6475 			}
6476 			(void) pthread_mutex_lock(&np->rn_lock);
6477 
6478 			/* Make sure np wasn't deleted while we dropped its lock. */
6479 			if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6480 				(void) pthread_mutex_unlock(&np->rn_lock);
6481 				rc_node_clear(out, 1);
6482 				return (REP_PROTOCOL_FAIL_DELETED);
6483 			}
6484 
6485 			if (pg != NULL)
6486 				/* Keep going. */
6487 				continue;
6488 #else
6489 #error This code must be updated.
6490 #endif
6491 		}
6492 
6493 		/*
6494 		 * If we're composed, iterating over property groups, and not
6495 		 * at the bottom level, check to see if there's a pg at lower
6496 		 * level with the same name.  If so, return a cpg.
6497 		 */
6498 		if (iter->rni_clevel >= 0 &&
6499 		    type == REP_PROTOCOL_ENTITY_PROPERTYGRP &&
6500 		    iter->rni_clevel < COMPOSITION_DEPTH - 1) {
6501 #if COMPOSITION_DEPTH == 2
6502 			rc_node_t *pg;
6503 			rc_node_t *ent = iter->rni_parent->rn_cchain[1];
6504 
6505 			rc_node_hold(res);	/* While we drop np->rn_lock */
6506 
6507 			(void) pthread_mutex_unlock(&np->rn_lock);
6508 			(void) pthread_mutex_lock(&ent->rn_lock);
6509 			rc = rc_node_find_named_child(ent, res->rn_name, type,
6510 			    &pg);
6511 			/* holds pg if not NULL */
6512 			(void) pthread_mutex_unlock(&ent->rn_lock);
6513 			if (rc != REP_PROTOCOL_SUCCESS) {
6514 				rc_node_rele(res);
6515 				rc_node_clear(out, 0);
6516 				return (rc);
6517 			}
6518 
6519 			(void) pthread_mutex_lock(&np->rn_lock);
6520 			if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
6521 				(void) pthread_mutex_unlock(&np->rn_lock);
6522 				rc_node_rele(res);
6523 				if (pg != NULL)
6524 					rc_node_rele(pg);
6525 				rc_node_clear(out, 1);
6526 				return (REP_PROTOCOL_FAIL_DELETED);
6527 			}
6528 
6529 			if (pg == NULL) {
6530 				rc_node_rele(res);
6531 			} else {
6532 				rc_node_t *cpg;
6533 
6534 				/* Keep res held for rc_node_setup_cpg(). */
6535 
6536 				cpg = rc_node_alloc();
6537 				if (cpg == NULL) {
6538 					(void) pthread_mutex_unlock(
6539 					    &np->rn_lock);
6540 					rc_node_rele(res);
6541 					rc_node_rele(pg);
6542 					rc_node_clear(out, 0);
6543 					return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6544 				}
6545 
6546 				switch (rc_node_setup_cpg(cpg, res, pg)) {
6547 				case REP_PROTOCOL_SUCCESS:
6548 					res = cpg;
6549 					break;
6550 
6551 				case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
6552 					/* Nevermind. */
6553 					rc_node_destroy(cpg);
6554 					rc_node_rele(pg);
6555 					rc_node_rele(res);
6556 					break;
6557 
6558 				case REP_PROTOCOL_FAIL_NO_RESOURCES:
6559 					rc_node_destroy(cpg);
6560 					(void) pthread_mutex_unlock(
6561 					    &np->rn_lock);
6562 					rc_node_rele(res);
6563 					rc_node_rele(pg);
6564 					rc_node_clear(out, 0);
6565 					return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6566 
6567 				default:
6568 					assert(0);
6569 					abort();
6570 				}
6571 			}
6572 #else
6573 #error This code must be updated.
6574 #endif
6575 		}
6576 
6577 		rc_node_hold(res);
6578 		(void) pthread_mutex_unlock(&np->rn_lock);
6579 		break;
6580 	}
6581 	rc_node_assign(out, res);
6582 
6583 	if (res == NULL)
6584 		return (REP_PROTOCOL_DONE);
6585 	rc_node_rele(res);
6586 	return (REP_PROTOCOL_SUCCESS);
6587 }
6588 
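/*
 * Tear down an iterator: free its filter argument, end any walk in
 * progress (which releases the appropriate node lock and holds via
 * rc_iter_end()), and free the structure.  *nipp is cleared.
 */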
6589 void
6590 rc_iter_destroy(rc_node_iter_t **nipp)
6591 {
6592 	rc_node_iter_t *nip = *nipp;
6593 	rc_node_t *np;
6594 
6595 	if (nip == NULL)
6596 		return;				/* already freed */
6597 
6598 	np = nip->rni_parent;
6599 
6600 	if (nip->rni_filter_arg != NULL)
6601 		free(nip->rni_filter_arg);
6602 	nip->rni_filter_arg = NULL;
6603 
6604 	if (nip->rni_type == REP_PROTOCOL_ENTITY_VALUE ||
6605 	    nip->rni_iter != NULL) {
6606 		if (nip->rni_clevel < 0)
6607 			(void) pthread_mutex_lock(&np->rn_lock);
6608 		else
6609 			(void) pthread_mutex_lock(
6610 			    &np->rn_cchain[nip->rni_clevel]->rn_lock);
6611 		rc_iter_end(nip);		/* release walker and lock */
6612 	}
6613 	nip->rni_parent = NULL;
6614 
6615 	uu_free(nip);
6616 	*nipp = NULL;
6617 }
6618 
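/*
 * Set up a transaction on the property group referenced by npp, assigning
 * the result to txp.  Property groups in snapshots cannot be modified and
 * fail with _PERMISSION_DENIED.  On the main repository the authorization
 * check is done here, up front; a denial does not fail this call, but is
 * remembered in txp->rnp_authorized (along with the authorization string
 * in txp->rnp_auth_string) so that rc_tx_commit() can generate the audit
 * events and report the failure when the client commits.
 */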
6619 int
6620 rc_node_setup_tx(rc_node_ptr_t *npp, rc_node_ptr_t *txp)
6621 {
6622 	rc_node_t *np;
6623 	permcheck_t *pcp;
6624 	int ret;
6625 	perm_status_t granted;
6626 	rc_auth_state_t authorized = RC_AUTH_UNKNOWN;
6627 	char *auth_string = NULL;
6628 
6629 	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
6630 
6631 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
6632 		rc_node_rele(np);
6633 		np = np->rn_cchain[0];
6634 		RC_NODE_CHECK_AND_HOLD(np);
6635 	}
6636 
6637 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
6638 		rc_node_rele(np);
6639 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
6640 	}
6641 
6642 	if (np->rn_id.rl_ids[ID_SNAPSHOT] != 0) {
6643 		rc_node_rele(np);
6644 		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6645 	}
6646 
6647 #ifdef NATIVE_BUILD
6648 	if (client_is_privileged())
6649 		goto skip_checks;
6650 	rc_node_rele(np);
6651 	return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6652 #else
6653 	if (is_main_repository == 0)
6654 		goto skip_checks;
6655 
6656 	/* permission check */
6657 	pcp = pc_create();
6658 	if (pcp == NULL) {
6659 		rc_node_rele(np);
6660 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6661 	}
6662 
6663 	if (np->rn_id.rl_ids[ID_INSTANCE] != 0 &&	/* instance pg */
6664 	    ((strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0 &&
6665 	    strcmp(np->rn_type, AUTH_PG_ACTIONS_TYPE) == 0) ||
6666 	    (strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
6667 	    strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
6668 		rc_node_t *instn;
6669 
6670 		/* solaris.smf.modify can be used */
6671 		ret = perm_add_enabling(pcp, AUTH_MODIFY);
6672 		if (ret != REP_PROTOCOL_SUCCESS) {
6673 			pc_free(pcp);
6674 			rc_node_rele(np);
6675 			return (ret);
6676 		}
6677 
6678 		/* solaris.smf.manage can be used. */
6679 		ret = perm_add_enabling(pcp, AUTH_MANAGE);
6680 
6681 		if (ret != REP_PROTOCOL_SUCCESS) {
6682 			pc_free(pcp);
6683 			rc_node_rele(np);
6684 			return (ret);
6685 		}
6686 
6687 		/* general/action_authorization values can be used. */
6688 		ret = rc_node_parent(np, &instn);
6689 		if (ret != REP_PROTOCOL_SUCCESS) {
6690 			assert(ret == REP_PROTOCOL_FAIL_DELETED);
6691 			rc_node_rele(np);
6692 			pc_free(pcp);
6693 			return (REP_PROTOCOL_FAIL_DELETED);
6694 		}
6695 
6696 		assert(instn->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
6697 
6698 		ret = perm_add_inst_action_auth(pcp, instn);
6699 		rc_node_rele(instn);
6700 		switch (ret) {
6701 		case REP_PROTOCOL_SUCCESS:
6702 			break;
6703 
6704 		case REP_PROTOCOL_FAIL_DELETED:
6705 		case REP_PROTOCOL_FAIL_NO_RESOURCES:
6706 			rc_node_rele(np);
6707 			pc_free(pcp);
6708 			return (ret);
6709 
6710 		default:
6711 			bad_error("perm_add_inst_action_auth", ret);
6712 		}
6713 
6714 		if (strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0)
6715 			authorized = RC_AUTH_PASSED; /* No check on commit. */
6716 	} else {
6717 		ret = perm_add_enabling(pcp, AUTH_MODIFY);
6718 
6719 		if (ret == REP_PROTOCOL_SUCCESS) {
6720 			/* propertygroup-type-specific authorization */
6721 			/* no locking because rn_type won't change anyway */
6722 			const char * const auth =
6723 			    perm_auth_for_pgtype(np->rn_type);
6724 
6725 			if (auth != NULL)
6726 				ret = perm_add_enabling(pcp, auth);
6727 		}
6728 
6729 		if (ret == REP_PROTOCOL_SUCCESS)
6730 			/* propertygroup/transaction-type-specific auths */
6731 			ret =
6732 			    perm_add_enabling_values(pcp, np, AUTH_PROP_VALUE);
6733 
6734 		if (ret == REP_PROTOCOL_SUCCESS)
6735 			ret =
6736 			    perm_add_enabling_values(pcp, np, AUTH_PROP_MODIFY);
6737 
6738 		/* AUTH_MANAGE can manipulate general/AUTH_PROP_ACTION */
6739 		if (ret == REP_PROTOCOL_SUCCESS &&
6740 		    strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
6741 		    strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0)
6742 			ret = perm_add_enabling(pcp, AUTH_MANAGE);
6743 
6744 		if (ret != REP_PROTOCOL_SUCCESS) {
6745 			pc_free(pcp);
6746 			rc_node_rele(np);
6747 			return (ret);
6748 		}
6749 	}
6750 
6751 	granted = perm_granted(pcp);
6752 	ret = map_granted_status(granted, pcp, &auth_string);
6753 	pc_free(pcp);
6754 
6755 	if ((granted == PERM_GONE) || (granted == PERM_FAIL) ||
6756 	    (ret == REP_PROTOCOL_FAIL_NO_RESOURCES)) {
6757 		free(auth_string);
6758 		rc_node_rele(np);
6759 		return (ret);
6760 	}
6761 
6762 	if (granted == PERM_DENIED) {
6763 		/*
6764 		 * If we get here, the authorization failed.
6765 		 * Unfortunately, we don't have enough information at this
6766 		 * point to generate the security audit events.  We'll only
6767 		 * get that information when the client tries to commit the
6768 		 * transaction.  Thus, we'll remember the failed authorization,
6769 		 * so that we can generate the audit events later.
6770 		 */
6771 		authorized = RC_AUTH_FAILED;
6772 	}
6773 #endif /* NATIVE_BUILD */
6774 
6775 skip_checks:
6776 	rc_node_assign(txp, np);
6777 	txp->rnp_authorized = authorized;
6778 	if (authorized != RC_AUTH_UNKNOWN) {
6779 		/* Save the authorization string. */
6780 		if (txp->rnp_auth_string != NULL)
6781 			free((void *)txp->rnp_auth_string);
6782 		txp->rnp_auth_string = auth_string;
6783 		auth_string = NULL;	/* Don't free until done with txp. */
6784 	}
6785 
6786 	rc_node_rele(np);
6787 	if (auth_string != NULL)
6788 		free(auth_string);
6789 	return (REP_PROTOCOL_SUCCESS);
6790 }
6791 
6792 /*
6793  * Return 1 if the given transaction commands only modify the values of
6794  * properties other than "modify_authorization".  Return -1 if any of the
6795  * commands are invalid, and 0 otherwise.
6796  */
6797 static int
6798 tx_allow_value(const void *cmds_arg, size_t cmds_sz, rc_node_t *pg)
6799 {
6800 	const struct rep_protocol_transaction_cmd *cmds;
6801 	uintptr_t loc;
6802 	uint32_t sz;
6803 	rc_node_t *prop;
6804 	boolean_t ok;
6805 
6806 	assert(!MUTEX_HELD(&pg->rn_lock));
6807 
6808 	loc = (uintptr_t)cmds_arg;
6809 
6810 	while (cmds_sz > 0) {
6811 		cmds = (struct rep_protocol_transaction_cmd *)loc;
6812 
6813 		if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6814 			return (-1);
6815 
6816 		sz = cmds->rptc_size;
6817 		if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6818 			return (-1);
6819 
6820 		sz = TX_SIZE(sz);
6821 		if (sz > cmds_sz)
6822 			return (-1);
6823 
6824 		switch (cmds[0].rptc_action) {
6825 		case REP_PROTOCOL_TX_ENTRY_CLEAR:
6826 			break;
6827 
6828 		case REP_PROTOCOL_TX_ENTRY_REPLACE:
6829 			/* Check type */
6830 			(void) pthread_mutex_lock(&pg->rn_lock);
6831 			if (rc_node_find_named_child(pg,
6832 			    (const char *)cmds[0].rptc_data,
6833 			    REP_PROTOCOL_ENTITY_PROPERTY, &prop) ==
6834 			    REP_PROTOCOL_SUCCESS) {
6835 				ok = (prop != NULL &&
6836 				    prop->rn_valtype == cmds[0].rptc_type);
6837 			} else {
6838 				/* Return more particular error? */
6839 				ok = B_FALSE;
6840 			}
6841 			(void) pthread_mutex_unlock(&pg->rn_lock);
6842 			if (ok)
6843 				break;
6844 			return (0);
6845 
6846 		default:
6847 			return (0);
6848 		}
6849 
6850 		if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_MODIFY)
6851 		    == 0)
6852 			return (0);
6853 
6854 		loc += sz;
6855 		cmds_sz -= sz;
6856 	}
6857 
6858 	return (1);
6859 }
6860 
6861 /*
6862  * Return 1 if any of the given transaction commands affect
6863  * "action_authorization".  Return -1 if any of the commands are invalid and
6864  * 0 in all other cases.
6865  */
6866 static int
6867 tx_modifies_action(const void *cmds_arg, size_t cmds_sz)
6868 {
6869 	const struct rep_protocol_transaction_cmd *cmds;
6870 	uintptr_t loc;
6871 	uint32_t sz;
6872 
6873 	loc = (uintptr_t)cmds_arg;
6874 
6875 	while (cmds_sz > 0) {
6876 		cmds = (struct rep_protocol_transaction_cmd *)loc;
6877 
6878 		if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6879 			return (-1);
6880 
6881 		sz = cmds->rptc_size;
6882 		if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6883 			return (-1);
6884 
6885 		sz = TX_SIZE(sz);
6886 		if (sz > cmds_sz)
6887 			return (-1);
6888 
6889 		if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_ACTION)
6890 		    == 0)
6891 			return (1);
6892 
6893 		loc += sz;
6894 		cmds_sz -= sz;
6895 	}
6896 
6897 	return (0);
6898 }
6899 
6900 /*
6901  * Return 1 if the given transaction commands only modify properties named
6902  * 'enabled'.  Return -1 if any of the commands are invalid and 0 otherwise.
6903  */
6904 static int
6905 tx_only_enabled(const void *cmds_arg, size_t cmds_sz)
6906 {
6907 	const struct rep_protocol_transaction_cmd *cmd;
6908 	uintptr_t loc;
6909 	uint32_t sz;
6910 
6911 	loc = (uintptr_t)cmds_arg;
6912 
6913 	while (cmds_sz > 0) {
6914 		cmd = (struct rep_protocol_transaction_cmd *)loc;
6915 
6916 		if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6917 			return (-1);
6918 
6919 		sz = cmd->rptc_size;
6920 		if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6921 			return (-1);
6922 
6923 		sz = TX_SIZE(sz);
6924 		if (sz > cmds_sz)
6925 			return (-1);
6926 
6927 		if (strcmp((const char *)cmd->rptc_data, AUTH_PROP_ENABLED)
6928 		    != 0)
6929 			return (0);
6930 
6931 		loc += sz;
6932 		cmds_sz -= sz;
6933 	}
6934 
6935 	return (1);
6936 }
6937 
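/*
 * Commit the transaction commands in cmds against the property group
 * referenced by txp.  If rc_node_setup_tx() did not settle the
 * authorization decision, the permission check is done here, based on the
 * contents of the transaction, and audit events are generated either way.
 * On success, a new node carrying the next generation replaces the old
 * property group node (which is marked RC_NODE_OLD), registered
 * notifications are fired, and txp is cleared.
 */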
6938 int
6939 rc_tx_commit(rc_node_ptr_t *txp, const void *cmds, size_t cmds_sz)
6940 {
6941 	rc_node_t *np = txp->rnp_node;
6942 	rc_node_t *pp;
6943 	rc_node_t *nnp;
6944 	rc_node_pg_notify_t *pnp;
6945 	int rc;
6946 	permcheck_t *pcp;
6947 	perm_status_t granted;
6948 	int normal;
6949 	char *pg_fmri = NULL;
6950 	char *auth_string = NULL;
6951 	int auth_status = ADT_SUCCESS;
6952 	int auth_ret_value = ADT_SUCCESS;
6953 	size_t sz_out;
6954 	int tx_flag = 1;
6955 	tx_commit_data_t *tx_data = NULL;
6956 
6957 	RC_NODE_CHECK(np);
6958 
6959 	if ((txp->rnp_authorized != RC_AUTH_UNKNOWN) &&
6960 	    (txp->rnp_auth_string != NULL)) {
6961 		auth_string = strdup(txp->rnp_auth_string);
6962 		if (auth_string == NULL)
6963 			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6964 	}
6965 
6966 	if ((txp->rnp_authorized == RC_AUTH_UNKNOWN) &&
6967 	    is_main_repository) {
6968 #ifdef NATIVE_BUILD
6969 		if (!client_is_privileged()) {
6970 			return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6971 		}
6972 #else
6973 		/* permission check: depends on contents of transaction */
6974 		pcp = pc_create();
6975 		if (pcp == NULL)
6976 			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6977 
6978 		/* If normal is cleared, we won't do the normal checks. */
6979 		normal = 1;
6980 		rc = REP_PROTOCOL_SUCCESS;
6981 
6982 		if (strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
6983 		    strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0) {
6984 			/* Touching general[framework]/action_authorization? */
6985 			rc = tx_modifies_action(cmds, cmds_sz);
6986 			if (rc == -1) {
6987 				pc_free(pcp);
6988 				return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6989 			}
6990 
6991 			if (rc) {
6992 				/*
6993 				 * Yes: only AUTH_MODIFY and AUTH_MANAGE
6994 				 * can be used.
6995 				 */
6996 				rc = perm_add_enabling(pcp, AUTH_MODIFY);
6997 
6998 				if (rc == REP_PROTOCOL_SUCCESS)
6999 					rc = perm_add_enabling(pcp,
7000 					    AUTH_MANAGE);
7001 
7002 				normal = 0;
7003 			} else {
7004 				rc = REP_PROTOCOL_SUCCESS;
7005 			}
7006 		} else if (np->rn_id.rl_ids[ID_INSTANCE] != 0 &&
7007 		    strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
7008 		    strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0) {
7009 			rc_node_t *instn;
7010 
7011 			rc = tx_only_enabled(cmds, cmds_sz);
7012 			if (rc == -1) {
7013 				pc_free(pcp);
7014 				return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7015 			}
7016 
7017 			if (rc) {
7018 				rc = rc_node_parent(np, &instn);
7019 				if (rc != REP_PROTOCOL_SUCCESS) {
7020 					assert(rc == REP_PROTOCOL_FAIL_DELETED);
7021 					pc_free(pcp);
7022 					return (rc);
7023 				}
7024 
7025 				assert(instn->rn_id.rl_type ==
7026 				    REP_PROTOCOL_ENTITY_INSTANCE);
7027 
7028 				rc = perm_add_inst_action_auth(pcp, instn);
7029 				rc_node_rele(instn);
7030 				switch (rc) {
7031 				case REP_PROTOCOL_SUCCESS:
7032 					break;
7033 
7034 				case REP_PROTOCOL_FAIL_DELETED:
7035 				case REP_PROTOCOL_FAIL_NO_RESOURCES:
7036 					pc_free(pcp);
7037 					return (rc);
7038 
7039 				default:
7040 					bad_error("perm_add_inst_action_auth",
7041 					    rc);
7042 				}
7043 			} else {
7044 				rc = REP_PROTOCOL_SUCCESS;
7045 			}
7046 		}
7047 
7048 		if (rc == REP_PROTOCOL_SUCCESS && normal) {
7049 			rc = perm_add_enabling(pcp, AUTH_MODIFY);
7050 
7051 			if (rc == REP_PROTOCOL_SUCCESS) {
7052 				/* Add pgtype-specific authorization. */
7053 				const char * const auth =
7054 				    perm_auth_for_pgtype(np->rn_type);
7055 
7056 				if (auth != NULL)
7057 					rc = perm_add_enabling(pcp, auth);
7058 			}
7059 
7060 			/* Add pg-specific modify_authorization auths. */
7061 			if (rc == REP_PROTOCOL_SUCCESS)
7062 				rc = perm_add_enabling_values(pcp, np,
7063 				    AUTH_PROP_MODIFY);
7064 
7065 			/* If value_authorization values are ok, add them. */
7066 			if (rc == REP_PROTOCOL_SUCCESS) {
7067 				rc = tx_allow_value(cmds, cmds_sz, np);
7068 				if (rc == -1)
7069 					rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
7070 				else if (rc)
7071 					rc = perm_add_enabling_values(pcp, np,
7072 					    AUTH_PROP_VALUE);
7073 			}
7074 		}
7075 
7076 		if (rc == REP_PROTOCOL_SUCCESS) {
7077 			granted = perm_granted(pcp);
7078 			rc = map_granted_status(granted, pcp, &auth_string);
7079 			if ((granted == PERM_DENIED) && auth_string) {
7080 				/*
7081 				 * _PERMISSION_DENIED should not cause us
7082 				 * to exit at this point, because we still
7083 				 * want to generate an audit event.
7084 				 */
7085 				rc = REP_PROTOCOL_SUCCESS;
7086 			}
7087 		}
7088 
7089 		pc_free(pcp);
7090 
7091 		if (rc != REP_PROTOCOL_SUCCESS)
7092 			goto cleanout;
7093 
7094 		if (granted == PERM_DENIED) {
7095 			auth_status = ADT_FAILURE;
7096 			auth_ret_value = ADT_FAIL_VALUE_AUTH;
7097 			tx_flag = 0;
7098 		}
7099 #endif /* NATIVE_BUILD */
7100 	} else if (txp->rnp_authorized == RC_AUTH_FAILED) {
7101 		auth_status = ADT_FAILURE;
7102 		auth_ret_value = ADT_FAIL_VALUE_AUTH;
7103 		tx_flag = 0;
7104 	}
7105 
7106 	pg_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
7107 	if (pg_fmri == NULL) {
7108 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7109 		goto cleanout;
7110 	}
7111 	if ((rc = rc_node_get_fmri_or_fragment(np, pg_fmri,
7112 	    REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
7113 		goto cleanout;
7114 	}
7115 
7116 	/*
7117 	 * Parse the transaction commands into a useful form.
7118 	 */
7119 	if ((rc = tx_commit_data_new(cmds, cmds_sz, &tx_data)) !=
7120 	    REP_PROTOCOL_SUCCESS) {
7121 		goto cleanout;
7122 	}
7123 
7124 	if (tx_flag == 0) {
7125 		/* Authorization failed.  Generate audit events. */
7126 		generate_property_events(tx_data, pg_fmri, auth_string,
7127 		    auth_status, auth_ret_value);
7128 		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
7129 		goto cleanout;
7130 	}
7131 
7132 	nnp = rc_node_alloc();
7133 	if (nnp == NULL) {
7134 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7135 		goto cleanout;
7136 	}
7137 
7138 	nnp->rn_id = np->rn_id;			/* structure assignment */
7139 	nnp->rn_hash = np->rn_hash;
7140 	nnp->rn_name = strdup(np->rn_name);
7141 	nnp->rn_type = strdup(np->rn_type);
7142 	nnp->rn_pgflags = np->rn_pgflags;
7143 
7144 	nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
7145 
7146 	if (nnp->rn_name == NULL || nnp->rn_type == NULL) {
7147 		rc_node_destroy(nnp);
7148 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7149 		goto cleanout;
7150 	}
7151 
7152 	(void) pthread_mutex_lock(&np->rn_lock);
7153 
7154 	/*
7155 	 * We must have all of the old properties in the cache, or the
7156 	 * database deletions could cause inconsistencies.
7157 	 */
7158 	if ((rc = rc_node_fill_children(np, REP_PROTOCOL_ENTITY_PROPERTY)) !=
7159 	    REP_PROTOCOL_SUCCESS) {
7160 		(void) pthread_mutex_unlock(&np->rn_lock);
7161 		rc_node_destroy(nnp);
7162 		goto cleanout;
7163 	}
7164 
7165 	if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
7166 		(void) pthread_mutex_unlock(&np->rn_lock);
7167 		rc_node_destroy(nnp);
7168 		rc = REP_PROTOCOL_FAIL_DELETED;
7169 		goto cleanout;
7170 	}
7171 
7172 	if (np->rn_flags & RC_NODE_OLD) {
7173 		rc_node_rele_flag(np, RC_NODE_USING_PARENT);
7174 		(void) pthread_mutex_unlock(&np->rn_lock);
7175 		rc_node_destroy(nnp);
7176 		rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7177 		goto cleanout;
7178 	}
7179 
7180 	pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
7181 	if (pp == NULL) {
7182 		/* our parent is gone, we're going next... */
7183 		rc_node_destroy(nnp);
7184 		(void) pthread_mutex_lock(&np->rn_lock);
7185 		if (np->rn_flags & RC_NODE_OLD) {
7186 			(void) pthread_mutex_unlock(&np->rn_lock);
7187 			rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7188 			goto cleanout;
7189 		}
7190 		(void) pthread_mutex_unlock(&np->rn_lock);
7191 		rc = REP_PROTOCOL_FAIL_DELETED;
7192 		goto cleanout;
7193 	}
7194 	(void) pthread_mutex_unlock(&pp->rn_lock);
7195 
7196 	/*
7197 	 * prepare for the transaction
7198 	 */
7199 	(void) pthread_mutex_lock(&np->rn_lock);
7200 	if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
7201 		(void) pthread_mutex_unlock(&np->rn_lock);
7202 		(void) pthread_mutex_lock(&pp->rn_lock);
7203 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7204 		(void) pthread_mutex_unlock(&pp->rn_lock);
7205 		rc_node_destroy(nnp);
7206 		rc = REP_PROTOCOL_FAIL_DELETED;
7207 		goto cleanout;
7208 	}
7209 	nnp->rn_gen_id = np->rn_gen_id;
7210 	(void) pthread_mutex_unlock(&np->rn_lock);
7211 
7212 	/* Sets nnp->rn_gen_id on success. */
7213 	rc = object_tx_commit(&np->rn_id, tx_data, &nnp->rn_gen_id);
7214 
7215 	(void) pthread_mutex_lock(&np->rn_lock);
7216 	if (rc != REP_PROTOCOL_SUCCESS) {
7217 		rc_node_rele_flag(np, RC_NODE_IN_TX);
7218 		(void) pthread_mutex_unlock(&np->rn_lock);
7219 		(void) pthread_mutex_lock(&pp->rn_lock);
7220 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7221 		(void) pthread_mutex_unlock(&pp->rn_lock);
7222 		rc_node_destroy(nnp);
7223 		rc_node_clear(txp, 0);
7224 		if (rc == REP_PROTOCOL_DONE)
7225 			rc = REP_PROTOCOL_SUCCESS; /* successful empty tx */
7226 		goto cleanout;
7227 	}
7228 
7229 	/*
7230 	 * Notify waiters
7231 	 */
7232 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7233 	while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
7234 		rc_pg_notify_fire(pnp);
7235 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7236 
7237 	np->rn_flags |= RC_NODE_OLD;
7238 	(void) pthread_mutex_unlock(&np->rn_lock);
7239 
7240 	rc_notify_remove_node(np);
7241 
7242 	/*
7243 	 * replace np with nnp
7244 	 */
7245 	rc_node_relink_child(pp, np, nnp);
7246 
7247 	/*
7248 	 * all done -- clear the transaction.
7249 	 */
7250 	rc_node_clear(txp, 0);
7251 	generate_property_events(tx_data, pg_fmri, auth_string,
7252 	    auth_status, auth_ret_value);
7253 
7254 	rc = REP_PROTOCOL_SUCCESS;
7255 
7256 cleanout:
7257 	free(auth_string);
7258 	free(pg_fmri);
7259 	tx_commit_data_free(tx_data);
7260 	return (rc);
7261 }
7262 
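/*
 * Initialize a property group notification structure:  set up its list
 * node and mark it as not yet attached to any property group or fd.
 */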
7263 void
7264 rc_pg_notify_init(rc_node_pg_notify_t *pnp)
7265 {
7266 	uu_list_node_init(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
7267 	pnp->rnpn_pg = NULL;
7268 	pnp->rnpn_fd = -1;
7269 }
7270 
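/*
 * Register pnp (with file descriptor fd) for notification of changes to
 * the property group referenced by npp.  Fails if npp does not refer to a
 * property group (_BAD_REQUEST), if the node has been deleted (_DELETED),
 * or if it has been superseded by a newer version (_NOT_LATEST).  Any
 * earlier registration of pnp is fired first.
 */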
7271 int
7272 rc_pg_notify_setup(rc_node_pg_notify_t *pnp, rc_node_ptr_t *npp, int fd)
7273 {
7274 	rc_node_t *np;
7275 
7276 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
7277 
7278 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
7279 		(void) pthread_mutex_unlock(&np->rn_lock);
7280 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7281 	}
7282 
7283 	/*
7284 	 * wait for any transaction in progress to complete
7285 	 */
7286 	if (!rc_node_wait_flag(np, RC_NODE_IN_TX)) {
7287 		(void) pthread_mutex_unlock(&np->rn_lock);
7288 		return (REP_PROTOCOL_FAIL_DELETED);
7289 	}
7290 
7291 	if (np->rn_flags & RC_NODE_OLD) {
7292 		(void) pthread_mutex_unlock(&np->rn_lock);
7293 		return (REP_PROTOCOL_FAIL_NOT_LATEST);
7294 	}
7295 
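	/*
	 * Fire any existing registration for pnp, then attach it to np
	 * and queue it on the property group's notify list.
	 */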
7296 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7297 	rc_pg_notify_fire(pnp);
7298 	pnp->rnpn_pg = np;
7299 	pnp->rnpn_fd = fd;
7300 	(void) uu_list_insert_after(np->rn_pg_notify_list, NULL, pnp);
7301 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7302 
7303 	(void) pthread_mutex_unlock(&np->rn_lock);
7304 	return (REP_PROTOCOL_SUCCESS);
7305 }
7306 
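/*
 * Tear down a property group notification structure, firing any
 * outstanding registration first.
 */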
7307 void
7308 rc_pg_notify_fini(rc_node_pg_notify_t *pnp)
7309 {
7310 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7311 	rc_pg_notify_fire(pnp);
7312 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7313 
7314 	uu_list_node_fini(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
7315 }
7316 
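/*
 * Initialize a notification client:  set up its list nodes, point its
 * embedded marker notification back at itself, clear its name and type
 * watch lists, and initialize its condition variable.
 */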
7317 void
7318 rc_notify_info_init(rc_notify_info_t *rnip)
7319 {
7320 	int i;
7321 
7322 	uu_list_node_init(rnip, &rnip->rni_list_node, rc_notify_info_pool);
7323 	uu_list_node_init(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
7324 	    rc_notify_pool);
7325 
7326 	rnip->rni_notify.rcn_node = NULL;
7327 	rnip->rni_notify.rcn_info = rnip;
7328 
7329 	bzero(rnip->rni_namelist, sizeof (rnip->rni_namelist));
7330 	bzero(rnip->rni_typelist, sizeof (rnip->rni_typelist));
7331 
7332 	(void) pthread_cond_init(&rnip->rni_cv, NULL);
7333 
7334 	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7335 		rnip->rni_namelist[i] = NULL;
7336 		rnip->rni_typelist[i] = NULL;
7337 	}
7338 }
7339 
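/*
 * Activate a notification client:  mark it RC_NOTIFY_ACTIVE, add it to
 * rc_notify_info_list, and append its marker notification to
 * rc_notify_list.  Caller must hold rc_pg_notify_lock.
 */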
7340 static void
7341 rc_notify_info_insert_locked(rc_notify_info_t *rnip)
7342 {
7343 	assert(MUTEX_HELD(&rc_pg_notify_lock));
7344 
7345 	assert(!(rnip->rni_flags & RC_NOTIFY_ACTIVE));
7346 
7347 	rnip->rni_flags |= RC_NOTIFY_ACTIVE;
7348 	(void) uu_list_insert_after(rc_notify_info_list, NULL, rnip);
7349 	(void) uu_list_insert_before(rc_notify_list, NULL, &rnip->rni_notify);
7350 }
7351 
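/*
 * Deactivate a notification client:  set RC_NOTIFY_DRAIN to flush out
 * waiters, remove it from rc_notify_info_list, and take its marker off
 * rc_notify_list, discarding any notifications immediately after the
 * marker that no remaining client will clean up.  Waits for all waiters
 * to leave before clearing the DRAIN and ACTIVE flags.  Caller must hold
 * rc_pg_notify_lock.
 */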
7352 static void
7353 rc_notify_info_remove_locked(rc_notify_info_t *rnip)
7354 {
7355 	rc_notify_t *me = &rnip->rni_notify;
7356 	rc_notify_t *np;
7357 
7358 	assert(MUTEX_HELD(&rc_pg_notify_lock));
7359 
7360 	assert(rnip->rni_flags & RC_NOTIFY_ACTIVE);
7361 
7362 	assert(!(rnip->rni_flags & RC_NOTIFY_DRAIN));
7363 	rnip->rni_flags |= RC_NOTIFY_DRAIN;
7364 	(void) pthread_cond_broadcast(&rnip->rni_cv);
7365 
7366 	(void) uu_list_remove(rc_notify_info_list, rnip);
7367 
7368 	/*
7369 	 * clean up any notifications at the beginning of the list
7370 	 */
7371 	if (uu_list_first(rc_notify_list) == me) {
7372 		while ((np = uu_list_next(rc_notify_list, me)) != NULL &&
7373 		    np->rcn_info == NULL)
7374 			rc_notify_remove_locked(np);
7375 	}
7376 	(void) uu_list_remove(rc_notify_list, me);
7377 
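	/*
	 * Wake any threads blocked in rc_notify_info_wait() so they can
	 * see RC_NOTIFY_DRAIN, and wait for them all to leave.
	 */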
7378 	while (rnip->rni_waiters) {
7379 		(void) pthread_cond_broadcast(&rc_pg_notify_cv);
7380 		(void) pthread_cond_broadcast(&rnip->rni_cv);
7381 		(void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
7382 	}
7383 
7384 	rnip->rni_flags &= ~(RC_NOTIFY_DRAIN | RC_NOTIFY_ACTIVE);
7385 }
7386 
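/*
 * Add a property group name (or type) to one of rnip's watch arrays.
 * The name is validated, duplicates are ignored, and the client is
 * activated if it isn't already.  Fails with _NO_RESOURCES if memory
 * can't be allocated or the array (RC_NOTIFY_MAX_NAMES entries) is full.
 */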
7387 static int
7388 rc_notify_info_add_watch(rc_notify_info_t *rnip, const char **arr,
7389     const char *name)
7390 {
7391 	int i;
7392 	int rc;
7393 	char *f;
7394 
7395 	rc = rc_check_type_name(REP_PROTOCOL_ENTITY_PROPERTYGRP, name);
7396 	if (rc != REP_PROTOCOL_SUCCESS)
7397 		return (rc);
7398 
7399 	f = strdup(name);
7400 	if (f == NULL)
7401 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7402 
7403 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7404 
7405 	while (rnip->rni_flags & RC_NOTIFY_EMPTYING)
7406 		(void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
7407 
7408 	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7409 		if (arr[i] == NULL)
7410 			break;
7411 
7412 		/*
7413 		 * Don't add the name if it's already being tracked.
7414 		 */
7415 		if (strcmp(arr[i], f) == 0) {
7416 			free(f);
7417 			goto out;
7418 		}
7419 	}
7420 
7421 	if (i == RC_NOTIFY_MAX_NAMES) {
7422 		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7423 		free(f);
7424 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7425 	}
7426 
7427 	arr[i] = f;
7428 
7429 out:
7430 	if (!(rnip->rni_flags & RC_NOTIFY_ACTIVE))
7431 		rc_notify_info_insert_locked(rnip);
7432 
7433 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7434 	return (REP_PROTOCOL_SUCCESS);
7435 }
7436 
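/*
 * Thin wrappers around rc_notify_info_add_watch() for watching property
 * groups by name and by type, respectively.
 */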
7437 int
7438 rc_notify_info_add_name(rc_notify_info_t *rnip, const char *name)
7439 {
7440 	return (rc_notify_info_add_watch(rnip, rnip->rni_namelist, name));
7441 }
7442 
7443 int
7444 rc_notify_info_add_type(rc_notify_info_t *rnip, const char *type)
7445 {
7446 	return (rc_notify_info_add_watch(rnip, rnip->rni_typelist, type));
7447 }
7448 
7449 /*
7450  * Wait for and report an event of interest to rnip, a notification client
7451  */
7452 int
7453 rc_notify_info_wait(rc_notify_info_t *rnip, rc_node_ptr_t *out,
7454     char *outp, size_t sz)
7455 {
7456 	rc_notify_t *np;
7457 	rc_notify_t *me = &rnip->rni_notify;
7458 	rc_node_t *nnp;
7459 	rc_notify_delete_t *ndp;
7460 
7461 	int am_first_info;
7462 
7463 	if (sz > 0)
7464 		outp[0] = 0;
7465 
7466 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7467 
7468 	while ((rnip->rni_flags & (RC_NOTIFY_ACTIVE | RC_NOTIFY_DRAIN)) ==
7469 	    RC_NOTIFY_ACTIVE) {
7470 		/*
7471 		 * If I'm first on the notify list, it is my job to
7472 		 * clean up any notifications I pass by.  I can't do that
7473 		 * if someone is blocking the list from removals, so I
7474 		 * have to wait until they have all drained.
7475 		 */
7476 		am_first_info = (uu_list_first(rc_notify_list) == me);
7477 		if (am_first_info && rc_notify_in_use) {
7478 			rnip->rni_waiters++;
7479 			(void) pthread_cond_wait(&rc_pg_notify_cv,
7480 			    &rc_pg_notify_lock);
7481 			rnip->rni_waiters--;
7482 			continue;
7483 		}
7484 
7485 		/*
7486 		 * Search the list for a node of interest.
7487 		 */
7488 		np = uu_list_next(rc_notify_list, me);
7489 		while (np != NULL && !rc_notify_info_interested(rnip, np)) {
7490 			rc_notify_t *next = uu_list_next(rc_notify_list, np);
7491 
7492 			if (am_first_info) {
7493 				if (np->rcn_info) {
7494 					/*
7495 					 * Passing another client -- stop
7496 					 * cleaning up notifications
7497 					 */
7498 					am_first_info = 0;
7499 				} else {
7500 					rc_notify_remove_locked(np);
7501 				}
7502 			}
7503 			np = next;
7504 		}
7505 
7506 		/*
7507 		 * Nothing of interest -- wait for notification
7508 		 */
7509 		if (np == NULL) {
7510 			rnip->rni_waiters++;
7511 			(void) pthread_cond_wait(&rnip->rni_cv,
7512 			    &rc_pg_notify_lock);
7513 			rnip->rni_waiters--;
7514 			continue;
7515 		}
7516 
7517 		/*
7518 		 * found something to report -- move myself after the
7519 		 * notification and process it.
7520 		 */
7521 		(void) uu_list_remove(rc_notify_list, me);
7522 		(void) uu_list_insert_after(rc_notify_list, np, me);
7523 
7524 		if ((ndp = np->rcn_delete) != NULL) {
7525 			(void) strlcpy(outp, ndp->rnd_fmri, sz);
7526 			if (am_first_info)
7527 				rc_notify_remove_locked(np);
7528 			(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7529 			rc_node_clear(out, 0);
7530 			return (REP_PROTOCOL_SUCCESS);
7531 		}
7532 
7533 		nnp = np->rcn_node;
7534 		assert(nnp != NULL);
7535 
7536 		/*
7537 		 * We can't bump nnp's reference count without grabbing its
7538 		 * lock, and rc_pg_notify_lock is a leaf lock.  So we
7539 		 * temporarily block all removals to keep nnp from
7540 		 * disappearing.
7541 		 */
7542 		rc_notify_in_use++;
7543 		assert(rc_notify_in_use > 0);
7544 		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7545 
7546 		rc_node_assign(out, nnp);
7547 
7548 		(void) pthread_mutex_lock(&rc_pg_notify_lock);
7549 		assert(rc_notify_in_use > 0);
7550 		rc_notify_in_use--;
7551 		if (am_first_info)
7552 			rc_notify_remove_locked(np);
7553 		if (rc_notify_in_use == 0)
7554 			(void) pthread_cond_broadcast(&rc_pg_notify_cv);
7555 		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7556 
7557 		return (REP_PROTOCOL_SUCCESS);
7558 	}
7559 	/*
7560 	 * If we're the last one out, let people know it's clear.
7561 	 */
7562 	if (rnip->rni_waiters == 0)
7563 		(void) pthread_cond_broadcast(&rnip->rni_cv);
7564 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7565 	return (REP_PROTOCOL_DONE);
7566 }
7567 
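/*
 * Deactivate rnip and empty its name and type watch lists.  The
 * RC_NOTIFY_EMPTYING flag keeps rc_notify_info_add_watch() out while the
 * lists are freed without rc_pg_notify_lock held.
 */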
7568 static void
7569 rc_notify_info_reset(rc_notify_info_t *rnip)
7570 {
7571 	int i;
7572 
7573 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7574 	if (rnip->rni_flags & RC_NOTIFY_ACTIVE)
7575 		rc_notify_info_remove_locked(rnip);
7576 	assert(!(rnip->rni_flags & (RC_NOTIFY_DRAIN | RC_NOTIFY_EMPTYING)));
7577 	rnip->rni_flags |= RC_NOTIFY_EMPTYING;
7578 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7579 
7580 	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7581 		if (rnip->rni_namelist[i] != NULL) {
7582 			free((void *)rnip->rni_namelist[i]);
7583 			rnip->rni_namelist[i] = NULL;
7584 		}
7585 		if (rnip->rni_typelist[i] != NULL) {
7586 			free((void *)rnip->rni_typelist[i]);
7587 			rnip->rni_typelist[i] = NULL;
7588 		}
7589 	}
7590 
7591 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7592 	rnip->rni_flags &= ~RC_NOTIFY_EMPTYING;
7593 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7594 }
7595 
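/*
 * Tear down a notification client:  reset it and release its list nodes.
 */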
7596 void
7597 rc_notify_info_fini(rc_notify_info_t *rnip)
7598 {
7599 	rc_notify_info_reset(rnip);
7600 
7601 	uu_list_node_fini(rnip, &rnip->rni_list_node, rc_notify_info_pool);
7602 	uu_list_node_fini(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
7603 	    rc_notify_pool);
7604 }
7605