xref: /titanic_41/usr/src/cmd/svc/configd/rc_node.c (revision e11c3f44f531fdff80941ce57c065d2ae861cefc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * rc_node.c - In-memory SCF object management
31  *
32  * This layer manages the in-memory cache (the Repository Cache) of SCF
33  * data.  Read requests are usually satisfied from here, but may require
34  * load calls to the "object" layer.  Modify requests always write-through
35  * to the object layer.
36  *
37  * SCF data comprises scopes, services, instances, snapshots, snaplevels,
38  * property groups, properties, and property values.  All but the last are
39  * known here as "entities" and are represented by rc_node_t data
40  * structures.  (Property values are kept in the rn_values member of the
41  * respective property, not as separate objects.)  All entities besides
42  * the "localhost" scope have some entity as a parent, and therefore form
43  * a tree.
44  *
45  * The entity tree is rooted at rc_scope, which rc_node_init() initializes to
46  * the "localhost" scope.  The tree is filled in from the database on-demand
47  * by rc_node_fill_children().
48  *
49  * rc_node_t's are also placed in the cache_hash[] hash table, for rapid
50  * lookup.
51  *
52  * Multiple threads may service client requests, so access to each
53  * rc_node_t is synchronized by its rn_lock member.  Some fields are
54  * protected by bits in the rn_flags field instead, to support operations
55  * which need to drop rn_lock, for example to respect locking order.  Such
56  * flags should be manipulated with the rc_node_{hold,rele}_flag()
57  * functions.
58  *
59  * We track references to nodes to tell when they can be free()d.  rn_refs
60  * should be incremented with rc_node_hold() on the creation of client
61  * references (rc_node_ptr_t's and rc_iter_t's).  rn_erefs ("ephemeral
62  * references") should be incremented when a pointer is read into a local
63  * variable of a thread, with rc_node_hold_ephemeral_locked().  This
64  * hasn't been fully implemented, however, so rc_node_rele() tolerates
65  * rn_erefs being 0.  Some code which predates rn_erefs counts ephemeral
66  * references in rn_refs.  Other references are tracked by the
67  * rn_other_refs field and the RC_NODE_DEAD, RC_NODE_IN_PARENT,
68  * RC_NODE_OLD, and RC_NODE_ON_FORMER flags.
69  *
70  * Locking rules: To dereference an rc_node_t * (usually to lock it), you must
71  * have a hold (rc_node_hold()) on it or otherwise be sure that it hasn't been
72  * rc_node_destroy()ed (hold a lock on its parent or child, hold a flag,
73  * etc.).  Once you have locked an rc_node_t you must check its rn_flags for
74  * RC_NODE_DEAD before you can use it.  This is usually done with the
75  * rc_node_{wait,hold}_flag() functions (often via the rc_node_check_*()
76  * functions & RC_NODE_*() macros), which fail if the object has died.
77  *
78  * When a transactional node (property group or snapshot) is updated,
79  * a new node takes the place of the old node in the global hash and the
80  * old node is hung off of the rn_former list of the new node.  At the
81  * same time, all of its children have their rn_parent_ref pointer set,
82  * and any holds they have are reflected in the old node's rn_other_refs
83  * count.  This is automatically kept up to date until the final reference
84  * to the subgraph is dropped, at which point the node is unrefed and
85  * destroyed, along with all of its children.
86  *
87  * Because name service lookups may take a long time and, more importantly
88  * may trigger additional accesses to the repository, perm_granted() must be
89  * called without holding any locks.
90  *
91  * An ITER_START for a non-ENTITY_VALUE induces an rc_node_fill_children()
92  * call via rc_node_setup_iter() to populate the rn_children uu_list of the
93  * rc_node_t * in question and a call to uu_list_walk_start() on that list.  For
94  * ITER_READ, rc_iter_next() uses uu_list_walk_next() to find the next
95  * appropriate child.
96  *
97  * An ITER_START for an ENTITY_VALUE makes sure the node has its values
98  * filled, and sets up the iterator.  An ITER_READ_VALUE just copies out
99  * the proper values and updates the offset information.
100  *
101  * To allow aliases, snapshots are implemented with a level of indirection.
102  * A snapshot rc_node_t has a snapid which refers to an rc_snapshot_t in
103  * snapshot.c which contains the authoritative snaplevel information.  The
104  * snapid is "assigned" by rc_attach_snapshot().
105  *
106  * We provide the client layer with rc_node_ptr_t's to reference objects.
107  * Objects referred to by them are automatically held & released by
108  * rc_node_assign() & rc_node_clear().  The RC_NODE_PTR_*() macros are used at
109  * client.c entry points to read the pointers.  They fetch the pointer to the
110  * object, return (from the function) if it is dead, and lock, hold, or hold
111  * a flag of the object.
112  */
113 
114 /*
115  * Permission checking is authorization-based: some operations may only
116  * proceed if the user has been assigned at least one of a set of
117  * authorization strings.  The set of enabling authorizations depends on the
118  * operation and the target object.  The set of authorizations assigned to
119  * a user is determined by reading /etc/security/policy.conf, querying the
120  * user_attr database, and possibly querying the prof_attr database, as per
121  * chkauthattr() in libsecdb.
122  *
123  * The fastest way to decide whether the two sets intersect is by entering the
124  * strings into a hash table and detecting collisions, which takes linear time
125  * in the total size of the sets.  Except for the authorization patterns which
126  * may be assigned to users, which without advanced pattern-matching
127  * algorithms will take O(n) in the number of enabling authorizations, per
128  * pattern.
129  *
130  * We can achieve some practical speed-ups by noting that if we enter all of
131  * the authorizations from one of the sets into the hash table we can merely
132  * check the elements of the second set for existence without adding them.
133  * This reduces memory requirements and hash table clutter.  The enabling set
134  * is well suited for this because it is internal to configd (for now, at
135  * least).  Combine this with short-circuiting and we can even minimize the
136  * number of queries to the security databases (user_attr & prof_attr).
137  *
138  * To force this usage onto clients we provide functions for adding
139  * authorizations to the enabling set of a permission context structure
140  * (perm_add_*()) and one to decide whether the user associated with the
141  * current door call client possesses any of them (perm_granted()).
142  *
143  * At some point, a generic version of this should move to libsecdb.
144  *
145  * While entering the enabling strings into the hash table, we keep track
146  * of which is the most specific for use in generating auditing events.
147  * See the "Collecting the Authorization String" section of the "SMF Audit
148  * Events" block comment below.
149  */
150 
151 /*
152  * Composition is the combination of sets of properties.  The sets are ordered
153  * and properties in higher sets obscure properties of the same name in lower
154  * sets.  Here we present a composed view of an instance's properties as the
155  * union of its properties and its service's properties.  Similarly the
156  * properties of snaplevels are combined to form a composed view of the
157  * properties of a snapshot (which should match the composed view of the
158  * properties of the instance when the snapshot was taken).
159  *
160  * In terms of the client interface, the client may request that a property
161  * group iterator for an instance or snapshot be composed.  Property groups
162  * traversed by such an iterator may not have the target entity as a parent.
163  * Similarly, the properties traversed by a property iterator for those
164  * property groups may not have the property groups iterated as parents.
165  *
166  * Implementation requires that iterators for instances and snapshots be
167  * composition-savvy, and that we have a "composed property group" entity
168  * which represents the composition of a number of property groups.  Iteration
169  * over "composed property groups" yields properties which may have different
170  * parents, but for all other operations a composed property group behaves
171  * like the top-most property group it represents.
172  *
173  * The implementation is based on the rn_cchain[] array of rc_node_t pointers
174  * in rc_node_t.  For instances, the pointers point to the instance and its
175  * parent service.  For snapshots they point to the child snaplevels, and for
176  * composed property groups they point to property groups.  A composed
177  * iterator carries an index into rn_cchain[].  Thus most of the magic ends up
178  * in the rc_iter_*() code.
179  */
180 /*
181  * SMF Audit Events:
182  * ================
183  *
184  * To maintain security, SMF generates audit events whenever
185  * privileged operations are attempted.  See the System Administration
186  * Guide:Security Services answerbook for a discussion of the Solaris
187  * audit system.
188  *
189  * The SMF audit event codes are defined in adt_event.h by symbols
190  * starting with ADT_smf_ and are described in audit_event.txt.  The
191  * audit record structures are defined in the SMF section of adt.xml.
192  * adt.xml is used to automatically generate adt_event.h which
193  * contains the definitions that we code to in this file.  For the
194  * most part the audit events map closely to actions that you would
195  * perform with svcadm or svccfg, but there are some special cases
196  * which we'll discuss later.
197  *
198  * The software associated with SMF audit events falls into three
199  * categories:
200  * 	- collecting information to be written to the audit
201  *	  records
202  *	- using the adt_* functions in
203  *	  usr/src/lib/libbsm/common/adt.c to generate the audit
204  *	  records.
205  * 	- handling special cases
206  *
207  * Collecting Information:
208  * ----------------------
209  *
210  * Almost all of the audit events require the FMRI of the affected
211  * object and the authorization string that was used.  The one
212  * exception is ADT_smf_annotation which we'll talk about later.
213  *
214  * Collecting the FMRI:
215  *
216  * The rc_node structure has a member called rn_fmri which points to
217  * its FMRI.  This is initialized by a call to rc_node_build_fmri()
218  * when the node's parent is established.  The reason for doing it
219  * at this time is that a node's FMRI is basically the concatenation
220  * of the parent's FMRI and the node's name with the appropriate
221  * decoration.  rc_node_build_fmri() does this concatenation and
222  * decorating.  It is called from rc_node_link_child() and
223  * rc_node_relink_child() where a node is linked to its parent.
224  *
225  * rc_node_get_fmri_or_fragment() is called to retrieve a node's FMRI
226  * when it is needed.  It returns rn_fmri if it is set.  If the node
227  * is at the top level, however, rn_fmri won't be set because it was
228  * never linked to a parent.  In this case,
229  * rc_node_get_fmri_or_fragment() constructs an FMRI fragment based on
230  * its node type and its name, rn_name.
231  *
232  * Collecting the Authorization String:
233  *
234  * Naturally, the authorization string is captured during the
235  * authorization checking process.  Acceptable authorization strings
236  * are added to a permcheck_t hash table as noted in the section on
237  * permission checking above.  Once all entries have been added to the
238  * hash table, perm_granted() is called.  If the client is authorized,
239  * perm_granted() returns with pc_auth_string of the permcheck_t
240  * structure pointing to the authorization string.
241  *
242  * This works fine if the client is authorized, but what happens if
243  * the client is not authorized?  We need to report the required
244  * authorization string.  This is the authorization that would have
245  * been used if permission had been granted.  perm_granted() will
246  * find no match, so it needs to decide which string in the hash
247  * table to use as the required authorization string.  It needs to do
248  * this, because configd is still going to generate an event.  A
249  * design decision was made to use the most specific authorization
250  * in the hash table.  The pc_auth_type enum designates the
251  * specificity of an authorization string.  For example, an
252  * authorization string that is declared in an instance PG is more
253  * specific than one that is declared in a service PG.
254  *
255  * The pc_add() function keeps track of the most specific
256  * authorization in the hash table.  It does this using the
257  * pc_specific and pc_specific_type members of the permcheck
258  * structure.  pc_add() updates these members whenever a more
259  * specific authorization string is added to the hash table.  Thus, if
260  * an authorization match is not found, perm_granted() will return
261  * with pc_auth_string in the permcheck_t pointing to the string that
262  * is referenced by pc_specific.
263  *
264  * Generating the Audit Events:
265  * ===========================
266  *
267  * As the functions in this file process requests for clients of
268  * configd, they gather the information that is required for an audit
269  * event.  Eventually, the request processing gets to the point where
270  * the authorization is rejected or to the point where the requested
271  * action was attempted.  At these two points smf_audit_event() is
272  * called.
273  *
274  * smf_audit_event() takes 4 parameters:
275  * 	- the event ID which is one of the ADT_smf_* symbols from
276  *	  adt_event.h.
277  * 	- status to pass to adt_put_event()
278  * 	- return value to pass to adt_put_event()
279  * 	- the event data (see audit_event_data structure)
280  *
281  * All interactions with the auditing software require an audit
282  * session.  We use one audit session per configd client.  We keep
283  * track of the audit session in the repcache_client structure.
284  * smf_audit_event() calls get_audit_session() to get the session
285  * pointer.
286  *
287  * smf_audit_event() then calls adt_alloc_event() to allocate an
288  * adt_event_data union which is defined in adt_event.h, copies the
289  * data into the appropriate members of the union and calls
290  * adt_put_event() to generate the event.
291  *
292  * Special Cases:
293  * =============
294  *
295  * There are three major types of special cases:
296  *
297  * 	- gathering event information for each action in a
298  *	  transaction
299  * 	- Higher level events represented by special property
300  *	  group/property name combinations.  Many of these are
301  *	  restarter actions.
302  * 	- ADT_smf_annotation event
303  *
304  * Processing Transaction Actions:
305  * ------------------------------
306  *
307  * A transaction can contain multiple actions to modify, create or
308  * delete one or more properties.  We need to capture information so
309  * that we can generate an event for each property action.  The
310  * transaction information is stored in a tx_commit_data_t, and
311  * object.c provides accessor functions to retrieve data from this
312  * structure.  rc_tx_commit() obtains a tx_commit_data_t by calling
313  * tx_commit_data_new() and passes this to object_tx_commit() to
314  * commit the transaction.  Then we call generate_property_events() to
315  * generate an audit event for each property action.
316  *
317  * Special Properties:
318  * ------------------
319  *
320  * There are combinations of property group/property name that are special.
321  * They are special because they have specific meaning to startd.  startd
322  * interprets them in a service-independent fashion.
323  * restarter_actions/refresh and general/enabled are two examples of these.
324  * A special event is generated for these properties in addition to the
325  * regular property event described in the previous section.  The special
326  * properties are declared as an array of audit_special_prop_item
327  * structures at special_props_list in rc_node.c.
328  *
329  * In the previous section, we mentioned the
330  * generate_property_event() function that generates an event for
331  * every property action.  Before generating the event,
332  * generate_property_event() calls special_property_event().
333  * special_property_event() checks to see if the action involves a
334  * special property.  If it does, it generates a special audit
335  * event.
336  *
337  * ADT_smf_annotation event:
338  * ------------------------
339  *
340  * This is a special event unlike any other.  It allows the svccfg
341  * program to store an annotation in the event log before a series
342  * of transactions is processed.  It is used with the import and
343  * apply svccfg commands.  svccfg uses the rep_protocol_annotation
344  * message to pass the operation (import or apply) and the file name
345  * to configd.  The set_annotation() function in client.c stores
346  * these away in the a repcache_client structure.  The address of
347  * this structure is saved in the thread_info structure.
348  *
349  * Before it generates any events, smf_audit_event() calls
350  * smf_annotation_event().  smf_annotation_event() calls
351  * client_annotation_needed() which is defined in client.c.  If an
352  * annotation is needed client_annotation_needed() returns the
353  * operation and filename strings that were saved from the
354  * rep_protocol_annotation message.  smf_annotation_event() then
355  * generates the ADT_smf_annotation event.
356  */
357 
358 #include <assert.h>
359 #include <atomic.h>
360 #include <bsm/adt_event.h>
361 #include <errno.h>
362 #include <libuutil.h>
363 #include <libscf.h>
364 #include <libscf_priv.h>
365 #include <prof_attr.h>
366 #include <pthread.h>
367 #include <pwd.h>
368 #include <stdio.h>
369 #include <stdlib.h>
370 #include <strings.h>
371 #include <sys/types.h>
372 #include <syslog.h>
373 #include <unistd.h>
374 #include <user_attr.h>
375 
376 #include "configd.h"
377 
/*
 * Authorization strings and property-group/property names used by the
 * permission checks in this file.
 */
#define	AUTH_PREFIX		"solaris.smf."
#define	AUTH_MANAGE		AUTH_PREFIX "manage"
#define	AUTH_MODIFY		AUTH_PREFIX "modify"
#define	AUTH_MODIFY_PREFIX	AUTH_MODIFY "."
#define	AUTH_PG_ACTIONS		SCF_PG_RESTARTER_ACTIONS
#define	AUTH_PG_ACTIONS_TYPE	SCF_PG_RESTARTER_ACTIONS_TYPE
#define	AUTH_PG_GENERAL		SCF_PG_GENERAL
#define	AUTH_PG_GENERAL_TYPE	SCF_PG_GENERAL_TYPE
#define	AUTH_PG_GENERAL_OVR	SCF_PG_GENERAL_OVR
#define	AUTH_PG_GENERAL_OVR_TYPE  SCF_PG_GENERAL_OVR_TYPE
#define	AUTH_PROP_ACTION	"action_authorization"
#define	AUTH_PROP_ENABLED	"enabled"
#define	AUTH_PROP_MODIFY	"modify_authorization"
#define	AUTH_PROP_VALUE		"value_authorization"
#define	AUTH_PROP_READ		"read_authorization"
/* libsecdb should take care of this. */
#define	RBAC_AUTH_SEP		","

/* maximum number of entries in rt_valid_children[] below */
#define	MAX_VALID_CHILDREN 3
397 
398 /*
399  * The ADT_smf_* symbols may not be defined on the build machine.  Because
400  * of this, we do not want to compile the _smf_aud_event() function when
401  * doing native builds.
402  */
403 #ifdef	NATIVE_BUILD
404 #define	smf_audit_event(i, s, r, d)
405 #else
406 #define	smf_audit_event(i, s, r, d)	_smf_audit_event(i, s, r, d)
407 #endif	/* NATIVE_BUILD */
408 
/*
 * Static metadata describing each entity type; see the rc_types[] table
 * below, which is indexed by REP_PROTOCOL_ENTITY_* value.
 */
typedef struct rc_type_info {
	uint32_t	rt_type;		/* matches array index */
	uint32_t	rt_num_ids;		/* significant slots in rl_ids[] */
	uint32_t	rt_name_flags;		/* UU_NAME_* flags, or RT_NO_NAME */
	uint32_t	rt_valid_children[MAX_VALID_CHILDREN];
						/* entity types allowed as children */
} rc_type_info_t;
415 
#define	RT_NO_NAME	-1U

/*
 * Entity-type metadata table, indexed by REP_PROTOCOL_ENTITY_* value
 * (rt_type matches its index).  rt_num_ids gives the number of
 * significant slots in a lookup's rl_ids[] for the type (used by
 * rc_node_hash() and rc_node_match()); rt_name_flags gives the name
 * validation flags (RT_NO_NAME for unnamed types); rt_valid_children
 * lists the entity types which may appear as children.
 */
static rc_type_info_t rc_types[] = {
	{REP_PROTOCOL_ENTITY_NONE, 0, RT_NO_NAME},
	{REP_PROTOCOL_ENTITY_SCOPE, 0, 0,
	    {REP_PROTOCOL_ENTITY_SERVICE, REP_PROTOCOL_ENTITY_SCOPE}},
	{REP_PROTOCOL_ENTITY_SERVICE, 0, UU_NAME_DOMAIN | UU_NAME_PATH,
	    {REP_PROTOCOL_ENTITY_INSTANCE, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
	{REP_PROTOCOL_ENTITY_INSTANCE, 1, UU_NAME_DOMAIN,
	    {REP_PROTOCOL_ENTITY_SNAPSHOT, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
	{REP_PROTOCOL_ENTITY_SNAPSHOT, 2, UU_NAME_DOMAIN,
	    {REP_PROTOCOL_ENTITY_SNAPLEVEL, REP_PROTOCOL_ENTITY_PROPERTYGRP}},
	{REP_PROTOCOL_ENTITY_SNAPLEVEL, 4, RT_NO_NAME,
	    {REP_PROTOCOL_ENTITY_PROPERTYGRP}},
	{REP_PROTOCOL_ENTITY_PROPERTYGRP, 5, UU_NAME_DOMAIN,
	    {REP_PROTOCOL_ENTITY_PROPERTY}},
	{REP_PROTOCOL_ENTITY_CPROPERTYGRP, 0, UU_NAME_DOMAIN,
	    {REP_PROTOCOL_ENTITY_PROPERTY}},
	{REP_PROTOCOL_ENTITY_PROPERTY, 7, UU_NAME_DOMAIN},
	{-1UL}
};
#define	NUM_TYPES	((sizeof (rc_types) / sizeof (*rc_types)))
438 
/*
 * Element of a permcheck_t hash table: one authorization string, chained
 * off its bucket.  pce_auth is declared [1] in the old pre-C99 trailing
 * array style; presumably the structure is over-allocated to hold the
 * full NUL-terminated string -- the allocation site is not in view here.
 */
struct pc_elt {
	struct pc_elt	*pce_next;	/* next element in this bucket's chain */
	char		pce_auth[1];	/* authorization string (variable length) */
};
444 
/*
 * If an authorization fails, we must decide which of the elements in the
 * permcheck hash table to use in the audit event.  That is to say of all
 * the strings in the hash table, we must choose one and use it in the audit
 * event.  It is desirable to use the most specific string in the audit
 * event.
 *
 * The pc_auth_type enum specifies the types (sources) of authorization
 * strings.  The enum is ordered in increasing specificity; pc_add() uses
 * that ordering to track the most specific entry added to a permcheck_t.
 */
typedef enum pc_auth_type {
	PC_AUTH_NONE = 0,	/* no auth string available. */
	PC_AUTH_SMF,		/* strings coded into SMF. */
	PC_AUTH_SVC,		/* strings specified in PG of a service. */
	PC_AUTH_INST		/* strings specified in PG of an instance. */
} pc_auth_type_t;
461 
/* An authorization set hash table (see the permission-checking comment). */
typedef struct {
	struct pc_elt	**pc_buckets;		/* array of bucket chains */
	uint_t		pc_bnum;		/* number of buckets */
	uint_t		pc_enum;		/* number of elements */
	struct pc_elt	*pc_specific;		/* most specific element */
	pc_auth_type_t	pc_specific_type;	/* type of pc_specific */
	char		*pc_auth_string;	/* authorization string */
						/* for audit events */
} permcheck_t;
472 
/*
 * Structure for holding audit event data.  Not all events use all members
 * of the structure.  See the "SMF Audit Events" block comment above for
 * how this information is collected.
 */
typedef struct audit_event_data {
	char		*ed_auth;	/* authorization string. */
	char		*ed_fmri;	/* affected FMRI. */
	char		*ed_snapname;	/* name of snapshot. */
	char		*ed_old_fmri;	/* old fmri in attach case. */
	char		*ed_old_name;	/* old snapshot in attach case. */
	char		*ed_type;	/* prop. group or prop. type. */
	char		*ed_prop_value;	/* property value. */
} audit_event_data_t;
486 
487 /*
488  * Pointer to function to do special processing to get audit event ID.
489  * Audit event IDs are defined in /usr/include/bsm/adt_event.h.  Function
490  * returns 0 if ID successfully retrieved.  Otherwise it returns -1.
491  */
typedef int (*spc_getid_fn_t)(tx_commit_data_t *, size_t, const char *,
    au_event_t *);
/*
 * spc_getid_fn_t used for the general/enabled special properties; see
 * special_props_list[] below.
 */
static int general_enable_id(tx_commit_data_t *, size_t, const char *,
    au_event_t *);
496 
/* uu_list pools for child lists and the notification machinery */
static uu_list_pool_t *rc_children_pool;
static uu_list_pool_t *rc_pg_notify_pool;
static uu_list_pool_t *rc_notify_pool;
static uu_list_pool_t *rc_notify_info_pool;

/* root of the entity tree: the "localhost" scope (set by rc_node_init()) */
static rc_node_t *rc_scope;

/* lock/cv protecting the notification state (see block comment below) */
static pthread_mutex_t	rc_pg_notify_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	rc_pg_notify_cv = PTHREAD_COND_INITIALIZER;
static uint_t		rc_notify_in_use;	/* blocks removals */
506 static uint_t		rc_notify_in_use;	/* blocks removals */
507 
/*
 * Some combinations of property group/property name require a special
 * audit event to be generated when there is a change.
 * audit_special_prop_item_t is used to specify these special cases.  The
 * special_props_list array defines a list of these special properties.
 */
typedef struct audit_special_prop_item {
	const char	*api_pg_name;	/* property group name. */
	const char	*api_prop_name;	/* property name. */
	au_event_t	api_event_id;	/* event id or 0. */
	spc_getid_fn_t	api_event_func; /* function to get event id */
					/* (non-NULL when api_event_id is 0). */
} audit_special_prop_item_t;
520 
521 /*
522  * Native builds are done using the build machine's standard include
523  * files.  These files may not yet have the definitions for the ADT_smf_*
524  * symbols.  Thus, we do not compile this table when doing native builds.
525  */
526 #ifndef	NATIVE_BUILD
527 /*
528  * The following special_props_list array specifies property group/property
529  * name combinations that have specific meaning to startd.  A special event
530  * is generated for these combinations in addition to the regular property
531  * event.
532  *
533  * At run time this array gets sorted.  See the call to qsort(3C) in
534  * rc_node_init().  The array is sorted, so that bsearch(3C) can be used
535  * to do lookups.
536  */
/* Entries pair a PG/property name with an event id or an id function. */
static audit_special_prop_item_t special_props_list[] = {
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADED, ADT_smf_degrade,
	    NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_DEGRADE_IMMEDIATE,
	    ADT_smf_immediate_degrade, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_OFF, ADT_smf_clear, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON,
	    ADT_smf_maintenance, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMEDIATE,
	    ADT_smf_immediate_maintenance, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_IMMTEMP,
	    ADT_smf_immtmp_maintenance, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_MAINT_ON_TEMPORARY,
	    ADT_smf_tmp_maintenance, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_REFRESH, ADT_smf_refresh, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTART, ADT_smf_restart, NULL},
	{SCF_PG_RESTARTER_ACTIONS, SCF_PROPERTY_RESTORE, ADT_smf_clear, NULL},
	{SCF_PG_OPTIONS, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
	{SCF_PG_OPTIONS_OVR, SCF_PROPERTY_MILESTONE, ADT_smf_milestone, NULL},
	{SCF_PG_GENERAL, SCF_PROPERTY_ENABLED, 0, general_enable_id},
	{SCF_PG_GENERAL_OVR, SCF_PROPERTY_ENABLED, 0, general_enable_id}
};
/* number of entries in special_props_list */
#define	SPECIAL_PROP_COUNT	(sizeof (special_props_list) /\
	sizeof (audit_special_prop_item_t))
561 #endif	/* NATIVE_BUILD */
562 
563 /*
564  * We support an arbitrary number of clients interested in events for certain
565  * types of changes.  Each client is represented by an rc_notify_info_t, and
566  * all clients are chained onto the rc_notify_info_list.
567  *
568  * The rc_notify_list is the global notification list.  Each entry is of
569  * type rc_notify_t, which is embedded in one of three other structures:
570  *
571  *	rc_node_t		property group update notification
572  *	rc_notify_delete_t	object deletion notification
573  *	rc_notify_info_t	notification clients
574  *
575  * Which type of object is determined by which pointer in the rc_notify_t is
576  * non-NULL.
577  *
578  * New notifications and clients are added to the end of the list.
579  * Notifications no-one is interested in are never added to the list.
580  *
581  * Clients use their position in the list to track which notifications they
582  * have not yet reported.  As they process notifications, they move forward
583  * in the list past them.  There is always a client at the beginning of the
584  * list -- as he moves past notifications, he removes them from the list and
585  * cleans them up.
586  *
587  * The rc_pg_notify_lock protects all notification state.  The rc_pg_notify_cv
588  * is used for global signalling, and each client has a cv which he waits for
589  * events of interest on.
590  */
static uu_list_t	*rc_notify_info_list;	/* registered clients */
static uu_list_t	*rc_notify_list;	/* global notification list */

#define	HASH_SIZE	512		/* must stay a power of two (see mask) */
#define	HASH_MASK	(HASH_SIZE - 1)

/* global rc_node_t cache buckets; CACHE_BUCKET() maps a hash to a bucket */
#pragma align 64(cache_hash)
static cache_bucket_t cache_hash[HASH_SIZE];

#define	CACHE_BUCKET(h)		(&cache_hash[(h) & HASH_MASK])


static void rc_node_no_client_refs(rc_node_t *np);
604 
605 
606 static uint32_t
607 rc_node_hash(rc_node_lookup_t *lp)
608 {
609 	uint32_t type = lp->rl_type;
610 	uint32_t backend = lp->rl_backend;
611 	uint32_t mainid = lp->rl_main_id;
612 	uint32_t *ids = lp->rl_ids;
613 
614 	rc_type_info_t *tp = &rc_types[type];
615 	uint32_t num_ids;
616 	uint32_t left;
617 	uint32_t hash;
618 
619 	assert(backend == BACKEND_TYPE_NORMAL ||
620 	    backend == BACKEND_TYPE_NONPERSIST);
621 
622 	assert(type > 0 && type < NUM_TYPES);
623 	num_ids = tp->rt_num_ids;
624 
625 	left = MAX_IDS - num_ids;
626 	assert(num_ids <= MAX_IDS);
627 
628 	hash = type * 7 + mainid * 5 + backend;
629 
630 	while (num_ids-- > 0)
631 		hash = hash * 11 + *ids++ * 7;
632 
633 	/*
634 	 * the rest should be zeroed
635 	 */
636 	while (left-- > 0)
637 		assert(*ids++ == 0);
638 
639 	return (hash);
640 }
641 
642 static int
643 rc_node_match(rc_node_t *np, rc_node_lookup_t *l)
644 {
645 	rc_node_lookup_t *r = &np->rn_id;
646 	rc_type_info_t *tp;
647 	uint32_t type;
648 	uint32_t num_ids;
649 
650 	if (r->rl_main_id != l->rl_main_id)
651 		return (0);
652 
653 	type = r->rl_type;
654 	if (type != l->rl_type)
655 		return (0);
656 
657 	assert(type > 0 && type < NUM_TYPES);
658 
659 	tp = &rc_types[r->rl_type];
660 	num_ids = tp->rt_num_ids;
661 
662 	assert(num_ids <= MAX_IDS);
663 	while (num_ids-- > 0)
664 		if (r->rl_ids[num_ids] != l->rl_ids[num_ids])
665 			return (0);
666 
667 	return (1);
668 }
669 
670 /*
671  * Register an ephemeral reference to np.  This should be done while both
672  * the persistent reference from which the np pointer was read is locked
673  * and np itself is locked.  This guarantees that another thread which
674  * thinks it has the last reference will yield without destroying the
675  * node.
676  */
677 static void
678 rc_node_hold_ephemeral_locked(rc_node_t *np)
679 {
680 	assert(MUTEX_HELD(&np->rn_lock));
681 
682 	++np->rn_erefs;
683 }
684 
/*
 * The "other" references on a node live in the atomically maintained
 * rn_other_refs count, which may be bumped from arbitrary context and
 * tracks references to a possibly out-of-date node's children.
 *
 * So that the node cannot vanish between the final drop of
 * rn_other_refs and the unref handling, rn_other_refs_held is bumped on
 * each 0->1 transition here and dropped (under the node lock) on each
 * 1->0 transition in rc_node_rele_other().
 */
static void
rc_node_hold_other(rc_node_t *np)
{
	uint32_t newcnt = atomic_add_32_nv(&np->rn_other_refs, 1);

	if (newcnt == 1) {
		/* 0 -> 1 transition: pin the node via rn_other_refs_held */
		atomic_add_32(&np->rn_other_refs_held, 1);
		assert(np->rn_other_refs_held > 0);
	}
	assert(np->rn_other_refs > 0);
}
704 
/*
 * Drop an "other" reference taken by rc_node_hold_other().  No node
 * locks may be held by the caller.  On the final drop of both
 * rn_other_refs and rn_other_refs_held, if there are also no client
 * references and the node is RC_NODE_OLD, the node is torn down.
 */
static void
rc_node_rele_other(rc_node_t *np)
{
	assert(np->rn_other_refs > 0);
	if (atomic_add_32_nv(&np->rn_other_refs, -1) == 0) {
		/* 1 -> 0 transition: drop the pin taken on 0 -> 1 */
		(void) pthread_mutex_lock(&np->rn_lock);
		assert(np->rn_other_refs_held > 0);
		if (atomic_add_32_nv(&np->rn_other_refs_held, -1) == 0 &&
		    np->rn_refs == 0 && (np->rn_flags & RC_NODE_OLD)) {
			/*
			 * This was the last client reference.  Destroy
			 * any other references and free() the node.
			 * NOTE(review): rn_lock is not unlocked on this
			 * path, so rc_node_no_client_refs() presumably
			 * consumes it -- confirm at its definition.
			 */
			rc_node_no_client_refs(np);
		} else {
			(void) pthread_mutex_unlock(&np->rn_lock);
		}
	}
}
727 
728 static void
729 rc_node_hold_locked(rc_node_t *np)
730 {
731 	assert(MUTEX_HELD(&np->rn_lock));
732 
733 	if (np->rn_refs == 0 && (np->rn_flags & RC_NODE_PARENT_REF))
734 		rc_node_hold_other(np->rn_parent_ref);
735 	np->rn_refs++;
736 	assert(np->rn_refs > 0);
737 }
738 
/*
 * Take a client reference on np, acquiring and releasing rn_lock around
 * rc_node_hold_locked().
 */
static void
rc_node_hold(rc_node_t *np)
{
	(void) pthread_mutex_lock(&np->rn_lock);
	rc_node_hold_locked(np);
	(void) pthread_mutex_unlock(&np->rn_lock);
}
746 
/*
 * Drop a client reference on np.  rn_lock must be held on entry and is
 * always released by the time this returns -- either explicitly here, or
 * by rc_node_no_client_refs() on the destruction path.
 *
 * When the final client reference on a DEAD or OLD node drops and no
 * "other" references remain, the node is destroyed.  A composed property
 * group is marked DEAD when its last reference goes away, since it is
 * only held together by the references it keeps.
 */
static void
rc_node_rele_locked(rc_node_t *np)
{
	int unref = 0;
	rc_node_t *par_ref = NULL;

	assert(MUTEX_HELD(&np->rn_lock));
	assert(np->rn_refs > 0);

	if (--np->rn_refs == 0) {
		if (np->rn_flags & RC_NODE_PARENT_REF)
			par_ref = np->rn_parent_ref;

		/*
		 * Composed property groups are only as good as their
		 * references.
		 */
		if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
			np->rn_flags |= RC_NODE_DEAD;

		if ((np->rn_flags & (RC_NODE_DEAD|RC_NODE_OLD)) &&
		    np->rn_other_refs == 0 && np->rn_other_refs_held == 0)
			unref = 1;
	}

	if (unref) {
		/*
		 * This was the last client reference.  Destroy any other
		 * references and free() the node.
		 */
		rc_node_no_client_refs(np);
	} else {
		/*
		 * rn_erefs can be 0 if we acquired the reference in
		 * a path which hasn't been updated to increment rn_erefs.
		 * When all paths which end here are updated, we should
		 * assert rn_erefs > 0 and always decrement it.
		 */
		if (np->rn_erefs > 0)
			--np->rn_erefs;
		(void) pthread_mutex_unlock(&np->rn_lock);
	}

	/* The parent reference is dropped only after np's lock is gone. */
	if (par_ref != NULL)
		rc_node_rele_other(par_ref);
}
793 
/*
 * Drop a client reference on np.  Note the deliberate asymmetry: rn_lock
 * is taken here but released inside rc_node_rele_locked(), which may
 * instead hand it off to the node-destruction path.
 */
void
rc_node_rele(rc_node_t *np)
{
	(void) pthread_mutex_lock(&np->rn_lock);
	rc_node_rele_locked(np);
}
800 
801 static cache_bucket_t *
802 cache_hold(uint32_t h)
803 {
804 	cache_bucket_t *bp = CACHE_BUCKET(h);
805 	(void) pthread_mutex_lock(&bp->cb_lock);
806 	return (bp);
807 }
808 
/*
 * Unlock a cache bucket previously returned by cache_hold().
 */
static void
cache_release(cache_bucket_t *bp)
{
	(void) pthread_mutex_unlock(&bp->cb_lock);
}
814 
/*
 * Search bucket bp for the node identified by lp.  bp must be locked and
 * must be the bucket lp's hash maps to.  On a hit, a client hold is
 * taken on the node before it is returned; returns NULL on a miss.
 */
static rc_node_t *
cache_lookup_unlocked(cache_bucket_t *bp, rc_node_lookup_t *lp)
{
	uint32_t h = rc_node_hash(lp);
	rc_node_t *np;

	assert(MUTEX_HELD(&bp->cb_lock));
	assert(bp == CACHE_BUCKET(h));

	for (np = bp->cb_head; np != NULL; np = np->rn_hash_next) {
		/* Compare the cheap hash first, then the full id. */
		if (np->rn_hash == h && rc_node_match(np, lp)) {
			rc_node_hold(np);
			return (np);
		}
	}

	return (NULL);
}
833 
834 static rc_node_t *
835 cache_lookup(rc_node_lookup_t *lp)
836 {
837 	uint32_t h;
838 	cache_bucket_t *bp;
839 	rc_node_t *np;
840 
841 	h = rc_node_hash(lp);
842 	bp = cache_hold(h);
843 
844 	np = cache_lookup_unlocked(bp, lp);
845 
846 	cache_release(bp);
847 
848 	return (np);
849 }
850 
/*
 * Insert np at the head of its (locked) hash bucket.  np's cached hash
 * must be current and it must not already be linked into any chain.
 */
static void
cache_insert_unlocked(cache_bucket_t *bp, rc_node_t *np)
{
	assert(MUTEX_HELD(&bp->cb_lock));
	assert(np->rn_hash == rc_node_hash(&np->rn_id));
	assert(bp == CACHE_BUCKET(np->rn_hash));

	assert(np->rn_hash_next == NULL);

	np->rn_hash_next = bp->cb_head;
	bp->cb_head = np;
}
863 
/*
 * Unlink np from its (locked) hash bucket.  np must be present in the
 * chain; we walk the links to find the pointer referencing it and splice
 * it out.
 */
static void
cache_remove_unlocked(cache_bucket_t *bp, rc_node_t *np)
{
	rc_node_t **npp;

	assert(MUTEX_HELD(&bp->cb_lock));
	assert(np->rn_hash == rc_node_hash(&np->rn_id));
	assert(bp == CACHE_BUCKET(np->rn_hash));

	for (npp = &bp->cb_head; *npp != NULL; npp = &(*npp)->rn_hash_next)
		if (*npp == np)
			break;

	assert(*npp == np);
	*npp = np->rn_hash_next;
	np->rn_hash_next = NULL;
}
881 
882 /*
883  * verify that the 'parent' type can have a child typed 'child'
884  * Fails with
885  *   _INVALID_TYPE - argument is invalid
886  *   _TYPE_MISMATCH - parent type cannot have children of type child
887  */
888 static int
889 rc_check_parent_child(uint32_t parent, uint32_t child)
890 {
891 	int idx;
892 	uint32_t type;
893 
894 	if (parent == 0 || parent >= NUM_TYPES ||
895 	    child == 0 || child >= NUM_TYPES)
896 		return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
897 
898 	for (idx = 0; idx < MAX_VALID_CHILDREN; idx++) {
899 		type = rc_types[parent].rt_valid_children[idx];
900 		if (type == child)
901 			return (REP_PROTOCOL_SUCCESS);
902 	}
903 
904 	return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
905 }
906 
907 /*
908  * Fails with
909  *   _INVALID_TYPE - type is invalid
910  *   _BAD_REQUEST - name is an invalid name for a node of type type
911  */
912 int
913 rc_check_type_name(uint32_t type, const char *name)
914 {
915 	if (type == 0 || type >= NUM_TYPES)
916 		return (REP_PROTOCOL_FAIL_INVALID_TYPE); /* invalid types */
917 
918 	if (uu_check_name(name, rc_types[type].rt_name_flags) == -1)
919 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
920 
921 	return (REP_PROTOCOL_SUCCESS);
922 }
923 
924 static int
925 rc_check_pgtype_name(const char *name)
926 {
927 	if (uu_check_name(name, UU_NAME_DOMAIN) == -1)
928 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
929 
930 	return (REP_PROTOCOL_SUCCESS);
931 }
932 
933 /*
934  * rc_node_free_fmri should be called whenever a node loses its parent.
935  * The reason is that the node's fmri string is built up by concatenating
936  * its name to the parent's fmri.  Thus, when the node no longer has a
937  * parent, its fmri is no longer valid.
938  */
939 static void
940 rc_node_free_fmri(rc_node_t *np)
941 {
942 	if (np->rn_fmri != NULL) {
943 		free((void *)np->rn_fmri);
944 		np->rn_fmri = NULL;
945 	}
946 }
947 
/*
 * Concatenate the appropriate separator and the FMRI element to the base
 * FMRI string at fmri.
 *
 * Note on sizing: strlcat() returns the total length the string would
 * have had, so on truncation 'actual' (and thus *sz_out) reports the
 * buffer size that would have been required.
 *
 * Fails with
 *	_TRUNCATED	Not enough room in buffer at fmri.
 */
static int
rc_concat_fmri_element(
	char *fmri,			/* base fmri */
	size_t bufsize,			/* size of buf at fmri */
	size_t *sz_out,			/* receives result size. */
	const char *element,		/* element name to concat */
	rep_protocol_entity_t type)	/* type of element */
{
	size_t actual;
	const char *name = element;
	int rc;
	const char *separator;

	/* With an empty buffer there is no string to measure. */
	if (bufsize > 0)
		*sz_out = strlen(fmri);
	else
		*sz_out = 0;

	/* Pick the separator implied by the entity type. */
	switch (type) {
	case REP_PROTOCOL_ENTITY_SCOPE:
		if (strcmp(element, SCF_FMRI_LOCAL_SCOPE) == 0) {
			/*
			 * No need to display scope information if we are
			 * in the local scope.
			 */
			separator = SCF_FMRI_SVC_PREFIX;
			name = NULL;
		} else {
			/*
			 * Need to display scope information, because it is
			 * not the local scope.
			 */
			separator = SCF_FMRI_SVC_PREFIX SCF_FMRI_SCOPE_PREFIX;
		}
		break;
	case REP_PROTOCOL_ENTITY_SERVICE:
		separator = SCF_FMRI_SERVICE_PREFIX;
		break;
	case REP_PROTOCOL_ENTITY_INSTANCE:
		separator = SCF_FMRI_INSTANCE_PREFIX;
		break;
	case REP_PROTOCOL_ENTITY_PROPERTYGRP:
	case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
		separator = SCF_FMRI_PROPERTYGRP_PREFIX;
		break;
	case REP_PROTOCOL_ENTITY_PROPERTY:
		separator = SCF_FMRI_PROPERTY_PREFIX;
		break;
	case REP_PROTOCOL_ENTITY_VALUE:
		/*
		 * A value does not have a separate FMRI from its property,
		 * so there is nothing to concat.
		 */
		return (REP_PROTOCOL_SUCCESS);
	case REP_PROTOCOL_ENTITY_SNAPSHOT:
	case REP_PROTOCOL_ENTITY_SNAPLEVEL:
		/* Snapshots do not have FMRIs, so there is nothing to do. */
		return (REP_PROTOCOL_SUCCESS);
	default:
		(void) fprintf(stderr, "%s:%d: Unknown protocol type %d.\n",
		    __FILE__, __LINE__, type);
		abort();	/* Missing a case in switch if we get here. */
	}

	/* Concatenate separator and element to the fmri buffer. */

	actual = strlcat(fmri, separator, bufsize);
	if (name != NULL) {
		if (actual < bufsize) {
			actual = strlcat(fmri, name, bufsize);
		} else {
			/* Already truncated; just account for the name. */
			actual += strlen(name);
		}
	}
	if (actual < bufsize) {
		rc = REP_PROTOCOL_SUCCESS;
	} else {
		rc = REP_PROTOCOL_FAIL_TRUNCATED;
	}
	*sz_out = actual;
	return (rc);
}
1037 
/*
 * Get the FMRI for the node at np.  The fmri will be placed in buf.  On
 * success sz_out will be set to the size of the fmri in buf.  If
 * REP_PROTOCOL_FAIL_TRUNCATED is returned, sz_out will be set to the size
 * of the buffer that would be required to avoid truncation.
 *
 * Fails with
 *	_TRUNCATED	not enough room in buf for the FMRI.
 */
static int
rc_node_get_fmri_or_fragment(rc_node_t *np, char *buf, size_t bufsize,
    size_t *sz_out)
{
	size_t fmri_len = 0;
	int r;

	if (bufsize > 0)
		*buf = 0;
	*sz_out = 0;

	if (np->rn_fmri == NULL) {
		/*
		 * A NULL rn_fmri implies that this is a top level scope.
		 * Child nodes will always have an rn_fmri established
		 * because both rc_node_link_child() and
		 * rc_node_relink_child() call rc_node_build_fmri().  In
		 * this case, we'll just return our name preceded by the
		 * appropriate FMRI decorations.
		 */
		assert(np->rn_parent == NULL);
		r = rc_concat_fmri_element(buf, bufsize, &fmri_len, np->rn_name,
		    np->rn_id.rl_type);
		if (r != REP_PROTOCOL_SUCCESS)
			return (r);
	} else {
		/* We have an fmri, so return it. */
		fmri_len = strlcpy(buf, np->rn_fmri, bufsize);
	}

	*sz_out = fmri_len;

	/* strlcpy/concat report the untruncated length; check it fit. */
	if (fmri_len >= bufsize)
		return (REP_PROTOCOL_FAIL_TRUNCATED);

	return (REP_PROTOCOL_SUCCESS);
}
1084 
1085 /*
1086  * Build an FMRI string for this node and save it in rn_fmri.
1087  *
1088  * The basic strategy here is to get the fmri of our parent and then
1089  * concatenate the appropriate separator followed by our name.  If our name
1090  * is null, the resulting fmri will just be a copy of the parent fmri.
1091  * rc_node_build_fmri() should be called with the RC_NODE_USING_PARENT flag
1092  * set.  Also the rn_lock for this node should be held.
1093  *
1094  * Fails with
1095  *	_NO_RESOURCES	Could not allocate memory.
1096  */
1097 static int
1098 rc_node_build_fmri(rc_node_t *np)
1099 {
1100 	size_t actual;
1101 	char fmri[REP_PROTOCOL_FMRI_LEN];
1102 	int rc;
1103 	size_t	sz = REP_PROTOCOL_FMRI_LEN;
1104 
1105 	assert(MUTEX_HELD(&np->rn_lock));
1106 	assert(np->rn_flags & RC_NODE_USING_PARENT);
1107 
1108 	rc_node_free_fmri(np);
1109 
1110 	rc = rc_node_get_fmri_or_fragment(np->rn_parent, fmri, sz, &actual);
1111 	assert(rc == REP_PROTOCOL_SUCCESS);
1112 
1113 	if (np->rn_name != NULL) {
1114 		rc = rc_concat_fmri_element(fmri, sz, &actual, np->rn_name,
1115 		    np->rn_id.rl_type);
1116 		assert(rc == REP_PROTOCOL_SUCCESS);
1117 		np->rn_fmri = strdup(fmri);
1118 	} else {
1119 		np->rn_fmri = strdup(fmri);
1120 	}
1121 	if (np->rn_fmri == NULL) {
1122 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
1123 	} else {
1124 		rc = REP_PROTOCOL_SUCCESS;
1125 	}
1126 
1127 	return (rc);
1128 }
1129 
1130 /*
1131  * Get the FMRI of the node at np placing the result in fmri.  Then
1132  * concatenate the additional element to fmri.  The type variable indicates
1133  * the type of element, so that the appropriate separator can be
1134  * generated.  size is the number of bytes in the buffer at fmri, and
1135  * sz_out receives the size of the generated string.  If the result is
1136  * truncated, sz_out will receive the size of the buffer that would be
1137  * required to avoid truncation.
1138  *
1139  * Fails with
1140  *	_TRUNCATED	Not enough room in buffer at fmri.
1141  */
1142 static int
1143 rc_get_fmri_and_concat(rc_node_t *np, char *fmri, size_t size, size_t *sz_out,
1144     const char *element, rep_protocol_entity_t type)
1145 {
1146 	int rc;
1147 
1148 	if ((rc = rc_node_get_fmri_or_fragment(np, fmri, size, sz_out)) !=
1149 	    REP_PROTOCOL_SUCCESS) {
1150 		return (rc);
1151 	}
1152 	if ((rc = rc_concat_fmri_element(fmri, size, sz_out, element, type)) !=
1153 	    REP_PROTOCOL_SUCCESS) {
1154 		return (rc);
1155 	}
1156 
1157 	return (REP_PROTOCOL_SUCCESS);
1158 }
1159 
/*
 * Returns nonzero if notification np is of interest to the client whose
 * registration is rnip.  Deletion events interest every client; a
 * property-group event matches if the pg's name or type appears in the
 * client's registered name/type lists.  rc_pg_notify_lock must be held.
 *
 * The asserts encode the rc_notify_t invariant: exactly one of
 * rcn_delete, rcn_info, or rcn_node may be set.
 */
static int
rc_notify_info_interested(rc_notify_info_t *rnip, rc_notify_t *np)
{
	rc_node_t *nnp = np->rcn_node;
	int i;

	assert(MUTEX_HELD(&rc_pg_notify_lock));

	if (np->rcn_delete != NULL) {
		assert(np->rcn_info == NULL && np->rcn_node == NULL);
		return (1);		/* everyone likes deletes */
	}
	if (np->rcn_node == NULL) {
		assert(np->rcn_info != NULL || np->rcn_delete != NULL);
		return (0);
	}
	assert(np->rcn_info == NULL);

	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
		if (rnip->rni_namelist[i] != NULL) {
			if (strcmp(nnp->rn_name, rnip->rni_namelist[i]) == 0)
				return (1);
		}
		if (rnip->rni_typelist[i] != NULL) {
			if (strcmp(nnp->rn_type, rnip->rni_typelist[i]) == 0)
				return (1);
		}
	}
	return (0);
}
1190 
/*
 * Queue a change notification for property group nnp if any registered
 * client is interested in it.  Nodes other than property groups are
 * ignored.  Interested clients are woken via their condition variables;
 * if nobody is interested, the embedded rc_notify_t is returned to its
 * idle state (rcn_node reset to NULL) rather than queued.
 */
static void
rc_notify_insert_node(rc_node_t *nnp)
{
	rc_notify_t *np = &nnp->rn_notify;
	rc_notify_info_t *nip;
	int found = 0;

	assert(np->rcn_info == NULL);

	if (nnp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
		return;

	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	np->rcn_node = nnp;
	for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
	    nip = uu_list_next(rc_notify_info_list, nip)) {
		if (rc_notify_info_interested(nip, np)) {
			(void) pthread_cond_broadcast(&nip->rni_cv);
			found++;
		}
	}
	if (found)
		(void) uu_list_insert_before(rc_notify_list, NULL, np);
	else
		np->rcn_node = NULL;

	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
}
1219 
/*
 * Initialize ndp as a deletion event for the given service, instance,
 * and property group (instance and pg may be NULL for higher-level
 * deletions), formatting the deleted entity's FMRI into rnd_fmri.  The
 * event is then appended to the notification list and all registered
 * watchers are woken.
 */
static void
rc_notify_deletion(rc_notify_delete_t *ndp, const char *service,
    const char *instance, const char *pg)
{
	rc_notify_info_t *nip;

	uu_list_node_init(&ndp->rnd_notify, &ndp->rnd_notify.rcn_list_node,
	    rc_notify_pool);
	ndp->rnd_notify.rcn_delete = ndp;

	(void) snprintf(ndp->rnd_fmri, sizeof (ndp->rnd_fmri),
	    "svc:/%s%s%s%s%s", service,
	    (instance != NULL)? ":" : "", (instance != NULL)? instance : "",
	    (pg != NULL)? "/:properties/" : "", (pg != NULL)? pg : "");

	/*
	 * add to notification list, notify watchers
	 */
	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	for (nip = uu_list_first(rc_notify_info_list); nip != NULL;
	    nip = uu_list_next(rc_notify_info_list, nip))
		(void) pthread_cond_broadcast(&nip->rni_cv);
	(void) uu_list_insert_before(rc_notify_list, NULL, ndp);
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
}
1245 
/*
 * Remove nnp's embedded notification from the pending list, if it is
 * queued (rcn_node != NULL).  We must wait out any active list reader
 * (rc_notify_in_use) since entries cannot be unlinked mid-scan.  nnp's
 * own lock must not be held -- presumably to keep the rn_lock vs
 * rc_pg_notify_lock ordering consistent; note the assert.
 */
static void
rc_notify_remove_node(rc_node_t *nnp)
{
	rc_notify_t *np = &nnp->rn_notify;

	assert(np->rcn_info == NULL);
	assert(!MUTEX_HELD(&nnp->rn_lock));

	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	while (np->rcn_node != NULL) {
		if (rc_notify_in_use) {
			/* A reader is walking the list; wait and recheck. */
			(void) pthread_cond_wait(&rc_pg_notify_cv,
			    &rc_pg_notify_lock);
			continue;
		}
		(void) uu_list_remove(rc_notify_list, np);
		np->rcn_node = NULL;
		break;
	}
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
}
1267 
/*
 * Unlink notification np from the pending list.  rc_pg_notify_lock must
 * be held and no reader may be active.  Node-backed entries are embedded
 * in their rc_node_t and are simply disassociated; deletion entries own
 * their storage and are freed here.
 */
static void
rc_notify_remove_locked(rc_notify_t *np)
{
	assert(MUTEX_HELD(&rc_pg_notify_lock));
	assert(rc_notify_in_use == 0);

	(void) uu_list_remove(rc_notify_list, np);
	if (np->rcn_node) {
		np->rcn_node = NULL;
	} else if (np->rcn_delete) {
		uu_free(np->rcn_delete);
	} else {
		assert(0);	/* CAN'T HAPPEN */
	}
}
1283 
1284 /*
1285  * Permission checking functions.  See comment atop this file.
1286  */
1287 #ifndef NATIVE_BUILD
1288 static permcheck_t *
1289 pc_create()
1290 {
1291 	permcheck_t *p;
1292 
1293 	p = uu_zalloc(sizeof (*p));
1294 	if (p == NULL)
1295 		return (NULL);
1296 	p->pc_bnum = 8;			/* Normal case will only have 2 elts. */
1297 	p->pc_buckets = uu_zalloc(sizeof (*p->pc_buckets) * p->pc_bnum);
1298 	if (p->pc_buckets == NULL) {
1299 		uu_free(p);
1300 		return (NULL);
1301 	}
1302 
1303 	p->pc_enum = 0;
1304 	return (p);
1305 }
1306 
1307 static void
1308 pc_free(permcheck_t *pcp)
1309 {
1310 	uint_t i;
1311 	struct pc_elt *ep, *next;
1312 
1313 	for (i = 0; i < pcp->pc_bnum; ++i) {
1314 		for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
1315 			next = ep->pce_next;
1316 			free(ep);
1317 		}
1318 	}
1319 
1320 	free(pcp->pc_buckets);
1321 	free(pcp);
1322 }
1323 
/*
 * Hash an authorization string.
 *
 * Generic hash function from uts/common/os/modhash.c (ELF-style string
 * hash: shift in each character, folding overflow from the top nibble
 * back into the low bits).
 */
static uint32_t
pc_hash(const char *auth)
{
	uint32_t hash = 0;
	uint32_t high;
	const char *cp;

	for (cp = auth; *cp != '\0'; ++cp) {
		hash = (hash << 4) + *cp;
		high = hash & 0xf0000000;
		if (high != 0) {
			hash ^= high >> 24;
			hash ^= high;
		}
	}

	return (hash);
}
1344 
/*
 * Returns 1 if auth is present (exact string match) in pcp, else 0.  On
 * a hit, the matched string is remembered in pc_auth_string for use in
 * the audit event (see check_auth_list()).
 */
static int
pc_exists(permcheck_t *pcp, const char *auth)
{
	uint32_t h;
	struct pc_elt *ep;

	h = pc_hash(auth);
	/* pc_bnum is a power of two, so the mask selects the bucket. */
	for (ep = pcp->pc_buckets[h & (pcp->pc_bnum - 1)];
	    ep != NULL;
	    ep = ep->pce_next) {
		if (strcmp(auth, ep->pce_auth) == 0) {
			pcp->pc_auth_string = ep->pce_auth;
			return (1);
		}
	}

	return (0);
}
1363 
/*
 * Returns 1 if any authorization in pcp matches the wildcard pattern
 * (per _auth_match()), recording the matched string in pc_auth_string;
 * returns 0 otherwise.  Unlike pc_exists(), this must scan every bucket.
 */
static int
pc_match(permcheck_t *pcp, const char *pattern)
{
	uint_t i;
	struct pc_elt *ep;

	for (i = 0; i < pcp->pc_bnum; ++i) {
		for (ep = pcp->pc_buckets[i]; ep != NULL; ep = ep->pce_next) {
			if (_auth_match(pattern, ep->pce_auth)) {
				pcp->pc_auth_string = ep->pce_auth;
				return (1);
			}
		}
	}

	return (0);
}
1381 
/*
 * Double pcp's bucket count and rehash every element into the new
 * table.  Returns -1 if the new size would overflow or the allocation
 * fails, 0 on success.  The count stays a power of two so hashing can
 * continue to mask with (pc_bnum - 1).
 */
static int
pc_grow(permcheck_t *pcp)
{
	uint_t new_bnum, i, j;
	struct pc_elt **new_buckets;
	struct pc_elt *ep, *next;

	new_bnum = pcp->pc_bnum * 2;
	if (new_bnum < pcp->pc_bnum)
		/* Doubling overflowed uint_t; refuse to grow further. */
		return (-1);

	new_buckets = uu_zalloc(sizeof (*new_buckets) * new_bnum);
	if (new_buckets == NULL)
		return (-1);

	for (i = 0; i < pcp->pc_bnum; ++i) {
		for (ep = pcp->pc_buckets[i]; ep != NULL; ep = next) {
			next = ep->pce_next;
			j = pc_hash(ep->pce_auth) & (new_bnum - 1);
			ep->pce_next = new_buckets[j];
			new_buckets[j] = ep;
		}
	}

	uu_free(pcp->pc_buckets);
	pcp->pc_buckets = new_buckets;
	pcp->pc_bnum = new_bnum;

	return (0);
}
1413 
/*
 * Add authorization string auth to pcp, tracking the most specific auth
 * seen so far (pc_specific/pc_specific_type) for later audit reporting.
 * Returns -1 on allocation failure, 0 on success.
 */
static int
pc_add(permcheck_t *pcp, const char *auth, pc_auth_type_t auth_type)
{
	struct pc_elt *ep;
	uint_t i;

	/* pce_auth is a flexible-style trailing array; size for the string. */
	ep = uu_zalloc(offsetof(struct pc_elt, pce_auth) + strlen(auth) + 1);
	if (ep == NULL)
		return (-1);

	/* Grow if pc_enum / pc_bnum > 3/4. */
	if (pcp->pc_enum * 4 > 3 * pcp->pc_bnum)
		/* Failure is not a stopper; we'll try again next time. */
		(void) pc_grow(pcp);

	(void) strcpy(ep->pce_auth, auth);

	i = pc_hash(auth) & (pcp->pc_bnum - 1);
	ep->pce_next = pcp->pc_buckets[i];
	pcp->pc_buckets[i] = ep;

	if (auth_type > pcp->pc_specific_type) {
		pcp->pc_specific_type = auth_type;
		pcp->pc_specific = ep;
	}

	++pcp->pc_enum;

	return (0);
}
1444 
1445 /*
1446  * For the type of a property group, return the authorization which may be
1447  * used to modify it.
1448  */
1449 static const char *
1450 perm_auth_for_pgtype(const char *pgtype)
1451 {
1452 	if (strcmp(pgtype, SCF_GROUP_METHOD) == 0)
1453 		return (AUTH_MODIFY_PREFIX "method");
1454 	else if (strcmp(pgtype, SCF_GROUP_DEPENDENCY) == 0)
1455 		return (AUTH_MODIFY_PREFIX "dependency");
1456 	else if (strcmp(pgtype, SCF_GROUP_APPLICATION) == 0)
1457 		return (AUTH_MODIFY_PREFIX "application");
1458 	else if (strcmp(pgtype, SCF_GROUP_FRAMEWORK) == 0)
1459 		return (AUTH_MODIFY_PREFIX "framework");
1460 	else
1461 		return (NULL);
1462 }
1463 
1464 /*
1465  * Fails with
1466  *   _NO_RESOURCES - out of memory
1467  */
1468 static int
1469 perm_add_enabling_type(permcheck_t *pcp, const char *auth,
1470     pc_auth_type_t auth_type)
1471 {
1472 	return (pc_add(pcp, auth, auth_type) == 0 ? REP_PROTOCOL_SUCCESS :
1473 	    REP_PROTOCOL_FAIL_NO_RESOURCES);
1474 }
1475 
/*
 * Convenience wrapper: add an enabling authorization with the default
 * PC_AUTH_SMF type.
 *
 * Fails with
 *   _NO_RESOURCES - out of memory
 */
static int
perm_add_enabling(permcheck_t *pcp, const char *auth)
{
	return (perm_add_enabling_type(pcp, auth, PC_AUTH_SMF));
}
1485 
1486 /* Note that perm_add_enabling_values() is defined below. */
1487 
1488 /*
1489  * perm_granted() returns 1 if the current door caller has one of the enabling
1490  * authorizations in pcp, 0 if it doesn't, and -1 if an error (usually lack of
1491  * memory) occurs.  check_auth_list() checks an RBAC_AUTH_SEP-separated list
1492  * of authorizations for existence in pcp, and check_prof_list() checks the
1493  * authorizations granted to an RBAC_AUTH_SEP-separated list of profiles.
1494  */
1495 static int
1496 check_auth_list(permcheck_t *pcp, char *authlist)
1497 {
1498 	char *auth, *lasts;
1499 	int ret;
1500 
1501 	for (auth = (char *)strtok_r(authlist, RBAC_AUTH_SEP, &lasts);
1502 	    auth != NULL;
1503 	    auth = (char *)strtok_r(NULL, RBAC_AUTH_SEP, &lasts)) {
1504 		if (strchr(auth, KV_WILDCHAR) == NULL)
1505 			ret = pc_exists(pcp, auth);
1506 		else
1507 			ret = pc_match(pcp, auth);
1508 
1509 		if (ret)
1510 			return (ret);
1511 	}
1512 
1513 	/*
1514 	 * If we failed, choose the most specific auth string for use in
1515 	 * the audit event.
1516 	 */
1517 	assert(pcp->pc_specific != NULL);
1518 	pcp->pc_auth_string = pcp->pc_specific->pce_auth;
1519 
1520 	return (0);
1521 }
1522 
/*
 * Check each profile in the RBAC_AUTH_SEP-separated list proflist for an
 * authorization present in pcp, recursing into each profile's
 * sub-profiles.  Returns nonzero on a match, 0 otherwise.  proflist is
 * modified by strtok_r.  Unknown profiles are silently skipped.
 */
static int
check_prof_list(permcheck_t *pcp, char *proflist)
{
	char *prof, *lasts, *authlist, *subproflist;
	profattr_t *pap;
	int ret = 0;

	for (prof = strtok_r(proflist, RBAC_AUTH_SEP, &lasts);
	    prof != NULL;
	    prof = strtok_r(NULL, RBAC_AUTH_SEP, &lasts)) {
		pap = getprofnam(prof);
		if (pap == NULL)
			continue;

		authlist = kva_match(pap->attr, PROFATTR_AUTHS_KW);
		if (authlist != NULL)
			ret = check_auth_list(pcp, authlist);

		if (!ret) {
			subproflist = kva_match(pap->attr, PROFATTR_PROFS_KW);
			if (subproflist != NULL)
				/* depth check to avoid infinite recursion? */
				ret = check_prof_list(pcp, subproflist);
		}

		free_profattr(pap);
		if (ret)
			return (ret);
	}

	return (ret);
}
1555 
/*
 * Decide whether the current door caller holds one of the enabling
 * authorizations in pcp.  Returns nonzero if permission is granted, 0 if
 * not, and -1 on error (user lookup or policy.conf read failure).
 *
 * Sources are consulted in order: policy.conf default authorizations,
 * the user_attr authorization list, policy.conf default profiles, and
 * finally the user's own profile list.
 */
static int
perm_granted(permcheck_t *pcp)
{
	ucred_t *uc;

	int ret = 0;
	uid_t uid;
	userattr_t *uap;
	char *authlist, *userattr_authlist, *proflist, *def_prof = NULL;
	struct passwd pw;
	char pwbuf[1024];	/* XXX should be NSS_BUFLEN_PASSWD */

	/* Get the uid */
	if ((uc = get_ucred()) == NULL) {
		if (errno == EINVAL) {
			/*
			 * Client is no longer waiting for our response (e.g.,
			 * it received a signal & resumed with EINTR).
			 * Punting with door_return() would be nice but we
			 * need to release all of the locks & references we
			 * hold.  And we must report failure to the client
			 * layer to keep it from ignoring retries as
			 * already-done (idempotency & all that).  None of the
			 * error codes fit very well, so we might as well
			 * force the return of _PERMISSION_DENIED since we
			 * couldn't determine the user.
			 */
			return (0);
		}
		assert(0);
		abort();
	}

	uid = ucred_geteuid(uc);
	assert(uid != (uid_t)-1);

	if (getpwuid_r(uid, &pw, pwbuf, sizeof (pwbuf)) == NULL) {
		return (-1);
	}

	/*
	 * Get user's default authorizations from policy.conf
	 */
	ret = _get_user_defs(pw.pw_name, &authlist, &def_prof);

	if (ret != 0)
		return (-1);

	if (authlist != NULL) {
		ret = check_auth_list(pcp, authlist);

		if (ret) {
			_free_user_defs(authlist, def_prof);
			return (ret);
		}
	}

	/*
	 * Put off checking def_prof for later in an attempt to consolidate
	 * prof_attr accesses.
	 */

	uap = getusernam(pw.pw_name);
	if (uap != NULL) {
		/* Get the authorizations from user_attr. */
		userattr_authlist = kva_match(uap->attr, USERATTR_AUTHS_KW);
		if (userattr_authlist != NULL) {
			ret = check_auth_list(pcp, userattr_authlist);
		}
	}

	if (!ret && def_prof != NULL) {
		/* Check generic profiles. */
		ret = check_prof_list(pcp, def_prof);
	}

	if (!ret && uap != NULL) {
		proflist = kva_match(uap->attr, USERATTR_PROFILES_KW);
		if (proflist != NULL)
			ret = check_prof_list(pcp, proflist);
	}

	_free_user_defs(authlist, def_prof);
	if (uap != NULL)
		free_userattr(uap);

	return (ret);
}
1644 #endif /* NATIVE_BUILD */
1645 
1646 /*
1647  * flags in RC_NODE_WAITING_FLAGS are broadcast when unset, and are used to
1648  * serialize certain actions, and to wait for certain operations to complete
1649  *
1650  * The waiting flags are:
1651  *	RC_NODE_CHILDREN_CHANGING
1652  *		The child list is being built or changed (due to creation
1653  *		or deletion).  All iterators pause.
1654  *
1655  *	RC_NODE_USING_PARENT
1656  *		Someone is actively using the parent pointer, so we can't
1657  *		be removed from the parent list.
1658  *
1659  *	RC_NODE_CREATING_CHILD
1660  *		A child is being created -- locks out other creations, to
1661  *		prevent insert-insert races.
1662  *
1663  *	RC_NODE_IN_TX
1664  *		This object is running a transaction.
1665  *
1666  *	RC_NODE_DYING
1667  *		This node might be dying.  Always set as a set, using
1668  *		RC_NODE_DYING_FLAGS (which is everything but
1669  *		RC_NODE_USING_PARENT)
1670  */
/*
 * Wait for, then acquire, waiting-flag 'flag' on np.  Returns 0 if the
 * node is marked DEAD before the flag could be taken, nonzero on
 * success.  rn_lock must be held (cond_wait drops and reacquires it
 * while waiting).
 */
static int
rc_node_hold_flag(rc_node_t *np, uint32_t flag)
{
	assert(MUTEX_HELD(&np->rn_lock));
	assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);

	while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag)) {
		(void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
	}
	if (np->rn_flags & RC_NODE_DEAD)
		return (0);

	np->rn_flags |= flag;
	return (1);
}
1686 
/*
 * Release waiting-flag 'flag' (which must currently be held) and wake
 * all waiters on the node.  rn_lock must be held.
 */
static void
rc_node_rele_flag(rc_node_t *np, uint32_t flag)
{
	assert((flag & ~RC_NODE_WAITING_FLAGS) == 0);
	assert(MUTEX_HELD(&np->rn_lock));
	assert((np->rn_flags & flag) == flag);
	np->rn_flags &= ~flag;
	(void) pthread_cond_broadcast(&np->rn_cv);
}
1696 
1697 /*
1698  * wait until a particular flag has cleared.  Fails if the object dies.
1699  */
1700 static int
1701 rc_node_wait_flag(rc_node_t *np, uint32_t flag)
1702 {
1703 	assert(MUTEX_HELD(&np->rn_lock));
1704 	while (!(np->rn_flags & RC_NODE_DEAD) && (np->rn_flags & flag))
1705 		(void) pthread_cond_wait(&np->rn_cv, &np->rn_lock);
1706 
1707 	return (!(np->rn_flags & RC_NODE_DEAD));
1708 }
1709 
/*
 * On entry, np's lock must be held, and this thread must be holding
 * RC_NODE_USING_PARENT.  On return, both of them are released.
 *
 * If the return value is NULL, np either does not have a parent, or
 * the parent has been marked DEAD.
 *
 * If the return value is non-NULL, it is the parent of np, and both
 * its lock and the requested flags are held.
 */
static rc_node_t *
rc_node_hold_parent_flag(rc_node_t *np, uint32_t flag)
{
	rc_node_t *pp;

	assert(MUTEX_HELD(&np->rn_lock));
	assert(np->rn_flags & RC_NODE_USING_PARENT);

	if ((pp = np->rn_parent) == NULL) {
		rc_node_rele_flag(np, RC_NODE_USING_PARENT);
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (NULL);
	}
	/*
	 * Drop np's lock so we can take the parent's first; meanwhile
	 * RC_NODE_USING_PARENT keeps np on the parent's child list, so
	 * the rn_parent pointer we just read stays meaningful.
	 */
	(void) pthread_mutex_unlock(&np->rn_lock);

	(void) pthread_mutex_lock(&pp->rn_lock);
	(void) pthread_mutex_lock(&np->rn_lock);
	rc_node_rele_flag(np, RC_NODE_USING_PARENT);
	(void) pthread_mutex_unlock(&np->rn_lock);

	if (!rc_node_hold_flag(pp, flag)) {
		/* Parent died while we were waiting for the flag. */
		(void) pthread_mutex_unlock(&pp->rn_lock);
		return (NULL);
	}
	return (pp);
}
1746 
/*
 * Allocate a new, zeroed rc_node_t with its lock, condition variable,
 * child/notify lists, and list nodes initialized.  Returns NULL if the
 * node allocation fails.
 *
 * NOTE(review): the uu_list_create() return values are not checked; a
 * failure would leave rn_children or rn_pg_notify_list NULL -- confirm
 * this is relied upon not to fail.
 */
rc_node_t *
rc_node_alloc(void)
{
	rc_node_t *np = uu_zalloc(sizeof (*np));

	if (np == NULL)
		return (NULL);

	(void) pthread_mutex_init(&np->rn_lock, NULL);
	(void) pthread_cond_init(&np->rn_cv, NULL);

	np->rn_children = uu_list_create(rc_children_pool, np, 0);
	np->rn_pg_notify_list = uu_list_create(rc_pg_notify_pool, np, 0);

	uu_list_node_init(np, &np->rn_sibling_node, rc_children_pool);

	uu_list_node_init(&np->rn_notify, &np->rn_notify.rcn_list_node,
	    rc_notify_pool);

	return (np);
}
1768 
/*
 * Tear down and free np.  Callers must guarantee that no client or
 * "other" references remain (see asserts).  Nodes flagged
 * RC_NODE_UNREFED are being handled elsewhere and are left alone.
 * Composed property groups additionally drop the holds that
 * rc_iter_next() took on their composition chain.
 */
void
rc_node_destroy(rc_node_t *np)
{
	int i;

	if (np->rn_flags & RC_NODE_UNREFED)
		return;				/* being handled elsewhere */

	assert(np->rn_refs == 0 && np->rn_other_refs == 0);
	assert(np->rn_former == NULL);

	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
		/* Release the holds from rc_iter_next(). */
		for (i = 0; i < COMPOSITION_DEPTH; ++i) {
			/* rn_cchain[i] may be NULL for empty snapshots. */
			if (np->rn_cchain[i] != NULL)
				rc_node_rele(np->rn_cchain[i]);
		}
	}

	if (np->rn_name != NULL)
		free((void *)np->rn_name);
	np->rn_name = NULL;
	if (np->rn_type != NULL)
		free((void *)np->rn_type);
	np->rn_type = NULL;
	if (np->rn_values != NULL)
		object_free_values(np->rn_values, np->rn_valtype,
		    np->rn_values_count, np->rn_values_size);
	np->rn_values = NULL;
	rc_node_free_fmri(np);

	if (np->rn_snaplevel != NULL)
		rc_snaplevel_rele(np->rn_snaplevel);
	np->rn_snaplevel = NULL;

	uu_list_node_fini(np, &np->rn_sibling_node, rc_children_pool);

	uu_list_node_fini(&np->rn_notify, &np->rn_notify.rcn_list_node,
	    rc_notify_pool);

	assert(uu_list_first(np->rn_children) == NULL);
	uu_list_destroy(np->rn_children);
	uu_list_destroy(np->rn_pg_notify_list);

	(void) pthread_mutex_destroy(&np->rn_lock);
	(void) pthread_cond_destroy(&np->rn_cv);

	uu_free(np);
}
1819 
/*
 * Link in a child node.
 *
 * Because of the lock ordering, cp has to already be in the hash table with
 * its lock dropped before we get it.  To prevent anyone from noticing that
 * it is parentless, the creation code sets the RC_NODE_USING_PARENT.  Once
 * we've linked it in, we release the flag.
 */
static void
rc_node_link_child(rc_node_t *np, rc_node_t *cp)
{
	assert(!MUTEX_HELD(&np->rn_lock));
	assert(!MUTEX_HELD(&cp->rn_lock));

	/* Parent lock, then child lock. */
	(void) pthread_mutex_lock(&np->rn_lock);
	(void) pthread_mutex_lock(&cp->rn_lock);
	assert(!(cp->rn_flags & RC_NODE_IN_PARENT) &&
	    (cp->rn_flags & RC_NODE_USING_PARENT));

	assert(rc_check_parent_child(np->rn_id.rl_type, cp->rn_id.rl_type) ==
	    REP_PROTOCOL_SUCCESS);

	cp->rn_parent = np;
	cp->rn_flags |= RC_NODE_IN_PARENT;
	(void) uu_list_insert_before(np->rn_children, NULL, cp);
	/* With the parent linkage established, the child's FMRI is valid. */
	(void) rc_node_build_fmri(cp);

	(void) pthread_mutex_unlock(&np->rn_lock);

	rc_node_rele_flag(cp, RC_NODE_USING_PARENT);
	(void) pthread_mutex_unlock(&cp->rn_lock);
}
1852 
1853 /*
1854  * Sets the rn_parent_ref field of all the children of np to pp -- always
1855  * initially invoked as rc_node_setup_parent_ref(np, np), we then recurse.
1856  *
1857  * This is used when we mark a node RC_NODE_OLD, so that when the object and
1858  * its children are no longer referenced, they will all be deleted as a unit.
1859  */
static void
rc_node_setup_parent_ref(rc_node_t *np, rc_node_t *pp)
{
	rc_node_t *cp;

	assert(MUTEX_HELD(&np->rn_lock));

	for (cp = uu_list_first(np->rn_children); cp != NULL;
	    cp = uu_list_next(np->rn_children, cp)) {
		(void) pthread_mutex_lock(&cp->rn_lock);
		if (cp->rn_flags & RC_NODE_PARENT_REF) {
			/* Already set up; must reference the same root. */
			assert(cp->rn_parent_ref == pp);
		} else {
			assert(cp->rn_parent_ref == NULL);

			cp->rn_flags |= RC_NODE_PARENT_REF;
			cp->rn_parent_ref = pp;
			/* A referenced child keeps the old root alive. */
			if (cp->rn_refs != 0)
				rc_node_hold_other(pp);
		}
		/* Descend with cp's lock held, as our caller did for np. */
		rc_node_setup_parent_ref(cp, pp);		/* recurse */
		(void) pthread_mutex_unlock(&cp->rn_lock);
	}
}
1884 
1885 /*
1886  * Atomically replace 'np' with 'newp', with a parent of 'pp'.
1887  *
1888  * Requirements:
1889  *	*no* node locks may be held.
1890  *	pp must be held with RC_NODE_CHILDREN_CHANGING
1891  *	newp and np must be held with RC_NODE_IN_TX
1892  *	np must be marked RC_NODE_IN_PARENT, newp must not be
1893  *	np must be marked RC_NODE_OLD
1894  *
1895  * Afterwards:
1896  *	pp's RC_NODE_CHILDREN_CHANGING is dropped
1897  *	newp and np's RC_NODE_IN_TX is dropped
1898  *	newp->rn_former = np;
1899  *	newp is RC_NODE_IN_PARENT, np is not.
1900  *	interested notify subscribers have been notified of newp's new status.
1901  */
static void
rc_node_relink_child(rc_node_t *pp, rc_node_t *np, rc_node_t *newp)
{
	cache_bucket_t *bp;
	/*
	 * First, swap np and newp in the cache.  newp's RC_NODE_IN_TX flag
	 * keeps rc_node_update() from seeing it until we are done.
	 */
	bp = cache_hold(newp->rn_hash);
	cache_remove_unlocked(bp, np);
	cache_insert_unlocked(bp, newp);
	cache_release(bp);

	/*
	 * replace np with newp in pp's list, and attach it to newp's rn_former
	 * link.
	 */
	(void) pthread_mutex_lock(&pp->rn_lock);
	assert(pp->rn_flags & RC_NODE_CHILDREN_CHANGING);

	/* Lock order: parent, then new child, then old child. */
	(void) pthread_mutex_lock(&newp->rn_lock);
	assert(!(newp->rn_flags & RC_NODE_IN_PARENT));
	assert(newp->rn_flags & RC_NODE_IN_TX);

	(void) pthread_mutex_lock(&np->rn_lock);
	assert(np->rn_flags & RC_NODE_IN_PARENT);
	assert(np->rn_flags & RC_NODE_OLD);
	assert(np->rn_flags & RC_NODE_IN_TX);

	newp->rn_parent = pp;
	newp->rn_flags |= RC_NODE_IN_PARENT;

	/*
	 * Note that we carefully add newp before removing np -- this
	 * keeps iterators on the list from missing us.
	 */
	(void) uu_list_insert_after(pp->rn_children, np, newp);
	(void) rc_node_build_fmri(newp);
	(void) uu_list_remove(pp->rn_children, np);

	/*
	 * re-set np
	 */
	newp->rn_former = np;
	np->rn_parent = NULL;
	np->rn_flags &= ~RC_NODE_IN_PARENT;
	np->rn_flags |= RC_NODE_ON_FORMER;

	/* Let interested notify subscribers know about newp. */
	rc_notify_insert_node(newp);

	rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
	(void) pthread_mutex_unlock(&pp->rn_lock);
	/*
	 * NOTE(review): RC_NODE_USING_PARENT is released here along with
	 * RC_NODE_IN_TX even though the header comment only promises to
	 * drop IN_TX -- presumably callers also hold USING_PARENT on newp;
	 * confirm against the callers.
	 */
	rc_node_rele_flag(newp, RC_NODE_USING_PARENT | RC_NODE_IN_TX);
	(void) pthread_mutex_unlock(&newp->rn_lock);
	/* np is now an old root: set up parent refs across its subtree. */
	rc_node_setup_parent_ref(np, np);
	rc_node_rele_flag(np, RC_NODE_IN_TX);
	(void) pthread_mutex_unlock(&np->rn_lock);
}
1960 
1961 /*
1962  * makes sure a node with lookup 'nip', name 'name', and parent 'pp' exists.
1963  * 'cp' is used (and returned) if the node does not yet exist.  If it does
1964  * exist, 'cp' is freed, and the existent node is returned instead.
1965  */
rc_node_t *
rc_node_setup(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
    rc_node_t *pp)
{
	rc_node_t *np;
	cache_bucket_t *bp;
	uint32_t h = rc_node_hash(nip);

	assert(cp->rn_refs == 0);

	bp = cache_hold(h);
	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
		cache_release(bp);

		/*
		 * make sure it matches our expectations
		 */
		(void) pthread_mutex_lock(&np->rn_lock);
		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
			assert(np->rn_parent == pp);
			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
			assert(strcmp(np->rn_name, name) == 0);
			assert(np->rn_type == NULL);
			assert(np->rn_flags & RC_NODE_IN_PARENT);
			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
		}
		(void) pthread_mutex_unlock(&np->rn_lock);

		/* cp is unneeded; the caller gets the existing node. */
		rc_node_destroy(cp);
		return (np);
	}

	/*
	 * No one is there -- setup & install the new node.
	 */
	np = cp;
	rc_node_hold(np);
	np->rn_id = *nip;
	np->rn_hash = h;
	/* NOTE(review): strdup() result is not checked for NULL here. */
	np->rn_name = strdup(name);

	np->rn_flags |= RC_NODE_USING_PARENT;

	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE) {
#if COMPOSITION_DEPTH == 2
		/* An instance composes itself over its parent service. */
		np->rn_cchain[0] = np;
		np->rn_cchain[1] = pp;
#else
#error This code must be updated.
#endif
	}

	cache_insert_unlocked(bp, np);
	cache_release(bp);		/* we are now visible */

	rc_node_link_child(pp, np);

	return (np);
}
2025 
2026 /*
2027  * makes sure a snapshot with lookup 'nip', name 'name', and parent 'pp' exists.
2028  * 'cp' is used (and returned) if the node does not yet exist.  If it does
2029  * exist, 'cp' is freed, and the existent node is returned instead.
2030  */
rc_node_t *
rc_node_setup_snapshot(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
    uint32_t snap_id, rc_node_t *pp)
{
	rc_node_t *np;
	cache_bucket_t *bp;
	uint32_t h = rc_node_hash(nip);

	assert(cp->rn_refs == 0);

	bp = cache_hold(h);
	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
		cache_release(bp);

		/*
		 * make sure it matches our expectations
		 */
		(void) pthread_mutex_lock(&np->rn_lock);
		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
			assert(np->rn_parent == pp);
			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
			assert(strcmp(np->rn_name, name) == 0);
			assert(np->rn_type == NULL);
			assert(np->rn_flags & RC_NODE_IN_PARENT);
			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
		}
		(void) pthread_mutex_unlock(&np->rn_lock);

		/* cp is unneeded; the caller gets the existing node. */
		rc_node_destroy(cp);
		return (np);
	}

	/*
	 * No one is there -- create a new node.
	 */
	np = cp;
	rc_node_hold(np);
	np->rn_id = *nip;
	np->rn_hash = h;
	/* NOTE(review): strdup() result is not checked for NULL here. */
	np->rn_name = strdup(name);
	np->rn_snapshot_id = snap_id;

	np->rn_flags |= RC_NODE_USING_PARENT;

	cache_insert_unlocked(bp, np);
	cache_release(bp);		/* we are now visible */

	rc_node_link_child(pp, np);

	return (np);
}
2082 
2083 /*
2084  * makes sure a snaplevel with lookup 'nip' and parent 'pp' exists.  'cp' is
2085  * used (and returned) if the node does not yet exist.  If it does exist, 'cp'
2086  * is freed, and the existent node is returned instead.
2087  */
rc_node_t *
rc_node_setup_snaplevel(rc_node_t *cp, rc_node_lookup_t *nip,
    rc_snaplevel_t *lvl, rc_node_t *pp)
{
	rc_node_t *np;
	cache_bucket_t *bp;
	uint32_t h = rc_node_hash(nip);

	assert(cp->rn_refs == 0);

	bp = cache_hold(h);
	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
		cache_release(bp);

		/*
		 * make sure it matches our expectations
		 */
		(void) pthread_mutex_lock(&np->rn_lock);
		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
			assert(np->rn_parent == pp);
			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
			/* snaplevels are unnamed and untyped */
			assert(np->rn_name == NULL);
			assert(np->rn_type == NULL);
			assert(np->rn_flags & RC_NODE_IN_PARENT);
			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
		}
		(void) pthread_mutex_unlock(&np->rn_lock);

		/* cp is unneeded; the caller gets the existing node. */
		rc_node_destroy(cp);
		return (np);
	}

	/*
	 * No one is there -- create a new node.
	 */
	np = cp;
	rc_node_hold(np);	/* released in snapshot_fill_children() */
	np->rn_id = *nip;
	np->rn_hash = h;

	/* Keep the snaplevel data alive as long as this node exists. */
	rc_snaplevel_hold(lvl);
	np->rn_snaplevel = lvl;

	np->rn_flags |= RC_NODE_USING_PARENT;

	cache_insert_unlocked(bp, np);
	cache_release(bp);		/* we are now visible */

	/* Add this snaplevel to the snapshot's composition chain. */
	assert(pp->rn_cchain[lvl->rsl_level_num - 1] == NULL);
	pp->rn_cchain[lvl->rsl_level_num - 1] = np;

	rc_node_link_child(pp, np);

	return (np);
}
2144 
2145 /*
2146  * Returns NULL if strdup() fails.
2147  */
2148 rc_node_t *
2149 rc_node_setup_pg(rc_node_t *cp, rc_node_lookup_t *nip, const char *name,
2150     const char *type, uint32_t flags, uint32_t gen_id, rc_node_t *pp)
2151 {
2152 	rc_node_t *np;
2153 	cache_bucket_t *bp;
2154 
2155 	uint32_t h = rc_node_hash(nip);
2156 	bp = cache_hold(h);
2157 	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2158 		cache_release(bp);
2159 
2160 		/*
2161 		 * make sure it matches our expectations (don't check
2162 		 * the generation number or parent, since someone could
2163 		 * have gotten a transaction through while we weren't
2164 		 * looking)
2165 		 */
2166 		(void) pthread_mutex_lock(&np->rn_lock);
2167 		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2168 			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2169 			assert(strcmp(np->rn_name, name) == 0);
2170 			assert(strcmp(np->rn_type, type) == 0);
2171 			assert(np->rn_pgflags == flags);
2172 			assert(np->rn_flags & RC_NODE_IN_PARENT);
2173 			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2174 		}
2175 		(void) pthread_mutex_unlock(&np->rn_lock);
2176 
2177 		rc_node_destroy(cp);
2178 		return (np);
2179 	}
2180 
2181 	np = cp;
2182 	rc_node_hold(np);		/* released in fill_pg_callback() */
2183 	np->rn_id = *nip;
2184 	np->rn_hash = h;
2185 	np->rn_name = strdup(name);
2186 	if (np->rn_name == NULL) {
2187 		rc_node_rele(np);
2188 		return (NULL);
2189 	}
2190 	np->rn_type = strdup(type);
2191 	if (np->rn_type == NULL) {
2192 		free((void *)np->rn_name);
2193 		rc_node_rele(np);
2194 		return (NULL);
2195 	}
2196 	np->rn_pgflags = flags;
2197 	np->rn_gen_id = gen_id;
2198 
2199 	np->rn_flags |= RC_NODE_USING_PARENT;
2200 
2201 	cache_insert_unlocked(bp, np);
2202 	cache_release(bp);		/* we are now visible */
2203 
2204 	rc_node_link_child(pp, np);
2205 
2206 	return (np);
2207 }
2208 
2209 #if COMPOSITION_DEPTH == 2
2210 /*
2211  * Initialize a "composed property group" which represents the composition of
2212  * property groups pg1 & pg2.  It is ephemeral: once created & returned for an
2213  * ITER_READ request, keeping it out of cache_hash and any child lists
2214  * prevents it from being looked up.  Operations besides iteration are passed
2215  * through to pg1.
2216  *
2217  * pg1 & pg2 should be held before entering this function.  They will be
2218  * released in rc_node_destroy().
2219  */
static int
rc_node_setup_cpg(rc_node_t *cpg, rc_node_t *pg1, rc_node_t *pg2)
{
	/* Only pgs of identical type may be composed. */
	if (strcmp(pg1->rn_type, pg2->rn_type) != 0)
		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);

	cpg->rn_id.rl_type = REP_PROTOCOL_ENTITY_CPROPERTYGRP;
	cpg->rn_name = strdup(pg1->rn_name);
	if (cpg->rn_name == NULL)
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);

	/* Record the composed pgs; their holds are dropped in rc_node_destroy(). */
	cpg->rn_cchain[0] = pg1;
	cpg->rn_cchain[1] = pg2;

	return (REP_PROTOCOL_SUCCESS);
}
2236 #else
2237 #error This code must be updated.
2238 #endif
2239 
2240 /*
2241  * Fails with _NO_RESOURCES.
2242  */
2243 int
2244 rc_node_create_property(rc_node_t *pp, rc_node_lookup_t *nip,
2245     const char *name, rep_protocol_value_type_t type,
2246     const char *vals, size_t count, size_t size)
2247 {
2248 	rc_node_t *np;
2249 	cache_bucket_t *bp;
2250 
2251 	uint32_t h = rc_node_hash(nip);
2252 	bp = cache_hold(h);
2253 	if ((np = cache_lookup_unlocked(bp, nip)) != NULL) {
2254 		cache_release(bp);
2255 		/*
2256 		 * make sure it matches our expectations
2257 		 */
2258 		(void) pthread_mutex_lock(&np->rn_lock);
2259 		if (rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
2260 			assert(np->rn_parent == pp);
2261 			assert(memcmp(&np->rn_id, nip, sizeof (*nip)) == 0);
2262 			assert(strcmp(np->rn_name, name) == 0);
2263 			assert(np->rn_valtype == type);
2264 			assert(np->rn_values_count == count);
2265 			assert(np->rn_values_size == size);
2266 			assert(vals == NULL ||
2267 			    memcmp(np->rn_values, vals, size) == 0);
2268 			assert(np->rn_flags & RC_NODE_IN_PARENT);
2269 			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
2270 		}
2271 		rc_node_rele_locked(np);
2272 		object_free_values(vals, type, count, size);
2273 		return (REP_PROTOCOL_SUCCESS);
2274 	}
2275 
2276 	/*
2277 	 * No one is there -- create a new node.
2278 	 */
2279 	np = rc_node_alloc();
2280 	if (np == NULL) {
2281 		cache_release(bp);
2282 		object_free_values(vals, type, count, size);
2283 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2284 	}
2285 	np->rn_id = *nip;
2286 	np->rn_hash = h;
2287 	np->rn_name = strdup(name);
2288 	if (np->rn_name == NULL) {
2289 		cache_release(bp);
2290 		object_free_values(vals, type, count, size);
2291 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
2292 	}
2293 
2294 	np->rn_valtype = type;
2295 	np->rn_values = vals;
2296 	np->rn_values_count = count;
2297 	np->rn_values_size = size;
2298 
2299 	np->rn_flags |= RC_NODE_USING_PARENT;
2300 
2301 	cache_insert_unlocked(bp, np);
2302 	cache_release(bp);		/* we are now visible */
2303 
2304 	rc_node_link_child(pp, np);
2305 
2306 	return (REP_PROTOCOL_SUCCESS);
2307 }
2308 
2309 /*
2310  * This function implements a decision table to determine the event ID for
2311  * changes to the enabled (SCF_PROPERTY_ENABLED) property.  The event ID is
2312  * determined by the value of the first property in the command specified
2313  * by cmd_no and the name of the property group.  Here is the decision
2314  * table:
2315  *
2316  *				Property Group Name
2317  *	Property	------------------------------------------
2318  *	Value		SCF_PG_GENERAL		SCF_PG_GENERAL_OVR
2319  *	--------	--------------		------------------
2320  *	"0"		ADT_smf_disable		ADT_smf_tmp_disable
2321  *	"1"		ADT_smf_enable		ADT_smf_tmp_enable
2322  *
2323  * This function is called by special_property_event through a function
2324  * pointer in the special_props_list array.
2325  *
2326  * Since the ADT_smf_* symbols may not be defined in the build machine's
2327  * include files, this function is not compiled when doing native builds.
2328  */
2329 #ifndef NATIVE_BUILD
2330 static int
2331 general_enable_id(tx_commit_data_t *tx_data, size_t cmd_no, const char *pg,
2332     au_event_t *event_id)
2333 {
2334 	const char *value;
2335 	uint32_t nvalues;
2336 	int enable;
2337 
2338 	/*
2339 	 * First, check property value.
2340 	 */
2341 	if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
2342 		return (-1);
2343 	if (nvalues == 0)
2344 		return (-1);
2345 	if (tx_cmd_value(tx_data, cmd_no, 0, &value) != REP_PROTOCOL_SUCCESS)
2346 		return (-1);
2347 	if (strcmp(value, "0") == 0) {
2348 		enable = 0;
2349 	} else if (strcmp(value, "1") == 0) {
2350 		enable = 1;
2351 	} else {
2352 		return (-1);
2353 	}
2354 
2355 	/*
2356 	 * Now check property group name.
2357 	 */
2358 	if (strcmp(pg, SCF_PG_GENERAL) == 0) {
2359 		*event_id = enable ? ADT_smf_enable : ADT_smf_disable;
2360 		return (0);
2361 	} else if (strcmp(pg, SCF_PG_GENERAL_OVR) == 0) {
2362 		*event_id = enable ? ADT_smf_tmp_enable : ADT_smf_tmp_disable;
2363 		return (0);
2364 	}
2365 	return (-1);
2366 }
2367 #endif	/* NATIVE_BUILD */
2368 
2369 /*
2370  * This function compares two audit_special_prop_item_t structures
2371  * represented by item1 and item2.  It returns an integer greater than 0 if
2372  * item1 is greater than item2.  It returns 0 if they are equal and an
2373  * integer less than 0 if item1 is less than item2.  api_prop_name and
2374  * api_pg_name are the key fields for sorting.
2375  *
2376  * This function is suitable for calls to bsearch(3C) and qsort(3C).
2377  */
2378 static int
2379 special_prop_compare(const void *item1, const void *item2)
2380 {
2381 	const audit_special_prop_item_t *a = (audit_special_prop_item_t *)item1;
2382 	const audit_special_prop_item_t *b = (audit_special_prop_item_t *)item2;
2383 	int r;
2384 
2385 	r = strcmp(a->api_prop_name, b->api_prop_name);
2386 	if (r == 0) {
2387 		/*
2388 		 * Primary keys are the same, so check the secondary key.
2389 		 */
2390 		r = strcmp(a->api_pg_name, b->api_pg_name);
2391 	}
2392 	return (r);
2393 }
2394 
/*
 * One-time initialization of the rc_node layer: create the uu_list pools
 * and global notification lists, sort the special property table, and
 * install the root "localhost" scope node in the cache.  Aborts the
 * process on allocation failure; always returns 1.
 */
int
rc_node_init(void)
{
	rc_node_t *np;
	cache_bucket_t *bp;

	rc_children_pool = uu_list_pool_create("rc_children_pool",
	    sizeof (rc_node_t), offsetof(rc_node_t, rn_sibling_node),
	    NULL, UU_LIST_POOL_DEBUG);

	rc_pg_notify_pool = uu_list_pool_create("rc_pg_notify_pool",
	    sizeof (rc_node_pg_notify_t),
	    offsetof(rc_node_pg_notify_t, rnpn_node),
	    NULL, UU_LIST_POOL_DEBUG);

	rc_notify_pool = uu_list_pool_create("rc_notify_pool",
	    sizeof (rc_notify_t), offsetof(rc_notify_t, rcn_list_node),
	    NULL, UU_LIST_POOL_DEBUG);

	rc_notify_info_pool = uu_list_pool_create("rc_notify_info_pool",
	    sizeof (rc_notify_info_t),
	    offsetof(rc_notify_info_t, rni_list_node),
	    NULL, UU_LIST_POOL_DEBUG);

	if (rc_children_pool == NULL || rc_pg_notify_pool == NULL ||
	    rc_notify_pool == NULL || rc_notify_info_pool == NULL)
		uu_die("out of memory");

	rc_notify_list = uu_list_create(rc_notify_pool,
	    &rc_notify_list, 0);

	rc_notify_info_list = uu_list_create(rc_notify_info_pool,
	    &rc_notify_info_list, 0);

	if (rc_notify_list == NULL || rc_notify_info_list == NULL)
		uu_die("out of memory");

	/*
	 * Sort the special_props_list array so that it can be searched
	 * with bsearch(3C).
	 *
	 * The special_props_list array is not compiled into the native
	 * build code, so there is no need to call qsort if NATIVE_BUILD is
	 * defined.
	 */
#ifndef	NATIVE_BUILD
	qsort(special_props_list, SPECIAL_PROP_COUNT,
	    sizeof (special_props_list[0]), special_prop_compare);
#endif	/* NATIVE_BUILD */

	/* Create the root of the entity tree: the "localhost" scope. */
	if ((np = rc_node_alloc()) == NULL)
		uu_die("out of memory");

	rc_node_hold(np);
	np->rn_id.rl_type = REP_PROTOCOL_ENTITY_SCOPE;
	np->rn_id.rl_backend = BACKEND_TYPE_NORMAL;
	np->rn_hash = rc_node_hash(&np->rn_id);
	/* a string literal, not strdup'ed -- this node lives forever */
	np->rn_name = "localhost";

	bp = cache_hold(np->rn_hash);
	cache_insert_unlocked(bp, np);
	cache_release(bp);

	rc_scope = np;
	return (1);
}
2461 
2462 /*
2463  * Fails with
2464  *   _INVALID_TYPE - type is invalid
2465  *   _TYPE_MISMATCH - np doesn't carry children of type type
2466  *   _DELETED - np has been deleted
2467  *   _NO_RESOURCES
2468  */
static int
rc_node_fill_children(rc_node_t *np, uint32_t type)
{
	int rc;

	assert(MUTEX_HELD(&np->rn_lock));

	/* Verify np may carry children of the requested type at all. */
	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
	    REP_PROTOCOL_SUCCESS)
		return (rc);

	if (!rc_node_hold_flag(np, RC_NODE_CHILDREN_CHANGING))
		return (REP_PROTOCOL_FAIL_DELETED);

	/* Fast path: the children have already been loaded. */
	if (np->rn_flags & RC_NODE_HAS_CHILDREN) {
		rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);
		return (REP_PROTOCOL_SUCCESS);
	}

	/*
	 * Drop the node lock across the object-layer load; the
	 * RC_NODE_CHILDREN_CHANGING flag keeps the child list stable.
	 */
	(void) pthread_mutex_unlock(&np->rn_lock);
	rc = object_fill_children(np);
	(void) pthread_mutex_lock(&np->rn_lock);

	if (rc == REP_PROTOCOL_SUCCESS) {
		np->rn_flags |= RC_NODE_HAS_CHILDREN;
	}
	rc_node_rele_flag(np, RC_NODE_CHILDREN_CHANGING);

	return (rc);
}
2499 
2500 /*
2501  * Returns
2502  *   _INVALID_TYPE - type is invalid
2503  *   _TYPE_MISMATCH - np doesn't carry children of type type
2504  *   _DELETED - np has been deleted
2505  *   _NO_RESOURCES
2506  *   _SUCCESS - if *cpp is not NULL, it is held
2507  */
2508 static int
2509 rc_node_find_named_child(rc_node_t *np, const char *name, uint32_t type,
2510     rc_node_t **cpp)
2511 {
2512 	int ret;
2513 	rc_node_t *cp;
2514 
2515 	assert(MUTEX_HELD(&np->rn_lock));
2516 	assert(np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP);
2517 
2518 	ret = rc_node_fill_children(np, type);
2519 	if (ret != REP_PROTOCOL_SUCCESS)
2520 		return (ret);
2521 
2522 	for (cp = uu_list_first(np->rn_children);
2523 	    cp != NULL;
2524 	    cp = uu_list_next(np->rn_children, cp)) {
2525 		if (cp->rn_id.rl_type == type && strcmp(cp->rn_name, name) == 0)
2526 			break;
2527 	}
2528 
2529 	if (cp != NULL)
2530 		rc_node_hold(cp);
2531 	*cpp = cp;
2532 
2533 	return (REP_PROTOCOL_SUCCESS);
2534 }
2535 
2536 static int rc_node_parent(rc_node_t *, rc_node_t **);
2537 
2538 /*
2539  * Returns
2540  *   _INVALID_TYPE - type is invalid
2541  *   _DELETED - np or an ancestor has been deleted
2542  *   _NOT_FOUND - no ancestor of specified type exists
2543  *   _SUCCESS - *app is held
2544  */
2545 static int
2546 rc_node_find_ancestor(rc_node_t *np, uint32_t type, rc_node_t **app)
2547 {
2548 	int ret;
2549 	rc_node_t *parent, *np_orig;
2550 
2551 	if (type >= REP_PROTOCOL_ENTITY_MAX)
2552 		return (REP_PROTOCOL_FAIL_INVALID_TYPE);
2553 
2554 	np_orig = np;
2555 
2556 	while (np->rn_id.rl_type > type) {
2557 		ret = rc_node_parent(np, &parent);
2558 		if (np != np_orig)
2559 			rc_node_rele(np);
2560 		if (ret != REP_PROTOCOL_SUCCESS)
2561 			return (ret);
2562 		np = parent;
2563 	}
2564 
2565 	if (np->rn_id.rl_type == type) {
2566 		*app = parent;
2567 		return (REP_PROTOCOL_SUCCESS);
2568 	}
2569 
2570 	return (REP_PROTOCOL_FAIL_NOT_FOUND);
2571 }
2572 
2573 #ifndef NATIVE_BUILD
2574 /*
2575  * If the propname property exists in pg, and it is of type string, add its
2576  * values as authorizations to pcp.  pg must not be locked on entry, and it is
2577  * returned unlocked.  Returns
2578  *   _DELETED - pg was deleted
2579  *   _NO_RESOURCES
2580  *   _NOT_FOUND - pg has no property named propname
2581  *   _SUCCESS
2582  */
static int
perm_add_pg_prop_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
{
	rc_node_t *prop;
	int result;

	uint_t count;
	const char *cp;

	assert(!MUTEX_HELD(&pg->rn_lock));
	assert(pg->rn_id.rl_type == REP_PROTOCOL_ENTITY_PROPERTYGRP);

	/* rc_node_find_named_child() requires the pg lock. */
	(void) pthread_mutex_lock(&pg->rn_lock);
	result = rc_node_find_named_child(pg, propname,
	    REP_PROTOCOL_ENTITY_PROPERTY, &prop);
	(void) pthread_mutex_unlock(&pg->rn_lock);
	if (result != REP_PROTOCOL_SUCCESS) {
		switch (result) {
		case REP_PROTOCOL_FAIL_DELETED:
		case REP_PROTOCOL_FAIL_NO_RESOURCES:
			return (result);

		case REP_PROTOCOL_FAIL_INVALID_TYPE:
		case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
		default:
			/* impossible for a pg/property lookup */
			bad_error("rc_node_find_named_child", result);
		}
	}

	if (prop == NULL)
		return (REP_PROTOCOL_FAIL_NOT_FOUND);

	/* rn_valtype is immutable, so no locking. */
	if (prop->rn_valtype != REP_PROTOCOL_TYPE_STRING) {
		/* Non-string values can't name authorizations; skip them. */
		rc_node_rele(prop);
		return (REP_PROTOCOL_SUCCESS);
	}

	(void) pthread_mutex_lock(&prop->rn_lock);
	/*
	 * Walk the value list: the values are stored back-to-back, each
	 * NUL-terminated, hence the strchr()+1 step to the next one.
	 */
	for (count = prop->rn_values_count, cp = prop->rn_values;
	    count > 0;
	    --count) {
		/* Instance-level pgs grant instance auths, else service. */
		result = perm_add_enabling_type(pcp, cp,
		    (pg->rn_id.rl_ids[ID_INSTANCE]) ? PC_AUTH_INST :
		    PC_AUTH_SVC);
		if (result != REP_PROTOCOL_SUCCESS)
			break;

		cp = strchr(cp, '\0') + 1;
	}

	rc_node_rele_locked(prop);

	return (result);
}
2638 
2639 /*
2640  * Assuming that ent is a service or instance node, if the pgname property
2641  * group has type pgtype, and it has a propname property with string type, add
2642  * its values as authorizations to pcp.  If pgtype is NULL, it is not checked.
2643  * Returns
2644  *   _SUCCESS
2645  *   _DELETED - ent was deleted
2646  *   _NO_RESOURCES - no resources
2647  *   _NOT_FOUND - ent does not have pgname pg or propname property
2648  */
static int
perm_add_ent_prop_values(permcheck_t *pcp, rc_node_t *ent, const char *pgname,
    const char *pgtype, const char *propname)
{
	int r;
	rc_node_t *pg;

	assert(!MUTEX_HELD(&ent->rn_lock));

	/* Look up the named property group under ent. */
	(void) pthread_mutex_lock(&ent->rn_lock);
	r = rc_node_find_named_child(ent, pgname,
	    REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
	(void) pthread_mutex_unlock(&ent->rn_lock);

	switch (r) {
	case REP_PROTOCOL_SUCCESS:
		break;

	case REP_PROTOCOL_FAIL_DELETED:
	case REP_PROTOCOL_FAIL_NO_RESOURCES:
		return (r);

	default:
		/* other errors are impossible for this lookup */
		bad_error("rc_node_find_named_child", r);
	}

	if (pg == NULL)
		return (REP_PROTOCOL_FAIL_NOT_FOUND);

	/* A NULL pgtype means "any type is acceptable". */
	if (pgtype == NULL || strcmp(pg->rn_type, pgtype) == 0) {
		r = perm_add_pg_prop_values(pcp, pg, propname);
		switch (r) {
		case REP_PROTOCOL_FAIL_DELETED:
			/* the pg vanished under us: report it as absent */
			r = REP_PROTOCOL_FAIL_NOT_FOUND;
			break;

		case REP_PROTOCOL_FAIL_NO_RESOURCES:
		case REP_PROTOCOL_SUCCESS:
		case REP_PROTOCOL_FAIL_NOT_FOUND:
			break;

		default:
			bad_error("perm_add_pg_prop_values", r);
		}
	}

	/* drop the hold taken by rc_node_find_named_child() */
	rc_node_rele(pg);

	return (r);
}
2699 
2700 /*
2701  * If pg has a property named propname, and is string typed, add its values as
2702  * authorizations to pcp.  If pg has no such property, and its parent is an
2703  * instance, walk up to the service and try doing the same with the property
2704  * of the same name from the property group of the same name.  Returns
2705  *   _SUCCESS
2706  *   _NO_RESOURCES
2707  *   _DELETED - pg (or an ancestor) was deleted
2708  */
static int
perm_add_enabling_values(permcheck_t *pcp, rc_node_t *pg, const char *propname)
{
	int r;
	char pgname[REP_PROTOCOL_NAME_LEN + 1];
	rc_node_t *svc;
	size_t sz;

	/* First, try the property in pg itself. */
	r = perm_add_pg_prop_values(pcp, pg, propname);

	if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
		return (r);

	assert(!MUTEX_HELD(&pg->rn_lock));

	/* Service-level pgs have no instance id; nothing left to check. */
	if (pg->rn_id.rl_ids[ID_INSTANCE] == 0)
		return (REP_PROTOCOL_SUCCESS);

	/* Save the pg name before we go hunting for the service. */
	sz = strlcpy(pgname, pg->rn_name, sizeof (pgname));
	assert(sz < sizeof (pgname));

	/*
	 * If pg is a child of an instance or snapshot, we want to compose the
	 * authorization property with the service's (if it exists).  The
	 * snapshot case applies only to read_authorization.  In all other
	 * cases, the pg's parent will be the instance.
	 */
	r = rc_node_find_ancestor(pg, REP_PROTOCOL_ENTITY_SERVICE, &svc);
	if (r != REP_PROTOCOL_SUCCESS) {
		assert(r == REP_PROTOCOL_FAIL_DELETED);
		return (r);
	}
	assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);

	/* Look for the same-named pg/property at the service level. */
	r = perm_add_ent_prop_values(pcp, svc, pgname, NULL, propname);

	rc_node_rele(svc);

	/* A missing service-level property isn't an error. */
	if (r == REP_PROTOCOL_FAIL_NOT_FOUND)
		r = REP_PROTOCOL_SUCCESS;

	return (r);
}
2752 
2753 /*
2754  * Call perm_add_enabling_values() for the "action_authorization" property of
2755  * the "general" property group of inst.  Returns
2756  *   _DELETED - inst (or an ancestor) was deleted
2757  *   _NO_RESOURCES
2758  *   _SUCCESS
2759  */
2760 static int
2761 perm_add_inst_action_auth(permcheck_t *pcp, rc_node_t *inst)
2762 {
2763 	int r;
2764 	rc_node_t *svc;
2765 
2766 	assert(inst->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);
2767 
2768 	r = perm_add_ent_prop_values(pcp, inst, AUTH_PG_GENERAL,
2769 	    AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2770 
2771 	if (r != REP_PROTOCOL_FAIL_NOT_FOUND)
2772 		return (r);
2773 
2774 	r = rc_node_parent(inst, &svc);
2775 	if (r != REP_PROTOCOL_SUCCESS) {
2776 		assert(r == REP_PROTOCOL_FAIL_DELETED);
2777 		return (r);
2778 	}
2779 
2780 	r = perm_add_ent_prop_values(pcp, svc, AUTH_PG_GENERAL,
2781 	    AUTH_PG_GENERAL_TYPE, AUTH_PROP_ACTION);
2782 
2783 	return (r == REP_PROTOCOL_FAIL_NOT_FOUND ? REP_PROTOCOL_SUCCESS : r);
2784 }
2785 #endif /* NATIVE_BUILD */
2786 
/*
 * Initialize a client-layer node pointer to the empty, live state.
 */
void
rc_node_ptr_init(rc_node_ptr_t *out)
{
	out->rnp_node = NULL;
	out->rnp_auth_string = NULL;
	out->rnp_authorized = RC_AUTH_UNKNOWN;
	out->rnp_deleted = 0;
}
2795 
2796 void
2797 rc_node_ptr_free_mem(rc_node_ptr_t *npp)
2798 {
2799 	if (npp->rnp_auth_string != NULL) {
2800 		free((void *)npp->rnp_auth_string);
2801 		npp->rnp_auth_string = NULL;
2802 	}
2803 }
2804 
/*
 * Point 'out' at 'val' (which may be NULL), dropping any reference to
 * its previous target and resetting the cached authorization state.
 */
static void
rc_node_assign(rc_node_ptr_t *out, rc_node_t *val)
{
	rc_node_t *cur = out->rnp_node;
	/* take the new hold before dropping the old one */
	if (val != NULL)
		rc_node_hold(val);
	out->rnp_node = val;
	if (cur != NULL) {
		NODE_LOCK(cur);

		/*
		 * Register the ephemeral reference created by reading
		 * out->rnp_node into cur.  Note that the persistent
		 * reference we're destroying is locked by the client
		 * layer.
		 */
		rc_node_hold_ephemeral_locked(cur);

		rc_node_rele_locked(cur);
	}
	out->rnp_authorized = RC_AUTH_UNKNOWN;
	rc_node_ptr_free_mem(out);
	out->rnp_deleted = 0;
}
2829 
/*
 * Empty a node pointer; 'deleted' records whether its target went away
 * because of deletion (vs. simply never having been set).
 */
void
rc_node_clear(rc_node_ptr_t *out, int deleted)
{
	rc_node_assign(out, NULL);
	out->rnp_deleted = deleted;
}
2836 
/*
 * Copy one node pointer's target into another (taking a new hold).
 */
void
rc_node_ptr_assign(rc_node_ptr_t *out, const rc_node_ptr_t *val)
{
	rc_node_assign(out, val->rnp_node);
}
2842 
2843 /*
2844  * rc_node_check()/RC_NODE_CHECK()
2845  *	generic "entry" checks, run before the use of an rc_node pointer.
2846  *
2847  * Fails with
2848  *   _NOT_SET
2849  *   _DELETED
2850  */
2851 static int
2852 rc_node_check_and_lock(rc_node_t *np)
2853 {
2854 	int result = REP_PROTOCOL_SUCCESS;
2855 	if (np == NULL)
2856 		return (REP_PROTOCOL_FAIL_NOT_SET);
2857 
2858 	(void) pthread_mutex_lock(&np->rn_lock);
2859 	if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2860 		result = REP_PROTOCOL_FAIL_DELETED;
2861 		(void) pthread_mutex_unlock(&np->rn_lock);
2862 	}
2863 
2864 	return (result);
2865 }
2866 
2867 /*
2868  * Fails with
2869  *   _NOT_SET - ptr is reset
2870  *   _DELETED - node has been deleted
2871  */
2872 static rc_node_t *
2873 rc_node_ptr_check_and_lock(rc_node_ptr_t *npp, int *res)
2874 {
2875 	rc_node_t *np = npp->rnp_node;
2876 	if (np == NULL) {
2877 		if (npp->rnp_deleted)
2878 			*res = REP_PROTOCOL_FAIL_DELETED;
2879 		else
2880 			*res = REP_PROTOCOL_FAIL_NOT_SET;
2881 		return (NULL);
2882 	}
2883 
2884 	(void) pthread_mutex_lock(&np->rn_lock);
2885 	if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
2886 		(void) pthread_mutex_unlock(&np->rn_lock);
2887 		rc_node_clear(npp, 1);
2888 		*res = REP_PROTOCOL_FAIL_DELETED;
2889 		return (NULL);
2890 	}
2891 	return (np);
2892 }
2893 
/* Validate n; on success fall through with n->rn_lock held. */
#define	RC_NODE_CHECK_AND_LOCK(n) {					\
	int rc__res;							\
	if ((rc__res = rc_node_check_and_lock(n)) != REP_PROTOCOL_SUCCESS) \
		return (rc__res);					\
}

/* Validate n; the lock is not held on fall-through. */
#define	RC_NODE_CHECK(n) {						\
	RC_NODE_CHECK_AND_LOCK(n);					\
	(void) pthread_mutex_unlock(&(n)->rn_lock);			\
}

/* Validate n and take a hold on it; the lock is not held on fall-through. */
#define	RC_NODE_CHECK_AND_HOLD(n) {					\
	RC_NODE_CHECK_AND_LOCK(n);					\
	rc_node_hold_locked(n);						\
	(void) pthread_mutex_unlock(&(n)->rn_lock);			\
}

/*
 * Dereference npp into np; on success fall through with np->rn_lock
 * held, otherwise return the failure code from the enclosing function.
 */
#define	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp) {			\
	int rc__res;							\
	if (((np) = rc_node_ptr_check_and_lock(npp, &rc__res)) == NULL)	\
		return (rc__res);					\
}

/* As above, but the lock is not held on fall-through. */
#define	RC_NODE_PTR_GET_CHECK(np, npp) {				\
	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);			\
	(void) pthread_mutex_unlock(&(np)->rn_lock);			\
}

/* As above, plus a hold on np; the lock is not held on fall-through. */
#define	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp) {			\
	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);			\
	rc_node_hold_locked(np);					\
	(void) pthread_mutex_unlock(&(np)->rn_lock);			\
}

/*
 * Take a flag-hold (see rc_node_hold_flag()) on np, which must be
 * locked and not DEAD; returns _DELETED (unlocked) if np dies while
 * waiting for the flag.
 */
#define	HOLD_FLAG_OR_RETURN(np, flag) {					\
	assert(MUTEX_HELD(&(np)->rn_lock));				\
	assert(!((np)->rn_flags & RC_NODE_DEAD));			\
	if (!rc_node_hold_flag((np), flag)) {				\
		(void) pthread_mutex_unlock(&(np)->rn_lock);		\
		return (REP_PROTOCOL_FAIL_DELETED);			\
	}								\
}

/* As HOLD_FLAG_OR_RETURN, but also resets the client pointer npp. */
#define	HOLD_PTR_FLAG_OR_RETURN(np, npp, flag) {			\
	assert(MUTEX_HELD(&(np)->rn_lock));				\
	assert(!((np)->rn_flags & RC_NODE_DEAD));			\
	if (!rc_node_hold_flag((np), flag)) {				\
		(void) pthread_mutex_unlock(&(np)->rn_lock);		\
		assert((np) == (npp)->rnp_node);			\
		rc_node_clear(npp, 1);					\
		return (REP_PROTOCOL_FAIL_DELETED);			\
	}								\
}

/* As HOLD_PTR_FLAG_OR_RETURN, freeing mem (if non-NULL) on failure. */
#define	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, flag, mem) {		\
	assert(MUTEX_HELD(&(np)->rn_lock));				\
	assert(!((np)->rn_flags & RC_NODE_DEAD));			\
	if (!rc_node_hold_flag((np), flag)) {				\
		(void) pthread_mutex_unlock(&(np)->rn_lock);		\
		assert((np) == (npp)->rnp_node);			\
		rc_node_clear(npp, 1);					\
		if ((mem) != NULL)					\
			free((mem));					\
		return (REP_PROTOCOL_FAIL_DELETED);			\
	}								\
}
2960 
2961 int
2962 rc_local_scope(uint32_t type, rc_node_ptr_t *out)
2963 {
2964 	if (type != REP_PROTOCOL_ENTITY_SCOPE) {
2965 		rc_node_clear(out, 0);
2966 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2967 	}
2968 
2969 	/*
2970 	 * the main scope never gets destroyed
2971 	 */
2972 	rc_node_assign(out, rc_scope);
2973 
2974 	return (REP_PROTOCOL_SUCCESS);
2975 }
2976 
2977 /*
2978  * Fails with
2979  *   _NOT_SET - npp is not set
2980  *   _DELETED - the node npp pointed at has been deleted
2981  *   _TYPE_MISMATCH - type is not _SCOPE
2982  *   _NOT_FOUND - scope has no parent
2983  */
2984 static int
2985 rc_scope_parent_scope(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
2986 {
2987 	rc_node_t *np;
2988 
2989 	rc_node_clear(out, 0);
2990 
2991 	RC_NODE_PTR_GET_CHECK(np, npp);
2992 
2993 	if (type != REP_PROTOCOL_ENTITY_SCOPE)
2994 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
2995 
2996 	return (REP_PROTOCOL_FAIL_NOT_FOUND);
2997 }
2998 
2999 static int rc_node_pg_check_read_protect(rc_node_t *);
3000 
3001 /*
3002  * Fails with
3003  *   _NOT_SET
3004  *   _DELETED
3005  *   _NOT_APPLICABLE
3006  *   _NOT_FOUND
3007  *   _BAD_REQUEST
3008  *   _TRUNCATED
3009  *   _NO_RESOURCES
3010  */
int
rc_node_name(rc_node_ptr_t *npp, char *buf, size_t sz, uint32_t answertype,
    size_t *sz_out)
{
	size_t actual;
	rc_node_t *np;

	/* the protocol passes the buffer size in *sz_out as well */
	assert(sz == *sz_out);

	RC_NODE_PTR_GET_CHECK(np, npp);

	/* composed property groups answer with their top-level pg */
	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
		np = np->rn_cchain[0];
		RC_NODE_CHECK(np);
	}

	switch (answertype) {
	case RP_ENTITY_NAME_NAME:
		if (np->rn_name == NULL)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		actual = strlcpy(buf, np->rn_name, sz);
		break;
	case RP_ENTITY_NAME_PGTYPE:
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		actual = strlcpy(buf, np->rn_type, sz);
		break;
	case RP_ENTITY_NAME_PGFLAGS:
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		actual = snprintf(buf, sz, "%d", np->rn_pgflags);
		break;
	case RP_ENTITY_NAME_SNAPLEVEL_SCOPE:
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		actual = strlcpy(buf, np->rn_snaplevel->rsl_scope, sz);
		break;
	case RP_ENTITY_NAME_SNAPLEVEL_SERVICE:
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		actual = strlcpy(buf, np->rn_snaplevel->rsl_service, sz);
		break;
	case RP_ENTITY_NAME_SNAPLEVEL_INSTANCE:
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		/* service-level snaplevels have no instance */
		if (np->rn_snaplevel->rsl_instance == NULL)
			return (REP_PROTOCOL_FAIL_NOT_FOUND);
		actual = strlcpy(buf, np->rn_snaplevel->rsl_instance, sz);
		break;
	case RP_ENTITY_NAME_PGREADPROT:
	{
		int ret;

		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
			return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		ret = rc_node_pg_check_read_protect(np);
		assert(ret != REP_PROTOCOL_FAIL_TYPE_MISMATCH);
		/* the answer is "1" if the pg is read-protected, else "0" */
		switch (ret) {
		case REP_PROTOCOL_FAIL_PERMISSION_DENIED:
			actual = snprintf(buf, sz, "1");
			break;
		case REP_PROTOCOL_SUCCESS:
			actual = snprintf(buf, sz, "0");
			break;
		default:
			return (ret);
		}
		break;
	}
	default:
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
	}
	/* strlcpy()/snprintf() return the untruncated length */
	if (actual >= sz)
		return (REP_PROTOCOL_FAIL_TRUNCATED);

	*sz_out = actual;
	return (REP_PROTOCOL_SUCCESS);
}
3089 
3090 int
3091 rc_node_get_property_type(rc_node_ptr_t *npp, rep_protocol_value_type_t *out)
3092 {
3093 	rc_node_t *np;
3094 
3095 	RC_NODE_PTR_GET_CHECK(np, npp);
3096 
3097 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
3098 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3099 
3100 	*out = np->rn_valtype;
3101 
3102 	return (REP_PROTOCOL_SUCCESS);
3103 }
3104 
3105 /*
3106  * Get np's parent.  If np is deleted, returns _DELETED.  Otherwise puts a hold
3107  * on the parent, returns a pointer to it in *out, and returns _SUCCESS.
3108  */
static int
rc_node_parent(rc_node_t *np, rc_node_t **out)
{
	rc_node_t *pnp;
	rc_node_t *np_orig;

	/* for composed property groups, follow the top-level pg */
	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
		RC_NODE_CHECK_AND_LOCK(np);
	} else {
		np = np->rn_cchain[0];
		RC_NODE_CHECK_AND_LOCK(np);
	}

	np_orig = np;
	rc_node_hold_locked(np);		/* simplifies the remainder */

	for (;;) {
		/* wait out any transaction or parent lookup in progress */
		if (!rc_node_wait_flag(np,
		    RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
			rc_node_rele_locked(np);
			return (REP_PROTOCOL_FAIL_DELETED);
		}

		if (!(np->rn_flags & RC_NODE_OLD))
			break;

		/*
		 * The node has been superseded; look up its current
		 * incarnation in the cache and retry with that.
		 */
		rc_node_rele_locked(np);
		np = cache_lookup(&np_orig->rn_id);
		assert(np != np_orig);

		if (np == NULL)
			goto deleted;
		(void) pthread_mutex_lock(&np->rn_lock);
	}

	/* guaranteed to succeed without dropping the lock */
	if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		*out = NULL;
		rc_node_rele(np);
		return (REP_PROTOCOL_FAIL_DELETED);
	}

	assert(np->rn_parent != NULL);
	pnp = np->rn_parent;
	(void) pthread_mutex_unlock(&np->rn_lock);

	/*
	 * USING_PARENT keeps rn_parent stable while we drop np's lock
	 * so we can take the parent's lock first (parent-before-child
	 * is the lock order used here).
	 */
	(void) pthread_mutex_lock(&pnp->rn_lock);
	(void) pthread_mutex_lock(&np->rn_lock);
	rc_node_rele_flag(np, RC_NODE_USING_PARENT);
	(void) pthread_mutex_unlock(&np->rn_lock);

	rc_node_hold_locked(pnp);	/* the hold handed to the caller */

	(void) pthread_mutex_unlock(&pnp->rn_lock);

	rc_node_rele(np);
	*out = pnp;
	return (REP_PROTOCOL_SUCCESS);

deleted:
	rc_node_rele(np);
	return (REP_PROTOCOL_FAIL_DELETED);
}
3173 
3174 /*
3175  * Fails with
3176  *   _NOT_SET
3177  *   _DELETED
3178  */
static int
rc_node_ptr_parent(rc_node_ptr_t *npp, rc_node_t **out)
{
	rc_node_t *np;

	/* validate the client pointer, then defer to rc_node_parent() */
	RC_NODE_PTR_GET_CHECK(np, npp);

	return (rc_node_parent(np, out));
}
3188 
3189 /*
3190  * Fails with
3191  *   _NOT_SET - npp is not set
3192  *   _DELETED - the node npp pointed at has been deleted
3193  *   _TYPE_MISMATCH - npp's node's parent is not of type type
3194  *
3195  * If npp points to a scope, can also fail with
3196  *   _NOT_FOUND - scope has no parent
3197  */
3198 int
3199 rc_node_get_parent(rc_node_ptr_t *npp, uint32_t type, rc_node_ptr_t *out)
3200 {
3201 	rc_node_t *pnp;
3202 	int rc;
3203 
3204 	if (npp->rnp_node != NULL &&
3205 	    npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE)
3206 		return (rc_scope_parent_scope(npp, type, out));
3207 
3208 	if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS) {
3209 		rc_node_clear(out, 0);
3210 		return (rc);
3211 	}
3212 
3213 	if (type != pnp->rn_id.rl_type) {
3214 		rc_node_rele(pnp);
3215 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
3216 	}
3217 
3218 	rc_node_assign(out, pnp);
3219 	rc_node_rele(pnp);
3220 
3221 	return (REP_PROTOCOL_SUCCESS);
3222 }
3223 
3224 int
3225 rc_node_parent_type(rc_node_ptr_t *npp, uint32_t *type_out)
3226 {
3227 	rc_node_t *pnp;
3228 	int rc;
3229 
3230 	if (npp->rnp_node != NULL &&
3231 	    npp->rnp_node->rn_id.rl_type == REP_PROTOCOL_ENTITY_SCOPE) {
3232 		*type_out = REP_PROTOCOL_ENTITY_SCOPE;
3233 		return (REP_PROTOCOL_SUCCESS);
3234 	}
3235 
3236 	if ((rc = rc_node_ptr_parent(npp, &pnp)) != REP_PROTOCOL_SUCCESS)
3237 		return (rc);
3238 
3239 	*type_out = pnp->rn_id.rl_type;
3240 
3241 	rc_node_rele(pnp);
3242 
3243 	return (REP_PROTOCOL_SUCCESS);
3244 }
3245 
3246 /*
3247  * Fails with
3248  *   _INVALID_TYPE - type is invalid
3249  *   _TYPE_MISMATCH - np doesn't carry children of type type
3250  *   _DELETED - np has been deleted
3251  *   _NOT_FOUND - no child with that name/type combo found
3252  *   _NO_RESOURCES
3253  *   _BACKEND_ACCESS
3254  */
int
rc_node_get_child(rc_node_ptr_t *npp, const char *name, uint32_t type,
    rc_node_ptr_t *outp)
{
	rc_node_t *np, *cp;
	rc_node_t *child = NULL;
	int ret, idx;

	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
	if ((ret = rc_check_type_name(type, name)) == REP_PROTOCOL_SUCCESS) {
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
			ret = rc_node_find_named_child(np, name, type, &child);
		} else {
			/*
			 * Composed pg: search each member of the
			 * composition chain in order.  np's lock is
			 * dropped while each member is locked in turn.
			 * NOTE(review): this relies on rn_cchain[] being
			 * stable once the composed pg exists -- confirm
			 * against the construction code.
			 */
			(void) pthread_mutex_unlock(&np->rn_lock);
			ret = REP_PROTOCOL_SUCCESS;
			for (idx = 0; idx < COMPOSITION_DEPTH; idx++) {
				cp = np->rn_cchain[idx];
				if (cp == NULL)
					break;
				RC_NODE_CHECK_AND_LOCK(cp);
				ret = rc_node_find_named_child(cp, name, type,
				    &child);
				(void) pthread_mutex_unlock(&cp->rn_lock);
				/*
				 * loop only if we succeeded, but no child of
				 * the correct name was found.
				 */
				if (ret != REP_PROTOCOL_SUCCESS ||
				    child != NULL)
					break;
			}
			(void) pthread_mutex_lock(&np->rn_lock);
		}
	}
	(void) pthread_mutex_unlock(&np->rn_lock);

	if (ret == REP_PROTOCOL_SUCCESS) {
		/* child (if found) is held; transfer that to outp */
		rc_node_assign(outp, child);
		if (child != NULL)
			rc_node_rele(child);
		else
			ret = REP_PROTOCOL_FAIL_NOT_FOUND;
	} else {
		rc_node_assign(outp, NULL);
	}
	return (ret);
}
3302 
/*
 * Refresh npp so it refers to the current incarnation of its property
 * group or snapshot.  Returns _SUCCESS if the node was already current,
 * _DONE if npp was updated to a newer incarnation, _BAD_REQUEST for
 * other entity types, and _DELETED if the node is gone.
 */
int
rc_node_update(rc_node_ptr_t *npp)
{
	cache_bucket_t *bp;
	rc_node_t *np = npp->rnp_node;
	rc_node_t *nnp;
	rc_node_t *cpg = NULL;

	if (np != NULL &&
	    np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
		/*
		 * If we're updating a composed property group, actually
		 * update the top-level property group & return the
		 * appropriate value.  But leave *npp pointing at us.
		 */
		cpg = np;
		np = np->rn_cchain[0];
	}

	RC_NODE_CHECK(np);

	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP &&
	    np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT)
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);

	/*
	 * Find the current incarnation of this node in the cache,
	 * retrying while a transaction is in flight on it.
	 */
	for (;;) {
		bp = cache_hold(np->rn_hash);
		nnp = cache_lookup_unlocked(bp, &np->rn_id);
		if (nnp == NULL) {
			cache_release(bp);
			rc_node_clear(npp, 1);
			return (REP_PROTOCOL_FAIL_DELETED);
		}
		/*
		 * grab the lock before dropping the cache bucket, so
		 * that no one else can sneak in
		 */
		(void) pthread_mutex_lock(&nnp->rn_lock);
		cache_release(bp);

		if (!(nnp->rn_flags & RC_NODE_IN_TX) ||
		    !rc_node_wait_flag(nnp, RC_NODE_IN_TX))
			break;

		rc_node_rele_locked(nnp);
	}

	/*
	 * If it is dead, we want to update it so that it will continue to
	 * report being dead.
	 */
	if (nnp->rn_flags & RC_NODE_DEAD) {
		(void) pthread_mutex_unlock(&nnp->rn_lock);
		if (nnp != np && cpg == NULL)
			rc_node_assign(npp, nnp);	/* updated */
		rc_node_rele(nnp);
		return (REP_PROTOCOL_FAIL_DELETED);
	}

	assert(!(nnp->rn_flags & RC_NODE_OLD));
	(void) pthread_mutex_unlock(&nnp->rn_lock);

	if (nnp != np && cpg == NULL)
		rc_node_assign(npp, nnp);		/* updated */

	rc_node_rele(nnp);

	/* _DONE tells the client its handle now refers to a newer node */
	return ((nnp == np)? REP_PROTOCOL_SUCCESS : REP_PROTOCOL_DONE);
}
3372 
3373 /*
3374  * does a generic modification check, for creation, deletion, and snapshot
3375  * management only.  Property group transactions have different checks.
3376  *
3377  * The string returned to *match_auth must be freed.
3378  */
int
rc_node_modify_permission_check(char **match_auth)
{
	int rc = REP_PROTOCOL_SUCCESS;
	permcheck_t *pcp;
	int granted;

	/* on any failure path *match_auth is left NULL */
	*match_auth = NULL;
#ifdef NATIVE_BUILD
	/* native builds have no RBAC; it's all-or-nothing by privilege */
	if (!client_is_privileged()) {
		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
	}
	return (rc);
#else
	/* alternate repositories are not permission-checked */
	if (is_main_repository == 0)
		return (REP_PROTOCOL_SUCCESS);
	pcp = pc_create();
	if (pcp != NULL) {
		rc = perm_add_enabling(pcp, AUTH_MODIFY);

		if (rc == REP_PROTOCOL_SUCCESS) {
			granted = perm_granted(pcp);

			if (granted < 0) {
				rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
			} else {
				/*
				 * Copy off the authorization
				 * string before freeing pcp.
				 */
				*match_auth =
				    strdup(pcp->pc_auth_string);
				if (*match_auth == NULL)
					rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
			}
		}

		pc_free(pcp);
	} else {
		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
	}

	/*
	 * granted is only examined when every step above succeeded,
	 * which implies perm_granted() ran and returned >= 0.
	 */
	if (rc == REP_PROTOCOL_SUCCESS && !granted)
		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;

	return (rc);
#endif /* NATIVE_BUILD */
}
3427 
3428 /*
3429  * Native builds are done to create svc.configd-native.  This program runs
3430  * only on the Solaris build machines to create the seed repository, and it
3431  * is compiled against the build machine's header files.  The ADT_smf_*
3432  * symbols may not be defined in these header files.  For this reason
3433  * smf_annotation_event(), _smf_audit_event() and special_property_event()
3434  * are not compiled for native builds.
3435  */
3436 #ifndef	NATIVE_BUILD
3437 
3438 /*
3439  * This function generates an annotation audit event if one has been setup.
3440  * Annotation events should only be generated immediately before the audit
3441  * record from the first attempt to modify the repository from a client
3442  * which has requested an annotation.
3443  */
3444 static void
3445 smf_annotation_event(int status, int return_val)
3446 {
3447 	adt_session_data_t *session;
3448 	adt_event_data_t *event = NULL;
3449 	char file[MAXPATHLEN];
3450 	char operation[REP_PROTOCOL_NAME_LEN];
3451 
3452 	/* Don't audit if we're using an alternate repository. */
3453 	if (is_main_repository == 0)
3454 		return;
3455 
3456 	if (client_annotation_needed(operation, sizeof (operation), file,
3457 	    sizeof (file)) == 0) {
3458 		return;
3459 	}
3460 	if (file[0] == 0) {
3461 		(void) strlcpy(file, "NO FILE", sizeof (file));
3462 	}
3463 	if (operation[0] == 0) {
3464 		(void) strlcpy(operation, "NO OPERATION",
3465 		    sizeof (operation));
3466 	}
3467 	if ((session = get_audit_session()) == NULL)
3468 		return;
3469 	if ((event = adt_alloc_event(session, ADT_smf_annotation)) == NULL) {
3470 		uu_warn("smf_annotation_event cannot allocate event "
3471 		    "data.  %s\n", strerror(errno));
3472 		return;
3473 	}
3474 	event->adt_smf_annotation.operation = operation;
3475 	event->adt_smf_annotation.file = file;
3476 	if (adt_put_event(event, status, return_val) == 0) {
3477 		client_annotation_finished();
3478 	} else {
3479 		uu_warn("smf_annotation_event failed to put event.  "
3480 		    "%s\n", strerror(errno));
3481 	}
3482 	adt_free_event(event);
3483 }
3484 
3485 /*
3486  * _smf_audit_event interacts with the security auditing system to generate
3487  * an audit event structure.  It establishes an audit session and allocates
3488  * an audit event.  The event is filled in from the audit data, and
3489  * adt_put_event is called to generate the event.
3490  */
static void
_smf_audit_event(au_event_t event_id, int status, int return_val,
    audit_event_data_t *data)
{
	char *auth_used;
	char *fmri;
	char *prop_value;
	adt_session_data_t *session;
	adt_event_data_t *event = NULL;

	/* Don't audit if we're using an alternate repository */
	if (is_main_repository == 0)
		return;

	/*
	 * Emit any pending annotation event first, so it immediately
	 * precedes this client's first audit record.
	 */
	smf_annotation_event(status, return_val);
	if ((session = get_audit_session()) == NULL)
		return;
	if ((event = adt_alloc_event(session, event_id)) == NULL) {
		uu_warn("_smf_audit_event cannot allocate event "
		    "data.  %s\n", strerror(errno));
		return;
	}

	/*
	 * Handle possibility of NULL authorization strings, FMRIs and
	 * property values.
	 */
	if (data->ed_auth == NULL) {
		auth_used = "PRIVILEGED";
	} else {
		auth_used = data->ed_auth;
	}
	if (data->ed_fmri == NULL) {
		syslog(LOG_WARNING, "_smf_audit_event called with "
		    "empty FMRI string");
		fmri = "UNKNOWN FMRI";
	} else {
		fmri = data->ed_fmri;
	}
	if (data->ed_prop_value == NULL) {
		prop_value = "";
	} else {
		prop_value = data->ed_prop_value;
	}

	/* Fill in the event data.  Each event type has its own layout. */
	switch (event_id) {
	case ADT_smf_attach_snap:
		event->adt_smf_attach_snap.auth_used = auth_used;
		event->adt_smf_attach_snap.old_fmri = data->ed_old_fmri;
		event->adt_smf_attach_snap.old_name = data->ed_old_name;
		event->adt_smf_attach_snap.new_fmri = fmri;
		event->adt_smf_attach_snap.new_name = data->ed_snapname;
		break;
	case ADT_smf_change_prop:
		event->adt_smf_change_prop.auth_used = auth_used;
		event->adt_smf_change_prop.fmri = fmri;
		event->adt_smf_change_prop.type = data->ed_type;
		event->adt_smf_change_prop.value = prop_value;
		break;
	case ADT_smf_clear:
		event->adt_smf_clear.auth_used = auth_used;
		event->adt_smf_clear.fmri = fmri;
		break;
	case ADT_smf_create:
		event->adt_smf_create.fmri = fmri;
		event->adt_smf_create.auth_used = auth_used;
		break;
	case ADT_smf_create_npg:
		event->adt_smf_create_npg.auth_used = auth_used;
		event->adt_smf_create_npg.fmri = fmri;
		event->adt_smf_create_npg.type = data->ed_type;
		break;
	case ADT_smf_create_pg:
		event->adt_smf_create_pg.auth_used = auth_used;
		event->adt_smf_create_pg.fmri = fmri;
		event->adt_smf_create_pg.type = data->ed_type;
		break;
	case ADT_smf_create_prop:
		event->adt_smf_create_prop.auth_used = auth_used;
		event->adt_smf_create_prop.fmri = fmri;
		event->adt_smf_create_prop.type = data->ed_type;
		event->adt_smf_create_prop.value = prop_value;
		break;
	case ADT_smf_create_snap:
		event->adt_smf_create_snap.auth_used = auth_used;
		event->adt_smf_create_snap.fmri = fmri;
		event->adt_smf_create_snap.name = data->ed_snapname;
		break;
	case ADT_smf_degrade:
		event->adt_smf_degrade.auth_used = auth_used;
		event->adt_smf_degrade.fmri = fmri;
		break;
	case ADT_smf_delete:
		event->adt_smf_delete.fmri = fmri;
		event->adt_smf_delete.auth_used = auth_used;
		break;
	case ADT_smf_delete_npg:
		event->adt_smf_delete_npg.auth_used = auth_used;
		event->adt_smf_delete_npg.fmri = fmri;
		event->adt_smf_delete_npg.type = data->ed_type;
		break;
	case ADT_smf_delete_pg:
		event->adt_smf_delete_pg.auth_used = auth_used;
		event->adt_smf_delete_pg.fmri = fmri;
		event->adt_smf_delete_pg.type = data->ed_type;
		break;
	case ADT_smf_delete_prop:
		event->adt_smf_delete_prop.auth_used = auth_used;
		event->adt_smf_delete_prop.fmri = fmri;
		break;
	case ADT_smf_delete_snap:
		event->adt_smf_delete_snap.auth_used = auth_used;
		event->adt_smf_delete_snap.fmri = fmri;
		event->adt_smf_delete_snap.name = data->ed_snapname;
		break;
	case ADT_smf_disable:
		event->adt_smf_disable.auth_used = auth_used;
		event->adt_smf_disable.fmri = fmri;
		break;
	case ADT_smf_enable:
		event->adt_smf_enable.auth_used = auth_used;
		event->adt_smf_enable.fmri = fmri;
		break;
	case ADT_smf_immediate_degrade:
		event->adt_smf_immediate_degrade.auth_used = auth_used;
		event->adt_smf_immediate_degrade.fmri = fmri;
		break;
	case ADT_smf_immediate_maintenance:
		event->adt_smf_immediate_maintenance.auth_used = auth_used;
		event->adt_smf_immediate_maintenance.fmri = fmri;
		break;
	case ADT_smf_immtmp_maintenance:
		event->adt_smf_immtmp_maintenance.auth_used = auth_used;
		event->adt_smf_immtmp_maintenance.fmri = fmri;
		break;
	case ADT_smf_maintenance:
		event->adt_smf_maintenance.auth_used = auth_used;
		event->adt_smf_maintenance.fmri = fmri;
		break;
	case ADT_smf_milestone:
		event->adt_smf_milestone.auth_used = auth_used;
		event->adt_smf_milestone.fmri = fmri;
		break;
	case ADT_smf_read_prop:
		event->adt_smf_read_prop.auth_used = auth_used;
		event->adt_smf_read_prop.fmri = fmri;
		break;
	case ADT_smf_refresh:
		event->adt_smf_refresh.auth_used = auth_used;
		event->adt_smf_refresh.fmri = fmri;
		break;
	case ADT_smf_restart:
		event->adt_smf_restart.auth_used = auth_used;
		event->adt_smf_restart.fmri = fmri;
		break;
	case ADT_smf_tmp_disable:
		event->adt_smf_tmp_disable.auth_used = auth_used;
		event->adt_smf_tmp_disable.fmri = fmri;
		break;
	case ADT_smf_tmp_enable:
		event->adt_smf_tmp_enable.auth_used = auth_used;
		event->adt_smf_tmp_enable.fmri = fmri;
		break;
	case ADT_smf_tmp_maintenance:
		event->adt_smf_tmp_maintenance.auth_used = auth_used;
		event->adt_smf_tmp_maintenance.fmri = fmri;
		break;
	default:
		abort();	/* Need to cover all SMF event IDs */
	}

	if (adt_put_event(event, status, return_val) != 0) {
		uu_warn("_smf_audit_event failed to put event.  %s\n",
		    strerror(errno));
	}
	adt_free_event(event);
}
3669 
3670 /*
3671  * Determine if the combination of the property group at pg_name and the
3672  * property at prop_name are in the set of special startd properties.  If
3673  * they are, a special audit event will be generated.
3674  */
3675 static void
3676 special_property_event(audit_event_data_t *evdp, const char *prop_name,
3677     char *pg_name, int status, int return_val, tx_commit_data_t *tx_data,
3678     size_t cmd_no)
3679 {
3680 	au_event_t event_id;
3681 	audit_special_prop_item_t search_key;
3682 	audit_special_prop_item_t *found;
3683 
3684 	/* Use bsearch to find the special property information. */
3685 	search_key.api_prop_name = prop_name;
3686 	search_key.api_pg_name = pg_name;
3687 	found = (audit_special_prop_item_t *)bsearch(&search_key,
3688 	    special_props_list, SPECIAL_PROP_COUNT,
3689 	    sizeof (special_props_list[0]), special_prop_compare);
3690 	if (found == NULL) {
3691 		/* Not a special property. */
3692 		return;
3693 	}
3694 
3695 	/* Get the event id */
3696 	if (found->api_event_func == NULL) {
3697 		event_id = found->api_event_id;
3698 	} else {
3699 		if ((*found->api_event_func)(tx_data, cmd_no,
3700 		    found->api_pg_name, &event_id) < 0)
3701 			return;
3702 	}
3703 
3704 	/* Generate the event. */
3705 	smf_audit_event(event_id, status, return_val, evdp);
3706 }
3707 #endif	/* NATIVE_BUILD */
3708 
3709 /*
3710  * Return a pointer to a string containing all the values of the command
3711  * specified by cmd_no with each value enclosed in quotes.  It is up to the
3712  * caller to free the memory at the returned pointer.
3713  */
static char *
generate_value_list(tx_commit_data_t *tx_data, size_t cmd_no)
{
	const char *cp;
	const char *cur_value;
	size_t byte_count = 0;
	uint32_t i;
	uint32_t nvalues;
	size_t str_size = 0;
	char *values = NULL;
	char *vp;

	if (tx_cmd_nvalues(tx_data, cmd_no, &nvalues) != REP_PROTOCOL_SUCCESS)
		return (NULL);
	/*
	 * First determine the size of the buffer that we will need.  We
	 * will represent each property value surrounded by quotes with a
	 * space separating the values.  Thus, we need to find the total
	 * size of all the value strings and add 3 for each value.
	 *
	 * There is one catch, though.  We need to escape any internal
	 * quote marks in the values.  So for each quote in the value we
	 * need to add another byte to the buffer size.
	 */
	for (i = 0; i < nvalues; i++) {
		if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
		    REP_PROTOCOL_SUCCESS)
			return (NULL);
		for (cp = cur_value; *cp != 0; cp++) {
			/* embedded quotes become \" -- two bytes */
			byte_count += (*cp == '"') ? 2 : 1;
		}
		byte_count += 3;	/* surrounding quotes & space */
	}
	byte_count++;		/* nul terminator */
	values = malloc(byte_count);
	if (values == NULL)
		return (NULL);
	*values = 0;

	/* Now build up the string of values. */
	for (i = 0; i < nvalues; i++) {
		if (tx_cmd_value(tx_data, cmd_no, i, &cur_value) !=
		    REP_PROTOCOL_SUCCESS) {
			free(values);
			return (NULL);
		}
		(void) strlcat(values, "\"", byte_count);
		/* append the value by hand, escaping embedded quotes */
		for (cp = cur_value, vp = values + strlen(values);
		    *cp != 0; cp++) {
			if (*cp == '"') {
				*vp++ = '\\';
				*vp++ = '"';
			} else {
				*vp++ = *cp;
			}
		}
		*vp = 0;
		/* close the quote and add the separator space */
		str_size = strlcat(values, "\" ", byte_count);
		assert(str_size < byte_count);
	}
	if (str_size > 0)
		values[str_size - 1] = 0;	/* get rid of trailing space */
	return (values);
}
3778 
3779 /*
3780  * generate_property_events takes the transaction commit data at tx_data
3781  * and generates an audit event for each command.
3782  *
3783  * Native builds are done to create svc.configd-native.  This program runs
3784  * only on the Solaris build machines to create the seed repository.  Thus,
3785  * no audit events should be generated when running svc.configd-native.
3786  */
static void
generate_property_events(
	tx_commit_data_t *tx_data,
	char *pg_fmri,		/* FMRI of property group */
	char *auth_string,
	int auth_status,
	int auth_ret_value)
{
#ifndef	NATIVE_BUILD
	enum rep_protocol_transaction_action action;
	audit_event_data_t audit_data;
	size_t count;
	size_t cmd_no;
	char *cp;
	au_event_t event_id;
	char fmri[REP_PROTOCOL_FMRI_LEN];
	char pg_name[REP_PROTOCOL_NAME_LEN];
	char *pg_end;		/* End of prop. group fmri */
	const char *prop_name;
	uint32_t ptype;
	char prop_type[3];
	enum rep_protocol_responseid rc;
	size_t sz_out;

	/* Make sure we have something to do. */
	if (tx_data == NULL)
		return;
	if ((count = tx_cmd_count(tx_data)) == 0)
		return;

	/* Copy the property group fmri */
	pg_end = fmri;
	pg_end += strlcpy(fmri, pg_fmri, sizeof (fmri));

	/*
	 * Get the property group name.  It is the first component after
	 * the last occurrence of SCF_FMRI_PROPERTYGRP_PREFIX in the fmri.
	 */
	cp = strstr(pg_fmri, SCF_FMRI_PROPERTYGRP_PREFIX);
	if (cp == NULL) {
		pg_name[0] = 0;
	} else {
		cp += strlen(SCF_FMRI_PROPERTYGRP_PREFIX);
		(void) strlcpy(pg_name, cp, sizeof (pg_name));
	}

	audit_data.ed_auth = auth_string;
	audit_data.ed_fmri = fmri;
	audit_data.ed_type = prop_type;

	/*
	 * Property type is two characters (see
	 * rep_protocol_value_type_t), so terminate the string.
	 */
	prop_type[2] = 0;

	for (cmd_no = 0; cmd_no < count; cmd_no++) {
		/* Construct FMRI of the property */
		*pg_end = 0;	/* truncate fmri back to the pg portion */
		if (tx_cmd_prop(tx_data, cmd_no, &prop_name) !=
		    REP_PROTOCOL_SUCCESS) {
			continue;
		}
		rc = rc_concat_fmri_element(fmri, sizeof (fmri), &sz_out,
		    prop_name, REP_PROTOCOL_ENTITY_PROPERTY);
		if (rc != REP_PROTOCOL_SUCCESS) {
			/*
			 * If we can't get the FMRI, we'll abandon this
			 * command
			 */
			continue;
		}

		/* Generate special property event if necessary. */
		special_property_event(&audit_data, prop_name, pg_name,
		    auth_status, auth_ret_value, tx_data, cmd_no);

		/* Capture rest of audit data. */
		if (tx_cmd_prop_type(tx_data, cmd_no, &ptype) !=
		    REP_PROTOCOL_SUCCESS) {
			continue;
		}
		prop_type[0] = REP_PROTOCOL_BASE_TYPE(ptype);
		prop_type[1] = REP_PROTOCOL_SUBTYPE(ptype);
		/* ed_prop_value is malloc'd (or NULL); freed on all paths */
		audit_data.ed_prop_value = generate_value_list(tx_data, cmd_no);

		/* Determine the event type. */
		if (tx_cmd_action(tx_data, cmd_no, &action) !=
		    REP_PROTOCOL_SUCCESS) {
			free(audit_data.ed_prop_value);
			continue;
		}
		switch (action) {
		case REP_PROTOCOL_TX_ENTRY_NEW:
			event_id = ADT_smf_create_prop;
			break;
		case REP_PROTOCOL_TX_ENTRY_CLEAR:
			event_id = ADT_smf_change_prop;
			break;
		case REP_PROTOCOL_TX_ENTRY_REPLACE:
			event_id = ADT_smf_change_prop;
			break;
		case REP_PROTOCOL_TX_ENTRY_DELETE:
			event_id = ADT_smf_delete_prop;
			break;
		default:
			assert(0);	/* Missing a case */
			free(audit_data.ed_prop_value);
			continue;
		}

		/* Generate the event. */
		smf_audit_event(event_id, auth_status, auth_ret_value,
		    &audit_data);
		free(audit_data.ed_prop_value);
	}
#endif /* NATIVE_BUILD */
}
3905 
3906 /*
3907  * Fails with
3908  *   _DELETED - node has been deleted
3909  *   _NOT_SET - npp is reset
3910  *   _NOT_APPLICABLE - type is _PROPERTYGRP
3911  *   _INVALID_TYPE - node is corrupt or type is invalid
3912  *   _TYPE_MISMATCH - node cannot have children of type type
3913  *   _BAD_REQUEST - name is invalid
3914  *		    cannot create children for this type of node
3915  *   _NO_RESOURCES - out of memory, or could not allocate new id
3916  *   _PERMISSION_DENIED
3917  *   _BACKEND_ACCESS
3918  *   _BACKEND_READONLY
3919  *   _EXISTS - child already exists
3920  *   _TRUNCATED - truncated FMRI for the audit record
3921  */
3922 int
3923 rc_node_create_child(rc_node_ptr_t *npp, uint32_t type, const char *name,
3924     rc_node_ptr_t *cpp)
3925 {
3926 	rc_node_t *np;
3927 	rc_node_t *cp = NULL;
3928 	int rc,  perm_rc;
3929 	size_t sz_out;
3930 	char fmri[REP_PROTOCOL_FMRI_LEN];
3931 	audit_event_data_t audit_data;
3932 
3933 	rc_node_clear(cpp, 0);
3934 
3935 	perm_rc = rc_node_modify_permission_check(&audit_data.ed_auth);
3936 
3937 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
3938 
3939 	audit_data.ed_fmri = fmri;
3940 	audit_data.ed_auth = NULL;
3941 
3942 	/*
3943 	 * there is a separate interface for creating property groups
3944 	 */
3945 	if (type == REP_PROTOCOL_ENTITY_PROPERTYGRP) {
3946 		(void) pthread_mutex_unlock(&np->rn_lock);
3947 		free(audit_data.ed_auth);
3948 		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3949 	}
3950 
3951 	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
3952 		(void) pthread_mutex_unlock(&np->rn_lock);
3953 		np = np->rn_cchain[0];
3954 		RC_NODE_CHECK_AND_LOCK(np);
3955 	}
3956 
3957 	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
3958 	    REP_PROTOCOL_SUCCESS) {
3959 		(void) pthread_mutex_unlock(&np->rn_lock);
3960 		free(audit_data.ed_auth);
3961 		return (rc);
3962 	}
3963 	if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS) {
3964 		(void) pthread_mutex_unlock(&np->rn_lock);
3965 		free(audit_data.ed_auth);
3966 		return (rc);
3967 	}
3968 
3969 	if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
3970 	    name, type)) != REP_PROTOCOL_SUCCESS) {
3971 		(void) pthread_mutex_unlock(&np->rn_lock);
3972 		free(audit_data.ed_auth);
3973 		return (rc);
3974 	}
3975 	if (perm_rc != REP_PROTOCOL_SUCCESS) {
3976 		(void) pthread_mutex_unlock(&np->rn_lock);
3977 		smf_audit_event(ADT_smf_create, ADT_FAILURE,
3978 		    ADT_FAIL_VALUE_AUTH, &audit_data);
3979 		free(audit_data.ed_auth);
3980 		return (perm_rc);
3981 	}
3982 
3983 	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
3984 	    audit_data.ed_auth);
3985 	(void) pthread_mutex_unlock(&np->rn_lock);
3986 
3987 	rc = object_create(np, type, name, &cp);
3988 	assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
3989 
3990 	if (rc == REP_PROTOCOL_SUCCESS) {
3991 		rc_node_assign(cpp, cp);
3992 		rc_node_rele(cp);
3993 	}
3994 
3995 	(void) pthread_mutex_lock(&np->rn_lock);
3996 	rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
3997 	(void) pthread_mutex_unlock(&np->rn_lock);
3998 
3999 	if (rc == REP_PROTOCOL_SUCCESS) {
4000 		smf_audit_event(ADT_smf_create, ADT_SUCCESS, ADT_SUCCESS,
4001 		    &audit_data);
4002 	}
4003 
4004 	free(audit_data.ed_auth);
4005 
4006 	return (rc);
4007 }
4008 
/*
 * Create a property group child of *npp named name, of type pgtype.
 * flags may only contain SCF_PG_FLAG_NONPERSISTENT.  On success the new
 * node is assigned to *cpp.  In the main repository the client must hold
 * the value of AUTH_MODIFY (plus the smf.modify.<type> authorization for
 * the pg type, if any); the actions and general_ovr non-persistent pgs
 * of an instance may alternatively be created with AUTH_MANAGE or the
 * instance's action authorization.  An ADT_smf_create_pg (or _npg) audit
 * event is generated on success and on authorization failure.
 */
int
rc_node_create_child_pg(rc_node_ptr_t *npp, uint32_t type, const char *name,
    const char *pgtype, uint32_t flags, rc_node_ptr_t *cpp)
{
	rc_node_t *np;
	rc_node_t *cp;
	int rc;
	permcheck_t *pcp;
	int granted;
	char fmri[REP_PROTOCOL_FMRI_LEN];
	audit_event_data_t audit_data;
	au_event_t event_id;
	size_t sz_out;

	audit_data.ed_auth = NULL;
	audit_data.ed_fmri = fmri;
	audit_data.ed_type = (char *)pgtype;

	rc_node_clear(cpp, 0);

	/* verify flags is valid */
	if (flags & ~SCF_PG_FLAG_NONPERSISTENT)
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);

	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);

	/* This interface only creates property groups. */
	if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
		rc_node_rele(np);
		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
	}

	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
	    REP_PROTOCOL_SUCCESS) {
		rc_node_rele(np);
		return (rc);
	}
	if ((rc = rc_check_type_name(type, name)) != REP_PROTOCOL_SUCCESS ||
	    (rc = rc_check_pgtype_name(pgtype)) != REP_PROTOCOL_SUCCESS) {
		rc_node_rele(np);
		return (rc);
	}

#ifdef NATIVE_BUILD
	if (!client_is_privileged()) {
		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
	}
#else
	/* Audit event depends on persistence of the new pg. */
	if (flags & SCF_PG_FLAG_NONPERSISTENT) {
		event_id = ADT_smf_create_npg;
	} else {
		event_id = ADT_smf_create_pg;
	}
	/* Construct the new pg's FMRI for the audit record. */
	if ((rc = rc_get_fmri_and_concat(np, fmri, sizeof (fmri), &sz_out,
	    name, REP_PROTOCOL_ENTITY_PROPERTYGRP)) != REP_PROTOCOL_SUCCESS) {
		rc_node_rele(np);
		return (rc);
	}

	if (is_main_repository) {
		/* Must have .smf.modify or smf.modify.<type> authorization */
		pcp = pc_create();
		if (pcp != NULL) {
			rc = perm_add_enabling(pcp, AUTH_MODIFY);

			if (rc == REP_PROTOCOL_SUCCESS) {
				const char * const auth =
				    perm_auth_for_pgtype(pgtype);

				if (auth != NULL)
					rc = perm_add_enabling(pcp, auth);
			}

			/*
			 * .manage or $action_authorization can be used to
			 * create the actions pg and the general_ovr pg.
			 */
			if (rc == REP_PROTOCOL_SUCCESS &&
			    (flags & SCF_PG_FLAG_NONPERSISTENT) != 0 &&
			    np->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE &&
			    ((strcmp(name, AUTH_PG_ACTIONS) == 0 &&
			    strcmp(pgtype, AUTH_PG_ACTIONS_TYPE) == 0) ||
			    (strcmp(name, AUTH_PG_GENERAL_OVR) == 0 &&
			    strcmp(pgtype, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
				rc = perm_add_enabling(pcp, AUTH_MANAGE);

				if (rc == REP_PROTOCOL_SUCCESS)
					rc = perm_add_inst_action_auth(pcp, np);
			}

			if (rc == REP_PROTOCOL_SUCCESS) {
				granted = perm_granted(pcp);

				if (granted < 0) {
					rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
				} else {
					/*
					 * Copy out the authorization
					 * string before freeing pcp.
					 */
					audit_data.ed_auth =
					    strdup(pcp->pc_auth_string);
					if (audit_data.ed_auth == NULL) {
						/*
						 * Following code line
						 * cannot meet both the
						 * indentation and the line
						 * length requirements of
						 * cstyle.  Indentation has
						 * been sacrificed.
						 */
						/* CSTYLED */
					    rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
					}
				}
			}

			pc_free(pcp);
		} else {
			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
		}

		if (rc == REP_PROTOCOL_SUCCESS && !granted)
			rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
	} else {
		rc = REP_PROTOCOL_SUCCESS;
	}
#endif /* NATIVE_BUILD */

	if (rc != REP_PROTOCOL_SUCCESS) {
		rc_node_rele(np);
		smf_audit_event(event_id, ADT_FAILURE,
		    ADT_FAIL_VALUE_AUTH, &audit_data);
		if (audit_data.ed_auth != NULL)
			free(audit_data.ed_auth);
		return (rc);
	}

	/* NB: this macro returns (freeing ed_auth) if np has been deleted. */
	(void) pthread_mutex_lock(&np->rn_lock);
	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
	    audit_data.ed_auth);
	(void) pthread_mutex_unlock(&np->rn_lock);

	rc = object_create_pg(np, type, name, pgtype, flags, &cp);

	if (rc == REP_PROTOCOL_SUCCESS) {
		rc_node_assign(cpp, cp);
		rc_node_rele(cp);
	}

	(void) pthread_mutex_lock(&np->rn_lock);
	rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
	(void) pthread_mutex_unlock(&np->rn_lock);

	if (rc == REP_PROTOCOL_SUCCESS) {
		smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
		    &audit_data);
	}
	if (audit_data.ed_auth != NULL)
		free(audit_data.ed_auth);

	return (rc);
}
4171 
4172 static void
4173 rc_pg_notify_fire(rc_node_pg_notify_t *pnp)
4174 {
4175 	assert(MUTEX_HELD(&rc_pg_notify_lock));
4176 
4177 	if (pnp->rnpn_pg != NULL) {
4178 		uu_list_remove(pnp->rnpn_pg->rn_pg_notify_list, pnp);
4179 		(void) close(pnp->rnpn_fd);
4180 
4181 		pnp->rnpn_pg = NULL;
4182 		pnp->rnpn_fd = -1;
4183 	} else {
4184 		assert(pnp->rnpn_fd == -1);
4185 	}
4186 }
4187 
/*
 * Walk up the rn_parent chain from np_arg, taking RC_NODE_USING_PARENT
 * and a hold on each ancestor, recording the property group, instance,
 * and service nodes seen on the way.  Once the service is reached, hand
 * ndp to rc_notify_deletion() along with the names.  If any ancestor is
 * gone or of an unexpected type, ndp is freed instead.  All flags and
 * holds taken during the walk are dropped before returning.
 */
static void
rc_notify_node_delete(rc_notify_delete_t *ndp, rc_node_t *np_arg)
{
	rc_node_t *svc = NULL;
	rc_node_t *inst = NULL;
	rc_node_t *pg = NULL;
	rc_node_t *np = np_arg;
	rc_node_t *nnp;

	/* Climb until we have found the owning service. */
	while (svc == NULL) {
		(void) pthread_mutex_lock(&np->rn_lock);
		if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
			(void) pthread_mutex_unlock(&np->rn_lock);
			goto cleanup;
		}
		nnp = np->rn_parent;
		rc_node_hold_locked(np);	/* hold it in place */

		switch (np->rn_id.rl_type) {
		case REP_PROTOCOL_ENTITY_PROPERTYGRP:
			assert(pg == NULL);
			pg = np;
			break;
		case REP_PROTOCOL_ENTITY_INSTANCE:
			assert(inst == NULL);
			inst = np;
			break;
		case REP_PROTOCOL_ENTITY_SERVICE:
			assert(svc == NULL);
			svc = np;
			break;
		default:
			/* Unexpected ancestor type; abandon the walk. */
			rc_node_rele_flag(np, RC_NODE_USING_PARENT);
			rc_node_rele_locked(np);
			goto cleanup;
		}

		(void) pthread_mutex_unlock(&np->rn_lock);

		np = nnp;
		if (np == NULL)
			goto cleanup;
	}

	/* Queue the deletion notification; rc_notify_deletion() takes ndp. */
	rc_notify_deletion(ndp,
	    svc->rn_name,
	    inst != NULL ? inst->rn_name : NULL,
	    pg != NULL ? pg->rn_name : NULL);

	ndp = NULL;

cleanup:
	/* If the notification was never handed off, free it here. */
	if (ndp != NULL)
		uu_free(ndp);

	/* Drop the USING_PARENT flags and holds taken during the walk. */
	for (;;) {
		if (svc != NULL) {
			np = svc;
			svc = NULL;
		} else if (inst != NULL) {
			np = inst;
			inst = NULL;
		} else if (pg != NULL) {
			np = pg;
			pg = NULL;
		} else
			break;

		(void) pthread_mutex_lock(&np->rn_lock);
		rc_node_rele_flag(np, RC_NODE_USING_PARENT);
		rc_node_rele_locked(np);
	}
}
4261 
4262 /*
4263  * Hold RC_NODE_DYING_FLAGS on np's descendents.  If andformer is true, do
4264  * the same down the rn_former chain.
4265  */
static void
rc_node_delete_hold(rc_node_t *np, int andformer)
{
	rc_node_t *cp;

again:
	assert(MUTEX_HELD(&np->rn_lock));
	assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);

	for (cp = uu_list_first(np->rn_children); cp != NULL;
	    cp = uu_list_next(np->rn_children, cp)) {
		/* hand-over-hand: take cp's lock before dropping np's */
		(void) pthread_mutex_lock(&cp->rn_lock);
		(void) pthread_mutex_unlock(&np->rn_lock);
		if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS)) {
			/*
			 * already marked as dead -- can't happen, since that
			 * would require setting RC_NODE_CHILDREN_CHANGING
			 * in np, and we're holding that...
			 */
			abort();
		}
		rc_node_delete_hold(cp, andformer);	/* recurse, drop lock */

		(void) pthread_mutex_lock(&np->rn_lock);
	}
	if (andformer && (cp = np->rn_former) != NULL) {
		(void) pthread_mutex_lock(&cp->rn_lock);
		(void) pthread_mutex_unlock(&np->rn_lock);
		if (!rc_node_hold_flag(cp, RC_NODE_DYING_FLAGS))
			abort();		/* can't happen, see above */
		np = cp;
		goto again;		/* tail-recurse down rn_former */
	}
	(void) pthread_mutex_unlock(&np->rn_lock);
}
4301 
4302 /*
4303  * N.B.:  this function drops np->rn_lock on the way out.
4304  */
static void
rc_node_delete_rele(rc_node_t *np, int andformer)
{
	rc_node_t *cp;

again:
	assert(MUTEX_HELD(&np->rn_lock));
	assert((np->rn_flags & RC_NODE_DYING_FLAGS) == RC_NODE_DYING_FLAGS);

	/* Undo rc_node_delete_hold():  drop DYING_FLAGS on each child. */
	for (cp = uu_list_first(np->rn_children); cp != NULL;
	    cp = uu_list_next(np->rn_children, cp)) {
		(void) pthread_mutex_lock(&cp->rn_lock);
		(void) pthread_mutex_unlock(&np->rn_lock);
		rc_node_delete_rele(cp, andformer);	/* recurse, drop lock */
		(void) pthread_mutex_lock(&np->rn_lock);
	}
	if (andformer && (cp = np->rn_former) != NULL) {
		(void) pthread_mutex_lock(&cp->rn_lock);
		/* release np's flags before walking to its former version */
		rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
		(void) pthread_mutex_unlock(&np->rn_lock);

		np = cp;
		goto again;		/* tail-recurse down rn_former */
	}
	rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
	(void) pthread_mutex_unlock(&np->rn_lock);
}
4332 
/*
 * Mark cp RC_NODE_DEAD and, if cp is the current (non-RC_NODE_OLD)
 * version, detach it from its parent, fire its pg notifications, remove
 * it from the notify list, and remove it from the cache hash table.
 * Called with cp->rn_lock held; the lock is dropped and reacquired
 * internally but is held again on return.
 */
static void
rc_node_finish_delete(rc_node_t *cp)
{
	cache_bucket_t *bp;
	rc_node_pg_notify_t *pnp;

	assert(MUTEX_HELD(&cp->rn_lock));

	if (!(cp->rn_flags & RC_NODE_OLD)) {
		assert(cp->rn_flags & RC_NODE_IN_PARENT);
		if (!rc_node_wait_flag(cp, RC_NODE_USING_PARENT)) {
			abort();		/* can't happen, see above */
		}
		/* Detach from the parent and release the cached FMRI. */
		cp->rn_flags &= ~RC_NODE_IN_PARENT;
		cp->rn_parent = NULL;
		rc_node_free_fmri(cp);
	}

	cp->rn_flags |= RC_NODE_DEAD;

	/*
	 * If this node is not out-dated, we need to remove it from
	 * the notify list and cache hash table.
	 */
	if (!(cp->rn_flags & RC_NODE_OLD)) {
		assert(cp->rn_refs > 0);	/* can't go away yet */
		(void) pthread_mutex_unlock(&cp->rn_lock);

		/* Fire every pending pg notification registered on cp. */
		(void) pthread_mutex_lock(&rc_pg_notify_lock);
		while ((pnp = uu_list_first(cp->rn_pg_notify_list)) != NULL)
			rc_pg_notify_fire(pnp);
		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
		rc_notify_remove_node(cp);

		bp = cache_hold(cp->rn_hash);
		(void) pthread_mutex_lock(&cp->rn_lock);
		cache_remove_unlocked(bp, cp);
		cache_release(bp);
	}
}
4373 
4374 /*
4375  * For each child, call rc_node_finish_delete() and recurse.  If andformer
4376  * is set, also recurse down rn_former.  Finally release np, which might
4377  * free it.
4378  */
static void
rc_node_delete_children(rc_node_t *np, int andformer)
{
	rc_node_t *cp;

again:
	assert(np->rn_refs > 0);
	assert(MUTEX_HELD(&np->rn_lock));
	assert(np->rn_flags & RC_NODE_DEAD);

	/* Unlink, kill, and recurse into each child in turn. */
	while ((cp = uu_list_first(np->rn_children)) != NULL) {
		uu_list_remove(np->rn_children, cp);
		(void) pthread_mutex_lock(&cp->rn_lock);
		(void) pthread_mutex_unlock(&np->rn_lock);
		rc_node_hold_locked(cp);	/* hold while we recurse */
		rc_node_finish_delete(cp);
		rc_node_delete_children(cp, andformer);	/* drops lock + ref */
		(void) pthread_mutex_lock(&np->rn_lock);
	}

	/*
	 * When we drop cp's lock, all the children will be gone, so we
	 * can release DYING_FLAGS.
	 */
	rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
	if (andformer && (cp = np->rn_former) != NULL) {
		np->rn_former = NULL;		/* unlink */
		(void) pthread_mutex_lock(&cp->rn_lock);

		/*
		 * Register the ephemeral reference created by reading
		 * np->rn_former into cp.  Note that the persistent
		 * reference (np->rn_former) is locked because we haven't
		 * dropped np's lock since we dropped its RC_NODE_IN_TX
		 * (via RC_NODE_DYING_FLAGS).
		 */
		rc_node_hold_ephemeral_locked(cp);

		(void) pthread_mutex_unlock(&np->rn_lock);
		cp->rn_flags &= ~RC_NODE_ON_FORMER;

		rc_node_hold_locked(cp);	/* hold while we loop */

		rc_node_finish_delete(cp);

		rc_node_rele(np);		/* drop the old reference */

		np = cp;
		goto again;		/* tail-recurse down rn_former */
	}
	rc_node_rele_locked(np);
}
4431 
4432 /*
4433  * The last client or child reference to np, which must be either
4434  * RC_NODE_OLD or RC_NODE_DEAD, has been destroyed.  We'll destroy any
4435  * remaining references (e.g., rn_former) and call rc_node_destroy() to
4436  * free np.
4437  */
static void
rc_node_no_client_refs(rc_node_t *np)
{
	int unrefed;
	rc_node_t *current, *cur;

	assert(MUTEX_HELD(&np->rn_lock));
	assert(np->rn_refs == 0);
	assert(np->rn_other_refs == 0);
	assert(np->rn_other_refs_held == 0);

	if (np->rn_flags & RC_NODE_DEAD) {
		/*
		 * The node is DEAD, so the deletion code should have
		 * destroyed all rn_children or rn_former references.
		 * Since the last client or child reference has been
		 * destroyed, we're free to destroy np.  Unless another
		 * thread has an ephemeral reference, in which case we'll
		 * pass the buck.
		 */
		if (np->rn_erefs > 1) {
			--np->rn_erefs;
			NODE_UNLOCK(np);
			return;
		}

		(void) pthread_mutex_unlock(&np->rn_lock);
		rc_node_destroy(np);
		return;
	}

	/* We only collect DEAD and OLD nodes, thank you. */
	assert(np->rn_flags & RC_NODE_OLD);

	/*
	 * RC_NODE_UNREFED keeps multiple threads from processing OLD
	 * nodes.  But it's vulnerable to unfriendly scheduling, so full
	 * use of rn_erefs should supersede it someday.
	 */
	if (np->rn_flags & RC_NODE_UNREFED) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		return;
	}
	np->rn_flags |= RC_NODE_UNREFED;

	/*
	 * Now we'll remove the node from the rn_former chain and take its
	 * DYING_FLAGS.
	 */

	/*
	 * Since this node is OLD, it should be on an rn_former chain.  To
	 * remove it, we must find the current in-hash object and grab its
	 * RC_NODE_IN_TX flag to protect the entire rn_former chain.
	 */

	(void) pthread_mutex_unlock(&np->rn_lock);

	/* Retry loop:  find "current" and pin it with RC_NODE_IN_TX. */
	for (;;) {
		current = cache_lookup(&np->rn_id);

		if (current == NULL) {
			(void) pthread_mutex_lock(&np->rn_lock);

			if (np->rn_flags & RC_NODE_DEAD)
				goto died;

			/*
			 * We are trying to unreference this node, but the
			 * owner of the former list does not exist.  It must
			 * be the case that another thread is deleting this
			 * entire sub-branch, but has not yet reached us.
			 * We will in short order be deleted.
			 */
			np->rn_flags &= ~RC_NODE_UNREFED;
			(void) pthread_mutex_unlock(&np->rn_lock);
			return;
		}

		if (current == np) {
			/*
			 * no longer unreferenced
			 */
			(void) pthread_mutex_lock(&np->rn_lock);
			np->rn_flags &= ~RC_NODE_UNREFED;
			/* held in cache_lookup() */
			rc_node_rele_locked(np);
			return;
		}

		(void) pthread_mutex_lock(&current->rn_lock);
		if (current->rn_flags & RC_NODE_OLD) {
			/*
			 * current has been replaced since we looked it
			 * up.  Try again.
			 */
			/* held in cache_lookup() */
			rc_node_rele_locked(current);
			continue;
		}

		if (!rc_node_hold_flag(current, RC_NODE_IN_TX)) {
			/*
			 * current has been deleted since we looked it up.  Try
			 * again.
			 */
			/* held in cache_lookup() */
			rc_node_rele_locked(current);
			continue;
		}

		/*
		 * rc_node_hold_flag() might have dropped current's lock, so
		 * check OLD again.
		 */
		if (!(current->rn_flags & RC_NODE_OLD)) {
			/* Not old.  Stop looping. */
			(void) pthread_mutex_unlock(&current->rn_lock);
			break;
		}

		rc_node_rele_flag(current, RC_NODE_IN_TX);
		rc_node_rele_locked(current);
	}

	/* To take np's RC_NODE_DYING_FLAGS, we need its lock. */
	(void) pthread_mutex_lock(&np->rn_lock);

	/*
	 * While we didn't have the lock, a thread may have added
	 * a reference or changed the flags.
	 */
	if (!(np->rn_flags & (RC_NODE_OLD | RC_NODE_DEAD)) ||
	    np->rn_refs != 0 || np->rn_other_refs != 0 ||
	    np->rn_other_refs_held != 0) {
		np->rn_flags &= ~RC_NODE_UNREFED;

		/* Undo the RC_NODE_IN_TX taken on current above. */
		(void) pthread_mutex_lock(&current->rn_lock);
		rc_node_rele_flag(current, RC_NODE_IN_TX);
		/* held by cache_lookup() */
		rc_node_rele_locked(current);
		return;
	}

	if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
		/*
		 * Someone deleted the node while we were waiting for
		 * DYING_FLAGS.  Undo the modifications to current.
		 */
		(void) pthread_mutex_unlock(&np->rn_lock);

		rc_node_rele_flag(current, RC_NODE_IN_TX);
		/* held by cache_lookup() */
		rc_node_rele_locked(current);

		(void) pthread_mutex_lock(&np->rn_lock);
		goto died;
	}

	/* Take RC_NODE_DYING_FLAGS on np's descendents. */
	rc_node_delete_hold(np, 0);		/* drops np->rn_lock */

	/* Mark np DEAD.  This requires the lock. */
	(void) pthread_mutex_lock(&np->rn_lock);

	/* Recheck for new references. */
	if (!(np->rn_flags & RC_NODE_OLD) ||
	    np->rn_refs != 0 || np->rn_other_refs != 0 ||
	    np->rn_other_refs_held != 0) {
		np->rn_flags &= ~RC_NODE_UNREFED;
		rc_node_delete_rele(np, 0);	/* drops np's lock */

		(void) pthread_mutex_lock(&current->rn_lock);
		rc_node_rele_flag(current, RC_NODE_IN_TX);
		/* held by cache_lookup() */
		rc_node_rele_locked(current);
		return;
	}

	np->rn_flags |= RC_NODE_DEAD;

	/*
	 * Delete the children.  This calls rc_node_rele_locked() on np at
	 * the end, so add a reference to keep the count from going
	 * negative.  It will recurse with RC_NODE_DEAD set, so we'll call
	 * rc_node_destroy() above, but RC_NODE_UNREFED is also set, so it
	 * shouldn't actually free() np.
	 */
	rc_node_hold_locked(np);
	rc_node_delete_children(np, 0);		/* unlocks np */

	/* Remove np from current's rn_former chain. */
	(void) pthread_mutex_lock(&current->rn_lock);
	for (cur = current; cur != NULL && cur->rn_former != np;
	    cur = cur->rn_former)
		;
	assert(cur != NULL && cur != np);

	cur->rn_former = np->rn_former;
	np->rn_former = NULL;

	rc_node_rele_flag(current, RC_NODE_IN_TX);
	/* held by cache_lookup() */
	rc_node_rele_locked(current);

	/* Clear ON_FORMER and UNREFED, and destroy. */
	(void) pthread_mutex_lock(&np->rn_lock);
	assert(np->rn_flags & RC_NODE_ON_FORMER);
	np->rn_flags &= ~(RC_NODE_UNREFED | RC_NODE_ON_FORMER);

	if (np->rn_erefs > 1) {
		/* Still referenced.  Stay execution. */
		--np->rn_erefs;
		NODE_UNLOCK(np);
		return;
	}

	(void) pthread_mutex_unlock(&np->rn_lock);
	rc_node_destroy(np);
	return;

died:
	/*
	 * Another thread marked np DEAD.  If there still aren't any
	 * persistent references, destroy the node.
	 */
	np->rn_flags &= ~RC_NODE_UNREFED;

	unrefed = (np->rn_refs == 0 && np->rn_other_refs == 0 &&
	    np->rn_other_refs_held == 0);

	if (np->rn_erefs > 0)
		--np->rn_erefs;

	/* If another ephemeral reference remains, let that thread finish. */
	if (unrefed && np->rn_erefs > 0) {
		NODE_UNLOCK(np);
		return;
	}

	(void) pthread_mutex_unlock(&np->rn_lock);

	if (unrefed)
		rc_node_destroy(np);
}
4682 
4683 static au_event_t
4684 get_delete_event_id(rep_protocol_entity_t entity, uint32_t pgflags)
4685 {
4686 	au_event_t	id = 0;
4687 
4688 #ifndef NATIVE_BUILD
4689 	switch (entity) {
4690 	case REP_PROTOCOL_ENTITY_SERVICE:
4691 	case REP_PROTOCOL_ENTITY_INSTANCE:
4692 		id = ADT_smf_delete;
4693 		break;
4694 	case REP_PROTOCOL_ENTITY_SNAPSHOT:
4695 		id = ADT_smf_delete_snap;
4696 		break;
4697 	case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4698 	case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4699 		if (pgflags & SCF_PG_FLAG_NONPERSISTENT) {
4700 			id = ADT_smf_delete_npg;
4701 		} else {
4702 			id = ADT_smf_delete_pg;
4703 		}
4704 		break;
4705 	default:
4706 		abort();
4707 	}
4708 #endif	/* NATIVE_BUILD */
4709 	return (id);
4710 }
4711 
4712 /*
4713  * Fails with
4714  *   _NOT_SET
4715  *   _DELETED
4716  *   _BAD_REQUEST
4717  *   _PERMISSION_DENIED
4718  *   _NO_RESOURCES
4719  *   _TRUNCATED
4720  * and whatever object_delete() fails with.
4721  */
4722 int
4723 rc_node_delete(rc_node_ptr_t *npp)
4724 {
4725 	rc_node_t *np, *np_orig;
4726 	rc_node_t *pp = NULL;
4727 	int rc;
4728 	rc_node_pg_notify_t *pnp;
4729 	cache_bucket_t *bp;
4730 	rc_notify_delete_t *ndp;
4731 	permcheck_t *pcp;
4732 	int granted;
4733 	au_event_t event_id = 0;
4734 	size_t sz_out;
4735 	audit_event_data_t audit_data;
4736 	int audit_failure = 0;
4737 
4738 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
4739 
4740 	audit_data.ed_fmri = NULL;
4741 	audit_data.ed_auth = NULL;
4742 	audit_data.ed_snapname = NULL;
4743 	audit_data.ed_type = NULL;
4744 
4745 	switch (np->rn_id.rl_type) {
4746 	case REP_PROTOCOL_ENTITY_SERVICE:
4747 		event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SERVICE,
4748 		    np->rn_pgflags);
4749 		break;
4750 	case REP_PROTOCOL_ENTITY_INSTANCE:
4751 		event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_INSTANCE,
4752 		    np->rn_pgflags);
4753 		break;
4754 	case REP_PROTOCOL_ENTITY_SNAPSHOT:
4755 		event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_SNAPSHOT,
4756 		    np->rn_pgflags);
4757 		audit_data.ed_snapname = strdup(np->rn_name);
4758 		if (audit_data.ed_snapname == NULL) {
4759 			(void) pthread_mutex_unlock(&np->rn_lock);
4760 			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4761 		}
4762 		break;			/* deletable */
4763 
4764 	case REP_PROTOCOL_ENTITY_SCOPE:
4765 	case REP_PROTOCOL_ENTITY_SNAPLEVEL:
4766 		/* Scopes and snaplevels are indelible. */
4767 		(void) pthread_mutex_unlock(&np->rn_lock);
4768 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4769 
4770 	case REP_PROTOCOL_ENTITY_CPROPERTYGRP:
4771 		(void) pthread_mutex_unlock(&np->rn_lock);
4772 		np = np->rn_cchain[0];
4773 		RC_NODE_CHECK_AND_LOCK(np);
4774 		event_id = get_delete_event_id(REP_PROTOCOL_ENTITY_CPROPERTYGRP,
4775 		    np->rn_pgflags);
4776 		break;
4777 
4778 	case REP_PROTOCOL_ENTITY_PROPERTYGRP:
4779 		if (np->rn_id.rl_ids[ID_SNAPSHOT] == 0) {
4780 			event_id =
4781 			    get_delete_event_id(REP_PROTOCOL_ENTITY_PROPERTYGRP,
4782 			    np->rn_pgflags);
4783 			audit_data.ed_type = strdup(np->rn_type);
4784 			if (audit_data.ed_type == NULL) {
4785 				(void) pthread_mutex_unlock(&np->rn_lock);
4786 				return (REP_PROTOCOL_FAIL_NO_RESOURCES);
4787 			}
4788 			break;
4789 		}
4790 
4791 		/* Snapshot property groups are indelible. */
4792 		(void) pthread_mutex_unlock(&np->rn_lock);
4793 		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
4794 
4795 	case REP_PROTOCOL_ENTITY_PROPERTY:
4796 		(void) pthread_mutex_unlock(&np->rn_lock);
4797 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
4798 
4799 	default:
4800 		assert(0);
4801 		abort();
4802 		break;
4803 	}
4804 
4805 	audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
4806 	if (audit_data.ed_fmri == NULL) {
4807 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4808 		goto cleanout;
4809 	}
4810 	np_orig = np;
4811 	rc_node_hold_locked(np);	/* simplifies rest of the code */
4812 
4813 again:
4814 	/*
4815 	 * The following loop is to deal with the fact that snapshots and
4816 	 * property groups are moving targets -- changes to them result
4817 	 * in a new "child" node.  Since we can only delete from the top node,
4818 	 * we have to loop until we have a non-RC_NODE_OLD version.
4819 	 */
4820 	for (;;) {
4821 		if (!rc_node_wait_flag(np,
4822 		    RC_NODE_IN_TX | RC_NODE_USING_PARENT)) {
4823 			rc_node_rele_locked(np);
4824 			rc = REP_PROTOCOL_FAIL_DELETED;
4825 			goto cleanout;
4826 		}
4827 
4828 		if (np->rn_flags & RC_NODE_OLD) {
4829 			rc_node_rele_locked(np);
4830 			np = cache_lookup(&np_orig->rn_id);
4831 			assert(np != np_orig);
4832 
4833 			if (np == NULL) {
4834 				rc = REP_PROTOCOL_FAIL_DELETED;
4835 				goto fail;
4836 			}
4837 			(void) pthread_mutex_lock(&np->rn_lock);
4838 			continue;
4839 		}
4840 
4841 		if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
4842 			rc_node_rele_locked(np);
4843 			rc_node_clear(npp, 1);
4844 			rc = REP_PROTOCOL_FAIL_DELETED;
4845 		}
4846 
4847 		/*
4848 		 * Mark our parent as children changing.  this call drops our
4849 		 * lock and the RC_NODE_USING_PARENT flag, and returns with
4850 		 * pp's lock held
4851 		 */
4852 		pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
4853 		if (pp == NULL) {
4854 			/* our parent is gone, we're going next... */
4855 			rc_node_rele(np);
4856 
4857 			rc_node_clear(npp, 1);
4858 			rc = REP_PROTOCOL_FAIL_DELETED;
4859 			goto cleanout;
4860 		}
4861 
4862 		rc_node_hold_locked(pp);		/* hold for later */
4863 		(void) pthread_mutex_unlock(&pp->rn_lock);
4864 
4865 		(void) pthread_mutex_lock(&np->rn_lock);
4866 		if (!(np->rn_flags & RC_NODE_OLD))
4867 			break;			/* not old -- we're done */
4868 
4869 		(void) pthread_mutex_unlock(&np->rn_lock);
4870 		(void) pthread_mutex_lock(&pp->rn_lock);
4871 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
4872 		rc_node_rele_locked(pp);
4873 		(void) pthread_mutex_lock(&np->rn_lock);
4874 		continue;			/* loop around and try again */
4875 	}
4876 	/*
4877 	 * Everyone out of the pool -- we grab everything but
4878 	 * RC_NODE_USING_PARENT (including RC_NODE_DYING) to keep
4879 	 * any changes from occurring while we are attempting to
4880 	 * delete the node.
4881 	 */
4882 	if (!rc_node_hold_flag(np, RC_NODE_DYING_FLAGS)) {
4883 		(void) pthread_mutex_unlock(&np->rn_lock);
4884 		rc = REP_PROTOCOL_FAIL_DELETED;
4885 		goto fail;
4886 	}
4887 
4888 	assert(!(np->rn_flags & RC_NODE_OLD));
4889 
4890 	if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
4891 	    REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
4892 		rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4893 		(void) pthread_mutex_unlock(&np->rn_lock);
4894 		goto fail;
4895 	}
4896 
4897 #ifdef NATIVE_BUILD
4898 	if (!client_is_privileged()) {
4899 		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4900 	}
4901 #else
4902 	if (is_main_repository) {
4903 		/* permission check */
4904 		(void) pthread_mutex_unlock(&np->rn_lock);
4905 		pcp = pc_create();
4906 		if (pcp != NULL) {
4907 			rc = perm_add_enabling(pcp, AUTH_MODIFY);
4908 
4909 			/* add .smf.modify.<type> for pgs. */
4910 			if (rc == REP_PROTOCOL_SUCCESS && np->rn_id.rl_type ==
4911 			    REP_PROTOCOL_ENTITY_PROPERTYGRP) {
4912 				const char * const auth =
4913 				    perm_auth_for_pgtype(np->rn_type);
4914 
4915 				if (auth != NULL)
4916 					rc = perm_add_enabling(pcp, auth);
4917 			}
4918 
4919 			if (rc == REP_PROTOCOL_SUCCESS) {
4920 				granted = perm_granted(pcp);
4921 
4922 				if (granted < 0) {
4923 					rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4924 				} else {
4925 					/*
4926 					 * Copy out the authorization
4927 					 * string before freeing pcp.
4928 					 */
4929 					audit_data.ed_auth =
4930 					    strdup(pcp->pc_auth_string);
4931 					if (audit_data.ed_auth == NULL) {
4932 						/*
4933 						 * Following code line
4934 						 * cannot meet both the
4935 						 * indentation and the line
4936 						 * length requirements of
4937 						 * cstyle.  Indendation has
4938 						 * been sacrificed.
4939 						 */
4940 						/* CSTYLED */
4941 					    rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4942 					}
4943 				}
4944 			}
4945 
4946 			pc_free(pcp);
4947 		} else {
4948 			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4949 		}
4950 
4951 		if (rc == REP_PROTOCOL_SUCCESS && !granted) {
4952 			rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
4953 			audit_failure = 1;
4954 		}
4955 		(void) pthread_mutex_lock(&np->rn_lock);
4956 	} else {
4957 		rc = REP_PROTOCOL_SUCCESS;
4958 	}
4959 #endif /* NATIVE_BUILD */
4960 
4961 	if (rc != REP_PROTOCOL_SUCCESS) {
4962 		rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4963 		(void) pthread_mutex_unlock(&np->rn_lock);
4964 		goto fail;
4965 	}
4966 
4967 	ndp = uu_zalloc(sizeof (*ndp));
4968 	if (ndp == NULL) {
4969 		rc_node_rele_flag(np, RC_NODE_DYING_FLAGS);
4970 		(void) pthread_mutex_unlock(&np->rn_lock);
4971 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
4972 		goto fail;
4973 	}
4974 
4975 	rc_node_delete_hold(np, 1);	/* hold entire subgraph, drop lock */
4976 
4977 	rc = object_delete(np);
4978 
4979 	if (rc != REP_PROTOCOL_SUCCESS) {
4980 		(void) pthread_mutex_lock(&np->rn_lock);
4981 		rc_node_delete_rele(np, 1);		/* drops lock */
4982 		uu_free(ndp);
4983 		goto fail;
4984 	}
4985 
4986 	/*
4987 	 * Now, delicately unlink and delete the object.
4988 	 *
4989 	 * Create the delete notification, atomically remove
4990 	 * from the hash table and set the NODE_DEAD flag, and
4991 	 * remove from the parent's children list.
4992 	 */
4993 	rc_notify_node_delete(ndp, np); /* frees or uses ndp */
4994 
4995 	bp = cache_hold(np->rn_hash);
4996 
4997 	(void) pthread_mutex_lock(&np->rn_lock);
4998 	cache_remove_unlocked(bp, np);
4999 	cache_release(bp);
5000 
5001 	np->rn_flags |= RC_NODE_DEAD;
5002 
5003 	if (pp != NULL) {
5004 		/*
5005 		 * Remove from pp's rn_children.  This requires pp's lock,
5006 		 * so we must drop np's lock to respect lock order.
5007 		 */
5008 		(void) pthread_mutex_unlock(&np->rn_lock);
5009 		(void) pthread_mutex_lock(&pp->rn_lock);
5010 		(void) pthread_mutex_lock(&np->rn_lock);
5011 
5012 		uu_list_remove(pp->rn_children, np);
5013 
5014 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5015 
5016 		(void) pthread_mutex_unlock(&pp->rn_lock);
5017 
5018 		np->rn_flags &= ~RC_NODE_IN_PARENT;
5019 	}
5020 
5021 	/*
5022 	 * finally, propagate death to our children (including marking
5023 	 * them DEAD), handle notifications, and release our hold.
5024 	 */
5025 	rc_node_hold_locked(np);	/* hold for delete */
5026 	rc_node_delete_children(np, 1);	/* drops DYING_FLAGS, lock, ref */
5027 
5028 	rc_node_clear(npp, 1);
5029 
5030 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
5031 	while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
5032 		rc_pg_notify_fire(pnp);
5033 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
5034 	rc_notify_remove_node(np);
5035 
5036 	rc_node_rele(np);
5037 
5038 	smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS,
5039 	    &audit_data);
5040 	free(audit_data.ed_auth);
5041 	free(audit_data.ed_snapname);
5042 	free(audit_data.ed_type);
5043 	free(audit_data.ed_fmri);
5044 	return (rc);
5045 
5046 fail:
5047 	rc_node_rele(np);
5048 	if (rc == REP_PROTOCOL_FAIL_DELETED)
5049 		rc_node_clear(npp, 1);
5050 	if (pp != NULL) {
5051 		(void) pthread_mutex_lock(&pp->rn_lock);
5052 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
5053 		rc_node_rele_locked(pp);	/* drop ref and lock */
5054 	}
5055 	if (audit_failure) {
5056 		smf_audit_event(event_id, ADT_FAILURE,
5057 		    ADT_FAIL_VALUE_AUTH, &audit_data);
5058 	}
5059 cleanout:
5060 	free(audit_data.ed_auth);
5061 	free(audit_data.ed_snapname);
5062 	free(audit_data.ed_type);
5063 	free(audit_data.ed_fmri);
5064 	return (rc);
5065 }
5066 
/*
 * Set *cpp to the "next" snaplevel relative to npp.  If npp refers to a
 * snapshot, that is its first snaplevel child; if npp refers to a
 * snaplevel, it is the following snaplevel in the parent snapshot's
 * child list.
 *
 * Fails with
 *	_NOT_APPLICABLE - npp is neither a snapshot nor a snaplevel
 *	_DELETED - npp (or its parent) was deleted
 *	_NOT_FOUND - there is no next snaplevel
 *	(or whatever rc_node_fill_children() fails with)
 */
int
rc_node_next_snaplevel(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
{
	rc_node_t *np;
	rc_node_t *cp, *pp;
	int res;

	rc_node_clear(cpp, 0);

	/* Validates npp and returns with np->rn_lock held. */
	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);

	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT &&
	    np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
	}

	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
		/* Ensure the snaplevel children are loaded from backend. */
		if ((res = rc_node_fill_children(np,
		    REP_PROTOCOL_ENTITY_SNAPLEVEL)) != REP_PROTOCOL_SUCCESS) {
			(void) pthread_mutex_unlock(&np->rn_lock);
			return (res);
		}

		/* Take a hold on the first snaplevel child, if any. */
		for (cp = uu_list_first(np->rn_children);
		    cp != NULL;
		    cp = uu_list_next(np->rn_children, cp)) {
			if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
				continue;
			rc_node_hold(cp);
			break;
		}

		(void) pthread_mutex_unlock(&np->rn_lock);
	} else {
		HOLD_PTR_FLAG_OR_RETURN(np, npp, RC_NODE_USING_PARENT);
		/*
		 * mark our parent as children changing.  This call drops our
		 * lock and the RC_NODE_USING_PARENT flag, and returns with
		 * pp's lock held
		 */
		pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
		if (pp == NULL) {
			/* our parent is gone, we're going next... */

			rc_node_clear(npp, 1);
			return (REP_PROTOCOL_FAIL_DELETED);
		}

		/*
		 * find the next snaplevel
		 */
		cp = np;
		while ((cp = uu_list_next(pp->rn_children, cp)) != NULL &&
		    cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPLEVEL)
			;

		/* it must match the snaplevel list */
		assert((cp == NULL && np->rn_snaplevel->rsl_next == NULL) ||
		    (cp != NULL && np->rn_snaplevel->rsl_next ==
		    cp->rn_snaplevel));

		if (cp != NULL)
			rc_node_hold(cp);

		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);

		(void) pthread_mutex_unlock(&pp->rn_lock);
	}

	/* rc_node_assign() takes its own hold, so drop the one above. */
	rc_node_assign(cpp, cp);
	if (cp != NULL) {
		rc_node_rele(cp);

		return (REP_PROTOCOL_SUCCESS);
	}
	return (REP_PROTOCOL_FAIL_NOT_FOUND);
}
5145 
5146 /*
5147  * This call takes a snapshot (np) and either:
5148  *	an existing snapid (to be associated with np), or
5149  *	a non-NULL parentp (from which a new snapshot is taken, and associated
5150  *	    with np)
5151  *
5152  * To do the association, np is duplicated, the duplicate is made to
5153  * represent the new snapid, and np is replaced with the new rc_node_t on
5154  * np's parent's child list. np is placed on the new node's rn_former list,
5155  * and replaces np in cache_hash (so rc_node_update() will find the new one).
5156  *
 * old_fmri and old_name point to the original snapshot's FMRI and name.
5158  * These values are used when generating audit events.
5159  *
5160  * Fails with
5161  *	_BAD_REQUEST
5162  *	_BACKEND_READONLY
5163  *	_DELETED
5164  *	_NO_RESOURCES
5165  *	_TRUNCATED
5166  *	_TYPE_MISMATCH
5167  */
static int
rc_attach_snapshot(
	rc_node_t *np,
	uint32_t snapid,
	rc_node_t *parentp,
	char *old_fmri,
	char *old_name)
{
	rc_node_t *np_orig;
	rc_node_t *nnp, *prev;
	rc_node_t *pp;
	int rc;
	size_t sz_out;
	au_event_t event_id;
	audit_event_data_t audit_data;

	/* attach case requires an FMRI; take case requires snapid == 0 */
	if (parentp == NULL) {
		assert(old_fmri != NULL);
	} else {
		assert(snapid == 0);
	}
	assert(MUTEX_HELD(&np->rn_lock));

	/* Gather the audit data. */
	/*
	 * ADT_smf_* symbols may not be defined in the /usr/include header
	 * files on the build machine.  Thus, the following if-else will
	 * not be compiled when doing native builds.
	 */
#ifndef	NATIVE_BUILD
	if (parentp == NULL) {
		event_id = ADT_smf_attach_snap;
	} else {
		event_id = ADT_smf_create_snap;
	}
#endif	/* NATIVE_BUILD */
	audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
	audit_data.ed_snapname = malloc(REP_PROTOCOL_NAME_LEN);
	if ((audit_data.ed_fmri == NULL) || (audit_data.ed_snapname == NULL)) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		free(audit_data.ed_fmri);
		free(audit_data.ed_snapname);
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
	}
	audit_data.ed_auth = NULL;
	if (strlcpy(audit_data.ed_snapname, np->rn_name,
	    REP_PROTOCOL_NAME_LEN) >= REP_PROTOCOL_NAME_LEN) {
		abort();
	}
	audit_data.ed_old_fmri = old_fmri;
	audit_data.ed_old_name = old_name ? old_name : "NO NAME";

	if (parentp == NULL) {
		/*
		 * In the attach case, get the instance FMRIs of the
		 * snapshots.
		 */
		if ((rc = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
		    REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
			(void) pthread_mutex_unlock(&np->rn_lock);
			free(audit_data.ed_fmri);
			free(audit_data.ed_snapname);
			return (rc);
		}
	} else {
		/*
		 * Capture the FMRI of the parent if we're actually going
		 * to take the snapshot.
		 */
		if ((rc = rc_node_get_fmri_or_fragment(parentp,
		    audit_data.ed_fmri, REP_PROTOCOL_FMRI_LEN, &sz_out)) !=
		    REP_PROTOCOL_SUCCESS) {
			(void) pthread_mutex_unlock(&np->rn_lock);
			free(audit_data.ed_fmri);
			free(audit_data.ed_snapname);
			return (rc);
		}
	}

	np_orig = np;
	rc_node_hold_locked(np);		/* simplifies the remainder */

	(void) pthread_mutex_unlock(&np->rn_lock);
	if ((rc = rc_node_modify_permission_check(&audit_data.ed_auth)) !=
	    REP_PROTOCOL_SUCCESS) {
		smf_audit_event(event_id, ADT_FAILURE, ADT_FAIL_VALUE_AUTH,
		    &audit_data);
		goto cleanout;
	}
	(void) pthread_mutex_lock(&np->rn_lock);

	/*
	 * get the latest node, holding RC_NODE_IN_TX to keep the rn_former
	 * list from changing.
	 */
	for (;;) {
		if (!(np->rn_flags & RC_NODE_OLD)) {
			if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
				goto again;
			}
			pp = rc_node_hold_parent_flag(np,
			    RC_NODE_CHILDREN_CHANGING);

			/* re-lock np; pp's lock is held if pp != NULL */
			(void) pthread_mutex_lock(&np->rn_lock);
			if (pp == NULL) {
				goto again;
			}
			if (np->rn_flags & RC_NODE_OLD) {
				rc_node_rele_flag(pp,
				    RC_NODE_CHILDREN_CHANGING);
				(void) pthread_mutex_unlock(&pp->rn_lock);
				goto again;
			}
			(void) pthread_mutex_unlock(&pp->rn_lock);

			if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
				/*
				 * Can't happen, since we're holding our
				 * parent's CHILDREN_CHANGING flag...
				 */
				abort();
			}
			break;			/* everything's ready */
		}
again:
		/* np went OLD under us -- look up its replacement */
		rc_node_rele_locked(np);
		np = cache_lookup(&np_orig->rn_id);

		if (np == NULL) {
			rc = REP_PROTOCOL_FAIL_DELETED;
			goto cleanout;
		}

		(void) pthread_mutex_lock(&np->rn_lock);
	}

	if (parentp != NULL) {
		/* take case: np's parent must be the instance we were given */
		if (pp != parentp) {
			rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
			goto fail;
		}
		nnp = NULL;
	} else {
		/*
		 * look for a former node with the snapid we need.
		 */
		if (np->rn_snapshot_id == snapid) {
			rc_node_rele_flag(np, RC_NODE_IN_TX);
			rc_node_rele_locked(np);

			(void) pthread_mutex_lock(&pp->rn_lock);
			rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
			(void) pthread_mutex_unlock(&pp->rn_lock);
			rc = REP_PROTOCOL_SUCCESS;	/* nothing to do */
			goto cleanout;
		}

		prev = np;
		while ((nnp = prev->rn_former) != NULL) {
			if (nnp->rn_snapshot_id == snapid) {
				rc_node_hold(nnp);
				break;		/* existing node with that id */
			}
			prev = nnp;
		}
	}

	if (nnp == NULL) {
		/* no existing node -- build a duplicate of np for the snapid */
		prev = NULL;
		nnp = rc_node_alloc();
		if (nnp == NULL) {
			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
			goto fail;
		}

		nnp->rn_id = np->rn_id;		/* structure assignment */
		nnp->rn_hash = np->rn_hash;
		nnp->rn_name = strdup(np->rn_name);
		nnp->rn_snapshot_id = snapid;
		nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;

		if (nnp->rn_name == NULL) {
			rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
			goto fail;
		}
	}

	(void) pthread_mutex_unlock(&np->rn_lock);

	/* write-through to the object layer (np unlocked across the call) */
	rc = object_snapshot_attach(&np->rn_id, &snapid, (parentp != NULL));

	if (parentp != NULL)
		nnp->rn_snapshot_id = snapid;	/* fill in new snapid */
	else
		assert(nnp->rn_snapshot_id == snapid);

	(void) pthread_mutex_lock(&np->rn_lock);
	if (rc != REP_PROTOCOL_SUCCESS)
		goto fail;

	/*
	 * fix up the former chain
	 */
	if (prev != NULL) {
		prev->rn_former = nnp->rn_former;
		(void) pthread_mutex_lock(&nnp->rn_lock);
		nnp->rn_flags &= ~RC_NODE_ON_FORMER;
		nnp->rn_former = NULL;
		(void) pthread_mutex_unlock(&nnp->rn_lock);
	}
	np->rn_flags |= RC_NODE_OLD;
	(void) pthread_mutex_unlock(&np->rn_lock);

	/*
	 * replace np with nnp
	 */
	rc_node_relink_child(pp, np, nnp);

	rc_node_rele(np);
	smf_audit_event(event_id, ADT_SUCCESS, ADT_SUCCESS, &audit_data);
	rc = REP_PROTOCOL_SUCCESS;

cleanout:
	free(audit_data.ed_auth);
	free(audit_data.ed_fmri);
	free(audit_data.ed_snapname);
	return (rc);

fail:
	/* undo the IN_TX/CHILDREN_CHANGING flags and drop all holds */
	rc_node_rele_flag(np, RC_NODE_IN_TX);
	rc_node_rele_locked(np);
	(void) pthread_mutex_lock(&pp->rn_lock);
	rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
	(void) pthread_mutex_unlock(&pp->rn_lock);

	if (nnp != NULL) {
		/* prev == NULL means nnp was freshly allocated above */
		if (prev == NULL)
			rc_node_destroy(nnp);
		else
			rc_node_rele(nnp);
	}

	free(audit_data.ed_auth);
	free(audit_data.ed_fmri);
	free(audit_data.ed_snapname);
	return (rc);
}
5415 
5416 int
5417 rc_snapshot_take_new(rc_node_ptr_t *npp, const char *svcname,
5418     const char *instname, const char *name, rc_node_ptr_t *outpp)
5419 {
5420 	rc_node_t *np;
5421 	rc_node_t *outp = NULL;
5422 	int rc, perm_rc;
5423 	char fmri[REP_PROTOCOL_FMRI_LEN];
5424 	audit_event_data_t audit_data;
5425 	size_t sz_out;
5426 
5427 	rc_node_clear(outpp, 0);
5428 
5429 	perm_rc = rc_node_modify_permission_check(&audit_data.ed_auth);
5430 
5431 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
5432 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5433 		(void) pthread_mutex_unlock(&np->rn_lock);
5434 		free(audit_data.ed_auth);
5435 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5436 	}
5437 
5438 	rc = rc_check_type_name(REP_PROTOCOL_ENTITY_SNAPSHOT, name);
5439 	if (rc != REP_PROTOCOL_SUCCESS) {
5440 		(void) pthread_mutex_unlock(&np->rn_lock);
5441 		free(audit_data.ed_auth);
5442 		return (rc);
5443 	}
5444 
5445 	if (svcname != NULL && (rc =
5446 	    rc_check_type_name(REP_PROTOCOL_ENTITY_SERVICE, svcname)) !=
5447 	    REP_PROTOCOL_SUCCESS) {
5448 		(void) pthread_mutex_unlock(&np->rn_lock);
5449 		free(audit_data.ed_auth);
5450 		return (rc);
5451 	}
5452 
5453 	if (instname != NULL && (rc =
5454 	    rc_check_type_name(REP_PROTOCOL_ENTITY_INSTANCE, instname)) !=
5455 	    REP_PROTOCOL_SUCCESS) {
5456 		(void) pthread_mutex_unlock(&np->rn_lock);
5457 		free(audit_data.ed_auth);
5458 		return (rc);
5459 	}
5460 
5461 	audit_data.ed_auth = NULL;
5462 	audit_data.ed_fmri = fmri;
5463 	audit_data.ed_snapname = (char *)name;
5464 
5465 	if ((rc = rc_node_get_fmri_or_fragment(np, fmri, sizeof (fmri),
5466 	    &sz_out)) != REP_PROTOCOL_SUCCESS) {
5467 		(void) pthread_mutex_unlock(&np->rn_lock);
5468 		free(audit_data.ed_auth);
5469 		return (rc);
5470 	}
5471 	if (perm_rc != REP_PROTOCOL_SUCCESS) {
5472 		(void) pthread_mutex_unlock(&np->rn_lock);
5473 		smf_audit_event(ADT_smf_create_snap, ADT_FAILURE,
5474 		    ADT_FAIL_VALUE_AUTH, &audit_data);
5475 		free(audit_data.ed_auth);
5476 		return (perm_rc);
5477 	}
5478 
5479 	HOLD_PTR_FLAG_OR_FREE_AND_RETURN(np, npp, RC_NODE_CREATING_CHILD,
5480 	    audit_data.ed_auth);
5481 	(void) pthread_mutex_unlock(&np->rn_lock);
5482 
5483 	rc = object_snapshot_take_new(np, svcname, instname, name, &outp);
5484 
5485 	if (rc == REP_PROTOCOL_SUCCESS) {
5486 		rc_node_assign(outpp, outp);
5487 		rc_node_rele(outp);
5488 	}
5489 
5490 	(void) pthread_mutex_lock(&np->rn_lock);
5491 	rc_node_rele_flag(np, RC_NODE_CREATING_CHILD);
5492 	(void) pthread_mutex_unlock(&np->rn_lock);
5493 
5494 	if (rc == REP_PROTOCOL_SUCCESS) {
5495 		smf_audit_event(ADT_smf_create_snap, ADT_SUCCESS, ADT_SUCCESS,
5496 		    &audit_data);
5497 	}
5498 	if (audit_data.ed_auth != NULL)
5499 		free(audit_data.ed_auth);
5500 	return (rc);
5501 }
5502 
5503 int
5504 rc_snapshot_take_attach(rc_node_ptr_t *npp, rc_node_ptr_t *outpp)
5505 {
5506 	rc_node_t *np, *outp;
5507 
5508 	RC_NODE_PTR_GET_CHECK(np, npp);
5509 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE) {
5510 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5511 	}
5512 
5513 	RC_NODE_PTR_GET_CHECK_AND_LOCK(outp, outpp);
5514 	if (outp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
5515 		(void) pthread_mutex_unlock(&outp->rn_lock);
5516 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5517 	}
5518 
5519 	return (rc_attach_snapshot(outp, 0, np, NULL,
5520 	    NULL));					/* drops outp's lock */
5521 }
5522 
/*
 * Associate the snapshot data referenced by npp with the snapshot entity
 * referenced by cpp (both must be snapshots).  The source snapshot's FMRI
 * and name are captured first so they can be reported in audit events.
 */
int
rc_snapshot_attach(rc_node_ptr_t *npp, rc_node_ptr_t *cpp)
{
	rc_node_t *np;
	rc_node_t *cp;
	uint32_t snapid;
	char old_name[REP_PROTOCOL_NAME_LEN];
	int rc;
	size_t sz_out;
	char old_fmri[REP_PROTOCOL_FMRI_LEN];

	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
	}
	snapid = np->rn_snapshot_id;
	rc = rc_node_get_fmri_or_fragment(np, old_fmri, sizeof (old_fmri),
	    &sz_out);
	(void) pthread_mutex_unlock(&np->rn_lock);
	if (rc != REP_PROTOCOL_SUCCESS)
		return (rc);
	/*
	 * NOTE(review): np->rn_name is read here after np's lock has been
	 * dropped, and if rn_name is NULL, old_name is passed to
	 * rc_attach_snapshot() uninitialized -- confirm both are safe
	 * (rn_name is presumably immutable for the node's lifetime).
	 */
	if (np->rn_name != NULL) {
		if (strlcpy(old_name, np->rn_name, sizeof (old_name)) >=
		    sizeof (old_name)) {
			return (REP_PROTOCOL_FAIL_TRUNCATED);
		}
	}

	RC_NODE_PTR_GET_CHECK_AND_LOCK(cp, cpp);
	if (cp->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT) {
		(void) pthread_mutex_unlock(&cp->rn_lock);
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
	}

	rc = rc_attach_snapshot(cp, snapid, NULL,
	    old_fmri, old_name);			/* drops cp's lock */
	return (rc);
}
5562 
5563 /*
5564  * If the pgname property group under ent has type pgtype, and it has a
5565  * propname property with type ptype, return _SUCCESS.  If pgtype is NULL,
5566  * it is not checked.  If ent is not a service node, we will return _SUCCESS if
5567  * a property meeting the requirements exists in either the instance or its
5568  * parent.
5569  *
5570  * Returns
5571  *   _SUCCESS - see above
5572  *   _DELETED - ent or one of its ancestors was deleted
5573  *   _NO_RESOURCES - no resources
5574  *   _NOT_FOUND - no matching property was found
5575  */
static int
rc_svc_prop_exists(rc_node_t *ent, const char *pgname, const char *pgtype,
    const char *propname, rep_protocol_value_type_t ptype)
{
	int ret;
	/* pg: pgname on ent itself; spg: pgname on ent's parent service */
	rc_node_t *pg = NULL, *spg = NULL, *svc, *prop;

	assert(!MUTEX_HELD(&ent->rn_lock));

	/* Look for the property group directly on ent. */
	(void) pthread_mutex_lock(&ent->rn_lock);
	ret = rc_node_find_named_child(ent, pgname,
	    REP_PROTOCOL_ENTITY_PROPERTYGRP, &pg);
	(void) pthread_mutex_unlock(&ent->rn_lock);

	switch (ret) {
	case REP_PROTOCOL_SUCCESS:
		break;

	case REP_PROTOCOL_FAIL_DELETED:
	case REP_PROTOCOL_FAIL_NO_RESOURCES:
		return (ret);

	default:
		bad_error("rc_node_find_named_child", ret);
	}

	if (ent->rn_id.rl_type != REP_PROTOCOL_ENTITY_SERVICE) {
		/* ent is an instance (or below); also check its service. */
		ret = rc_node_find_ancestor(ent, REP_PROTOCOL_ENTITY_SERVICE,
		    &svc);
		if (ret != REP_PROTOCOL_SUCCESS) {
			assert(ret == REP_PROTOCOL_FAIL_DELETED);
			if (pg != NULL)
				rc_node_rele(pg);
			return (ret);
		}
		assert(svc->rn_id.rl_type == REP_PROTOCOL_ENTITY_SERVICE);

		(void) pthread_mutex_lock(&svc->rn_lock);
		ret = rc_node_find_named_child(svc, pgname,
		    REP_PROTOCOL_ENTITY_PROPERTYGRP, &spg);
		(void) pthread_mutex_unlock(&svc->rn_lock);

		rc_node_rele(svc);

		switch (ret) {
		case REP_PROTOCOL_SUCCESS:
			break;

		case REP_PROTOCOL_FAIL_DELETED:
		case REP_PROTOCOL_FAIL_NO_RESOURCES:
			if (pg != NULL)
				rc_node_rele(pg);
			return (ret);

		default:
			bad_error("rc_node_find_named_child", ret);
		}
	}

	/* Discard candidates whose pg type doesn't match (if one was given). */
	if (pg != NULL &&
	    pgtype != NULL && strcmp(pg->rn_type, pgtype) != 0) {
		rc_node_rele(pg);
		pg = NULL;
	}

	if (spg != NULL &&
	    pgtype != NULL && strcmp(spg->rn_type, pgtype) != 0) {
		rc_node_rele(spg);
		spg = NULL;
	}

	if (pg == NULL) {
		if (spg == NULL)
			return (REP_PROTOCOL_FAIL_NOT_FOUND);
		pg = spg;
		spg = NULL;
	}

	/*
	 * At this point, pg is non-NULL, and is a property group node of the
	 * correct type.  spg, if non-NULL, is also a property group node of
	 * the correct type.  Check for the property in pg first, then spg
	 * (if applicable).
	 */
	(void) pthread_mutex_lock(&pg->rn_lock);
	ret = rc_node_find_named_child(pg, propname,
	    REP_PROTOCOL_ENTITY_PROPERTY, &prop);
	(void) pthread_mutex_unlock(&pg->rn_lock);
	rc_node_rele(pg);
	switch (ret) {
	case REP_PROTOCOL_SUCCESS:
		if (prop != NULL) {
			if (prop->rn_valtype == ptype) {
				/* found a property of the requested type */
				rc_node_rele(prop);
				if (spg != NULL)
					rc_node_rele(spg);
				return (REP_PROTOCOL_SUCCESS);
			}
			rc_node_rele(prop);
		}
		break;

	case REP_PROTOCOL_FAIL_NO_RESOURCES:
		if (spg != NULL)
			rc_node_rele(spg);
		return (ret);

	case REP_PROTOCOL_FAIL_DELETED:
		/* pg disappeared; fall through to try spg, if any */
		break;

	default:
		bad_error("rc_node_find_named_child", ret);
	}

	if (spg == NULL)
		return (REP_PROTOCOL_FAIL_NOT_FOUND);

	/* Second chance: look for the property in the service's pg. */
	pg = spg;

	(void) pthread_mutex_lock(&pg->rn_lock);
	ret = rc_node_find_named_child(pg, propname,
	    REP_PROTOCOL_ENTITY_PROPERTY, &prop);
	(void) pthread_mutex_unlock(&pg->rn_lock);
	rc_node_rele(pg);
	switch (ret) {
	case REP_PROTOCOL_SUCCESS:
		if (prop != NULL) {
			if (prop->rn_valtype == ptype) {
				rc_node_rele(prop);
				return (REP_PROTOCOL_SUCCESS);
			}
			rc_node_rele(prop);
		}
		return (REP_PROTOCOL_FAIL_NOT_FOUND);

	case REP_PROTOCOL_FAIL_NO_RESOURCES:
		return (ret);

	case REP_PROTOCOL_FAIL_DELETED:
		return (REP_PROTOCOL_FAIL_NOT_FOUND);

	default:
		bad_error("rc_node_find_named_child", ret);
	}

	/* NOTREACHED: bad_error() does not return */
	return (REP_PROTOCOL_SUCCESS);
}
5723 
5724 /*
5725  * Given a property group node, returns _SUCCESS if the property group may
5726  * be read without any special authorization.
5727  *
5728  * Fails with:
5729  *   _DELETED - np or an ancestor node was deleted
5730  *   _TYPE_MISMATCH - np does not refer to a property group
5731  *   _NO_RESOURCES - no resources
5732  *   _PERMISSION_DENIED - authorization is required
5733  */
5734 static int
5735 rc_node_pg_check_read_protect(rc_node_t *np)
5736 {
5737 	int ret;
5738 	rc_node_t *ent;
5739 
5740 	assert(!MUTEX_HELD(&np->rn_lock));
5741 
5742 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP)
5743 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5744 
5745 	if (strcmp(np->rn_type, SCF_GROUP_FRAMEWORK) == 0 ||
5746 	    strcmp(np->rn_type, SCF_GROUP_DEPENDENCY) == 0 ||
5747 	    strcmp(np->rn_type, SCF_GROUP_METHOD) == 0)
5748 		return (REP_PROTOCOL_SUCCESS);
5749 
5750 	ret = rc_node_parent(np, &ent);
5751 
5752 	if (ret != REP_PROTOCOL_SUCCESS)
5753 		return (ret);
5754 
5755 	ret = rc_svc_prop_exists(ent, np->rn_name, np->rn_type,
5756 	    AUTH_PROP_READ, REP_PROTOCOL_TYPE_STRING);
5757 
5758 	rc_node_rele(ent);
5759 
5760 	switch (ret) {
5761 	case REP_PROTOCOL_FAIL_NOT_FOUND:
5762 		return (REP_PROTOCOL_SUCCESS);
5763 	case REP_PROTOCOL_SUCCESS:
5764 		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5765 	case REP_PROTOCOL_FAIL_DELETED:
5766 	case REP_PROTOCOL_FAIL_NO_RESOURCES:
5767 		return (ret);
5768 	default:
5769 		bad_error("rc_svc_prop_exists", ret);
5770 	}
5771 
5772 	return (REP_PROTOCOL_SUCCESS);
5773 }
5774 
5775 /*
5776  * Fails with
5777  *   _DELETED - np's node or parent has been deleted
5778  *   _TYPE_MISMATCH - np's node is not a property
5779  *   _NO_RESOURCES - out of memory
5780  *   _PERMISSION_DENIED - no authorization to read this property's value(s)
5781  *   _BAD_REQUEST - np's parent is not a property group
5782  */
5783 static int
5784 rc_node_property_may_read(rc_node_t *np)
5785 {
5786 	int ret, granted = 0;
5787 	rc_node_t *pgp;
5788 	permcheck_t *pcp;
5789 	audit_event_data_t audit_data;
5790 	size_t sz_out;
5791 
5792 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
5793 		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
5794 
5795 	if (client_is_privileged())
5796 		return (REP_PROTOCOL_SUCCESS);
5797 
5798 #ifdef NATIVE_BUILD
5799 	return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
5800 #else
5801 	ret = rc_node_parent(np, &pgp);
5802 
5803 	if (ret != REP_PROTOCOL_SUCCESS)
5804 		return (ret);
5805 
5806 	if (pgp->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
5807 		rc_node_rele(pgp);
5808 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
5809 	}
5810 
5811 	ret = rc_node_pg_check_read_protect(pgp);
5812 
5813 	if (ret != REP_PROTOCOL_FAIL_PERMISSION_DENIED) {
5814 		rc_node_rele(pgp);
5815 		return (ret);
5816 	}
5817 
5818 	pcp = pc_create();
5819 
5820 	if (pcp == NULL) {
5821 		rc_node_rele(pgp);
5822 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
5823 	}
5824 
5825 	ret = perm_add_enabling(pcp, AUTH_MODIFY);
5826 
5827 	if (ret == REP_PROTOCOL_SUCCESS) {
5828 		const char * const auth =
5829 		    perm_auth_for_pgtype(pgp->rn_type);
5830 
5831 		if (auth != NULL)
5832 			ret = perm_add_enabling(pcp, auth);
5833 	}
5834 
5835 	/*
5836 	 * If you are permitted to modify the value, you may also
5837 	 * read it.  This means that both the MODIFY and VALUE
5838 	 * authorizations are acceptable.  We don't allow requests
5839 	 * for AUTH_PROP_MODIFY if all you have is $AUTH_PROP_VALUE,
5840 	 * however, to avoid leaking possibly valuable information
5841 	 * since such a user can't change the property anyway.
5842 	 */
5843 	if (ret == REP_PROTOCOL_SUCCESS)
5844 		ret = perm_add_enabling_values(pcp, pgp,
5845 		    AUTH_PROP_MODIFY);
5846 
5847 	if (ret == REP_PROTOCOL_SUCCESS &&
5848 	    strcmp(np->rn_name, AUTH_PROP_MODIFY) != 0)
5849 		ret = perm_add_enabling_values(pcp, pgp,
5850 		    AUTH_PROP_VALUE);
5851 
5852 	if (ret == REP_PROTOCOL_SUCCESS)
5853 		ret = perm_add_enabling_values(pcp, pgp,
5854 		    AUTH_PROP_READ);
5855 
5856 	rc_node_rele(pgp);
5857 
5858 	if (ret == REP_PROTOCOL_SUCCESS) {
5859 		granted = perm_granted(pcp);
5860 		if (granted < 0)
5861 			ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5862 	}
5863 	if (ret == REP_PROTOCOL_SUCCESS) {
5864 		/* Generate a read_prop audit event. */
5865 		audit_data.ed_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
5866 		if (audit_data.ed_fmri == NULL)
5867 			ret = REP_PROTOCOL_FAIL_NO_RESOURCES;
5868 	}
5869 	ret = rc_node_get_fmri_or_fragment(np, audit_data.ed_fmri,
5870 	    REP_PROTOCOL_FMRI_LEN, &sz_out);
5871 	assert(ret == REP_PROTOCOL_SUCCESS);
5872 	if (ret == REP_PROTOCOL_SUCCESS) {
5873 		int status;
5874 		int ret_value;
5875 
5876 		if (granted == 0) {
5877 			status = ADT_FAILURE;
5878 			ret_value = ADT_FAIL_VALUE_AUTH;
5879 		} else {
5880 			status = ADT_SUCCESS;
5881 			ret_value = ADT_SUCCESS;
5882 		}
5883 		audit_data.ed_auth = pcp->pc_auth_string;
5884 		smf_audit_event(ADT_smf_read_prop,
5885 		    status, ret_value, &audit_data);
5886 	}
5887 	free(audit_data.ed_fmri);
5888 
5889 	pc_free(pcp);
5890 
5891 	if (ret == REP_PROTOCOL_SUCCESS && !granted)
5892 		ret = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
5893 
5894 	return (ret);
5895 #endif	/* NATIVE_BUILD */
5896 }
5897 
5898 /*
5899  * Iteration
5900  */
5901 static int
5902 rc_iter_filter_name(rc_node_t *np, void *s)
5903 {
5904 	const char *name = s;
5905 
5906 	return (strcmp(np->rn_name, name) == 0);
5907 }
5908 
5909 static int
5910 rc_iter_filter_type(rc_node_t *np, void *s)
5911 {
5912 	const char *type = s;
5913 
5914 	return (np->rn_type != NULL && strcmp(np->rn_type, type) == 0);
5915 }
5916 
/*
 * Default iterator filter: accepts every node.
 */
/*ARGSUSED*/
static int
rc_iter_null_filter(rc_node_t *np, void *s)
{
	return (1);
}
5923 
5924 /*
5925  * Allocate & initialize an rc_node_iter_t structure.  Essentially, ensure
5926  * np->rn_children is populated and call uu_list_walk_start(np->rn_children).
5927  * If successful, leaves a hold on np & increments np->rn_other_refs
5928  *
5929  * If composed is true, then set up for iteration across the top level of np's
5930  * composition chain.  If successful, leaves a hold on np and increments
5931  * rn_other_refs for the top level of np's composition chain.
5932  *
5933  * Fails with
5934  *   _NO_RESOURCES
5935  *   _INVALID_TYPE
5936  *   _TYPE_MISMATCH - np cannot carry type children
5937  *   _DELETED
5938  */
static int
rc_iter_create(rc_node_iter_t **resp, rc_node_t *np, uint32_t type,
    rc_iter_filter_func *filter, void *arg, boolean_t composed)
{
	rc_node_iter_t *nip;
	int res;

	assert(*resp == NULL);

	nip = uu_zalloc(sizeof (*nip));
	if (nip == NULL)
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);

	/* np is held by the client's rc_node_ptr_t */
	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP)
		composed = 1;

	if (!composed) {
		/* plain iteration: walk np's own children */
		(void) pthread_mutex_lock(&np->rn_lock);

		if ((res = rc_node_fill_children(np, type)) !=
		    REP_PROTOCOL_SUCCESS) {
			(void) pthread_mutex_unlock(&np->rn_lock);
			uu_free(nip);
			return (res);
		}

		/* rni_clevel == -1 marks a non-composed iterator */
		nip->rni_clevel = -1;

		nip->rni_iter = uu_list_walk_start(np->rn_children,
		    UU_WALK_ROBUST);
		if (nip->rni_iter != NULL) {
			nip->rni_iter_node = np;
			rc_node_hold_other(np);
		} else {
			(void) pthread_mutex_unlock(&np->rn_lock);
			uu_free(nip);
			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
		}
		(void) pthread_mutex_unlock(&np->rn_lock);
	} else {
		/* composed iteration: walk the composition chain member */
		rc_node_t *ent;

		if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_SNAPSHOT) {
			/* rn_cchain isn't valid until children are loaded. */
			(void) pthread_mutex_lock(&np->rn_lock);
			res = rc_node_fill_children(np,
			    REP_PROTOCOL_ENTITY_SNAPLEVEL);
			(void) pthread_mutex_unlock(&np->rn_lock);
			if (res != REP_PROTOCOL_SUCCESS) {
				uu_free(nip);
				return (res);
			}

			/* Check for an empty snapshot. */
			if (np->rn_cchain[0] == NULL)
				goto empty;
		}

		/* Start at the top of the composition chain. */
		for (nip->rni_clevel = 0; ; ++nip->rni_clevel) {
			if (nip->rni_clevel >= COMPOSITION_DEPTH) {
				/* Empty composition chain. */
empty:
				nip->rni_clevel = -1;
				nip->rni_iter = NULL;
				/* It's ok, iter_next() will return _DONE. */
				goto out;
			}

			ent = np->rn_cchain[nip->rni_clevel];
			assert(ent != NULL);

			/* returns with ent->rn_lock held on success */
			if (rc_node_check_and_lock(ent) == REP_PROTOCOL_SUCCESS)
				break;

			/* Someone deleted it, so try the next one. */
		}

		res = rc_node_fill_children(ent, type);

		if (res == REP_PROTOCOL_SUCCESS) {
			nip->rni_iter = uu_list_walk_start(ent->rn_children,
			    UU_WALK_ROBUST);

			if (nip->rni_iter == NULL)
				res = REP_PROTOCOL_FAIL_NO_RESOURCES;
			else {
				nip->rni_iter_node = ent;
				rc_node_hold_other(ent);
			}
		}

		if (res != REP_PROTOCOL_SUCCESS) {
			(void) pthread_mutex_unlock(&ent->rn_lock);
			uu_free(nip);
			return (res);
		}

		(void) pthread_mutex_unlock(&ent->rn_lock);
	}

out:
	rc_node_hold(np);		/* released by rc_iter_end() */
	nip->rni_parent = np;
	nip->rni_type = type;
	nip->rni_filter = (filter != NULL)? filter : rc_iter_null_filter;
	nip->rni_filter_arg = arg;
	*resp = nip;
	return (REP_PROTOCOL_SUCCESS);
}
6050 
/*
 * Tear down the walking state of iter: end the uu_list walk, drop the
 * lock of the node currently being walked (the parent for a simple
 * iterator, the current composition-chain level for a composed one),
 * and release the holds taken by rc_iter_create().
 *
 * The caller must hold the rn_lock of the node being walked; we unlock
 * it here.
 */
static void
rc_iter_end(rc_node_iter_t *iter)
{
	rc_node_t *np = iter->rni_parent;

	/* Composed iterator: the walked node is the current chain level. */
	if (iter->rni_clevel >= 0)
		np = np->rn_cchain[iter->rni_clevel];

	assert(MUTEX_HELD(&np->rn_lock));
	if (iter->rni_iter != NULL)
		uu_list_walk_end(iter->rni_iter);
	iter->rni_iter = NULL;

	(void) pthread_mutex_unlock(&np->rn_lock);
	/* Drop the hold taken at the end of rc_iter_create(). */
	rc_node_rele(iter->rni_parent);
	/* Drop the rn_other_refs hold on the walked node, if any. */
	if (iter->rni_iter_node != NULL)
		rc_node_rele_other(iter->rni_iter_node);
}
6069 
6070 /*
6071  * Fails with
6072  *   _NOT_SET - npp is reset
6073  *   _DELETED - npp's node has been deleted
6074  *   _NOT_APPLICABLE - npp's node is not a property
6075  *   _NO_RESOURCES - out of memory
6076  */
6077 static int
6078 rc_node_setup_value_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp)
6079 {
6080 	rc_node_t *np;
6081 
6082 	rc_node_iter_t *nip;
6083 
6084 	assert(*iterp == NULL);
6085 
6086 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
6087 
6088 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
6089 		(void) pthread_mutex_unlock(&np->rn_lock);
6090 		return (REP_PROTOCOL_FAIL_NOT_APPLICABLE);
6091 	}
6092 
6093 	nip = uu_zalloc(sizeof (*nip));
6094 	if (nip == NULL) {
6095 		(void) pthread_mutex_unlock(&np->rn_lock);
6096 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6097 	}
6098 
6099 	nip->rni_parent = np;
6100 	nip->rni_iter = NULL;
6101 	nip->rni_clevel = -1;
6102 	nip->rni_type = REP_PROTOCOL_ENTITY_VALUE;
6103 	nip->rni_offset = 0;
6104 	nip->rni_last_offset = 0;
6105 
6106 	rc_node_hold_locked(np);
6107 
6108 	*iterp = nip;
6109 	(void) pthread_mutex_unlock(&np->rn_lock);
6110 
6111 	return (REP_PROTOCOL_SUCCESS);
6112 }
6113 
6114 /*
6115  * Returns:
6116  *   _NO_RESOURCES - out of memory
6117  *   _NOT_SET - npp is reset
6118  *   _DELETED - npp's node has been deleted
6119  *   _TYPE_MISMATCH - npp's node is not a property
6120  *   _NOT_FOUND - property has no values
6121  *   _TRUNCATED - property has >1 values (first is written into out)
6122  *   _SUCCESS - property has 1 value (which is written into out)
6123  *   _PERMISSION_DENIED - no authorization to read property value(s)
6124  *
6125  * We shorten *sz_out to not include anything after the final '\0'.
6126  */
int
rc_node_get_property_value(rc_node_ptr_t *npp,
    struct rep_protocol_value_response *out, size_t *sz_out)
{
	rc_node_t *np;
	size_t w;
	int ret;

	assert(*sz_out == sizeof (*out));

	/*
	 * First check read authorization with only a hold (no lock), then
	 * drop the hold and re-validate with the lock for the actual read.
	 */
	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);
	ret = rc_node_property_may_read(np);
	rc_node_rele(np);

	if (ret != REP_PROTOCOL_SUCCESS)
		return (ret);

	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);

	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
	}

	/* No values at all. */
	if (np->rn_values_size == 0) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		return (REP_PROTOCOL_FAIL_NOT_FOUND);
	}
	out->rpr_type = np->rn_valtype;
	/* Copy the first value; rn_values holds '\0'-terminated strings. */
	w = strlcpy(out->rpr_value, &np->rn_values[0],
	    sizeof (out->rpr_value));

	if (w >= sizeof (out->rpr_value))
		backend_panic("value too large");

	/* Trim *sz_out to end just past the value's terminating '\0'. */
	*sz_out = offsetof(struct rep_protocol_value_response,
	    rpr_value[w + 1]);

	/* Multiple values: caller gets the first plus a _TRUNCATED hint. */
	ret = (np->rn_values_count != 1)? REP_PROTOCOL_FAIL_TRUNCATED :
	    REP_PROTOCOL_SUCCESS;
	(void) pthread_mutex_unlock(&np->rn_lock);
	return (ret);
}
6170 
/*
 * Copy the next value of the property being iterated into *out, walking
 * the '\0'-separated string sequence in rn_values via the offsets kept
 * in the iterator.  If repeat is nonzero, re-deliver the previously
 * returned value instead of advancing.
 *
 * Returns
 *   _BAD_REQUEST - iter is not a value iterator
 *   (errors from rc_node_property_may_read())
 *   _DELETED - the property was deleted
 *   _DONE - no values remain
 *   _SUCCESS - *out and *sz_out are filled in
 */
int
rc_iter_next_value(rc_node_iter_t *iter,
    struct rep_protocol_value_response *out, size_t *sz_out, int repeat)
{
	rc_node_t *np = iter->rni_parent;
	const char *vals;
	size_t len;

	size_t start;
	size_t w;
	int ret;

	rep_protocol_responseid_t result;

	assert(*sz_out == sizeof (*out));

	(void) memset(out, '\0', *sz_out);

	if (iter->rni_type != REP_PROTOCOL_ENTITY_VALUE)
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);

	/* Authorization check before taking the node lock. */
	RC_NODE_CHECK(np);
	ret = rc_node_property_may_read(np);

	if (ret != REP_PROTOCOL_SUCCESS)
		return (ret);

	RC_NODE_CHECK_AND_LOCK(np);

	vals = np->rn_values;
	len = np->rn_values_size;

	out->rpr_type = np->rn_valtype;

	/* On repeat, restart from the previously returned value. */
	start = (repeat)? iter->rni_last_offset : iter->rni_offset;

	if (len == 0 || start >= len) {
		/* Past the end: report _DONE with an empty payload. */
		result = REP_PROTOCOL_DONE;
		*sz_out -= sizeof (out->rpr_value);
	} else {
		w = strlcpy(out->rpr_value, &vals[start],
		    sizeof (out->rpr_value));

		if (w >= sizeof (out->rpr_value))
			backend_panic("value too large");

		/* Trim *sz_out to just past the terminating '\0'. */
		*sz_out = offsetof(struct rep_protocol_value_response,
		    rpr_value[w + 1]);

		/*
		 * update the offsets if we're not repeating
		 */
		if (!repeat) {
			iter->rni_last_offset = iter->rni_offset;
			iter->rni_offset += (w + 1);
		}

		result = REP_PROTOCOL_SUCCESS;
	}

	(void) pthread_mutex_unlock(&np->rn_lock);
	return (result);
}
6234 
6235 /*
6236  * Entry point for ITER_START from client.c.  Validate the arguments & call
6237  * rc_iter_create().
6238  *
6239  * Fails with
6240  *   _NOT_SET
6241  *   _DELETED
6242  *   _TYPE_MISMATCH - np cannot carry type children
6243  *   _BAD_REQUEST - flags is invalid
6244  *		    pattern is invalid
6245  *   _NO_RESOURCES
6246  *   _INVALID_TYPE
6247  *   _TYPE_MISMATCH - *npp cannot have children of type
6248  *   _BACKEND_ACCESS
6249  */
int
rc_node_setup_iter(rc_node_ptr_t *npp, rc_node_iter_t **iterp,
    uint32_t type, uint32_t flags, const char *pattern)
{
	rc_node_t *np;
	rc_iter_filter_func *f = NULL;
	int rc;

	RC_NODE_PTR_GET_CHECK(np, npp);

	/* An empty pattern is treated the same as no pattern. */
	if (pattern != NULL && pattern[0] == '\0')
		pattern = NULL;

	/* Value iteration is handled by a dedicated setup path. */
	if (type == REP_PROTOCOL_ENTITY_VALUE) {
		if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTY)
			return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
		if (flags != RP_ITER_START_ALL || pattern != NULL)
			return (REP_PROTOCOL_FAIL_BAD_REQUEST);

		rc = rc_node_setup_value_iter(npp, iterp);
		assert(rc != REP_PROTOCOL_FAIL_NOT_APPLICABLE);
		return (rc);
	}

	if ((rc = rc_check_parent_child(np->rn_id.rl_type, type)) !=
	    REP_PROTOCOL_SUCCESS)
		return (rc);

	/*
	 * A pattern is required exactly when a filtering mode other than
	 * _ALL was requested (the XOR rejects both mismatched cases).
	 */
	if (((flags & RP_ITER_START_FILT_MASK) == RP_ITER_START_ALL) ^
	    (pattern == NULL))
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);

	/* Composition only works for instances & snapshots. */
	if ((flags & RP_ITER_START_COMPOSED) &&
	    (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_INSTANCE &&
	    np->rn_id.rl_type != REP_PROTOCOL_ENTITY_SNAPSHOT))
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);

	if (pattern != NULL) {
		if ((rc = rc_check_type_name(type, pattern)) !=
		    REP_PROTOCOL_SUCCESS)
			return (rc);
		/*
		 * Duplicate the pattern; on success the copy becomes the
		 * iterator's rni_filter_arg and is freed by
		 * rc_iter_destroy().
		 */
		pattern = strdup(pattern);
		if (pattern == NULL)
			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
	}

	/* Map the filtering mode onto a filter callback. */
	switch (flags & RP_ITER_START_FILT_MASK) {
	case RP_ITER_START_ALL:
		f = NULL;
		break;
	case RP_ITER_START_EXACT:
		f = rc_iter_filter_name;
		break;
	case RP_ITER_START_PGTYPE:
		/* Type filtering only applies to property groups. */
		if (type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
			free((void *)pattern);
			return (REP_PROTOCOL_FAIL_BAD_REQUEST);
		}
		f = rc_iter_filter_type;
		break;
	default:
		free((void *)pattern);
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
	}

	rc = rc_iter_create(iterp, np, type, f, (void *)pattern,
	    flags & RP_ITER_START_COMPOSED);
	/* On failure ownership of the pattern copy stays with us. */
	if (rc != REP_PROTOCOL_SUCCESS && pattern != NULL)
		free((void *)pattern);

	return (rc);
}
6323 
6324 /*
6325  * Do uu_list_walk_next(iter->rni_iter) until we find a child which matches
6326  * the filter.
6327  * For composed iterators, then check to see if there's an overlapping entity
6328  * (see embedded comments).  If we reach the end of the list, start over at
6329  * the next level.
6330  *
6331  * Returns
6332  *   _BAD_REQUEST - iter walks values
6333  *   _TYPE_MISMATCH - iter does not walk type entities
6334  *   _DELETED - parent was deleted
6335  *   _NO_RESOURCES
6336  *   _INVALID_TYPE - type is invalid
6337  *   _DONE
6338  *   _SUCCESS
6339  *
6340  * For composed property group iterators, can also return
6341  *   _TYPE_MISMATCH - parent cannot have type children
6342  */
int
rc_iter_next(rc_node_iter_t *iter, rc_node_ptr_t *out, uint32_t type)
{
	rc_node_t *np = iter->rni_parent;
	rc_node_t *res;
	int rc;

	/* Value iterators go through rc_iter_next_value() instead. */
	if (iter->rni_type == REP_PROTOCOL_ENTITY_VALUE)
		return (REP_PROTOCOL_FAIL_BAD_REQUEST);

	/* NULL walker: iteration already finished (or never started). */
	if (iter->rni_iter == NULL) {
		rc_node_clear(out, 0);
		return (REP_PROTOCOL_DONE);
	}

	if (iter->rni_type != type) {
		rc_node_clear(out, 0);
		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
	}

	(void) pthread_mutex_lock(&np->rn_lock);  /* held by _iter_create() */

	if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
		(void) pthread_mutex_unlock(&np->rn_lock);
		rc_node_clear(out, 1);
		return (REP_PROTOCOL_FAIL_DELETED);
	}

	if (iter->rni_clevel >= 0) {
		/* Composed iterator.  Iterate over appropriate level. */
		(void) pthread_mutex_unlock(&np->rn_lock);
		np = np->rn_cchain[iter->rni_clevel];
		/*
		 * If iter->rni_parent is an instance or a snapshot, np must
		 * be valid since iter holds iter->rni_parent & possible
		 * levels (service, instance, snaplevel) cannot be destroyed
		 * while rni_parent is held.  If iter->rni_parent is
		 * a composed property group then rc_node_setup_cpg() put
		 * a hold on np.
		 */

		(void) pthread_mutex_lock(&np->rn_lock);

		if (!rc_node_wait_flag(np, RC_NODE_CHILDREN_CHANGING)) {
			(void) pthread_mutex_unlock(&np->rn_lock);
			rc_node_clear(out, 1);
			return (REP_PROTOCOL_FAIL_DELETED);
		}
	}

	assert(np->rn_flags & RC_NODE_HAS_CHILDREN);

	for (;;) {
		res = uu_list_walk_next(iter->rni_iter);
		if (res == NULL) {
			/* Exhausted the children at the current level. */
			rc_node_t *parent = iter->rni_parent;

#if COMPOSITION_DEPTH == 2
			/* Simple iterator, or already at the last level. */
			if (iter->rni_clevel < 0 || iter->rni_clevel == 1) {
				/* release walker and lock */
				rc_iter_end(iter);
				break;
			}

			/* Stop walking current level. */
			uu_list_walk_end(iter->rni_iter);
			iter->rni_iter = NULL;
			(void) pthread_mutex_unlock(&np->rn_lock);
			rc_node_rele_other(iter->rni_iter_node);
			iter->rni_iter_node = NULL;

			/* Start walking next level. */
			++iter->rni_clevel;
			np = parent->rn_cchain[iter->rni_clevel];
			assert(np != NULL);
#else
#error This code must be updated.
#endif

			(void) pthread_mutex_lock(&np->rn_lock);

			rc = rc_node_fill_children(np, iter->rni_type);

			if (rc == REP_PROTOCOL_SUCCESS) {
				iter->rni_iter =
				    uu_list_walk_start(np->rn_children,
				    UU_WALK_ROBUST);

				if (iter->rni_iter == NULL)
					rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
				else {
					iter->rni_iter_node = np;
					rc_node_hold_other(np);
				}
			}

			if (rc != REP_PROTOCOL_SUCCESS) {
				(void) pthread_mutex_unlock(&np->rn_lock);
				rc_node_clear(out, 0);
				return (rc);
			}

			continue;
		}

		/* Skip children of the wrong type or rejected by filter. */
		if (res->rn_id.rl_type != type ||
		    !iter->rni_filter(res, iter->rni_filter_arg))
			continue;

		/*
		 * If we're composed and not at the top level, check to see if
		 * there's an entity at a higher level with the same name.  If
		 * so, skip this one.
		 */
		if (iter->rni_clevel > 0) {
			rc_node_t *ent = iter->rni_parent->rn_cchain[0];
			rc_node_t *pg;

#if COMPOSITION_DEPTH == 2
			assert(iter->rni_clevel == 1);

			/*
			 * Drop np's lock while looking up ent's children;
			 * both locks are never held at once.
			 */
			(void) pthread_mutex_unlock(&np->rn_lock);
			(void) pthread_mutex_lock(&ent->rn_lock);
			rc = rc_node_find_named_child(ent, res->rn_name, type,
			    &pg);
			/* Only needed it for existence; drop the hold. */
			if (rc == REP_PROTOCOL_SUCCESS && pg != NULL)
				rc_node_rele(pg);
			(void) pthread_mutex_unlock(&ent->rn_lock);
			if (rc != REP_PROTOCOL_SUCCESS) {
				rc_node_clear(out, 0);
				return (rc);
			}
			(void) pthread_mutex_lock(&np->rn_lock);

			/* Make sure np isn't being deleted all of a sudden. */
			if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
				(void) pthread_mutex_unlock(&np->rn_lock);
				rc_node_clear(out, 1);
				return (REP_PROTOCOL_FAIL_DELETED);
			}

			/* pg used only as a masked-or-not flag here. */
			if (pg != NULL)
				/* Keep going. */
				continue;
#else
#error This code must be updated.
#endif
		}

		/*
		 * If we're composed, iterating over property groups, and not
		 * at the bottom level, check to see if there's a pg at lower
		 * level with the same name.  If so, return a cpg.
		 */
		if (iter->rni_clevel >= 0 &&
		    type == REP_PROTOCOL_ENTITY_PROPERTYGRP &&
		    iter->rni_clevel < COMPOSITION_DEPTH - 1) {
#if COMPOSITION_DEPTH == 2
			rc_node_t *pg;
			rc_node_t *ent = iter->rni_parent->rn_cchain[1];

			rc_node_hold(res);	/* While we drop np->rn_lock */

			(void) pthread_mutex_unlock(&np->rn_lock);
			(void) pthread_mutex_lock(&ent->rn_lock);
			rc = rc_node_find_named_child(ent, res->rn_name, type,
			    &pg);
			/* holds pg if not NULL */
			(void) pthread_mutex_unlock(&ent->rn_lock);
			if (rc != REP_PROTOCOL_SUCCESS) {
				rc_node_rele(res);
				rc_node_clear(out, 0);
				return (rc);
			}

			(void) pthread_mutex_lock(&np->rn_lock);
			if (!rc_node_wait_flag(np, RC_NODE_DYING)) {
				(void) pthread_mutex_unlock(&np->rn_lock);
				rc_node_rele(res);
				if (pg != NULL)
					rc_node_rele(pg);
				rc_node_clear(out, 1);
				return (REP_PROTOCOL_FAIL_DELETED);
			}

			if (pg == NULL) {
				/* No lower-level pg; return res itself. */
				rc_node_rele(res);
			} else {
				rc_node_t *cpg;

				/* Keep res held for rc_node_setup_cpg(). */

				cpg = rc_node_alloc();
				if (cpg == NULL) {
					(void) pthread_mutex_unlock(
					    &np->rn_lock);
					rc_node_rele(res);
					rc_node_rele(pg);
					rc_node_clear(out, 0);
					return (REP_PROTOCOL_FAIL_NO_RESOURCES);
				}

				switch (rc_node_setup_cpg(cpg, res, pg)) {
				case REP_PROTOCOL_SUCCESS:
					/* Return the composed pg instead. */
					res = cpg;
					break;

				case REP_PROTOCOL_FAIL_TYPE_MISMATCH:
					/* Nevermind. */
					rc_node_destroy(cpg);
					rc_node_rele(pg);
					rc_node_rele(res);
					break;

				case REP_PROTOCOL_FAIL_NO_RESOURCES:
					rc_node_destroy(cpg);
					(void) pthread_mutex_unlock(
					    &np->rn_lock);
					rc_node_rele(res);
					rc_node_rele(pg);
					rc_node_clear(out, 0);
					return (REP_PROTOCOL_FAIL_NO_RESOURCES);

				default:
					assert(0);
					abort();
				}
			}
#else
#error This code must be updated.
#endif
		}

		rc_node_hold(res);
		(void) pthread_mutex_unlock(&np->rn_lock);
		break;
	}
	rc_node_assign(out, res);

	/* res == NULL only via the rc_iter_end() break above. */
	if (res == NULL)
		return (REP_PROTOCOL_DONE);
	rc_node_rele(res);
	return (REP_PROTOCOL_SUCCESS);
}
6587 
/*
 * Free the iterator *nipp (idempotent: a NULL *nipp is a no-op), ending
 * any in-progress walk via rc_iter_end() and freeing the filter argument
 * (the strdup'd pattern from rc_node_setup_iter(), if any).
 */
void
rc_iter_destroy(rc_node_iter_t **nipp)
{
	rc_node_iter_t *nip = *nipp;
	rc_node_t *np;

	if (nip == NULL)
		return;				/* already freed */

	np = nip->rni_parent;

	if (nip->rni_filter_arg != NULL)
		free(nip->rni_filter_arg);
	nip->rni_filter_arg = NULL;

	/*
	 * Value iterators always hold their parent; entity iterators only
	 * need cleanup while a walk is active.  rc_iter_end() expects the
	 * walked node's lock, so lock the parent or the current
	 * composition-chain level as appropriate.
	 */
	if (nip->rni_type == REP_PROTOCOL_ENTITY_VALUE ||
	    nip->rni_iter != NULL) {
		if (nip->rni_clevel < 0)
			(void) pthread_mutex_lock(&np->rn_lock);
		else
			(void) pthread_mutex_lock(
			    &np->rn_cchain[nip->rni_clevel]->rn_lock);
		rc_iter_end(nip);		/* release walker and lock */
	}
	nip->rni_parent = NULL;

	uu_free(nip);
	*nipp = NULL;
}
6617 
/*
 * Set up txp as a transaction handle on the property group *npp (or, for
 * a composed property group, on its top-level pg).  Performs the
 * permission checks up front where possible and records the outcome in
 * txp->rnp_authorized so that rc_tx_commit() can audit or skip rechecks.
 *
 * Fails with
 *   _TYPE_MISMATCH - *npp is not a (composed) property group
 *   _PERMISSION_DENIED - snapshot pgs are read-only, or (NATIVE_BUILD)
 *	the client is unprivileged
 *   _NO_RESOURCES
 *   _DELETED
 */
int
rc_node_setup_tx(rc_node_ptr_t *npp, rc_node_ptr_t *txp)
{
	rc_node_t *np;
	permcheck_t *pcp;
	int ret;
	rc_auth_state_t authorized = RC_AUTH_UNKNOWN;
	char *auth_string = NULL;

	RC_NODE_PTR_GET_CHECK_AND_HOLD(np, npp);

	if (np->rn_id.rl_type == REP_PROTOCOL_ENTITY_CPROPERTYGRP) {
		/*
		 * Transact on the top level of the composition chain.
		 * NOTE(review): rn_cchain[0] is read after the rele; this
		 * presumably relies on npp's own reference keeping np
		 * alive — confirm against the rc_node_ptr_t hold rules.
		 */
		rc_node_rele(np);
		np = np->rn_cchain[0];
		RC_NODE_CHECK_AND_HOLD(np);
	}

	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
		rc_node_rele(np);
		return (REP_PROTOCOL_FAIL_TYPE_MISMATCH);
	}

	/* Property groups inside snapshots are immutable. */
	if (np->rn_id.rl_ids[ID_SNAPSHOT] != 0) {
		rc_node_rele(np);
		return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
	}

#ifdef NATIVE_BUILD
	if (client_is_privileged())
		goto skip_checks;
	rc_node_rele(np);
	return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
#else
	/* Non-main repositories skip authorization checks entirely. */
	if (is_main_repository == 0)
		goto skip_checks;

	/* permission check */
	pcp = pc_create();
	if (pcp == NULL) {
		rc_node_rele(np);
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
	}

	if (np->rn_id.rl_ids[ID_INSTANCE] != 0 &&	/* instance pg */
	    ((strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0 &&
	    strcmp(np->rn_type, AUTH_PG_ACTIONS_TYPE) == 0) ||
	    (strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
	    strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0))) {
		rc_node_t *instn;

		/* solaris.smf.manage can be used. */
		ret = perm_add_enabling(pcp, AUTH_MANAGE);

		if (ret != REP_PROTOCOL_SUCCESS) {
			pc_free(pcp);
			rc_node_rele(np);
			return (ret);
		}

		/* general/action_authorization values can be used. */
		ret = rc_node_parent(np, &instn);
		if (ret != REP_PROTOCOL_SUCCESS) {
			assert(ret == REP_PROTOCOL_FAIL_DELETED);
			rc_node_rele(np);
			pc_free(pcp);
			return (REP_PROTOCOL_FAIL_DELETED);
		}

		assert(instn->rn_id.rl_type == REP_PROTOCOL_ENTITY_INSTANCE);

		ret = perm_add_inst_action_auth(pcp, instn);
		rc_node_rele(instn);
		switch (ret) {
		case REP_PROTOCOL_SUCCESS:
			break;

		case REP_PROTOCOL_FAIL_DELETED:
		case REP_PROTOCOL_FAIL_NO_RESOURCES:
			rc_node_rele(np);
			pc_free(pcp);
			return (ret);

		default:
			bad_error("perm_add_inst_action_auth", ret);
		}

		if (strcmp(np->rn_name, AUTH_PG_ACTIONS) == 0)
			authorized = RC_AUTH_PASSED; /* No check on commit. */
	} else {
		/* Ordinary pg: accumulate the applicable modify auths. */
		ret = perm_add_enabling(pcp, AUTH_MODIFY);

		if (ret == REP_PROTOCOL_SUCCESS) {
			/* propertygroup-type-specific authorization */
			/* no locking because rn_type won't change anyway */
			const char * const auth =
			    perm_auth_for_pgtype(np->rn_type);

			if (auth != NULL)
				ret = perm_add_enabling(pcp, auth);
		}

		if (ret == REP_PROTOCOL_SUCCESS)
			/* propertygroup/transaction-type-specific auths */
			ret =
			    perm_add_enabling_values(pcp, np, AUTH_PROP_VALUE);

		if (ret == REP_PROTOCOL_SUCCESS)
			ret =
			    perm_add_enabling_values(pcp, np, AUTH_PROP_MODIFY);

		/* AUTH_MANAGE can manipulate general/AUTH_PROP_ACTION */
		if (ret == REP_PROTOCOL_SUCCESS &&
		    strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
		    strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0)
			ret = perm_add_enabling(pcp, AUTH_MANAGE);

		if (ret != REP_PROTOCOL_SUCCESS) {
			pc_free(pcp);
			rc_node_rele(np);
			return (ret);
		}
	}

	/* perm_granted(): 1 = granted, 0 = denied, < 0 = error. */
	ret = perm_granted(pcp);
	/*
	 * Copy out the authorization string before freeing pcp.
	 */
	if (ret >= 0) {
		auth_string = strdup(pcp->pc_auth_string);
	}
	pc_free(pcp);
	if ((auth_string == NULL) || (ret < 0)) {
		rc_node_rele(np);
		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
	}

	if (ret == 0) {
		/*
		 * If we get here, the authorization failed.
		 * Unfortunately, we don't have enough information at this
		 * point to generate the security audit events.  We'll only
		 * get that information when the client tries to commit the
		 * event.  Thus, we'll remember the failed authorization,
		 * so that we can generate the audit events later.
		 */
		authorized = RC_AUTH_FAILED;
	}
#endif /* NATIVE_BUILD */

skip_checks:
	rc_node_assign(txp, np);
	txp->rnp_authorized = authorized;
	if (authorized != RC_AUTH_UNKNOWN) {
		/* Save the authorization string. */
		if (txp->rnp_auth_string != NULL)
			free((void *)txp->rnp_auth_string);
		txp->rnp_auth_string = auth_string;
		auth_string = NULL;	/* Don't free until done with txp. */
	}

	rc_node_rele(np);
	if (auth_string != NULL)
		free(auth_string);
	return (REP_PROTOCOL_SUCCESS);
}
6783 
6784 /*
6785  * Return 1 if the given transaction commands only modify the values of
6786  * properties other than "modify_authorization".  Return -1 if any of the
6787  * commands are invalid, and 0 otherwise.
6788  */
static int
tx_allow_value(const void *cmds_arg, size_t cmds_sz, rc_node_t *pg)
{
	const struct rep_protocol_transaction_cmd *cmds;
	uintptr_t loc;
	uint32_t sz;
	rc_node_t *prop;
	boolean_t ok;

	assert(!MUTEX_HELD(&pg->rn_lock));

	loc = (uintptr_t)cmds_arg;

	/* Walk the packed command buffer one TX_SIZE()-padded cmd at a time. */
	while (cmds_sz > 0) {
		cmds = (struct rep_protocol_transaction_cmd *)loc;

		/* Validate the claimed size against what remains. */
		if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
			return (-1);

		sz = cmds->rptc_size;
		if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
			return (-1);

		sz = TX_SIZE(sz);
		if (sz > cmds_sz)
			return (-1);

		switch (cmds[0].rptc_action) {
		case REP_PROTOCOL_TX_ENTRY_CLEAR:
			break;

		case REP_PROTOCOL_TX_ENTRY_REPLACE:
			/* Check type */
			(void) pthread_mutex_lock(&pg->rn_lock);
			if (rc_node_find_named_child(pg,
			    (const char *)cmds[0].rptc_data,
			    REP_PROTOCOL_ENTITY_PROPERTY, &prop) ==
			    REP_PROTOCOL_SUCCESS) {
				/*
				 * A REPLACE only counts as a value change if
				 * the property exists with the same type.
				 *
				 * NOTE(review): rc_node_find_named_child()
				 * appears to return prop held (cf.
				 * rc_iter_next(), which releases the node it
				 * gets back); there is no matching
				 * rc_node_rele(prop) here — verify whether
				 * this leaks a reference.
				 */
				ok = (prop != NULL &&
				    prop->rn_valtype == cmds[0].rptc_type);
			} else {
				/* Return more particular error? */
				ok = B_FALSE;
			}
			(void) pthread_mutex_unlock(&pg->rn_lock);
			if (ok)
				break;
			return (0);

		default:
			/* Any other action is more than a value change. */
			return (0);
		}

		/* Touching modify_authorization itself is never a mere
		 * value change. */
		if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_MODIFY)
		    == 0)
			return (0);

		loc += sz;
		cmds_sz -= sz;
	}

	return (1);
}
6852 
6853 /*
6854  * Return 1 if any of the given transaction commands affect
6855  * "action_authorization".  Return -1 if any of the commands are invalid and
6856  * 0 in all other cases.
6857  */
6858 static int
6859 tx_modifies_action(const void *cmds_arg, size_t cmds_sz)
6860 {
6861 	const struct rep_protocol_transaction_cmd *cmds;
6862 	uintptr_t loc;
6863 	uint32_t sz;
6864 
6865 	loc = (uintptr_t)cmds_arg;
6866 
6867 	while (cmds_sz > 0) {
6868 		cmds = (struct rep_protocol_transaction_cmd *)loc;
6869 
6870 		if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6871 			return (-1);
6872 
6873 		sz = cmds->rptc_size;
6874 		if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6875 			return (-1);
6876 
6877 		sz = TX_SIZE(sz);
6878 		if (sz > cmds_sz)
6879 			return (-1);
6880 
6881 		if (strcmp((const char *)cmds[0].rptc_data, AUTH_PROP_ACTION)
6882 		    == 0)
6883 			return (1);
6884 
6885 		loc += sz;
6886 		cmds_sz -= sz;
6887 	}
6888 
6889 	return (0);
6890 }
6891 
6892 /*
6893  * Returns 1 if the transaction commands only modify properties named
6894  * 'enabled'.
6895  */
6896 static int
6897 tx_only_enabled(const void *cmds_arg, size_t cmds_sz)
6898 {
6899 	const struct rep_protocol_transaction_cmd *cmd;
6900 	uintptr_t loc;
6901 	uint32_t sz;
6902 
6903 	loc = (uintptr_t)cmds_arg;
6904 
6905 	while (cmds_sz > 0) {
6906 		cmd = (struct rep_protocol_transaction_cmd *)loc;
6907 
6908 		if (cmds_sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6909 			return (-1);
6910 
6911 		sz = cmd->rptc_size;
6912 		if (sz <= REP_PROTOCOL_TRANSACTION_CMD_MIN_SIZE)
6913 			return (-1);
6914 
6915 		sz = TX_SIZE(sz);
6916 		if (sz > cmds_sz)
6917 			return (-1);
6918 
6919 		if (strcmp((const char *)cmd->rptc_data, AUTH_PROP_ENABLED)
6920 		    != 0)
6921 			return (0);
6922 
6923 		loc += sz;
6924 		cmds_sz -= sz;
6925 	}
6926 
6927 	return (1);
6928 }
6929 
6930 int
6931 rc_tx_commit(rc_node_ptr_t *txp, const void *cmds, size_t cmds_sz)
6932 {
6933 	rc_node_t *np = txp->rnp_node;
6934 	rc_node_t *pp;
6935 	rc_node_t *nnp;
6936 	rc_node_pg_notify_t *pnp;
6937 	int rc;
6938 	permcheck_t *pcp;
6939 	int granted, normal;
6940 	char *pg_fmri = NULL;
6941 	char *auth_string = NULL;
6942 	int auth_status = ADT_SUCCESS;
6943 	int auth_ret_value = ADT_SUCCESS;
6944 	size_t sz_out;
6945 	int tx_flag = 1;
6946 	tx_commit_data_t *tx_data = NULL;
6947 
6948 	RC_NODE_CHECK(np);
6949 
6950 	if ((txp->rnp_authorized != RC_AUTH_UNKNOWN) &&
6951 	    (txp->rnp_auth_string != NULL)) {
6952 		auth_string = strdup(txp->rnp_auth_string);
6953 		if (auth_string == NULL)
6954 			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6955 	}
6956 
6957 	if ((txp->rnp_authorized == RC_AUTH_UNKNOWN) &&
6958 	    is_main_repository) {
6959 #ifdef NATIVE_BUILD
6960 		if (!client_is_privileged()) {
6961 			return (REP_PROTOCOL_FAIL_PERMISSION_DENIED);
6962 		}
6963 #else
6964 		/* permission check: depends on contents of transaction */
6965 		pcp = pc_create();
6966 		if (pcp == NULL)
6967 			return (REP_PROTOCOL_FAIL_NO_RESOURCES);
6968 
6969 		/* If normal is cleared, we won't do the normal checks. */
6970 		normal = 1;
6971 		rc = REP_PROTOCOL_SUCCESS;
6972 
6973 		if (strcmp(np->rn_name, AUTH_PG_GENERAL) == 0 &&
6974 		    strcmp(np->rn_type, AUTH_PG_GENERAL_TYPE) == 0) {
6975 			/* Touching general[framework]/action_authorization? */
6976 			rc = tx_modifies_action(cmds, cmds_sz);
6977 			if (rc == -1) {
6978 				pc_free(pcp);
6979 				return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6980 			}
6981 
6982 			if (rc) {
6983 				/* Yes: only AUTH_MANAGE can be used. */
6984 				rc = perm_add_enabling(pcp, AUTH_MANAGE);
6985 				normal = 0;
6986 			} else {
6987 				rc = REP_PROTOCOL_SUCCESS;
6988 			}
6989 		} else if (np->rn_id.rl_ids[ID_INSTANCE] != 0 &&
6990 		    strcmp(np->rn_name, AUTH_PG_GENERAL_OVR) == 0 &&
6991 		    strcmp(np->rn_type, AUTH_PG_GENERAL_OVR_TYPE) == 0) {
6992 			rc_node_t *instn;
6993 
6994 			rc = tx_only_enabled(cmds, cmds_sz);
6995 			if (rc == -1) {
6996 				pc_free(pcp);
6997 				return (REP_PROTOCOL_FAIL_BAD_REQUEST);
6998 			}
6999 
7000 			if (rc) {
7001 				rc = rc_node_parent(np, &instn);
7002 				if (rc != REP_PROTOCOL_SUCCESS) {
7003 					assert(rc == REP_PROTOCOL_FAIL_DELETED);
7004 					pc_free(pcp);
7005 					return (rc);
7006 				}
7007 
7008 				assert(instn->rn_id.rl_type ==
7009 				    REP_PROTOCOL_ENTITY_INSTANCE);
7010 
7011 				rc = perm_add_inst_action_auth(pcp, instn);
7012 				rc_node_rele(instn);
7013 				switch (rc) {
7014 				case REP_PROTOCOL_SUCCESS:
7015 					break;
7016 
7017 				case REP_PROTOCOL_FAIL_DELETED:
7018 				case REP_PROTOCOL_FAIL_NO_RESOURCES:
7019 					pc_free(pcp);
7020 					return (rc);
7021 
7022 				default:
7023 					bad_error("perm_add_inst_action_auth",
7024 					    rc);
7025 				}
7026 			} else {
7027 				rc = REP_PROTOCOL_SUCCESS;
7028 			}
7029 		}
7030 
7031 		if (rc == REP_PROTOCOL_SUCCESS && normal) {
7032 			rc = perm_add_enabling(pcp, AUTH_MODIFY);
7033 
7034 			if (rc == REP_PROTOCOL_SUCCESS) {
7035 				/* Add pgtype-specific authorization. */
7036 				const char * const auth =
7037 				    perm_auth_for_pgtype(np->rn_type);
7038 
7039 				if (auth != NULL)
7040 					rc = perm_add_enabling(pcp, auth);
7041 			}
7042 
7043 			/* Add pg-specific modify_authorization auths. */
7044 			if (rc == REP_PROTOCOL_SUCCESS)
7045 				rc = perm_add_enabling_values(pcp, np,
7046 				    AUTH_PROP_MODIFY);
7047 
7048 			/* If value_authorization values are ok, add them. */
7049 			if (rc == REP_PROTOCOL_SUCCESS) {
7050 				rc = tx_allow_value(cmds, cmds_sz, np);
7051 				if (rc == -1)
7052 					rc = REP_PROTOCOL_FAIL_BAD_REQUEST;
7053 				else if (rc)
7054 					rc = perm_add_enabling_values(pcp, np,
7055 					    AUTH_PROP_VALUE);
7056 			}
7057 		}
7058 
7059 		if (rc == REP_PROTOCOL_SUCCESS) {
7060 			granted = perm_granted(pcp);
7061 			if (granted < 0) {
7062 				rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7063 			} else {
7064 				/*
7065 				 * Copy out the authorization string before
7066 				 * freeing pcp.
7067 				 */
7068 				auth_string = strdup(pcp->pc_auth_string);
7069 				if (auth_string == NULL)
7070 					rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7071 			}
7072 		}
7073 
7074 		pc_free(pcp);
7075 
7076 		if (rc != REP_PROTOCOL_SUCCESS)
7077 			goto cleanout;
7078 
7079 		if (!granted) {
7080 			auth_status = ADT_FAILURE;
7081 			auth_ret_value = ADT_FAIL_VALUE_AUTH;
7082 			tx_flag = 0;
7083 		}
7084 #endif /* NATIVE_BUILD */
7085 	} else if (txp->rnp_authorized == RC_AUTH_FAILED) {
7086 		auth_status = ADT_FAILURE;
7087 		auth_ret_value = ADT_FAIL_VALUE_AUTH;
7088 		tx_flag = 0;
7089 	}
7090 
7091 	pg_fmri = malloc(REP_PROTOCOL_FMRI_LEN);
7092 	if (pg_fmri == NULL) {
7093 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7094 		goto cleanout;
7095 	}
7096 	if ((rc = rc_node_get_fmri_or_fragment(np, pg_fmri,
7097 	    REP_PROTOCOL_FMRI_LEN, &sz_out)) != REP_PROTOCOL_SUCCESS) {
7098 		goto cleanout;
7099 	}
7100 
7101 	/*
7102 	 * Parse the transaction commands into a useful form.
7103 	 */
7104 	if ((rc = tx_commit_data_new(cmds, cmds_sz, &tx_data)) !=
7105 	    REP_PROTOCOL_SUCCESS) {
7106 		goto cleanout;
7107 	}
7108 
7109 	if (tx_flag == 0) {
7110 		/* Authorization failed.  Generate audit events. */
7111 		generate_property_events(tx_data, pg_fmri, auth_string,
7112 		    auth_status, auth_ret_value);
7113 		rc = REP_PROTOCOL_FAIL_PERMISSION_DENIED;
7114 		goto cleanout;
7115 	}
7116 
7117 	nnp = rc_node_alloc();
7118 	if (nnp == NULL) {
7119 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7120 		goto cleanout;
7121 	}
7122 
7123 	nnp->rn_id = np->rn_id;			/* structure assignment */
7124 	nnp->rn_hash = np->rn_hash;
7125 	nnp->rn_name = strdup(np->rn_name);
7126 	nnp->rn_type = strdup(np->rn_type);
7127 	nnp->rn_pgflags = np->rn_pgflags;
7128 
7129 	nnp->rn_flags = RC_NODE_IN_TX | RC_NODE_USING_PARENT;
7130 
7131 	if (nnp->rn_name == NULL || nnp->rn_type == NULL) {
7132 		rc_node_destroy(nnp);
7133 		rc = REP_PROTOCOL_FAIL_NO_RESOURCES;
7134 		goto cleanout;
7135 	}
7136 
7137 	(void) pthread_mutex_lock(&np->rn_lock);
7138 
7139 	/*
7140 	 * We must have all of the old properties in the cache, or the
7141 	 * database deletions could cause inconsistencies.
7142 	 */
7143 	if ((rc = rc_node_fill_children(np, REP_PROTOCOL_ENTITY_PROPERTY)) !=
7144 	    REP_PROTOCOL_SUCCESS) {
7145 		(void) pthread_mutex_unlock(&np->rn_lock);
7146 		rc_node_destroy(nnp);
7147 		goto cleanout;
7148 	}
7149 
7150 	if (!rc_node_hold_flag(np, RC_NODE_USING_PARENT)) {
7151 		(void) pthread_mutex_unlock(&np->rn_lock);
7152 		rc_node_destroy(nnp);
7153 		rc = REP_PROTOCOL_FAIL_DELETED;
7154 		goto cleanout;
7155 	}
7156 
7157 	if (np->rn_flags & RC_NODE_OLD) {
7158 		rc_node_rele_flag(np, RC_NODE_USING_PARENT);
7159 		(void) pthread_mutex_unlock(&np->rn_lock);
7160 		rc_node_destroy(nnp);
7161 		rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7162 		goto cleanout;
7163 	}
7164 
7165 	pp = rc_node_hold_parent_flag(np, RC_NODE_CHILDREN_CHANGING);
7166 	if (pp == NULL) {
7167 		/* our parent is gone, we're going next... */
7168 		rc_node_destroy(nnp);
7169 		(void) pthread_mutex_lock(&np->rn_lock);
7170 		if (np->rn_flags & RC_NODE_OLD) {
7171 			(void) pthread_mutex_unlock(&np->rn_lock);
7172 			rc = REP_PROTOCOL_FAIL_NOT_LATEST;
7173 			goto cleanout;
7174 		}
7175 		(void) pthread_mutex_unlock(&np->rn_lock);
7176 		rc = REP_PROTOCOL_FAIL_DELETED;
7177 		goto cleanout;
7178 	}
7179 	(void) pthread_mutex_unlock(&pp->rn_lock);
7180 
7181 	/*
7182 	 * prepare for the transaction
7183 	 */
7184 	(void) pthread_mutex_lock(&np->rn_lock);
7185 	if (!rc_node_hold_flag(np, RC_NODE_IN_TX)) {
7186 		(void) pthread_mutex_unlock(&np->rn_lock);
7187 		(void) pthread_mutex_lock(&pp->rn_lock);
7188 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7189 		(void) pthread_mutex_unlock(&pp->rn_lock);
7190 		rc_node_destroy(nnp);
7191 		rc = REP_PROTOCOL_FAIL_DELETED;
7192 		goto cleanout;
7193 	}
7194 	nnp->rn_gen_id = np->rn_gen_id;
7195 	(void) pthread_mutex_unlock(&np->rn_lock);
7196 
7197 	/* Sets nnp->rn_gen_id on success. */
7198 	rc = object_tx_commit(&np->rn_id, tx_data, &nnp->rn_gen_id);
7199 
7200 	(void) pthread_mutex_lock(&np->rn_lock);
7201 	if (rc != REP_PROTOCOL_SUCCESS) {
7202 		rc_node_rele_flag(np, RC_NODE_IN_TX);
7203 		(void) pthread_mutex_unlock(&np->rn_lock);
7204 		(void) pthread_mutex_lock(&pp->rn_lock);
7205 		rc_node_rele_flag(pp, RC_NODE_CHILDREN_CHANGING);
7206 		(void) pthread_mutex_unlock(&pp->rn_lock);
7207 		rc_node_destroy(nnp);
7208 		rc_node_clear(txp, 0);
7209 		if (rc == REP_PROTOCOL_DONE)
7210 			rc = REP_PROTOCOL_SUCCESS; /* successful empty tx */
7211 		goto cleanout;
7212 	}
7213 
7214 	/*
7215 	 * Notify waiters
7216 	 */
7217 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7218 	while ((pnp = uu_list_first(np->rn_pg_notify_list)) != NULL)
7219 		rc_pg_notify_fire(pnp);
7220 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7221 
7222 	np->rn_flags |= RC_NODE_OLD;
7223 	(void) pthread_mutex_unlock(&np->rn_lock);
7224 
7225 	rc_notify_remove_node(np);
7226 
7227 	/*
7228 	 * replace np with nnp
7229 	 */
7230 	rc_node_relink_child(pp, np, nnp);
7231 
7232 	/*
7233 	 * all done -- clear the transaction.
7234 	 */
7235 	rc_node_clear(txp, 0);
7236 	generate_property_events(tx_data, pg_fmri, auth_string,
7237 	    auth_status, auth_ret_value);
7238 
7239 	rc = REP_PROTOCOL_SUCCESS;
7240 
7241 cleanout:
7242 	free(auth_string);
7243 	free(pg_fmri);
7244 	tx_commit_data_free(tx_data);
7245 	return (rc);
7246 }
7247 
/*
 * Initialize a property-group notification structure so it can later be
 * attached to a pg with rc_pg_notify_setup().
 */
void
rc_pg_notify_init(rc_node_pg_notify_t *pnp)
{
	uu_list_node_init(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
	pnp->rnpn_pg = NULL;	/* not attached to any property group yet */
	pnp->rnpn_fd = -1;	/* no client file descriptor yet */
}
7255 
7256 int
7257 rc_pg_notify_setup(rc_node_pg_notify_t *pnp, rc_node_ptr_t *npp, int fd)
7258 {
7259 	rc_node_t *np;
7260 
7261 	RC_NODE_PTR_GET_CHECK_AND_LOCK(np, npp);
7262 
7263 	if (np->rn_id.rl_type != REP_PROTOCOL_ENTITY_PROPERTYGRP) {
7264 		(void) pthread_mutex_unlock(&np->rn_lock);
7265 		return (REP_PROTOCOL_FAIL_BAD_REQUEST);
7266 	}
7267 
7268 	/*
7269 	 * wait for any transaction in progress to complete
7270 	 */
7271 	if (!rc_node_wait_flag(np, RC_NODE_IN_TX)) {
7272 		(void) pthread_mutex_unlock(&np->rn_lock);
7273 		return (REP_PROTOCOL_FAIL_DELETED);
7274 	}
7275 
7276 	if (np->rn_flags & RC_NODE_OLD) {
7277 		(void) pthread_mutex_unlock(&np->rn_lock);
7278 		return (REP_PROTOCOL_FAIL_NOT_LATEST);
7279 	}
7280 
7281 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7282 	rc_pg_notify_fire(pnp);
7283 	pnp->rnpn_pg = np;
7284 	pnp->rnpn_fd = fd;
7285 	(void) uu_list_insert_after(np->rn_pg_notify_list, NULL, pnp);
7286 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7287 
7288 	(void) pthread_mutex_unlock(&np->rn_lock);
7289 	return (REP_PROTOCOL_SUCCESS);
7290 }
7291 
/*
 * Tear down a property-group notification structure:  fire (and thereby
 * detach) any notification still pending on it, then release its list
 * node.  Safe to call on a pnp that was never set up.
 */
void
rc_pg_notify_fini(rc_node_pg_notify_t *pnp)
{
	(void) pthread_mutex_lock(&rc_pg_notify_lock);
	rc_pg_notify_fire(pnp);
	(void) pthread_mutex_unlock(&rc_pg_notify_lock);

	uu_list_node_fini(pnp, &pnp->rnpn_node, rc_pg_notify_pool);
}
7301 
7302 void
7303 rc_notify_info_init(rc_notify_info_t *rnip)
7304 {
7305 	int i;
7306 
7307 	uu_list_node_init(rnip, &rnip->rni_list_node, rc_notify_info_pool);
7308 	uu_list_node_init(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
7309 	    rc_notify_pool);
7310 
7311 	rnip->rni_notify.rcn_node = NULL;
7312 	rnip->rni_notify.rcn_info = rnip;
7313 
7314 	bzero(rnip->rni_namelist, sizeof (rnip->rni_namelist));
7315 	bzero(rnip->rni_typelist, sizeof (rnip->rni_typelist));
7316 
7317 	(void) pthread_cond_init(&rnip->rni_cv, NULL);
7318 
7319 	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7320 		rnip->rni_namelist[i] = NULL;
7321 		rnip->rni_typelist[i] = NULL;
7322 	}
7323 }
7324 
/*
 * Put the notification client rnip on the active lists:  the client list
 * itself, and (via its embedded rni_notify marker) the tail... rather,
 * the end of the global notification list, so the client only sees
 * events that arrive after it activates.  Caller holds rc_pg_notify_lock.
 */
static void
rc_notify_info_insert_locked(rc_notify_info_t *rnip)
{
	assert(MUTEX_HELD(&rc_pg_notify_lock));

	assert(!(rnip->rni_flags & RC_NOTIFY_ACTIVE));

	rnip->rni_flags |= RC_NOTIFY_ACTIVE;
	(void) uu_list_insert_after(rc_notify_info_list, NULL, rnip);
	(void) uu_list_insert_before(rc_notify_list, NULL, &rnip->rni_notify);
}
7336 
/*
 * Take the notification client rnip off the active lists.  RC_NOTIFY_DRAIN
 * tells threads blocked in rc_notify_info_wait() to bail out; we then wait
 * for all of them to leave before clearing the flags.  Caller holds
 * rc_pg_notify_lock.
 */
static void
rc_notify_info_remove_locked(rc_notify_info_t *rnip)
{
	rc_notify_t *me = &rnip->rni_notify;	/* our list marker */
	rc_notify_t *np;

	assert(MUTEX_HELD(&rc_pg_notify_lock));

	assert(rnip->rni_flags & RC_NOTIFY_ACTIVE);

	assert(!(rnip->rni_flags & RC_NOTIFY_DRAIN));
	rnip->rni_flags |= RC_NOTIFY_DRAIN;
	(void) pthread_cond_broadcast(&rnip->rni_cv);

	(void) uu_list_remove(rc_notify_info_list, rnip);

	/*
	 * clean up any notifications at the beginning of the list
	 */
	if (uu_list_first(rc_notify_list) == me) {
		/*
		 * Entries with a NULL rcn_info are real notifications (not
		 * client markers); with our marker gone, nobody ahead of
		 * them would ever reap them.
		 */
		while ((np = uu_list_next(rc_notify_list, me)) != NULL &&
		    np->rcn_info == NULL)
			rc_notify_remove_locked(np);
	}
	(void) uu_list_remove(rc_notify_list, me);

	/* Wake waiters and let them drain out before we finish. */
	while (rnip->rni_waiters) {
		(void) pthread_cond_broadcast(&rc_pg_notify_cv);
		(void) pthread_cond_broadcast(&rnip->rni_cv);
		(void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
	}

	rnip->rni_flags &= ~(RC_NOTIFY_DRAIN | RC_NOTIFY_ACTIVE);
}
7371 
7372 static int
7373 rc_notify_info_add_watch(rc_notify_info_t *rnip, const char **arr,
7374     const char *name)
7375 {
7376 	int i;
7377 	int rc;
7378 	char *f;
7379 
7380 	rc = rc_check_type_name(REP_PROTOCOL_ENTITY_PROPERTYGRP, name);
7381 	if (rc != REP_PROTOCOL_SUCCESS)
7382 		return (rc);
7383 
7384 	f = strdup(name);
7385 	if (f == NULL)
7386 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7387 
7388 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7389 
7390 	while (rnip->rni_flags & RC_NOTIFY_EMPTYING)
7391 		(void) pthread_cond_wait(&rnip->rni_cv, &rc_pg_notify_lock);
7392 
7393 	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7394 		if (arr[i] == NULL)
7395 			break;
7396 
7397 		/*
7398 		 * Don't add name if it's already being tracked.
7399 		 */
7400 		if (strcmp(arr[i], f) == 0) {
7401 			free(f);
7402 			goto out;
7403 		}
7404 	}
7405 
7406 	if (i == RC_NOTIFY_MAX_NAMES) {
7407 		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7408 		free(f);
7409 		return (REP_PROTOCOL_FAIL_NO_RESOURCES);
7410 	}
7411 
7412 	arr[i] = f;
7413 
7414 out:
7415 	if (!(rnip->rni_flags & RC_NOTIFY_ACTIVE))
7416 		rc_notify_info_insert_locked(rnip);
7417 
7418 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7419 	return (REP_PROTOCOL_SUCCESS);
7420 }
7421 
/*
 * Watch property groups named name on behalf of notification client rnip.
 */
int
rc_notify_info_add_name(rc_notify_info_t *rnip, const char *name)
{
	return (rc_notify_info_add_watch(rnip, rnip->rni_namelist, name));
}
7427 
/*
 * Watch property groups of type type on behalf of notification client rnip.
 */
int
rc_notify_info_add_type(rc_notify_info_t *rnip, const char *type)
{
	return (rc_notify_info_add_watch(rnip, rnip->rni_typelist, type));
}
7433 
7434 /*
7435  * Wait for and report an event of interest to rnip, a notification client
7436  */
7437 int
7438 rc_notify_info_wait(rc_notify_info_t *rnip, rc_node_ptr_t *out,
7439     char *outp, size_t sz)
7440 {
7441 	rc_notify_t *np;
7442 	rc_notify_t *me = &rnip->rni_notify;
7443 	rc_node_t *nnp;
7444 	rc_notify_delete_t *ndp;
7445 
7446 	int am_first_info;
7447 
7448 	if (sz > 0)
7449 		outp[0] = 0;
7450 
7451 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7452 
7453 	while ((rnip->rni_flags & (RC_NOTIFY_ACTIVE | RC_NOTIFY_DRAIN)) ==
7454 	    RC_NOTIFY_ACTIVE) {
7455 		/*
7456 		 * If I'm first on the notify list, it is my job to
7457 		 * clean up any notifications I pass by.  I can't do that
7458 		 * if someone is blocking the list from removals, so I
7459 		 * have to wait until they have all drained.
7460 		 */
7461 		am_first_info = (uu_list_first(rc_notify_list) == me);
7462 		if (am_first_info && rc_notify_in_use) {
7463 			rnip->rni_waiters++;
7464 			(void) pthread_cond_wait(&rc_pg_notify_cv,
7465 			    &rc_pg_notify_lock);
7466 			rnip->rni_waiters--;
7467 			continue;
7468 		}
7469 
7470 		/*
7471 		 * Search the list for a node of interest.
7472 		 */
7473 		np = uu_list_next(rc_notify_list, me);
7474 		while (np != NULL && !rc_notify_info_interested(rnip, np)) {
7475 			rc_notify_t *next = uu_list_next(rc_notify_list, np);
7476 
7477 			if (am_first_info) {
7478 				if (np->rcn_info) {
7479 					/*
7480 					 * Passing another client -- stop
7481 					 * cleaning up notifications
7482 					 */
7483 					am_first_info = 0;
7484 				} else {
7485 					rc_notify_remove_locked(np);
7486 				}
7487 			}
7488 			np = next;
7489 		}
7490 
7491 		/*
7492 		 * Nothing of interest -- wait for notification
7493 		 */
7494 		if (np == NULL) {
7495 			rnip->rni_waiters++;
7496 			(void) pthread_cond_wait(&rnip->rni_cv,
7497 			    &rc_pg_notify_lock);
7498 			rnip->rni_waiters--;
7499 			continue;
7500 		}
7501 
7502 		/*
7503 		 * found something to report -- move myself after the
7504 		 * notification and process it.
7505 		 */
7506 		(void) uu_list_remove(rc_notify_list, me);
7507 		(void) uu_list_insert_after(rc_notify_list, np, me);
7508 
7509 		if ((ndp = np->rcn_delete) != NULL) {
7510 			(void) strlcpy(outp, ndp->rnd_fmri, sz);
7511 			if (am_first_info)
7512 				rc_notify_remove_locked(np);
7513 			(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7514 			rc_node_clear(out, 0);
7515 			return (REP_PROTOCOL_SUCCESS);
7516 		}
7517 
7518 		nnp = np->rcn_node;
7519 		assert(nnp != NULL);
7520 
7521 		/*
7522 		 * We can't bump nnp's reference count without grabbing its
7523 		 * lock, and rc_pg_notify_lock is a leaf lock.  So we
7524 		 * temporarily block all removals to keep nnp from
7525 		 * disappearing.
7526 		 */
7527 		rc_notify_in_use++;
7528 		assert(rc_notify_in_use > 0);
7529 		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7530 
7531 		rc_node_assign(out, nnp);
7532 
7533 		(void) pthread_mutex_lock(&rc_pg_notify_lock);
7534 		assert(rc_notify_in_use > 0);
7535 		rc_notify_in_use--;
7536 		if (am_first_info)
7537 			rc_notify_remove_locked(np);
7538 		if (rc_notify_in_use == 0)
7539 			(void) pthread_cond_broadcast(&rc_pg_notify_cv);
7540 		(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7541 
7542 		return (REP_PROTOCOL_SUCCESS);
7543 	}
7544 	/*
7545 	 * If we're the last one out, let people know it's clear.
7546 	 */
7547 	if (rnip->rni_waiters == 0)
7548 		(void) pthread_cond_broadcast(&rnip->rni_cv);
7549 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7550 	return (REP_PROTOCOL_DONE);
7551 }
7552 
7553 static void
7554 rc_notify_info_reset(rc_notify_info_t *rnip)
7555 {
7556 	int i;
7557 
7558 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7559 	if (rnip->rni_flags & RC_NOTIFY_ACTIVE)
7560 		rc_notify_info_remove_locked(rnip);
7561 	assert(!(rnip->rni_flags & (RC_NOTIFY_DRAIN | RC_NOTIFY_EMPTYING)));
7562 	rnip->rni_flags |= RC_NOTIFY_EMPTYING;
7563 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7564 
7565 	for (i = 0; i < RC_NOTIFY_MAX_NAMES; i++) {
7566 		if (rnip->rni_namelist[i] != NULL) {
7567 			free((void *)rnip->rni_namelist[i]);
7568 			rnip->rni_namelist[i] = NULL;
7569 		}
7570 		if (rnip->rni_typelist[i] != NULL) {
7571 			free((void *)rnip->rni_typelist[i]);
7572 			rnip->rni_typelist[i] = NULL;
7573 		}
7574 	}
7575 
7576 	(void) pthread_mutex_lock(&rc_pg_notify_lock);
7577 	rnip->rni_flags &= ~RC_NOTIFY_EMPTYING;
7578 	(void) pthread_mutex_unlock(&rc_pg_notify_lock);
7579 }
7580 
7581 void
7582 rc_notify_info_fini(rc_notify_info_t *rnip)
7583 {
7584 	rc_notify_info_reset(rnip);
7585 
7586 	uu_list_node_fini(rnip, &rnip->rni_list_node, rc_notify_info_pool);
7587 	uu_list_node_fini(&rnip->rni_notify, &rnip->rni_notify.rcn_list_node,
7588 	    rc_notify_pool);
7589 }
7590