xref: /titanic_44/usr/src/cmd/rcm_daemon/common/rcm_lock.c (revision 03859504f0d42ac2e32c1828dd62f1a80c4d2ab1)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "rcm_impl.h"
#include "rcm_module.h"

/*
 * Global locks
 */
mutex_t rcm_req_lock;	/* protects global dr & info request list */

/*
 * Daemon state file
 */
static int state_fd;
#define	RCM_STATE_FILE	"/var/run/rcm_daemon_state"
#define	N_REQ_CHUNK	10	/* grow 10 entries at a time */

/*
 * Daemon timeout value
 */
#define	RCM_DAEMON_TIMEOUT	300	/* 5 minutes idle time */

/*
 * Struct for a list of outstanding rcm requests
 */
typedef struct {
	int	seq_num;		/* sequence number of request */
	int	state;			/* current state */
	pid_t	pid;			/* pid of initiator */
	uint_t	flag;			/* request flags */
	int	type;			/* resource(device) type */
	timespec_t interval;		/* suspend interval */
	char	device[MAXPATHLEN];	/* name of device or resource */
} req_t;

typedef struct {
	int	n_req;
	int	n_req_max;	/* number of req_t's to follow */
	int	n_seq_max;	/* last sequence number */
	int	idle_timeout;	/* persist idle timeout value */
	req_t	req[1];
	/* more req_t follows */
} req_list_t;

static req_list_t *dr_req_list;
static req_list_t *info_req_list;
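
/*
 * dr_req_list is backed by the mmap'ed daemon state file
 * (RCM_STATE_FILE), so outstanding DR requests survive a daemon
 * restart and can be recovered by clean_dr_list().  info_req_list is
 * ordinary heap memory and starts empty on every daemon start.
 */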

static const char *locked_info = "DR operation in progress";
static const char *locked_err = "Resource is busy";

static int rcmd_get_state();
static void add_to_polling_list(pid_t);
static void remove_from_polling_list(pid_t);

void start_polling_thread();
static void stop_polling_thread();

/*
 * Initialize request lists required for locking
 */
void
rcmd_lock_init(void)
{
	int size;
	struct stat fbuf;

	/*
	 * Start info list with one slot, then grow on demand.
	 */
	info_req_list = s_calloc(1, sizeof (req_list_t));
	info_req_list->n_req_max = 1;

	/*
	 * Open daemon state file and map in contents
	 */
	state_fd = open(RCM_STATE_FILE, O_CREAT|O_RDWR, 0600);
	if (state_fd == -1) {
		rcm_log_message(RCM_ERROR, gettext("cannot open %s: %s\n"),
		    RCM_STATE_FILE, strerror(errno));
		rcmd_exit(errno);
	}

	if (fstat(state_fd, &fbuf) != 0) {
		rcm_log_message(RCM_ERROR, gettext("cannot stat %s: %s\n"),
		    RCM_STATE_FILE, strerror(errno));
		rcmd_exit(errno);
	}

	size = fbuf.st_size;
	if (size == 0) {
		size = sizeof (req_list_t);
		if (ftruncate(state_fd, size) != 0) {
			rcm_log_message(RCM_ERROR,
			    gettext("cannot truncate %s: %s\n"),
			    RCM_STATE_FILE, strerror(errno));
			rcmd_exit(errno);
		}
	}

	/*LINTED*/
	dr_req_list = (req_list_t *)mmap(NULL, size, PROT_READ|PROT_WRITE,
	    MAP_SHARED, state_fd, 0);
	if (dr_req_list == MAP_FAILED) {
		rcm_log_message(RCM_ERROR, gettext("cannot mmap %s: %s\n"),
		    RCM_STATE_FILE, strerror(errno));
		rcmd_exit(errno);
	}

	/*
	 * Initial size is one entry
	 */
	if (dr_req_list->n_req_max == 0) {
		dr_req_list->n_req_max = 1;
		(void) fsync(state_fd);
		return;
	}

	rcm_log_message(RCM_DEBUG, "n_req = %d, n_req_max = %d\n",
	    dr_req_list->n_req, dr_req_list->n_req_max);

	/*
	 * Recover the daemon state
	 */
	clean_dr_list();
}
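
/*
 * Sizing note: a freshly created state file is truncated to
 * sizeof (req_list_t), which holds exactly one req_t because of the
 * req[1] array idiom above; hence n_req_max starts at 1 and the list
 * grows in N_REQ_CHUNK increments via get_req_entry().
 */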

/*
 * Get a unique sequence number--to be called with rcm_req_lock held.
 */
static int
get_seq_number()
{
	int number;

	if (dr_req_list == NULL)
		return (0);

	dr_req_list->n_seq_max++;
	number  = (dr_req_list->n_seq_max << SEQ_NUM_SHIFT);
	(void) fsync(state_fd);

	return (number);
}
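
/*
 * The value returned above is the base sequence number shifted left by
 * SEQ_NUM_SHIFT.  The low-order bits appear to be reserved for cascade
 * (dependent) operations: comparisons elsewhere shift right by
 * SEQ_NUM_SHIFT before matching (find_req_entry(), info_req_remove()),
 * and clean_dr_list() skips entries with bits set in SEQ_NUM_MASK.
 */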

/*
 * Find entry in list with the same resource name and sequence number.
 * If seq_num == -1, no seq_num matching is required.
 */
static req_t *
find_req_entry(char *device, uint_t flag, int seq_num, req_list_t *list)
{
	int i;

	/*
	 * Look for entry with the same resource and seq_num.
	 * Also match RCM_FILESYS field in flag.
	 */
	for (i = 0; i < list->n_req_max; i++) {
		if (list->req[i].state == RCM_STATE_REMOVE)
			/* stale entry */
			continue;
		/*
		 * We need to distinguish a file system root from the directory
		 * it is mounted on.
		 *
		 * Applications are not aware of any difference between the
		 * two, but the system keeps track of it internally by
		 * checking for mount points while traversing a file path.
		 * In a similar spirit, RCM keeps this difference as
		 * an implementation detail.
		 */
		if ((strcmp(device, list->req[i].device) != 0) ||
		    (list->req[i].flag & RCM_FILESYS) != (flag & RCM_FILESYS))
			/* different resource */
			continue;

		if ((seq_num != -1) && ((seq_num >> SEQ_NUM_SHIFT) !=
		    (list->req[i].seq_num >> SEQ_NUM_SHIFT)))
			/* different base seqnum */
			continue;

		return (&list->req[i]);
	}

	return (NULL);
}

/*
 * Get the next empty req_t entry. If no entry exists, grow the list.
 */
static req_t *
get_req_entry(req_list_t **listp)
{
	int i;
	int n_req = (*listp)->n_req;
	int n_req_max = (*listp)->n_req_max;

	/*
	 * If the list is full, grow the list and return the first
	 * entry in the new portion.
	 */
	if (n_req == n_req_max) {
		int newsize;

		n_req_max += N_REQ_CHUNK;
		newsize = sizeof (req_list_t) + (n_req_max - 1) *
		    sizeof (req_t);

		if (listp == &info_req_list) {
			*listp = s_realloc(*listp, newsize);
		} else if (ftruncate(state_fd, newsize) != 0) {
			rcm_log_message(RCM_ERROR,
			    gettext("cannot truncate %s: %s\n"),
			    RCM_STATE_FILE, strerror(errno));
			rcmd_exit(errno);
		/*LINTED*/
		} else if ((*listp = (req_list_t *)mmap(NULL, newsize,
		    PROT_READ|PROT_WRITE, MAP_SHARED, state_fd, 0)) ==
		    MAP_FAILED) {
			rcm_log_message(RCM_ERROR,
			    gettext("cannot mmap %s: %s\n"),
			    RCM_STATE_FILE, strerror(errno));
			rcmd_exit(errno);
		}

		/* Initialize the new entries */
		for (i = (*listp)->n_req_max; i < n_req_max; i++) {
			(*listp)->req[i].state = RCM_STATE_REMOVE;
			(void) strcpy((*listp)->req[i].device, "");
		}

		(*listp)->n_req_max = n_req_max;
		(*listp)->n_req++;
		return (&(*listp)->req[n_req]);
	}

	/*
	 * List contains empty slots, find it.
	 */
	for (i = 0; i < n_req_max; i++) {
		if (((*listp)->req[i].device[0] == '\0') ||
		    ((*listp)->req[i].state == RCM_STATE_REMOVE)) {
			break;
		}
	}

	assert(i < n_req_max);	/* empty slot must exist */

	(*listp)->n_req++;
	return (&(*listp)->req[i]);
}
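
/*
 * Growth note: for dr_req_list the state file itself is extended with
 * ftruncate() and then re-mmap'ed at the new size, so newly added
 * slots are persistent just like the original ones.  For
 * info_req_list the same growth is done with s_realloc() on the heap.
 */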

/*
 * When one resource depends on multiple resources, it's possible that
 * rcm_get_info can be called multiple times on the resource, resulting
 * in duplicate information. By assigning a unique sequence number to
 * each rcm_get_info operation, this duplication can be eliminated.
 *
 * Insert a dr entry in info_req_list
 */
int
info_req_add(char *rsrcname, uint_t flag, int seq_num)
{
	int error = 0;
	char *device;
	req_t *req;

	rcm_log_message(RCM_TRACE2, "info_req_add(%s, %d)\n",
	    rsrcname, seq_num);

	device = resolve_name(rsrcname);
	(void) mutex_lock(&rcm_req_lock);

	/*
	 * Look for entry with the same resource and seq_num.
	 * If it exists, we return an error so that such
	 * information is not gathered more than once.
	 */
	if (find_req_entry(device, flag, seq_num, info_req_list) != NULL) {
		rcm_log_message(RCM_DEBUG, "getinfo cycle: %s %d \n",
		    device, seq_num);
		error = -1;
		goto out;
	}

	/*
	 * Get empty entry and fill in seq_num and device.
	 */
	req = get_req_entry(&info_req_list);
	req->seq_num = seq_num;
	req->state = RCM_STATE_ONLINE;  /* mark that the entry is in use */
	req->flag = flag;
	(void) strcpy(req->device, device);

out:
	(void) mutex_unlock(&rcm_req_lock);
	free(device);

	return (error);
}

/*
 * Remove all entries associated with seq_num from info_req_list
 */
void
info_req_remove(int seq_num)
{
	int i;

	rcm_log_message(RCM_TRACE3, "info_req_remove(%d)\n", seq_num);

	seq_num >>= SEQ_NUM_SHIFT;
	(void) mutex_lock(&rcm_req_lock);

	/* remove all entries with seq_num */
	for (i = 0; i < info_req_list->n_req_max; i++) {
		if (info_req_list->req[i].state == RCM_STATE_REMOVE)
			continue;

		if ((info_req_list->req[i].seq_num >> SEQ_NUM_SHIFT) != seq_num)
			continue;

		info_req_list->req[i].state = RCM_STATE_REMOVE;
		info_req_list->n_req--;
	}

	/*
	 * We don't shrink the info_req_list size for now.
	 */
	(void) mutex_unlock(&rcm_req_lock);
}
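
/*
 * Slots marked RCM_STATE_REMOVE are not freed; get_req_entry() treats
 * them as empty and reuses them for later requests.
 */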

/*
 * Checking lock conflicts. There is a conflict if:
 * - attempt to DR a node when either its ancestor or descendant
 *	is in the process of DR
 * - attempt to register for a node when its ancestor is locked for DR
 */
static int
check_lock(char *device, uint_t flag, int cflag, rcm_info_t **info)
{
	int i, ret = RCM_SUCCESS;

	if (info)
		*info = NULL;

	/*
	 * During daemon initialization, don't check locks
	 */
	if (dr_req_list == NULL)
		return (ret);

	for (i = 0; i < dr_req_list->n_req; i++) {
		req_t *req = &dr_req_list->req[i];
		char *dr_dev = req->device;

		/*
		 * Skip empty entries
		 */
		if ((req->state == RCM_STATE_REMOVE) || (dr_dev[0] == '\0'))
			continue;

		/*
		 * Make sure that neither the device nor any of its
		 * ancestors is already being operated upon.
		 */
		if (EQUAL(device, dr_dev) || DESCENDENT(device, dr_dev)) {
			/*
			 * An exception to this is the filesystem.
			 * We should allow a filesystem rooted at a
			 * child directory to be unmounted.
			 */
			if ((flag & RCM_FILESYS) && (!EQUAL(device, dr_dev) ||
			    ((dr_req_list->req[i].flag & RCM_FILESYS) == 0)))
				continue;

			assert(info != 0);

			add_busy_rsrc_to_list(dr_dev, dr_req_list->req[i].pid,
			    dr_req_list->req[i].state,
			    dr_req_list->req[i].seq_num, NULL, locked_info,
			    locked_err, NULL, info);
			ret = RCM_CONFLICT;
			break;
		}

		if ((cflag == LOCK_FOR_DR) && DESCENDENT(dr_dev, device)) {
			/*
			 * Check descendants only for DR requests.
			 *
			 * Could have multiple descendants doing DR;
			 * we want to find them all.
			 */
			assert(info != 0);

			add_busy_rsrc_to_list(dr_dev, dr_req_list->req[i].pid,
			    dr_req_list->req[i].state,
			    dr_req_list->req[i].seq_num, NULL, locked_info,
			    locked_err, NULL, info);
			ret = RCM_CONFLICT;
			/* don't break here, need to find all conflicts */
		}
	}

	return (ret);
}
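
/*
 * Example (hypothetical resource names): if /dev/dsk/c0t0d0 is locked
 * for DR, a DR or registration attempt on /dev/dsk/c0t0d0s0 (a
 * descendant) conflicts via the first test above, while a DR attempt
 * on an ancestor of the locked resource conflicts via the LOCK_FOR_DR
 * descendant test.  The exact hierarchy semantics depend on the
 * EQUAL/DESCENDENT macros, which are defined elsewhere and not shown
 * here.
 */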

/*
 * Check for lock conflicts for DR operation or client registration
 */
int
rsrc_check_lock_conflicts(char *rsrcname, uint_t flag, int cflag,
    rcm_info_t **info)
{
	int result;
	char *device;

	device = resolve_name(rsrcname);
	result = check_lock(device, flag, cflag, info);
	free(device);

	return (result);
}

static int
transition_state(int state)
{
	/*
	 * If the resource state is in transition, ask caller to
	 * try again.
	 */
	switch (state) {
	case RCM_STATE_OFFLINING:
	case RCM_STATE_SUSPENDING:
	case RCM_STATE_RESUMING:
	case RCM_STATE_ONLINING:
	case RCM_STATE_REMOVING:

		return (1);

	default:
		/*FALLTHROUGH*/
		break;
	}
	return (0);
}
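
/*
 * The nonzero return for the *ING states above is what lets
 * dr_req_update_entry() report EAGAIN instead of RCM_CONFLICT when a
 * request is caught mid-transition, so librcm callers can retry.
 */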

/*
 * Update a dr entry in dr_req_list
 */
/*ARGSUSED*/
static int
dr_req_update_entry(char *device, pid_t pid, uint_t flag, int state,
    int seq_num, timespec_t *interval, rcm_info_t **infop)
{
	req_t *req;

	/*
	 * Find request entry. If not found, return RCM_FAILURE
	 */
	req = find_req_entry(device, flag, -1, dr_req_list);

	if (req == NULL) {
		switch (state) {
		case RCM_STATE_OFFLINE_QUERYING:
		case RCM_STATE_SUSPEND_QUERYING:
		case RCM_STATE_OFFLINING:
		case RCM_STATE_SUSPENDING:
			/* could be re-do operation, no error message */
			break;

		default:
			rcm_log_message(RCM_DEBUG,
			    "update non-existing resource %s\n", device);
		}
		return (RCM_FAILURE);
	}

	/*
	 * During initialization, update is unconditional (forced)
	 * in order to bring the daemon up in a sane state.
	 */
	if (rcmd_get_state() == RCMD_INIT)
		goto update;

	/*
	 * Don't allow update with mismatched initiator pid. This could happen
	 * as part of normal operation.
	 */
	if (pid != req->pid) {
		rcm_log_message(RCM_INFO,
		    gettext("mismatched dr initiator pid: %ld %ld\n"),
		    req->pid, pid);
		goto failure;
	}

	rcm_log_message(RCM_TRACE4,
	    "dr_req_update_entry: state=%d, device=%s\n",
	    req->state, req->device);

	/*
	 * Check that the state transition is valid
	 */
	switch (state) {
	case RCM_STATE_OFFLINE_QUERYING:
	case RCM_STATE_OFFLINING:
		/*
		 * This is the case of re-offlining, which applies only
		 * if a previous attempt failed.
		 */
		if ((req->state != RCM_STATE_OFFLINE_FAIL) &&
		    (req->state != RCM_STATE_OFFLINE_QUERYING) &&
		    (req->state != RCM_STATE_OFFLINE_QUERY) &&
		    (req->state != RCM_STATE_OFFLINE_QUERY_FAIL) &&
		    (req->state != RCM_STATE_OFFLINE)) {
			rcm_log_message(RCM_WARNING,
			    gettext("%s: invalid offlining from state %d\n"),
			    device, req->state);
			goto failure;
		}
		break;

	case RCM_STATE_SUSPEND_QUERYING:
	case RCM_STATE_SUSPENDING:
		/*
		 * This is the case of re-suspending, which applies only
		 * if a previous attempt failed.
		 */
		if ((req->state != RCM_STATE_SUSPEND_FAIL) &&
		    (req->state != RCM_STATE_SUSPEND_QUERYING) &&
		    (req->state != RCM_STATE_SUSPEND_QUERY) &&
		    (req->state != RCM_STATE_SUSPEND_QUERY_FAIL) &&
		    (req->state != RCM_STATE_SUSPEND)) {
			rcm_log_message(RCM_WARNING,
			    gettext("%s: invalid suspending from state %d\n"),
			    device, req->state);
			goto failure;
		}
		break;

	case RCM_STATE_RESUMING:
		if ((req->state != RCM_STATE_SUSPEND) &&
		    (req->state != RCM_STATE_SUSPEND_QUERYING) &&
		    (req->state != RCM_STATE_SUSPEND_QUERY) &&
		    (req->state != RCM_STATE_SUSPEND_QUERY_FAIL) &&
		    (req->state != RCM_STATE_SUSPEND_FAIL)) {
			rcm_log_message(RCM_DEBUG,
			    "%s: invalid resuming from state %d\n",
			    device, req->state);
			goto failure;
		}
		break;

	case RCM_STATE_ONLINING:
		if ((req->state != RCM_STATE_OFFLINE) &&
		    (req->state != RCM_STATE_OFFLINE_QUERYING) &&
		    (req->state != RCM_STATE_OFFLINE_QUERY) &&
		    (req->state != RCM_STATE_OFFLINE_QUERY_FAIL) &&
		    (req->state != RCM_STATE_OFFLINE_FAIL)) {
			rcm_log_message(RCM_INFO,
			    gettext("%s: invalid onlining from state %d\n"),
			    device, req->state);
			goto failure;
		}
		break;

	case RCM_STATE_REMOVING:
		if ((req->state != RCM_STATE_OFFLINE) &&
		    (req->state != RCM_STATE_OFFLINE_FAIL)) {
			rcm_log_message(RCM_INFO,
			    gettext("%s: invalid removing from state %d\n"),
			    device, req->state);
			goto failure;
		}
		break;

	case RCM_STATE_SUSPEND_FAIL:
		assert(req->state == RCM_STATE_SUSPENDING);
		break;

	case RCM_STATE_OFFLINE_FAIL:
		assert(req->state == RCM_STATE_OFFLINING);
		break;

	case RCM_STATE_SUSPEND:
		assert(req->state == RCM_STATE_SUSPENDING);
		break;

	case RCM_STATE_OFFLINE:
		assert(req->state == RCM_STATE_OFFLINING);
		break;

	case RCM_STATE_ONLINE:
		assert((req->state == RCM_STATE_RESUMING) ||
		    (req->state == RCM_STATE_ONLINING));
		break;

	default:	/* shouldn't be here */
		rcm_log_message(RCM_ERROR,
		    gettext("invalid update to dr state: %d\n"), state);
		return (RCM_FAILURE);
	}

update:
	/*
	 * update the state, interval, and sequence number; sync state file
	 */
	req->state = state;
	req->seq_num = seq_num;

	if (interval)
		req->interval = *interval;
	else
		bzero(&req->interval, sizeof (timespec_t));

	(void) fsync(state_fd);
	return (RCM_SUCCESS);

failure:
	if (infop != NULL) {
		add_busy_rsrc_to_list(req->device, req->pid, req->state,
		    req->seq_num, NULL, locked_info, locked_err, NULL, infop);
	}

	/*
	 * A request may be left in a transition state because the operator
	 * typed ctrl-C. In this case, the daemon thread continues to run
	 * and will eventually move the request to a non-transitional state.
	 *
	 * To be safe, we return EAGAIN to allow librcm to loop and retry.
	 * If we are called from a module, loop & retry could result in a
	 * deadlock. The caller will check for this case and turn EAGAIN
	 * into RCM_CONFLICT.
	 */
	if (transition_state(req->state)) {
		return (EAGAIN);
	}

	return (RCM_CONFLICT);
}

/*
 * Insert a dr entry in dr_req_list
 */
int
dr_req_add(char *rsrcname, pid_t pid, uint_t flag, int state, int seq_num,
    timespec_t *interval, rcm_info_t **info)
{
	int error;
	char *device;
	req_t *req;

	rcm_log_message(RCM_TRACE3, "dr_req_add(%s, %ld, 0x%x, %d, %d, %p)\n",
	    rsrcname, pid, flag, state, seq_num, (void *)info);

	device = resolve_name(rsrcname);
	if (device == NULL)
		return (EINVAL);

	(void) mutex_lock(&rcm_req_lock);

	/*
	 * In the re-offline/suspend case, attempt to update dr request.
	 *
	 * If this succeeds, return success;
	 * If this fails because of a conflict, return error;
	 * If this fails because no entry exists, add a new entry.
	 */
	error = dr_req_update_entry(device, pid, flag, state, seq_num, interval,
	    info);

	switch (error) {
	case RCM_FAILURE:
		/* proceed to add a new entry */
		break;

	case RCM_CONFLICT:
	case RCM_SUCCESS:
	case EAGAIN:
	default:
		goto out;
	}

	/*
	 * Check for lock conflicts
	 */
	error = check_lock(device, flag, LOCK_FOR_DR, info);
	if (error != RCM_SUCCESS) {
		error = RCM_CONFLICT;
		goto out;
	}

	/*
	 * Get empty request entry, fill in values and sync state file
	 */
	req = get_req_entry(&dr_req_list);

	req->seq_num = seq_num;
	req->pid = pid;
	req->flag = flag;
	req->state = state;
	req->type = rsrc_get_type(device);
	(void) strcpy(req->device, device);

	/* cache interval for failure recovery */
	if (interval)
		req->interval = *interval;
	else
		bzero(&req->interval, sizeof (timespec_t));

	(void) fsync(state_fd);

	/*
	 * Add initiator pid to polling list
	 */
	add_to_polling_list(req->pid);

out:
	(void) mutex_unlock(&rcm_req_lock);
	free(device);

	return (error);
}
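
/*
 * Return values of dr_req_add() come straight from dr_req_update_entry()
 * and check_lock(): RCM_SUCCESS when an existing entry was updated or a
 * new one added, RCM_CONFLICT when another DR operation holds the lock,
 * EAGAIN when the existing entry is still in a transition state, and
 * EINVAL when the resource name cannot be resolved.
 */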

/*
 * Update a dr entry in dr_req_list
 */
/*ARGSUSED*/
int
dr_req_update(char *rsrcname, pid_t pid, uint_t flag, int state, int seq_num,
    rcm_info_t **info)
{
	int error;
	char *device = resolve_name(rsrcname);

	rcm_log_message(RCM_TRACE3, "dr_req_update(%s, %ld, 0x%x, %d, %d)\n",
	    rsrcname, pid, flag, state, seq_num);

	(void) mutex_lock(&rcm_req_lock);
	error = dr_req_update_entry(device, pid, flag, state, seq_num, NULL,
	    info);
	(void) mutex_unlock(&rcm_req_lock);
	free(device);

	return (error);
}

/*
 * This function scans the DR request list for the next, non-removed
 * entry that is part of the specified sequence.  The 'device' name
 * of the entry is copied into the provided 'rsrc' buffer.
 *
 * The 'rsrc' buffer is required because the DR request list is only
 * locked during the duration of this lookup.  Giving a direct pointer
 * to something in the list would be unsafe.
 */
int
dr_req_lookup(int seq_num, char *rsrc)
{
	int	i;
	int	len;
	int	base = (seq_num >> SEQ_NUM_SHIFT);
	int	retval = RCM_FAILURE;

	if (rsrc == NULL) {
		return (RCM_FAILURE);
	}

	(void) mutex_lock(&rcm_req_lock);

	for (i = 0; i < dr_req_list->n_req_max; i++) {

		/* Skip removed or non-matching entries */
		if ((dr_req_list->req[i].state == RCM_STATE_REMOVE) ||
		    ((dr_req_list->req[i].seq_num >> SEQ_NUM_SHIFT) != base)) {
			continue;
		}

		/* Copy the next-matching 'device' name into 'rsrc' */
		len = strlcpy(rsrc, dr_req_list->req[i].device, MAXPATHLEN);
		if (len < MAXPATHLEN) {
			retval = RCM_SUCCESS;
		}
		break;
	}

	(void) mutex_unlock(&rcm_req_lock);

	return (retval);
}

/*
 * Remove a dr entry in dr_req_list
 */
void
dr_req_remove(char *rsrcname, uint_t flag)
{
	req_t *req;
	char *device = resolve_name(rsrcname);

	rcm_log_message(RCM_TRACE3, "dr_req_remove(%s)\n", rsrcname);

	(void) mutex_lock(&rcm_req_lock);

	/* find entry */
	req = find_req_entry(device, flag, -1, dr_req_list);
	free(device);

	if (req == NULL) {
		(void) mutex_unlock(&rcm_req_lock);
		rcm_log_message(RCM_WARNING,
		    gettext("dr_req entry %s not found\n"), rsrcname);
		return;
	}

	req->state = RCM_STATE_REMOVE;
	dr_req_list->n_req--;
	(void) fsync(state_fd);

	/*
	 * remove pid from polling list
	 */
	remove_from_polling_list(req->pid);

	/*
	 * We don't shrink the dr_req_list size for now.
	 * Shouldn't cause big memory leaks.
	 */
	(void) mutex_unlock(&rcm_req_lock);
}
/*
 * Return the list of ongoing dr operation requests
 */
rcm_info_t *
rsrc_dr_info()
{
	int i;
	rcm_info_t *info;
	rcm_info_t *result = NULL;
	char *rsrc;
	int len;

	rcm_log_message(RCM_TRACE2, "rsrc_dr_info()\n");

	(void) mutex_lock(&rcm_req_lock);
	for (i = 0; i < dr_req_list->n_req_max; i++) {
		if (dr_req_list->req[i].state == RCM_STATE_REMOVE)
			continue;

		if (dr_req_list->req[i].device[0] == '\0')
			continue;

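		/*
		 * The "+ 5" below covers the four characters of the
		 * "(fs)" suffix plus the terminating NUL, so snprintf()
		 * does not truncate the resource name.
		 */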
		if (dr_req_list->req[i].flag & RCM_FILESYS) {
			len = strlen(dr_req_list->req[i].device) + 5;
			rsrc = s_malloc(len);
			(void) snprintf(rsrc, len, "%s(fs)",
			    dr_req_list->req[i].device);
		} else {
			rsrc = s_strdup(dr_req_list->req[i].device);
		}

		info = s_calloc(1, sizeof (*info));
		if (errno = nvlist_alloc(&(info->info), NV_UNIQUE_NAME, 0)) {
			rcm_log_message(RCM_ERROR,
			    gettext("failed (nvlist_alloc=%s).\n"),
			    strerror(errno));
			rcmd_exit(errno);
		}

		if (errno = nvlist_add_string(info->info, RCM_RSRCNAME, rsrc)) {
			rcm_log_message(RCM_ERROR,
			    gettext("failed (nvlist_add=%s).\n"),
			    strerror(errno));
			rcmd_exit(errno);
		}
		(void) free(rsrc);

		if (errno = nvlist_add_int64(info->info, RCM_CLIENT_ID,
		    dr_req_list->req[i].pid)) {
			rcm_log_message(RCM_ERROR,
			    gettext("failed (nvlist_add=%s).\n"),
			    strerror(errno));
			rcmd_exit(errno);
		}

		if (errno = nvlist_add_int32(info->info, RCM_SEQ_NUM,
		    dr_req_list->req[i].seq_num)) {
			rcm_log_message(RCM_ERROR,
			    gettext("failed (nvlist_add=%s).\n"),
			    strerror(errno));
			rcmd_exit(errno);
		}

		if (errno = nvlist_add_int32(info->info, RCM_RSRCSTATE,
		    dr_req_list->req[i].state)) {
			rcm_log_message(RCM_ERROR,
			    gettext("failed (nvlist_add=%s).\n"),
			    strerror(errno));
			rcmd_exit(errno);
		}

		if (errno = nvlist_add_string(info->info, RCM_CLIENT_INFO,
		    (char *)locked_info)) {
			rcm_log_message(RCM_ERROR,
			    gettext("failed (nvlist_add=%s).\n"),
			    strerror(errno));
			rcmd_exit(errno);
		}

		info->next = result;
		result = info;
	}
	(void) mutex_unlock(&rcm_req_lock);

	return (result);
}
943*7c478bd9Sstevel@tonic-gate 
944*7c478bd9Sstevel@tonic-gate /*
945*7c478bd9Sstevel@tonic-gate  * Eliminate entries whose dr initiator is no longer running
946*7c478bd9Sstevel@tonic-gate  * and recover daemon state during daemon restart.
947*7c478bd9Sstevel@tonic-gate  *
948*7c478bd9Sstevel@tonic-gate  * This routine is called from either during daemon initialization
949*7c478bd9Sstevel@tonic-gate  * after all modules have registered resources or from the cleanup
950*7c478bd9Sstevel@tonic-gate  * thread. In either case, it is the only thread running in the
951*7c478bd9Sstevel@tonic-gate  * daemon.
952*7c478bd9Sstevel@tonic-gate  */
953*7c478bd9Sstevel@tonic-gate void
clean_dr_list()954*7c478bd9Sstevel@tonic-gate clean_dr_list()
955*7c478bd9Sstevel@tonic-gate {
956*7c478bd9Sstevel@tonic-gate 	int i;
957*7c478bd9Sstevel@tonic-gate 	struct clean_list {
958*7c478bd9Sstevel@tonic-gate 		struct clean_list *next;
959*7c478bd9Sstevel@tonic-gate 		char *rsrcname;
960*7c478bd9Sstevel@tonic-gate 		pid_t pid;
961*7c478bd9Sstevel@tonic-gate 		int seq_num;
962*7c478bd9Sstevel@tonic-gate 		int state;
963*7c478bd9Sstevel@tonic-gate 		timespec_t interval;
964*7c478bd9Sstevel@tonic-gate 	} *tmp, *list = NULL;
965*7c478bd9Sstevel@tonic-gate 	char *rsrcnames[2];
966*7c478bd9Sstevel@tonic-gate 
967*7c478bd9Sstevel@tonic-gate 	rcm_log_message(RCM_TRACE3,
968*7c478bd9Sstevel@tonic-gate 	    "clean_dr_list(): look for stale dr initiators\n");
969*7c478bd9Sstevel@tonic-gate 
970*7c478bd9Sstevel@tonic-gate 	rsrcnames[1] = NULL;
971*7c478bd9Sstevel@tonic-gate 
972*7c478bd9Sstevel@tonic-gate 	/*
973*7c478bd9Sstevel@tonic-gate 	 * Make a list of entries to recover. This is necessary because
974*7c478bd9Sstevel@tonic-gate 	 * the recovery operation will modify dr_req_list.
975*7c478bd9Sstevel@tonic-gate 	 */
976*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&rcm_req_lock);
977*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < dr_req_list->n_req_max; i++) {
978*7c478bd9Sstevel@tonic-gate 		/* skip empty entries */
979*7c478bd9Sstevel@tonic-gate 		if (dr_req_list->req[i].state == RCM_STATE_REMOVE)
980*7c478bd9Sstevel@tonic-gate 			continue;
981*7c478bd9Sstevel@tonic-gate 
982*7c478bd9Sstevel@tonic-gate 		if (dr_req_list->req[i].device[0] == '\0')
983*7c478bd9Sstevel@tonic-gate 			continue;
984*7c478bd9Sstevel@tonic-gate 
985*7c478bd9Sstevel@tonic-gate 		/* skip cascade operations */
986*7c478bd9Sstevel@tonic-gate 		if (dr_req_list->req[i].seq_num & SEQ_NUM_MASK)
987*7c478bd9Sstevel@tonic-gate 			continue;
988*7c478bd9Sstevel@tonic-gate 
989*7c478bd9Sstevel@tonic-gate 		/*
990*7c478bd9Sstevel@tonic-gate 		 * In the cleanup case, ignore entries with initiators alive
991*7c478bd9Sstevel@tonic-gate 		 */
992*7c478bd9Sstevel@tonic-gate 		if ((rcmd_get_state() == RCMD_CLEANUP) &&
993*7c478bd9Sstevel@tonic-gate 		    proc_exist(dr_req_list->req[i].pid))
994*7c478bd9Sstevel@tonic-gate 			continue;
995*7c478bd9Sstevel@tonic-gate 
996*7c478bd9Sstevel@tonic-gate 		rcm_log_message(RCM_TRACE1,
997*7c478bd9Sstevel@tonic-gate 		    "found stale entry: %s\n", dr_req_list->req[i].device);
998*7c478bd9Sstevel@tonic-gate 
999*7c478bd9Sstevel@tonic-gate 		tmp = s_malloc(sizeof (*tmp));
1000*7c478bd9Sstevel@tonic-gate 		tmp->rsrcname = s_strdup(dr_req_list->req[i].device);
1001*7c478bd9Sstevel@tonic-gate 		tmp->state = dr_req_list->req[i].state;
1002*7c478bd9Sstevel@tonic-gate 		tmp->pid = dr_req_list->req[i].pid;
1003*7c478bd9Sstevel@tonic-gate 		tmp->seq_num = dr_req_list->req[i].seq_num;
1004*7c478bd9Sstevel@tonic-gate 		tmp->interval = dr_req_list->req[i].interval;
1005*7c478bd9Sstevel@tonic-gate 		tmp->next = list;
1006*7c478bd9Sstevel@tonic-gate 		list = tmp;
1007*7c478bd9Sstevel@tonic-gate 	}
1008*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&rcm_req_lock);
1009*7c478bd9Sstevel@tonic-gate 
1010*7c478bd9Sstevel@tonic-gate 	if (list == NULL)
1011*7c478bd9Sstevel@tonic-gate 		return;
1012*7c478bd9Sstevel@tonic-gate 
1013*7c478bd9Sstevel@tonic-gate 	/*
1014*7c478bd9Sstevel@tonic-gate 	 * If everything worked normally, we shouldn't be here.
1015*7c478bd9Sstevel@tonic-gate 	 * Since we are here, something went wrong, so say something.
1016*7c478bd9Sstevel@tonic-gate 	 */
1017*7c478bd9Sstevel@tonic-gate 	if (rcmd_get_state() == RCMD_INIT) {
1018*7c478bd9Sstevel@tonic-gate 		rcm_log_message(RCM_NOTICE, gettext("rcm_daemon died "
1019*7c478bd9Sstevel@tonic-gate 		    "unexpectedly, recovering previous daemon state\n"));
1020*7c478bd9Sstevel@tonic-gate 	} else {
1021*7c478bd9Sstevel@tonic-gate 		rcm_log_message(RCM_INFO, gettext("one or more dr initiators "
1022*7c478bd9Sstevel@tonic-gate 		    "died, attempting automatic recovery\n"));
1023*7c478bd9Sstevel@tonic-gate 	}
1024*7c478bd9Sstevel@tonic-gate 
1025*7c478bd9Sstevel@tonic-gate 	while (list) {
1026*7c478bd9Sstevel@tonic-gate 		tmp = list;
1027*7c478bd9Sstevel@tonic-gate 		list = tmp->next;
1028*7c478bd9Sstevel@tonic-gate 
1029*7c478bd9Sstevel@tonic-gate 		switch (tmp->state) {
1030*7c478bd9Sstevel@tonic-gate 		case RCM_STATE_OFFLINE_QUERY:
1031*7c478bd9Sstevel@tonic-gate 		case RCM_STATE_OFFLINE_QUERY_FAIL:
1032*7c478bd9Sstevel@tonic-gate 			rsrcnames[0] = tmp->rsrcname;
1033*7c478bd9Sstevel@tonic-gate 			if (proc_exist(tmp->pid)) {
1034*7c478bd9Sstevel@tonic-gate 				/* redo */
1035*7c478bd9Sstevel@tonic-gate 				(void) process_resource_offline(rsrcnames,
1036*7c478bd9Sstevel@tonic-gate 				    tmp->pid, RCM_QUERY, tmp->seq_num, NULL);
1037*7c478bd9Sstevel@tonic-gate 			} else {
1038*7c478bd9Sstevel@tonic-gate 				/* undo */
1039*7c478bd9Sstevel@tonic-gate 				(void) notify_resource_online(rsrcnames,
1040*7c478bd9Sstevel@tonic-gate 				    tmp->pid, 0, tmp->seq_num, NULL);
1041*7c478bd9Sstevel@tonic-gate 			}
1042*7c478bd9Sstevel@tonic-gate 			break;
1043*7c478bd9Sstevel@tonic-gate 
1044*7c478bd9Sstevel@tonic-gate 		case RCM_STATE_OFFLINE:
1045*7c478bd9Sstevel@tonic-gate 		case RCM_STATE_OFFLINE_FAIL:
1046*7c478bd9Sstevel@tonic-gate 			rsrcnames[0] = tmp->rsrcname;
1047*7c478bd9Sstevel@tonic-gate 			if (proc_exist(tmp->pid)) {
1048*7c478bd9Sstevel@tonic-gate 				/* redo */
1049*7c478bd9Sstevel@tonic-gate 				(void) process_resource_offline(rsrcnames,
1050*7c478bd9Sstevel@tonic-gate 				    tmp->pid, 0, tmp->seq_num, NULL);
1051*7c478bd9Sstevel@tonic-gate 			} else {
1052*7c478bd9Sstevel@tonic-gate 				/* undo */
1053*7c478bd9Sstevel@tonic-gate 				(void) notify_resource_online(rsrcnames,
1054*7c478bd9Sstevel@tonic-gate 				    tmp->pid, 0, tmp->seq_num, NULL);
1055*7c478bd9Sstevel@tonic-gate 			}
1056*7c478bd9Sstevel@tonic-gate 			break;
1057*7c478bd9Sstevel@tonic-gate 
1058*7c478bd9Sstevel@tonic-gate 		case RCM_STATE_SUSPEND_QUERY:
1059*7c478bd9Sstevel@tonic-gate 		case RCM_STATE_SUSPEND_QUERY_FAIL:
1060*7c478bd9Sstevel@tonic-gate 			rsrcnames[0] = tmp->rsrcname;
1061*7c478bd9Sstevel@tonic-gate 			if (proc_exist(tmp->pid)) {
1062*7c478bd9Sstevel@tonic-gate 				/* redo */
1063*7c478bd9Sstevel@tonic-gate 				(void) process_resource_suspend(rsrcnames,
1064*7c478bd9Sstevel@tonic-gate 				    tmp->pid, RCM_QUERY, tmp->seq_num,
1065*7c478bd9Sstevel@tonic-gate 				    &tmp->interval, NULL);
1066*7c478bd9Sstevel@tonic-gate 			} else {
1067*7c478bd9Sstevel@tonic-gate 				/* undo */
1068*7c478bd9Sstevel@tonic-gate 				(void) notify_resource_resume(rsrcnames,
1069*7c478bd9Sstevel@tonic-gate 				    tmp->pid, 0, tmp->seq_num, NULL);
1070*7c478bd9Sstevel@tonic-gate 			}
1071*7c478bd9Sstevel@tonic-gate 			break;
1072*7c478bd9Sstevel@tonic-gate 
1073*7c478bd9Sstevel@tonic-gate 		case RCM_STATE_SUSPEND:
1074*7c478bd9Sstevel@tonic-gate 		case RCM_STATE_SUSPEND_FAIL:
1075*7c478bd9Sstevel@tonic-gate 			rsrcnames[0] = tmp->rsrcname;
1076*7c478bd9Sstevel@tonic-gate 			if (proc_exist(tmp->pid)) {
1077*7c478bd9Sstevel@tonic-gate 				/* redo */
1078*7c478bd9Sstevel@tonic-gate 				(void) process_resource_suspend(rsrcnames,
1079*7c478bd9Sstevel@tonic-gate 				    tmp->pid, 0, tmp->seq_num, &tmp->interval,
1080*7c478bd9Sstevel@tonic-gate 				    NULL);
1081*7c478bd9Sstevel@tonic-gate 			} else {
1082*7c478bd9Sstevel@tonic-gate 				/* undo */
1083*7c478bd9Sstevel@tonic-gate 				(void) notify_resource_resume(rsrcnames,
1084*7c478bd9Sstevel@tonic-gate 				    tmp->pid, 0, tmp->seq_num, NULL);
1085*7c478bd9Sstevel@tonic-gate 			}
1086*7c478bd9Sstevel@tonic-gate 			break;
1087*7c478bd9Sstevel@tonic-gate 
1088*7c478bd9Sstevel@tonic-gate 		case RCM_STATE_OFFLINING:
1089*7c478bd9Sstevel@tonic-gate 		case RCM_STATE_ONLINING:
1090*7c478bd9Sstevel@tonic-gate 			rsrcnames[0] = tmp->rsrcname;
1091*7c478bd9Sstevel@tonic-gate 			(void) notify_resource_online(rsrcnames, tmp->pid, 0,
1092*7c478bd9Sstevel@tonic-gate 			    tmp->seq_num, NULL);
1093*7c478bd9Sstevel@tonic-gate 			break;
1094*7c478bd9Sstevel@tonic-gate 
1095*7c478bd9Sstevel@tonic-gate 		case RCM_STATE_SUSPENDING:
1096*7c478bd9Sstevel@tonic-gate 		case RCM_STATE_RESUMING:
1097*7c478bd9Sstevel@tonic-gate 			rsrcnames[0] = tmp->rsrcname;
1098*7c478bd9Sstevel@tonic-gate 			(void) notify_resource_resume(rsrcnames, tmp->pid, 0,
1099*7c478bd9Sstevel@tonic-gate 			    tmp->seq_num, NULL);
1100*7c478bd9Sstevel@tonic-gate 			break;
1101*7c478bd9Sstevel@tonic-gate 
1102*7c478bd9Sstevel@tonic-gate 		case RCM_STATE_REMOVING:
1103*7c478bd9Sstevel@tonic-gate 			rsrcnames[0] = tmp->rsrcname;
1104*7c478bd9Sstevel@tonic-gate 			(void) notify_resource_remove(rsrcnames, tmp->pid, 0,
1105*7c478bd9Sstevel@tonic-gate 			    tmp->seq_num, NULL);
1106*7c478bd9Sstevel@tonic-gate 			break;
1107*7c478bd9Sstevel@tonic-gate 
1108*7c478bd9Sstevel@tonic-gate 		default:
1109*7c478bd9Sstevel@tonic-gate 			rcm_log_message(RCM_WARNING,
1110*7c478bd9Sstevel@tonic-gate 			    gettext("%s in unknown state %d\n"),
1111*7c478bd9Sstevel@tonic-gate 			    tmp->rsrcname, tmp->state);
1112*7c478bd9Sstevel@tonic-gate 			break;
1113*7c478bd9Sstevel@tonic-gate 		}
1114*7c478bd9Sstevel@tonic-gate 		free(tmp->rsrcname);
1115*7c478bd9Sstevel@tonic-gate 		free(tmp);
1116*7c478bd9Sstevel@tonic-gate 	}
1117*7c478bd9Sstevel@tonic-gate }
1118*7c478bd9Sstevel@tonic-gate 
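/*
 * Summary of the recovery policy above: for each stale top-level request,
 * if the initiating process is still alive the pending offline/suspend is
 * redone; if the initiator is gone, the operation is undone by bringing
 * the resource back online or resuming it.  The transient *ING states are
 * rolled forward to the matching notify call, and REMOVING entries get a
 * remove notification.
 */
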
1119*7c478bd9Sstevel@tonic-gate /*
1120*7c478bd9Sstevel@tonic-gate  * Selective thread blocking based on event type
1121*7c478bd9Sstevel@tonic-gate  */
1122*7c478bd9Sstevel@tonic-gate barrier_t barrier;
1123*7c478bd9Sstevel@tonic-gate 
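/*
 * Fields of barrier as used in this file: barrier.state holds one of
 * RCMD_INIT, RCMD_NORMAL or RCMD_CLEANUP; barrier.thr_count is the number
 * of active daemon threads (-1 while the cleanup thread owns the barrier);
 * barrier.wanted counts threads waiting to get through; barrier.last_update
 * records the time of the last thread exit; barrier.lock and barrier.cv
 * protect and signal all of the above.
 */
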
1124*7c478bd9Sstevel@tonic-gate /*
1125*7c478bd9Sstevel@tonic-gate  * Change barrier state:
1126*7c478bd9Sstevel@tonic-gate  *	RCMD_INIT - daemon is initializing, only register operations allowed
1127*7c478bd9Sstevel@tonic-gate  *	RCMD_NORMAL - normal daemon processing
1128*7c478bd9Sstevel@tonic-gate  *	RCMD_CLEANUP - cleanup thread is waiting or running
1129*7c478bd9Sstevel@tonic-gate  */
1130*7c478bd9Sstevel@tonic-gate int
1131*7c478bd9Sstevel@tonic-gate rcmd_get_state()
1132*7c478bd9Sstevel@tonic-gate {
1133*7c478bd9Sstevel@tonic-gate 	return (barrier.state);
1134*7c478bd9Sstevel@tonic-gate }
1135*7c478bd9Sstevel@tonic-gate 
1136*7c478bd9Sstevel@tonic-gate void
1137*7c478bd9Sstevel@tonic-gate rcmd_set_state(int state)
1138*7c478bd9Sstevel@tonic-gate {
1139*7c478bd9Sstevel@tonic-gate 	/*
1140*7c478bd9Sstevel@tonic-gate 	 * The state transition is as follows:
1141*7c478bd9Sstevel@tonic-gate 	 *	INIT --> NORMAL <---> CLEANUP
1142*7c478bd9Sstevel@tonic-gate 	 * The implementation favors the cleanup thread
1143*7c478bd9Sstevel@tonic-gate 	 */
1144*7c478bd9Sstevel@tonic-gate 
1145*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&barrier.lock);
1146*7c478bd9Sstevel@tonic-gate 	barrier.state = state;
1147*7c478bd9Sstevel@tonic-gate 
1148*7c478bd9Sstevel@tonic-gate 	switch (state) {
1149*7c478bd9Sstevel@tonic-gate 	case RCMD_CLEANUP:
1150*7c478bd9Sstevel@tonic-gate 		/*
1151*7c478bd9Sstevel@tonic-gate 		 * Wait for existing threads to exit
1152*7c478bd9Sstevel@tonic-gate 		 */
1153*7c478bd9Sstevel@tonic-gate 		barrier.wanted++;
1154*7c478bd9Sstevel@tonic-gate 		while (barrier.thr_count != 0)
1155*7c478bd9Sstevel@tonic-gate 			(void) cond_wait(&barrier.cv, &barrier.lock);
1156*7c478bd9Sstevel@tonic-gate 		barrier.wanted--;
1157*7c478bd9Sstevel@tonic-gate 		barrier.thr_count = -1;
1158*7c478bd9Sstevel@tonic-gate 		break;
1159*7c478bd9Sstevel@tonic-gate 
1160*7c478bd9Sstevel@tonic-gate 	case RCMD_INIT:
1161*7c478bd9Sstevel@tonic-gate 	case RCMD_NORMAL:
1162*7c478bd9Sstevel@tonic-gate 	default:
1163*7c478bd9Sstevel@tonic-gate 		if (barrier.thr_count == -1)
1164*7c478bd9Sstevel@tonic-gate 			barrier.thr_count = 0;
1165*7c478bd9Sstevel@tonic-gate 		if (barrier.wanted)
1166*7c478bd9Sstevel@tonic-gate 			(void) cond_broadcast(&barrier.cv);
1167*7c478bd9Sstevel@tonic-gate 		break;
1168*7c478bd9Sstevel@tonic-gate 	}
1169*7c478bd9Sstevel@tonic-gate 
1170*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&barrier.lock);
1171*7c478bd9Sstevel@tonic-gate }
1172*7c478bd9Sstevel@tonic-gate 
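/*
 * A minimal sketch of how the barrier states above are expected to be
 * driven.  The call sites shown here are illustrative only and are not
 * copied from the daemon's real startup or cleanup paths; only
 * rcmd_set_state() and clean_dr_list() are real.
 */
#if 0
static void
barrier_lifecycle_sketch(void)
{
	rcmd_set_state(RCMD_INIT);	/* startup: only register traffic */
	/* ... load modules, recover saved daemon state ... */
	rcmd_set_state(RCMD_NORMAL);	/* open the barrier for requests */

	/* cleanup thread: drain active threads, then run exclusively */
	rcmd_set_state(RCMD_CLEANUP);	/* waits until thr_count drops to 0 */
	clean_dr_list();		/* recover stale dr requests */
	rcmd_set_state(RCMD_NORMAL);	/* reopen the barrier, wake waiters */
}
#endif
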
1173*7c478bd9Sstevel@tonic-gate /*
1174*7c478bd9Sstevel@tonic-gate  * Increment daemon thread count
1175*7c478bd9Sstevel@tonic-gate  */
1176*7c478bd9Sstevel@tonic-gate int
1177*7c478bd9Sstevel@tonic-gate rcmd_thr_incr(int cmd)
1178*7c478bd9Sstevel@tonic-gate {
1179*7c478bd9Sstevel@tonic-gate 	int seq_num;
1180*7c478bd9Sstevel@tonic-gate 
1181*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&barrier.lock);
1182*7c478bd9Sstevel@tonic-gate 	/*
1183*7c478bd9Sstevel@tonic-gate 	 * Increment the wanted count
1184*7c478bd9Sstevel@tonic-gate 	 */
1185*7c478bd9Sstevel@tonic-gate 	barrier.wanted++;
1186*7c478bd9Sstevel@tonic-gate 
1187*7c478bd9Sstevel@tonic-gate 	/*
1188*7c478bd9Sstevel@tonic-gate 	 * Wait until it is safe for the daemon to perform the operation
1189*7c478bd9Sstevel@tonic-gate 	 *
1190*7c478bd9Sstevel@tonic-gate 	 * NOTE: if a module registers by passing a request to the
1191*7c478bd9Sstevel@tonic-gate 	 *	client process, we may need to allow register
1192*7c478bd9Sstevel@tonic-gate 	 *	to come through during daemon initialization.
1193*7c478bd9Sstevel@tonic-gate 	 */
1194*7c478bd9Sstevel@tonic-gate 	while (barrier.state != RCMD_NORMAL)
1195*7c478bd9Sstevel@tonic-gate 		(void) cond_wait(&barrier.cv, &barrier.lock);
1196*7c478bd9Sstevel@tonic-gate 
1197*7c478bd9Sstevel@tonic-gate 	if ((cmd == CMD_EVENT) ||
1198*7c478bd9Sstevel@tonic-gate 	    (cmd == CMD_REGISTER) ||
1199*7c478bd9Sstevel@tonic-gate 	    (cmd == CMD_UNREGISTER)) {
1200*7c478bd9Sstevel@tonic-gate 		/*
1201*7c478bd9Sstevel@tonic-gate 		 * Event passthru and register ops don't need a sequence number
1202*7c478bd9Sstevel@tonic-gate 		 */
1203*7c478bd9Sstevel@tonic-gate 		seq_num = -1;
1204*7c478bd9Sstevel@tonic-gate 	} else {
1205*7c478bd9Sstevel@tonic-gate 		/*
1206*7c478bd9Sstevel@tonic-gate 		 * Non-register operations get a sequence number
1207*7c478bd9Sstevel@tonic-gate 		 */
1208*7c478bd9Sstevel@tonic-gate 		seq_num = get_seq_number();
1209*7c478bd9Sstevel@tonic-gate 	}
1210*7c478bd9Sstevel@tonic-gate 	barrier.wanted--;
1211*7c478bd9Sstevel@tonic-gate 	barrier.thr_count++;
1212*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&barrier.lock);
1213*7c478bd9Sstevel@tonic-gate 
1214*7c478bd9Sstevel@tonic-gate 	if ((cmd == CMD_OFFLINE) ||
1215*7c478bd9Sstevel@tonic-gate 	    (cmd == CMD_SUSPEND) ||
1216*7c478bd9Sstevel@tonic-gate 	    (cmd == CMD_GETINFO)) {
1217*7c478bd9Sstevel@tonic-gate 		/*
1218*7c478bd9Sstevel@tonic-gate 		 * For these operations, we need to ask modules to
1219*7c478bd9Sstevel@tonic-gate 		 * register any new resources that have come online.
1220*7c478bd9Sstevel@tonic-gate 		 *
1221*7c478bd9Sstevel@tonic-gate 		 * This is because mount/umount are not instrumented
1222*7c478bd9Sstevel@tonic-gate 		 * to register with rcm before using system resources.
1223*7c478bd9Sstevel@tonic-gate 		 * Certain registration ops may fail during sync, which
1224*7c478bd9Sstevel@tonic-gate 		 * indicates race conditions. This cannot be avoided
1225*7c478bd9Sstevel@tonic-gate 		 * without changing mount/umount.
1226*7c478bd9Sstevel@tonic-gate 		 */
1227*7c478bd9Sstevel@tonic-gate 		rcmd_db_sync();
1228*7c478bd9Sstevel@tonic-gate 	}
1229*7c478bd9Sstevel@tonic-gate 
1230*7c478bd9Sstevel@tonic-gate 	return (seq_num);
1231*7c478bd9Sstevel@tonic-gate }
1232*7c478bd9Sstevel@tonic-gate 
1233*7c478bd9Sstevel@tonic-gate /*
1234*7c478bd9Sstevel@tonic-gate  * Decrement thread count
1235*7c478bd9Sstevel@tonic-gate  */
1236*7c478bd9Sstevel@tonic-gate void
1237*7c478bd9Sstevel@tonic-gate rcmd_thr_decr()
1238*7c478bd9Sstevel@tonic-gate {
1239*7c478bd9Sstevel@tonic-gate 	/*
1240*7c478bd9Sstevel@tonic-gate 	 * Decrement thread count and wake up reload/cleanup thread.
1241*7c478bd9Sstevel@tonic-gate 	 */
1242*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&barrier.lock);
1243*7c478bd9Sstevel@tonic-gate 	barrier.last_update = time(NULL);
1244*7c478bd9Sstevel@tonic-gate 	if (--barrier.thr_count == 0)
1245*7c478bd9Sstevel@tonic-gate 		(void) cond_broadcast(&barrier.cv);
1246*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&barrier.lock);
1247*7c478bd9Sstevel@tonic-gate }
1248*7c478bd9Sstevel@tonic-gate 
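/*
 * A minimal sketch of the intended bracket around request processing,
 * assuming a per-request service routine.  The wrapper below is
 * hypothetical; only rcmd_thr_incr(), process_resource_offline() and
 * rcmd_thr_decr() are real.
 */
#if 0
static void
service_offline_sketch(char **rsrcnames, pid_t pid, uint_t flag)
{
	/* wait for RCMD_NORMAL, take a sequence number, bump thr_count */
	int seq_num = rcmd_thr_incr(CMD_OFFLINE);

	(void) process_resource_offline(rsrcnames, pid, flag, seq_num, NULL);

	/* drop thr_count and wake the cleanup/idle-timer threads */
	rcmd_thr_decr();
}
#endif
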
1249*7c478bd9Sstevel@tonic-gate /*
1250*7c478bd9Sstevel@tonic-gate  * Wake up all waiting threads as a result of SIGHUP
1251*7c478bd9Sstevel@tonic-gate  */
1252*7c478bd9Sstevel@tonic-gate static int sighup_received = 0;
1253*7c478bd9Sstevel@tonic-gate 
1254*7c478bd9Sstevel@tonic-gate void
1255*7c478bd9Sstevel@tonic-gate rcmd_thr_signal()
1256*7c478bd9Sstevel@tonic-gate {
1257*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&barrier.lock);
1258*7c478bd9Sstevel@tonic-gate 	sighup_received = 1;
1259*7c478bd9Sstevel@tonic-gate 	(void) cond_broadcast(&barrier.cv);
1260*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&barrier.lock);
1261*7c478bd9Sstevel@tonic-gate }
1262*7c478bd9Sstevel@tonic-gate 
1263*7c478bd9Sstevel@tonic-gate void
1264*7c478bd9Sstevel@tonic-gate rcmd_start_timer(int timeout)
1265*7c478bd9Sstevel@tonic-gate {
1266*7c478bd9Sstevel@tonic-gate 	timestruc_t abstime;
1267*7c478bd9Sstevel@tonic-gate 
1268*7c478bd9Sstevel@tonic-gate 	if (timeout == 0)
1269*7c478bd9Sstevel@tonic-gate 		timeout = RCM_DAEMON_TIMEOUT;	/* default to 5 minutes */
1270*7c478bd9Sstevel@tonic-gate 	else
1271*7c478bd9Sstevel@tonic-gate 		dr_req_list->idle_timeout = timeout;	/* persist timeout */
1272*7c478bd9Sstevel@tonic-gate 
1273*7c478bd9Sstevel@tonic-gate 	if (timeout > 0) {
1274*7c478bd9Sstevel@tonic-gate 		abstime.tv_sec = time(NULL) + timeout;
		abstime.tv_nsec = 0;	/* cond_timedwait() needs a valid value */
1275*7c478bd9Sstevel@tonic-gate 	}
1276*7c478bd9Sstevel@tonic-gate 
1277*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&barrier.lock);
1278*7c478bd9Sstevel@tonic-gate 	for (;;) {
1279*7c478bd9Sstevel@tonic-gate 		int idletime;
1280*7c478bd9Sstevel@tonic-gate 		int is_active;
1281*7c478bd9Sstevel@tonic-gate 
1282*7c478bd9Sstevel@tonic-gate 		if (timeout > 0)
1283*7c478bd9Sstevel@tonic-gate 			(void) cond_timedwait(&barrier.cv, &barrier.lock,
1284*7c478bd9Sstevel@tonic-gate 			    &abstime);
1285*7c478bd9Sstevel@tonic-gate 		else
1286*7c478bd9Sstevel@tonic-gate 			(void) cond_wait(&barrier.cv, &barrier.lock);
1287*7c478bd9Sstevel@tonic-gate 
1288*7c478bd9Sstevel@tonic-gate 		/*
1289*7c478bd9Sstevel@tonic-gate 		 * If a SIGHUP was received, change the timeout to 0 so the
1290*7c478bd9Sstevel@tonic-gate 		 * daemon shuts down at the first opportunity
1291*7c478bd9Sstevel@tonic-gate 		 */
1292*7c478bd9Sstevel@tonic-gate 		if (sighup_received)
1293*7c478bd9Sstevel@tonic-gate 			timeout = 0;
1294*7c478bd9Sstevel@tonic-gate 
1295*7c478bd9Sstevel@tonic-gate 		/*
1296*7c478bd9Sstevel@tonic-gate 		 * If the timeout is negative, never shut down the daemon
1297*7c478bd9Sstevel@tonic-gate 		 */
1298*7c478bd9Sstevel@tonic-gate 		if (timeout < 0)
1299*7c478bd9Sstevel@tonic-gate 			continue;
1300*7c478bd9Sstevel@tonic-gate 
1301*7c478bd9Sstevel@tonic-gate 		/*
1302*7c478bd9Sstevel@tonic-gate 		 * Check for ongoing/pending activity
1303*7c478bd9Sstevel@tonic-gate 		 */
1304*7c478bd9Sstevel@tonic-gate 		is_active = (barrier.thr_count || barrier.wanted ||
1305*7c478bd9Sstevel@tonic-gate 		    (dr_req_list->n_req != 0));
1306*7c478bd9Sstevel@tonic-gate 		if (is_active) {
1307*7c478bd9Sstevel@tonic-gate 			abstime.tv_sec = time(NULL) + timeout;
1308*7c478bd9Sstevel@tonic-gate 			continue;
1309*7c478bd9Sstevel@tonic-gate 		}
1310*7c478bd9Sstevel@tonic-gate 
1311*7c478bd9Sstevel@tonic-gate 		/*
1312*7c478bd9Sstevel@tonic-gate 		 * If idletime is less than timeout, continue to wait
1313*7c478bd9Sstevel@tonic-gate 		 */
1314*7c478bd9Sstevel@tonic-gate 		idletime = time(NULL) - barrier.last_update;
1315*7c478bd9Sstevel@tonic-gate 		if (idletime < timeout) {
1316*7c478bd9Sstevel@tonic-gate 			abstime.tv_sec = barrier.last_update + timeout;
1317*7c478bd9Sstevel@tonic-gate 			continue;
1318*7c478bd9Sstevel@tonic-gate 		}
1319*7c478bd9Sstevel@tonic-gate 		break;
1320*7c478bd9Sstevel@tonic-gate 	}
1321*7c478bd9Sstevel@tonic-gate 
1322*7c478bd9Sstevel@tonic-gate 	(void) script_main_fini();
1323*7c478bd9Sstevel@tonic-gate 
1324*7c478bd9Sstevel@tonic-gate 	rcm_log_message(RCM_INFO, gettext("rcm_daemon is shut down.\n"));
1325*7c478bd9Sstevel@tonic-gate }
1326*7c478bd9Sstevel@tonic-gate 
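/*
 * A minimal usage sketch for the idle timer, assuming the daemon's main
 * thread parks in rcmd_start_timer() after initialization and that a
 * SIGHUP eventually reaches rcmd_thr_signal() (from a signal-watching
 * thread rather than an async handler, since it takes a mutex), which
 * forces the timeout to 0 above.  The wrapper below is hypothetical.
 */
#if 0
static void
daemon_main_sketch(int idle_timeout)
{
	/*
	 * 0 selects the default RCM_DAEMON_TIMEOUT (300 seconds); a
	 * negative value disables the idle shutdown; a positive value
	 * is the idle timeout in seconds.  Non-zero values are also
	 * persisted in dr_req_list->idle_timeout.
	 */
	rcmd_start_timer(idle_timeout);	/* returns only at shutdown time */
}
#endif
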
1327*7c478bd9Sstevel@tonic-gate /*
1328*7c478bd9Sstevel@tonic-gate  * Code related to polling client pids
1329*7c478bd9Sstevel@tonic-gate  * Not declared as static so that we can find this structure easily
1330*7c478bd9Sstevel@tonic-gate  * in the core file.
1331*7c478bd9Sstevel@tonic-gate  */
1332*7c478bd9Sstevel@tonic-gate struct {
1333*7c478bd9Sstevel@tonic-gate 	int		n_pids;
1334*7c478bd9Sstevel@tonic-gate 	int		n_max_pids;
1335*7c478bd9Sstevel@tonic-gate 	thread_t	poll_tid;	/* poll thread id */
1336*7c478bd9Sstevel@tonic-gate 	int		signaled;
1337*7c478bd9Sstevel@tonic-gate 	pid_t		*pids;
1338*7c478bd9Sstevel@tonic-gate 	int		*refcnt;
1339*7c478bd9Sstevel@tonic-gate 	struct pollfd	*fds;
1340*7c478bd9Sstevel@tonic-gate 	cond_t		cv;	/* the associated lock is rcm_req_lock */
1341*7c478bd9Sstevel@tonic-gate } polllist;
1342*7c478bd9Sstevel@tonic-gate 
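/*
 * Invariants on polllist (all fields are manipulated under rcm_req_lock):
 * pids[i], refcnt[i] and fds[i] describe the i-th polled initiator, with
 * fds[i].fd an open handle on /proc/<pid>/as; poll_tid is (thread_t)-1
 * whenever no poll thread is running; and polllist.cv is broadcast when
 * the poll thread exits so that stop_polling_thread() can make progress.
 */
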
1343*7c478bd9Sstevel@tonic-gate static int
1344*7c478bd9Sstevel@tonic-gate find_pid_index(pid_t pid)
1345*7c478bd9Sstevel@tonic-gate {
1346*7c478bd9Sstevel@tonic-gate 	int i;
1347*7c478bd9Sstevel@tonic-gate 
1348*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < polllist.n_pids; i++) {
1349*7c478bd9Sstevel@tonic-gate 		if (polllist.pids[i] == pid) {
1350*7c478bd9Sstevel@tonic-gate 			return (i);
1351*7c478bd9Sstevel@tonic-gate 		}
1352*7c478bd9Sstevel@tonic-gate 	}
1353*7c478bd9Sstevel@tonic-gate 	return (-1);
1354*7c478bd9Sstevel@tonic-gate }
1355*7c478bd9Sstevel@tonic-gate 
1356*7c478bd9Sstevel@tonic-gate /*
1357*7c478bd9Sstevel@tonic-gate  * Return a free slot index for a new pid, growing the buffers in chunks
1358*7c478bd9Sstevel@tonic-gate  */
1359*7c478bd9Sstevel@tonic-gate static int
1360*7c478bd9Sstevel@tonic-gate get_pid_index()
1361*7c478bd9Sstevel@tonic-gate {
1362*7c478bd9Sstevel@tonic-gate 	const int n_chunk = 10;
1363*7c478bd9Sstevel@tonic-gate 
1364*7c478bd9Sstevel@tonic-gate 	int n_max;
1365*7c478bd9Sstevel@tonic-gate 	int index = polllist.n_pids;
1366*7c478bd9Sstevel@tonic-gate 
1367*7c478bd9Sstevel@tonic-gate 	if (polllist.n_pids < polllist.n_max_pids) {
1368*7c478bd9Sstevel@tonic-gate 		polllist.n_pids++;
1369*7c478bd9Sstevel@tonic-gate 		return (index);
1370*7c478bd9Sstevel@tonic-gate 	}
1371*7c478bd9Sstevel@tonic-gate 
1372*7c478bd9Sstevel@tonic-gate 	if (polllist.n_max_pids == 0) {
1373*7c478bd9Sstevel@tonic-gate 		n_max = n_chunk;
1374*7c478bd9Sstevel@tonic-gate 		polllist.pids = s_calloc(n_max, sizeof (pid_t));
1375*7c478bd9Sstevel@tonic-gate 		polllist.refcnt = s_calloc(n_max, sizeof (int));
1376*7c478bd9Sstevel@tonic-gate 		polllist.fds = s_calloc(n_max, sizeof (struct pollfd));
1377*7c478bd9Sstevel@tonic-gate 	} else {
1378*7c478bd9Sstevel@tonic-gate 		n_max = polllist.n_max_pids + n_chunk;
1379*7c478bd9Sstevel@tonic-gate 		polllist.pids = s_realloc(polllist.pids,
1380*7c478bd9Sstevel@tonic-gate 		    n_max * sizeof (pid_t));
1381*7c478bd9Sstevel@tonic-gate 		polllist.refcnt = s_realloc(polllist.refcnt,
1382*7c478bd9Sstevel@tonic-gate 		    n_max * sizeof (int));
1383*7c478bd9Sstevel@tonic-gate 		polllist.fds = s_realloc(polllist.fds,
1384*7c478bd9Sstevel@tonic-gate 		    n_max * sizeof (struct pollfd));
1385*7c478bd9Sstevel@tonic-gate 	}
1386*7c478bd9Sstevel@tonic-gate 	polllist.n_max_pids = n_max;
1387*7c478bd9Sstevel@tonic-gate 	polllist.n_pids++;
1388*7c478bd9Sstevel@tonic-gate 	return (index);
1389*7c478bd9Sstevel@tonic-gate }
1390*7c478bd9Sstevel@tonic-gate 
1391*7c478bd9Sstevel@tonic-gate /*
1392*7c478bd9Sstevel@tonic-gate  * rcm_req_lock must be held
1393*7c478bd9Sstevel@tonic-gate  */
1394*7c478bd9Sstevel@tonic-gate static void
1395*7c478bd9Sstevel@tonic-gate add_to_polling_list(pid_t pid)
1396*7c478bd9Sstevel@tonic-gate {
1397*7c478bd9Sstevel@tonic-gate 	int fd, index;
1398*7c478bd9Sstevel@tonic-gate 	char procfile[MAXPATHLEN];
1399*7c478bd9Sstevel@tonic-gate 
1400*7c478bd9Sstevel@tonic-gate 	if (pid == (pid_t)0)
1401*7c478bd9Sstevel@tonic-gate 		return;
1402*7c478bd9Sstevel@tonic-gate 
1403*7c478bd9Sstevel@tonic-gate 	rcm_log_message(RCM_TRACE1, "add_to_polling_list(%ld)\n", pid);
1404*7c478bd9Sstevel@tonic-gate 
1405*7c478bd9Sstevel@tonic-gate 	/*
1406*7c478bd9Sstevel@tonic-gate 	 * Need to stop the poll thread before manipulating the polllist
1407*7c478bd9Sstevel@tonic-gate 	 * since the poll thread may be using polllist.fds[] and
1408*7c478bd9Sstevel@tonic-gate 	 * polllist.n_pids. As an optimization, first check if the pid
1409*7c478bd9Sstevel@tonic-gate 	 * is already in the polllist. If it is, there is no need to
1410*7c478bd9Sstevel@tonic-gate 	 * stop the poll thread. Just increment the pid reference count
1411*7c478bd9Sstevel@tonic-gate 	 * and return.
1412*7c478bd9Sstevel@tonic-gate 	 */
1413*7c478bd9Sstevel@tonic-gate 	index = find_pid_index(pid);
1414*7c478bd9Sstevel@tonic-gate 	if (index != -1) {
1415*7c478bd9Sstevel@tonic-gate 		polllist.refcnt[index]++;
1416*7c478bd9Sstevel@tonic-gate 		return;
1417*7c478bd9Sstevel@tonic-gate 	}
1418*7c478bd9Sstevel@tonic-gate 
1419*7c478bd9Sstevel@tonic-gate 	stop_polling_thread();
1420*7c478bd9Sstevel@tonic-gate 
1421*7c478bd9Sstevel@tonic-gate 	/*
1422*7c478bd9Sstevel@tonic-gate 	 * In an attempt to stop the poll thread we may have released
1423*7c478bd9Sstevel@tonic-gate 	 * and reacquired rcm_req_lock. So find the index again.
1424*7c478bd9Sstevel@tonic-gate 	 */
1425*7c478bd9Sstevel@tonic-gate 	index = find_pid_index(pid);
1426*7c478bd9Sstevel@tonic-gate 	if (index != -1) {
1427*7c478bd9Sstevel@tonic-gate 		polllist.refcnt[index]++;
1428*7c478bd9Sstevel@tonic-gate 		goto done;
1429*7c478bd9Sstevel@tonic-gate 	}
1430*7c478bd9Sstevel@tonic-gate 
1431*7c478bd9Sstevel@tonic-gate 	/*
1432*7c478bd9Sstevel@tonic-gate 	 * Open a /proc file
1433*7c478bd9Sstevel@tonic-gate 	 */
1434*7c478bd9Sstevel@tonic-gate 	(void) sprintf(procfile, "/proc/%ld/as", pid);
1435*7c478bd9Sstevel@tonic-gate 	if ((fd = open(procfile, O_RDONLY)) == -1) {
1436*7c478bd9Sstevel@tonic-gate 		rcm_log_message(RCM_NOTICE, gettext("open(%s): %s\n"),
1437*7c478bd9Sstevel@tonic-gate 		    procfile, strerror(errno));
1438*7c478bd9Sstevel@tonic-gate 		goto done;
1439*7c478bd9Sstevel@tonic-gate 	}
1440*7c478bd9Sstevel@tonic-gate 
1441*7c478bd9Sstevel@tonic-gate 	/*
1442*7c478bd9Sstevel@tonic-gate 	 * add pid to polllist
1443*7c478bd9Sstevel@tonic-gate 	 */
1444*7c478bd9Sstevel@tonic-gate 	index = get_pid_index();
1445*7c478bd9Sstevel@tonic-gate 	polllist.pids[index] = pid;
1446*7c478bd9Sstevel@tonic-gate 	polllist.refcnt[index] = 1;
1447*7c478bd9Sstevel@tonic-gate 	polllist.fds[index].fd = fd;
1448*7c478bd9Sstevel@tonic-gate 	polllist.fds[index].events = 0;
1449*7c478bd9Sstevel@tonic-gate 	polllist.fds[index].revents = 0;
1450*7c478bd9Sstevel@tonic-gate 
1451*7c478bd9Sstevel@tonic-gate 	rcm_log_message(RCM_DEBUG, "add pid %ld at index %d\n", pid, index);
1452*7c478bd9Sstevel@tonic-gate 
1453*7c478bd9Sstevel@tonic-gate done:
1454*7c478bd9Sstevel@tonic-gate 	start_polling_thread();
1455*7c478bd9Sstevel@tonic-gate }
1456*7c478bd9Sstevel@tonic-gate 
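/*
 * A minimal sketch of the expected pairing of the add/remove calls,
 * assuming the caller takes rcm_req_lock as required.  The wrapper below
 * is hypothetical; only rcm_req_lock, add_to_polling_list() and
 * remove_from_polling_list() are real.
 */
#if 0
static void
track_initiator_sketch(pid_t pid)
{
	/* when a dr request from this initiator is recorded */
	(void) mutex_lock(&rcm_req_lock);
	add_to_polling_list(pid);	/* takes a reference on the pid */
	(void) mutex_unlock(&rcm_req_lock);

	/* ... later, when that request is completed or removed ... */
	(void) mutex_lock(&rcm_req_lock);
	remove_from_polling_list(pid);	/* drops the reference */
	(void) mutex_unlock(&rcm_req_lock);
}
#endif
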
1457*7c478bd9Sstevel@tonic-gate /*
1458*7c478bd9Sstevel@tonic-gate  * rcm_req_lock must be held
1459*7c478bd9Sstevel@tonic-gate  */
1460*7c478bd9Sstevel@tonic-gate static void
1461*7c478bd9Sstevel@tonic-gate remove_from_polling_list(pid_t pid)
1462*7c478bd9Sstevel@tonic-gate {
1463*7c478bd9Sstevel@tonic-gate 	int i, index;
1464*7c478bd9Sstevel@tonic-gate 
1465*7c478bd9Sstevel@tonic-gate 	if (pid == (pid_t)0)
1466*7c478bd9Sstevel@tonic-gate 		return;
1467*7c478bd9Sstevel@tonic-gate 
1468*7c478bd9Sstevel@tonic-gate 	rcm_log_message(RCM_TRACE1, "remove_from_polling_list(%ld)\n", pid);
1469*7c478bd9Sstevel@tonic-gate 
1470*7c478bd9Sstevel@tonic-gate 	/*
1471*7c478bd9Sstevel@tonic-gate 	 * Need to stop the poll thread before manipulating the polllist
1472*7c478bd9Sstevel@tonic-gate 	 * since the poll thread may be using polllist.fds[] and
1473*7c478bd9Sstevel@tonic-gate 	 * polllist.n_pids. As an optimization, first check the pid
1474*7c478bd9Sstevel@tonic-gate 	 * reference count. If the pid reference count is greater than 1,
1475*7c478bd9Sstevel@tonic-gate 	 * there is no need to stop the polling thread.
1476*7c478bd9Sstevel@tonic-gate 	 */
1477*7c478bd9Sstevel@tonic-gate 
1478*7c478bd9Sstevel@tonic-gate 	index = find_pid_index(pid);
1479*7c478bd9Sstevel@tonic-gate 	if (index == -1) {
1480*7c478bd9Sstevel@tonic-gate 		rcm_log_message(RCM_NOTICE,
1481*7c478bd9Sstevel@tonic-gate 		    gettext("error removing pid %ld from polling list\n"), pid);
1482*7c478bd9Sstevel@tonic-gate 		return;
1483*7c478bd9Sstevel@tonic-gate 	}
1484*7c478bd9Sstevel@tonic-gate 
1485*7c478bd9Sstevel@tonic-gate 	/*
1486*7c478bd9Sstevel@tonic-gate 	 * decrement the pid refcnt
1487*7c478bd9Sstevel@tonic-gate 	 */
1488*7c478bd9Sstevel@tonic-gate 	if (polllist.refcnt[index] > 1) {
1489*7c478bd9Sstevel@tonic-gate 		polllist.refcnt[index]--;
1490*7c478bd9Sstevel@tonic-gate 		return;
1491*7c478bd9Sstevel@tonic-gate 	}
1492*7c478bd9Sstevel@tonic-gate 
1493*7c478bd9Sstevel@tonic-gate 	stop_polling_thread();
1494*7c478bd9Sstevel@tonic-gate 
1495*7c478bd9Sstevel@tonic-gate 	/*
1496*7c478bd9Sstevel@tonic-gate 	 * In an attempt to stop the poll thread we may have released
1497*7c478bd9Sstevel@tonic-gate 	 * and reacquired rcm_req_lock. So find the index again.
1498*7c478bd9Sstevel@tonic-gate 	 */
1499*7c478bd9Sstevel@tonic-gate 	index = find_pid_index(pid);
1500*7c478bd9Sstevel@tonic-gate 	if (index == -1) {
1501*7c478bd9Sstevel@tonic-gate 		rcm_log_message(RCM_NOTICE,
1502*7c478bd9Sstevel@tonic-gate 		    gettext("error removing pid %ld from polling list\n"), pid);
1503*7c478bd9Sstevel@tonic-gate 		goto done;
1504*7c478bd9Sstevel@tonic-gate 	}
1505*7c478bd9Sstevel@tonic-gate 
1506*7c478bd9Sstevel@tonic-gate 	if (--polllist.refcnt[index] > 0)
1507*7c478bd9Sstevel@tonic-gate 		goto done;
1508*7c478bd9Sstevel@tonic-gate 
1509*7c478bd9Sstevel@tonic-gate 	/*
1510*7c478bd9Sstevel@tonic-gate 	 * refcnt down to zero, delete pid from polling list
1511*7c478bd9Sstevel@tonic-gate 	 */
1512*7c478bd9Sstevel@tonic-gate 	(void) close(polllist.fds[index].fd);
1513*7c478bd9Sstevel@tonic-gate 	polllist.n_pids--;
1514*7c478bd9Sstevel@tonic-gate 
1515*7c478bd9Sstevel@tonic-gate 	for (i = index; i < polllist.n_pids; i++) {
1516*7c478bd9Sstevel@tonic-gate 		polllist.pids[i] = polllist.pids[i + 1];
1517*7c478bd9Sstevel@tonic-gate 		polllist.refcnt[i] = polllist.refcnt[i + 1];
1518*7c478bd9Sstevel@tonic-gate 		bcopy(&polllist.fds[i + 1], &polllist.fds[i],
1519*7c478bd9Sstevel@tonic-gate 		    sizeof (struct pollfd));
1520*7c478bd9Sstevel@tonic-gate 	}
1521*7c478bd9Sstevel@tonic-gate 
1522*7c478bd9Sstevel@tonic-gate 	rcm_log_message(RCM_DEBUG, "remove pid %ld at index %d\n", pid, index);
1523*7c478bd9Sstevel@tonic-gate 
1524*7c478bd9Sstevel@tonic-gate done:
1525*7c478bd9Sstevel@tonic-gate 	start_polling_thread();
1526*7c478bd9Sstevel@tonic-gate }
1527*7c478bd9Sstevel@tonic-gate 
1528*7c478bd9Sstevel@tonic-gate void
1529*7c478bd9Sstevel@tonic-gate init_poll_thread()
1530*7c478bd9Sstevel@tonic-gate {
1531*7c478bd9Sstevel@tonic-gate 	polllist.poll_tid = (thread_t)-1;
1532*7c478bd9Sstevel@tonic-gate }
1533*7c478bd9Sstevel@tonic-gate 
1534*7c478bd9Sstevel@tonic-gate void
1535*7c478bd9Sstevel@tonic-gate cleanup_poll_thread()
1536*7c478bd9Sstevel@tonic-gate {
1537*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&rcm_req_lock);
1538*7c478bd9Sstevel@tonic-gate 	if (polllist.poll_tid == thr_self()) {
1539*7c478bd9Sstevel@tonic-gate 		rcm_log_message(RCM_TRACE2,
1540*7c478bd9Sstevel@tonic-gate 		    "cleanup_poll_thread: n_pids = %d\n", polllist.n_pids);
1541*7c478bd9Sstevel@tonic-gate 		polllist.poll_tid = (thread_t)-1;
1542*7c478bd9Sstevel@tonic-gate 		(void) cond_broadcast(&polllist.cv);
1543*7c478bd9Sstevel@tonic-gate 	}
1544*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&rcm_req_lock);
1545*7c478bd9Sstevel@tonic-gate }
1546*7c478bd9Sstevel@tonic-gate 
1547*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
1548*7c478bd9Sstevel@tonic-gate static void *
1549*7c478bd9Sstevel@tonic-gate pollfunc(void *arg)
1550*7c478bd9Sstevel@tonic-gate {
1551*7c478bd9Sstevel@tonic-gate 	sigset_t mask;
1552*7c478bd9Sstevel@tonic-gate 
1553*7c478bd9Sstevel@tonic-gate 	rcm_log_message(RCM_TRACE2, "poll thread started. n_pids = %d\n",
1554*7c478bd9Sstevel@tonic-gate 	    polllist.n_pids);
1555*7c478bd9Sstevel@tonic-gate 
1556*7c478bd9Sstevel@tonic-gate 	/*
1557*7c478bd9Sstevel@tonic-gate 	 * Unblock SIGUSR1 to allow polling thread to be killed
1558*7c478bd9Sstevel@tonic-gate 	 */
1559*7c478bd9Sstevel@tonic-gate 	(void) sigemptyset(&mask);
1560*7c478bd9Sstevel@tonic-gate 	(void) sigaddset(&mask, SIGUSR1);
1561*7c478bd9Sstevel@tonic-gate 	(void) thr_sigsetmask(SIG_UNBLOCK, &mask, NULL);
1562*7c478bd9Sstevel@tonic-gate 
1563*7c478bd9Sstevel@tonic-gate 	(void) poll(polllist.fds, polllist.n_pids, (time_t)-1);
1564*7c478bd9Sstevel@tonic-gate 
1565*7c478bd9Sstevel@tonic-gate 	/*
1566*7c478bd9Sstevel@tonic-gate 	 * Block SIGUSR1 to avoid being killed while holding a lock
1567*7c478bd9Sstevel@tonic-gate 	 */
1568*7c478bd9Sstevel@tonic-gate 	(void) sigemptyset(&mask);
1569*7c478bd9Sstevel@tonic-gate 	(void) sigaddset(&mask, SIGUSR1);
1570*7c478bd9Sstevel@tonic-gate 	(void) thr_sigsetmask(SIG_BLOCK, &mask, NULL);
1571*7c478bd9Sstevel@tonic-gate 
1572*7c478bd9Sstevel@tonic-gate 	rcm_log_message(RCM_TRACE2, "returned from poll()\n");
1573*7c478bd9Sstevel@tonic-gate 
1574*7c478bd9Sstevel@tonic-gate 	cleanup_poll_thread();
1575*7c478bd9Sstevel@tonic-gate 
1576*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&barrier.lock);
1577*7c478bd9Sstevel@tonic-gate 	need_cleanup = 1;
1578*7c478bd9Sstevel@tonic-gate 	(void) cond_broadcast(&barrier.cv);
1579*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&barrier.lock);
1580*7c478bd9Sstevel@tonic-gate 
1581*7c478bd9Sstevel@tonic-gate 	return (NULL);
1582*7c478bd9Sstevel@tonic-gate }
1583*7c478bd9Sstevel@tonic-gate 
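/*
 * A minimal standalone sketch of the process-death detection technique
 * used by pollfunc() above: open /proc/<pid>/as, request no events, and
 * let poll() return (with POLLHUP set in revents) when the process goes
 * away.  This is an illustration only, not daemon code.
 */
#if 0
static int
wait_for_exit_sketch(pid_t pid)
{
	char procfile[MAXPATHLEN];
	struct pollfd pfd;

	(void) snprintf(procfile, sizeof (procfile), "/proc/%ld/as",
	    (long)pid);
	if ((pfd.fd = open(procfile, O_RDONLY)) == -1)
		return (-1);
	pfd.events = 0;		/* only interested in hangup/error */
	pfd.revents = 0;

	(void) poll(&pfd, 1, -1);	/* returns when the process exits */
	(void) close(pfd.fd);
	return (0);
}
#endif
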
1584*7c478bd9Sstevel@tonic-gate /*
1585*7c478bd9Sstevel@tonic-gate  * rcm_req_lock must be held
1586*7c478bd9Sstevel@tonic-gate  */
1587*7c478bd9Sstevel@tonic-gate void
1588*7c478bd9Sstevel@tonic-gate start_polling_thread()
1589*7c478bd9Sstevel@tonic-gate {
1590*7c478bd9Sstevel@tonic-gate 	int err;
1591*7c478bd9Sstevel@tonic-gate 
1592*7c478bd9Sstevel@tonic-gate 	if (rcmd_get_state() != RCMD_NORMAL)
1593*7c478bd9Sstevel@tonic-gate 		return;
1594*7c478bd9Sstevel@tonic-gate 
1595*7c478bd9Sstevel@tonic-gate 	if (polllist.poll_tid != (thread_t)-1 || polllist.n_pids == 0)
1596*7c478bd9Sstevel@tonic-gate 		return;
1597*7c478bd9Sstevel@tonic-gate 
1598*7c478bd9Sstevel@tonic-gate 	if ((err = thr_create(NULL, 0, pollfunc, NULL, THR_DETACHED,
1599*7c478bd9Sstevel@tonic-gate 	    &polllist.poll_tid)) == 0)
1600*7c478bd9Sstevel@tonic-gate 		polllist.signaled = 0;
1601*7c478bd9Sstevel@tonic-gate 	else
1602*7c478bd9Sstevel@tonic-gate 		rcm_log_message(RCM_ERROR,
1603*7c478bd9Sstevel@tonic-gate 		    gettext("failed to create polling thread: %s\n"),
1604*7c478bd9Sstevel@tonic-gate 		    strerror(err));
1605*7c478bd9Sstevel@tonic-gate }
1606*7c478bd9Sstevel@tonic-gate 
1607*7c478bd9Sstevel@tonic-gate /*
1608*7c478bd9Sstevel@tonic-gate  * rcm_req_lock must be held
1609*7c478bd9Sstevel@tonic-gate  */
1610*7c478bd9Sstevel@tonic-gate static void
1611*7c478bd9Sstevel@tonic-gate stop_polling_thread()
1612*7c478bd9Sstevel@tonic-gate {
1613*7c478bd9Sstevel@tonic-gate 	int err;
1614*7c478bd9Sstevel@tonic-gate 
1615*7c478bd9Sstevel@tonic-gate 	while (polllist.poll_tid != (thread_t)-1) {
1616*7c478bd9Sstevel@tonic-gate 		if (polllist.signaled == 0) {
1617*7c478bd9Sstevel@tonic-gate 			if ((err = thr_kill(polllist.poll_tid, SIGUSR1)) == 0)
1618*7c478bd9Sstevel@tonic-gate 				polllist.signaled = 1;
1619*7c478bd9Sstevel@tonic-gate 			else
1620*7c478bd9Sstevel@tonic-gate 				/*
1621*7c478bd9Sstevel@tonic-gate 				 * thr_kill shouldn't have failed since the
1622*7c478bd9Sstevel@tonic-gate 				 * poll thread id and the signal are valid.
1623*7c478bd9Sstevel@tonic-gate 				 * So log an error. Since when thr_kill
1624*7c478bd9Sstevel@tonic-gate 				 * fails no signal is sent (as per man page),
1625*7c478bd9Sstevel@tonic-gate 				 * the cond_wait below will wait until the
1626*7c478bd9Sstevel@tonic-gate 				 * poll thread exits by some other means.
1627*7c478bd9Sstevel@tonic-gate 				 * The poll thread, for example, exits on its
1628*7c478bd9Sstevel@tonic-gate 				 * own when any DR initiator process that it
1629*7c478bd9Sstevel@tonic-gate 				 * is currently polling exits.
1630*7c478bd9Sstevel@tonic-gate 				 */
1631*7c478bd9Sstevel@tonic-gate 				rcm_log_message(RCM_ERROR,
1632*7c478bd9Sstevel@tonic-gate 				    gettext(
1633*7c478bd9Sstevel@tonic-gate 				    "failed to kill polling thread %d: %s\n"),
1634*7c478bd9Sstevel@tonic-gate 				    polllist.poll_tid, strerror(err));
1635*7c478bd9Sstevel@tonic-gate 		}
1636*7c478bd9Sstevel@tonic-gate 		(void) cond_wait(&polllist.cv, &rcm_req_lock);
1637*7c478bd9Sstevel@tonic-gate 	}
1638*7c478bd9Sstevel@tonic-gate }