/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * protocol.c - protocols between graph engine and restarters
 *
 *   The graph engine uses restarter_protocol_send_event() to send a
 *   restarter_event_type_t to the restarter.  For delegated restarters,
 *   this is published on the GPEC queue for the restarter, which can
 *   then be consumed by the librestart interfaces.  For services managed
 *   by svc.startd, the event is stored on the local restarter_queue list,
 *   where it can be dequeued by the restarter.
 *
 *   The svc.startd restarter uses graph_protocol_send_event() to send
 *   a graph_event_type_t to the graph engine when an instance's states are
 *   updated.
 *
 *   The graph engine uses restarter_protocol_init_delegate() to
 *   register its interest in a particular delegated restarter's instance
 *   state events.  The state_cb() registered on the event channel then
 *   invokes graph_protocol_send_event() to communicate the update to
 *   the graph engine.
 */
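/*
 *   In outline, for a delegated restarter: the graph engine calls
 *   restarter_protocol_init_delegate(fmri) once to bind the restarter's
 *   event channels and obtain its delegate channel, and then passes that
 *   channel to restarter_protocol_send_event() for each event it sends.
 *   State updates flow back through state_cb(), which hands them to the
 *   graph engine via graph_protocol_send_event().
 */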

#include <assert.h>
#include <libintl.h>
#include <libsysevent.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
#include <strings.h>
#include <sys/time.h>
#include <errno.h>
#include <libuutil.h>

#include <librestart.h>
#include <librestart_priv.h>

#include "protocol.h"
#include "startd.h"

/* Local event queue structures. */
typedef struct graph_protocol_event_queue {
	uu_list_t		*gpeq_event_list;
	pthread_mutex_t		gpeq_lock;
} graph_protocol_event_queue_t;

typedef struct restarter_protocol_event_queue {
	uu_list_t		*rpeq_event_list;
	pthread_mutex_t		rpeq_lock;
} restarter_protocol_event_queue_t;

static uu_list_pool_t *restarter_protocol_event_queue_pool;
static restarter_protocol_event_queue_t *restarter_queue;

static uu_list_pool_t *graph_protocol_event_queue_pool;
static graph_protocol_event_queue_t *graph_queue;

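/*
 * Initialize the list pool and queue used to pass events from the
 * restarters to the graph engine.
 */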
void
graph_protocol_init()
{
	graph_protocol_event_queue_pool = startd_list_pool_create(
	    "graph_protocol_events", sizeof (graph_protocol_event_t),
	    offsetof(graph_protocol_event_t, gpe_link), NULL,
	    UU_LIST_POOL_DEBUG);

	graph_queue = startd_zalloc(sizeof (graph_protocol_event_queue_t));

	(void) pthread_mutex_init(&graph_queue->gpeq_lock, &mutex_attrs);
	graph_queue->gpeq_event_list = startd_list_create(
	    graph_protocol_event_queue_pool, graph_queue, 0);
}

/*
 * "data" will be freed by the consumer
 */
static void
graph_event_enqueue(const char *inst, graph_event_type_t event,
    protocol_states_t *data)
{
	graph_protocol_event_t *e;

	e = startd_zalloc(sizeof (graph_protocol_event_t));

	if (inst != NULL) {
		int size = strlen(inst) + 1;
		e->gpe_inst = startd_alloc(size);
		e->gpe_inst_sz = size;
		(void) strlcpy(e->gpe_inst, inst, size);
	}
	e->gpe_type = event;
	e->gpe_data = data;

	(void) pthread_mutex_init(&e->gpe_lock, &mutex_attrs);

	MUTEX_LOCK(&graph_queue->gpeq_lock);
	uu_list_node_init(e, &e->gpe_link, graph_protocol_event_queue_pool);
	if (uu_list_insert_before(graph_queue->gpeq_event_list, NULL, e) == -1)
		uu_die("failed to enqueue graph event (%s: %s)\n",
		    e->gpe_inst, uu_strerror(uu_error()));

	MUTEX_UNLOCK(&graph_queue->gpeq_lock);
}

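/*
 * Free an event obtained from graph_event_dequeue() once the caller has
 * finished processing it.
 */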
void
graph_event_release(graph_protocol_event_t *e)
{
	uu_list_node_fini(e, &e->gpe_link, graph_protocol_event_queue_pool);
	(void) pthread_mutex_destroy(&e->gpe_lock);
	if (e->gpe_inst != NULL)
		startd_free(e->gpe_inst, e->gpe_inst_sz);
	startd_free(e, sizeof (graph_protocol_event_t));
}

/*
 * graph_protocol_event_t *graph_event_dequeue()
 *   The caller must hold gu_lock, and is expected to be a single thread.
 *   It is allowed to utilize graph_event_requeue() and abort processing
 *   on the event. If graph_event_requeue() is not called, the caller is
 *   expected to call graph_event_release() when finished.
 */
graph_protocol_event_t *
graph_event_dequeue()
{
	graph_protocol_event_t *e;

	MUTEX_LOCK(&graph_queue->gpeq_lock);

	e = uu_list_first(graph_queue->gpeq_event_list);
	if (e == NULL) {
		MUTEX_UNLOCK(&graph_queue->gpeq_lock);
		return (NULL);
	}

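	/*
	 * If other events remain queued, set the wakeup flag so the
	 * consumer dequeues them before sleeping.
	 */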
	if (uu_list_next(graph_queue->gpeq_event_list, e) != NULL)
		gu->gu_wakeup = 1;
	uu_list_remove(graph_queue->gpeq_event_list, e);
	MUTEX_UNLOCK(&graph_queue->gpeq_lock);

	return (e);
}

/*
 * void graph_event_requeue()
 *   Requeue the event back at the head of the queue.
 */
void
graph_event_requeue(graph_protocol_event_t *e)
{
	assert(e != NULL);

	log_framework(LOG_DEBUG, "Requeuing event\n");

	MUTEX_LOCK(&graph_queue->gpeq_lock);
	if (uu_list_insert_after(graph_queue->gpeq_event_list, NULL, e) == -1)
		uu_die("failed to requeue graph event (%s: %s)\n",
		    e->gpe_inst, uu_strerror(uu_error()));

	MUTEX_UNLOCK(&graph_queue->gpeq_lock);
}

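/*
 * Enqueue an event for the graph engine and wake its consumer thread.
 */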
void
graph_protocol_send_event(const char *inst, graph_event_type_t event,
    protocol_states_t *data)
{
	graph_event_enqueue(inst, event, data);
	MUTEX_LOCK(&gu->gu_lock);
	gu->gu_wakeup = 1;
	(void) pthread_cond_broadcast(&gu->gu_cv);
	MUTEX_UNLOCK(&gu->gu_lock);
}

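/*
 * Initialize the list pool and queue used to pass events from the graph
 * engine to the instances restarted by svc.startd itself.
 */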
void
restarter_protocol_init()
{
	restarter_protocol_event_queue_pool = startd_list_pool_create(
	    "restarter_protocol_events", sizeof (restarter_protocol_event_t),
	    offsetof(restarter_protocol_event_t, rpe_link), NULL,
	    UU_LIST_POOL_DEBUG);

	restarter_queue = startd_zalloc(
	    sizeof (restarter_protocol_event_queue_t));

	(void) pthread_mutex_init(&restarter_queue->rpeq_lock, &mutex_attrs);
	restarter_queue->rpeq_event_list = startd_list_create(
	    restarter_protocol_event_queue_pool, restarter_queue, 0);

	log_framework(LOG_DEBUG, "Initialized restarter protocol\n");
}

/*
 * void restarter_event_enqueue()
 *   Enqueue a restarter event.
 */
static void
restarter_event_enqueue(const char *inst, restarter_event_type_t event,
    int32_t reason)
{
	restarter_protocol_event_t *e;
	int r;

	/* Allocate and populate the event structure. */
	e = startd_zalloc(sizeof (restarter_protocol_event_t));

	e->rpe_inst = startd_alloc(strlen(inst) + 1);
	(void) strlcpy(e->rpe_inst, inst, strlen(inst) + 1);
	e->rpe_type = event;
	e->rpe_reason = reason;

	MUTEX_LOCK(&restarter_queue->rpeq_lock);
	uu_list_node_init(e, &e->rpe_link, restarter_protocol_event_queue_pool);
	r = uu_list_insert_before(restarter_queue->rpeq_event_list, NULL, e);
	assert(r == 0);

	MUTEX_UNLOCK(&restarter_queue->rpeq_lock);
}

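/*
 * Free an event obtained from restarter_event_dequeue() once the caller has
 * finished processing it.
 */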
void
restarter_event_release(restarter_protocol_event_t *e)
{
	uu_list_node_fini(e, &e->rpe_link, restarter_protocol_event_queue_pool);
	startd_free(e->rpe_inst, strlen(e->rpe_inst) + 1);
	startd_free(e, sizeof (restarter_protocol_event_t));
}

/*
 * restarter_protocol_event_t *restarter_event_dequeue()
 *   Dequeue a restarter protocol event. The caller is expected to be
 *   a single thread. It is allowed to utilize restarter_event_requeue()
 *   and abort processing on the event. The caller is expected to call
 *   restarter_event_release() when finished.
 */
restarter_protocol_event_t *
restarter_event_dequeue()
{
	restarter_protocol_event_t *e = NULL;

	MUTEX_LOCK(&restarter_queue->rpeq_lock);

	e = uu_list_first(restarter_queue->rpeq_event_list);
	if (e == NULL) {
		MUTEX_UNLOCK(&restarter_queue->rpeq_lock);
		return (NULL);
	}

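	/*
	 * If other events remain queued, set the wakeup flag so the
	 * consumer dequeues them before sleeping.
	 */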
	if (uu_list_next(restarter_queue->rpeq_event_list, e) != NULL)
		ru->restarter_update_wakeup = 1;
	uu_list_remove(restarter_queue->rpeq_event_list, e);
	MUTEX_UNLOCK(&restarter_queue->rpeq_lock);

	return (e);
}

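/*
 * Callback subscribed on a delegated restarter's master event channel.
 * Decode the state-change sysevent and forward it to the graph engine via
 * graph_protocol_send_event().
 */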
static int
state_cb(sysevent_t *syse, void *cookie)
{
	char *fmri = (char *)cookie;
	char *instance_name;
	int32_t reason;
	nvlist_t *attr_list = NULL;
	int state, next_state;
	char str_state[MAX_SCF_STATE_STRING_SZ];
	char str_next_state[MAX_SCF_STATE_STRING_SZ];
	protocol_states_t *states;
	int err;
	ssize_t sz;

	/*
	 * Might fail due to a bad event or a lack of memory.  Return
	 * EAGAIN so the callback is retried later.
	 */
	if (sysevent_get_attr_list(syse, &attr_list) != 0)
		return (EAGAIN);

	if ((nvlist_lookup_int32(attr_list, RESTARTER_NAME_STATE,
	    &state) != 0) ||
	    (nvlist_lookup_int32(attr_list, RESTARTER_NAME_NEXT_STATE,
	    &next_state) != 0) ||
	    (nvlist_lookup_int32(attr_list, RESTARTER_NAME_ERROR, &err) != 0) ||
	    (nvlist_lookup_string(attr_list, RESTARTER_NAME_INSTANCE,
	    &instance_name) != 0) ||
	    (nvlist_lookup_int32(attr_list, RESTARTER_NAME_REASON, &reason) !=
	    0))
		uu_die("%s: can't decode nvlist\n", fmri);

	states = startd_alloc(sizeof (protocol_states_t));
	states->ps_state = state;
	states->ps_state_next = next_state;
	states->ps_err = err;
	states->ps_reason = reason;

	graph_protocol_send_event(instance_name, GRAPH_UPDATE_STATE_CHANGE,
	    states);

	sz = restarter_state_to_string(state, str_state, sizeof (str_state));
	assert(sz < sizeof (str_state));
	sz = restarter_state_to_string(next_state, str_next_state,
	    sizeof (str_next_state));
	assert(sz < sizeof (str_next_state));
	log_framework(LOG_DEBUG, "%s: state updates for %s (%s, %s)\n", fmri,
	    instance_name, str_state, str_next_state);
	nvlist_free(attr_list);
	return (0);
}

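/*
 * Bind the delegate and master event channels for the given restarter FMRI
 * and subscribe state_cb() to state events on the master channel.  Returns
 * the delegate channel used to send events to the restarter, or NULL on
 * failure or if fmri names the master restarter itself.
 */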
evchan_t *
restarter_protocol_init_delegate(char *fmri)
{
	char *delegate_channel_name, *master_channel_name, *sid;
	evchan_t *delegate_channel, *master_channel;
	int r = 0;

	/* master restarter -- nothing to do */
	if (strcmp(fmri, SCF_SERVICE_STARTD) == 0) {
		uu_warn("Attempt to initialize restarter protocol delegate "
		    "with %s\n", fmri);
		return (NULL);
	}

	log_framework(LOG_DEBUG, "%s: Initializing protocol for delegate\n",
	    fmri);

	delegate_channel_name = master_channel_name = NULL;
	if ((delegate_channel_name = _restarter_get_channel_name(fmri,
	    RESTARTER_CHANNEL_DELEGATE)) == NULL ||
	    (master_channel_name = _restarter_get_channel_name(fmri,
	    RESTARTER_CHANNEL_MASTER)) == NULL ||
	    (sid = strdup("svc.startd")) == NULL) {
		if (delegate_channel_name) {
			free(delegate_channel_name);
		}
		if (master_channel_name) {
			free(master_channel_name);
		}
		uu_warn("Allocation of channel name failed");

		return (NULL);
	}

	if ((r = sysevent_evc_bind(delegate_channel_name, &delegate_channel,
	    EVCH_CREAT|EVCH_HOLD_PEND)) != 0) {
		uu_warn("%s: sysevent_evc_bind failed: %s\n",
		    delegate_channel_name, strerror(errno));
		goto out;
	}

	if ((r = sysevent_evc_bind(master_channel_name, &master_channel,
	    EVCH_CREAT|EVCH_HOLD_PEND)) != 0) {
		uu_warn("%s: sysevent_evc_bind failed: %s\n",
		    master_channel_name, strerror(errno));
		goto out;
	}

	log_framework(LOG_DEBUG,
	    "%s: Bound to channel %s (delegate), %s (master)\n", fmri,
	    delegate_channel_name, master_channel_name);

	if ((r = sysevent_evc_subscribe(master_channel, sid, EC_ALL,
	    state_cb, fmri, EVCH_SUB_KEEP)) != 0) {
		/*
		 * The following errors can be returned in this case:
		 *	EINVAL: inappropriate flags, or the dump flag was
		 *		set and the dump failed.
		 *	EEXIST: svc.startd already has a channel with the
		 *		master channel's name.
		 *	ENOMEM: too many subscribers to the channel.
		 */
		uu_warn("Failed to subscribe to restarter %s, channel %s with "
		    "subscriber id %s:\n", fmri, master_channel_name, sid);
		switch (r) {
		case EEXIST:
			uu_warn("Channel name already exists\n");
			break;
		case ENOMEM:
			uu_warn("Too many subscribers for the channel\n");
			break;
		default:
			uu_warn("%s\n", strerror(errno));
		}
	} else {
		log_framework(LOG_DEBUG,
		    "%s: Subscribed to channel %s with subscriber id %s\n",
		    fmri, master_channel_name, "svc.startd");
	}

out:
	free(delegate_channel_name);
	free(master_channel_name);
	free(sid);

	if (r == 0)
		return (delegate_channel);

	return (NULL);
}

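/*
 * Deliver a restarter event for the given instance.  If chan is NULL the
 * instance is managed by svc.startd itself, so the event is queued locally
 * and the restarter event consumer is woken; otherwise the event is
 * published on the delegate's event channel.  ADD_INSTANCE events also
 * count down st_load_instances so that graph loading can complete.
 */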
void
restarter_protocol_send_event(const char *inst, evchan_t *chan,
    restarter_event_type_t event, int32_t reason)
{
	nvlist_t *attr;
	int ret;

	/*
	 * If the service is managed by the master restarter,
	 * queue the event locally.
	 */
	if (chan == NULL) {
		restarter_event_enqueue(inst, event, reason);
		MUTEX_LOCK(&ru->restarter_update_lock);
		ru->restarter_update_wakeup = 1;
		(void) pthread_cond_broadcast(&ru->restarter_update_cv);
		MUTEX_UNLOCK(&ru->restarter_update_lock);
		return;
	}

	/*
	 * Otherwise, send the event to the delegate.
	 */
	log_framework(LOG_DEBUG, "Sending %s to channel 0x%p for %s.\n",
	    event_names[event], chan, inst);
	if (nvlist_alloc(&attr, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_uint32(attr, RESTARTER_NAME_TYPE, event) != 0 ||
	    nvlist_add_string(attr, RESTARTER_NAME_INSTANCE, (char *)inst) !=
	    0 || nvlist_add_uint32(attr, RESTARTER_NAME_REASON,
	    reason) != 0)
		uu_die("Allocation failure\n");

	if ((ret = restarter_event_publish_retry(chan, "protocol", "restarter",
	    "com.sun", "svc.startd", attr, EVCH_NOSLEEP)) != 0) {
		switch (ret) {
		case ENOSPC:
			log_framework(LOG_DEBUG, "Dropping %s event for %s. "
			    "Delegate may not be running.\n",
			    event_names[event], inst);
			break;
		default:
			uu_die("%s: can't publish event: %s\n", inst,
			    strerror(errno));
		}
	}

	nvlist_free(attr);

	if (event != RESTARTER_EVENT_TYPE_ADD_INSTANCE) {
		/*
		 * Not relevant for graph loading.
		 */
		return;
	}

	/*
	 * For the purposes of loading state after interruption, this is
	 * sufficient, as svc.startd(1M) won't receive events on the contracts
	 * associated with each delegate.
	 */
	MUTEX_LOCK(&st->st_load_lock);
	if (--st->st_load_instances == 0)
		(void) pthread_cond_broadcast(&st->st_load_cv);
	MUTEX_UNLOCK(&st->st_load_lock);
}