xref: /titanic_41/usr/src/cmd/svc/startd/protocol.c (revision 2449e17f82f6097fd2c665b64723e31ceecbeca6)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * protocol.c - protocols between graph engine and restarters
 *
 *   The graph engine uses restarter_protocol_send_event() to send a
 *   restarter_event_type_t to the restarter.  For delegated restarters,
 *   this is published on the GPEC queue for the restarter, which can
 *   then be consumed by the librestart interfaces.  For services managed
 *   by svc.startd, the event is stored on the local restarter_queue list,
 *   where it can be dequeued by the restarter.
 *
 *   The svc.startd restarter uses graph_protocol_send_event() to send
 *   a graph_event_type_t to the graph engine when an instance's states are
 *   updated.
 *
 *   The graph engine uses restarter_protocol_init_delegate() to
 *   register its interest in a particular delegated restarter's instance
 *   state events.  The state_cb() registered on the event channel then
 *   invokes graph_protocol_send_event() to communicate the update to
 *   the graph engine.
 */
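
/*
 * For orientation only: a delegated restarter typically consumes the
 * events svc.startd publishes on its GPEC queue through the librestart
 * interfaces, roughly as sketched below.  The sketch is illustrative and
 * not taken from any particular delegate; event_handler, my_fmri and
 * handle are placeholders, and the librestart calls are abbreviated
 * (see librestart.h for the authoritative declarations).
 *
 *	static int
 *	event_handler(restarter_event_t *ev)
 *	{
 *		switch (restarter_event_get_type(ev)) {
 *		case RESTARTER_EVENT_TYPE_START:
 *			(start the instance, then report the resulting
 *			state back to svc.startd)
 *			break;
 *		default:
 *			break;
 *		}
 *		return (0);
 *	}
 *
 *	(void) restarter_bind_handle(RESTARTER_EVENT_VERSION, my_fmri,
 *	    event_handler, 0, &handle);
 */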

#include <assert.h>
#include <libintl.h>
#include <libsysevent.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
#include <strings.h>
#include <sys/time.h>
#include <errno.h>
#include <libuutil.h>

#include <librestart.h>
#include <librestart_priv.h>

#include "protocol.h"
#include "startd.h"

/* Local event queue structures. */
typedef struct graph_protocol_event_queue {
	uu_list_t		*gpeq_event_list;
	pthread_mutex_t		gpeq_lock;
} graph_protocol_event_queue_t;

typedef struct restarter_protocol_event_queue {
	uu_list_t		*rpeq_event_list;
	pthread_mutex_t		rpeq_lock;
} restarter_protocol_event_queue_t;

static uu_list_pool_t *restarter_protocol_event_queue_pool;
static restarter_protocol_event_queue_t *restarter_queue;

static uu_list_pool_t *graph_protocol_event_queue_pool;
static graph_protocol_event_queue_t *graph_queue;

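/*
 * void graph_protocol_init()
 *   Set up the list pool, queue and lock used to pass graph events
 *   from the restarters to the graph engine.
 */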
void
graph_protocol_init()
{
	graph_protocol_event_queue_pool = startd_list_pool_create(
	    "graph_protocol_events", sizeof (graph_protocol_event_t),
	    offsetof(graph_protocol_event_t, gpe_link), NULL,
	    UU_LIST_POOL_DEBUG);

	graph_queue = startd_zalloc(sizeof (graph_protocol_event_queue_t));

	(void) pthread_mutex_init(&graph_queue->gpeq_lock, &mutex_attrs);
	graph_queue->gpeq_event_list = startd_list_create(
	    graph_protocol_event_queue_pool, graph_queue, NULL);
}

/*
 * "data" will be freed by the consumer
 */
static void
graph_event_enqueue(const char *inst, graph_event_type_t event,
    protocol_states_t *data)
{
	graph_protocol_event_t *e;

	e = startd_zalloc(sizeof (graph_protocol_event_t));

	if (inst != NULL) {
		int size = strlen(inst) + 1;
		e->gpe_inst = startd_alloc(size);
		e->gpe_inst_sz = size;
		(void) strlcpy(e->gpe_inst, inst, size);
	}
	e->gpe_type = event;
	e->gpe_data = data;

	(void) pthread_mutex_init(&e->gpe_lock, &mutex_attrs);

	MUTEX_LOCK(&graph_queue->gpeq_lock);
	uu_list_node_init(e, &e->gpe_link, graph_protocol_event_queue_pool);
	if (uu_list_insert_before(graph_queue->gpeq_event_list, NULL, e) == -1)
		uu_die("failed to enqueue graph event (%s: %s)\n",
		    e->gpe_inst, uu_strerror(uu_error()));

	MUTEX_UNLOCK(&graph_queue->gpeq_lock);
}

void
graph_event_release(graph_protocol_event_t *e)
{
	uu_list_node_fini(e, &e->gpe_link, graph_protocol_event_queue_pool);
	(void) pthread_mutex_destroy(&e->gpe_lock);
	if (e->gpe_inst != NULL)
		startd_free(e->gpe_inst, e->gpe_inst_sz);
	startd_free(e, sizeof (graph_protocol_event_t));
}

/*
 * graph_protocol_event_t *graph_event_dequeue()
 *   The caller must hold gu_lock, and is expected to be a single thread.
 *   It is allowed to utilize graph_event_requeue() and abort processing
 *   on the event. If graph_event_requeue() is not called, the caller is
 *   expected to call graph_event_release() when finished.
 */
graph_protocol_event_t *
graph_event_dequeue()
{
	graph_protocol_event_t *e;

	MUTEX_LOCK(&graph_queue->gpeq_lock);

	e = uu_list_first(graph_queue->gpeq_event_list);
	if (e == NULL) {
		MUTEX_UNLOCK(&graph_queue->gpeq_lock);
		return (NULL);
	}

	if (uu_list_next(graph_queue->gpeq_event_list, e) != NULL)
		gu->gu_wakeup = 1;
	uu_list_remove(graph_queue->gpeq_event_list, e);
	MUTEX_UNLOCK(&graph_queue->gpeq_lock);

	return (e);
}

/*
 * void graph_event_requeue()
 *   Requeue the event back at the head of the queue.
 */
void
graph_event_requeue(graph_protocol_event_t *e)
{
	assert(e != NULL);

	log_framework(LOG_DEBUG, "Requeuing event\n");

	MUTEX_LOCK(&graph_queue->gpeq_lock);
	if (uu_list_insert_after(graph_queue->gpeq_event_list, NULL, e) == -1)
		uu_die("failed to requeue graph event (%s: %s)\n",
		    e->gpe_inst, uu_strerror(uu_error()));

	MUTEX_UNLOCK(&graph_queue->gpeq_lock);
}
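
/*
 * Illustrative only: a consumer honoring the contract above (in practice
 * the graph engine's event thread, which holds gu_lock as required by
 * graph_event_dequeue()) might drain the queue as follows, where
 * process_event() stands in for the caller's own handler:
 *
 *	graph_protocol_event_t *e;
 *
 *	while ((e = graph_event_dequeue()) != NULL) {
 *		if (process_event(e) != 0) {
 *			graph_event_requeue(e);
 *			break;
 *		}
 *		graph_event_release(e);
 *	}
 */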
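/*
 * void graph_protocol_send_event()
 *   Enqueue an event for the graph engine and wake the graph event
 *   thread.
 */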
void
graph_protocol_send_event(const char *inst, graph_event_type_t event,
    protocol_states_t *data)
{
	graph_event_enqueue(inst, event, data);
	MUTEX_LOCK(&gu->gu_lock);
	gu->gu_wakeup = 1;
	(void) pthread_cond_broadcast(&gu->gu_cv);
	MUTEX_UNLOCK(&gu->gu_lock);
}

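/*
 * void restarter_protocol_init()
 *   Set up the list pool, queue and lock used to pass restarter events
 *   to the restarter for instances managed by svc.startd itself.
 */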
void
restarter_protocol_init()
{
	restarter_protocol_event_queue_pool = startd_list_pool_create(
	    "restarter_protocol_events", sizeof (restarter_protocol_event_t),
	    offsetof(restarter_protocol_event_t, rpe_link), NULL,
	    UU_LIST_POOL_DEBUG);

	restarter_queue = startd_zalloc(
	    sizeof (restarter_protocol_event_queue_t));

	(void) pthread_mutex_init(&restarter_queue->rpeq_lock, &mutex_attrs);
	restarter_queue->rpeq_event_list = startd_list_create(
	    restarter_protocol_event_queue_pool, restarter_queue, NULL);

	log_framework(LOG_DEBUG, "Initialized restarter protocol\n");
}

/*
 * void restarter_event_enqueue()
 *   Enqueue a restarter event.
 */
static void
restarter_event_enqueue(const char *inst, restarter_event_type_t event)
{
	restarter_protocol_event_t *e;
	int r;

	/* Allocate and populate the event structure. */
	e = startd_zalloc(sizeof (restarter_protocol_event_t));

	e->rpe_inst = startd_alloc(strlen(inst) + 1);
	(void) strlcpy(e->rpe_inst, inst, strlen(inst) + 1);
	e->rpe_type = event;

	MUTEX_LOCK(&restarter_queue->rpeq_lock);
	uu_list_node_init(e, &e->rpe_link, restarter_protocol_event_queue_pool);
	r = uu_list_insert_before(restarter_queue->rpeq_event_list, NULL, e);
	assert(r == 0);

	MUTEX_UNLOCK(&restarter_queue->rpeq_lock);
}

void
restarter_event_release(restarter_protocol_event_t *e)
{
	uu_list_node_fini(e, &e->rpe_link, restarter_protocol_event_queue_pool);
	startd_free(e->rpe_inst, strlen(e->rpe_inst) + 1);
	startd_free(e, sizeof (restarter_protocol_event_t));
}

/*
 * restarter_protocol_event_t *restarter_event_dequeue()
 *   Dequeue a restarter protocol event. The caller is expected to be
 *   a single thread, and to call restarter_event_release() when
 *   finished with the event.
 */
restarter_protocol_event_t *
restarter_event_dequeue()
{
	restarter_protocol_event_t *e = NULL;

	MUTEX_LOCK(&restarter_queue->rpeq_lock);

	e = uu_list_first(restarter_queue->rpeq_event_list);
	if (e == NULL) {
		MUTEX_UNLOCK(&restarter_queue->rpeq_lock);
		return (NULL);
	}

	if (uu_list_next(restarter_queue->rpeq_event_list, e) != NULL)
		ru->restarter_update_wakeup = 1;
	uu_list_remove(restarter_queue->rpeq_event_list, e);
	MUTEX_UNLOCK(&restarter_queue->rpeq_lock);

	return (e);
}
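
/*
 * Illustrative only: the consumer of this queue (in practice the
 * restarter's event thread) typically waits on restarter_update_cv and
 * then drains the queue, along these lines:
 *
 *	restarter_protocol_event_t *e;
 *
 *	MUTEX_LOCK(&ru->restarter_update_lock);
 *	while (ru->restarter_update_wakeup == 0)
 *		(void) pthread_cond_wait(&ru->restarter_update_cv,
 *		    &ru->restarter_update_lock);
 *	ru->restarter_update_wakeup = 0;
 *	MUTEX_UNLOCK(&ru->restarter_update_lock);
 *
 *	while ((e = restarter_event_dequeue()) != NULL) {
 *		(handle the event for e->rpe_inst)
 *		restarter_event_release(e);
 *	}
 */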
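/*
 * int state_cb()
 *   Callback subscribed on a delegated restarter's master event channel
 *   by restarter_protocol_init_delegate(); the cookie is the delegate's
 *   FMRI.  Decodes the instance state update carried by the sysevent and
 *   forwards it to the graph engine via graph_protocol_send_event().
 *   Returns EAGAIN so that delivery is retried if the attribute list
 *   can't be retrieved.
 */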
static int
state_cb(sysevent_t *syse, void *cookie)
{
	char *fmri = (char *)cookie;
	char *instance_name;
	nvlist_t *attr_list = NULL;
	int state, next_state;
	char str_state[MAX_SCF_STATE_STRING_SZ];
	char str_next_state[MAX_SCF_STATE_STRING_SZ];
	protocol_states_t *states;
	int err;
	ssize_t sz;

	/*
	 * Might fail due to a bad event or a lack of memory. Try
	 * the callback again to see if it goes better the next time.
	 */
	if (sysevent_get_attr_list(syse, &attr_list) != 0)
		return (EAGAIN);

	if ((nvlist_lookup_int32(attr_list, RESTARTER_NAME_STATE,
	    &state) != 0) ||
	    (nvlist_lookup_int32(attr_list, RESTARTER_NAME_NEXT_STATE,
	    &next_state) != 0) ||
	    (nvlist_lookup_int32(attr_list, RESTARTER_NAME_ERROR, &err) != 0) ||
	    (nvlist_lookup_string(attr_list, RESTARTER_NAME_INSTANCE,
	    &instance_name) != 0))
		uu_die("%s: can't decode nvlist\n", fmri);

	states = startd_alloc(sizeof (protocol_states_t));
	states->ps_state = state;
	states->ps_state_next = next_state;
	states->ps_err = err;

	graph_protocol_send_event(instance_name, GRAPH_UPDATE_STATE_CHANGE,
	    states);

	sz = restarter_state_to_string(state, str_state, sizeof (str_state));
	assert(sz < sizeof (str_state));
	sz = restarter_state_to_string(next_state, str_next_state,
	    sizeof (str_next_state));
	assert(sz < sizeof (str_next_state));
	log_framework(LOG_DEBUG, "%s: state updates for %s (%s, %s)\n", fmri,
	    instance_name, str_state, str_next_state);
	nvlist_free(attr_list);
	return (0);
}
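/*
 * evchan_t *restarter_protocol_init_delegate()
 *   Set up the event channels for a delegated restarter: bind the
 *   delegate and master GPEC channels, subscribe state_cb() on the
 *   master channel to receive the delegate's instance state updates,
 *   and return the delegate channel on which restarter events are sent.
 *   Returns NULL if fmri names the master restarter, svc.startd itself.
 */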
evchan_t *
restarter_protocol_init_delegate(char *fmri)
{
	char *delegate_channel_name, *master_channel_name, *sid;
	evchan_t *delegate_channel, *master_channel;

	/* master restarter -- nothing to do */
	if (strcmp(fmri, SCF_SERVICE_STARTD) == 0)
		return (NULL);

	log_framework(LOG_DEBUG, "%s: Initializing protocol for delegate\n",
	    fmri);

	if ((delegate_channel_name = _restarter_get_channel_name(fmri,
	    RESTARTER_CHANNEL_DELEGATE)) == NULL ||
	    (master_channel_name = _restarter_get_channel_name(fmri,
	    RESTARTER_CHANNEL_MASTER)) == NULL ||
	    (sid = strdup("svc.startd")) == NULL)
		uu_die("Allocation failure\n");

	if (sysevent_evc_bind(delegate_channel_name, &delegate_channel,
	    EVCH_CREAT|EVCH_HOLD_PEND) != 0)
		uu_die("%s: sysevent_evc_bind failed: %s\n",
		    delegate_channel_name, strerror(errno));
	if (sysevent_evc_bind(master_channel_name, &master_channel,
	    EVCH_CREAT|EVCH_HOLD_PEND) != 0)
		uu_die("%s: sysevent_evc_bind failed: %s\n",
		    master_channel_name, strerror(errno));
	log_framework(LOG_DEBUG,
	    "%s: Bound to channel %s (delegate), %s (master)\n", fmri,
	    delegate_channel_name, master_channel_name);

	if (sysevent_evc_subscribe(master_channel, sid, EC_ALL,
	    state_cb, fmri, EVCH_SUB_KEEP) != 0)
		uu_die("%s: Failed to subscribe to channel %s with "
		    "subscriber id %s: %s\n", fmri,
		    master_channel_name, sid, strerror(errno));
	log_framework(LOG_DEBUG,
	    "%s: Subscribed to channel %s with subscriber id %s\n", fmri,
	    master_channel_name, "svc.startd");

	free(delegate_channel_name);
	free(master_channel_name);
	free(sid);

	return (delegate_channel);
}
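/*
 * void restarter_protocol_send_event()
 *   Send a restarter event for inst.  If chan is NULL, the instance is
 *   managed by svc.startd itself, so the event is placed on the local
 *   restarter_queue and the restarter is woken; otherwise the event is
 *   published on the delegated restarter's GPEC channel.
 */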
void
restarter_protocol_send_event(const char *inst, evchan_t *chan,
    restarter_event_type_t event)
{
	nvlist_t *attr;

	/*
	 * If the service is managed by the master restarter,
	 * queue the event locally.
	 */
	if (chan == NULL) {
		restarter_event_enqueue(inst, event);
		MUTEX_LOCK(&ru->restarter_update_lock);
		ru->restarter_update_wakeup = 1;
		(void) pthread_cond_broadcast(&ru->restarter_update_cv);
		MUTEX_UNLOCK(&ru->restarter_update_lock);
		return;
	}

	/*
	 * Otherwise, send the event to the delegate.
	 */
	log_framework(LOG_DEBUG, "Sending %s to channel 0x%p for %s.\n",
	    event_names[event], chan, inst);
	if (nvlist_alloc(&attr, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_uint32(attr, RESTARTER_NAME_TYPE, event) != 0 ||
	    nvlist_add_string(attr, RESTARTER_NAME_INSTANCE, (char *)inst) != 0)
		uu_die("Allocation failure\n");

	if (sysevent_evc_publish(chan, "protocol", "restarter", "com.sun",
	    "svc.startd", attr, EVCH_NOSLEEP) != 0) {
		if (errno == EAGAIN)
			uu_die("%s: queue is full\n", inst);
		uu_die("%s: can't publish event: %s\n", inst, strerror(errno));
	}
	nvlist_free(attr);

	if (event != RESTARTER_EVENT_TYPE_ADD_INSTANCE) {
		/*
		 * Not relevant for graph loading.
		 */
		return;
	}

	/*
	 * For the purposes of loading state after interruption, this is
	 * sufficient, as svc.startd(1M) won't receive events on the contracts
	 * associated with each delegate.
	 */
	MUTEX_LOCK(&st->st_load_lock);
	if (--st->st_load_instances == 0)
		(void) pthread_cond_broadcast(&st->st_load_cv);
	MUTEX_UNLOCK(&st->st_load_lock);
}