/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * XXX: How do we in general know that objects referenced in events
 * have not been destroyed before we get around to handling the event?
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

#include <machine/stdarg.h>

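/*
 * Events are queued on g_events and run one at a time by the event
 * thread (g_run_events()).  Orphaned providers are queued separately
 * on g_doorstep and are handled ahead of regular events.  Both queues
 * are protected by g_eventlock.
 */
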
TAILQ_HEAD(event_tailq_head, g_event);

static struct event_tailq_head g_events = TAILQ_HEAD_INITIALIZER(g_events);
static u_int g_pending_events;
static TAILQ_HEAD(,g_provider) g_doorstep = TAILQ_HEAD_INITIALIZER(g_doorstep);
static struct mtx g_eventlock;
static int g_wither_work;

#define G_N_EVENTREFS		20

struct g_event {
	TAILQ_ENTRY(g_event)	events;
	g_event_t		*func;
	void			*arg;
	int			flag;
	void			*ref[G_N_EVENTREFS];
};

#define EV_DONE		0x80000
#define EV_WAKEUP	0x40000
#define EV_CANCELED	0x20000
#define EV_INPROGRESS	0x10000

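/*
 * Wait until the event queue is empty.  Sleeps on g_pending_events,
 * re-checking the queue at least every hz/5 ticks, and clears the
 * caller's TDP_GEOM flag before returning.
 */
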
void
g_waitidle(void)
{

	g_topology_assert_not();
	mtx_assert(&Giant, MA_NOTOWNED);

	mtx_lock(&g_eventlock);
	while (!TAILQ_EMPTY(&g_events))
		msleep(&g_pending_events, &g_eventlock, PPAUSE,
		    "g_waitidle", hz/5);
	mtx_unlock(&g_eventlock);
	curthread->td_pflags &= ~TDP_GEOM;
}

#if 0
void
g_waitidlelock(void)
{

	g_topology_assert();
	mtx_lock(&g_eventlock);
	while (!TAILQ_EMPTY(&g_events)) {
		g_topology_unlock();
		msleep(&g_pending_events, &g_eventlock, PPAUSE,
		    "g_waitidlel", hz/5);
		g_topology_lock();
	}
	mtx_unlock(&g_eventlock);
}
#endif

struct g_attrchanged_args {
	struct g_provider *pp;
	const char *attr;
};

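/*
 * Event handler for g_attr_changed(): notify every consumer of the
 * provider that implements an attrchanged method, unless the event was
 * canceled or we are shutting down.
 */
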
static void
g_attr_changed_event(void *arg, int flag)
{
	struct g_attrchanged_args *args;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_consumer *next_cp;

	args = arg;
	pp = args->pp;

	g_topology_assert();
	if (flag != EV_CANCEL && g_shutdown == 0) {
		/*
		 * Tell all consumers of the change.
		 */
		LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) {
			if (cp->geom->attrchanged != NULL)
				cp->geom->attrchanged(cp, args->attr);
		}
	}
	g_free(args);
}

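/*
 * Post an event announcing that the named attribute of a provider has
 * changed.  The flag is M_WAITOK or M_NOWAIT, as for g_malloc().
 */
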
int
g_attr_changed(struct g_provider *pp, const char *attr, int flag)
{
	struct g_attrchanged_args *args;
	int error;

	args = g_malloc(sizeof *args, flag);
	if (args == NULL)
		return (ENOMEM);
	args->pp = pp;
	args->attr = attr;
	error = g_post_event(g_attr_changed_event, args, flag, pp, NULL);
	if (error != 0)
		g_free(args);
	return (error);
}

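/*
 * Mark a provider as orphaned and queue it on g_doorstep for the event
 * thread to process.  May be called without the topology lock held.
 */
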
void
g_orphan_provider(struct g_provider *pp, int error)
{

	/* G_VALID_PROVIDER(pp): we likely lack the topology lock */
	g_trace(G_T_TOPOLOGY, "g_orphan_provider(%p(%s), %d)",
	    pp, pp->name, error);
	KASSERT(error != 0,
	    ("g_orphan_provider(%p(%s), 0) error must be non-zero\n",
	     pp, pp->name));

	pp->error = error;
	mtx_lock(&g_eventlock);
	KASSERT(!(pp->flags & G_PF_ORPHAN),
	    ("g_orphan_provider(%p(%s)), already an orphan", pp, pp->name));
	pp->flags |= G_PF_ORPHAN;
	TAILQ_INSERT_TAIL(&g_doorstep, pp, orphan);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}

/*
 * This function is called once on each provider that the event handler
 * finds on its g_doorstep.
 */

static void
g_orphan_register(struct g_provider *pp)
{
	struct g_consumer *cp, *cp2;
	int wf;

	g_topology_assert();
	G_VALID_PROVIDER(pp);
	g_trace(G_T_TOPOLOGY, "g_orphan_register(%s)", pp->name);

	g_cancel_event(pp);

	wf = pp->flags & G_PF_WITHER;
	pp->flags &= ~G_PF_WITHER;

	/*
	 * Tell all consumers the bad news.
	 * Don't be surprised if they self-destruct.
	 */
	LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) {
		KASSERT(cp->geom->orphan != NULL,
		    ("geom %s has no orphan, class %s",
		    cp->geom->name, cp->geom->class->name));
		cp->flags |= G_CF_ORPHAN;
		cp->geom->orphan(cp);
	}
	if (LIST_EMPTY(&pp->consumers) && wf)
		g_destroy_provider(pp);
	else
		pp->flags |= wf;
#ifdef notyet
	cp = LIST_FIRST(&pp->consumers);
	if (cp != NULL)
		return;
	if (pp->geom->flags & G_GEOM_WITHER)
		g_destroy_provider(pp);
#endif
}

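/*
 * Process one item of work: first any orphaned provider on g_doorstep
 * with no I/O in flight (nstart == nend), otherwise the first pending
 * event.  Returns zero, with g_eventlock held, once both queues are
 * empty.
 */
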
static int
one_event(void)
{
	struct g_event *ep;
	struct g_provider *pp;

	g_topology_assert();
	mtx_lock(&g_eventlock);
	TAILQ_FOREACH(pp, &g_doorstep, orphan) {
		if (pp->nstart == pp->nend)
			break;
	}
	if (pp != NULL) {
		G_VALID_PROVIDER(pp);
		TAILQ_REMOVE(&g_doorstep, pp, orphan);
		mtx_unlock(&g_eventlock);
		g_orphan_register(pp);
		return (1);
	}

	ep = TAILQ_FIRST(&g_events);
	if (ep == NULL) {
		wakeup(&g_pending_events);
		return (0);
	}
	if (ep->flag & EV_INPROGRESS) {
		mtx_unlock(&g_eventlock);
		return (1);
	}
	ep->flag |= EV_INPROGRESS;
	mtx_unlock(&g_eventlock);
	g_topology_assert();
	ep->func(ep->arg, 0);
	g_topology_assert();
	mtx_lock(&g_eventlock);
	TAILQ_REMOVE(&g_events, ep, events);
	ep->flag &= ~EV_INPROGRESS;
	if (ep->flag & EV_WAKEUP) {
		ep->flag |= EV_DONE;
		mtx_unlock(&g_eventlock);
		wakeup(ep);
	} else {
		mtx_unlock(&g_eventlock);
		g_free(ep);
	}
	return (1);
}

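/*
 * Main loop of the event thread: run work until both queues are empty,
 * do any pending wither work, then sleep until more work arrives.
 * Never returns.
 */
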
void
g_run_events(void)
{

	for (;;) {
		g_topology_lock();
		while (one_event())
			;
		mtx_assert(&g_eventlock, MA_OWNED);
		if (g_wither_work) {
			g_wither_work = 0;
			mtx_unlock(&g_eventlock);
			g_wither_washer();
			g_topology_unlock();
		} else {
			g_topology_unlock();
			msleep(&g_wait_event, &g_eventlock, PRIBIO | PDROP,
			    "-", TAILQ_EMPTY(&g_doorstep) ? 0 : hz / 10);
		}
	}
	/* NOTREACHED */
}

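/*
 * Cancel pending work carrying the given reference: remove ref from
 * g_doorstep if it is an orphaned provider queued there, and run each
 * matching queued event's handler with EV_CANCEL.  Events already in
 * progress are left to complete.
 */
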
void
g_cancel_event(void *ref)
{
	struct g_event *ep, *epn;
	struct g_provider *pp;
	u_int n;

	mtx_lock(&g_eventlock);
	TAILQ_FOREACH(pp, &g_doorstep, orphan) {
		if (pp != ref)
			continue;
		TAILQ_REMOVE(&g_doorstep, pp, orphan);
		break;
	}
	TAILQ_FOREACH_SAFE(ep, &g_events, events, epn) {
		if (ep->flag & EV_INPROGRESS)
			continue;
		for (n = 0; n < G_N_EVENTREFS; n++) {
			if (ep->ref[n] == NULL)
				break;
			if (ep->ref[n] != ref)
				continue;
			TAILQ_REMOVE(&g_events, ep, events);
			ep->func(ep->arg, EV_CANCEL);
			mtx_assert(&g_eventlock, MA_OWNED);
			if (ep->flag & EV_WAKEUP) {
				ep->flag |= (EV_DONE|EV_CANCELED);
				wakeup(ep);
			} else {
				g_free(ep);
			}
			break;
		}
	}
	if (TAILQ_EMPTY(&g_events))
		wakeup(&g_pending_events);
	mtx_unlock(&g_eventlock);
}

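/*
 * Common code for g_post_event() and g_waitfor_event(): allocate an
 * event, record up to G_N_EVENTREFS references from the NULL-terminated
 * argument list, queue it and wake the event thread.
 */
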
static int
g_post_event_x(g_event_t *func, void *arg, int flag, int wuflag,
    struct g_event **epp, va_list ap)
{
	struct g_event *ep;
	void *p;
	u_int n;

	g_trace(G_T_TOPOLOGY, "g_post_event_x(%p, %p, %d, %d)",
	    func, arg, flag, wuflag);
	KASSERT(wuflag == 0 || wuflag == EV_WAKEUP,
	    ("Wrong wuflag in g_post_event_x(0x%x)", wuflag));
	ep = g_malloc(sizeof *ep, flag | M_ZERO);
	if (ep == NULL)
		return (ENOMEM);
	ep->flag = wuflag;
	for (n = 0; n < G_N_EVENTREFS; n++) {
		p = va_arg(ap, void *);
		if (p == NULL)
			break;
		g_trace(G_T_TOPOLOGY, "  ref %p", p);
		ep->ref[n] = p;
	}
	KASSERT(p == NULL, ("Too many references to event"));
	ep->func = func;
	ep->arg = arg;
	mtx_lock(&g_eventlock);
	TAILQ_INSERT_TAIL(&g_events, ep, events);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
	if (epp != NULL)
		*epp = ep;
	curthread->td_pflags |= TDP_GEOM;
	return (0);
}

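/*
 * Post an event for asynchronous execution by the event thread.  The
 * variable arguments are a NULL-terminated list of references that
 * g_cancel_event() can later be called with.
 */
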
int
g_post_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	int i;

	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_post_event"));
	va_start(ap, flag);
	i = g_post_event_x(func, arg, flag, 0, NULL, ap);
	va_end(ap);
	return (i);
}

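/*
 * Ask the event thread to run g_wither_washer() on its next pass.
 */
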
void
g_do_wither(void)
{

	mtx_lock(&g_eventlock);
	g_wither_work = 1;
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}

/*
 * XXX: It might actually be useful to call this function with topology held.
 * XXX: This would ensure that the event gets created before anything else
 * XXX: changes.  At present all users have a handle on things in some other
 * XXX: way, so this remains an XXX for now.
 */

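/*
 * Post an event and sleep until it has been run or canceled.  Must not
 * be called with the topology lock held.  Returns EAGAIN if the event
 * was canceled.
 */
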
int
g_waitfor_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	struct g_event *ep;
	int error;

	g_topology_assert_not();
	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_waitfor_event"));
	va_start(ap, flag);
	error = g_post_event_x(func, arg, flag, EV_WAKEUP, &ep, ap);
	va_end(ap);
	if (error)
		return (error);

	mtx_lock(&g_eventlock);
	while (!(ep->flag & EV_DONE))
		msleep(ep, &g_eventlock, PRIBIO, "g_waitfor_event", hz);
	if (ep->flag & EV_CANCELED)
		error = EAGAIN;
	mtx_unlock(&g_eventlock);

	g_free(ep);
	return (error);
}

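/*
 * Initialize the event lock; called once while GEOM is set up.
 */
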
void
g_event_init(void)
{

	mtx_init(&g_eventlock, "GEOM orphanage", NULL, MTX_DEF);
}