xref: /freebsd/sys/geom/geom_event.c (revision 7562eaabc01a48e6b11d5b558c41e3b92dae5c2d)
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * XXX: How do we in general know that objects referenced in events
 * have not been destroyed before we get around to handling the event?
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

#include <machine/stdarg.h>

TAILQ_HEAD(event_tailq_head, g_event);

static struct event_tailq_head g_events = TAILQ_HEAD_INITIALIZER(g_events);
static u_int g_pending_events;
static TAILQ_HEAD(,g_provider) g_doorstep = TAILQ_HEAD_INITIALIZER(g_doorstep);
static struct mtx g_eventlock;
static int g_wither_work;

#define G_N_EVENTREFS		20

struct g_event {
	TAILQ_ENTRY(g_event)	events;
	g_event_t		*func;
	void			*arg;
	int			flag;
	void			*ref[G_N_EVENTREFS];
};

#define EV_DONE		0x80000
#define EV_WAKEUP	0x40000
#define EV_CANCELED	0x20000

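/*
 * Wait for the event queue to drain, then clear the thread's TDP_GEOM
 * flag.
 */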
void
g_waitidle(void)
{

	g_topology_assert_not();
	mtx_assert(&Giant, MA_NOTOWNED);

	while (g_pending_events)
		tsleep(&g_pending_events, PPAUSE, "g_waitidle", hz/5);
	curthread->td_pflags &= ~TDP_GEOM;
}

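/*
 * Announce that a provider has failed.  The provider is put on the
 * orphan doorstep and the event thread is woken up; the actual
 * orphaning is done later by g_orphan_register() with the topology
 * lock held.
 */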
void
g_orphan_provider(struct g_provider *pp, int error)
{

	/* G_VALID_PROVIDER(pp)  We likely lack topology lock */
	g_trace(G_T_TOPOLOGY, "g_orphan_provider(%p(%s), %d)",
	    pp, pp->name, error);
	KASSERT(error != 0,
	    ("g_orphan_provider(%p(%s), 0) error must be non-zero\n",
	     pp, pp->name));

	pp->error = error;
	mtx_lock(&g_eventlock);
	KASSERT(!(pp->flags & G_PF_ORPHAN),
	    ("g_orphan_provider(%p(%s)), already an orphan", pp, pp->name));
	pp->flags |= G_PF_ORPHAN;
	TAILQ_INSERT_TAIL(&g_doorstep, pp, orphan);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}

/*
 * This function is called once for each provider which the event handler
 * finds on its g_doorstep.
 */

static void
g_orphan_register(struct g_provider *pp)
{
	struct g_consumer *cp, *cp2;
	int wf;

	g_topology_assert();
	G_VALID_PROVIDER(pp);
	g_trace(G_T_TOPOLOGY, "g_orphan_register(%s)", pp->name);

	wf = pp->flags & G_PF_WITHER;
	pp->flags &= ~G_PF_WITHER;

	/*
	 * Tell all consumers the bad news.
	 * Don't be surprised if they self-destruct.
	 */
	cp = LIST_FIRST(&pp->consumers);
	while (cp != NULL) {
		cp2 = LIST_NEXT(cp, consumers);
		KASSERT(cp->geom->orphan != NULL,
		    ("geom %s has no orphan, class %s",
		    cp->geom->name, cp->geom->class->name));
		cp->geom->orphan(cp);
		cp = cp2;
	}
	if (LIST_EMPTY(&pp->consumers) && wf)
		g_destroy_provider(pp);
	else
		pp->flags |= wf;
#ifdef notyet
	cp = LIST_FIRST(&pp->consumers);
	if (cp != NULL)
		return;
	if (pp->geom->flags & G_GEOM_WITHER)
		g_destroy_provider(pp);
#endif
}

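/*
 * Drain the orphan doorstep, then run a single queued event.  Returns
 * non-zero if an event was run and zero once the queue is empty.
 */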
static int
one_event(void)
{
	struct g_event *ep;
	struct g_provider *pp;

	g_topology_lock();
	for (;;) {
		mtx_lock(&g_eventlock);
		pp = TAILQ_FIRST(&g_doorstep);
		if (pp != NULL) {
			G_VALID_PROVIDER(pp);
			TAILQ_REMOVE(&g_doorstep, pp, orphan);
		}
		mtx_unlock(&g_eventlock);
		if (pp == NULL)
			break;
		g_orphan_register(pp);
	}
	mtx_lock(&g_eventlock);
	ep = TAILQ_FIRST(&g_events);
	if (ep == NULL) {
		mtx_unlock(&g_eventlock);
		g_topology_unlock();
		return (0);
	}
	TAILQ_REMOVE(&g_events, ep, events);
	mtx_unlock(&g_eventlock);
	g_topology_assert();
	ep->func(ep->arg, 0);
	g_topology_assert();
	if (ep->flag & EV_WAKEUP) {
		ep->flag |= EV_DONE;
		wakeup(ep);
	} else {
		g_free(ep);
	}
	g_pending_events--;
	if (g_pending_events == 0)
		wakeup(&g_pending_events);
	g_topology_unlock();
	return (1);
}

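/*
 * Run all queued events and, when withering work has been requested,
 * keep calling g_wither_washer() for as long as it reports progress.
 * This is the work loop of the GEOM event thread.
 */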
void
g_run_events(void)
{
	int i;

	while (one_event())
		;
	g_topology_lock();
	i = g_wither_work;
	while (i) {
		i = g_wither_washer();
		g_wither_work = i & 1;
		i &= 2;
	}
	g_topology_unlock();
}

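/*
 * Cancel any pending orphan announcement for 'ref' and any queued
 * events that list 'ref' among their references.  Canceled events are
 * still delivered to their handler, with EV_CANCEL, so that it can
 * release whatever resources 'arg' refers to.
 */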
void
g_cancel_event(void *ref)
{
	struct g_event *ep, *epn;
	struct g_provider *pp;
	u_int n;

	mtx_lock(&g_eventlock);
	TAILQ_FOREACH(pp, &g_doorstep, orphan) {
		if (pp != ref)
			continue;
		TAILQ_REMOVE(&g_doorstep, pp, orphan);
		break;
	}
	for (ep = TAILQ_FIRST(&g_events); ep != NULL; ep = epn) {
		epn = TAILQ_NEXT(ep, events);
		for (n = 0; n < G_N_EVENTREFS; n++) {
			if (ep->ref[n] == NULL)
				break;
			if (ep->ref[n] == ref) {
				TAILQ_REMOVE(&g_events, ep, events);
				ep->func(ep->arg, EV_CANCEL);
				if (ep->flag & EV_WAKEUP) {
					ep->flag |= EV_DONE;
					ep->flag |= EV_CANCELED;
					wakeup(ep);
				} else {
					g_free(ep);
				}
				if (--g_pending_events == 0)
					wakeup(&g_pending_events);
				break;
			}
		}
	}
	mtx_unlock(&g_eventlock);
}

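/*
 * Common worker for g_post_event() and g_waitfor_event(): allocate the
 * event, copy the NULL-terminated reference list from 'ap', queue the
 * event and wake up the event thread.
 */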
static int
g_post_event_x(g_event_t *func, void *arg, int flag, int wuflag,
    struct g_event **epp, va_list ap)
{
	struct g_event *ep;
	void *p;
	u_int n;

	g_trace(G_T_TOPOLOGY, "g_post_event_x(%p, %p, %d, %d)",
	    func, arg, flag, wuflag);
	KASSERT(wuflag == 0 || wuflag == EV_WAKEUP,
	    ("Wrong wuflag in g_post_event_x(0x%x)", wuflag));
	ep = g_malloc(sizeof *ep, flag | M_ZERO);
	if (ep == NULL)
		return (ENOMEM);
	ep->flag = wuflag;
	for (n = 0; n < G_N_EVENTREFS; n++) {
		p = va_arg(ap, void *);
		if (p == NULL)
			break;
		g_trace(G_T_TOPOLOGY, "  ref %p", p);
		ep->ref[n] = p;
	}
	KASSERT(p == NULL, ("Too many references to event"));
	ep->func = func;
	ep->arg = arg;
	mtx_lock(&g_eventlock);
	g_pending_events++;
	TAILQ_INSERT_TAIL(&g_events, ep, events);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
	if (epp != NULL)
		*epp = ep;
	curthread->td_pflags |= TDP_GEOM;
	return (0);
}

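/*
 * Post an event to be run by the event thread with the topology lock
 * held.  'flag' must be M_WAITOK or M_NOWAIT and is passed to the
 * allocator; the variable arguments are a NULL-terminated list of
 * references that g_cancel_event() can later match against.
 *
 * A minimal usage sketch (the callback and softc names are made up):
 *
 *	static void
 *	example_ev(void *arg, int flag)
 *	{
 *		struct example_softc *sc = arg;
 *
 *		if (flag == EV_CANCEL)
 *			return;
 *		g_topology_assert();
 *		... tear down or reconfigure using sc ...
 *	}
 *
 *	error = g_post_event(example_ev, sc, M_WAITOK, sc, NULL);
 */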
int
g_post_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	int i;

	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_post_event"));
	va_start(ap, flag);
	i = g_post_event_x(func, arg, flag, 0, NULL, ap);
	va_end(ap);
	return (i);
}

void
g_do_wither(void)
{

	g_wither_work = 1;
	wakeup(&g_wait_event);
}

/*
 * XXX: It might actually be useful to call this function with topology held.
 * XXX: This would ensure that the event gets created before anything else
 * XXX: changes.  At present all users have a handle on things in some other
 * XXX: way, so this remains an XXX for now.
 */

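/*
 * Post an event and sleep until it has been run or canceled.  Returns
 * EAGAIN if the event was canceled before it got to run.
 */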
int
g_waitfor_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	struct g_event *ep;
	int error;

	g_topology_assert_not();
	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_waitfor_event"));
	va_start(ap, flag);
	error = g_post_event_x(func, arg, flag, EV_WAKEUP, &ep, ap);
	va_end(ap);
	if (error)
		return (error);
	do
		tsleep(ep, PRIBIO, "g_waitfor_event", hz);
	while (!(ep->flag & EV_DONE));
	if (ep->flag & EV_CANCELED)
		error = EAGAIN;
	g_free(ep);
	return (error);
}

void
g_event_init(void)
{

	mtx_init(&g_eventlock, "GEOM orphanage", NULL, MTX_DEF);
}