/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * XXX: How do we, in general, know that objects referenced in events
 * have not been destroyed before we get around to handling the event?
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/stdarg.h>
#include <sys/time.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

TAILQ_HEAD(event_tailq_head, g_event);

static struct event_tailq_head g_events = TAILQ_HEAD_INITIALIZER(g_events);
static u_int g_pending_events;
static TAILQ_HEAD(,g_provider) g_doorstep = TAILQ_HEAD_INITIALIZER(g_doorstep);
static struct mtx g_eventlock;
static int g_wither_work;

#define G_N_EVENTREFS		20

struct g_event {
	TAILQ_ENTRY(g_event)	events;
	g_event_t		*func;
	void			*arg;
	int			flag;
	void			*ref[G_N_EVENTREFS];
};

#define EV_DONE		0x80000
#define EV_WAKEUP	0x40000
#define EV_CANCELED	0x20000
#define EV_INPROGRESS	0x10000

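/*
 * Block until the GEOM event queue has been drained.  Must be called
 * without the topology lock held; clears the thread's TDP_GEOM flag
 * before returning.
 */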
void
g_waitidle(struct thread *td)
{

	g_topology_assert_not();

	mtx_lock(&g_eventlock);
	TSWAIT("GEOM events");
	while (!TAILQ_EMPTY(&g_events))
		msleep(&g_pending_events, &g_eventlock, PPAUSE,
		    "g_waitidle", 0);
	TSUNWAIT("GEOM events");
	mtx_unlock(&g_eventlock);
	td->td_pflags &= ~TDP_GEOM;
}

static void
ast_geom(struct thread *td, int tda __unused)
{
	/*
	 * If this thread tickled GEOM, we need to wait for the giggling to
	 * stop before we return to userland.
	 */
	g_waitidle(td);
}

static void
geom_event_init(void *arg __unused)
{
	ast_register(TDA_GEOM, ASTR_ASTF_REQUIRED | ASTR_TDP | ASTR_KCLEAR,
	    TDP_GEOM, ast_geom);
}
SYSINIT(geom_event, SI_SUB_INTRINSIC, SI_ORDER_ANY, geom_event_init, NULL);

struct g_attrchanged_args {
	struct g_provider *pp;
	const char *attr;
};

static void
g_attr_changed_event(void *arg, int flag)
{
	struct g_attrchanged_args *args;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_consumer *next_cp;

	args = arg;
	pp = args->pp;

	g_topology_assert();
	if (flag != EV_CANCEL && g_shutdown == 0) {
		/*
		 * Tell all consumers of the change.
		 */
		LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) {
			if (cp->geom->attrchanged != NULL)
				cp->geom->attrchanged(cp, args->attr);
		}
	}
	g_free(args);
}

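/*
 * Post an event notifying every consumer of the provider that the named
 * attribute has changed.  Returns ENOMEM if the event could not be
 * allocated with the given malloc flag.
 */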
int
g_attr_changed(struct g_provider *pp, const char *attr, int flag)
{
	struct g_attrchanged_args *args;
	int error;

	args = g_malloc(sizeof *args, flag);
	if (args == NULL)
		return (ENOMEM);
	args->pp = pp;
	args->attr = attr;
	error = g_post_event(g_attr_changed_event, args, flag, pp, NULL);
	if (error != 0)
		g_free(args);
	return (error);
}

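/*
 * Mark a provider as orphaned and queue it for the event handler, which
 * will notify its consumers via g_orphan_register().  May be called
 * without the topology lock held.
 */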
void
g_orphan_provider(struct g_provider *pp, int error)
{

	/* G_VALID_PROVIDER(pp)  We likely lack topology lock */
	g_trace(G_T_TOPOLOGY, "g_orphan_provider(%p(%s), %d)",
	    pp, pp->name, error);
	KASSERT(error != 0,
	    ("g_orphan_provider(%p(%s), 0) error must be non-zero\n",
	    pp, pp->name));

	pp->error = error;
	mtx_lock(&g_eventlock);
	KASSERT(!(pp->flags & G_PF_ORPHAN),
	    ("g_orphan_provider(%p(%s)), already an orphan", pp, pp->name));
	pp->flags |= G_PF_ORPHAN;
	TAILQ_INSERT_TAIL(&g_doorstep, pp, orphan);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}

/*
 * This function is called once on each provider which the event handler
 * finds on its g_doorstep.
 */

static void
g_orphan_register(struct g_provider *pp)
{
	struct g_consumer *cp, *cp2;
	int wf;

	g_topology_assert();
	G_VALID_PROVIDER(pp);
	g_trace(G_T_TOPOLOGY, "g_orphan_register(%s)", pp->name);

	g_cancel_event(pp);

	wf = pp->flags & G_PF_WITHER;
	pp->flags &= ~G_PF_WITHER;

	/*
	 * Tell all consumers the bad news.
	 * Don't be surprised if they self-destruct.
	 */
	LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) {
		KASSERT(cp->geom->orphan != NULL,
		    ("geom %s has no orphan, class %s",
		    cp->geom->name, cp->geom->class->name));
		/*
		 * XXX: The g_dev_orphan() method destroys things in a
		 * deferred fashion, so it is possible that another event
		 * has already called the orphan method.  Check the
		 * consumer's flags so we do not schedule it twice.
		 */
		if (cp->flags & G_CF_ORPHAN)
			continue;
		cp->flags |= G_CF_ORPHAN;
		cp->geom->orphan(cp);
	}
	if (LIST_EMPTY(&pp->consumers) && wf)
		g_destroy_provider(pp);
	else
		pp->flags |= wf;
#ifdef notyet
	cp = LIST_FIRST(&pp->consumers);
	if (cp != NULL)
		return;
	if (pp->geom->flags & G_GEOM_WITHER)
		g_destroy_provider(pp);
#endif
}

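/*
 * Handle a single orphaned provider or queued event.  Returns non-zero
 * after doing some work, with the event lock released; returns zero,
 * with the event lock still held, once both queues are empty.
 */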
static int
one_event(void)
{
	struct g_event *ep;
	struct g_provider *pp;

	g_topology_assert();
	mtx_lock(&g_eventlock);
	pp = TAILQ_FIRST(&g_doorstep);
	if (pp != NULL) {
		G_VALID_PROVIDER(pp);
		TAILQ_REMOVE(&g_doorstep, pp, orphan);
		mtx_unlock(&g_eventlock);
		g_orphan_register(pp);
		return (1);
	}

	ep = TAILQ_FIRST(&g_events);
	if (ep == NULL) {
		wakeup(&g_pending_events);
		return (0);
	}
	ep->flag |= EV_INPROGRESS;
	mtx_unlock(&g_eventlock);
	g_topology_assert();
	ep->func(ep->arg, 0);
	g_topology_assert();
	mtx_lock(&g_eventlock);
	TSRELEASE("GEOM events");
	TAILQ_REMOVE(&g_events, ep, events);
	ep->flag &= ~EV_INPROGRESS;
	if (ep->flag & EV_WAKEUP) {
		ep->flag |= EV_DONE;
		wakeup(ep);
		mtx_unlock(&g_eventlock);
	} else {
		mtx_unlock(&g_eventlock);
		g_free(ep);
	}
	return (1);
}

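/*
 * Main loop for GEOM event handling: drain the orphan and event queues,
 * run the wither-washer when requested, and sleep until more work
 * arrives.  This function never returns.
 */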
void
g_run_events(void)
{

	for (;;) {
		g_topology_lock();
		while (one_event())
			;
		mtx_assert(&g_eventlock, MA_OWNED);
		if (g_wither_work) {
			g_wither_work = 0;
			mtx_unlock(&g_eventlock);
			g_wither_washer();
			g_topology_unlock();
		} else {
			g_topology_unlock();
			msleep(&g_wait_event, &g_eventlock, PRIBIO | PDROP,
			    "-", 0);
		}
	}
	/* NOTREACHED */
}

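/*
 * Cancel any pending orphan registration and any queued events that hold
 * a reference to 'ref'.  Events already in progress are left to complete.
 */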
void
g_cancel_event(void *ref)
{
	struct g_event *ep, *epn;
	struct g_provider *pp;
	u_int n;

	mtx_lock(&g_eventlock);
	TAILQ_FOREACH(pp, &g_doorstep, orphan) {
		if (pp != ref)
			continue;
		TAILQ_REMOVE(&g_doorstep, pp, orphan);
		break;
	}
	TAILQ_FOREACH_SAFE(ep, &g_events, events, epn) {
		if (ep->flag & EV_INPROGRESS)
			continue;
		for (n = 0; n < G_N_EVENTREFS; n++) {
			if (ep->ref[n] == NULL)
				break;
			if (ep->ref[n] != ref)
				continue;
			TSRELEASE("GEOM events");
			TAILQ_REMOVE(&g_events, ep, events);
			ep->func(ep->arg, EV_CANCEL);
			mtx_assert(&g_eventlock, MA_OWNED);
			if (ep->flag & EV_WAKEUP) {
				ep->flag |= (EV_DONE|EV_CANCELED);
				wakeup(ep);
			} else {
				g_free(ep);
			}
			break;
		}
	}
	if (TAILQ_EMPTY(&g_events))
		wakeup(&g_pending_events);
	mtx_unlock(&g_eventlock);
}

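/*
 * Allocate a zeroed event structure for later use with g_post_event_ep().
 * 'flag' must be M_WAITOK or M_NOWAIT.
 */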
struct g_event *
g_alloc_event(int flag)
{
	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_alloc_event"));

	return (g_malloc(sizeof(struct g_event), flag | M_ZERO));
}

static void
g_post_event_ep_va(g_event_t *func, void *arg, int wuflag,
    struct g_event *ep, va_list ap)
{
	void *p;
	u_int n;

	ep->flag = wuflag;
	for (n = 0; n < G_N_EVENTREFS; n++) {
		p = va_arg(ap, void *);
		if (p == NULL)
			break;
		g_trace(G_T_TOPOLOGY, " ref %p", p);
		ep->ref[n] = p;
	}
	KASSERT(p == NULL, ("Too many references to event"));
	ep->func = func;
	ep->arg = arg;
	mtx_lock(&g_eventlock);
	TSHOLD("GEOM events");
	TAILQ_INSERT_TAIL(&g_events, ep, events);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
	curthread->td_pflags |= TDP_GEOM;
	ast_sched(curthread, TDA_GEOM);
}

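/*
 * Post an event using a pre-allocated event structure.  The variadic
 * arguments form a NULL-terminated list of references attached to the
 * event.
 */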
void
g_post_event_ep(g_event_t *func, void *arg, struct g_event *ep, ...)
{
	va_list ap;

	va_start(ap, ep);
	g_post_event_ep_va(func, arg, 0, ep, ap);
	va_end(ap);
}

static int
g_post_event_x(g_event_t *func, void *arg, int flag, int wuflag,
    struct g_event **epp, va_list ap)
{
	struct g_event *ep;

	g_trace(G_T_TOPOLOGY, "g_post_event_x(%p, %p, %d, %d)",
	    func, arg, flag, wuflag);
	KASSERT(wuflag == 0 || wuflag == EV_WAKEUP,
	    ("Wrong wuflag in g_post_event_x(0x%x)", wuflag));
	ep = g_alloc_event(flag);
	if (ep == NULL)
		return (ENOMEM);
	if (epp != NULL)
		*epp = ep;
	g_post_event_ep_va(func, arg, wuflag, ep, ap);
	return (0);
}

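/*
 * Post an event for execution by the event handler.  'flag' must be
 * M_WAITOK or M_NOWAIT; the variadic arguments form a NULL-terminated
 * list of references that can later be used to cancel the event.
 */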
int
g_post_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	int i;

	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_post_event"));
	va_start(ap, flag);
	i = g_post_event_x(func, arg, flag, 0, NULL, ap);
	va_end(ap);
	return (i);
}

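/*
 * Ask the event handler to run the wither-washer on its next pass.
 */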
void
g_do_wither(void)
{

	mtx_lock(&g_eventlock);
	g_wither_work = 1;
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}

/*
 * XXX: It might actually be useful to call this function with topology held.
 * XXX: This would ensure that the event gets created before anything else
 * XXX: changes.  At present all users have a handle on things in some other
 * XXX: way, so this remains an XXX for now.
 */

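/*
 * Post an event and sleep until it has been run, returning EAGAIN if the
 * event was cancelled before it could execute.
 */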
int
g_waitfor_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	struct g_event *ep;
	int error;

	g_topology_assert_not();
	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_waitfor_event"));
	va_start(ap, flag);
	error = g_post_event_x(func, arg, flag, EV_WAKEUP, &ep, ap);
	va_end(ap);
	if (error)
		return (error);

	mtx_lock(&g_eventlock);
	while (!(ep->flag & EV_DONE))
		msleep(ep, &g_eventlock, PRIBIO, "g_waitfor_event", 0);
	if (ep->flag & EV_CANCELED)
		error = EAGAIN;
	mtx_unlock(&g_eventlock);

	g_free(ep);
	return (error);
}

void
g_event_init(void)
{

	mtx_init(&g_eventlock, "GEOM orphanage", NULL, MTX_DEF);
}