/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * XXX: How do we in general know that objects referenced in events
 * have not been destroyed before we get around to handling the event?
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/stdarg.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

TAILQ_HEAD(event_tailq_head, g_event);

static struct event_tailq_head g_events = TAILQ_HEAD_INITIALIZER(g_events);
static u_int g_pending_events;		/* events queued but not yet completed */
static TAILQ_HEAD(,g_provider) g_doorstep = TAILQ_HEAD_INITIALIZER(g_doorstep);
static struct mtx g_eventlock;		/* protects g_events and g_doorstep */
static struct sx g_eventstall;		/* held exclusively to stall event processing */

#define G_N_EVENTREFS		20

struct g_event {
        TAILQ_ENTRY(g_event)	events;
        g_event_t		*func;
        void			*arg;
        int			flag;		/* M_* malloc flag plus EV_* bits */
        void			*ref[G_N_EVENTREFS];	/* references matched by g_cancel_event() */
};

#define EV_DONE		0x80000		/* event has been run or canceled */
#define EV_WAKEUP	0x40000		/* a poster is sleeping on this event */
#define EV_CANCELED	0x20000		/* event was canceled, not run */

void
g_waitidle(void)
{

        while (g_pending_events)
                tsleep(&g_pending_events, PPAUSE, "g_waitidle", hz/5);
}

void
g_stall_events(void)
{

        sx_xlock(&g_eventstall);
}

void
g_release_events(void)
{

        sx_xunlock(&g_eventstall);
}

void
g_orphan_provider(struct g_provider *pp, int error)
{

        g_trace(G_T_TOPOLOGY, "g_orphan_provider(%p(%s), %d)",
            pp, pp->name, error);
        KASSERT(error != 0,
            ("g_orphan_provider(%p(%s), 0) error must be non-zero\n",
             pp, pp->name));

        pp->error = error;
        mtx_lock(&g_eventlock);
        KASSERT(!(pp->flags & G_PF_ORPHAN),
            ("g_orphan_provider(%p(%s)), already an orphan", pp, pp->name));
        pp->flags |= G_PF_ORPHAN;
        TAILQ_INSERT_TAIL(&g_doorstep, pp, orphan);
        mtx_unlock(&g_eventlock);
        wakeup(&g_wait_event);
}

/*
 * This function is called once on each provider which the event handler
 * finds on its g_doorstep.
 */

static void
g_orphan_register(struct g_provider *pp)
{
        struct g_consumer *cp, *cp2;
        int wf;

        g_trace(G_T_TOPOLOGY, "g_orphan_register(%s)", pp->name);
        g_topology_assert();

        wf = pp->flags & G_PF_WITHER;
        pp->flags &= ~G_PF_WITHER;

        /*
         * Tell all consumers the bad news.
         * Don't be surprised if they self-destruct.
         */
        cp = LIST_FIRST(&pp->consumers);
        while (cp != NULL) {
                cp2 = LIST_NEXT(cp, consumers);
                KASSERT(cp->geom->orphan != NULL,
                    ("geom %s has no orphan, class %s",
                    cp->geom->name, cp->geom->class->name));
                cp->geom->orphan(cp);
                cp = cp2;
        }
        if (LIST_EMPTY(&pp->consumers) && wf)
                g_destroy_provider(pp);
        else
                pp->flags |= wf;
#ifdef notyet
        cp = LIST_FIRST(&pp->consumers);
        if (cp != NULL)
                return;
        if (pp->geom->flags & G_GEOM_WITHER)
                g_destroy_provider(pp);
#endif
}
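/*
 * Illustrative sketch, not part of the original file: roughly what a class
 * orphan method invoked above typically does.  The function name is made up,
 * and the access-release call is an assumption about this era of the tree
 * (g_access_rel(), later renamed g_access()); a real class may also need to
 * tear down or wither its geom afterwards.
 */
#ifdef EXAMPLE_ORPHAN_SKETCH		/* never defined; illustration only */
static void
example_orphan(struct g_consumer *cp)
{

        g_topology_assert();
        /* Release whatever access counts this consumer still holds. */
        if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
                g_access_rel(cp, -cp->acr, -cp->acw, -cp->ace);
        /* Detach from the dead provider and dispose of the consumer. */
        g_detach(cp);
        g_destroy_consumer(cp);
}
#endif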
static int
one_event(void)
{
        struct g_event *ep;
        struct g_provider *pp;

        sx_xlock(&g_eventstall);
        g_topology_lock();
        /* Register any providers that have been queued for orphaning. */
        for (;;) {
                mtx_lock(&g_eventlock);
                pp = TAILQ_FIRST(&g_doorstep);
                if (pp != NULL)
                        TAILQ_REMOVE(&g_doorstep, pp, orphan);
                mtx_unlock(&g_eventlock);
                if (pp == NULL)
                        break;
                g_orphan_register(pp);
        }
        /* Then run at most one queued event. */
        mtx_lock(&g_eventlock);
        ep = TAILQ_FIRST(&g_events);
        if (ep == NULL) {
                mtx_unlock(&g_eventlock);
                g_topology_unlock();
                sx_xunlock(&g_eventstall);
                return (0);
        }
        TAILQ_REMOVE(&g_events, ep, events);
        mtx_unlock(&g_eventlock);
        g_topology_assert();
        ep->func(ep->arg, 0);
        g_topology_assert();
        if (ep->flag & EV_WAKEUP) {
                /* The poster sleeps on the event and frees it itself. */
                ep->flag |= EV_DONE;
                wakeup(ep);
        } else {
                g_free(ep);
        }
        g_pending_events--;
        if (g_pending_events == 0)
                wakeup(&g_pending_events);
        g_topology_unlock();
        sx_xunlock(&g_eventstall);
        return (1);
}

void
g_run_events(void)
{

        while (one_event())
                ;
}
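/*
 * Illustrative sketch, not part of the original file: roughly how the GEOM
 * event kernel thread (see geom_kern.c) is expected to drive g_run_events().
 * It sleeps on g_wait_event, which g_orphan_provider() and g_post_event_x()
 * wake up; the exact loop body, priority and timeout here are assumptions.
 */
#ifdef EXAMPLE_EVENT_THREAD_SKETCH	/* never defined; illustration only */
static void
example_event_thread(void *arg __unused)
{

        for (;;) {
                g_run_events();
                tsleep(&g_wait_event, PRIBIO, "gevents", hz / 10);
        }
}
#endif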
void
g_cancel_event(void *ref)
{
        struct g_event *ep, *epn;
        struct g_provider *pp;
        u_int n;

        mtx_lock(&g_eventlock);
        TAILQ_FOREACH(pp, &g_doorstep, orphan) {
                if (pp != ref)
                        continue;
                TAILQ_REMOVE(&g_doorstep, pp, orphan);
                break;
        }
        for (ep = TAILQ_FIRST(&g_events); ep != NULL; ep = epn) {
                epn = TAILQ_NEXT(ep, events);
                for (n = 0; n < G_N_EVENTREFS; n++) {
                        if (ep->ref[n] == NULL)
                                break;
                        if (ep->ref[n] == ref) {
                                TAILQ_REMOVE(&g_events, ep, events);
                                ep->func(ep->arg, EV_CANCEL);
                                if (ep->flag & EV_WAKEUP) {
                                        ep->flag |= EV_DONE;
                                        ep->flag |= EV_CANCELED;
                                        wakeup(ep);
                                } else {
                                        g_free(ep);
                                }
                                break;
                        }
                }
        }
        mtx_unlock(&g_eventlock);
}

static int
g_post_event_x(g_event_t *func, void *arg, int flag, struct g_event **epp, va_list ap)
{
        struct g_event *ep;
        void *p;
        u_int n;

        g_trace(G_T_TOPOLOGY, "g_post_event_x(%p, %p, %d)", func, arg, flag);
        ep = g_malloc(sizeof *ep, flag | M_ZERO);
        if (ep == NULL)
                return (ENOMEM);
        ep->flag = flag;
        /* The variadic part is a NULL-terminated list of reference pointers. */
        for (n = 0; n < G_N_EVENTREFS; n++) {
                p = va_arg(ap, void *);
                if (p == NULL)
                        break;
                g_trace(G_T_TOPOLOGY, "  ref %p", p);
                ep->ref[n] = p;
        }
        KASSERT(p == NULL, ("Too many references to event"));
        ep->func = func;
        ep->arg = arg;
        mtx_lock(&g_eventlock);
        g_pending_events++;
        TAILQ_INSERT_TAIL(&g_events, ep, events);
        mtx_unlock(&g_eventlock);
        wakeup(&g_wait_event);
        if (epp != NULL)
                *epp = ep;
        return (0);
}

int
g_post_event(g_event_t *func, void *arg, int flag, ...)
{
        va_list ap;
        int i;

        KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
            ("Wrong flag to g_post_event"));
        va_start(ap, flag);
        i = g_post_event_x(func, arg, flag, NULL, ap);
        va_end(ap);
        return (i);
}

/*
 * XXX: It might actually be useful to call this function with topology held.
 * XXX: This would ensure that the event gets created before anything else
 * XXX: changes.  At present all users have a handle on things in some other
 * XXX: way, so this remains an XXX for now.
 */

int
g_waitfor_event(g_event_t *func, void *arg, int flag, ...)
{
        va_list ap;
        struct g_event *ep;
        int error;

        /* g_topology_assert_not(); */
        KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
            ("Wrong flag to g_waitfor_event"));
        va_start(ap, flag);
        error = g_post_event_x(func, arg, flag | EV_WAKEUP, &ep, ap);
        va_end(ap);
        if (error)
                return (error);
        do
                tsleep(ep, PRIBIO, "g_waitfor_event", hz);
        while (!(ep->flag & EV_DONE));
        if (ep->flag & EV_CANCELED)
                error = EAGAIN;
        g_free(ep);
        return (error);
}

void
g_event_init(void)
{

        mtx_init(&g_eventlock, "GEOM orphanage", NULL, MTX_DEF);
        sx_init(&g_eventstall, "GEOM event stalling");
}
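
/*
 * Illustrative sketch, not part of the original file: how a caller might use
 * the interface above.  When the event thread runs the callback, the topology
 * lock is held and flag is 0; if the event is canceled first, the callback is
 * called with EV_CANCEL instead.  The variadic arguments are reference
 * pointers, terminated by NULL, which g_cancel_event() matches against.
 * "example_softc", "example_ev" and "example_post" are made-up names.
 */
#ifdef EXAMPLE_USAGE_SKETCH		/* never defined; illustration only */
struct example_softc {
        struct g_provider	*pp;
        int			state;
};

static void
example_ev(void *arg, int flag)
{
        struct example_softc *sc;

        if (flag == EV_CANCEL)
                return;			/* canceled before it could run */
        sc = arg;
        sc->state = 1;			/* safe to change topology here */
}

static int
example_post(struct example_softc *sc)
{
        int error;

        /*
         * Queue the event; "sc" and "sc->pp" are recorded as references,
         * so g_cancel_event(sc) or g_cancel_event(sc->pp) would cancel it.
         */
        error = g_post_event(example_ev, sc, M_WAITOK, sc, sc->pp, NULL);
        if (error != 0)
                return (error);

        /* Or post and sleep until the event has been run or canceled. */
        error = g_waitfor_event(example_ev, sc, M_WAITOK, sc, NULL);
        return (error);			/* EAGAIN if it was canceled */
}
#endif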