/* $FreeBSD$ */
/* $NetBSD: pfil.c,v 1.20 2001/11/12 23:49:46 lukem Exp $ */

/*-
 * Copyright (c) 1996 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/pfil.h>

static struct mtx pfil_global_lock;

MTX_SYSINIT(pfil_heads_lock, &pfil_global_lock, "pfil_head_list lock",
    MTX_DEF);

static int pfil_list_add(pfil_list_t *, struct packet_filter_hook *, int);

static int pfil_list_remove(pfil_list_t *,
    int (*)(void *, struct mbuf **, struct ifnet *, int, struct inpcb *),
    void *);

LIST_HEAD(pfilheadhead, pfil_head);
VNET_DEFINE(struct pfilheadhead, pfil_head_list);
#define V_pfil_head_list        VNET(pfil_head_list)
VNET_DEFINE(struct rmlock, pfil_lock);
#define V_pfil_lock     VNET(pfil_lock)

/*
 * pfil_run_hooks() runs the specified packet filter hooks.
 */
int
pfil_run_hooks(struct pfil_head *ph, struct mbuf **mp, struct ifnet *ifp,
    int dir, struct inpcb *inp)
{
        struct rm_priotracker rmpt;
        struct packet_filter_hook *pfh;
        struct mbuf *m = *mp;
        int rv = 0;

        PFIL_RLOCK(ph, &rmpt);
        KASSERT(ph->ph_nhooks >= 0, ("Pfil hook count dropped < 0"));
        for (pfh = pfil_hook_get(dir, ph); pfh != NULL;
             pfh = TAILQ_NEXT(pfh, pfil_link)) {
                if (pfh->pfil_func != NULL) {
                        rv = (*pfh->pfil_func)(pfh->pfil_arg, &m, ifp, dir,
                            inp);
                        if (rv != 0 || m == NULL)
                                break;
                }
        }
        PFIL_RUNLOCK(ph, &rmpt);
        *mp = m;
        return (rv);
}
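
/*
 * Example (a minimal sketch, never compiled because of the "#if 0" guard):
 * roughly how a consumer such as a protocol input path might run the hooks
 * on a head it obtained earlier.  The name "example_input_path" and its
 * caller are hypothetical; real consumers normally test ph->ph_nhooks
 * (the PFIL_HOOKED() convenience macro in net/pfil.h) before paying for
 * the call.
 */
#if 0
static int
example_input_path(struct pfil_head *ph, struct mbuf **mp, struct ifnet *ifp)
{
        int error;

        if (ph->ph_nhooks == 0)
                return (0);             /* No hooks registered, fast path. */
        error = pfil_run_hooks(ph, mp, ifp, PFIL_IN, NULL);
        if (error != 0 || *mp == NULL)
                return (error);         /* Packet rejected or consumed. */
        /* The hooks may have replaced *mp; keep using it from here on. */
        return (0);
}
#endif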

/*
 * pfil_try_rlock() acquires rm reader lock for specified head
 * if this is immediately possible.
 */
int
pfil_try_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

        return (PFIL_TRY_RLOCK(ph, tracker));
}

/*
 * pfil_rlock() acquires rm reader lock for specified head.
 */
void
pfil_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

        PFIL_RLOCK(ph, tracker);
}

/*
 * pfil_runlock() releases reader lock for specified head.
 */
void
pfil_runlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

        PFIL_RUNLOCK(ph, tracker);
}

/*
 * pfil_wlock() acquires writer lock for specified head.
 */
void
pfil_wlock(struct pfil_head *ph)
{

        PFIL_WLOCK(ph);
}

/*
 * pfil_wunlock() releases writer lock for specified head.
 */
void
pfil_wunlock(struct pfil_head *ph)
{

        PFIL_WUNLOCK(ph);
}

/*
 * pfil_wowned() returns a non-zero value if the current thread owns
 * an exclusive lock.
 */
int
pfil_wowned(struct pfil_head *ph)
{

        return (PFIL_WOWNED(ph));
}

/*
 * pfil_head_register() registers a pfil_head with the packet filter hook
 * mechanism.
 */
int
pfil_head_register(struct pfil_head *ph)
{
        struct pfil_head *lph;

        PFIL_LIST_LOCK();
        LIST_FOREACH(lph, &V_pfil_head_list, ph_list) {
                if (ph->ph_type == lph->ph_type &&
                    ph->ph_un.phu_val == lph->ph_un.phu_val) {
                        PFIL_LIST_UNLOCK();
                        return (EEXIST);
                }
        }
        PFIL_LOCK_INIT(ph);
        ph->ph_nhooks = 0;
        TAILQ_INIT(&ph->ph_in);
        TAILQ_INIT(&ph->ph_out);
        LIST_INSERT_HEAD(&V_pfil_head_list, ph, ph_list);
        PFIL_LIST_UNLOCK();
        return (0);
}

/*
 * pfil_head_unregister() removes a pfil_head from the packet filter hook
 * mechanism.  The producer of the hook promises that all outstanding
 * invocations of the hook have completed before it unregisters the hook.
 */
int
pfil_head_unregister(struct pfil_head *ph)
{
        struct packet_filter_hook *pfh, *pfnext;

        PFIL_LIST_LOCK();
        LIST_REMOVE(ph, ph_list);
        PFIL_LIST_UNLOCK();
        TAILQ_FOREACH_SAFE(pfh, &ph->ph_in, pfil_link, pfnext)
                free(pfh, M_IFADDR);
        TAILQ_FOREACH_SAFE(pfh, &ph->ph_out, pfil_link, pfnext)
                free(pfh, M_IFADDR);
        PFIL_LOCK_DESTROY(ph);
        return (0);
}

/*
 * pfil_head_get() returns the pfil_head for a given key/dlt.
 */
struct pfil_head *
pfil_head_get(int type, u_long val)
{
        struct pfil_head *ph;

        PFIL_LIST_LOCK();
        LIST_FOREACH(ph, &V_pfil_head_list, ph_list)
                if (ph->ph_type == type && ph->ph_un.phu_val == val)
                        break;
        PFIL_LIST_UNLOCK();
        return (ph);
}

/*
 * pfil_add_hook() adds a function to the packet filter hook.  The
 * flags are:
 *      PFIL_IN         call me on incoming packets
 *      PFIL_OUT        call me on outgoing packets
 *      PFIL_ALL        call me on all of the above
 *      PFIL_WAITOK     OK to call malloc with M_WAITOK.
 */
int
pfil_add_hook(int (*func)(void *, struct mbuf **, struct ifnet *, int,
    struct inpcb *), void *arg, int flags, struct pfil_head *ph)
{
        struct packet_filter_hook *pfh1 = NULL;
        struct packet_filter_hook *pfh2 = NULL;
        int err;

        if (flags & PFIL_IN) {
                pfh1 = (struct packet_filter_hook *)malloc(sizeof(*pfh1),
                    M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
                if (pfh1 == NULL) {
                        err = ENOMEM;
                        goto error;
                }
        }
        if (flags & PFIL_OUT) {
                pfh2 = (struct packet_filter_hook *)malloc(sizeof(*pfh2),
                    M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
                if (pfh2 == NULL) {
                        err = ENOMEM;
                        goto error;
                }
        }
        PFIL_WLOCK(ph);
        if (flags & PFIL_IN) {
                pfh1->pfil_func = func;
                pfh1->pfil_arg = arg;
                err = pfil_list_add(&ph->ph_in, pfh1, flags & ~PFIL_OUT);
                if (err)
                        goto locked_error;
                ph->ph_nhooks++;
        }
        if (flags & PFIL_OUT) {
                pfh2->pfil_func = func;
                pfh2->pfil_arg = arg;
                err = pfil_list_add(&ph->ph_out, pfh2, flags & ~PFIL_IN);
                if (err) {
                        if (flags & PFIL_IN)
                                pfil_list_remove(&ph->ph_in, func, arg);
                        goto locked_error;
                }
                ph->ph_nhooks++;
        }
        PFIL_WUNLOCK(ph);
        return (0);
locked_error:
        PFIL_WUNLOCK(ph);
error:
        if (pfh1 != NULL)
                free(pfh1, M_IFADDR);
        if (pfh2 != NULL)
                free(pfh2, M_IFADDR);
        return (err);
}
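
/*
 * Example (a minimal sketch, never compiled because of the "#if 0" guard):
 * how a filter module might attach itself at load time.  The names
 * "example_filter" and "example_filter_attach" are hypothetical; the flow
 * is the usual one for consumers: look up the AF_INET head registered by
 * the IP stack and hook both directions.
 */
#if 0
static int
example_filter(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{

        /* Return 0 to let the packet continue; a nonzero error stops it. */
        return (0);
}

static int
example_filter_attach(void)
{
        struct pfil_head *ph;

        ph = pfil_head_get(PFIL_TYPE_AF, AF_INET);
        if (ph == NULL)
                return (ENOENT);
        return (pfil_add_hook(example_filter, NULL, PFIL_ALL | PFIL_WAITOK,
            ph));
}
#endif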

/*
 * pfil_remove_hook removes a specific function from the packet filter hook
 * list.
 */
int
pfil_remove_hook(int (*func)(void *, struct mbuf **, struct ifnet *, int,
    struct inpcb *), void *arg, int flags, struct pfil_head *ph)
{
        int err = 0;

        PFIL_WLOCK(ph);
        if (flags & PFIL_IN) {
                err = pfil_list_remove(&ph->ph_in, func, arg);
                if (err == 0)
                        ph->ph_nhooks--;
        }
        if ((err == 0) && (flags & PFIL_OUT)) {
                err = pfil_list_remove(&ph->ph_out, func, arg);
                if (err == 0)
                        ph->ph_nhooks--;
        }
        PFIL_WUNLOCK(ph);
        return (err);
}
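
/*
 * Example (a minimal sketch, never compiled because of the "#if 0" guard):
 * the detach counterpart of the attach sketch above.  A module removes
 * exactly the (func, arg) pairs it added, in both directions, before it
 * unloads; "example_filter" and "example_filter_detach" are hypothetical.
 */
#if 0
static int
example_filter_detach(void)
{
        struct pfil_head *ph;

        ph = pfil_head_get(PFIL_TYPE_AF, AF_INET);
        if (ph == NULL)
                return (ENOENT);
        return (pfil_remove_hook(example_filter, NULL, PFIL_ALL, ph));
}
#endif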

static int
pfil_list_add(pfil_list_t *list, struct packet_filter_hook *pfh1, int flags)
{
        struct packet_filter_hook *pfh;

        /*
         * First make sure the hook is not already there.
         */
        TAILQ_FOREACH(pfh, list, pfil_link)
                if (pfh->pfil_func == pfh1->pfil_func &&
                    pfh->pfil_arg == pfh1->pfil_arg)
                        return (EEXIST);

        /*
         * Insert the input list in reverse order of the output list so that
         * the same path is followed in or out of the kernel.
         */
        if (flags & PFIL_IN)
                TAILQ_INSERT_HEAD(list, pfh1, pfil_link);
        else
                TAILQ_INSERT_TAIL(list, pfh1, pfil_link);
        return (0);
}

/*
 * pfil_list_remove is an internal function that takes a function off the
 * specified list.
 */
static int
pfil_list_remove(pfil_list_t *list,
    int (*func)(void *, struct mbuf **, struct ifnet *, int, struct inpcb *),
    void *arg)
{
        struct packet_filter_hook *pfh;

        TAILQ_FOREACH(pfh, list, pfil_link)
                if (pfh->pfil_func == func && pfh->pfil_arg == arg) {
                        TAILQ_REMOVE(list, pfh, pfil_link);
                        free(pfh, M_IFADDR);
                        return (0);
                }
        return (ENOENT);
}

/*
 * Stuff that must be initialized for every instance (including the first of
 * course).
 */
static int
vnet_pfil_init(const void *unused)
{

        LIST_INIT(&V_pfil_head_list);
        PFIL_LOCK_INIT_REAL(&V_pfil_lock, "shared");
        return (0);
}

/*
 * Called for the removal of each instance.
 */
static int
vnet_pfil_uninit(const void *unused)
{

        /* XXX should panic if list is not empty */
        PFIL_LOCK_DESTROY_REAL(&V_pfil_lock);
        return (0);
}

/* Define startup order. */
#define PFIL_SYSINIT_ORDER      SI_SUB_PROTO_BEGIN
#define PFIL_MODEVENT_ORDER     (SI_ORDER_FIRST) /* On boot slot in here. */
#define PFIL_VNET_ORDER         (PFIL_MODEVENT_ORDER + 2) /* Later still. */

/*
 * Starting up.
 *
 * VNET_SYSINIT is called for each existing vnet and each new vnet.
 */
VNET_SYSINIT(vnet_pfil_init, PFIL_SYSINIT_ORDER, PFIL_VNET_ORDER,
    vnet_pfil_init, NULL);

/*
 * Closing up shop.  These are done in REVERSE ORDER.  Not called on reboot.
 *
 * VNET_SYSUNINIT is called for each exiting vnet as it exits.
 */
VNET_SYSUNINIT(vnet_pfil_uninit, PFIL_SYSINIT_ORDER, PFIL_VNET_ORDER,
    vnet_pfil_uninit, NULL);