/* $FreeBSD$ */
/* $NetBSD: pfil.c,v 1.20 2001/11/12 23:49:46 lukem Exp $ */

/*-
 * Copyright (c) 1996 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/pfil.h>

static struct mtx pfil_global_lock;

MTX_SYSINIT(pfil_heads_lock, &pfil_global_lock, "pfil_head_list lock",
    MTX_DEF);

static struct packet_filter_hook *pfil_chain_get(int, struct pfil_head *);
static int pfil_chain_add(pfil_chain_t *, struct packet_filter_hook *, int);
static int pfil_chain_remove(pfil_chain_t *, pfil_func_t, void *);

LIST_HEAD(pfilheadhead, pfil_head);
VNET_DEFINE(struct pfilheadhead, pfil_head_list);
#define	V_pfil_head_list	VNET(pfil_head_list)
VNET_DEFINE(struct rmlock, pfil_lock);
#define	V_pfil_lock	VNET(pfil_lock)

#define	PFIL_LOCK_INIT_REAL(l, t)	\
	rm_init_flags(l, "PFil " t " rmlock", RM_RECURSE)
#define	PFIL_LOCK_DESTROY_REAL(l)	\
	rm_destroy(l)
#define	PFIL_LOCK_INIT(p) do {					\
	if ((p)->flags & PFIL_FLAG_PRIVATE_LOCK) {		\
		PFIL_LOCK_INIT_REAL(&(p)->ph_lock, "private");	\
		(p)->ph_plock = &(p)->ph_lock;			\
	} else							\
		(p)->ph_plock = &V_pfil_lock;			\
} while (0)
#define	PFIL_LOCK_DESTROY(p) do {				\
	if ((p)->flags & PFIL_FLAG_PRIVATE_LOCK)		\
		PFIL_LOCK_DESTROY_REAL((p)->ph_plock);		\
} while (0)

#define	PFIL_TRY_RLOCK(p, t)	rm_try_rlock((p)->ph_plock, (t))
#define	PFIL_RLOCK(p, t)	rm_rlock((p)->ph_plock, (t))
#define	PFIL_WLOCK(p)		rm_wlock((p)->ph_plock)
#define	PFIL_RUNLOCK(p, t)	rm_runlock((p)->ph_plock, (t))
#define	PFIL_WUNLOCK(p)		rm_wunlock((p)->ph_plock)
#define	PFIL_WOWNED(p)		rm_wowned((p)->ph_plock)

#define	PFIL_HEADLIST_LOCK()	mtx_lock(&pfil_global_lock)
#define	PFIL_HEADLIST_UNLOCK()	mtx_unlock(&pfil_global_lock)
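
/*
 * Illustrative note (not part of the original file): PFIL_LOCK_INIT()
 * lets a head opt out of the vnet-wide V_pfil_lock by setting
 * PFIL_FLAG_PRIVATE_LOCK before registration.  The head name below is
 * hypothetical and only sketches the idea:
 *
 *	static struct pfil_head example_pfil_head;
 *
 *	example_pfil_head.ph_type = PFIL_TYPE_AF;
 *	example_pfil_head.ph_af = AF_INET;
 *	example_pfil_head.flags = PFIL_FLAG_PRIVATE_LOCK;
 *	(void)pfil_head_register(&example_pfil_head);
 *
 * With the flag set, the head gets its own "private" rmlock instead of
 * sharing V_pfil_lock with every other head in the vnet.
 */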

/*
 * pfil_run_hooks() runs the specified packet filter hook chain.
 */
int
pfil_run_hooks(struct pfil_head *ph, struct mbuf **mp, struct ifnet *ifp,
    int dir, struct inpcb *inp)
{
	struct rm_priotracker rmpt;
	struct packet_filter_hook *pfh;
	struct mbuf *m = *mp;
	int rv = 0;

	PFIL_RLOCK(ph, &rmpt);
	KASSERT(ph->ph_nhooks >= 0, ("Pfil hook count dropped < 0"));
	for (pfh = pfil_chain_get(dir, ph); pfh != NULL;
	     pfh = TAILQ_NEXT(pfh, pfil_chain)) {
		if (pfh->pfil_func != NULL) {
			rv = (*pfh->pfil_func)(pfh->pfil_arg, &m, ifp, dir,
			    inp);
			if (rv != 0 || m == NULL)
				break;
		}
	}
	PFIL_RUNLOCK(ph, &rmpt);
	*mp = m;
	return (rv);
}

static struct packet_filter_hook *
pfil_chain_get(int dir, struct pfil_head *ph)
{

	if (dir == PFIL_IN)
		return (TAILQ_FIRST(&ph->ph_in));
	else if (dir == PFIL_OUT)
		return (TAILQ_FIRST(&ph->ph_out));
	else
		return (NULL);
}

/*
 * pfil_try_rlock() acquires the rm reader lock for the specified head
 * if it can be taken without blocking.
 */
int
pfil_try_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	return (PFIL_TRY_RLOCK(ph, tracker));
}

/*
 * pfil_rlock() acquires the rm reader lock for the specified head.
 */
void
pfil_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	PFIL_RLOCK(ph, tracker);
}

/*
 * pfil_runlock() releases the reader lock for the specified head.
 */
void
pfil_runlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	PFIL_RUNLOCK(ph, tracker);
}

/*
 * pfil_wlock() acquires the writer lock for the specified head.
 */
void
pfil_wlock(struct pfil_head *ph)
{

	PFIL_WLOCK(ph);
}

/*
 * pfil_wunlock() releases the writer lock for the specified head.
 */
void
pfil_wunlock(struct pfil_head *ph)
{

	PFIL_WUNLOCK(ph);
}

/*
 * pfil_wowned() returns a non-zero value if the current thread owns
 * an exclusive lock on the specified head.
 */
int
pfil_wowned(struct pfil_head *ph)
{

	return (PFIL_WOWNED(ph));
}

/*
 * pfil_head_register() registers a pfil_head with the packet filter hook
 * mechanism.
 */
int
pfil_head_register(struct pfil_head *ph)
{
	struct pfil_head *lph;

	PFIL_HEADLIST_LOCK();
	LIST_FOREACH(lph, &V_pfil_head_list, ph_list) {
		if (ph->ph_type == lph->ph_type &&
		    ph->ph_un.phu_val == lph->ph_un.phu_val) {
			PFIL_HEADLIST_UNLOCK();
			return (EEXIST);
		}
	}
	PFIL_LOCK_INIT(ph);
	ph->ph_nhooks = 0;
	TAILQ_INIT(&ph->ph_in);
	TAILQ_INIT(&ph->ph_out);
	LIST_INSERT_HEAD(&V_pfil_head_list, ph, ph_list);
	PFIL_HEADLIST_UNLOCK();
	return (0);
}
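
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * protocol input path typically checks whether any hooks are present and
 * then runs the chain, giving up the packet when a hook rejected or
 * consumed it.  The head and variable names below are assumptions for
 * the example, not definitions made by this file:
 *
 *	if (PFIL_HOOKED(&V_inet_pfil_hook)) {
 *		if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp,
 *		    PFIL_IN, NULL) != 0 || m == NULL)
 *			return;		(packet filtered or consumed)
 *	}
 */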

/*
 * pfil_head_unregister() removes a pfil_head from the packet filter hook
 * mechanism.  The producer of the hook promises that all outstanding
 * invocations of the hook have completed before it unregisters the hook.
 */
int
pfil_head_unregister(struct pfil_head *ph)
{
	struct packet_filter_hook *pfh, *pfnext;

	PFIL_HEADLIST_LOCK();
	LIST_REMOVE(ph, ph_list);
	PFIL_HEADLIST_UNLOCK();
	TAILQ_FOREACH_SAFE(pfh, &ph->ph_in, pfil_chain, pfnext)
		free(pfh, M_IFADDR);
	TAILQ_FOREACH_SAFE(pfh, &ph->ph_out, pfil_chain, pfnext)
		free(pfh, M_IFADDR);
	PFIL_LOCK_DESTROY(ph);
	return (0);
}

/*
 * pfil_head_get() returns the pfil_head for a given key/dlt.
 */
struct pfil_head *
pfil_head_get(int type, u_long val)
{
	struct pfil_head *ph;

	PFIL_HEADLIST_LOCK();
	LIST_FOREACH(ph, &V_pfil_head_list, ph_list)
		if (ph->ph_type == type && ph->ph_un.phu_val == val)
			break;
	PFIL_HEADLIST_UNLOCK();
	return (ph);
}

/*
 * pfil_add_hook() adds a function to the packet filter hook chain.  The
 * flags are:
 *	PFIL_IN		call me on incoming packets
 *	PFIL_OUT	call me on outgoing packets
 *	PFIL_ALL	call me on all of the above
 *	PFIL_WAITOK	OK to call malloc with M_WAITOK.
 */
int
pfil_add_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph)
{
	struct packet_filter_hook *pfh1 = NULL;
	struct packet_filter_hook *pfh2 = NULL;
	int err;

	if (flags & PFIL_IN) {
		pfh1 = (struct packet_filter_hook *)malloc(sizeof(*pfh1),
		    M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (pfh1 == NULL) {
			err = ENOMEM;
			goto error;
		}
	}
	if (flags & PFIL_OUT) {
		pfh2 = (struct packet_filter_hook *)malloc(sizeof(*pfh2),
		    M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (pfh2 == NULL) {
			err = ENOMEM;
			goto error;
		}
	}
	PFIL_WLOCK(ph);
	if (flags & PFIL_IN) {
		pfh1->pfil_func = func;
		pfh1->pfil_arg = arg;
		err = pfil_chain_add(&ph->ph_in, pfh1, flags & ~PFIL_OUT);
		if (err)
			goto locked_error;
		ph->ph_nhooks++;
	}
	if (flags & PFIL_OUT) {
		pfh2->pfil_func = func;
		pfh2->pfil_arg = arg;
		err = pfil_chain_add(&ph->ph_out, pfh2, flags & ~PFIL_IN);
		if (err) {
			if (flags & PFIL_IN)
				pfil_chain_remove(&ph->ph_in, func, arg);
			goto locked_error;
		}
		ph->ph_nhooks++;
	}
	PFIL_WUNLOCK(ph);
	return (0);
locked_error:
	PFIL_WUNLOCK(ph);
error:
	if (pfh1 != NULL)
		free(pfh1, M_IFADDR);
	if (pfh2 != NULL)
		free(pfh2, M_IFADDR);
	return (err);
}

/*
 * pfil_remove_hook() removes a specific function from the packet filter
 * hook chain.
 */
int
pfil_remove_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph)
{
	int err = 0;

	PFIL_WLOCK(ph);
	if (flags & PFIL_IN) {
		err = pfil_chain_remove(&ph->ph_in, func, arg);
		if (err == 0)
			ph->ph_nhooks--;
	}
	if ((err == 0) && (flags & PFIL_OUT)) {
		err = pfil_chain_remove(&ph->ph_out, func, arg);
		if (err == 0)
			ph->ph_nhooks--;
	}
	PFIL_WUNLOCK(ph);
	return (err);
}
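
/*
 * Registration sketch (illustrative only, not part of the original file):
 * a filtering module typically looks up the head for its address family
 * and then attaches one function for both directions.  The hook function
 * name below is hypothetical:
 *
 *	struct pfil_head *ph;
 *
 *	ph = pfil_head_get(PFIL_TYPE_AF, AF_INET);
 *	if (ph != NULL)
 *		(void)pfil_add_hook(my_filter_check, NULL,
 *		    PFIL_IN | PFIL_OUT | PFIL_WAITOK, ph);
 *
 * The matching teardown is pfil_remove_hook() with the same function,
 * argument and direction flags.
 */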

/*
 * Internal: Add a new pfil hook into a hook chain.
 */
static int
pfil_chain_add(pfil_chain_t *chain, struct packet_filter_hook *pfh1, int flags)
{
	struct packet_filter_hook *pfh;

	/*
	 * First make sure the hook is not already there.
	 */
	TAILQ_FOREACH(pfh, chain, pfil_chain)
		if (pfh->pfil_func == pfh1->pfil_func &&
		    pfh->pfil_arg == pfh1->pfil_arg)
			return (EEXIST);

	/*
	 * Insert into the input chain in reverse order of the output chain,
	 * so that a packet traverses the hooks in the same relative order
	 * on its way into and out of the kernel.
	 */
	if (flags & PFIL_IN)
		TAILQ_INSERT_HEAD(chain, pfh1, pfil_chain);
	else
		TAILQ_INSERT_TAIL(chain, pfh1, pfil_chain);
	return (0);
}

/*
 * Internal: Remove a pfil hook from a hook chain.
 */
static int
pfil_chain_remove(pfil_chain_t *chain, pfil_func_t func, void *arg)
{
	struct packet_filter_hook *pfh;

	TAILQ_FOREACH(pfh, chain, pfil_chain)
		if (pfh->pfil_func == func && pfh->pfil_arg == arg) {
			TAILQ_REMOVE(chain, pfh, pfil_chain);
			free(pfh, M_IFADDR);
			return (0);
		}
	return (ENOENT);
}

/*
 * Initialization that must run for every vnet instance (including the
 * first one).
 */
static void
vnet_pfil_init(const void *unused __unused)
{

	LIST_INIT(&V_pfil_head_list);
	PFIL_LOCK_INIT_REAL(&V_pfil_lock, "shared");
}

/*
 * Called for the removal of each vnet instance.
 */
static void
vnet_pfil_uninit(const void *unused __unused)
{

	KASSERT(LIST_EMPTY(&V_pfil_head_list),
	    ("%s: pfil_head_list %p not empty", __func__, &V_pfil_head_list));
	PFIL_LOCK_DESTROY_REAL(&V_pfil_lock);
}

/*
 * Starting up.
 *
 * VNET_SYSINIT is called for each existing vnet and each new vnet.
 * Make sure the pfil bits run first, before any subsystem that might
 * piggyback on SI_SUB_PROTO_PFIL.
 */
VNET_SYSINIT(vnet_pfil_init, SI_SUB_PROTO_PFIL, SI_ORDER_FIRST,
    vnet_pfil_init, NULL);

/*
 * Closing up shop.  These are done in REVERSE ORDER.  Not called on reboot.
 *
 * VNET_SYSUNINIT is called for each exiting vnet as it exits.
 */
VNET_SYSUNINIT(vnet_pfil_uninit, SI_SUB_PROTO_PFIL, SI_ORDER_FIRST,
    vnet_pfil_uninit, NULL);
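
/*
 * Ordering sketch (illustrative only, not part of the original file): a
 * subsystem that wants its hooks in place right after pfil is ready can
 * hang its own initializer off the same subsystem ID at a later order.
 * The function name below is hypothetical:
 *
 *	VNET_SYSINIT(my_fw_vnet_init, SI_SUB_PROTO_PFIL, SI_ORDER_ANY,
 *	    my_fw_vnet_init, NULL);
 *
 * SI_ORDER_FIRST above guarantees vnet_pfil_init() has already run in the
 * same vnet before any such piggybacking initializer.
 */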