/* $FreeBSD$ */
/* $NetBSD: pfil.c,v 1.20 2001/11/12 23:49:46 lukem Exp $ */

/*-
 * Copyright (c) 1996 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/pfil.h>

static struct mtx pfil_global_lock;

MTX_SYSINIT(pfil_heads_lock, &pfil_global_lock, "pfil_head_list lock",
    MTX_DEF);

static struct packet_filter_hook *pfil_chain_get(int, struct pfil_head *);
static int pfil_chain_add(pfil_chain_t *, struct packet_filter_hook *, int);
static int pfil_chain_remove(pfil_chain_t *, pfil_func_t, void *);

LIST_HEAD(pfilheadhead, pfil_head);
VNET_DEFINE(struct pfilheadhead, pfil_head_list);
#define	V_pfil_head_list	VNET(pfil_head_list)
VNET_DEFINE(struct rmlock, pfil_lock);

#define	PFIL_LOCK_INIT_REAL(l, t)	\
	rm_init_flags(l, "PFil " t " rmlock", RM_RECURSE)
#define	PFIL_LOCK_DESTROY_REAL(l)	\
	rm_destroy(l)
#define	PFIL_LOCK_INIT(p)	do {			\
	if ((p)->flags & PFIL_FLAG_PRIVATE_LOCK) {	\
		PFIL_LOCK_INIT_REAL(&(p)->ph_lock, "private");	\
		(p)->ph_plock = &(p)->ph_lock;		\
	} else						\
		(p)->ph_plock = &V_pfil_lock;		\
} while (0)
#define	PFIL_LOCK_DESTROY(p)	do {			\
	if ((p)->flags & PFIL_FLAG_PRIVATE_LOCK)	\
		PFIL_LOCK_DESTROY_REAL((p)->ph_plock);	\
} while (0)

#define	PFIL_TRY_RLOCK(p, t)	rm_try_rlock((p)->ph_plock, (t))
#define	PFIL_RLOCK(p, t)	rm_rlock((p)->ph_plock, (t))
#define	PFIL_WLOCK(p)		rm_wlock((p)->ph_plock)
#define	PFIL_RUNLOCK(p, t)	rm_runlock((p)->ph_plock, (t))
#define	PFIL_WUNLOCK(p)		rm_wunlock((p)->ph_plock)
#define	PFIL_WOWNED(p)		rm_wowned((p)->ph_plock)

#define	PFIL_HEADLIST_LOCK()	mtx_lock(&pfil_global_lock)
#define	PFIL_HEADLIST_UNLOCK()	mtx_unlock(&pfil_global_lock)

/*
 * pfil_run_hooks() runs the specified packet filter hook chain.
 */
int
pfil_run_hooks(struct pfil_head *ph, struct mbuf **mp, struct ifnet *ifp,
    int dir, struct inpcb *inp)
{
	struct rm_priotracker rmpt;
	struct packet_filter_hook *pfh;
	struct mbuf *m = *mp;
	int rv = 0;

	PFIL_RLOCK(ph, &rmpt);
	KASSERT(ph->ph_nhooks >= 0, ("Pfil hook count dropped < 0"));
	for (pfh = pfil_chain_get(dir, ph); pfh != NULL;
	     pfh = TAILQ_NEXT(pfh, pfil_chain)) {
		if (pfh->pfil_func != NULL) {
			rv = (*pfh->pfil_func)(pfh->pfil_arg, &m, ifp, dir,
			    inp);
			if (rv != 0 || m == NULL)
				break;
		}
	}
	PFIL_RUNLOCK(ph, &rmpt);
	*mp = m;
	return (rv);
}
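
/*
 * Illustrative sketch only (not part of the original file): a protocol
 * input path typically guards the call with PFIL_HOOKED() and stops
 * processing when a hook consumes or rejects the mbuf, roughly the way
 * the inet stack drives its AF_INET head.  The head name
 * V_inet_pfil_hook is an assumption borrowed from the inet code.
 *
 *	if (PFIL_HOOKED(&V_inet_pfil_hook)) {
 *		if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp,
 *		    PFIL_IN, NULL) != 0 || m == NULL)
 *			return;			-- consumed or rejected
 *	}
 */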

static struct packet_filter_hook *
pfil_chain_get(int dir, struct pfil_head *ph)
{

	if (dir == PFIL_IN)
		return (TAILQ_FIRST(&ph->ph_in));
	else if (dir == PFIL_OUT)
		return (TAILQ_FIRST(&ph->ph_out));
	else
		return (NULL);
}

/*
 * pfil_try_rlock() acquires an rm reader lock for the specified head
 * if the lock can be taken without blocking.
 */
int
pfil_try_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	return (PFIL_TRY_RLOCK(ph, tracker));
}

/*
 * pfil_rlock() acquires an rm reader lock for the specified head.
 */
void
pfil_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	PFIL_RLOCK(ph, tracker);
}

/*
 * pfil_runlock() releases the reader lock for the specified head.
 */
void
pfil_runlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	PFIL_RUNLOCK(ph, tracker);
}

/*
 * pfil_wlock() acquires the writer lock for the specified head.
 */
void
pfil_wlock(struct pfil_head *ph)
{

	PFIL_WLOCK(ph);
}

/*
 * pfil_wunlock() releases the writer lock for the specified head.
 */
void
pfil_wunlock(struct pfil_head *ph)
{

	PFIL_WUNLOCK(ph);
}

/*
 * pfil_wowned() returns a non-zero value if the current thread owns
 * the exclusive lock on the specified head.
 */
int
pfil_wowned(struct pfil_head *ph)
{

	return (PFIL_WOWNED(ph));
}

/*
 * pfil_head_register() registers a pfil_head with the packet filter hook
 * mechanism.
 */
int
pfil_head_register(struct pfil_head *ph)
{
	struct pfil_head *lph;

	PFIL_HEADLIST_LOCK();
	LIST_FOREACH(lph, &V_pfil_head_list, ph_list) {
		if (ph->ph_type == lph->ph_type &&
		    ph->ph_un.phu_val == lph->ph_un.phu_val) {
			PFIL_HEADLIST_UNLOCK();
			return (EEXIST);
		}
	}
	PFIL_LOCK_INIT(ph);
	ph->ph_nhooks = 0;
	TAILQ_INIT(&ph->ph_in);
	TAILQ_INIT(&ph->ph_out);
	LIST_INSERT_HEAD(&V_pfil_head_list, ph, ph_list);
	PFIL_HEADLIST_UNLOCK();
	return (0);
}
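
/*
 * Illustrative sketch only: a protocol attaches a head by filling in the
 * type and key before calling pfil_head_register(), roughly as the inet
 * stack does for its AF_INET head.  The head name V_inet_pfil_hook and
 * the error handling shown here are assumptions for the example.
 *
 *	V_inet_pfil_hook.ph_type = PFIL_TYPE_AF;
 *	V_inet_pfil_hook.ph_af = AF_INET;
 *	if ((error = pfil_head_register(&V_inet_pfil_hook)) != 0)
 *		printf("%s: WARNING: unable to register pfil hook, "
 *		    "error %d\n", __func__, error);
 */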

/*
 * pfil_head_unregister() removes a pfil_head from the packet filter hook
 * mechanism.  The producer of the hook promises that all outstanding
 * invocations of the hook have completed before it unregisters the hook.
 */
int
pfil_head_unregister(struct pfil_head *ph)
{
	struct packet_filter_hook *pfh, *pfnext;

	PFIL_HEADLIST_LOCK();
	LIST_REMOVE(ph, ph_list);
	PFIL_HEADLIST_UNLOCK();
	TAILQ_FOREACH_SAFE(pfh, &ph->ph_in, pfil_chain, pfnext)
		free(pfh, M_IFADDR);
	TAILQ_FOREACH_SAFE(pfh, &ph->ph_out, pfil_chain, pfnext)
		free(pfh, M_IFADDR);
	PFIL_LOCK_DESTROY(ph);
	return (0);
}

/*
 * pfil_head_get() returns the pfil_head for a given key/dlt.
 */
struct pfil_head *
pfil_head_get(int type, u_long val)
{
	struct pfil_head *ph;

	PFIL_HEADLIST_LOCK();
	LIST_FOREACH(ph, &V_pfil_head_list, ph_list)
		if (ph->ph_type == type && ph->ph_un.phu_val == val)
			break;
	PFIL_HEADLIST_UNLOCK();
	return (ph);
}

/*
 * pfil_add_hook() adds a function to the packet filter hook.  The
 * flags are:
 *	PFIL_IN		call me on incoming packets
 *	PFIL_OUT	call me on outgoing packets
 *	PFIL_ALL	call me on all of the above
 *	PFIL_WAITOK	OK to call malloc with M_WAITOK.
 */
int
pfil_add_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph)
{
	struct packet_filter_hook *pfh1 = NULL;
	struct packet_filter_hook *pfh2 = NULL;
	int err;

	if (flags & PFIL_IN) {
		pfh1 = (struct packet_filter_hook *)malloc(sizeof(*pfh1),
		    M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (pfh1 == NULL) {
			err = ENOMEM;
			goto error;
		}
	}
	if (flags & PFIL_OUT) {
		pfh2 = (struct packet_filter_hook *)malloc(sizeof(*pfh2),
		    M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (pfh2 == NULL) {
			err = ENOMEM;
			goto error;
		}
	}
	PFIL_WLOCK(ph);
	if (flags & PFIL_IN) {
		pfh1->pfil_func = func;
		pfh1->pfil_arg = arg;
		err = pfil_chain_add(&ph->ph_in, pfh1, flags & ~PFIL_OUT);
		if (err)
			goto locked_error;
		ph->ph_nhooks++;
	}
	if (flags & PFIL_OUT) {
		pfh2->pfil_func = func;
		pfh2->pfil_arg = arg;
		err = pfil_chain_add(&ph->ph_out, pfh2, flags & ~PFIL_IN);
		if (err) {
			if (flags & PFIL_IN) {
				/*
				 * Undo the input-side insertion; the entry
				 * is freed by pfil_chain_remove(), so clear
				 * pfh1 to avoid freeing it again below.
				 */
				pfil_chain_remove(&ph->ph_in, func, arg);
				ph->ph_nhooks--;
				pfh1 = NULL;
			}
			goto locked_error;
		}
		ph->ph_nhooks++;
	}
	PFIL_WUNLOCK(ph);
	return (0);
locked_error:
	PFIL_WUNLOCK(ph);
error:
	if (pfh1 != NULL)
		free(pfh1, M_IFADDR);
	if (pfh2 != NULL)
		free(pfh2, M_IFADDR);
	return (err);
}

/*
 * pfil_remove_hook() removes a specific function from the packet filter
 * hook chain.
 */
int
pfil_remove_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph)
{
	int err = 0;

	PFIL_WLOCK(ph);
	if (flags & PFIL_IN) {
		err = pfil_chain_remove(&ph->ph_in, func, arg);
		if (err == 0)
			ph->ph_nhooks--;
	}
	if ((err == 0) && (flags & PFIL_OUT)) {
		err = pfil_chain_remove(&ph->ph_out, func, arg);
		if (err == 0)
			ph->ph_nhooks--;
	}
	PFIL_WUNLOCK(ph);
	return (err);
}
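
/*
 * Illustrative sketch only: a packet filter typically looks up the head
 * for its address family and attaches one function for both directions.
 * The filter function my_filter_check and its NULL argument are
 * hypothetical; its signature matches the pfil_func_t call made from
 * pfil_run_hooks() above.
 *
 *	static int
 *	my_filter_check(void *arg, struct mbuf **mp, struct ifnet *ifp,
 *	    int dir, struct inpcb *inp)
 *	{
 *		return (0);		-- 0 lets the packet continue
 *	}
 *
 *	struct pfil_head *pfh_inet;
 *
 *	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
 *	if (pfh_inet != NULL)
 *		(void)pfil_add_hook(my_filter_check, NULL,
 *		    PFIL_IN | PFIL_OUT | PFIL_WAITOK, pfh_inet);
 */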

/*
 * Internal: Add a new pfil hook into a hook chain.
 */
static int
pfil_chain_add(pfil_chain_t *chain, struct packet_filter_hook *pfh1, int flags)
{
	struct packet_filter_hook *pfh;

	/*
	 * First make sure the hook is not already there.
	 */
	TAILQ_FOREACH(pfh, chain, pfil_chain)
		if (pfh->pfil_func == pfh1->pfil_func &&
		    pfh->pfil_arg == pfh1->pfil_arg)
			return (EEXIST);

	/*
	 * Insert the input list in reverse order of the output list so that
	 * the same path is followed in or out of the kernel.
	 */
	if (flags & PFIL_IN)
		TAILQ_INSERT_HEAD(chain, pfh1, pfil_chain);
	else
		TAILQ_INSERT_TAIL(chain, pfh1, pfil_chain);
	return (0);
}

/*
 * Internal: Remove a pfil hook from a hook chain.
 */
static int
pfil_chain_remove(pfil_chain_t *chain, pfil_func_t func, void *arg)
{
	struct packet_filter_hook *pfh;

	TAILQ_FOREACH(pfh, chain, pfil_chain)
		if (pfh->pfil_func == func && pfh->pfil_arg == arg) {
			TAILQ_REMOVE(chain, pfh, pfil_chain);
			free(pfh, M_IFADDR);
			return (0);
		}
	return (ENOENT);
}

/*
 * Stuff that must be initialized for every instance (including the first of
 * course).
 */
static void
vnet_pfil_init(const void *unused __unused)
{

	LIST_INIT(&V_pfil_head_list);
	PFIL_LOCK_INIT_REAL(&V_pfil_lock, "shared");
}

/*
 * Called for the removal of each instance.
 */
static void
vnet_pfil_uninit(const void *unused __unused)
{

	KASSERT(LIST_EMPTY(&V_pfil_head_list),
	    ("%s: pfil_head_list %p not empty", __func__, &V_pfil_head_list));
	PFIL_LOCK_DESTROY_REAL(&V_pfil_lock);
}

/*
 * Starting up.
 *
 * VNET_SYSINIT is called for each existing vnet and each new vnet.
 * Make sure the pfil bits run first, before any possible subsystem which
 * might piggyback on SI_SUB_PROTO_PFIL.
 */
VNET_SYSINIT(vnet_pfil_init, SI_SUB_PROTO_PFIL, SI_ORDER_FIRST,
    vnet_pfil_init, NULL);

/*
 * Closing up shop.  These are done in REVERSE ORDER.  Not called on reboot.
 *
 * VNET_SYSUNINIT is called for each exiting vnet as it exits.
 */
VNET_SYSUNINIT(vnet_pfil_uninit, SI_SUB_PROTO_PFIL, SI_ORDER_FIRST,
    vnet_pfil_uninit, NULL);