1 /*- 2 * Copyright (c) 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * @(#)bpf.c 8.4 (Berkeley) 1/9/95 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include "opt_bpf.h" 41 #include "opt_netgraph.h" 42 43 #include <sys/types.h> 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/conf.h> 47 #include <sys/fcntl.h> 48 #include <sys/malloc.h> 49 #include <sys/mbuf.h> 50 #include <sys/time.h> 51 #include <sys/priv.h> 52 #include <sys/proc.h> 53 #include <sys/signalvar.h> 54 #include <sys/filio.h> 55 #include <sys/sockio.h> 56 #include <sys/ttycom.h> 57 #include <sys/uio.h> 58 #include <sys/vimage.h> 59 60 #include <sys/event.h> 61 #include <sys/file.h> 62 #include <sys/poll.h> 63 #include <sys/proc.h> 64 65 #include <sys/socket.h> 66 67 #include <net/if.h> 68 #include <net/bpf.h> 69 #include <net/bpf_buffer.h> 70 #ifdef BPF_JITTER 71 #include <net/bpf_jitter.h> 72 #endif 73 #include <net/bpf_zerocopy.h> 74 #include <net/bpfdesc.h> 75 76 #include <netinet/in.h> 77 #include <netinet/if_ether.h> 78 #include <sys/kernel.h> 79 #include <sys/sysctl.h> 80 81 #include <net80211/ieee80211_freebsd.h> 82 83 #include <security/mac/mac_framework.h> 84 85 MALLOC_DEFINE(M_BPF, "BPF", "BPF data"); 86 87 #if defined(DEV_BPF) || defined(NETGRAPH_BPF) 88 89 #define PRINET 26 /* interruptible */ 90 91 /* 92 * bpf_iflist is a list of BPF interface structures, each corresponding to a 93 * specific DLT. The same network interface might have several BPF interface 94 * structures registered by different layers in the stack (i.e., 802.11 95 * frames, ethernet frames, etc). 
 */
static LIST_HEAD(, bpf_if)	bpf_iflist;	/* all registered BPF interfaces */
static struct mtx	bpf_mtx;		/* bpf global lock */
static int	bpf_bpfd_cnt;			/* count of attached descriptors */

static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static int	bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
		    struct sockaddr *, int *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void
		bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
		    void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
		    struct timeval *);
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	filt_bpfdetach(struct knote *);
static int	filt_bpfread(struct knote *, long);
static void	bpf_drvinit(void *);
static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
int bpf_maxinsns = BPF_MAXINSNS;
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
    &bpf_maxinsns, 0, "Maximum bpf program instructions");
static int bpf_zerocopy_enable = 0;
SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
    &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
    bpf_stats_sysctl, "bpf statistics portal");

/* Character-device entry points (see bpf_cdevsw below). */
static	d_open_t	bpfopen;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;
static	d_kqfilter_t	bpfkqfilter;

static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	bpfopen,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_kqfilter =	bpfkqfilter,
};

/* kqueue read filter: f_isfd=1, detach/event hooks below. */
static struct filterops bpfread_filtops =
	{ 1, NULL, filt_bpfdetach, filt_bpfread };

/*
 * Wrapper functions for various buffering methods.  If the set of buffer
 * modes expands, we will probably want to introduce a switch data structure
 * similar to protosw, et.
 */

/*
 * Copy len bytes from src into the store buffer at the given offset,
 * dispatching on the descriptor's buffer mode.  Caller holds the
 * descriptor lock.
 */
static void
bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_bytes(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		/* Count zero-copy appends for BIOCGSTATS-style accounting. */
		d->bd_zcopy++;
		return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_bytes");
	}
}

/*
 * As bpf_append_bytes(), but src is an mbuf chain rather than a flat
 * buffer.  Caller holds the descriptor lock.
 */
static void
bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_mbuf(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		d->bd_zcopy++;
		return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_mbuf");
	}
}

/*
 * This function gets called when the free buffer is re-assigned.
 * Classic kernel buffers need no action; zero-copy must update its
 * shared-page bookkeeping.
 */
static void
bpf_buf_reclaimed(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return;

	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buf_reclaimed(d);
		return;

	default:
		panic("bpf_buf_reclaimed");
	}
}

/*
 * If the buffer mechanism has a way to decide that a held buffer can be made
 * free, then it is exposed via the bpf_canfreebuf() interface.  (1) is
 * returned if the buffer can be discarded, (0) is returned if it cannot.
 */
static int
bpf_canfreebuf(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canfreebuf(d));
	}
	/* Kernel buffers are never freed early. */
	return (0);
}

/*
 * Allow the buffer model to indicate that the current store buffer is
 * immutable, regardless of the appearance of space.  Return (1) if the
 * buffer is writable, and (0) if not.
 */
static int
bpf_canwritebuf(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canwritebuf(d));
	}
	/* Kernel buffers are always writable by the kernel. */
	return (1);
}

/*
 * Notify buffer model that an attempt to write to the store buffer has
 * resulted in a dropped packet, in which case the buffer may be considered
 * full.
 */
static void
bpf_buffull(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buffull(d);
		break;
	}
}

/*
 * Notify the buffer model that a buffer has moved into the hold position.
 */
void
bpf_bufheld(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_bufheld(d);
		break;
	}
}

/*
 * Release all buffer storage owned by the descriptor, dispatching on
 * buffer mode.  Called on final teardown; no lock assertion since the
 * descriptor is no longer reachable.
 */
static void
bpf_free(struct bpf_d *d)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_free(d));

	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_free(d));

	default:
		panic("bpf_buf_free");
	}
}

/*
 * Copy a completed hold buffer out to userspace via uio.  Only meaningful
 * for kernel buffers; zero-copy consumers read the shared pages directly.
 */
static int
bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_uiomove(d, buf, len, uio));
}

/*
 * BIOCSBLEN backend: set the kernel buffer length.  Rejected for
 * zero-copy, where the user supplies the buffers.
 */
static int
bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_ioctl_sblen(d, i));
}

/*
 * BIOCGETZMAX backend: report the maximum zero-copy buffer size.
 * Only valid in zero-copy mode.
 */
static int
bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_getzmax(td, d, i));
}

/*
 * BIOCROTZBUF backend: force rotation of the zero-copy buffers.
 */
static int
bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
}

/*
 * BIOCSETZBUF backend: register user pages as the zero-copy buffers.
 */
static int
bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
}

/*
 * General BPF functions.
 */

/*
 * Copy a packet written by userspace out of uio into a freshly allocated
 * mbuf, validating it against the descriptor's write filter, and build a
 * sockaddr describing the link-layer destination.  On success *mp holds
 * the mbuf and *hdrlen the number of link-header bytes that the caller
 * must strip before handing the packet to if_output.  Returns 0 or an
 * errno; on error the mbuf has been freed.
 */
static int
bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
    struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
{
	const struct ieee80211_bpf_params *p;
	struct ether_header *eh;
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_NULL:
		/*
		 * null interface types require a 4 byte pseudo header which
		 * corresponds to the address family of the packet.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;

	/* Payload (less link header) must fit within the interface MTU. */
	if (len - hlen > ifp->if_mtu)
		return (EMSGSIZE);

	if ((unsigned)len > MJUM16BYTES)
		return (EIO);

	/* Pick the smallest mbuf/cluster size that holds the whole packet. */
	if (len <= MHLEN)
		MGETHDR(m, M_WAIT, MT_DATA);
	else if (len <= MCLBYTES)
		m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
	else
		m = m_getjcl(M_WAIT, MT_DATA, M_PKTHDR,
#if (MJUMPAGESIZE > MCLBYTES)
		    len <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    (len <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES));
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	/* Packet shorter than its own link header is rejected. */
	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	/* Apply the write filter; a zero return means "drop". */
	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/* Check for multicast destination */
	switch (linktype) {
	case DLT_EN10MB:
		eh = mtod(m, struct ether_header *);
		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
			    ETHER_ADDR_LEN) == 0)
				m->m_flags |= M_BCAST;
			else
				m->m_flags |= M_MCAST;
		}
		break;
	}

	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     zero.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(m->m_data, sockp->sa_data, hlen);
	}
	*hdrlen = hlen;

	return (0);
bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	BPFIF_LOCK(bp);
	d->bd_bif = bp;
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	bpf_bpfd_cnt++;
	BPFIF_UNLOCK(bp);

	/* Let interested drivers (e.g. monitor mode) know a tap arrived. */
	EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	bp = d->bd_bif;
	/* Lock order: interface lock before descriptor lock. */
	BPFIF_LOCK(bp);
	BPFD_LOCK(d);
	ifp = d->bd_bif->bif_ifp;

	/*
	 * Remove d from the interface's descriptor list.
	 */
	LIST_REMOVE(d, bd_next);

	bpf_bpfd_cnt--;
	d->bd_bif = NULL;
	BPFD_UNLOCK(d);
	BPFIF_UNLOCK(bp);

	EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		CURVNET_SET(ifp->if_vnet);
		error = ifpromisc(ifp, 0);
		CURVNET_RESTORE();
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
				"bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 * Installed as the cdevpriv destructor in bpfopen(), so it runs once
 * per descriptor when the last reference to the file goes away.
 */
static void
bpf_dtor(void *data)
{
	struct bpf_d *d = data;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);
	mtx_lock(&bpf_mtx);
	if (d->bd_bif)
		bpf_detachd(d);
	mtx_unlock(&bpf_mtx);
	selwakeuppri(&d->bd_sel, PRINET);
#ifdef MAC
	mac_bpfdesc_destroy(d);
#endif /* MAC */
	knlist_destroy(&d->bd_sel.si_note);
	bpf_freed(d);
	free(d, M_BPF);
}

/*
 * Open ethernet device.
Returns ENXIO for illegal minor device number, 619 * EBUSY if file is open by another process. 620 */ 621 /* ARGSUSED */ 622 static int 623 bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td) 624 { 625 struct bpf_d *d; 626 int error; 627 628 d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO); 629 error = devfs_set_cdevpriv(d, bpf_dtor); 630 if (error != 0) { 631 free(d, M_BPF); 632 return (error); 633 } 634 635 /* 636 * For historical reasons, perform a one-time initialization call to 637 * the buffer routines, even though we're not yet committed to a 638 * particular buffer method. 639 */ 640 bpf_buffer_init(d); 641 d->bd_bufmode = BPF_BUFMODE_BUFFER; 642 d->bd_sig = SIGIO; 643 d->bd_direction = BPF_D_INOUT; 644 d->bd_pid = td->td_proc->p_pid; 645 #ifdef MAC 646 mac_bpfdesc_init(d); 647 mac_bpfdesc_create(td->td_ucred, d); 648 #endif 649 mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF); 650 callout_init(&d->bd_callout, CALLOUT_MPSAFE); 651 knlist_init(&d->bd_sel.si_note, &d->bd_mtx, NULL, NULL, NULL); 652 653 return (0); 654 } 655 656 /* 657 * bpfread - read next chunk of packets from buffers 658 */ 659 static int 660 bpfread(struct cdev *dev, struct uio *uio, int ioflag) 661 { 662 struct bpf_d *d; 663 int timed_out; 664 int error; 665 666 error = devfs_get_cdevpriv((void **)&d); 667 if (error != 0) 668 return (error); 669 670 /* 671 * Restrict application to use a buffer the same size as 672 * as kernel buffers. 
673 */ 674 if (uio->uio_resid != d->bd_bufsize) 675 return (EINVAL); 676 677 BPFD_LOCK(d); 678 d->bd_pid = curthread->td_proc->p_pid; 679 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) { 680 BPFD_UNLOCK(d); 681 return (EOPNOTSUPP); 682 } 683 if (d->bd_state == BPF_WAITING) 684 callout_stop(&d->bd_callout); 685 timed_out = (d->bd_state == BPF_TIMED_OUT); 686 d->bd_state = BPF_IDLE; 687 /* 688 * If the hold buffer is empty, then do a timed sleep, which 689 * ends when the timeout expires or when enough packets 690 * have arrived to fill the store buffer. 691 */ 692 while (d->bd_hbuf == NULL) { 693 if ((d->bd_immediate || timed_out) && d->bd_slen != 0) { 694 /* 695 * A packet(s) either arrived since the previous 696 * read or arrived while we were asleep. 697 * Rotate the buffers and return what's here. 698 */ 699 ROTATE_BUFFERS(d); 700 break; 701 } 702 703 /* 704 * No data is available, check to see if the bpf device 705 * is still pointed at a real interface. If not, return 706 * ENXIO so that the userland process knows to rebind 707 * it before using it again. 708 */ 709 if (d->bd_bif == NULL) { 710 BPFD_UNLOCK(d); 711 return (ENXIO); 712 } 713 714 if (ioflag & O_NONBLOCK) { 715 BPFD_UNLOCK(d); 716 return (EWOULDBLOCK); 717 } 718 error = msleep(d, &d->bd_mtx, PRINET|PCATCH, 719 "bpf", d->bd_rtout); 720 if (error == EINTR || error == ERESTART) { 721 BPFD_UNLOCK(d); 722 return (error); 723 } 724 if (error == EWOULDBLOCK) { 725 /* 726 * On a timeout, return what's in the buffer, 727 * which may be nothing. If there is something 728 * in the store buffer, we can rotate the buffers. 729 */ 730 if (d->bd_hbuf) 731 /* 732 * We filled up the buffer in between 733 * getting the timeout and arriving 734 * here, so we don't need to rotate. 735 */ 736 break; 737 738 if (d->bd_slen == 0) { 739 BPFD_UNLOCK(d); 740 return (0); 741 } 742 ROTATE_BUFFERS(d); 743 break; 744 } 745 } 746 /* 747 * At this point, we know we have something in the hold slot. 
748 */ 749 BPFD_UNLOCK(d); 750 751 /* 752 * Move data from hold buffer into user space. 753 * We know the entire buffer is transferred since 754 * we checked above that the read buffer is bpf_bufsize bytes. 755 * 756 * XXXRW: More synchronization needed here: what if a second thread 757 * issues a read on the same fd at the same time? Don't want this 758 * getting invalidated. 759 */ 760 error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio); 761 762 BPFD_LOCK(d); 763 d->bd_fbuf = d->bd_hbuf; 764 d->bd_hbuf = NULL; 765 d->bd_hlen = 0; 766 bpf_buf_reclaimed(d); 767 BPFD_UNLOCK(d); 768 769 return (error); 770 } 771 772 /* 773 * If there are processes sleeping on this descriptor, wake them up. 774 */ 775 static __inline void 776 bpf_wakeup(struct bpf_d *d) 777 { 778 779 BPFD_LOCK_ASSERT(d); 780 if (d->bd_state == BPF_WAITING) { 781 callout_stop(&d->bd_callout); 782 d->bd_state = BPF_IDLE; 783 } 784 wakeup(d); 785 if (d->bd_async && d->bd_sig && d->bd_sigio) 786 pgsigio(&d->bd_sigio, d->bd_sig, 0); 787 788 selwakeuppri(&d->bd_sel, PRINET); 789 KNOTE_LOCKED(&d->bd_sel.si_note, 0); 790 } 791 792 static void 793 bpf_timed_out(void *arg) 794 { 795 struct bpf_d *d = (struct bpf_d *)arg; 796 797 BPFD_LOCK(d); 798 if (d->bd_state == BPF_WAITING) { 799 d->bd_state = BPF_TIMED_OUT; 800 if (d->bd_slen != 0) 801 bpf_wakeup(d); 802 } 803 BPFD_UNLOCK(d); 804 } 805 806 static int 807 bpf_ready(struct bpf_d *d) 808 { 809 810 BPFD_LOCK_ASSERT(d); 811 812 if (!bpf_canfreebuf(d) && d->bd_hlen != 0) 813 return (1); 814 if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) && 815 d->bd_slen != 0) 816 return (1); 817 return (0); 818 } 819 820 static int 821 bpfwrite(struct cdev *dev, struct uio *uio, int ioflag) 822 { 823 struct bpf_d *d; 824 struct ifnet *ifp; 825 struct mbuf *m, *mc; 826 struct sockaddr dst; 827 int error, hlen; 828 829 error = devfs_get_cdevpriv((void **)&d); 830 if (error != 0) 831 return (error); 832 833 d->bd_pid = curthread->td_proc->p_pid; 834 d->bd_wcount++; 835 if 
(d->bd_bif == NULL) { 836 d->bd_wdcount++; 837 return (ENXIO); 838 } 839 840 ifp = d->bd_bif->bif_ifp; 841 842 if ((ifp->if_flags & IFF_UP) == 0) { 843 d->bd_wdcount++; 844 return (ENETDOWN); 845 } 846 847 if (uio->uio_resid == 0) { 848 d->bd_wdcount++; 849 return (0); 850 } 851 852 bzero(&dst, sizeof(dst)); 853 m = NULL; 854 hlen = 0; 855 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp, 856 &m, &dst, &hlen, d->bd_wfilter); 857 if (error) { 858 d->bd_wdcount++; 859 return (error); 860 } 861 d->bd_wfcount++; 862 if (d->bd_hdrcmplt) 863 dst.sa_family = pseudo_AF_HDRCMPLT; 864 865 if (d->bd_feedback) { 866 mc = m_dup(m, M_DONTWAIT); 867 if (mc != NULL) 868 mc->m_pkthdr.rcvif = ifp; 869 /* Set M_PROMISC for outgoing packets to be discarded. */ 870 if (d->bd_direction == BPF_D_INOUT) 871 m->m_flags |= M_PROMISC; 872 } else 873 mc = NULL; 874 875 m->m_pkthdr.len -= hlen; 876 m->m_len -= hlen; 877 m->m_data += hlen; /* XXX */ 878 879 CURVNET_SET(ifp->if_vnet); 880 #ifdef MAC 881 BPFD_LOCK(d); 882 mac_bpfdesc_create_mbuf(d, m); 883 if (mc != NULL) 884 mac_bpfdesc_create_mbuf(d, mc); 885 BPFD_UNLOCK(d); 886 #endif 887 888 error = (*ifp->if_output)(ifp, m, &dst, NULL); 889 if (error) 890 d->bd_wdcount++; 891 892 if (mc != NULL) { 893 if (error == 0) 894 (*ifp->if_input)(ifp, mc); 895 else 896 m_freem(mc); 897 } 898 CURVNET_RESTORE(); 899 900 return (error); 901 } 902 903 /* 904 * Reset a descriptor by flushing its packet buffer and clearing the receive 905 * and drop counts. This is doable for kernel-only buffers, but with 906 * zero-copy buffers, we can't write to (or rotate) buffers that are 907 * currently owned by userspace. It would be nice if we could encapsulate 908 * this logic in the buffer code rather than here. 909 */ 910 static void 911 reset_d(struct bpf_d *d) 912 { 913 914 mtx_assert(&d->bd_mtx, MA_OWNED); 915 916 if ((d->bd_hbuf != NULL) && 917 (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) { 918 /* Free the hold buffer. 
*/ 919 d->bd_fbuf = d->bd_hbuf; 920 d->bd_hbuf = NULL; 921 d->bd_hlen = 0; 922 bpf_buf_reclaimed(d); 923 } 924 if (bpf_canwritebuf(d)) 925 d->bd_slen = 0; 926 d->bd_rcount = 0; 927 d->bd_dcount = 0; 928 d->bd_fcount = 0; 929 d->bd_wcount = 0; 930 d->bd_wfcount = 0; 931 d->bd_wdcount = 0; 932 d->bd_zcopy = 0; 933 } 934 935 /* 936 * FIONREAD Check for read packet available. 937 * SIOCGIFADDR Get interface address - convenient hook to driver. 938 * BIOCGBLEN Get buffer len [for read()]. 939 * BIOCSETF Set read filter. 940 * BIOCSETFNR Set read filter without resetting descriptor. 941 * BIOCSETWF Set write filter. 942 * BIOCFLUSH Flush read packet buffer. 943 * BIOCPROMISC Put interface into promiscuous mode. 944 * BIOCGDLT Get link layer type. 945 * BIOCGETIF Get interface name. 946 * BIOCSETIF Set interface. 947 * BIOCSRTIMEOUT Set read timeout. 948 * BIOCGRTIMEOUT Get read timeout. 949 * BIOCGSTATS Get packet stats. 950 * BIOCIMMEDIATE Set immediate mode. 951 * BIOCVERSION Get filter language version. 952 * BIOCGHDRCMPLT Get "header already complete" flag 953 * BIOCSHDRCMPLT Set "header already complete" flag 954 * BIOCGDIRECTION Get packet direction flag 955 * BIOCSDIRECTION Set packet direction flag 956 * BIOCLOCK Set "locked" flag 957 * BIOCFEEDBACK Set packet feedback mode. 958 * BIOCSETZBUF Set current zero-copy buffer locations. 959 * BIOCGETZMAX Get maximum zero-copy buffer size. 960 * BIOCROTZBUF Force rotation of zero-copy buffer 961 * BIOCSETBUFMODE Set buffer mode. 962 * BIOCGETBUFMODE Get current buffer mode. 963 */ 964 /* ARGSUSED */ 965 static int 966 bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, 967 struct thread *td) 968 { 969 struct bpf_d *d; 970 int error; 971 972 error = devfs_get_cdevpriv((void **)&d); 973 if (error != 0) 974 return (error); 975 976 /* 977 * Refresh PID associated with this descriptor. 
978 */ 979 BPFD_LOCK(d); 980 d->bd_pid = td->td_proc->p_pid; 981 if (d->bd_state == BPF_WAITING) 982 callout_stop(&d->bd_callout); 983 d->bd_state = BPF_IDLE; 984 BPFD_UNLOCK(d); 985 986 if (d->bd_locked == 1) { 987 switch (cmd) { 988 case BIOCGBLEN: 989 case BIOCFLUSH: 990 case BIOCGDLT: 991 case BIOCGDLTLIST: 992 case BIOCGETIF: 993 case BIOCGRTIMEOUT: 994 case BIOCGSTATS: 995 case BIOCVERSION: 996 case BIOCGRSIG: 997 case BIOCGHDRCMPLT: 998 case BIOCFEEDBACK: 999 case FIONREAD: 1000 case BIOCLOCK: 1001 case BIOCSRTIMEOUT: 1002 case BIOCIMMEDIATE: 1003 case TIOCGPGRP: 1004 case BIOCROTZBUF: 1005 break; 1006 default: 1007 return (EPERM); 1008 } 1009 } 1010 CURVNET_SET(TD_TO_VNET(td)); 1011 switch (cmd) { 1012 1013 default: 1014 error = EINVAL; 1015 break; 1016 1017 /* 1018 * Check for read packet available. 1019 */ 1020 case FIONREAD: 1021 { 1022 int n; 1023 1024 BPFD_LOCK(d); 1025 n = d->bd_slen; 1026 if (d->bd_hbuf) 1027 n += d->bd_hlen; 1028 BPFD_UNLOCK(d); 1029 1030 *(int *)addr = n; 1031 break; 1032 } 1033 1034 case SIOCGIFADDR: 1035 { 1036 struct ifnet *ifp; 1037 1038 if (d->bd_bif == NULL) 1039 error = EINVAL; 1040 else { 1041 ifp = d->bd_bif->bif_ifp; 1042 error = (*ifp->if_ioctl)(ifp, cmd, addr); 1043 } 1044 break; 1045 } 1046 1047 /* 1048 * Get buffer len [for read()]. 1049 */ 1050 case BIOCGBLEN: 1051 *(u_int *)addr = d->bd_bufsize; 1052 break; 1053 1054 /* 1055 * Set buffer length. 1056 */ 1057 case BIOCSBLEN: 1058 error = bpf_ioctl_sblen(d, (u_int *)addr); 1059 break; 1060 1061 /* 1062 * Set link layer read filter. 1063 */ 1064 case BIOCSETF: 1065 case BIOCSETFNR: 1066 case BIOCSETWF: 1067 error = bpf_setf(d, (struct bpf_program *)addr, cmd); 1068 break; 1069 1070 /* 1071 * Flush read packet buffer. 1072 */ 1073 case BIOCFLUSH: 1074 BPFD_LOCK(d); 1075 reset_d(d); 1076 BPFD_UNLOCK(d); 1077 break; 1078 1079 /* 1080 * Put interface into promiscuous mode. 
1081 */ 1082 case BIOCPROMISC: 1083 if (d->bd_bif == NULL) { 1084 /* 1085 * No interface attached yet. 1086 */ 1087 error = EINVAL; 1088 break; 1089 } 1090 if (d->bd_promisc == 0) { 1091 error = ifpromisc(d->bd_bif->bif_ifp, 1); 1092 if (error == 0) 1093 d->bd_promisc = 1; 1094 } 1095 break; 1096 1097 /* 1098 * Get current data link type. 1099 */ 1100 case BIOCGDLT: 1101 if (d->bd_bif == NULL) 1102 error = EINVAL; 1103 else 1104 *(u_int *)addr = d->bd_bif->bif_dlt; 1105 break; 1106 1107 /* 1108 * Get a list of supported data link types. 1109 */ 1110 case BIOCGDLTLIST: 1111 if (d->bd_bif == NULL) 1112 error = EINVAL; 1113 else 1114 error = bpf_getdltlist(d, (struct bpf_dltlist *)addr); 1115 break; 1116 1117 /* 1118 * Set data link type. 1119 */ 1120 case BIOCSDLT: 1121 if (d->bd_bif == NULL) 1122 error = EINVAL; 1123 else 1124 error = bpf_setdlt(d, *(u_int *)addr); 1125 break; 1126 1127 /* 1128 * Get interface name. 1129 */ 1130 case BIOCGETIF: 1131 if (d->bd_bif == NULL) 1132 error = EINVAL; 1133 else { 1134 struct ifnet *const ifp = d->bd_bif->bif_ifp; 1135 struct ifreq *const ifr = (struct ifreq *)addr; 1136 1137 strlcpy(ifr->ifr_name, ifp->if_xname, 1138 sizeof(ifr->ifr_name)); 1139 } 1140 break; 1141 1142 /* 1143 * Set interface. 1144 */ 1145 case BIOCSETIF: 1146 error = bpf_setif(d, (struct ifreq *)addr); 1147 break; 1148 1149 /* 1150 * Set read timeout. 1151 */ 1152 case BIOCSRTIMEOUT: 1153 { 1154 struct timeval *tv = (struct timeval *)addr; 1155 1156 /* 1157 * Subtract 1 tick from tvtohz() since this isn't 1158 * a one-shot timer. 1159 */ 1160 if ((error = itimerfix(tv)) == 0) 1161 d->bd_rtout = tvtohz(tv) - 1; 1162 break; 1163 } 1164 1165 /* 1166 * Get read timeout. 1167 */ 1168 case BIOCGRTIMEOUT: 1169 { 1170 struct timeval *tv = (struct timeval *)addr; 1171 1172 tv->tv_sec = d->bd_rtout / hz; 1173 tv->tv_usec = (d->bd_rtout % hz) * tick; 1174 break; 1175 } 1176 1177 /* 1178 * Get packet stats. 
1179 */ 1180 case BIOCGSTATS: 1181 { 1182 struct bpf_stat *bs = (struct bpf_stat *)addr; 1183 1184 /* XXXCSJP overflow */ 1185 bs->bs_recv = d->bd_rcount; 1186 bs->bs_drop = d->bd_dcount; 1187 break; 1188 } 1189 1190 /* 1191 * Set immediate mode. 1192 */ 1193 case BIOCIMMEDIATE: 1194 d->bd_immediate = *(u_int *)addr; 1195 break; 1196 1197 case BIOCVERSION: 1198 { 1199 struct bpf_version *bv = (struct bpf_version *)addr; 1200 1201 bv->bv_major = BPF_MAJOR_VERSION; 1202 bv->bv_minor = BPF_MINOR_VERSION; 1203 break; 1204 } 1205 1206 /* 1207 * Get "header already complete" flag 1208 */ 1209 case BIOCGHDRCMPLT: 1210 *(u_int *)addr = d->bd_hdrcmplt; 1211 break; 1212 1213 /* 1214 * Set "header already complete" flag 1215 */ 1216 case BIOCSHDRCMPLT: 1217 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0; 1218 break; 1219 1220 /* 1221 * Get packet direction flag 1222 */ 1223 case BIOCGDIRECTION: 1224 *(u_int *)addr = d->bd_direction; 1225 break; 1226 1227 /* 1228 * Set packet direction flag 1229 */ 1230 case BIOCSDIRECTION: 1231 { 1232 u_int direction; 1233 1234 direction = *(u_int *)addr; 1235 switch (direction) { 1236 case BPF_D_IN: 1237 case BPF_D_INOUT: 1238 case BPF_D_OUT: 1239 d->bd_direction = direction; 1240 break; 1241 default: 1242 error = EINVAL; 1243 } 1244 } 1245 break; 1246 1247 case BIOCFEEDBACK: 1248 d->bd_feedback = *(u_int *)addr; 1249 break; 1250 1251 case BIOCLOCK: 1252 d->bd_locked = 1; 1253 break; 1254 1255 case FIONBIO: /* Non-blocking I/O */ 1256 break; 1257 1258 case FIOASYNC: /* Send signal on receive packets */ 1259 d->bd_async = *(int *)addr; 1260 break; 1261 1262 case FIOSETOWN: 1263 error = fsetown(*(int *)addr, &d->bd_sigio); 1264 break; 1265 1266 case FIOGETOWN: 1267 *(int *)addr = fgetown(&d->bd_sigio); 1268 break; 1269 1270 /* This is deprecated, FIOSETOWN should be used instead. */ 1271 case TIOCSPGRP: 1272 error = fsetown(-(*(int *)addr), &d->bd_sigio); 1273 break; 1274 1275 /* This is deprecated, FIOGETOWN should be used instead. 
*/ 1276 case TIOCGPGRP: 1277 *(int *)addr = -fgetown(&d->bd_sigio); 1278 break; 1279 1280 case BIOCSRSIG: /* Set receive signal */ 1281 { 1282 u_int sig; 1283 1284 sig = *(u_int *)addr; 1285 1286 if (sig >= NSIG) 1287 error = EINVAL; 1288 else 1289 d->bd_sig = sig; 1290 break; 1291 } 1292 case BIOCGRSIG: 1293 *(u_int *)addr = d->bd_sig; 1294 break; 1295 1296 case BIOCGETBUFMODE: 1297 *(u_int *)addr = d->bd_bufmode; 1298 break; 1299 1300 case BIOCSETBUFMODE: 1301 /* 1302 * Allow the buffering mode to be changed as long as we 1303 * haven't yet committed to a particular mode. Our 1304 * definition of commitment, for now, is whether or not a 1305 * buffer has been allocated or an interface attached, since 1306 * that's the point where things get tricky. 1307 */ 1308 switch (*(u_int *)addr) { 1309 case BPF_BUFMODE_BUFFER: 1310 break; 1311 1312 case BPF_BUFMODE_ZBUF: 1313 if (bpf_zerocopy_enable) 1314 break; 1315 /* FALLSTHROUGH */ 1316 1317 default: 1318 return (EINVAL); 1319 } 1320 1321 BPFD_LOCK(d); 1322 if (d->bd_sbuf != NULL || d->bd_hbuf != NULL || 1323 d->bd_fbuf != NULL || d->bd_bif != NULL) { 1324 BPFD_UNLOCK(d); 1325 return (EBUSY); 1326 } 1327 d->bd_bufmode = *(u_int *)addr; 1328 BPFD_UNLOCK(d); 1329 break; 1330 1331 case BIOCGETZMAX: 1332 return (bpf_ioctl_getzmax(td, d, (size_t *)addr)); 1333 1334 case BIOCSETZBUF: 1335 return (bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr)); 1336 1337 case BIOCROTZBUF: 1338 return (bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr)); 1339 } 1340 CURVNET_RESTORE(); 1341 return (error); 1342 } 1343 1344 /* 1345 * Set d's packet filter program to fp. If this file already has a filter, 1346 * free it and replace it. Returns EINVAL for bogus requests. 
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;
#ifdef BPF_JITTER
	bpf_jit_filter *ofunc;
#endif

	/*
	 * Select which filter slot the command targets (write filter for
	 * BIOCSETWF, read filter otherwise) and remember the old filter so
	 * it can be freed after the swap, outside the descriptor lock.
	 *
	 * NOTE(review): the old filter pointers are read here without
	 * holding BPFD_LOCK; confirm that concurrent BIOCSETF/BIOCSETWF
	 * calls on the same descriptor are serialized by the caller,
	 * otherwise two racing threads could free the same filter.
	 */
	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
#ifdef BPF_JITTER
		ofunc = NULL;
#endif
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
#ifdef BPF_JITTER
		ofunc = d->bd_bfilter;
#endif
	}
	if (fp->bf_insns == NULL) {
		/* A NULL program with a non-zero length is malformed. */
		if (fp->bf_len != 0)
			return (EINVAL);
		/*
		 * Removing the filter: clear the slot under the lock, then
		 * release the old program after dropping it.
		 */
		BPFD_LOCK(d);
		if (wfilter)
			d->bd_wfilter = NULL;
		else {
			d->bd_rfilter = NULL;
#ifdef BPF_JITTER
			d->bd_bfilter = NULL;
#endif
			/* A new read filter discards already-buffered packets. */
			if (cmd == BIOCSETF)
				reset_d(d);
		}
		BPFD_UNLOCK(d);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif
		return (0);
	}
	flen = fp->bf_len;
	if (flen > bpf_maxinsns)
		return (EINVAL);

	/* Program size in bytes; flen was bounded by bpf_maxinsns above. */
	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	/*
	 * Copy the program in from user space and verify it is a valid
	 * BPF program before installing it.
	 */
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		BPFD_LOCK(d);
		if (wfilter)
			d->bd_wfilter = fcode;
		else {
			d->bd_rfilter = fcode;
#ifdef BPF_JITTER
			/* JIT-compile the new read filter (may yield NULL). */
			d->bd_bfilter = bpf_jitter(fcode, flen);
#endif
			if (cmd == BIOCSETF)
				reset_d(d);
		}
		BPFD_UNLOCK(d);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif

		return (0);
	}
	/* Copyin failed or the program did not validate. */
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	struct ifnet *theywant;

	/* Look the interface up by name; it must exist and support BPF. */
	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL || theywant->if_bpf == NULL)
		return (ENXIO);

	bp = theywant->if_bpf;

	/*
	 * Behavior here depends on the buffering model.  If we're using
	 * kernel memory buffers, then we can allocate them here.  If we're
	 * using zero-copy, then the user process must have registered
	 * buffers by the time we get here.  If not, return an error.
	 *
	 * XXXRW: There are locking issues here with multi-threaded use: what
	 * if two threads try to set the interface at once?
	 */
	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		if (d->bd_sbuf == NULL)
			bpf_buffer_alloc(d);
		KASSERT(d->bd_sbuf != NULL, ("bpf_setif: bd_sbuf NULL"));
		break;

	case BPF_BUFMODE_ZBUF:
		if (d->bd_sbuf == NULL)
			return (EINVAL);
		break;

	default:
		panic("bpf_setif: bufmode %d", d->bd_bufmode);
	}
	if (bp != d->bd_bif) {
		if (d->bd_bif)
			/*
			 * Detach if attached to something else.
			 */
			bpf_detachd(d);

		bpf_attachd(d, bp);
	}
	/* Start with a clean slate on the (possibly new) interface. */
	BPFD_LOCK(d);
	reset_d(d);
	BPFD_UNLOCK(d);
	return (0);
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct cdev *dev, int events, struct thread *td)
{
	struct bpf_d *d;
	int revents;

	/*
	 * With no per-open state or no attached interface, report all
	 * requested events as ready so the caller does not block forever.
	 */
	if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
		return (events &
		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));

	/*
	 * Refresh PID associated with this descriptor.
	 */
	revents = events & (POLLOUT | POLLWRNORM);	/* writes never block */
	BPFD_LOCK(d);
	d->bd_pid = td->td_proc->p_pid;
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			/* Not readable yet: register for wakeup. */
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}

/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int
bpfkqfilter(struct cdev *dev, struct knote *kn)
{
	struct bpf_d *d;

	/* Only EVFILT_READ is supported on BPF descriptors. */
	if (devfs_get_cdevpriv((void **)&d) != 0 ||
	    kn->kn_filter != EVFILT_READ)
		return (1);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	d->bd_pid = curthread->td_proc->p_pid;
	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	knlist_add(&d->bd_sel.si_note, kn, 1);
	BPFD_UNLOCK(d);

	return (0);
}

/* Unhook a knote from this descriptor's select/kevent notification list. */
static void
filt_bpfdetach(struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	knlist_remove(&d->bd_sel.si_note, kn, 0);
}

/*
 * kevent read filter: report whether buffered data is ready and, if so,
 * how many bytes.  If not ready, arm the read timeout as bpfpoll() does.
 */
static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK_ASSERT(d);
	ready = bpf_ready(d);
	if (ready) {
		kn->kn_data = d->bd_slen;
		/* A held buffer also counts toward readable data. */
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	}
	else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}

	return (ready);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.
 * The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
	u_int slen;
	int gottime;
	struct timeval tv;

	gottime = 0;
	BPFIF_LOCK(bp);
	/* Run every listener's filter against the packet. */
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		BPFD_LOCK(d);
		++d->bd_rcount;
		/*
		 * NB: We dont call BPF_CHECK_DIRECTION() here since there is no
		 * way for the caller to indiciate to us whether this packet
		 * is inbound or outbound.  In the bpf_mtap() routines, we use
		 * the interface pointers on the mbuf to figure it out.
		 */
#ifdef BPF_JITTER
		if (bpf_jitter_enable != 0 && d->bd_bfilter != NULL)
			slen = (*(d->bd_bfilter->func))(pkt, pktlen, pktlen);
		else
#endif
			slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		/* slen is the snap length the filter accepted (0 = reject). */
		if (slen != 0) {
			d->bd_fcount++;
			/* Timestamp lazily, at most once per packet. */
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen,
				    bpf_append_bytes, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * True when the descriptor's direction setting says to skip a packet whose
 * receive interface is (r), relative to the tapped interface (i):
 * inbound-only descriptors skip packets not received on (i); outbound-only
 * descriptors skip packets that were received on (i).
 */
#define	BPF_CHECK_DIRECTION(d, r, i)				\
	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	gottime = 0;

	pktlen = m_length(m, NULL);

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
#ifdef BPF_JITTER
		/* XXX We cannot handle multiple mbufs. */
		if (bpf_jitter_enable != 0 && d->bd_bfilter != NULL &&
		    m->m_next == NULL)
			slen = (*(d->bd_bfilter->func))(mtod(m, u_char *),
			    pktlen, pktlen);
		else
#endif
		/* buflen 0 signals bpf_filter() that "pkt" is really an mbuf. */
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_append_mbuf, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	gottime = 0;

	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_append_mbuf, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

#undef	BPF_CHECK_DIRECTION

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "cpfn" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_append_mbuf is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
    struct timeval *tv)
{
	struct bpf_hdr hdr;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	int do_wakeup = 0;

	BPFD_LOCK_ASSERT(d);

	/*
	 * Detect whether user space has released a buffer back to us, and if
	 * so, move it from being a hold buffer to a free buffer.  This may
	 * not be the best place to do it (for example, we might only want to
	 * run this check if we need the space), but for now it's a reliable
	 * spot to do it.
	 */
	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}

	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 *
	 * Drop the packet if there's no room and no hope of room
	 * If the packet would overflow the storage buffer or the storage
	 * buffer is considered immutable by the buffer model, try to rotate
	 * the buffer and wakeup pending processes.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
		if (d->bd_fbuf == NULL) {
			/*
			 * There's no room in the store buffer, and no
			 * prospect of room, so drop the packet.  Notify the
			 * buffer model.
			 */
			bpf_buffull(d);
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has already
		 * expired during a select call.  A packet arrived, so the
		 * reader should be woken up.
		 */
		do_wakeup = 1;

	/*
	 * Append the bpf header.  Note we append the actual header size, but
	 * move forward the length of the header plus padding.
	 */
	bzero(&hdr, sizeof(hdr));
	hdr.bh_tstamp = *tv;
	hdr.bh_datalen = pktlen;
	hdr.bh_hdrlen = hdrlen;
	hdr.bh_caplen = totlen - hdrlen;
	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));

	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, hdr.bh_caplen);
	d->bd_slen = curlen + totlen;

	if (do_wakeup)
		bpf_wakeup(d);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{

	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it yet hasn't been marked
	 * free.
	 */
	bpf_free(d);
	if (d->bd_rfilter) {
		free((caddr_t)d->bd_rfilter, M_BPF);
#ifdef BPF_JITTER
		/*
		 * NOTE(review): bd_bfilter may be NULL if bpf_jitter()
		 * failed when the filter was installed; confirm
		 * bpf_destroy_jit_filter() tolerates a NULL argument.
		 */
		bpf_destroy_jit_filter(d->bd_bfilter);
#endif
	}
	if (d->bd_wfilter)
		free((caddr_t)d->bd_wfilter, M_BPF);
	mtx_destroy(&d->bd_mtx);
}

/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
	struct bpf_if *bp;

	/* M_NOWAIT: attachment may run in contexts that cannot sleep. */
	bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
	if (bp == NULL)
		panic("bpfattach");

	LIST_INIT(&bp->bif_dlist);
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);
	KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
	*driverp = bp;

	mtx_lock(&bpf_mtx);
	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
	mtx_unlock(&bpf_mtx);

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	if (bootverbose)
		if_printf(ifp, "bpf attached\n");
}

/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp;
	struct bpf_d *d;

	/* Locate BPF interface information */
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (ifp == bp->bif_ifp)
			break;
	}

	/* Interface wasn't attached */
	if ((bp == NULL) || (bp->bif_ifp == NULL)) {
		mtx_unlock(&bpf_mtx);
		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
		return;
	}

	LIST_REMOVE(bp, bif_next);
	mtx_unlock(&bpf_mtx);

	/* Detach and wake every descriptor still bound to this interface. */
	while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
		bpf_detachd(d);
		BPFD_LOCK(d);
		bpf_wakeup(d);
		BPFD_UNLOCK(d);
	}

	mtx_destroy(&bp->bif_mtx);
	free(bp, M_BPF);
}

/*
 * Get a list of available data link type of the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		/* NULL bfl_list means the caller only wants the count. */
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len) {
				mtx_unlock(&bpf_mtx);
				return (ENOMEM);
			}
			/*
			 * NOTE(review): copyout() to a user buffer while
			 * bpf_mtx is held can fault; confirm this cannot
			 * sleep with the mutex held.
			 */
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	mtx_unlock(&bpf_mtx);
	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	/* Nothing to do if the descriptor already uses this DLT. */
	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	/* Find a BPF attachment of the same interface with the wanted DLT. */
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	mtx_unlock(&bpf_mtx);
	if (bp != NULL) {
		/* Remember promiscuity so it survives the reattach. */
		opromisc = d->bd_promisc;
		bpf_detachd(d);
		bpf_attachd(d, bp);
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error)
				if_printf(bp->bif_ifp,
					"bpf_setdlt: ifpromisc failed (%d)\n",
					error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}

/* Initialize global BPF state and create /dev/bpf (plus legacy /dev/bpf0). */
static void
bpf_drvinit(void *unused)
{
	struct cdev *dev;

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	LIST_INIT(&bpf_iflist);

	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
	/* For compatibility */
	make_dev_alias(dev, "bpf0");

}

/*
 * Copy one descriptor's state into the stable xbpf_d format exported by
 * the net.bpf.stats sysctl.  The descriptor lock must be held.
 */
static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{

	bzero(d, sizeof(*d));
	BPFD_LOCK_ASSERT(bd);
	d->bd_structsize = sizeof(*d);
	d->bd_immediate = bd->bd_immediate;
	d->bd_promisc = bd->bd_promisc;
	d->bd_hdrcmplt = bd->bd_hdrcmplt;
	d->bd_direction = bd->bd_direction;
	d->bd_feedback = bd->bd_feedback;
	d->bd_async = bd->bd_async;
	d->bd_rcount = bd->bd_rcount;
	d->bd_dcount = bd->bd_dcount;
	d->bd_fcount = bd->bd_fcount;
	d->bd_sig = bd->bd_sig;
	d->bd_slen = bd->bd_slen;
	d->bd_hlen = bd->bd_hlen;
	d->bd_bufsize = bd->bd_bufsize;
	d->bd_pid = bd->bd_pid;
	/*
	 * NOTE(review): bd_bif is dereferenced without a NULL check; confirm
	 * every descriptor reachable from bif_dlist always has bd_bif set.
	 */
	strlcpy(d->bd_ifname,
	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
	d->bd_locked = bd->bd_locked;
	d->bd_wcount = bd->bd_wcount;
	d->bd_wdcount = bd->bd_wdcount;
	d->bd_wfcount = bd->bd_wfcount;
	d->bd_zcopy = bd->bd_zcopy;
	d->bd_bufmode = bd->bd_bufmode;
}

/* Handler for the net.bpf.stats sysctl: dump per-descriptor statistics. */
static int
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct xbpf_d *xbdbuf, *xbd;
	int index, error;
	struct bpf_if *bp;
	struct bpf_d *bd;

	/*
	 * XXX This is not technically correct.  It is possible for non
	 * privileged users to open bpf devices.  It would make sense
	 * if the users who opened the devices were able to retrieve
	 * the statistics for them, too.
	 */
	error = priv_check(req->td, PRIV_NET_BPF);
	if (error)
		return (error);
	/* Size-probe request: report the space needed. */
	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
	if (bpf_bpfd_cnt == 0)
		return (SYSCTL_OUT(req, 0, 0));
	/* Allocate before taking bpf_mtx; re-check the size under the lock. */
	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
	mtx_lock(&bpf_mtx);
	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
		mtx_unlock(&bpf_mtx);
		free(xbdbuf, M_BPF);
		return (ENOMEM);
	}
	index = 0;
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_LOCK(bp);
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_LOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_UNLOCK(bd);
		}
		BPFIF_UNLOCK(bp);
	}
	mtx_unlock(&bpf_mtx);
	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
	free(xbdbuf, M_BPF);
	return (error);
}

SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE,bpf_drvinit,NULL);

#else /* !DEV_BPF && !NETGRAPH_BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
2118 */ 2119 static struct bpf_if bp_null; 2120 2121 void 2122 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) 2123 { 2124 } 2125 2126 void 2127 bpf_mtap(struct bpf_if *bp, struct mbuf *m) 2128 { 2129 } 2130 2131 void 2132 bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m) 2133 { 2134 } 2135 2136 void 2137 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) 2138 { 2139 2140 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf); 2141 } 2142 2143 void 2144 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 2145 { 2146 2147 *driverp = &bp_null; 2148 } 2149 2150 void 2151 bpfdetach(struct ifnet *ifp) 2152 { 2153 } 2154 2155 u_int 2156 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen) 2157 { 2158 return -1; /* "no filter" behaviour */ 2159 } 2160 2161 int 2162 bpf_validate(const struct bpf_insn *f, int len) 2163 { 2164 return 0; /* false */ 2165 } 2166 2167 #endif /* !DEV_BPF && !NETGRAPH_BPF */ 2168