/*-
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.4 (Berkeley) 1/9/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bpf.h"
#include "opt_mac.h"
#include "opt_netgraph.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/uio.h>
#include <sys/vimage.h>

#include <sys/event.h>
#include <sys/file.h>
#include <sys/poll.h>
#include <sys/proc.h>

#include <sys/socket.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpf_buffer.h>
#ifdef BPF_JITTER
#include <net/bpf_jitter.h>
#endif
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <net80211/ieee80211_freebsd.h>

#include <security/mac/mac_framework.h>

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

#define PRINET	26			/* interruptible */

/*
 * bpf_iflist is a list of BPF interface structures, each corresponding to a
 * specific DLT.  The same network interface might have several BPF interface
 * structures registered by different layers in the stack (i.e., 802.11
 * frames, ethernet frames, etc).
 */
static LIST_HEAD(, bpf_if)	bpf_iflist;
static struct mtx	bpf_mtx;		/* bpf global lock */
static int		bpf_bpfd_cnt;

static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static int	bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
		    struct sockaddr *, int *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void	bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
		    void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
		    struct timeval *);
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	filt_bpfdetach(struct knote *);
static int	filt_bpfread(struct knote *, long);
static void	bpf_drvinit(void *);
static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
int bpf_maxinsns = BPF_MAXINSNS;
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
    &bpf_maxinsns, 0, "Maximum bpf program instructions");
static int bpf_zerocopy_enable = 0;
SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
    &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
    bpf_stats_sysctl, "bpf statistics portal");

static	d_open_t	bpfopen;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;
static	d_kqfilter_t	bpfkqfilter;

static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	bpfopen,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_kqfilter =	bpfkqfilter,
};

static struct filterops bpfread_filtops =
	{ 1, NULL, filt_bpfdetach, filt_bpfread };

/*
 * Wrapper functions for various buffering methods.  If the set of buffer
 * modes expands, we will probably want to introduce a switch data structure
 * similar to protosw, et al.
 */
static void
bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_bytes(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		d->bd_zcopy++;
		return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_bytes");
	}
}

static void
bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_mbuf(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		d->bd_zcopy++;
		return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_mbuf");
	}
}
/*
 * This function gets called when the free buffer is re-assigned.
 */
static void
bpf_buf_reclaimed(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return;

	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buf_reclaimed(d);
		return;

	default:
		panic("bpf_buf_reclaimed");
	}
}

/*
 * If the buffer mechanism has a way to decide that a held buffer can be made
 * free, then it is exposed via the bpf_canfreebuf() interface.  (1) is
 * returned if the buffer can be discarded, (0) is returned if it cannot.
 */
static int
bpf_canfreebuf(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canfreebuf(d));
	}
	return (0);
}

/*
 * Allow the buffer model to indicate that the current store buffer is
 * immutable, regardless of the appearance of space.  Return (1) if the
 * buffer is writable, and (0) if not.
 */
static int
bpf_canwritebuf(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canwritebuf(d));
	}
	return (1);
}

/*
 * Notify buffer model that an attempt to write to the store buffer has
 * resulted in a dropped packet, in which case the buffer may be considered
 * full.
 */
static void
bpf_buffull(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buffull(d);
		break;
	}
}

/*
 * Notify the buffer model that a buffer has moved into the hold position.
 */
void
bpf_bufheld(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_bufheld(d);
		break;
	}
}

static void
bpf_free(struct bpf_d *d)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_free(d));

	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_free(d));

	default:
		panic("bpf_buf_free");
	}
}

static int
bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_uiomove(d, buf, len, uio));
}

static int
bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_ioctl_sblen(d, i));
}

static int
bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_getzmax(td, d, i));
}

static int
bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
}

static int
bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
}
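/*
 * Example (illustrative sketch, not part of this file): a userland process
 * would set up a zero-copy buffer session roughly as follows, assuming the
 * struct bpf_zbuf layout from <net/bpf.h>.  The buffers must be registered
 * with BIOCSETZBUF before the descriptor is bound with BIOCSETIF, and
 * net.bpf.zerocopy_enable must be non-zero.  Error handling is omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <net/bpf.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	u_int mode = BPF_BUFMODE_ZBUF;
 *	size_t zmax;
 *	struct bpf_zbuf bz;
 *
 *	ioctl(fd, BIOCSETBUFMODE, &mode);
 *	ioctl(fd, BIOCGETZMAX, &zmax);		// largest allowed buffer
 *	bz.bz_buflen = zmax;			// page-aligned length
 *	bz.bz_bufa = mmap(NULL, zmax, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	bz.bz_bufb = mmap(NULL, zmax, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	ioctl(fd, BIOCSETZBUF, &bz);		// must precede BIOCSETIF
 */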
/*
 * General BPF functions.
 */
static int
bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
    struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
{
	const struct ieee80211_bpf_params *p;
	struct ether_header *eh;
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_NULL:
		/*
		 * null interface types require a 4 byte pseudo header which
		 * corresponds to the address family of the packet.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;

	if (len - hlen > ifp->if_mtu)
		return (EMSGSIZE);

	if ((unsigned)len > MJUM16BYTES)
		return (EIO);

	if (len <= MHLEN)
		MGETHDR(m, M_WAIT, MT_DATA);
	else if (len <= MCLBYTES)
		m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
	else
		m = m_getjcl(M_WAIT, MT_DATA, M_PKTHDR,
#if (MJUMPAGESIZE > MCLBYTES)
		    len <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    (len <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES));
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/* Check for multicast destination */
	switch (linktype) {
	case DLT_EN10MB:
		eh = mtod(m, struct ether_header *);
		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
			    ETHER_ADDR_LEN) == 0)
				m->m_flags |= M_BCAST;
			else
				m->m_flags |= M_MCAST;
		}
		break;
	}

	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     zero.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(m->m_data, sockp->sa_data, hlen);
	}
	*hdrlen = hlen;

	return (0);
bad:
	m_freem(m);
	return (error);
}
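/*
 * Example (illustrative sketch, not part of this file): injecting a packet
 * through a BPF descriptor.  On a DLT_EN10MB interface the buffer handed to
 * write(2) starts with the complete Ethernet header, which bpf_movein()
 * above peels off into the sockaddr.  The interface name "em0" and the
 * frame[]/framelen variables are placeholders; error handling is omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <net/bpf.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);	// bind descriptor to interface
 *	write(fd, frame, framelen);	// frame[] = Ethernet header + payload
 */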
/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	BPFIF_LOCK(bp);
	d->bd_bif = bp;
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	bpf_bpfd_cnt++;
	BPFIF_UNLOCK(bp);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	bp = d->bd_bif;
	BPFIF_LOCK(bp);
	BPFD_LOCK(d);
	ifp = d->bd_bif->bif_ifp;

	/*
	 * Remove d from the interface's descriptor list.
	 */
	LIST_REMOVE(d, bd_next);

	bpf_bpfd_cnt--;
	d->bd_bif = NULL;
	BPFD_UNLOCK(d);
	BPFIF_UNLOCK(bp);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		CURVNET_SET(ifp->if_vnet);
		error = ifpromisc(ifp, 0);
		CURVNET_RESTORE();
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged.
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
			    "bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
static void
bpf_dtor(void *data)
{
	struct bpf_d *d = data;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);
	mtx_lock(&bpf_mtx);
	if (d->bd_bif)
		bpf_detachd(d);
	mtx_unlock(&bpf_mtx);
	selwakeuppri(&d->bd_sel, PRINET);
#ifdef MAC
	mac_bpfdesc_destroy(d);
#endif /* MAC */
	knlist_destroy(&d->bd_sel.si_note);
	bpf_freed(d);
	free(d, M_BPF);
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d;
	int error;

	d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(d, bpf_dtor);
	if (error != 0) {
		free(d, M_BPF);
		return (error);
	}

	/*
	 * For historical reasons, perform a one-time initialization call to
	 * the buffer routines, even though we're not yet committed to a
	 * particular buffer method.
	 */
	bpf_buffer_init(d);
	d->bd_bufmode = BPF_BUFMODE_BUFFER;
	d->bd_sig = SIGIO;
	d->bd_direction = BPF_D_INOUT;
	d->bd_pid = td->td_proc->p_pid;
#ifdef MAC
	mac_bpfdesc_init(d);
	mac_bpfdesc_create(td->td_ucred, d);
#endif
	mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init(&d->bd_callout, CALLOUT_MPSAFE);
	knlist_init(&d->bd_sel.si_note, &d->bd_mtx, NULL, NULL, NULL);

	return (0);
}
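/*
 * Example (illustrative sketch, not part of this file): minimal capture
 * setup from userland.  Each open of /dev/bpf gets its own private
 * descriptor (via devfs_set_cdevpriv() above) in BPF_BUFMODE_BUFFER mode;
 * the descriptor becomes useful once it is bound to an interface.  The
 * interface name "em0" is a placeholder; error handling is omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <net/bpf.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr;
 *	u_int blen;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);	// bind; kernel buffers allocated here
 *	ioctl(fd, BIOCGBLEN, &blen);	// read() must use exactly this size
 */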
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	int timed_out;
	int error;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	BPFD_LOCK(d);
	d->bd_pid = curthread->td_proc->p_pid;
	if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
		BPFD_UNLOCK(d);
		return (EOPNOTSUPP);
	}
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (ioflag & O_NONBLOCK) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
		    "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 *
	 * XXXRW: More synchronization needed here: what if a second thread
	 * issues a read on the same fd at the same time?  Don't want this
	 * getting invalidated.
	 */
	error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);

	BPFD_LOCK(d);
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	bpf_buf_reclaimed(d);
	BPFD_UNLOCK(d);

	return (error);
}

/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
}

static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	BPFD_UNLOCK(d);
}

static int
bpf_ready(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
		return (1);
	if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
	    d->bd_slen != 0)
		return (1);
	return (0);
}
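/*
 * Example (illustrative sketch, not part of this file): consuming a read
 * buffer from userland.  A single read(2) can return several packets, each
 * preceded by a struct bpf_hdr and padded so that the next header starts on
 * a longword boundary; BPF_WORDALIGN() recovers the stride.  The
 * handle_packet() helper is hypothetical; error handling is omitted.
 *
 *	char *buf = malloc(blen);		// blen from BIOCGBLEN
 *	ssize_t n = read(fd, buf, blen);	// must ask for exactly blen
 *	char *p = buf;
 *
 *	while (p < buf + n) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		u_char *pkt = (u_char *)p + bh->bh_hdrlen;
 *
 *		handle_packet(pkt, bh->bh_caplen);
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */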
static int
bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	struct ifnet *ifp;
	struct mbuf *m, *mc;
	struct sockaddr dst;
	int error, hlen;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	d->bd_pid = curthread->td_proc->p_pid;
	d->bd_wcount++;
	if (d->bd_bif == NULL) {
		d->bd_wdcount++;
		return (ENXIO);
	}

	ifp = d->bd_bif->bif_ifp;

	if ((ifp->if_flags & IFF_UP) == 0) {
		d->bd_wdcount++;
		return (ENETDOWN);
	}

	if (uio->uio_resid == 0) {
		d->bd_wdcount++;
		return (0);
	}

	bzero(&dst, sizeof(dst));
	m = NULL;
	hlen = 0;
	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
	    &m, &dst, &hlen, d->bd_wfilter);
	if (error) {
		d->bd_wdcount++;
		return (error);
	}
	d->bd_wfcount++;
	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	if (d->bd_feedback) {
		mc = m_dup(m, M_DONTWAIT);
		if (mc != NULL)
			mc->m_pkthdr.rcvif = ifp;
		/* Set M_PROMISC for outgoing packets to be discarded. */
		if (d->bd_direction == BPF_D_INOUT)
			m->m_flags |= M_PROMISC;
	} else
		mc = NULL;

	m->m_pkthdr.len -= hlen;
	m->m_len -= hlen;
	m->m_data += hlen;	/* XXX */

	CURVNET_SET(ifp->if_vnet);
#ifdef MAC
	BPFD_LOCK(d);
	mac_bpfdesc_create_mbuf(d, m);
	if (mc != NULL)
		mac_bpfdesc_create_mbuf(d, mc);
	BPFD_UNLOCK(d);
#endif

	error = (*ifp->if_output)(ifp, m, &dst, NULL);
	if (error)
		d->bd_wdcount++;

	if (mc != NULL) {
		if (error == 0)
			(*ifp->if_input)(ifp, mc);
		else
			m_freem(mc);
	}
	CURVNET_RESTORE();

	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the receive
 * and drop counts.  This is doable for kernel-only buffers, but with
 * zero-copy buffers, we can't write to (or rotate) buffers that are
 * currently owned by userspace.  It would be nice if we could encapsulate
 * this logic in the buffer code rather than here.
 */
static void
reset_d(struct bpf_d *d)
{

	mtx_assert(&d->bd_mtx, MA_OWNED);

	if ((d->bd_hbuf != NULL) &&
	    (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}
	if (bpf_canwritebuf(d))
		d->bd_slen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
	d->bd_fcount = 0;
	d->bd_wcount = 0;
	d->bd_wfcount = 0;
	d->bd_wdcount = 0;
	d->bd_zcopy = 0;
}

/*
 * FIONREAD		Check for read packet available.
 * SIOCGIFADDR		Get interface address - convenient hook to driver.
 * BIOCGBLEN		Get buffer len [for read()].
 * BIOCSETF		Set read filter.
 * BIOCSETFNR		Set read filter without resetting descriptor.
 * BIOCSETWF		Set write filter.
 * BIOCFLUSH		Flush read packet buffer.
 * BIOCPROMISC		Put interface into promiscuous mode.
 * BIOCGDLT		Get link layer type.
 * BIOCGETIF		Get interface name.
 * BIOCSETIF		Set interface.
 * BIOCSRTIMEOUT	Set read timeout.
 * BIOCGRTIMEOUT	Get read timeout.
 * BIOCGSTATS		Get packet stats.
 * BIOCIMMEDIATE	Set immediate mode.
 * BIOCVERSION		Get filter language version.
 * BIOCGHDRCMPLT	Get "header already complete" flag
 * BIOCSHDRCMPLT	Set "header already complete" flag
 * BIOCGDIRECTION	Get packet direction flag
 * BIOCSDIRECTION	Set packet direction flag
 * BIOCLOCK		Set "locked" flag
 * BIOCFEEDBACK		Set packet feedback mode.
 * BIOCSETZBUF		Set current zero-copy buffer locations.
 * BIOCGETZMAX		Get maximum zero-copy buffer size.
 * BIOCROTZBUF		Force rotation of zero-copy buffer
 * BIOCSETBUFMODE	Set buffer mode.
 * BIOCGETBUFMODE	Get current buffer mode.
 */
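/*
 * Example (illustrative sketch, not part of this file): typical descriptor
 * configuration.  BIOCSBLEN must run before the interface is bound, since
 * buffers are allocated at bind time; BIOCIMMEDIATE and BIOCSRTIMEOUT tune
 * when bpfread() above returns.  Error handling is omitted.
 *
 *	u_int blen = 65536;
 *	u_int imm = 1;
 *	struct timeval tv = { 1, 0 };	// 1 second read timeout
 *
 *	ioctl(fd, BIOCSBLEN, &blen);	// request buffer size (pre-BIOCSETIF)
 *	ioctl(fd, BIOCIMMEDIATE, &imm);	// return as soon as a packet arrives
 *	ioctl(fd, BIOCSRTIMEOUT, &tv);	// bound the time a read can block
 */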
/* ARGSUSED */
static int
bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct bpf_d *d;
	int error;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	d->bd_pid = td->td_proc->p_pid;
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);

	if (d->bd_locked == 1) {
		switch (cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case BIOCFEEDBACK:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
		case BIOCROTZBUF:
			break;
		default:
			return (EPERM);
		}
	}
	CURVNET_SET(TD_TO_VNET(td));
	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_LOCK(d);
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_UNLOCK(d);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		error = bpf_ioctl_sblen(d, (u_int *)addr);
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETFNR:
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)addr, cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}
	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			/* XXXCSJP overflow */
			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get packet direction flag
	 */
	case BIOCGDIRECTION:
		*(u_int *)addr = d->bd_direction;
		break;

	/*
	 * Set packet direction flag
	 */
	case BIOCSDIRECTION:
		{
			u_int	direction;

			direction = *(u_int *)addr;
			switch (direction) {
			case BPF_D_IN:
			case BPF_D_INOUT:
			case BPF_D_OUT:
				d->bd_direction = direction;
				break;
			default:
				error = EINVAL;
			}
		}
		break;

	case BIOCFEEDBACK:
		d->bd_feedback = *(u_int *)addr;
		break;

	case BIOCLOCK:
		d->bd_locked = 1;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;

	case BIOCGETBUFMODE:
		*(u_int *)addr = d->bd_bufmode;
		break;

	case BIOCSETBUFMODE:
		/*
		 * Allow the buffering mode to be changed as long as we
		 * haven't yet committed to a particular mode.  Our
		 * definition of commitment, for now, is whether or not a
		 * buffer has been allocated or an interface attached, since
		 * that's the point where things get tricky.
		 */
		switch (*(u_int *)addr) {
		case BPF_BUFMODE_BUFFER:
			break;

		case BPF_BUFMODE_ZBUF:
			if (bpf_zerocopy_enable)
				break;
			/* FALLSTHROUGH */

		default:
			return (EINVAL);
		}

		BPFD_LOCK(d);
		if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
		    d->bd_fbuf != NULL || d->bd_bif != NULL) {
			BPFD_UNLOCK(d);
			return (EBUSY);
		}
		d->bd_bufmode = *(u_int *)addr;
		BPFD_UNLOCK(d);
		break;

	case BIOCGETZMAX:
		return (bpf_ioctl_getzmax(td, d, (size_t *)addr));

	case BIOCSETZBUF:
		return (bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr));

	case BIOCROTZBUF:
		return (bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr));
	}
	CURVNET_RESTORE();
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;
#ifdef BPF_JITTER
	bpf_jit_filter *ofunc;
#endif

	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
#ifdef BPF_JITTER
		ofunc = NULL;
#endif
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
#ifdef BPF_JITTER
		ofunc = d->bd_bfilter;
#endif
	}
	if (fp->bf_insns == NULL) {
		if (fp->bf_len != 0)
			return (EINVAL);
		BPFD_LOCK(d);
		if (wfilter)
			d->bd_wfilter = NULL;
		else {
			d->bd_rfilter = NULL;
#ifdef BPF_JITTER
			d->bd_bfilter = NULL;
#endif
			if (cmd == BIOCSETF)
				reset_d(d);
		}
		BPFD_UNLOCK(d);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif
		return (0);
	}
	flen = fp->bf_len;
	if (flen > bpf_maxinsns)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		BPFD_LOCK(d);
		if (wfilter)
			d->bd_wfilter = fcode;
		else {
			d->bd_rfilter = fcode;
#ifdef BPF_JITTER
			d->bd_bfilter = bpf_jitter(fcode, flen);
#endif
			if (cmd == BIOCSETF)
				reset_d(d);
		}
		BPFD_UNLOCK(d);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}
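/*
 * Example (illustrative sketch, not part of this file): installing a read
 * filter with BIOCSETF.  This classic program accepts only IPv4 packets on
 * a DLT_EN10MB interface by testing the Ethernet type field at offset 12;
 * a return value of (u_int)-1 captures the whole packet, 0 rejects it.
 * Error handling is omitted.
 *
 *	#include <net/bpf.h>
 *	#include <net/ethernet.h>
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_LD+BPF_H+BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ETHERTYPE_IP, 0, 1),
 *		BPF_STMT(BPF_RET+BPF_K, (u_int)-1),
 *		BPF_STMT(BPF_RET+BPF_K, 0),
 *	};
 *	struct bpf_program prog = {
 *		sizeof(insns) / sizeof(insns[0]), insns
 *	};
 *
 *	ioctl(fd, BIOCSETF, &prog);
 */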
/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL || theywant->if_bpf == NULL)
		return (ENXIO);

	bp = theywant->if_bpf;

	/*
	 * Behavior here depends on the buffering model.  If we're using
	 * kernel memory buffers, then we can allocate them here.  If we're
	 * using zero-copy, then the user process must have registered
	 * buffers by the time we get here.  If not, return an error.
	 *
	 * XXXRW: There are locking issues here with multi-threaded use: what
	 * if two threads try to set the interface at once?
	 */
	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		if (d->bd_sbuf == NULL)
			bpf_buffer_alloc(d);
		KASSERT(d->bd_sbuf != NULL, ("bpf_setif: bd_sbuf NULL"));
		break;

	case BPF_BUFMODE_ZBUF:
		if (d->bd_sbuf == NULL)
			return (EINVAL);
		break;

	default:
		panic("bpf_setif: bufmode %d", d->bd_bufmode);
	}
	if (bp != d->bd_bif) {
		if (d->bd_bif)
			/*
			 * Detach if attached to something else.
			 */
			bpf_detachd(d);

		bpf_attachd(d, bp);
	}
	BPFD_LOCK(d);
	reset_d(d);
	BPFD_UNLOCK(d);
	return (0);
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct cdev *dev, int events, struct thread *td)
{
	struct bpf_d *d;
	int revents;

	if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
		return (events &
		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));

	/*
	 * Refresh PID associated with this descriptor.
	 */
	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_LOCK(d);
	d->bd_pid = td->td_proc->p_pid;
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}

/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int
bpfkqfilter(struct cdev *dev, struct knote *kn)
{
	struct bpf_d *d;

	if (devfs_get_cdevpriv((void **)&d) != 0 ||
	    kn->kn_filter != EVFILT_READ)
		return (1);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	d->bd_pid = curthread->td_proc->p_pid;
	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	knlist_add(&d->bd_sel.si_note, kn, 1);
	BPFD_UNLOCK(d);

	return (0);
}

static void
filt_bpfdetach(struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	knlist_remove(&d->bd_sel.si_note, kn, 0);
}

static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK_ASSERT(d);
	ready = bpf_ready(d);
	if (ready) {
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}

	return (ready);
}
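/*
 * Example (illustrative sketch, not part of this file): waiting for packets
 * with kevent(2).  Only EVFILT_READ is accepted by bpfkqfilter() above, and
 * kn_data (returned in ev.data) reports how many bytes are ready.  Error
 * handling is omitted.
 *
 *	#include <sys/event.h>
 *
 *	int kq = kqueue();
 *	struct kevent kev, ev;
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// register the bpf fd
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	// block until readable
 *	// ev.data now holds the byte count from filt_bpfread()
 */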
/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
	u_int slen;
	int gottime;
	struct timeval tv;

	gottime = 0;
	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		BPFD_LOCK(d);
		++d->bd_rcount;
		/*
		 * NB: We don't call BPF_CHECK_DIRECTION() here since there
		 * is no way for the caller to indicate to us whether this
		 * packet is inbound or outbound.  In the bpf_mtap() routines,
		 * we use the interface pointers on the mbuf to figure it out.
		 */
#ifdef BPF_JITTER
		if (bpf_jitter_enable != 0 && d->bd_bfilter != NULL)
			slen = (*(d->bd_bfilter->func))(pkt, pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen,
				    bpf_append_bytes, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

#define	BPF_CHECK_DIRECTION(d, r, i)				\
	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	gottime = 0;

	pktlen = m_length(m, NULL);

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
#ifdef BPF_JITTER
		/* XXX We cannot handle multiple mbufs. */
		if (bpf_jitter_enable != 0 && d->bd_bfilter != NULL &&
		    m->m_next == NULL)
			slen = (*(d->bd_bfilter->func))(mtod(m, u_char *),
			    pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_append_mbuf, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	gottime = 0;

	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_append_mbuf, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

#undef	BPF_CHECK_DIRECTION

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "cpfn" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_append_mbuf is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
    struct timeval *tv)
{
	struct bpf_hdr hdr;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	int do_wakeup = 0;

	BPFD_LOCK_ASSERT(d);

	/*
	 * Detect whether user space has released a buffer back to us, and if
	 * so, move it from being a hold buffer to a free buffer.  This may
	 * not be the best place to do it (for example, we might only want to
	 * run this check if we need the space), but for now it's a reliable
	 * spot to do it.
	 */
	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}

	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater than or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 *
	 * Drop the packet if there's no room and no hope of room.
	 * If the packet would overflow the storage buffer or the storage
	 * buffer is considered immutable by the buffer model, try to rotate
	 * the buffer and wakeup pending processes.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
		if (d->bd_fbuf == NULL) {
			/*
			 * There's no room in the store buffer, and no
			 * prospect of room, so drop the packet.  Notify the
			 * buffer model.
			 */
			bpf_buffull(d);
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has already
		 * expired during a select call.  A packet arrived, so the
		 * reader should be woken up.
		 */
		do_wakeup = 1;

	/*
	 * Append the bpf header.  Note we append the actual header size, but
	 * move forward the length of the header plus padding.
	 */
	bzero(&hdr, sizeof(hdr));
	hdr.bh_tstamp = *tv;
	hdr.bh_datalen = pktlen;
	hdr.bh_hdrlen = hdrlen;
	hdr.bh_caplen = totlen - hdrlen;
	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));

	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, hdr.bh_caplen);
	d->bd_slen = curlen + totlen;

	if (do_wakeup)
		bpf_wakeup(d);
}
/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{

	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	bpf_free(d);
	if (d->bd_rfilter) {
		free((caddr_t)d->bd_rfilter, M_BPF);
#ifdef BPF_JITTER
		bpf_destroy_jit_filter(d->bd_bfilter);
#endif
	}
	if (d->bd_wfilter)
		free((caddr_t)d->bd_wfilter, M_BPF);
	mtx_destroy(&d->bd_mtx);
}

/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
	struct bpf_if *bp;

	bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
	if (bp == NULL)
		panic("bpfattach");

	LIST_INIT(&bp->bif_dlist);
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);
	KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
	*driverp = bp;

	mtx_lock(&bpf_mtx);
	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
	mtx_unlock(&bpf_mtx);

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	if (bootverbose)
		if_printf(ifp, "bpf attached\n");
}
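/*
 * Example (illustrative sketch, not part of this file): how a network
 * driver typically hooks into BPF.  An Ethernet driver gets this for free
 * from ether_ifattach(), which ends up doing the equivalent of:
 *
 *	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
 *
 * and the driver's receive and transmit paths hand each packet to the taps
 * (BPF_MTAP() is the net/bpf.h wrapper that first checks for listeners):
 *
 *	BPF_MTAP(ifp, m);
 *
 * bpfdetach(ifp) below must be called before the ifnet goes away.
 */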
/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if	*bp;
	struct bpf_d	*d;

	/* Locate BPF interface information */
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (ifp == bp->bif_ifp)
			break;
	}

	/* Interface wasn't attached */
	if ((bp == NULL) || (bp->bif_ifp == NULL)) {
		mtx_unlock(&bpf_mtx);
		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
		return;
	}

	LIST_REMOVE(bp, bif_next);
	mtx_unlock(&bpf_mtx);

	while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
		bpf_detachd(d);
		BPFD_LOCK(d);
		bpf_wakeup(d);
		BPFD_UNLOCK(d);
	}

	mtx_destroy(&bp->bif_mtx);
	free(bp, M_BPF);
}

/*
 * Get a list of available data link types of the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len) {
				mtx_unlock(&bpf_mtx);
				return (ENOMEM);
			}
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	mtx_unlock(&bpf_mtx);
	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	mtx_unlock(&bpf_mtx);
	if (bp != NULL) {
		opromisc = d->bd_promisc;
		bpf_detachd(d);
		bpf_attachd(d, bp);
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error)
				if_printf(bp->bif_ifp,
				    "bpf_setdlt: ifpromisc failed (%d)\n",
				    error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}
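/*
 * Example (illustrative sketch, not part of this file): the two-call
 * pattern for BIOCGDLTLIST.  Passing a NULL bfl_list makes bpf_getdltlist()
 * above just count the matching DLTs; the second call fills the array.
 * Error handling is omitted.
 *
 *	struct bpf_dltlist bfl;
 *	u_int dlt;
 *
 *	memset(&bfl, 0, sizeof(bfl));
 *	ioctl(fd, BIOCGDLTLIST, &bfl);		// bfl.bfl_len = DLT count
 *	bfl.bfl_list = malloc(bfl.bfl_len * sizeof(u_int));
 *	ioctl(fd, BIOCGDLTLIST, &bfl);		// fill the list
 *	dlt = bfl.bfl_list[0];
 *	ioctl(fd, BIOCSDLT, &dlt);		// switch to a listed DLT
 */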
static void
bpf_drvinit(void *unused)
{
	struct cdev *dev;

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	LIST_INIT(&bpf_iflist);

	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
	/* For compatibility */
	make_dev_alias(dev, "bpf0");
}

static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{

	bzero(d, sizeof(*d));
	BPFD_LOCK_ASSERT(bd);
	d->bd_structsize = sizeof(*d);
	d->bd_immediate = bd->bd_immediate;
	d->bd_promisc = bd->bd_promisc;
	d->bd_hdrcmplt = bd->bd_hdrcmplt;
	d->bd_direction = bd->bd_direction;
	d->bd_feedback = bd->bd_feedback;
	d->bd_async = bd->bd_async;
	d->bd_rcount = bd->bd_rcount;
	d->bd_dcount = bd->bd_dcount;
	d->bd_fcount = bd->bd_fcount;
	d->bd_sig = bd->bd_sig;
	d->bd_slen = bd->bd_slen;
	d->bd_hlen = bd->bd_hlen;
	d->bd_bufsize = bd->bd_bufsize;
	d->bd_pid = bd->bd_pid;
	strlcpy(d->bd_ifname,
	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
	d->bd_locked = bd->bd_locked;
	d->bd_wcount = bd->bd_wcount;
	d->bd_wdcount = bd->bd_wdcount;
	d->bd_wfcount = bd->bd_wfcount;
	d->bd_zcopy = bd->bd_zcopy;
	d->bd_bufmode = bd->bd_bufmode;
}

static int
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct xbpf_d *xbdbuf, *xbd;
	int index, error;
	struct bpf_if *bp;
	struct bpf_d *bd;

	/*
	 * XXX This is not technically correct.  It is possible for
	 * non-privileged users to open bpf devices.  It would make sense
	 * if the users who opened the devices were able to retrieve
	 * the statistics for them, too.
	 */
	error = priv_check(req->td, PRIV_NET_BPF);
	if (error)
		return (error);
	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
	if (bpf_bpfd_cnt == 0)
		return (SYSCTL_OUT(req, 0, 0));
	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
	mtx_lock(&bpf_mtx);
	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
		mtx_unlock(&bpf_mtx);
		free(xbdbuf, M_BPF);
		return (ENOMEM);
	}
	index = 0;
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_LOCK(bp);
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_LOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_UNLOCK(bd);
		}
		BPFIF_UNLOCK(bp);
	}
	mtx_unlock(&bpf_mtx);
	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
	free(xbdbuf, M_BPF);
	return (error);
}

SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);
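/*
 * Example (illustrative sketch, not part of this file): reading the
 * net.bpf.stats portal from userland, much as netstat -B does.  The handler
 * above returns an array of struct xbpf_d; the usual two-call pattern sizes
 * the buffer first.  Error handling is omitted.
 *
 *	#include <sys/sysctl.h>
 *
 *	size_t len;
 *	struct xbpf_d *xbd;
 *
 *	sysctlbyname("net.bpf.stats", NULL, &len, NULL, 0);
 *	xbd = malloc(len);
 *	sysctlbyname("net.bpf.stats", xbd, &len, NULL, 0);
 *	// len / sizeof(*xbd) entries, one per open bpf descriptor
 */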
#else /* !DEV_BPF && !NETGRAPH_BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */
static struct bpf_if bp_null;

void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}

void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}

void
bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
{
}

void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{

	*driverp = &bp_null;
}

void
bpfdetach(struct ifnet *ifp)
{
}

u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{

	return -1;	/* "no filter" behaviour */
}

int
bpf_validate(const struct bpf_insn *f, int len)
{

	return 0;	/* false */
}

#endif /* !DEV_BPF && !NETGRAPH_BPF */