/*-
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)bpf.c	8.4 (Berkeley) 1/9/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bpf.h"
#include "opt_compat.h"
#include "opt_netgraph.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/uio.h>

#include <sys/event.h>
#include <sys/file.h>
#include <sys/poll.h>
#include <sys/proc.h>

#include <sys/socket.h>

#include <net/if.h>
#define	BPF_INTERNAL
#include <net/bpf.h>
#include <net/bpf_buffer.h>
#ifdef BPF_JITTER
#include <net/bpf_jitter.h>
#endif
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <net80211/ieee80211_freebsd.h>

#include <security/mac/mac_framework.h>

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

#define	PRINET	26			/* interruptible */

#define	SIZEOF_BPF_HDR(type)	\
    (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>
#define	BPF_ALIGNMENT32 sizeof(int32_t)
#define	BPF_WORDALIGN32(x) (((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1))

#ifndef BURN_BRIDGES
/*
 * 32-bit version of structure prepended to each packet.  We use this header
 * instead of the standard one for 32-bit streams.  We mark a stream as
 * 32-bit the first time we see a 32-bit compat ioctl request.
 */
struct bpf_hdr32 {
	struct timeval32 bh_tstamp;	/* time stamp */
	uint32_t	bh_caplen;	/* length of captured portion */
	uint32_t	bh_datalen;	/* original length of packet */
	uint16_t	bh_hdrlen;	/* length of bpf header (this struct
					   plus alignment padding) */
};
#endif

struct bpf_program32 {
	u_int bf_len;
	uint32_t bf_insns;
};

struct bpf_dltlist32 {
	u_int	bfl_len;
	u_int	bfl_list;
};

#define	BIOCSETF32	_IOW('B', 103, struct bpf_program32)
#define	BIOCSRTIMEOUT32	_IOW('B', 109, struct timeval32)
#define	BIOCGRTIMEOUT32	_IOR('B', 110, struct timeval32)
#define	BIOCGDLTLIST32	_IOWR('B', 121, struct bpf_dltlist32)
#define	BIOCSETWF32	_IOW('B', 123, struct bpf_program32)
#define	BIOCSETFNR32	_IOW('B', 130, struct bpf_program32)
#endif

/*
 * bpf_iflist is a list of BPF interface structures, each corresponding to a
 * specific DLT.  The same network interface might have several BPF interface
 * structures registered by different layers in the stack (e.g., 802.11
 * frames, ethernet frames, etc.).
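 *
 * For example (an illustrative pairing, not asserted by this file alone):
 * a wireless NIC may be tapped as DLT_IEEE802_11 by the net80211 layer and,
 * once frames are decapsulated, as DLT_EN10MB by the ethernet layer; each
 * registration gets its own bpf_if.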
 */
static LIST_HEAD(, bpf_if)	bpf_iflist;
static struct mtx	bpf_mtx;		/* bpf global lock */
static int		bpf_bpfd_cnt;

static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static void	bpf_detachd_locked(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static int	bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
		    struct sockaddr *, int *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void
		bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
		    void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
		    struct bintime *);
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	filt_bpfdetach(struct knote *);
static int	filt_bpfread(struct knote *, long);
static void	bpf_drvinit(void *);
static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
int bpf_maxinsns = BPF_MAXINSNS;
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
    &bpf_maxinsns, 0, "Maximum bpf program instructions");
static int bpf_zerocopy_enable = 0;
SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
    &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
    bpf_stats_sysctl, "bpf statistics portal");

static VNET_DEFINE(int, bpf_optimize_writers) = 0;
#define	V_bpf_optimize_writers VNET(bpf_optimize_writers)
SYSCTL_VNET_INT(_net_bpf, OID_AUTO, optimize_writers,
    CTLFLAG_RW, &VNET_NAME(bpf_optimize_writers), 0,
    "Do not send packets until BPF program is set");

static	d_open_t	bpfopen;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;
static	d_kqfilter_t	bpfkqfilter;

static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	bpfopen,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_kqfilter =	bpfkqfilter,
};

static struct filterops bpfread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_bpfdetach,
	.f_event = filt_bpfread,
};

/*
 * LOCKING MODEL USED BY BPF:
 * Locks:
 * 1) global lock (BPF_LOCK).  Mutex, used to protect interface
 * addition/removal, some global counters and every bpf_if reference.
 * 2) Interface lock.  Rwlock, used to protect the list of BPF descriptors
 * and their filters.
 * 3) Descriptor lock.  Rwlock, used to protect BPF buffers and various
 * structure fields used by the bpf_mtap code.
 *
 * Lock order:
 *
 * Global lock, interface lock, descriptor lock.
 *
 * We have to acquire the interface lock before the descriptor main lock due
 * to the BPF_MTAP[2] working model.  In many places (like bpf_detachd) we
 * start with a BPF descriptor (and we need to at least rlock it to get a
 * reliable interface pointer).  This gives us a potential lock order
 * reversal.  As a result, we use the global lock to protect from bpf_if
 * changes in every such place.
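 *
 * An illustrative sketch of the write-side ordering (macro names as used
 * throughout this file; see bpf_attachd() below for a real instance):
 *
 *	BPF_LOCK();
 *	BPFIF_WLOCK(bp);
 *	BPFD_WLOCK(d);
 *	d->bd_bif = bp;			(mutate descriptor/interface state)
 *	BPFD_WUNLOCK(d);
 *	BPFIF_WUNLOCK(bp);
 *	BPF_UNLOCK();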
 *
 * Changing d->bd_bif is protected by 1) the global lock, 2) the interface
 * lock and 3) the descriptor main wlock.
 * Reading bd_bif can be protected by any of these locks, typically the
 * global lock.
 *
 * Changing the read/write BPF filter is protected by the same three locks,
 * and the same applies to reading.
 *
 * Sleeping in the global lock is not allowed due to bpfdetach() using it.
 */

/*
 * Wrapper functions for various buffering methods.  If the set of buffer
 * modes expands, we will probably want to introduce a switch data structure
 * similar to protosw, etc.
 */
static void
bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_WLOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_bytes(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		d->bd_zcopy++;
		return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_bytes");
	}
}

static void
bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_WLOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_mbuf(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		d->bd_zcopy++;
		return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_mbuf");
	}
}

/*
 * This function gets called when the free buffer is re-assigned.
 */
static void
bpf_buf_reclaimed(struct bpf_d *d)
{

	BPFD_WLOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return;

	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buf_reclaimed(d);
		return;

	default:
		panic("bpf_buf_reclaimed");
	}
}

/*
 * If the buffer mechanism has a way to decide that a held buffer can be made
 * free, then it is exposed via the bpf_canfreebuf() interface.  (1) is
 * returned if the buffer can be discarded, (0) is returned if it cannot.
 */
static int
bpf_canfreebuf(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canfreebuf(d));
	}
	return (0);
}

/*
 * Allow the buffer model to indicate that the current store buffer is
 * immutable, regardless of the appearance of space.  Return (1) if the
 * buffer is writable, and (0) if not.
 */
static int
bpf_canwritebuf(struct bpf_d *d)
{
	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canwritebuf(d));
	}
	return (1);
}

/*
 * Notify buffer model that an attempt to write to the store buffer has
 * resulted in a dropped packet, in which case the buffer may be considered
 * full.
 */
static void
bpf_buffull(struct bpf_d *d)
{

	BPFD_WLOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buffull(d);
		break;
	}
}

/*
 * Notify the buffer model that a buffer has moved into the hold position.
 */
void
bpf_bufheld(struct bpf_d *d)
{

	BPFD_WLOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_bufheld(d);
		break;
	}
}

static void
bpf_free(struct bpf_d *d)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_free(d));

	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_free(d));

	default:
		panic("bpf_buf_free");
	}
}

static int
bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_uiomove(d, buf, len, uio));
}

static int
bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_ioctl_sblen(d, i));
}

static int
bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_getzmax(td, d, i));
}

static int
bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
}

static int
bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
}

/*
 * General BPF functions.
 */
static int
bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
    struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
{
	const struct ieee80211_bpf_params *p;
	struct ether_header *eh;
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_NULL:
		/*
		 * null interface types require a 4 byte pseudo header which
		 * corresponds to the address family of the packet.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;

	if (len - hlen > ifp->if_mtu)
		return (EMSGSIZE);

	if ((unsigned)len > MJUM16BYTES)
		return (EIO);

	if (len <= MHLEN)
		MGETHDR(m, M_WAIT, MT_DATA);
	else if (len <= MCLBYTES)
		m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
	else
		m = m_getjcl(M_WAIT, MT_DATA, M_PKTHDR,
#if (MJUMPAGESIZE > MCLBYTES)
		    len <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    (len <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES));
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/* Check for multicast destination */
	switch (linktype) {
	case DLT_EN10MB:
		eh = mtod(m, struct ether_header *);
		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
			    ETHER_ADDR_LEN) == 0)
				m->m_flags |= M_BCAST;
			else
				m->m_flags |= M_MCAST;
		}
		break;
	}

	/*
	 * Make room for the link header, and copy it to the sockaddr.
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     zero.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(m->m_data, sockp->sa_data, hlen);
	}
	*hdrlen = hlen;

	return (0);
bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	int op_w;

	BPF_LOCK_ASSERT();

	/*
	 * Save sysctl value to protect from sysctl change
	 * between reads.
	 */
	op_w = V_bpf_optimize_writers;

	if (d->bd_bif != NULL)
		bpf_detachd_locked(d);
	/*
	 * Point d at bp, and add d to the interface's list.
	 * Since there are many applications using BPF for
	 * sending raw packets only (dhcpd, cdpd are good examples)
	 * we can delay adding d to the list of active listeners until
	 * some filter is configured.
	 */

	BPFIF_WLOCK(bp);
	BPFD_WLOCK(d);

	d->bd_bif = bp;

	if (op_w != 0) {
		/* Add to writers-only list */
		LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
		/*
		 * We decrement bd_writer on every filter set operation.
		 * The first BIOCSETF is done by pcap_open_live() to set up
		 * the snap length.
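		 * (That implicit first BIOCSETF is why bd_writer starts at
		 * 2: it counts 2 -> 1 on the snap-length filter and 1 -> 0
		 * on the next one, at which point bpf_setf() calls
		 * bpf_upgraded() to move d to the active-reader list; see
		 * those functions below.)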
		 * After that, the application usually sets its own filter.
		 */
		d->bd_writer = 2;
	} else
		LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	BPFD_WUNLOCK(d);
	BPFIF_WUNLOCK(bp);

	bpf_bpfd_cnt++;

	CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
	    __func__, d->bd_pid, d->bd_writer ? "writer" : "active");

	if (op_w == 0)
		EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
}

/*
 * Add d to the list of active bp filters.
 * Requires bpf_attachd() to be called before.
 */
static void
bpf_upgraded(struct bpf_d *d)
{
	struct bpf_if *bp;

	BPF_LOCK_ASSERT();

	bp = d->bd_bif;

	/*
	 * Filter can be set several times without specifying interface.
	 * Mark d as reader and exit.
	 */
	if (bp == NULL) {
		BPFD_WLOCK(d);
		d->bd_writer = 0;
		BPFD_WUNLOCK(d);
		return;
	}

	BPFIF_WLOCK(bp);
	BPFD_WLOCK(d);

	/* Remove from writers-only list */
	LIST_REMOVE(d, bd_next);
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
	/* Mark d as reader */
	d->bd_writer = 0;

	BPFD_WUNLOCK(d);
	BPFIF_WUNLOCK(bp);

	CTR2(KTR_NET, "%s: upgrade required by pid %d", __func__, d->bd_pid);

	EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	BPF_LOCK();
	bpf_detachd_locked(d);
	BPF_UNLOCK();
}

static void
bpf_detachd_locked(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);

	BPF_LOCK_ASSERT();

	/* Check if descriptor is attached */
	if ((bp = d->bd_bif) == NULL)
		return;

	BPFIF_WLOCK(bp);
	BPFD_WLOCK(d);

	/* Save bd_writer value */
	error = d->bd_writer;

	/*
	 * Remove d from the interface's descriptor list.
	 */
	LIST_REMOVE(d, bd_next);

	ifp = bp->bif_ifp;
	d->bd_bif = NULL;
	BPFD_WUNLOCK(d);
	BPFIF_WUNLOCK(bp);

	bpf_bpfd_cnt--;

	/* Call event handler iff d is attached */
	if (error == 0)
		EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		CURVNET_SET(ifp->if_vnet);
		error = ifpromisc(ifp, 0);
		CURVNET_RESTORE();
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged.
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
			    "bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
static void
bpf_dtor(void *data)
{
	struct bpf_d *d = data;

	BPFD_WLOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_WUNLOCK(d);
	funsetown(&d->bd_sigio);
	bpf_detachd(d);
#ifdef MAC
	mac_bpfdesc_destroy(d);
#endif /* MAC */
	seldrain(&d->bd_sel);
	knlist_destroy(&d->bd_sel.si_note);
	callout_drain(&d->bd_callout);
	bpf_freed(d);
	free(d, M_BPF);
}

/*
 * Open ethernet device.
 * Returns ENXIO for illegal minor device number, EBUSY if file is open by
 * another process.
 */
/* ARGSUSED */
static	int
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d;
	int error;

	d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(d, bpf_dtor);
	if (error != 0) {
		free(d, M_BPF);
		return (error);
	}

	/*
	 * For historical reasons, perform a one-time initialization call to
	 * the buffer routines, even though we're not yet committed to a
	 * particular buffer method.
	 */
	bpf_buffer_init(d);
	d->bd_bufmode = BPF_BUFMODE_BUFFER;
	d->bd_sig = SIGIO;
	d->bd_direction = BPF_D_INOUT;
	BPF_PID_REFRESH(d, td);
#ifdef MAC
	mac_bpfdesc_init(d);
	mac_bpfdesc_create(td->td_ucred, d);
#endif
	rw_init(&d->bd_lock, "bpf cdev lock");
	callout_init_rw(&d->bd_callout, &d->bd_lock, 0);
	knlist_init_rw_reader(&d->bd_sel.si_note, &d->bd_lock);

	return (0);
}

/*
 * bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	int error;
	int non_block;
	int timed_out;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	non_block = ((ioflag & O_NONBLOCK) != 0);

	BPFD_WLOCK(d);
	BPF_PID_REFRESH_CUR(d);
	if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
		BPFD_WUNLOCK(d);
		return (EOPNOTSUPP);
	}
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if (d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 */
			if (d->bd_immediate || non_block || timed_out) {
				/*
				 * Rotate the buffers and return what's here
				 * if we are in immediate mode, non-blocking
				 * flag is set, or this descriptor timed out.
				 */
				ROTATE_BUFFERS(d);
				break;
			}
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_WUNLOCK(d);
			return (ENXIO);
		}

		if (non_block) {
			BPFD_WUNLOCK(d);
			return (EWOULDBLOCK);
		}
		error = rw_sleep(d, &d->bd_lock, PRINET|PCATCH,
		    "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_WUNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_WUNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	BPFD_WUNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 *
	 * XXXRW: More synchronization needed here: what if a second thread
	 * issues a read on the same fd at the same time?  Don't want this
	 * getting invalidated.
	 */
	error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);

	BPFD_WLOCK(d);
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	bpf_buf_reclaimed(d);
	BPFD_WUNLOCK(d);

	return (error);
}

/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(struct bpf_d *d)
{

	BPFD_WLOCK_ASSERT(d);
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
}

static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	BPFD_WLOCK_ASSERT(d);

	if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout))
		return;
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
}

static int
bpf_ready(struct bpf_d *d)
{

	BPFD_WLOCK_ASSERT(d);

	if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
		return (1);
	if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
	    d->bd_slen != 0)
		return (1);
	return (0);
}

static int
bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	struct ifnet *ifp;
	struct mbuf *m, *mc;
	struct sockaddr dst;
	int error, hlen;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	BPF_PID_REFRESH_CUR(d);
	d->bd_wcount++;
	/* XXX: locking required */
	if (d->bd_bif == NULL) {
		d->bd_wdcount++;
		return (ENXIO);
	}

	ifp = d->bd_bif->bif_ifp;

	if ((ifp->if_flags & IFF_UP) == 0) {
		d->bd_wdcount++;
		return (ENETDOWN);
	}

	if (uio->uio_resid == 0) {
		d->bd_wdcount++;
		return (0);
	}

	bzero(&dst, sizeof(dst));
	m = NULL;
	hlen = 0;
	/* XXX: bpf_movein() can sleep */
	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
	    &m, &dst, &hlen, d->bd_wfilter);
	if (error) {
		d->bd_wdcount++;
		return (error);
	}
	d->bd_wfcount++;
	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	if (d->bd_feedback) {
		mc = m_dup(m, M_DONTWAIT);
		if (mc != NULL)
			mc->m_pkthdr.rcvif = ifp;
		/*
		 * Set M_PROMISC for outgoing packets to be discarded.
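		 * bpf_mtap() recognizes this mark on mbufs whose rcvif is
		 * NULL, clears it and skips the packet, so the reader sees
		 * only the feedback copy (mc) injected via if_input below.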
		 */
		if (d->bd_direction == BPF_D_INOUT)
			m->m_flags |= M_PROMISC;
	} else
		mc = NULL;

	m->m_pkthdr.len -= hlen;
	m->m_len -= hlen;
	m->m_data += hlen;	/* XXX */

	CURVNET_SET(ifp->if_vnet);
#ifdef MAC
	BPFD_WLOCK(d);
	mac_bpfdesc_create_mbuf(d, m);
	if (mc != NULL)
		mac_bpfdesc_create_mbuf(d, mc);
	BPFD_WUNLOCK(d);
#endif

	error = (*ifp->if_output)(ifp, m, &dst, NULL);
	if (error)
		d->bd_wdcount++;

	if (mc != NULL) {
		if (error == 0)
			(*ifp->if_input)(ifp, mc);
		else
			m_freem(mc);
	}
	CURVNET_RESTORE();

	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the receive
 * and drop counts.  This is doable for kernel-only buffers, but with
 * zero-copy buffers, we can't write to (or rotate) buffers that are
 * currently owned by userspace.  It would be nice if we could encapsulate
 * this logic in the buffer code rather than here.
 */
static void
reset_d(struct bpf_d *d)
{

	BPFD_WLOCK_ASSERT(d);

	if ((d->bd_hbuf != NULL) &&
	    (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}
	if (bpf_canwritebuf(d))
		d->bd_slen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
	d->bd_fcount = 0;
	d->bd_wcount = 0;
	d->bd_wfcount = 0;
	d->bd_wdcount = 0;
	d->bd_zcopy = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set read filter.
 *  BIOCSETFNR		Set read filter without resetting descriptor.
 *  BIOCSETWF		Set write filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGDIRECTION	Get packet direction flag
 *  BIOCSDIRECTION	Set packet direction flag
 *  BIOCGTSTAMP		Get time stamp format and resolution.
 *  BIOCSTSTAMP		Set time stamp format and resolution.
 *  BIOCLOCK		Set "locked" flag
 *  BIOCFEEDBACK	Set packet feedback mode.
 *  BIOCSETZBUF		Set current zero-copy buffer locations.
 *  BIOCGETZMAX		Get maximum zero-copy buffer size.
 *  BIOCROTZBUF		Force rotation of zero-copy buffer
 *  BIOCSETBUFMODE	Set buffer mode.
 *  BIOCGETBUFMODE	Get current buffer mode.
 */
/* ARGSUSED */
static	int
bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct bpf_d *d;
	int error;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_WLOCK(d);
	BPF_PID_REFRESH(d, td);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_WUNLOCK(d);

	if (d->bd_locked == 1) {
		switch (cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
#ifdef COMPAT_FREEBSD32
		case BIOCGDLTLIST32:
#endif
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
		case BIOCGRTIMEOUT32:
#endif
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case BIOCSTSTAMP:
		case BIOCFEEDBACK:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
		case BIOCSRTIMEOUT32:
#endif
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
		case BIOCROTZBUF:
			break;
		default:
			return (EPERM);
		}
	}
#ifdef COMPAT_FREEBSD32
	/*
	 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
	 * that it will get 32-bit packet headers.
	 */
	switch (cmd) {
	case BIOCSETF32:
	case BIOCSETFNR32:
	case BIOCSETWF32:
	case BIOCGDLTLIST32:
	case BIOCGRTIMEOUT32:
	case BIOCSRTIMEOUT32:
		d->bd_compat32 = 1;
	}
#endif

	CURVNET_SET(TD_TO_VNET(td));
	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_WLOCK(d);
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_WUNLOCK(d);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet	*ifp;

			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		error = bpf_ioctl_sblen(d, (u_int *)addr);
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETFNR:
	case BIOCSETWF:
#ifdef COMPAT_FREEBSD32
	case BIOCSETF32:
	case BIOCSETFNR32:
	case BIOCSETWF32:
#endif
		error = bpf_setf(d, (struct bpf_program *)addr, cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_WLOCK(d);
		reset_d(d);
		BPFD_WUNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
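	 * (Illustrative userland pattern, an assumption drawn from the
	 * bpf(4) contract rather than from this file: call once with
	 * bfl_list == NULL to learn bfl_len, allocate the array, then
	 * call again to fetch the DLTs.)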
	 */
#ifdef COMPAT_FREEBSD32
	case BIOCGDLTLIST32:
		{
			struct bpf_dltlist32 *list32;
			struct bpf_dltlist dltlist;

			list32 = (struct bpf_dltlist32 *)addr;
			dltlist.bfl_len = list32->bfl_len;
			dltlist.bfl_list = PTRIN(list32->bfl_list);
			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				error = bpf_getdltlist(d, &dltlist);
				if (error == 0)
					list32->bfl_len = dltlist.bfl_len;
			}
			break;
		}
#endif

	case BIOCGDLTLIST:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		BPF_LOCK();
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		BPF_UNLOCK();
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		BPF_LOCK();
		error = bpf_setif(d, (struct ifreq *)addr);
		BPF_UNLOCK();
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
	case BIOCSRTIMEOUT32:
#endif
		{
			struct timeval *tv = (struct timeval *)addr;
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
			struct timeval32 *tv32;
			struct timeval tv64;

			if (cmd == BIOCSRTIMEOUT32) {
				tv32 = (struct timeval32 *)addr;
				tv = &tv64;
				tv->tv_sec = tv32->tv_sec;
				tv->tv_usec = tv32->tv_usec;
			} else
#endif
				tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
	case BIOCGRTIMEOUT32:
#endif
		{
			struct timeval *tv;
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
			struct timeval32 *tv32;
			struct timeval tv64;

			if (cmd == BIOCGRTIMEOUT32)
				tv = &tv64;
			else
#endif
				tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
			if (cmd == BIOCGRTIMEOUT32) {
				tv32 = (struct timeval32 *)addr;
				tv32->tv_sec = tv->tv_sec;
				tv32->tv_usec = tv->tv_usec;
			}
#endif

			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			/* XXXCSJP overflow */
			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get packet direction flag
	 */
	case BIOCGDIRECTION:
		*(u_int *)addr = d->bd_direction;
		break;

	/*
	 * Set packet direction flag
	 */
	case BIOCSDIRECTION:
		{
			u_int	direction;

			direction = *(u_int *)addr;
			switch (direction) {
			case BPF_D_IN:
			case BPF_D_INOUT:
			case BPF_D_OUT:
				d->bd_direction = direction;
				break;
			default:
				error = EINVAL;
			}
		}
		break;

	/*
	 * Get packet timestamp format and resolution.
	 */
	case BIOCGTSTAMP:
		*(u_int *)addr = d->bd_tstamp;
		break;

	/*
	 * Set packet timestamp format and resolution.
	 */
	case BIOCSTSTAMP:
		{
			u_int	func;

			func = *(u_int *)addr;
			if (BPF_T_VALID(func))
				d->bd_tstamp = func;
			else
				error = EINVAL;
		}
		break;

	case BIOCFEEDBACK:
		d->bd_feedback = *(u_int *)addr;
		break;

	case BIOCLOCK:
		d->bd_locked = 1;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;

	case BIOCGETBUFMODE:
		*(u_int *)addr = d->bd_bufmode;
		break;

	case BIOCSETBUFMODE:
		/*
		 * Allow the buffering mode to be changed as long as we
		 * haven't yet committed to a particular mode.  Our
		 * definition of commitment, for now, is whether or not a
		 * buffer has been allocated or an interface attached, since
		 * that's the point where things get tricky.
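		 * (Assumed userland consequence: a zero-copy consumer must
		 * switch modes before binding an interface, e.g.
		 *	u_int mode = BPF_BUFMODE_ZBUF;
		 *	ioctl(fd, BIOCSETBUFMODE, &mode);
		 * issued before BIOCSETIF, or this handler returns EBUSY.)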
		 */
		switch (*(u_int *)addr) {
		case BPF_BUFMODE_BUFFER:
			break;

		case BPF_BUFMODE_ZBUF:
			if (bpf_zerocopy_enable)
				break;
			/* FALLSTHROUGH */

		default:
			CURVNET_RESTORE();
			return (EINVAL);
		}

		BPFD_WLOCK(d);
		if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
		    d->bd_fbuf != NULL || d->bd_bif != NULL) {
			BPFD_WUNLOCK(d);
			CURVNET_RESTORE();
			return (EBUSY);
		}
		d->bd_bufmode = *(u_int *)addr;
		BPFD_WUNLOCK(d);
		break;

	case BIOCGETZMAX:
		error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
		break;

	case BIOCSETZBUF:
		error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
		break;

	case BIOCROTZBUF:
		error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
		break;
	}
	CURVNET_RESTORE();
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;
#ifdef BPF_JITTER
	bpf_jit_filter *ofunc;
#endif
	int need_upgrade;
#ifdef COMPAT_FREEBSD32
	struct bpf_program32 *fp32;
	struct bpf_program fp_swab;

	if (cmd == BIOCSETWF32 || cmd == BIOCSETF32 || cmd == BIOCSETFNR32) {
		fp32 = (struct bpf_program32 *)fp;
		fp_swab.bf_len = fp32->bf_len;
		fp_swab.bf_insns = (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
		fp = &fp_swab;
		if (cmd == BIOCSETWF32)
			cmd = BIOCSETWF;
	}
#endif
	/*
	 * Check the validity of the new filter before acquiring any locks.
	 * Allocate memory for the new filter, if needed.
	 */
	flen = fp->bf_len;
	if ((flen > bpf_maxinsns) || ((fp->bf_insns == NULL) && (flen != 0)))
		return (EINVAL);

	need_upgrade = 0;
	size = flen * sizeof(*fp->bf_insns);
	if (size > 0)
		fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	else
		fcode = NULL; /* Make compiler happy */

	BPF_LOCK();

	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
#ifdef BPF_JITTER
		ofunc = NULL;
#endif
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
#ifdef BPF_JITTER
		ofunc = d->bd_bfilter;
#endif
	}
	if (fp->bf_insns == NULL) {
		/*
		 * Protect filter removal by interface lock.
		 * Additionally, we are protected by global lock here.
		 */
		if (d->bd_bif != NULL)
			BPFIF_WLOCK(d->bd_bif);
		BPFD_WLOCK(d);
		if (wfilter)
			d->bd_wfilter = NULL;
		else {
			d->bd_rfilter = NULL;
#ifdef BPF_JITTER
			d->bd_bfilter = NULL;
#endif
			if (cmd == BIOCSETF)
				reset_d(d);
		}
		BPFD_WUNLOCK(d);
		if (d->bd_bif != NULL)
			BPFIF_WUNLOCK(d->bd_bif);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif
		BPF_UNLOCK();
		return (0);
	}

	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		/*
		 * Protect filter change by interface lock.
		 * Additionally, we are protected by global lock here.
		 */
		if (d->bd_bif != NULL)
			BPFIF_WLOCK(d->bd_bif);
		BPFD_WLOCK(d);
		if (wfilter)
			d->bd_wfilter = fcode;
		else {
			d->bd_rfilter = fcode;
#ifdef BPF_JITTER
			d->bd_bfilter = bpf_jitter(fcode, flen);
#endif
			if (cmd == BIOCSETF)
				reset_d(d);

			/*
			 * Do not require upgrade on the first BIOCSETF
			 * (used by pcap_open_live() to set the snap length).
			 */
			if ((d->bd_writer != 0) && (--d->bd_writer == 0))
				need_upgrade = 1;
			CTR4(KTR_NET, "%s: filter function set by pid %d, "
			    "bd_writer counter %d, need_upgrade %d",
			    __func__, d->bd_pid, d->bd_writer, need_upgrade);
		}
		BPFD_WUNLOCK(d);
		if (d->bd_bif != NULL)
			BPFIF_WUNLOCK(d->bd_bif);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif

		/* Move d to active readers list. */
		if (need_upgrade != 0)
			bpf_upgraded(d);

		BPF_UNLOCK();
		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	BPF_UNLOCK();
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	struct ifnet *theywant;

	BPF_LOCK_ASSERT();

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL || theywant->if_bpf == NULL)
		return (ENXIO);

	bp = theywant->if_bpf;

	/*
	 * Behavior here depends on the buffering model.  If we're using
	 * kernel memory buffers, then we can allocate them here.  If we're
	 * using zero-copy, then the user process must have registered
	 * buffers by the time we get here.  If not, return an error.
	 *
	 * XXXRW: There are locking issues here with multi-threaded use: what
	 * if two threads try to set the interface at once?
	 */
	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		if (d->bd_sbuf == NULL)
			bpf_buffer_alloc(d);
		KASSERT(d->bd_sbuf != NULL, ("bpf_setif: bd_sbuf NULL"));
		break;

	case BPF_BUFMODE_ZBUF:
		if (d->bd_sbuf == NULL)
			return (EINVAL);
		break;

	default:
		panic("bpf_setif: bufmode %d", d->bd_bufmode);
	}
	if (bp != d->bd_bif)
		bpf_attachd(d, bp);
	BPFD_WLOCK(d);
	reset_d(d);
	BPFD_WUNLOCK(d);
	return (0);
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct cdev *dev, int events, struct thread *td)
{
	struct bpf_d *d;
	int revents;

	if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
		return (events &
		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));

	/*
	 * Refresh PID associated with this descriptor.
	 */
	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_WLOCK(d);
	BPF_PID_REFRESH(d, td);
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/*
			 * Start the read timeout if necessary.
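			 * bd_rtout was configured via BIOCSRTIMEOUT in
			 * bpfioctl() above; zero means wait indefinitely.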
			 */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_WUNLOCK(d);
	return (revents);
}

/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int
bpfkqfilter(struct cdev *dev, struct knote *kn)
{
	struct bpf_d *d;

	if (devfs_get_cdevpriv((void **)&d) != 0 ||
	    kn->kn_filter != EVFILT_READ)
		return (1);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_WLOCK(d);
	BPF_PID_REFRESH_CUR(d);
	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	knlist_add(&d->bd_sel.si_note, kn, 1);
	BPFD_WUNLOCK(d);

	return (0);
}

static void
filt_bpfdetach(struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	knlist_remove(&d->bd_sel.si_note, kn, 0);
}

static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_WLOCK_ASSERT(d);
	ready = bpf_ready(d);
	if (ready) {
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}

	return (ready);
}

#define	BPF_TSTAMP_NONE		0
#define	BPF_TSTAMP_FAST		1
#define	BPF_TSTAMP_NORMAL	2
#define	BPF_TSTAMP_EXTERN	3

static int
bpf_ts_quality(int tstype)
{

	if (tstype == BPF_T_NONE)
		return (BPF_TSTAMP_NONE);
	if ((tstype & BPF_T_FAST) != 0)
		return (BPF_TSTAMP_FAST);

	return (BPF_TSTAMP_NORMAL);
}

static int
bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m)
{
	struct m_tag *tag;
	int quality;

	quality = bpf_ts_quality(tstype);
	if (quality == BPF_TSTAMP_NONE)
		return (quality);

	if (m != NULL) {
		tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL);
		if (tag != NULL) {
			*bt = *(struct bintime *)(tag + 1);
			return (BPF_TSTAMP_EXTERN);
		}
	}
	if (quality == BPF_TSTAMP_NORMAL)
		binuptime(bt);
	else
		getbinuptime(bt);

	return (quality);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bintime bt;
	struct bpf_d *d;
#ifdef BPF_JITTER
	bpf_jit_filter *bf;
#endif
	u_int slen;
	int gottime;

	gottime = BPF_TSTAMP_NONE;

	BPFIF_RLOCK(bp);

	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/*
		 * We are not using any locks for d here because:
		 * 1) any filter change is protected by the interface
		 * write lock
		 * 2) destroying/detaching d is protected by the interface
		 * write lock, too
		 */

		/* XXX: Do not protect counter for the sake of performance. */
		++d->bd_rcount;
		/*
		 * NB: We don't call BPF_CHECK_DIRECTION() here since there
		 * is no way for the caller to indicate to us whether this
		 * packet is inbound or outbound.
		 * In the bpf_mtap() routines, we use the interface pointers
		 * on the mbuf to figure it out.
		 */
#ifdef BPF_JITTER
		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
		if (bf != NULL)
			slen = (*(bf->func))(pkt, pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			/*
			 * Filter matches.  Acquire the write lock.
			 */
			BPFD_WLOCK(d);

			d->bd_fcount++;
			if (gottime < bpf_ts_quality(d->bd_tstamp))
				gottime = bpf_gettime(&bt, d->bd_tstamp, NULL);
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen,
				    bpf_append_bytes, &bt);
			BPFD_WUNLOCK(d);
		}
	}
	BPFIF_RUNLOCK(bp);
}

#define	BPF_CHECK_DIRECTION(d, r, i)				\
	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 * Locking model is explained in bpf_tap().
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bintime bt;
	struct bpf_d *d;
#ifdef BPF_JITTER
	bpf_jit_filter *bf;
#endif
	u_int pktlen, slen;
	int gottime;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m, NULL);
	gottime = BPF_TSTAMP_NONE;

	BPFIF_RLOCK(bp);

	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		++d->bd_rcount;
#ifdef BPF_JITTER
		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
		/* XXX We cannot handle multiple mbufs. */
		if (bf != NULL && m->m_next == NULL)
			slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			BPFD_WLOCK(d);

			d->bd_fcount++;
			if (gottime < bpf_ts_quality(d->bd_tstamp))
				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_append_mbuf, &bt);
			BPFD_WUNLOCK(d);
		}
	}
	BPFIF_RUNLOCK(bp);
}

/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	struct bintime bt;
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m, NULL);
	/*
	 * Craft an on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only set up what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	gottime = BPF_TSTAMP_NONE;

	BPFIF_RLOCK(bp);

	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
		if (slen != 0) {
			BPFD_WLOCK(d);

			d->bd_fcount++;
			if (gottime < bpf_ts_quality(d->bd_tstamp))
				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_append_mbuf, &bt);
			BPFD_WUNLOCK(d);
		}
	}
	BPFIF_RUNLOCK(bp);
}

#undef	BPF_CHECK_DIRECTION

#undef	BPF_TSTAMP_NONE
#undef	BPF_TSTAMP_FAST
#undef	BPF_TSTAMP_NORMAL
#undef	BPF_TSTAMP_EXTERN

static int
bpf_hdrlen(struct bpf_d *d)
{
	int hdrlen;

	hdrlen = d->bd_bif->bif_hdrlen;
#ifndef BURN_BRIDGES
	if (d->bd_tstamp == BPF_T_NONE ||
	    BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
#ifdef COMPAT_FREEBSD32
		if (d->bd_compat32)
			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32);
		else
#endif
			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr);
	else
#endif
		hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr);
#ifdef COMPAT_FREEBSD32
	if (d->bd_compat32)
		hdrlen = BPF_WORDALIGN32(hdrlen);
	else
#endif
		hdrlen = BPF_WORDALIGN(hdrlen);

	return (hdrlen - d->bd_bif->bif_hdrlen);
}

static void
bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
{
	struct bintime bt2;
	struct timeval tsm;
	struct timespec tsn;

	if ((tstype & BPF_T_MONOTONIC) == 0) {
		bt2 = *bt;
		bintime_add(&bt2, &boottimebin);
		bt = &bt2;
	}
	switch (BPF_T_FORMAT(tstype)) {
	case BPF_T_MICROTIME:
		bintime2timeval(bt, &tsm);
		ts->bt_sec = tsm.tv_sec;
		ts->bt_frac = tsm.tv_usec;
		break;
	case BPF_T_NANOTIME:
		bintime2timespec(bt, &tsn);
		ts->bt_sec = tsn.tv_sec;
		ts->bt_frac = tsn.tv_nsec;
		break;
	case BPF_T_BINTIME:
		ts->bt_sec = bt->sec;
		ts->bt_frac = bt->frac;
		break;
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "cpfn" is the routine called to do the actual data
 * transfer.  bpf_append_bytes is passed in to copy contiguous chunks, while
 * bpf_append_mbuf is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
    struct bintime *bt)
{
	struct bpf_xhdr hdr;
#ifndef BURN_BRIDGES
	struct bpf_hdr hdr_old;
#ifdef COMPAT_FREEBSD32
	struct bpf_hdr32 hdr32_old;
#endif
#endif
	int caplen, curlen, hdrlen, totlen;
	int do_wakeup = 0;
	int do_timestamp;
	int tstype;

	BPFD_WLOCK_ASSERT(d);

	/*
	 * Detect whether user space has released a buffer back to us, and if
	 * so, move it from being a hold buffer to a free buffer.  This may
	 * not be the best place to do it (for example, we might only want to
	 * run this check if we need the space), but for now it's a reliable
	 * spot to do it.
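	 * (For BPF_BUFMODE_ZBUF, "released" is what bpf_zerocopy_canfreebuf()
	 * reports; with plain kernel buffers bpf_canfreebuf() is always 0,
	 * so this check is a no-op there.)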
	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}

	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater than or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	hdrlen = bpf_hdrlen(d);
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 *
	 * Drop the packet if there's no room and no hope of room.
	 * If the packet would overflow the storage buffer or the storage
	 * buffer is considered immutable by the buffer model, try to rotate
	 * the buffer and wake up pending processes.
	 */
#ifdef COMPAT_FREEBSD32
	if (d->bd_compat32)
		curlen = BPF_WORDALIGN32(d->bd_slen);
	else
#endif
		curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
		if (d->bd_fbuf == NULL) {
			/*
			 * There's no room in the store buffer, and no
			 * prospect of room, so drop the packet.  Notify the
			 * buffer model.
			 */
			bpf_buffull(d);
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has already
		 * expired during a select call.  A packet arrived, so the
		 * reader should be woken up.
		 */
		do_wakeup = 1;
	caplen = totlen - hdrlen;
	tstype = d->bd_tstamp;
	do_timestamp = tstype != BPF_T_NONE;
#ifndef BURN_BRIDGES
	if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) {
		struct bpf_ts ts;
		if (do_timestamp)
			bpf_bintime2ts(bt, &ts, tstype);
#ifdef COMPAT_FREEBSD32
		if (d->bd_compat32) {
			bzero(&hdr32_old, sizeof(hdr32_old));
			if (do_timestamp) {
				hdr32_old.bh_tstamp.tv_sec = ts.bt_sec;
				hdr32_old.bh_tstamp.tv_usec = ts.bt_frac;
			}
			hdr32_old.bh_datalen = pktlen;
			hdr32_old.bh_hdrlen = hdrlen;
			hdr32_old.bh_caplen = caplen;
			bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
			    sizeof(hdr32_old));
			goto copy;
		}
#endif
		bzero(&hdr_old, sizeof(hdr_old));
		if (do_timestamp) {
			hdr_old.bh_tstamp.tv_sec = ts.bt_sec;
			hdr_old.bh_tstamp.tv_usec = ts.bt_frac;
		}
		hdr_old.bh_datalen = pktlen;
		hdr_old.bh_hdrlen = hdrlen;
		hdr_old.bh_caplen = caplen;
		bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
		    sizeof(hdr_old));
		goto copy;
	}
#endif

	/*
	 * Append the bpf header.  Note we append the actual header size, but
	 * move forward the length of the header plus padding.
	 */
	bzero(&hdr, sizeof(hdr));
	if (do_timestamp)
		bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype);
	hdr.bh_datalen = pktlen;
	hdr.bh_hdrlen = hdrlen;
	hdr.bh_caplen = caplen;
	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));

	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
#ifndef BURN_BRIDGES
copy:
#endif
	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
	d->bd_slen = curlen + totlen;

	if (do_wakeup)
		bpf_wakeup(d);
}
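
/*
 * For reference, a user-space consumer walks the records laid out by
 * catchpacket() roughly like this (illustrative sketch, assuming the
 * default struct bpf_hdr layout):
 *
 *	u_char *p = buf;
 *	while (p < buf + nread) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		process(p + bh->bh_hdrlen, bh->bh_caplen);
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 *
 * bh_hdrlen already includes the alignment padding computed in
 * bpf_hdrlen(), which is why the sketch advances by bh_hdrlen plus
 * bh_caplen, rounded up to the next word boundary.
 */
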
/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{

	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	bpf_free(d);
	if (d->bd_rfilter != NULL) {
		free((caddr_t)d->bd_rfilter, M_BPF);
#ifdef BPF_JITTER
		if (d->bd_bfilter != NULL)
			bpf_destroy_jit_filter(d->bd_bfilter);
#endif
	}
	if (d->bd_wfilter != NULL)
		free((caddr_t)d->bd_wfilter, M_BPF);
	rw_destroy(&d->bd_lock);
}

/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
	struct bpf_if *bp;

	bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
	if (bp == NULL)
		panic("bpfattach");

	LIST_INIT(&bp->bif_dlist);
	LIST_INIT(&bp->bif_wlist);
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	rw_init(&bp->bif_lock, "bpf interface lock");
	KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
	*driverp = bp;

	BPF_LOCK();
	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
	BPF_UNLOCK();

	bp->bif_hdrlen = hdrlen;

	if (bootverbose)
		if_printf(ifp, "bpf attached\n");
}

/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface.  Notify each descriptor as it's detached
 * so that any sleepers wake up and get ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp;
	struct bpf_d *d;
#ifdef INVARIANTS
	int ndetached;

	ndetached = 0;
#endif

	/* Find all bpf_if struct's which reference ifp and detach them. */
	do {
		BPF_LOCK();
		LIST_FOREACH(bp, &bpf_iflist, bif_next) {
			if (ifp == bp->bif_ifp)
				break;
		}
		if (bp != NULL)
			LIST_REMOVE(bp, bif_next);
		BPF_UNLOCK();

		if (bp != NULL) {
#ifdef INVARIANTS
			ndetached++;
#endif
			while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
				bpf_detachd(d);
				BPFD_WLOCK(d);
				bpf_wakeup(d);
				BPFD_WUNLOCK(d);
			}
			/* Free writer-only descriptors. */
			while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) {
				bpf_detachd(d);
				BPFD_WLOCK(d);
				bpf_wakeup(d);
				BPFD_WUNLOCK(d);
			}
			rw_destroy(&bp->bif_lock);
			free(bp, M_BPF);
		}
	} while (bp != NULL);

#ifdef INVARIANTS
	if (ndetached == 0)
		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
#endif
}
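
/*
 * Illustrative driver-side usage of the attach/detach pair: an Ethernet
 * driver would typically do
 *
 *	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
 *
 * at attach time and bpfdetach(ifp) at teardown.  A driver exposing
 * additional framings (e.g. raw 802.11) calls bpfattach2() once per
 * DLT, each time with its own bpf_if pointer.
 */
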
/*
 * Get a list of available data link types of the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	BPF_LOCK();
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len) {
				BPF_UNLOCK();
				return (ENOMEM);
			}
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	BPF_UNLOCK();
	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	BPF_LOCK_ASSERT();

	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;

	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}

	if (bp != NULL) {
		opromisc = d->bd_promisc;
		bpf_attachd(d, bp);
		BPFD_WLOCK(d);
		reset_d(d);
		BPFD_WUNLOCK(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error)
				if_printf(bp->bif_ifp,
				    "bpf_setdlt: ifpromisc failed (%d)\n",
				    error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}

static void
bpf_drvinit(void *unused)
{
	struct cdev *dev;

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	LIST_INIT(&bpf_iflist);

	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
	/* For compatibility */
	make_dev_alias(dev, "bpf0");
}
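
/*
 * Illustrative user-space view of the DLT handlers above (sketch;
 * "em0" is a hypothetical interface name):
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr;
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);
 *	u_int dlts[16];
 *	struct bpf_dltlist dl = { 16, dlts };
 *	ioctl(fd, BIOCGDLTLIST, &dl);	(reaches bpf_getdltlist())
 *	ioctl(fd, BIOCSDLT, &dlts[0]);	(reaches bpf_setdlt())
 */
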
/*
 * Zero out the various packet counters associated with all of the bpf
 * descriptors.  At some point, we will probably want to get a bit more
 * granular and allow the user to specify descriptors to be zeroed.
 */
static void
bpf_zero_counters(void)
{
	struct bpf_if *bp;
	struct bpf_d *bd;

	BPF_LOCK();
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_RLOCK(bp);
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			BPFD_WLOCK(bd);
			bd->bd_rcount = 0;
			bd->bd_dcount = 0;
			bd->bd_fcount = 0;
			bd->bd_wcount = 0;
			bd->bd_wfcount = 0;
			bd->bd_zcopy = 0;
			BPFD_WUNLOCK(bd);
		}
		BPFIF_RUNLOCK(bp);
	}
	BPF_UNLOCK();
}

/*
 * Fill filter statistics.
 */
static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{

	bzero(d, sizeof(*d));
	BPFD_LOCK_ASSERT(bd);
	d->bd_structsize = sizeof(*d);
	/* XXX: reading should be protected by global lock */
	d->bd_immediate = bd->bd_immediate;
	d->bd_promisc = bd->bd_promisc;
	d->bd_hdrcmplt = bd->bd_hdrcmplt;
	d->bd_direction = bd->bd_direction;
	d->bd_feedback = bd->bd_feedback;
	d->bd_async = bd->bd_async;
	d->bd_rcount = bd->bd_rcount;
	d->bd_dcount = bd->bd_dcount;
	d->bd_fcount = bd->bd_fcount;
	d->bd_sig = bd->bd_sig;
	d->bd_slen = bd->bd_slen;
	d->bd_hlen = bd->bd_hlen;
	d->bd_bufsize = bd->bd_bufsize;
	d->bd_pid = bd->bd_pid;
	strlcpy(d->bd_ifname,
	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
	d->bd_locked = bd->bd_locked;
	d->bd_wcount = bd->bd_wcount;
	d->bd_wdcount = bd->bd_wdcount;
	d->bd_wfcount = bd->bd_wfcount;
	d->bd_zcopy = bd->bd_zcopy;
	d->bd_bufmode = bd->bd_bufmode;
}

/*
 * Handle `netstat -B' stats request.
 */
static int
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct xbpf_d *xbdbuf, *xbd, zerostats;
	int index, error;
	struct bpf_if *bp;
	struct bpf_d *bd;

	/*
	 * XXX This is not technically correct.  It is possible for
	 * non-privileged users to open bpf devices.  It would make sense
	 * if the users who opened the devices were able to retrieve
	 * the statistics for them, too.
	 */
	error = priv_check(req->td, PRIV_NET_BPF);
	if (error)
		return (error);
	/*
	 * Check to see if the user is requesting that the counters be
	 * zeroed out.  Explicitly check that the supplied data is zeroed,
	 * as we aren't allowing the user to set the counters currently.
	 */
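	/*
	 * User space zeroes the counters by writing an all-zero struct
	 * xbpf_d to this sysctl, e.g. (sketch, assuming the handler is
	 * attached as net.bpf.stats):
	 *
	 *	struct xbpf_d zero = { 0 };
	 *	sysctlbyname("net.bpf.stats", NULL, NULL, &zero,
	 *	    sizeof(zero));
	 */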
	if (req->newptr != NULL) {
		if (req->newlen != sizeof(zerostats))
			return (EINVAL);
		bzero(&zerostats, sizeof(zerostats));
		xbd = req->newptr;
		if (bcmp(xbd, &zerostats, sizeof(*xbd)) != 0)
			return (EINVAL);
		bpf_zero_counters();
		return (0);
	}
	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
	if (bpf_bpfd_cnt == 0)
		return (SYSCTL_OUT(req, 0, 0));
	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
	BPF_LOCK();
	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
		BPF_UNLOCK();
		free(xbdbuf, M_BPF);
		return (ENOMEM);
	}
	index = 0;
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_RLOCK(bp);
		/* Send writers-only first. */
		LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_RLOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_RUNLOCK(bd);
		}
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_RLOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_RUNLOCK(bd);
		}
		BPFIF_RUNLOCK(bp);
	}
	BPF_UNLOCK();
	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
	free(xbdbuf, M_BPF);
	return (error);
}

SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);

#else /* !DEV_BPF && !NETGRAPH_BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */
static struct bpf_if bp_null;

void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}

void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}

void
bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
{
}

void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{

	*driverp = &bp_null;
}

void
bpfdetach(struct ifnet *ifp)
{
}

u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{

	return (-1);	/* "no filter" behaviour */
}

int
bpf_validate(const struct bpf_insn *f, int len)
{

	return (0);	/* false */
}

#endif /* !DEV_BPF && !NETGRAPH_BPF */