1 /*- 2 * Copyright (c) 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 *
 *	@(#)bpf.c	8.4 (Berkeley) 1/9/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bpf.h"
#include "opt_compat.h"
#include "opt_netgraph.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/uio.h>

#include <sys/event.h>
#include <sys/file.h>
#include <sys/poll.h>
#include <sys/proc.h>

#include <sys/socket.h>

#include <net/if.h>
#define	BPF_INTERNAL
#include <net/bpf.h>
#include <net/bpf_buffer.h>
#ifdef BPF_JITTER
#include <net/bpf_jitter.h>
#endif
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <net80211/ieee80211_freebsd.h>

#include <security/mac/mac_framework.h>

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

#define	PRINET	26			/* interruptible */

#define	SIZEOF_BPF_HDR(type)	\
    (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>
#define	BPF_ALIGNMENT32 sizeof(int32_t)
#define	BPF_WORDALIGN32(x) (((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1))

#ifndef BURN_BRIDGES
/*
 * 32-bit version of structure prepended to each packet.  We use this header
 * instead of the standard one for 32-bit streams.  We mark a stream as
 * 32-bit the first time we see a 32-bit compat ioctl request.
 */
struct bpf_hdr32 {
	struct timeval32 bh_tstamp;	/* time stamp */
	uint32_t	bh_caplen;	/* length of captured portion */
	uint32_t	bh_datalen;	/* original length of packet */
	uint16_t	bh_hdrlen;	/* length of bpf header (this struct
					   plus alignment padding) */
};
#endif

struct bpf_program32 {
	u_int bf_len;
	uint32_t bf_insns;
};

struct bpf_dltlist32 {
	u_int	bfl_len;
	u_int	bfl_list;
};

#define	BIOCSETF32	_IOW('B', 103, struct bpf_program32)
#define	BIOCSRTIMEOUT32	_IOW('B', 109, struct timeval32)
#define	BIOCGRTIMEOUT32	_IOR('B', 110, struct timeval32)
#define	BIOCGDLTLIST32	_IOWR('B', 121, struct bpf_dltlist32)
#define	BIOCSETWF32	_IOW('B', 123, struct bpf_program32)
#define	BIOCSETFNR32	_IOW('B', 130, struct bpf_program32)
#endif

/*
 * bpf_iflist is a list of BPF interface structures, each corresponding to a
 * specific DLT.  The same network interface might have several BPF interface
 * structures registered by different layers in the stack (i.e., 802.11
 * frames, ethernet frames, etc).
143 */ 144 static LIST_HEAD(, bpf_if) bpf_iflist; 145 static struct mtx bpf_mtx; /* bpf global lock */ 146 static int bpf_bpfd_cnt; 147 148 static void bpf_attachd(struct bpf_d *, struct bpf_if *); 149 static void bpf_detachd(struct bpf_d *); 150 static void bpf_detachd_locked(struct bpf_d *); 151 static void bpf_freed(struct bpf_d *); 152 static int bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **, 153 struct sockaddr *, int *, struct bpf_insn *); 154 static int bpf_setif(struct bpf_d *, struct ifreq *); 155 static void bpf_timed_out(void *); 156 static __inline void 157 bpf_wakeup(struct bpf_d *); 158 static void catchpacket(struct bpf_d *, u_char *, u_int, u_int, 159 void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int), 160 struct bintime *); 161 static void reset_d(struct bpf_d *); 162 static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd); 163 static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *); 164 static int bpf_setdlt(struct bpf_d *, u_int); 165 static void filt_bpfdetach(struct knote *); 166 static int filt_bpfread(struct knote *, long); 167 static void bpf_drvinit(void *); 168 static int bpf_stats_sysctl(SYSCTL_HANDLER_ARGS); 169 170 SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl"); 171 int bpf_maxinsns = BPF_MAXINSNS; 172 SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW, 173 &bpf_maxinsns, 0, "Maximum bpf program instructions"); 174 static int bpf_zerocopy_enable = 0; 175 SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW, 176 &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions"); 177 static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW, 178 bpf_stats_sysctl, "bpf statistics portal"); 179 180 static VNET_DEFINE(int, bpf_optimize_writers) = 0; 181 #define V_bpf_optimize_writers VNET(bpf_optimize_writers) 182 SYSCTL_VNET_INT(_net_bpf, OID_AUTO, optimize_writers, 183 CTLFLAG_RW, &VNET_NAME(bpf_optimize_writers), 0, 184 "Do not send packets until BPF program is set"); 185 186 static d_open_t bpfopen; 187 static d_read_t bpfread; 188 static d_write_t bpfwrite; 189 static d_ioctl_t bpfioctl; 190 static d_poll_t bpfpoll; 191 static d_kqfilter_t bpfkqfilter; 192 193 static struct cdevsw bpf_cdevsw = { 194 .d_version = D_VERSION, 195 .d_open = bpfopen, 196 .d_read = bpfread, 197 .d_write = bpfwrite, 198 .d_ioctl = bpfioctl, 199 .d_poll = bpfpoll, 200 .d_name = "bpf", 201 .d_kqfilter = bpfkqfilter, 202 }; 203 204 static struct filterops bpfread_filtops = { 205 .f_isfd = 1, 206 .f_detach = filt_bpfdetach, 207 .f_event = filt_bpfread, 208 }; 209 210 eventhandler_tag bpf_ifdetach_cookie = NULL; 211 212 /* 213 * LOCKING MODEL USED BY BPF: 214 * Locks: 215 * 1) global lock (BPF_LOCK). Mutex, used to protect interface addition/removal, 216 * some global counters and every bpf_if reference. 217 * 2) Interface lock. Rwlock, used to protect list of BPF descriptors and their filters. 218 * 3) Descriptor lock. Mutex, used to protect BPF buffers and various structure fields 219 * used by bpf_mtap code. 220 * 221 * Lock order: 222 * 223 * Global lock, interface lock, descriptor lock 224 * 225 * We have to acquire interface lock before descriptor main lock due to BPF_MTAP[2] 226 * working model. In many places (like bpf_detachd) we start with BPF descriptor 227 * (and we need to at least rlock it to get reliable interface pointer). This 228 * gives us potential LOR. As a result, we use global lock to protect from bpf_if 229 * change in every such place. 
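 *
 * For illustration, a write-side path that follows this order (as
 * bpf_attachd() and bpf_setf() below do) looks like:
 *
 *	BPF_LOCK();
 *	BPFIF_WLOCK(bp);
 *	BPFD_LOCK(d);
 *	... change d->bd_bif or the descriptor's filter ...
 *	BPFD_UNLOCK(d);
 *	BPFIF_WUNLOCK(bp);
 *	BPF_UNLOCK();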
230 * 231 * Changing d->bd_bif is protected by 1) global lock, 2) interface lock and 232 * 3) descriptor main wlock. 233 * Reading bd_bif can be protected by any of these locks, typically global lock. 234 * 235 * Changing read/write BPF filter is protected by the same three locks, 236 * the same applies for reading. 237 * 238 * Sleeping in global lock is not allowed due to bpfdetach() using it. 239 */ 240 241 /* 242 * Wrapper functions for various buffering methods. If the set of buffer 243 * modes expands, we will probably want to introduce a switch data structure 244 * similar to protosw, et. 245 */ 246 static void 247 bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src, 248 u_int len) 249 { 250 251 BPFD_LOCK_ASSERT(d); 252 253 switch (d->bd_bufmode) { 254 case BPF_BUFMODE_BUFFER: 255 return (bpf_buffer_append_bytes(d, buf, offset, src, len)); 256 257 case BPF_BUFMODE_ZBUF: 258 d->bd_zcopy++; 259 return (bpf_zerocopy_append_bytes(d, buf, offset, src, len)); 260 261 default: 262 panic("bpf_buf_append_bytes"); 263 } 264 } 265 266 static void 267 bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src, 268 u_int len) 269 { 270 271 BPFD_LOCK_ASSERT(d); 272 273 switch (d->bd_bufmode) { 274 case BPF_BUFMODE_BUFFER: 275 return (bpf_buffer_append_mbuf(d, buf, offset, src, len)); 276 277 case BPF_BUFMODE_ZBUF: 278 d->bd_zcopy++; 279 return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len)); 280 281 default: 282 panic("bpf_buf_append_mbuf"); 283 } 284 } 285 286 /* 287 * This function gets called when the free buffer is re-assigned. 288 */ 289 static void 290 bpf_buf_reclaimed(struct bpf_d *d) 291 { 292 293 BPFD_LOCK_ASSERT(d); 294 295 switch (d->bd_bufmode) { 296 case BPF_BUFMODE_BUFFER: 297 return; 298 299 case BPF_BUFMODE_ZBUF: 300 bpf_zerocopy_buf_reclaimed(d); 301 return; 302 303 default: 304 panic("bpf_buf_reclaimed"); 305 } 306 } 307 308 /* 309 * If the buffer mechanism has a way to decide that a held buffer can be made 310 * free, then it is exposed via the bpf_canfreebuf() interface. (1) is 311 * returned if the buffer can be discarded, (0) is returned if it cannot. 312 */ 313 static int 314 bpf_canfreebuf(struct bpf_d *d) 315 { 316 317 BPFD_LOCK_ASSERT(d); 318 319 switch (d->bd_bufmode) { 320 case BPF_BUFMODE_ZBUF: 321 return (bpf_zerocopy_canfreebuf(d)); 322 } 323 return (0); 324 } 325 326 /* 327 * Allow the buffer model to indicate that the current store buffer is 328 * immutable, regardless of the appearance of space. Return (1) if the 329 * buffer is writable, and (0) if not. 330 */ 331 static int 332 bpf_canwritebuf(struct bpf_d *d) 333 { 334 BPFD_LOCK_ASSERT(d); 335 336 switch (d->bd_bufmode) { 337 case BPF_BUFMODE_ZBUF: 338 return (bpf_zerocopy_canwritebuf(d)); 339 } 340 return (1); 341 } 342 343 /* 344 * Notify buffer model that an attempt to write to the store buffer has 345 * resulted in a dropped packet, in which case the buffer may be considered 346 * full. 347 */ 348 static void 349 bpf_buffull(struct bpf_d *d) 350 { 351 352 BPFD_LOCK_ASSERT(d); 353 354 switch (d->bd_bufmode) { 355 case BPF_BUFMODE_ZBUF: 356 bpf_zerocopy_buffull(d); 357 break; 358 } 359 } 360 361 /* 362 * Notify the buffer model that a buffer has moved into the hold position. 
363 */ 364 void 365 bpf_bufheld(struct bpf_d *d) 366 { 367 368 BPFD_LOCK_ASSERT(d); 369 370 switch (d->bd_bufmode) { 371 case BPF_BUFMODE_ZBUF: 372 bpf_zerocopy_bufheld(d); 373 break; 374 } 375 } 376 377 static void 378 bpf_free(struct bpf_d *d) 379 { 380 381 switch (d->bd_bufmode) { 382 case BPF_BUFMODE_BUFFER: 383 return (bpf_buffer_free(d)); 384 385 case BPF_BUFMODE_ZBUF: 386 return (bpf_zerocopy_free(d)); 387 388 default: 389 panic("bpf_buf_free"); 390 } 391 } 392 393 static int 394 bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio) 395 { 396 397 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) 398 return (EOPNOTSUPP); 399 return (bpf_buffer_uiomove(d, buf, len, uio)); 400 } 401 402 static int 403 bpf_ioctl_sblen(struct bpf_d *d, u_int *i) 404 { 405 406 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) 407 return (EOPNOTSUPP); 408 return (bpf_buffer_ioctl_sblen(d, i)); 409 } 410 411 static int 412 bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i) 413 { 414 415 if (d->bd_bufmode != BPF_BUFMODE_ZBUF) 416 return (EOPNOTSUPP); 417 return (bpf_zerocopy_ioctl_getzmax(td, d, i)); 418 } 419 420 static int 421 bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz) 422 { 423 424 if (d->bd_bufmode != BPF_BUFMODE_ZBUF) 425 return (EOPNOTSUPP); 426 return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz)); 427 } 428 429 static int 430 bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz) 431 { 432 433 if (d->bd_bufmode != BPF_BUFMODE_ZBUF) 434 return (EOPNOTSUPP); 435 return (bpf_zerocopy_ioctl_setzbuf(td, d, bz)); 436 } 437 438 /* 439 * General BPF functions. 440 */ 441 static int 442 bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp, 443 struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter) 444 { 445 const struct ieee80211_bpf_params *p; 446 struct ether_header *eh; 447 struct mbuf *m; 448 int error; 449 int len; 450 int hlen; 451 int slen; 452 453 /* 454 * Build a sockaddr based on the data link layer type. 455 * We do this at this level because the ethernet header 456 * is copied directly into the data field of the sockaddr. 457 * In the case of SLIP, there is no header and the packet 458 * is forwarded as is. 459 * Also, we are careful to leave room at the front of the mbuf 460 * for the link level header. 461 */ 462 switch (linktype) { 463 464 case DLT_SLIP: 465 sockp->sa_family = AF_INET; 466 hlen = 0; 467 break; 468 469 case DLT_EN10MB: 470 sockp->sa_family = AF_UNSPEC; 471 /* XXX Would MAXLINKHDR be better? */ 472 hlen = ETHER_HDR_LEN; 473 break; 474 475 case DLT_FDDI: 476 sockp->sa_family = AF_IMPLINK; 477 hlen = 0; 478 break; 479 480 case DLT_RAW: 481 sockp->sa_family = AF_UNSPEC; 482 hlen = 0; 483 break; 484 485 case DLT_NULL: 486 /* 487 * null interface types require a 4 byte pseudo header which 488 * corresponds to the address family of the packet. 489 */ 490 sockp->sa_family = AF_UNSPEC; 491 hlen = 4; 492 break; 493 494 case DLT_ATM_RFC1483: 495 /* 496 * en atm driver requires 4-byte atm pseudo header. 497 * though it isn't standard, vpi:vci needs to be 498 * specified anyway. 
 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;

	if (len - hlen > ifp->if_mtu)
		return (EMSGSIZE);

	if ((unsigned)len > MJUM16BYTES)
		return (EIO);

	if (len <= MHLEN)
		MGETHDR(m, M_WAIT, MT_DATA);
	else if (len <= MCLBYTES)
		m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
	else
		m = m_getjcl(M_WAIT, MT_DATA, M_PKTHDR,
#if (MJUMPAGESIZE > MCLBYTES)
		    len <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    (len <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES));
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/* Check for multicast destination */
	switch (linktype) {
	case DLT_EN10MB:
		eh = mtod(m, struct ether_header *);
		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
			    ETHER_ADDR_LEN) == 0)
				m->m_flags |= M_BCAST;
			else
				m->m_flags |= M_MCAST;
		}
		break;
	}

	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     zero.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(m->m_data, sockp->sa_data, hlen);
	}
	*hdrlen = hlen;

	return (0);
bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	int op_w;

	BPF_LOCK_ASSERT();

	/*
	 * Save sysctl value to protect from sysctl change
	 * between reads
	 */
	op_w = V_bpf_optimize_writers;

	if (d->bd_bif != NULL)
		bpf_detachd_locked(d);
	/*
	 * Point d at bp, and add d to the interface's list.
	 * Since many applications use BPF only for sending raw packets
	 * (dhcpd and cdpd are good examples), we can delay adding d to the
	 * list of active listeners until some filter is configured.
	 */

	BPFIF_WLOCK(bp);
	BPFD_LOCK(d);

	d->bd_bif = bp;

	if (op_w != 0) {
		/* Add to writers-only list */
		LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
		/*
		 * We decrement bd_writer on every filter set operation.
		 * First BIOCSETF is done by pcap_open_live() to set up the
		 * snap length.  After that the application usually sets its
		 * own filter.
		 */
		d->bd_writer = 2;
	} else
		LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	BPFD_UNLOCK(d);
	BPFIF_WUNLOCK(bp);

	bpf_bpfd_cnt++;

	CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
	    __func__, d->bd_pid, d->bd_writer ? "writer" : "active");

	if (op_w == 0)
		EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
}

/*
 * Add d to the list of active bp filters.
 * Requires bpf_attachd() to have been called first.
 */
static void
bpf_upgraded(struct bpf_d *d)
{
	struct bpf_if *bp;

	BPF_LOCK_ASSERT();

	bp = d->bd_bif;

	/*
	 * Filter can be set several times without specifying interface.
	 * Mark d as reader and exit.
	 */
	if (bp == NULL) {
		BPFD_LOCK(d);
		d->bd_writer = 0;
		BPFD_UNLOCK(d);
		return;
	}

	BPFIF_WLOCK(bp);
	BPFD_LOCK(d);

	/* Remove from writers-only list */
	LIST_REMOVE(d, bd_next);
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
	/* Mark d as reader */
	d->bd_writer = 0;

	BPFD_UNLOCK(d);
	BPFIF_WUNLOCK(bp);

	CTR2(KTR_NET, "%s: upgrade required by pid %d", __func__, d->bd_pid);

	EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	BPF_LOCK();
	bpf_detachd_locked(d);
	BPF_UNLOCK();
}

static void
bpf_detachd_locked(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);

	BPF_LOCK_ASSERT();

	/* Check if descriptor is attached */
	if ((bp = d->bd_bif) == NULL)
		return;

	BPFIF_WLOCK(bp);
	BPFD_LOCK(d);

	/* Save bd_writer value */
	error = d->bd_writer;

	/*
	 * Remove d from the interface's descriptor list.
	 */
	LIST_REMOVE(d, bd_next);

	ifp = bp->bif_ifp;
	d->bd_bif = NULL;
	BPFD_UNLOCK(d);
	BPFIF_WUNLOCK(bp);

	bpf_bpfd_cnt--;

	/* Call event handler iff d is attached */
	if (error == 0)
		EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		CURVNET_SET(ifp->if_vnet);
		error = ifpromisc(ifp, 0);
		CURVNET_RESTORE();
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged.
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
			    "bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
static void
bpf_dtor(void *data)
{
	struct bpf_d *d = data;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);
	bpf_detachd(d);
#ifdef MAC
	mac_bpfdesc_destroy(d);
#endif /* MAC */
	seldrain(&d->bd_sel);
	knlist_destroy(&d->bd_sel.si_note);
	callout_drain(&d->bd_callout);
	bpf_freed(d);
	free(d, M_BPF);
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d;
	int error, size;

	d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(d, bpf_dtor);
	if (error != 0) {
		free(d, M_BPF);
		return (error);
	}

	/*
	 * For historical reasons, perform a one-time initialization call to
	 * the buffer routines, even though we're not yet committed to a
	 * particular buffer method.
	 */
	bpf_buffer_init(d);
	d->bd_bufmode = BPF_BUFMODE_BUFFER;
	d->bd_sig = SIGIO;
	d->bd_direction = BPF_D_INOUT;
	BPF_PID_REFRESH(d, td);
#ifdef MAC
	mac_bpfdesc_init(d);
	mac_bpfdesc_create(td->td_ucred, d);
#endif
	mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
	knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);

	/* Allocate default buffers */
	size = d->bd_bufsize;
	bpf_buffer_ioctl_sblen(d, &size);

	return (0);
}

/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	int error;
	int non_block;
	int timed_out;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	non_block = ((ioflag & O_NONBLOCK) != 0);

	BPFD_LOCK(d);
	BPF_PID_REFRESH_CUR(d);
	if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
		BPFD_UNLOCK(d);
		return (EOPNOTSUPP);
	}
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if (d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 */
			if (d->bd_immediate || non_block || timed_out) {
				/*
				 * Rotate the buffers and return what's here
				 * if we are in immediate mode, non-blocking
				 * flag is set, or this descriptor timed out.
				 */
				ROTATE_BUFFERS(d);
				break;
			}
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (non_block) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		error = msleep(d, &d->bd_lock, PRINET|PCATCH,
		     "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
929 */ 930 break; 931 932 if (d->bd_slen == 0) { 933 BPFD_UNLOCK(d); 934 return (0); 935 } 936 ROTATE_BUFFERS(d); 937 break; 938 } 939 } 940 /* 941 * At this point, we know we have something in the hold slot. 942 */ 943 BPFD_UNLOCK(d); 944 945 /* 946 * Move data from hold buffer into user space. 947 * We know the entire buffer is transferred since 948 * we checked above that the read buffer is bpf_bufsize bytes. 949 * 950 * XXXRW: More synchronization needed here: what if a second thread 951 * issues a read on the same fd at the same time? Don't want this 952 * getting invalidated. 953 */ 954 error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio); 955 956 BPFD_LOCK(d); 957 d->bd_fbuf = d->bd_hbuf; 958 d->bd_hbuf = NULL; 959 d->bd_hlen = 0; 960 bpf_buf_reclaimed(d); 961 BPFD_UNLOCK(d); 962 963 return (error); 964 } 965 966 /* 967 * If there are processes sleeping on this descriptor, wake them up. 968 */ 969 static __inline void 970 bpf_wakeup(struct bpf_d *d) 971 { 972 973 BPFD_LOCK_ASSERT(d); 974 if (d->bd_state == BPF_WAITING) { 975 callout_stop(&d->bd_callout); 976 d->bd_state = BPF_IDLE; 977 } 978 wakeup(d); 979 if (d->bd_async && d->bd_sig && d->bd_sigio) 980 pgsigio(&d->bd_sigio, d->bd_sig, 0); 981 982 selwakeuppri(&d->bd_sel, PRINET); 983 KNOTE_LOCKED(&d->bd_sel.si_note, 0); 984 } 985 986 static void 987 bpf_timed_out(void *arg) 988 { 989 struct bpf_d *d = (struct bpf_d *)arg; 990 991 BPFD_LOCK_ASSERT(d); 992 993 if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout)) 994 return; 995 if (d->bd_state == BPF_WAITING) { 996 d->bd_state = BPF_TIMED_OUT; 997 if (d->bd_slen != 0) 998 bpf_wakeup(d); 999 } 1000 } 1001 1002 static int 1003 bpf_ready(struct bpf_d *d) 1004 { 1005 1006 BPFD_LOCK_ASSERT(d); 1007 1008 if (!bpf_canfreebuf(d) && d->bd_hlen != 0) 1009 return (1); 1010 if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) && 1011 d->bd_slen != 0) 1012 return (1); 1013 return (0); 1014 } 1015 1016 static int 1017 bpfwrite(struct cdev *dev, struct uio *uio, int ioflag) 1018 { 1019 struct bpf_d *d; 1020 struct ifnet *ifp; 1021 struct mbuf *m, *mc; 1022 struct sockaddr dst; 1023 int error, hlen; 1024 1025 error = devfs_get_cdevpriv((void **)&d); 1026 if (error != 0) 1027 return (error); 1028 1029 BPF_PID_REFRESH_CUR(d); 1030 d->bd_wcount++; 1031 /* XXX: locking required */ 1032 if (d->bd_bif == NULL) { 1033 d->bd_wdcount++; 1034 return (ENXIO); 1035 } 1036 1037 ifp = d->bd_bif->bif_ifp; 1038 1039 if ((ifp->if_flags & IFF_UP) == 0) { 1040 d->bd_wdcount++; 1041 return (ENETDOWN); 1042 } 1043 1044 if (uio->uio_resid == 0) { 1045 d->bd_wdcount++; 1046 return (0); 1047 } 1048 1049 bzero(&dst, sizeof(dst)); 1050 m = NULL; 1051 hlen = 0; 1052 /* XXX: bpf_movein() can sleep */ 1053 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp, 1054 &m, &dst, &hlen, d->bd_wfilter); 1055 if (error) { 1056 d->bd_wdcount++; 1057 return (error); 1058 } 1059 d->bd_wfcount++; 1060 if (d->bd_hdrcmplt) 1061 dst.sa_family = pseudo_AF_HDRCMPLT; 1062 1063 if (d->bd_feedback) { 1064 mc = m_dup(m, M_DONTWAIT); 1065 if (mc != NULL) 1066 mc->m_pkthdr.rcvif = ifp; 1067 /* Set M_PROMISC for outgoing packets to be discarded. 
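	 * bpf_mtap() and bpf_mtap2() later skip outgoing mbufs that still
	 * carry M_PROMISC (and have no receive interface), so only the
	 * feedback copy injected through if_input() below is captured.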
*/ 1068 if (d->bd_direction == BPF_D_INOUT) 1069 m->m_flags |= M_PROMISC; 1070 } else 1071 mc = NULL; 1072 1073 m->m_pkthdr.len -= hlen; 1074 m->m_len -= hlen; 1075 m->m_data += hlen; /* XXX */ 1076 1077 CURVNET_SET(ifp->if_vnet); 1078 #ifdef MAC 1079 BPFD_LOCK(d); 1080 mac_bpfdesc_create_mbuf(d, m); 1081 if (mc != NULL) 1082 mac_bpfdesc_create_mbuf(d, mc); 1083 BPFD_UNLOCK(d); 1084 #endif 1085 1086 error = (*ifp->if_output)(ifp, m, &dst, NULL); 1087 if (error) 1088 d->bd_wdcount++; 1089 1090 if (mc != NULL) { 1091 if (error == 0) 1092 (*ifp->if_input)(ifp, mc); 1093 else 1094 m_freem(mc); 1095 } 1096 CURVNET_RESTORE(); 1097 1098 return (error); 1099 } 1100 1101 /* 1102 * Reset a descriptor by flushing its packet buffer and clearing the receive 1103 * and drop counts. This is doable for kernel-only buffers, but with 1104 * zero-copy buffers, we can't write to (or rotate) buffers that are 1105 * currently owned by userspace. It would be nice if we could encapsulate 1106 * this logic in the buffer code rather than here. 1107 */ 1108 static void 1109 reset_d(struct bpf_d *d) 1110 { 1111 1112 BPFD_LOCK_ASSERT(d); 1113 1114 if ((d->bd_hbuf != NULL) && 1115 (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) { 1116 /* Free the hold buffer. */ 1117 d->bd_fbuf = d->bd_hbuf; 1118 d->bd_hbuf = NULL; 1119 d->bd_hlen = 0; 1120 bpf_buf_reclaimed(d); 1121 } 1122 if (bpf_canwritebuf(d)) 1123 d->bd_slen = 0; 1124 d->bd_rcount = 0; 1125 d->bd_dcount = 0; 1126 d->bd_fcount = 0; 1127 d->bd_wcount = 0; 1128 d->bd_wfcount = 0; 1129 d->bd_wdcount = 0; 1130 d->bd_zcopy = 0; 1131 } 1132 1133 /* 1134 * FIONREAD Check for read packet available. 1135 * SIOCGIFADDR Get interface address - convenient hook to driver. 1136 * BIOCGBLEN Get buffer len [for read()]. 1137 * BIOCSETF Set read filter. 1138 * BIOCSETFNR Set read filter without resetting descriptor. 1139 * BIOCSETWF Set write filter. 1140 * BIOCFLUSH Flush read packet buffer. 1141 * BIOCPROMISC Put interface into promiscuous mode. 1142 * BIOCGDLT Get link layer type. 1143 * BIOCGETIF Get interface name. 1144 * BIOCSETIF Set interface. 1145 * BIOCSRTIMEOUT Set read timeout. 1146 * BIOCGRTIMEOUT Get read timeout. 1147 * BIOCGSTATS Get packet stats. 1148 * BIOCIMMEDIATE Set immediate mode. 1149 * BIOCVERSION Get filter language version. 1150 * BIOCGHDRCMPLT Get "header already complete" flag 1151 * BIOCSHDRCMPLT Set "header already complete" flag 1152 * BIOCGDIRECTION Get packet direction flag 1153 * BIOCSDIRECTION Set packet direction flag 1154 * BIOCGTSTAMP Get time stamp format and resolution. 1155 * BIOCSTSTAMP Set time stamp format and resolution. 1156 * BIOCLOCK Set "locked" flag 1157 * BIOCFEEDBACK Set packet feedback mode. 1158 * BIOCSETZBUF Set current zero-copy buffer locations. 1159 * BIOCGETZMAX Get maximum zero-copy buffer size. 1160 * BIOCROTZBUF Force rotation of zero-copy buffer 1161 * BIOCSETBUFMODE Set buffer mode. 1162 * BIOCGETBUFMODE Get current buffer mode. 1163 */ 1164 /* ARGSUSED */ 1165 static int 1166 bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, 1167 struct thread *td) 1168 { 1169 struct bpf_d *d; 1170 int error; 1171 1172 error = devfs_get_cdevpriv((void **)&d); 1173 if (error != 0) 1174 return (error); 1175 1176 /* 1177 * Refresh PID associated with this descriptor. 
1178 */ 1179 BPFD_LOCK(d); 1180 BPF_PID_REFRESH(d, td); 1181 if (d->bd_state == BPF_WAITING) 1182 callout_stop(&d->bd_callout); 1183 d->bd_state = BPF_IDLE; 1184 BPFD_UNLOCK(d); 1185 1186 if (d->bd_locked == 1) { 1187 switch (cmd) { 1188 case BIOCGBLEN: 1189 case BIOCFLUSH: 1190 case BIOCGDLT: 1191 case BIOCGDLTLIST: 1192 #ifdef COMPAT_FREEBSD32 1193 case BIOCGDLTLIST32: 1194 #endif 1195 case BIOCGETIF: 1196 case BIOCGRTIMEOUT: 1197 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1198 case BIOCGRTIMEOUT32: 1199 #endif 1200 case BIOCGSTATS: 1201 case BIOCVERSION: 1202 case BIOCGRSIG: 1203 case BIOCGHDRCMPLT: 1204 case BIOCSTSTAMP: 1205 case BIOCFEEDBACK: 1206 case FIONREAD: 1207 case BIOCLOCK: 1208 case BIOCSRTIMEOUT: 1209 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1210 case BIOCSRTIMEOUT32: 1211 #endif 1212 case BIOCIMMEDIATE: 1213 case TIOCGPGRP: 1214 case BIOCROTZBUF: 1215 break; 1216 default: 1217 return (EPERM); 1218 } 1219 } 1220 #ifdef COMPAT_FREEBSD32 1221 /* 1222 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so 1223 * that it will get 32-bit packet headers. 1224 */ 1225 switch (cmd) { 1226 case BIOCSETF32: 1227 case BIOCSETFNR32: 1228 case BIOCSETWF32: 1229 case BIOCGDLTLIST32: 1230 case BIOCGRTIMEOUT32: 1231 case BIOCSRTIMEOUT32: 1232 BPFD_LOCK(d); 1233 d->bd_compat32 = 1; 1234 BPFD_UNLOCK(d); 1235 } 1236 #endif 1237 1238 CURVNET_SET(TD_TO_VNET(td)); 1239 switch (cmd) { 1240 1241 default: 1242 error = EINVAL; 1243 break; 1244 1245 /* 1246 * Check for read packet available. 1247 */ 1248 case FIONREAD: 1249 { 1250 int n; 1251 1252 BPFD_LOCK(d); 1253 n = d->bd_slen; 1254 if (d->bd_hbuf) 1255 n += d->bd_hlen; 1256 BPFD_UNLOCK(d); 1257 1258 *(int *)addr = n; 1259 break; 1260 } 1261 1262 case SIOCGIFADDR: 1263 { 1264 struct ifnet *ifp; 1265 1266 if (d->bd_bif == NULL) 1267 error = EINVAL; 1268 else { 1269 ifp = d->bd_bif->bif_ifp; 1270 error = (*ifp->if_ioctl)(ifp, cmd, addr); 1271 } 1272 break; 1273 } 1274 1275 /* 1276 * Get buffer len [for read()]. 1277 */ 1278 case BIOCGBLEN: 1279 BPFD_LOCK(d); 1280 *(u_int *)addr = d->bd_bufsize; 1281 BPFD_UNLOCK(d); 1282 break; 1283 1284 /* 1285 * Set buffer length. 1286 */ 1287 case BIOCSBLEN: 1288 error = bpf_ioctl_sblen(d, (u_int *)addr); 1289 break; 1290 1291 /* 1292 * Set link layer read filter. 1293 */ 1294 case BIOCSETF: 1295 case BIOCSETFNR: 1296 case BIOCSETWF: 1297 #ifdef COMPAT_FREEBSD32 1298 case BIOCSETF32: 1299 case BIOCSETFNR32: 1300 case BIOCSETWF32: 1301 #endif 1302 error = bpf_setf(d, (struct bpf_program *)addr, cmd); 1303 break; 1304 1305 /* 1306 * Flush read packet buffer. 1307 */ 1308 case BIOCFLUSH: 1309 BPFD_LOCK(d); 1310 reset_d(d); 1311 BPFD_UNLOCK(d); 1312 break; 1313 1314 /* 1315 * Put interface into promiscuous mode. 1316 */ 1317 case BIOCPROMISC: 1318 if (d->bd_bif == NULL) { 1319 /* 1320 * No interface attached yet. 1321 */ 1322 error = EINVAL; 1323 break; 1324 } 1325 if (d->bd_promisc == 0) { 1326 error = ifpromisc(d->bd_bif->bif_ifp, 1); 1327 if (error == 0) 1328 d->bd_promisc = 1; 1329 } 1330 break; 1331 1332 /* 1333 * Get current data link type. 1334 */ 1335 case BIOCGDLT: 1336 BPF_LOCK(); 1337 if (d->bd_bif == NULL) 1338 error = EINVAL; 1339 else 1340 *(u_int *)addr = d->bd_bif->bif_dlt; 1341 BPF_UNLOCK(); 1342 break; 1343 1344 /* 1345 * Get a list of supported data link types. 
1346 */ 1347 #ifdef COMPAT_FREEBSD32 1348 case BIOCGDLTLIST32: 1349 { 1350 struct bpf_dltlist32 *list32; 1351 struct bpf_dltlist dltlist; 1352 1353 list32 = (struct bpf_dltlist32 *)addr; 1354 dltlist.bfl_len = list32->bfl_len; 1355 dltlist.bfl_list = PTRIN(list32->bfl_list); 1356 BPF_LOCK(); 1357 if (d->bd_bif == NULL) 1358 error = EINVAL; 1359 else { 1360 error = bpf_getdltlist(d, &dltlist); 1361 if (error == 0) 1362 list32->bfl_len = dltlist.bfl_len; 1363 } 1364 BPF_UNLOCK(); 1365 break; 1366 } 1367 #endif 1368 1369 case BIOCGDLTLIST: 1370 BPF_LOCK(); 1371 if (d->bd_bif == NULL) 1372 error = EINVAL; 1373 else 1374 error = bpf_getdltlist(d, (struct bpf_dltlist *)addr); 1375 BPF_UNLOCK(); 1376 break; 1377 1378 /* 1379 * Set data link type. 1380 */ 1381 case BIOCSDLT: 1382 BPF_LOCK(); 1383 if (d->bd_bif == NULL) 1384 error = EINVAL; 1385 else 1386 error = bpf_setdlt(d, *(u_int *)addr); 1387 BPF_UNLOCK(); 1388 break; 1389 1390 /* 1391 * Get interface name. 1392 */ 1393 case BIOCGETIF: 1394 BPF_LOCK(); 1395 if (d->bd_bif == NULL) 1396 error = EINVAL; 1397 else { 1398 struct ifnet *const ifp = d->bd_bif->bif_ifp; 1399 struct ifreq *const ifr = (struct ifreq *)addr; 1400 1401 strlcpy(ifr->ifr_name, ifp->if_xname, 1402 sizeof(ifr->ifr_name)); 1403 } 1404 BPF_UNLOCK(); 1405 break; 1406 1407 /* 1408 * Set interface. 1409 */ 1410 case BIOCSETIF: 1411 BPF_LOCK(); 1412 error = bpf_setif(d, (struct ifreq *)addr); 1413 BPF_UNLOCK(); 1414 break; 1415 1416 /* 1417 * Set read timeout. 1418 */ 1419 case BIOCSRTIMEOUT: 1420 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1421 case BIOCSRTIMEOUT32: 1422 #endif 1423 { 1424 struct timeval *tv = (struct timeval *)addr; 1425 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1426 struct timeval32 *tv32; 1427 struct timeval tv64; 1428 1429 if (cmd == BIOCSRTIMEOUT32) { 1430 tv32 = (struct timeval32 *)addr; 1431 tv = &tv64; 1432 tv->tv_sec = tv32->tv_sec; 1433 tv->tv_usec = tv32->tv_usec; 1434 } else 1435 #endif 1436 tv = (struct timeval *)addr; 1437 1438 /* 1439 * Subtract 1 tick from tvtohz() since this isn't 1440 * a one-shot timer. 1441 */ 1442 if ((error = itimerfix(tv)) == 0) 1443 d->bd_rtout = tvtohz(tv) - 1; 1444 break; 1445 } 1446 1447 /* 1448 * Get read timeout. 1449 */ 1450 case BIOCGRTIMEOUT: 1451 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1452 case BIOCGRTIMEOUT32: 1453 #endif 1454 { 1455 struct timeval *tv; 1456 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1457 struct timeval32 *tv32; 1458 struct timeval tv64; 1459 1460 if (cmd == BIOCGRTIMEOUT32) 1461 tv = &tv64; 1462 else 1463 #endif 1464 tv = (struct timeval *)addr; 1465 1466 tv->tv_sec = d->bd_rtout / hz; 1467 tv->tv_usec = (d->bd_rtout % hz) * tick; 1468 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1469 if (cmd == BIOCGRTIMEOUT32) { 1470 tv32 = (struct timeval32 *)addr; 1471 tv32->tv_sec = tv->tv_sec; 1472 tv32->tv_usec = tv->tv_usec; 1473 } 1474 #endif 1475 1476 break; 1477 } 1478 1479 /* 1480 * Get packet stats. 1481 */ 1482 case BIOCGSTATS: 1483 { 1484 struct bpf_stat *bs = (struct bpf_stat *)addr; 1485 1486 /* XXXCSJP overflow */ 1487 bs->bs_recv = d->bd_rcount; 1488 bs->bs_drop = d->bd_dcount; 1489 break; 1490 } 1491 1492 /* 1493 * Set immediate mode. 
1494 */ 1495 case BIOCIMMEDIATE: 1496 BPFD_LOCK(d); 1497 d->bd_immediate = *(u_int *)addr; 1498 BPFD_UNLOCK(d); 1499 break; 1500 1501 case BIOCVERSION: 1502 { 1503 struct bpf_version *bv = (struct bpf_version *)addr; 1504 1505 bv->bv_major = BPF_MAJOR_VERSION; 1506 bv->bv_minor = BPF_MINOR_VERSION; 1507 break; 1508 } 1509 1510 /* 1511 * Get "header already complete" flag 1512 */ 1513 case BIOCGHDRCMPLT: 1514 BPFD_LOCK(d); 1515 *(u_int *)addr = d->bd_hdrcmplt; 1516 BPFD_UNLOCK(d); 1517 break; 1518 1519 /* 1520 * Set "header already complete" flag 1521 */ 1522 case BIOCSHDRCMPLT: 1523 BPFD_LOCK(d); 1524 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0; 1525 BPFD_UNLOCK(d); 1526 break; 1527 1528 /* 1529 * Get packet direction flag 1530 */ 1531 case BIOCGDIRECTION: 1532 BPFD_LOCK(d); 1533 *(u_int *)addr = d->bd_direction; 1534 BPFD_UNLOCK(d); 1535 break; 1536 1537 /* 1538 * Set packet direction flag 1539 */ 1540 case BIOCSDIRECTION: 1541 { 1542 u_int direction; 1543 1544 direction = *(u_int *)addr; 1545 switch (direction) { 1546 case BPF_D_IN: 1547 case BPF_D_INOUT: 1548 case BPF_D_OUT: 1549 BPFD_LOCK(d); 1550 d->bd_direction = direction; 1551 BPFD_UNLOCK(d); 1552 break; 1553 default: 1554 error = EINVAL; 1555 } 1556 } 1557 break; 1558 1559 /* 1560 * Get packet timestamp format and resolution. 1561 */ 1562 case BIOCGTSTAMP: 1563 BPFD_LOCK(d); 1564 *(u_int *)addr = d->bd_tstamp; 1565 BPFD_UNLOCK(d); 1566 break; 1567 1568 /* 1569 * Set packet timestamp format and resolution. 1570 */ 1571 case BIOCSTSTAMP: 1572 { 1573 u_int func; 1574 1575 func = *(u_int *)addr; 1576 if (BPF_T_VALID(func)) 1577 d->bd_tstamp = func; 1578 else 1579 error = EINVAL; 1580 } 1581 break; 1582 1583 case BIOCFEEDBACK: 1584 BPFD_LOCK(d); 1585 d->bd_feedback = *(u_int *)addr; 1586 BPFD_UNLOCK(d); 1587 break; 1588 1589 case BIOCLOCK: 1590 BPFD_LOCK(d); 1591 d->bd_locked = 1; 1592 BPFD_UNLOCK(d); 1593 break; 1594 1595 case FIONBIO: /* Non-blocking I/O */ 1596 break; 1597 1598 case FIOASYNC: /* Send signal on receive packets */ 1599 BPFD_LOCK(d); 1600 d->bd_async = *(int *)addr; 1601 BPFD_UNLOCK(d); 1602 break; 1603 1604 case FIOSETOWN: 1605 /* 1606 * XXX: Add some sort of locking here? 1607 * fsetown() can sleep. 1608 */ 1609 error = fsetown(*(int *)addr, &d->bd_sigio); 1610 break; 1611 1612 case FIOGETOWN: 1613 BPFD_LOCK(d); 1614 *(int *)addr = fgetown(&d->bd_sigio); 1615 BPFD_UNLOCK(d); 1616 break; 1617 1618 /* This is deprecated, FIOSETOWN should be used instead. */ 1619 case TIOCSPGRP: 1620 error = fsetown(-(*(int *)addr), &d->bd_sigio); 1621 break; 1622 1623 /* This is deprecated, FIOGETOWN should be used instead. */ 1624 case TIOCGPGRP: 1625 *(int *)addr = -fgetown(&d->bd_sigio); 1626 break; 1627 1628 case BIOCSRSIG: /* Set receive signal */ 1629 { 1630 u_int sig; 1631 1632 sig = *(u_int *)addr; 1633 1634 if (sig >= NSIG) 1635 error = EINVAL; 1636 else { 1637 BPFD_LOCK(d); 1638 d->bd_sig = sig; 1639 BPFD_UNLOCK(d); 1640 } 1641 break; 1642 } 1643 case BIOCGRSIG: 1644 BPFD_LOCK(d); 1645 *(u_int *)addr = d->bd_sig; 1646 BPFD_UNLOCK(d); 1647 break; 1648 1649 case BIOCGETBUFMODE: 1650 BPFD_LOCK(d); 1651 *(u_int *)addr = d->bd_bufmode; 1652 BPFD_UNLOCK(d); 1653 break; 1654 1655 case BIOCSETBUFMODE: 1656 /* 1657 * Allow the buffering mode to be changed as long as we 1658 * haven't yet committed to a particular mode. Our 1659 * definition of commitment, for now, is whether or not a 1660 * buffer has been allocated or an interface attached, since 1661 * that's the point where things get tricky. 
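	 *
	 * An illustrative (hypothetical) userland sequence for selecting
	 * zero-copy buffering before binding an interface would be:
	 *
	 *	u_int mode = BPF_BUFMODE_ZBUF;
	 *	ioctl(fd, BIOCSETBUFMODE, &mode);
	 *	... BIOCSETZBUF to register buffers, then BIOCSETIF ...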
	 */
		switch (*(u_int *)addr) {
		case BPF_BUFMODE_BUFFER:
			break;

		case BPF_BUFMODE_ZBUF:
			if (bpf_zerocopy_enable)
				break;
			/* FALLTHROUGH */

		default:
			CURVNET_RESTORE();
			return (EINVAL);
		}

		BPFD_LOCK(d);
		if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
		    d->bd_fbuf != NULL || d->bd_bif != NULL) {
			BPFD_UNLOCK(d);
			CURVNET_RESTORE();
			return (EBUSY);
		}
		d->bd_bufmode = *(u_int *)addr;
		BPFD_UNLOCK(d);
		break;

	case BIOCGETZMAX:
		error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
		break;

	case BIOCSETZBUF:
		error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
		break;

	case BIOCROTZBUF:
		error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
		break;
	}
	CURVNET_RESTORE();
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 *
 * Note we need the global lock here to serialize bpf_setf() and bpf_setif()
 * calls, since reading d->bd_bif can't be protected by the descriptor or
 * interface lock due to lock order.
 *
 * Additionally, we have to acquire the interface write lock because
 * bpf_mtap() uses the interface read lock to read all filters.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
#ifdef COMPAT_FREEBSD32
	struct bpf_program fp_swab;
	struct bpf_program32 *fp32;
#endif
	struct bpf_insn *fcode, *old;
#ifdef BPF_JITTER
	bpf_jit_filter *jfunc, *ofunc;
#endif
	size_t size;
	u_int flen;
	int need_upgrade;

#ifdef COMPAT_FREEBSD32
	switch (cmd) {
	case BIOCSETF32:
	case BIOCSETWF32:
	case BIOCSETFNR32:
		fp32 = (struct bpf_program32 *)fp;
		fp_swab.bf_len = fp32->bf_len;
		fp_swab.bf_insns = (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
		fp = &fp_swab;
		switch (cmd) {
		case BIOCSETF32:
			cmd = BIOCSETF;
			break;
		case BIOCSETWF32:
			cmd = BIOCSETWF;
			break;
		}
		break;
	}
#endif

	fcode = NULL;
#ifdef BPF_JITTER
	jfunc = ofunc = NULL;
#endif
	need_upgrade = 0;

	/*
	 * Check the new filter's validity before acquiring any locks.
	 * Allocate memory for the new filter, if needed.
	 */
	flen = fp->bf_len;
	if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0))
		return (EINVAL);
	size = flen * sizeof(*fp->bf_insns);
	if (size > 0) {
		/* We're setting up a new filter.  Copy and check actual data. */
		fcode = malloc(size, M_BPF, M_WAITOK);
		if (copyin(fp->bf_insns, fcode, size) != 0 ||
		    !bpf_validate(fcode, flen)) {
			free(fcode, M_BPF);
			return (EINVAL);
		}
#ifdef BPF_JITTER
		/* Filter is copied inside fcode and is perfectly valid. */
		jfunc = bpf_jitter(fcode, flen);
#endif
	}

	BPF_LOCK();

	/*
	 * Set up new filter.
	 * Protect filter change by interface lock.
	 * Additionally, we are protected by global lock here.
1786 */ 1787 if (d->bd_bif != NULL) 1788 BPFIF_WLOCK(d->bd_bif); 1789 BPFD_LOCK(d); 1790 if (cmd == BIOCSETWF) { 1791 old = d->bd_wfilter; 1792 d->bd_wfilter = fcode; 1793 } else { 1794 old = d->bd_rfilter; 1795 d->bd_rfilter = fcode; 1796 #ifdef BPF_JITTER 1797 ofunc = d->bd_bfilter; 1798 d->bd_bfilter = jfunc; 1799 #endif 1800 if (cmd == BIOCSETF) 1801 reset_d(d); 1802 1803 if (fcode != NULL) { 1804 /* 1805 * Do not require upgrade by first BIOCSETF 1806 * (used to set snaplen) by pcap_open_live(). 1807 */ 1808 if (d->bd_writer != 0 && --d->bd_writer == 0) 1809 need_upgrade = 1; 1810 CTR4(KTR_NET, "%s: filter function set by pid %d, " 1811 "bd_writer counter %d, need_upgrade %d", 1812 __func__, d->bd_pid, d->bd_writer, need_upgrade); 1813 } 1814 } 1815 BPFD_UNLOCK(d); 1816 if (d->bd_bif != NULL) 1817 BPFIF_WUNLOCK(d->bd_bif); 1818 if (old != NULL) 1819 free(old, M_BPF); 1820 #ifdef BPF_JITTER 1821 if (ofunc != NULL) 1822 bpf_destroy_jit_filter(ofunc); 1823 #endif 1824 1825 /* Move d to active readers list. */ 1826 if (need_upgrade) 1827 bpf_upgraded(d); 1828 1829 BPF_UNLOCK(); 1830 return (0); 1831 } 1832 1833 /* 1834 * Detach a file from its current interface (if attached at all) and attach 1835 * to the interface indicated by the name stored in ifr. 1836 * Return an errno or 0. 1837 */ 1838 static int 1839 bpf_setif(struct bpf_d *d, struct ifreq *ifr) 1840 { 1841 struct bpf_if *bp; 1842 struct ifnet *theywant; 1843 1844 BPF_LOCK_ASSERT(); 1845 1846 theywant = ifunit(ifr->ifr_name); 1847 if (theywant == NULL || theywant->if_bpf == NULL) 1848 return (ENXIO); 1849 1850 bp = theywant->if_bpf; 1851 1852 /* Check if interface is not being detached from BPF */ 1853 BPFIF_RLOCK(bp); 1854 if (bp->flags & BPFIF_FLAG_DYING) { 1855 BPFIF_RUNLOCK(bp); 1856 return (ENXIO); 1857 } 1858 BPFIF_RUNLOCK(bp); 1859 1860 /* 1861 * Behavior here depends on the buffering model. If we're using 1862 * kernel memory buffers, then we can allocate them here. If we're 1863 * using zero-copy, then the user process must have registered 1864 * buffers by the time we get here. If not, return an error. 1865 */ 1866 switch (d->bd_bufmode) { 1867 case BPF_BUFMODE_BUFFER: 1868 case BPF_BUFMODE_ZBUF: 1869 if (d->bd_sbuf == NULL) 1870 return (EINVAL); 1871 break; 1872 1873 default: 1874 panic("bpf_setif: bufmode %d", d->bd_bufmode); 1875 } 1876 if (bp != d->bd_bif) 1877 bpf_attachd(d, bp); 1878 BPFD_LOCK(d); 1879 reset_d(d); 1880 BPFD_UNLOCK(d); 1881 return (0); 1882 } 1883 1884 /* 1885 * Support for select() and poll() system calls 1886 * 1887 * Return true iff the specific operation will not block indefinitely. 1888 * Otherwise, return false but make a note that a selwakeup() must be done. 1889 */ 1890 static int 1891 bpfpoll(struct cdev *dev, int events, struct thread *td) 1892 { 1893 struct bpf_d *d; 1894 int revents; 1895 1896 if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL) 1897 return (events & 1898 (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM)); 1899 1900 /* 1901 * Refresh PID associated with this descriptor. 1902 */ 1903 revents = events & (POLLOUT | POLLWRNORM); 1904 BPFD_LOCK(d); 1905 BPF_PID_REFRESH(d, td); 1906 if (events & (POLLIN | POLLRDNORM)) { 1907 if (bpf_ready(d)) 1908 revents |= events & (POLLIN | POLLRDNORM); 1909 else { 1910 selrecord(td, &d->bd_sel); 1911 /* Start the read timeout if necessary. 
 */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}

/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int
bpfkqfilter(struct cdev *dev, struct knote *kn)
{
	struct bpf_d *d;

	if (devfs_get_cdevpriv((void **)&d) != 0 ||
	    kn->kn_filter != EVFILT_READ)
		return (1);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	BPF_PID_REFRESH_CUR(d);
	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	knlist_add(&d->bd_sel.si_note, kn, 1);
	BPFD_UNLOCK(d);

	return (0);
}

static void
filt_bpfdetach(struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	knlist_remove(&d->bd_sel.si_note, kn, 0);
}

static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK_ASSERT(d);
	ready = bpf_ready(d);
	if (ready) {
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}

	return (ready);
}

#define	BPF_TSTAMP_NONE		0
#define	BPF_TSTAMP_FAST		1
#define	BPF_TSTAMP_NORMAL	2
#define	BPF_TSTAMP_EXTERN	3

static int
bpf_ts_quality(int tstype)
{

	if (tstype == BPF_T_NONE)
		return (BPF_TSTAMP_NONE);
	if ((tstype & BPF_T_FAST) != 0)
		return (BPF_TSTAMP_FAST);

	return (BPF_TSTAMP_NORMAL);
}

static int
bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m)
{
	struct m_tag *tag;
	int quality;

	quality = bpf_ts_quality(tstype);
	if (quality == BPF_TSTAMP_NONE)
		return (quality);

	if (m != NULL) {
		tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL);
		if (tag != NULL) {
			*bt = *(struct bintime *)(tag + 1);
			return (BPF_TSTAMP_EXTERN);
		}
	}
	if (quality == BPF_TSTAMP_NORMAL)
		binuptime(bt);
	else
		getbinuptime(bt);

	return (quality);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bintime bt;
	struct bpf_d *d;
#ifdef BPF_JITTER
	bpf_jit_filter *bf;
#endif
	u_int slen;
	int gottime;

	gottime = BPF_TSTAMP_NONE;

	BPFIF_RLOCK(bp);

	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/*
		 * We are not using any locks for d here because:
		 * 1) any filter change is protected by interface
		 * write lock
		 * 2) destroying/detaching d is protected by interface
		 * write lock, too
		 */

		/* XXX: Do not protect counter for the sake of performance. */
		++d->bd_rcount;
		/*
		 * NB: We don't call BPF_CHECK_DIRECTION() here since there
		 * is no way for the caller to indicate to us whether this
		 * packet is inbound or outbound.  In the bpf_mtap() routines,
		 * we use the interface pointers on the mbuf to figure it out.
		 */
#ifdef BPF_JITTER
		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
		if (bf != NULL)
			slen = (*(bf->func))(pkt, pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			/*
			 * Filter matches; acquire the descriptor lock.
			 */
			BPFD_LOCK(d);

			d->bd_fcount++;
			if (gottime < bpf_ts_quality(d->bd_tstamp))
				gottime = bpf_gettime(&bt, d->bd_tstamp, NULL);
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen,
				    bpf_append_bytes, &bt);
			BPFD_UNLOCK(d);
		}
	}
	BPFIF_RUNLOCK(bp);
}

#define	BPF_CHECK_DIRECTION(d, r, i)				\
	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 * Locking model is explained in bpf_tap().
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bintime bt;
	struct bpf_d *d;
#ifdef BPF_JITTER
	bpf_jit_filter *bf;
#endif
	u_int pktlen, slen;
	int gottime;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m, NULL);
	gottime = BPF_TSTAMP_NONE;

	BPFIF_RLOCK(bp);

	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		++d->bd_rcount;
#ifdef BPF_JITTER
		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
		/* XXX We cannot handle multiple mbufs. */
		if (bf != NULL && m->m_next == NULL)
			slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			BPFD_LOCK(d);

			d->bd_fcount++;
			if (gottime < bpf_ts_quality(d->bd_tstamp))
				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_append_mbuf, &bt);
			BPFD_UNLOCK(d);
		}
	}
	BPFIF_RUNLOCK(bp);
}

/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	struct bintime bt;
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only set up what's
	 * absolutely needed--this mbuf should never go anywhere else.
2168 */ 2169 mb.m_next = m; 2170 mb.m_data = data; 2171 mb.m_len = dlen; 2172 pktlen += dlen; 2173 2174 gottime = BPF_TSTAMP_NONE; 2175 2176 BPFIF_RLOCK(bp); 2177 2178 LIST_FOREACH(d, &bp->bif_dlist, bd_next) { 2179 if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp)) 2180 continue; 2181 ++d->bd_rcount; 2182 slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0); 2183 if (slen != 0) { 2184 BPFD_LOCK(d); 2185 2186 d->bd_fcount++; 2187 if (gottime < bpf_ts_quality(d->bd_tstamp)) 2188 gottime = bpf_gettime(&bt, d->bd_tstamp, m); 2189 #ifdef MAC 2190 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0) 2191 #endif 2192 catchpacket(d, (u_char *)&mb, pktlen, slen, 2193 bpf_append_mbuf, &bt); 2194 BPFD_UNLOCK(d); 2195 } 2196 } 2197 BPFIF_RUNLOCK(bp); 2198 } 2199 2200 #undef BPF_CHECK_DIRECTION 2201 2202 #undef BPF_TSTAMP_NONE 2203 #undef BPF_TSTAMP_FAST 2204 #undef BPF_TSTAMP_NORMAL 2205 #undef BPF_TSTAMP_EXTERN 2206 2207 static int 2208 bpf_hdrlen(struct bpf_d *d) 2209 { 2210 int hdrlen; 2211 2212 hdrlen = d->bd_bif->bif_hdrlen; 2213 #ifndef BURN_BRIDGES 2214 if (d->bd_tstamp == BPF_T_NONE || 2215 BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME) 2216 #ifdef COMPAT_FREEBSD32 2217 if (d->bd_compat32) 2218 hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32); 2219 else 2220 #endif 2221 hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr); 2222 else 2223 #endif 2224 hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr); 2225 #ifdef COMPAT_FREEBSD32 2226 if (d->bd_compat32) 2227 hdrlen = BPF_WORDALIGN32(hdrlen); 2228 else 2229 #endif 2230 hdrlen = BPF_WORDALIGN(hdrlen); 2231 2232 return (hdrlen - d->bd_bif->bif_hdrlen); 2233 } 2234 2235 static void 2236 bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype) 2237 { 2238 struct bintime bt2; 2239 struct timeval tsm; 2240 struct timespec tsn; 2241 2242 if ((tstype & BPF_T_MONOTONIC) == 0) { 2243 bt2 = *bt; 2244 bintime_add(&bt2, &boottimebin); 2245 bt = &bt2; 2246 } 2247 switch (BPF_T_FORMAT(tstype)) { 2248 case BPF_T_MICROTIME: 2249 bintime2timeval(bt, &tsm); 2250 ts->bt_sec = tsm.tv_sec; 2251 ts->bt_frac = tsm.tv_usec; 2252 break; 2253 case BPF_T_NANOTIME: 2254 bintime2timespec(bt, &tsn); 2255 ts->bt_sec = tsn.tv_sec; 2256 ts->bt_frac = tsn.tv_nsec; 2257 break; 2258 case BPF_T_BINTIME: 2259 ts->bt_sec = bt->sec; 2260 ts->bt_frac = bt->frac; 2261 break; 2262 } 2263 } 2264 2265 /* 2266 * Move the packet data from interface memory (pkt) into the 2267 * store buffer. "cpfn" is the routine called to do the actual data 2268 * transfer. bcopy is passed in to copy contiguous chunks, while 2269 * bpf_append_mbuf is passed in to copy mbuf chains. In the latter case, 2270 * pkt is really an mbuf. 2271 */ 2272 static void 2273 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen, 2274 void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int), 2275 struct bintime *bt) 2276 { 2277 struct bpf_xhdr hdr; 2278 #ifndef BURN_BRIDGES 2279 struct bpf_hdr hdr_old; 2280 #ifdef COMPAT_FREEBSD32 2281 struct bpf_hdr32 hdr32_old; 2282 #endif 2283 #endif 2284 int caplen, curlen, hdrlen, totlen; 2285 int do_wakeup = 0; 2286 int do_timestamp; 2287 int tstype; 2288 2289 BPFD_LOCK_ASSERT(d); 2290 2291 /* 2292 * Detect whether user space has released a buffer back to us, and if 2293 * so, move it from being a hold buffer to a free buffer. This may 2294 * not be the best place to do it (for example, we might only want to 2295 * run this check if we need the space), but for now it's a reliable 2296 * spot to do it. 
	 */
	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}

	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater than or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	hdrlen = bpf_hdrlen(d);
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 *
	 * Drop the packet if there's no room and no hope of room.
	 * If the packet would overflow the storage buffer or the storage
	 * buffer is considered immutable by the buffer model, try to rotate
	 * the buffer and wake up pending processes.
	 */
#ifdef COMPAT_FREEBSD32
	if (d->bd_compat32)
		curlen = BPF_WORDALIGN32(d->bd_slen);
	else
#endif
		curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
		if (d->bd_fbuf == NULL) {
			/*
			 * There's no room in the store buffer, and no
			 * prospect of room, so drop the packet.  Notify the
			 * buffer model.
			 */
			bpf_buffull(d);
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has already
		 * expired during a select call.  A packet arrived, so the
		 * reader should be woken up.
		 */
		do_wakeup = 1;
	caplen = totlen - hdrlen;
	tstype = d->bd_tstamp;
	do_timestamp = tstype != BPF_T_NONE;
#ifndef BURN_BRIDGES
	if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) {
		struct bpf_ts ts;
		if (do_timestamp)
			bpf_bintime2ts(bt, &ts, tstype);
#ifdef COMPAT_FREEBSD32
		if (d->bd_compat32) {
			bzero(&hdr32_old, sizeof(hdr32_old));
			if (do_timestamp) {
				hdr32_old.bh_tstamp.tv_sec = ts.bt_sec;
				hdr32_old.bh_tstamp.tv_usec = ts.bt_frac;
			}
			hdr32_old.bh_datalen = pktlen;
			hdr32_old.bh_hdrlen = hdrlen;
			hdr32_old.bh_caplen = caplen;
			bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
			    sizeof(hdr32_old));
			goto copy;
		}
#endif
		bzero(&hdr_old, sizeof(hdr_old));
		if (do_timestamp) {
			hdr_old.bh_tstamp.tv_sec = ts.bt_sec;
			hdr_old.bh_tstamp.tv_usec = ts.bt_frac;
		}
		hdr_old.bh_datalen = pktlen;
		hdr_old.bh_hdrlen = hdrlen;
		hdr_old.bh_caplen = caplen;
		bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
		    sizeof(hdr_old));
		goto copy;
	}
#endif

	/*
	 * Append the bpf header.  Note we append the actual header size, but
	 * move forward the length of the header plus padding.
	 */
	bzero(&hdr, sizeof(hdr));
	if (do_timestamp)
		bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype);
	hdr.bh_datalen = pktlen;
	hdr.bh_hdrlen = hdrlen;
	hdr.bh_caplen = caplen;
	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));

	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
#ifndef BURN_BRIDGES
copy:
#endif
	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
	d->bd_slen = curlen + totlen;

	if (do_wakeup)
		bpf_wakeup(d);
}

/*
 * Free buffers currently in use by a descriptor.
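 * The read and write filters, and any JIT-compiled filter image, are
 * released as well.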
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{

	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	bpf_free(d);
	if (d->bd_rfilter != NULL) {
		free((caddr_t)d->bd_rfilter, M_BPF);
#ifdef BPF_JITTER
		if (d->bd_bfilter != NULL)
			bpf_destroy_jit_filter(d->bd_bfilter);
#endif
	}
	if (d->bd_wfilter != NULL)
		free((caddr_t)d->bd_wfilter, M_BPF);
	mtx_destroy(&d->bd_lock);
}

/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
	struct bpf_if *bp;

	bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
	if (bp == NULL)
		panic("bpfattach");

	LIST_INIT(&bp->bif_dlist);
	LIST_INIT(&bp->bif_wlist);
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	rw_init(&bp->bif_lock, "bpf interface lock");
	KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
	*driverp = bp;

	BPF_LOCK();
	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
	BPF_UNLOCK();

	bp->bif_hdrlen = hdrlen;

	if (bootverbose)
		if_printf(ifp, "bpf attached\n");
}

/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface.  Notify each descriptor as it's detached
 * so that any sleepers wake up and get ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp;
	struct bpf_d *d;
#ifdef INVARIANTS
	int ndetached;

	ndetached = 0;
#endif

	BPF_LOCK();
	/* Find all bpf_if structs which reference ifp and detach them. */
	do {
		LIST_FOREACH(bp, &bpf_iflist, bif_next) {
			if (ifp == bp->bif_ifp)
				break;
		}
		if (bp != NULL)
			LIST_REMOVE(bp, bif_next);

		if (bp != NULL) {
#ifdef INVARIANTS
			ndetached++;
#endif
			while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
				bpf_detachd_locked(d);
				BPFD_LOCK(d);
				bpf_wakeup(d);
				BPFD_UNLOCK(d);
			}
			/* Free writer-only descriptors */
			while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) {
				bpf_detachd_locked(d);
				BPFD_LOCK(d);
				bpf_wakeup(d);
				BPFD_UNLOCK(d);
			}

			/*
			 * Delay freeing bp till the interface is detached
			 * and all routes through this interface are removed.
			 * Mark bp as detached to restrict new consumers.
			 */
			BPFIF_WLOCK(bp);
			bp->flags |= BPFIF_FLAG_DYING;
			BPFIF_WUNLOCK(bp);
		}
	} while (bp != NULL);
	BPF_UNLOCK();

#ifdef INVARIANTS
	if (ndetached == 0)
		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
#endif
}

/*
 * Interface departure handler.
 * Note that a departure event does not guarantee the interface is going down.
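 * (Added note: the BPF instance is only torn down here once bpfdetach()
 * has already run and marked it BPFIF_FLAG_DYING; departure events for
 * still-attached interfaces are ignored.)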
 */
static void
bpf_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct bpf_if *bp;

	BPF_LOCK();
	if ((bp = ifp->if_bpf) == NULL) {
		BPF_UNLOCK();
		return;
	}

	/* Check if bpfdetach() was called previously */
	if ((bp->flags & BPFIF_FLAG_DYING) == 0) {
		BPF_UNLOCK();
		return;
	}

	CTR3(KTR_NET, "%s: freeing BPF instance %p for interface %p",
	    __func__, bp, ifp);

	ifp->if_bpf = NULL;
	BPF_UNLOCK();

	rw_destroy(&bp->bif_lock);
	free(bp, M_BPF);
}

/*
 * Get a list of the available data link types for the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	BPF_LOCK_ASSERT();

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len)
				return (ENOMEM);
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	BPF_LOCK_ASSERT();

	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;

	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}

	if (bp != NULL) {
		opromisc = d->bd_promisc;
		bpf_attachd(d, bp);
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error)
				if_printf(bp->bif_ifp,
				    "bpf_setdlt: ifpromisc failed (%d)\n",
				    error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}

static void
bpf_drvinit(void *unused)
{
	struct cdev *dev;

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	LIST_INIT(&bpf_iflist);

	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
	/* For compatibility */
	make_dev_alias(dev, "bpf0");

	/* Register interface departure handler */
	bpf_ifdetach_cookie = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, bpf_ifdetach, NULL,
	    EVENTHANDLER_PRI_ANY);
}

/*
 * Zero out the various packet counters associated with all of the bpf
 * descriptors.  At some point, we will probably want to get a bit more
 * granular and allow the user to specify descriptors to be zeroed.
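 *
 * (Illustrative sketch, not from the original source: user space requests
 * the zeroing by writing an all-zero struct xbpf_d to the stats sysctl
 * served by bpf_stats_sysctl() below, e.g.
 *
 *	struct xbpf_d zero = { 0 };
 *	sysctlbyname("net.bpf.stats", NULL, NULL, &zero, sizeof(zero));
 *
 * The sysctl name is assumed here; the handler only checks that the new
 * value is exactly sizeof(struct xbpf_d) bytes of zeroes.)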
 */
static void
bpf_zero_counters(void)
{
	struct bpf_if *bp;
	struct bpf_d *bd;

	BPF_LOCK();
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_RLOCK(bp);
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			BPFD_LOCK(bd);
			bd->bd_rcount = 0;
			bd->bd_dcount = 0;
			bd->bd_fcount = 0;
			bd->bd_wcount = 0;
			bd->bd_wfcount = 0;
			bd->bd_zcopy = 0;
			BPFD_UNLOCK(bd);
		}
		BPFIF_RUNLOCK(bp);
	}
	BPF_UNLOCK();
}

/*
 * Fill in filter statistics.
 */
static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{

	bzero(d, sizeof(*d));
	BPFD_LOCK_ASSERT(bd);
	d->bd_structsize = sizeof(*d);
	/* XXX: reading should be protected by global lock */
	d->bd_immediate = bd->bd_immediate;
	d->bd_promisc = bd->bd_promisc;
	d->bd_hdrcmplt = bd->bd_hdrcmplt;
	d->bd_direction = bd->bd_direction;
	d->bd_feedback = bd->bd_feedback;
	d->bd_async = bd->bd_async;
	d->bd_rcount = bd->bd_rcount;
	d->bd_dcount = bd->bd_dcount;
	d->bd_fcount = bd->bd_fcount;
	d->bd_sig = bd->bd_sig;
	d->bd_slen = bd->bd_slen;
	d->bd_hlen = bd->bd_hlen;
	d->bd_bufsize = bd->bd_bufsize;
	d->bd_pid = bd->bd_pid;
	strlcpy(d->bd_ifname,
	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
	d->bd_locked = bd->bd_locked;
	d->bd_wcount = bd->bd_wcount;
	d->bd_wdcount = bd->bd_wdcount;
	d->bd_wfcount = bd->bd_wfcount;
	d->bd_zcopy = bd->bd_zcopy;
	d->bd_bufmode = bd->bd_bufmode;
}

/*
 * Handle `netstat -B' stats request.
 */
static int
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct xbpf_d *xbdbuf, *xbd, zerostats;
	int index, error;
	struct bpf_if *bp;
	struct bpf_d *bd;

	/*
	 * XXX This is not technically correct.  It is possible for
	 * non-privileged users to open bpf devices.  It would make sense
	 * if the users who opened the devices were able to retrieve
	 * the statistics for them, too.
	 */
	error = priv_check(req->td, PRIV_NET_BPF);
	if (error)
		return (error);
	/*
	 * Check to see if the user is requesting that the counters be
	 * zeroed out.  Explicitly check that the supplied data is zeroed,
	 * as we aren't allowing the user to set the counters currently.
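	 *
	 * (Added note: on the read side, a NULL oldptr request is answered
	 * with a size estimate of bpf_bpfd_cnt records; if more descriptors
	 * appear before the caller comes back with a buffer, the handler
	 * bails out with ENOMEM instead of overflowing the supplied length.)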
	 */
	if (req->newptr != NULL) {
		if (req->newlen != sizeof(zerostats))
			return (EINVAL);
		bzero(&zerostats, sizeof(zerostats));
		xbd = req->newptr;
		if (bcmp(xbd, &zerostats, sizeof(*xbd)) != 0)
			return (EINVAL);
		bpf_zero_counters();
		return (0);
	}
	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
	if (bpf_bpfd_cnt == 0)
		return (SYSCTL_OUT(req, 0, 0));
	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
	BPF_LOCK();
	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
		BPF_UNLOCK();
		free(xbdbuf, M_BPF);
		return (ENOMEM);
	}
	index = 0;
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_RLOCK(bp);
		/* Send writers-only first */
		LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_LOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_UNLOCK(bd);
		}
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_LOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_UNLOCK(bd);
		}
		BPFIF_RUNLOCK(bp);
	}
	BPF_UNLOCK();
	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
	free(xbdbuf, M_BPF);
	return (error);
}

SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);

#else /* !DEV_BPF && !NETGRAPH_BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */
static struct bpf_if bp_null;

void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}

void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}

void
bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
{
}

void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{

	*driverp = &bp_null;
}

void
bpfdetach(struct ifnet *ifp)
{
}

u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
	return -1;	/* "no filter" behaviour */
}

int
bpf_validate(const struct bpf_insn *f, int len)
{
	return 0;	/* false */
}

#endif /* !DEV_BPF && !NETGRAPH_BPF */
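
/*
 * Illustrative sketch (not part of the original file): how a user-space
 * reader is expected to walk the records that catchpacket() lays out,
 * assuming the default microtime timestamp format, i.e. each record
 * starts with a struct bpf_hdr.  The buffer and handler names below are
 * hypothetical.
 *
 *	char *p = buf;				// filled by read(2) on /dev/bpf
 *	while (p < buf + nread) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		// captured data begins bh_hdrlen bytes into the record
 *		handle(p + bh->bh_hdrlen, bh->bh_caplen, bh->bh_datalen);
 *		// records are padded out to the word-alignment boundary
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */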