1 /*- 2 * Copyright (c) 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)bpf.c 8.4 (Berkeley) 1/9/95 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include "opt_bpf.h" 41 #include "opt_compat.h" 42 #include "opt_netgraph.h" 43 44 #include <sys/types.h> 45 #include <sys/param.h> 46 #include <sys/lock.h> 47 #include <sys/rwlock.h> 48 #include <sys/systm.h> 49 #include <sys/conf.h> 50 #include <sys/fcntl.h> 51 #include <sys/jail.h> 52 #include <sys/malloc.h> 53 #include <sys/mbuf.h> 54 #include <sys/time.h> 55 #include <sys/priv.h> 56 #include <sys/proc.h> 57 #include <sys/signalvar.h> 58 #include <sys/filio.h> 59 #include <sys/sockio.h> 60 #include <sys/ttycom.h> 61 #include <sys/uio.h> 62 63 #include <sys/event.h> 64 #include <sys/file.h> 65 #include <sys/poll.h> 66 #include <sys/proc.h> 67 68 #include <sys/socket.h> 69 70 #include <net/if.h> 71 #include <net/if_var.h> 72 #define BPF_INTERNAL 73 #include <net/bpf.h> 74 #include <net/bpf_buffer.h> 75 #ifdef BPF_JITTER 76 #include <net/bpf_jitter.h> 77 #endif 78 #include <net/bpf_zerocopy.h> 79 #include <net/bpfdesc.h> 80 #include <net/vnet.h> 81 82 #include <netinet/in.h> 83 #include <netinet/if_ether.h> 84 #include <sys/kernel.h> 85 #include <sys/sysctl.h> 86 87 #include <net80211/ieee80211_freebsd.h> 88 89 #include <security/mac/mac_framework.h> 90 91 MALLOC_DEFINE(M_BPF, "BPF", "BPF data"); 92 93 #if defined(DEV_BPF) || defined(NETGRAPH_BPF) 94 95 #define PRINET 26 /* interruptible */ 96 97 #define SIZEOF_BPF_HDR(type) \ 98 (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen)) 99 100 #ifdef COMPAT_FREEBSD32 101 #include <sys/mount.h> 102 #include <compat/freebsd32/freebsd32.h> 103 #define BPF_ALIGNMENT32 sizeof(int32_t) 104 #define BPF_WORDALIGN32(x) (((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1)) 105 106 #ifndef BURN_BRIDGES 107 /* 108 * 32-bit version of structure prepended to each packet. We use this header 109 * instead of the standard one for 32-bit streams. We mark the a stream as 110 * 32-bit the first time we see a 32-bit compat ioctl request. 111 */ 112 struct bpf_hdr32 { 113 struct timeval32 bh_tstamp; /* time stamp */ 114 uint32_t bh_caplen; /* length of captured portion */ 115 uint32_t bh_datalen; /* original length of packet */ 116 uint16_t bh_hdrlen; /* length of bpf header (this struct 117 plus alignment padding) */ 118 }; 119 #endif 120 121 struct bpf_program32 { 122 u_int bf_len; 123 uint32_t bf_insns; 124 }; 125 126 struct bpf_dltlist32 { 127 u_int bfl_len; 128 u_int bfl_list; 129 }; 130 131 #define BIOCSETF32 _IOW('B', 103, struct bpf_program32) 132 #define BIOCSRTIMEOUT32 _IOW('B', 109, struct timeval32) 133 #define BIOCGRTIMEOUT32 _IOR('B', 110, struct timeval32) 134 #define BIOCGDLTLIST32 _IOWR('B', 121, struct bpf_dltlist32) 135 #define BIOCSETWF32 _IOW('B', 123, struct bpf_program32) 136 #define BIOCSETFNR32 _IOW('B', 130, struct bpf_program32) 137 #endif 138 139 /* 140 * bpf_iflist is a list of BPF interface structures, each corresponding to a 141 * specific DLT. The same network interface might have several BPF interface 142 * structures registered by different layers in the stack (i.e., 802.11 143 * frames, ethernet frames, etc). 144 */ 145 static LIST_HEAD(, bpf_if) bpf_iflist, bpf_freelist; 146 static struct mtx bpf_mtx; /* bpf global lock */ 147 static int bpf_bpfd_cnt; 148 149 static void bpf_attachd(struct bpf_d *, struct bpf_if *); 150 static void bpf_detachd(struct bpf_d *); 151 static void bpf_detachd_locked(struct bpf_d *); 152 static void bpf_freed(struct bpf_d *); 153 static int bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **, 154 struct sockaddr *, int *, struct bpf_insn *); 155 static int bpf_setif(struct bpf_d *, struct ifreq *); 156 static void bpf_timed_out(void *); 157 static __inline void 158 bpf_wakeup(struct bpf_d *); 159 static void catchpacket(struct bpf_d *, u_char *, u_int, u_int, 160 void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int), 161 struct bintime *); 162 static void reset_d(struct bpf_d *); 163 static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd); 164 static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *); 165 static int bpf_setdlt(struct bpf_d *, u_int); 166 static void filt_bpfdetach(struct knote *); 167 static int filt_bpfread(struct knote *, long); 168 static void bpf_drvinit(void *); 169 static int bpf_stats_sysctl(SYSCTL_HANDLER_ARGS); 170 171 SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl"); 172 int bpf_maxinsns = BPF_MAXINSNS; 173 SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW, 174 &bpf_maxinsns, 0, "Maximum bpf program instructions"); 175 static int bpf_zerocopy_enable = 0; 176 SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW, 177 &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions"); 178 static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW, 179 bpf_stats_sysctl, "bpf statistics portal"); 180 181 static VNET_DEFINE(int, bpf_optimize_writers) = 0; 182 #define V_bpf_optimize_writers VNET(bpf_optimize_writers) 183 SYSCTL_VNET_INT(_net_bpf, OID_AUTO, optimize_writers, 184 CTLFLAG_RW, &VNET_NAME(bpf_optimize_writers), 0, 185 "Do not send packets until BPF program is set"); 186 187 static d_open_t bpfopen; 188 static d_read_t bpfread; 189 static d_write_t bpfwrite; 190 static d_ioctl_t bpfioctl; 191 static d_poll_t bpfpoll; 192 static d_kqfilter_t bpfkqfilter; 193 194 static struct cdevsw bpf_cdevsw = { 195 .d_version = D_VERSION, 196 .d_open = bpfopen, 197 .d_read = bpfread, 198 .d_write = bpfwrite, 199 .d_ioctl = bpfioctl, 200 .d_poll = bpfpoll, 201 .d_name = "bpf", 202 .d_kqfilter = bpfkqfilter, 203 }; 204 205 static struct filterops bpfread_filtops = { 206 .f_isfd = 1, 207 .f_detach = filt_bpfdetach, 208 .f_event = filt_bpfread, 209 }; 210 211 eventhandler_tag bpf_ifdetach_cookie = NULL; 212 213 /* 214 * LOCKING MODEL USED BY BPF: 215 * Locks: 216 * 1) global lock (BPF_LOCK). Mutex, used to protect interface addition/removal, 217 * some global counters and every bpf_if reference. 218 * 2) Interface lock. Rwlock, used to protect list of BPF descriptors and their filters. 219 * 3) Descriptor lock. Mutex, used to protect BPF buffers and various structure fields 220 * used by bpf_mtap code. 221 * 222 * Lock order: 223 * 224 * Global lock, interface lock, descriptor lock 225 * 226 * We have to acquire interface lock before descriptor main lock due to BPF_MTAP[2] 227 * working model. In many places (like bpf_detachd) we start with BPF descriptor 228 * (and we need to at least rlock it to get reliable interface pointer). This 229 * gives us potential LOR. As a result, we use global lock to protect from bpf_if 230 * change in every such place. 231 * 232 * Changing d->bd_bif is protected by 1) global lock, 2) interface lock and 233 * 3) descriptor main wlock. 234 * Reading bd_bif can be protected by any of these locks, typically global lock. 235 * 236 * Changing read/write BPF filter is protected by the same three locks, 237 * the same applies for reading. 238 * 239 * Sleeping in global lock is not allowed due to bpfdetach() using it. 240 */ 241 242 /* 243 * Wrapper functions for various buffering methods. If the set of buffer 244 * modes expands, we will probably want to introduce a switch data structure 245 * similar to protosw, et. 246 */ 247 static void 248 bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src, 249 u_int len) 250 { 251 252 BPFD_LOCK_ASSERT(d); 253 254 switch (d->bd_bufmode) { 255 case BPF_BUFMODE_BUFFER: 256 return (bpf_buffer_append_bytes(d, buf, offset, src, len)); 257 258 case BPF_BUFMODE_ZBUF: 259 d->bd_zcopy++; 260 return (bpf_zerocopy_append_bytes(d, buf, offset, src, len)); 261 262 default: 263 panic("bpf_buf_append_bytes"); 264 } 265 } 266 267 static void 268 bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src, 269 u_int len) 270 { 271 272 BPFD_LOCK_ASSERT(d); 273 274 switch (d->bd_bufmode) { 275 case BPF_BUFMODE_BUFFER: 276 return (bpf_buffer_append_mbuf(d, buf, offset, src, len)); 277 278 case BPF_BUFMODE_ZBUF: 279 d->bd_zcopy++; 280 return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len)); 281 282 default: 283 panic("bpf_buf_append_mbuf"); 284 } 285 } 286 287 /* 288 * This function gets called when the free buffer is re-assigned. 289 */ 290 static void 291 bpf_buf_reclaimed(struct bpf_d *d) 292 { 293 294 BPFD_LOCK_ASSERT(d); 295 296 switch (d->bd_bufmode) { 297 case BPF_BUFMODE_BUFFER: 298 return; 299 300 case BPF_BUFMODE_ZBUF: 301 bpf_zerocopy_buf_reclaimed(d); 302 return; 303 304 default: 305 panic("bpf_buf_reclaimed"); 306 } 307 } 308 309 /* 310 * If the buffer mechanism has a way to decide that a held buffer can be made 311 * free, then it is exposed via the bpf_canfreebuf() interface. (1) is 312 * returned if the buffer can be discarded, (0) is returned if it cannot. 313 */ 314 static int 315 bpf_canfreebuf(struct bpf_d *d) 316 { 317 318 BPFD_LOCK_ASSERT(d); 319 320 switch (d->bd_bufmode) { 321 case BPF_BUFMODE_ZBUF: 322 return (bpf_zerocopy_canfreebuf(d)); 323 } 324 return (0); 325 } 326 327 /* 328 * Allow the buffer model to indicate that the current store buffer is 329 * immutable, regardless of the appearance of space. Return (1) if the 330 * buffer is writable, and (0) if not. 331 */ 332 static int 333 bpf_canwritebuf(struct bpf_d *d) 334 { 335 BPFD_LOCK_ASSERT(d); 336 337 switch (d->bd_bufmode) { 338 case BPF_BUFMODE_ZBUF: 339 return (bpf_zerocopy_canwritebuf(d)); 340 } 341 return (1); 342 } 343 344 /* 345 * Notify buffer model that an attempt to write to the store buffer has 346 * resulted in a dropped packet, in which case the buffer may be considered 347 * full. 348 */ 349 static void 350 bpf_buffull(struct bpf_d *d) 351 { 352 353 BPFD_LOCK_ASSERT(d); 354 355 switch (d->bd_bufmode) { 356 case BPF_BUFMODE_ZBUF: 357 bpf_zerocopy_buffull(d); 358 break; 359 } 360 } 361 362 /* 363 * Notify the buffer model that a buffer has moved into the hold position. 364 */ 365 void 366 bpf_bufheld(struct bpf_d *d) 367 { 368 369 BPFD_LOCK_ASSERT(d); 370 371 switch (d->bd_bufmode) { 372 case BPF_BUFMODE_ZBUF: 373 bpf_zerocopy_bufheld(d); 374 break; 375 } 376 } 377 378 static void 379 bpf_free(struct bpf_d *d) 380 { 381 382 switch (d->bd_bufmode) { 383 case BPF_BUFMODE_BUFFER: 384 return (bpf_buffer_free(d)); 385 386 case BPF_BUFMODE_ZBUF: 387 return (bpf_zerocopy_free(d)); 388 389 default: 390 panic("bpf_buf_free"); 391 } 392 } 393 394 static int 395 bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio) 396 { 397 398 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) 399 return (EOPNOTSUPP); 400 return (bpf_buffer_uiomove(d, buf, len, uio)); 401 } 402 403 static int 404 bpf_ioctl_sblen(struct bpf_d *d, u_int *i) 405 { 406 407 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) 408 return (EOPNOTSUPP); 409 return (bpf_buffer_ioctl_sblen(d, i)); 410 } 411 412 static int 413 bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i) 414 { 415 416 if (d->bd_bufmode != BPF_BUFMODE_ZBUF) 417 return (EOPNOTSUPP); 418 return (bpf_zerocopy_ioctl_getzmax(td, d, i)); 419 } 420 421 static int 422 bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz) 423 { 424 425 if (d->bd_bufmode != BPF_BUFMODE_ZBUF) 426 return (EOPNOTSUPP); 427 return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz)); 428 } 429 430 static int 431 bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz) 432 { 433 434 if (d->bd_bufmode != BPF_BUFMODE_ZBUF) 435 return (EOPNOTSUPP); 436 return (bpf_zerocopy_ioctl_setzbuf(td, d, bz)); 437 } 438 439 /* 440 * General BPF functions. 441 */ 442 static int 443 bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp, 444 struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter) 445 { 446 const struct ieee80211_bpf_params *p; 447 struct ether_header *eh; 448 struct mbuf *m; 449 int error; 450 int len; 451 int hlen; 452 int slen; 453 454 /* 455 * Build a sockaddr based on the data link layer type. 456 * We do this at this level because the ethernet header 457 * is copied directly into the data field of the sockaddr. 458 * In the case of SLIP, there is no header and the packet 459 * is forwarded as is. 460 * Also, we are careful to leave room at the front of the mbuf 461 * for the link level header. 462 */ 463 switch (linktype) { 464 465 case DLT_SLIP: 466 sockp->sa_family = AF_INET; 467 hlen = 0; 468 break; 469 470 case DLT_EN10MB: 471 sockp->sa_family = AF_UNSPEC; 472 /* XXX Would MAXLINKHDR be better? */ 473 hlen = ETHER_HDR_LEN; 474 break; 475 476 case DLT_FDDI: 477 sockp->sa_family = AF_IMPLINK; 478 hlen = 0; 479 break; 480 481 case DLT_RAW: 482 sockp->sa_family = AF_UNSPEC; 483 hlen = 0; 484 break; 485 486 case DLT_NULL: 487 /* 488 * null interface types require a 4 byte pseudo header which 489 * corresponds to the address family of the packet. 490 */ 491 sockp->sa_family = AF_UNSPEC; 492 hlen = 4; 493 break; 494 495 case DLT_ATM_RFC1483: 496 /* 497 * en atm driver requires 4-byte atm pseudo header. 498 * though it isn't standard, vpi:vci needs to be 499 * specified anyway. 500 */ 501 sockp->sa_family = AF_UNSPEC; 502 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */ 503 break; 504 505 case DLT_PPP: 506 sockp->sa_family = AF_UNSPEC; 507 hlen = 4; /* This should match PPP_HDRLEN */ 508 break; 509 510 case DLT_IEEE802_11: /* IEEE 802.11 wireless */ 511 sockp->sa_family = AF_IEEE80211; 512 hlen = 0; 513 break; 514 515 case DLT_IEEE802_11_RADIO: /* IEEE 802.11 wireless w/ phy params */ 516 sockp->sa_family = AF_IEEE80211; 517 sockp->sa_len = 12; /* XXX != 0 */ 518 hlen = sizeof(struct ieee80211_bpf_params); 519 break; 520 521 default: 522 return (EIO); 523 } 524 525 len = uio->uio_resid; 526 if (len < hlen || len - hlen > ifp->if_mtu) 527 return (EMSGSIZE); 528 529 m = m_get2(len, M_WAITOK, MT_DATA, M_PKTHDR); 530 if (m == NULL) 531 return (EIO); 532 m->m_pkthdr.len = m->m_len = len; 533 *mp = m; 534 535 error = uiomove(mtod(m, u_char *), len, uio); 536 if (error) 537 goto bad; 538 539 slen = bpf_filter(wfilter, mtod(m, u_char *), len, len); 540 if (slen == 0) { 541 error = EPERM; 542 goto bad; 543 } 544 545 /* Check for multicast destination */ 546 switch (linktype) { 547 case DLT_EN10MB: 548 eh = mtod(m, struct ether_header *); 549 if (ETHER_IS_MULTICAST(eh->ether_dhost)) { 550 if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost, 551 ETHER_ADDR_LEN) == 0) 552 m->m_flags |= M_BCAST; 553 else 554 m->m_flags |= M_MCAST; 555 } 556 break; 557 } 558 559 /* 560 * Make room for link header, and copy it to sockaddr 561 */ 562 if (hlen != 0) { 563 if (sockp->sa_family == AF_IEEE80211) { 564 /* 565 * Collect true length from the parameter header 566 * NB: sockp is known to be zero'd so if we do a 567 * short copy unspecified parameters will be 568 * zero. 569 * NB: packet may not be aligned after stripping 570 * bpf params 571 * XXX check ibp_vers 572 */ 573 p = mtod(m, const struct ieee80211_bpf_params *); 574 hlen = p->ibp_len; 575 if (hlen > sizeof(sockp->sa_data)) { 576 error = EINVAL; 577 goto bad; 578 } 579 } 580 bcopy(m->m_data, sockp->sa_data, hlen); 581 } 582 *hdrlen = hlen; 583 584 return (0); 585 bad: 586 m_freem(m); 587 return (error); 588 } 589 590 /* 591 * Attach file to the bpf interface, i.e. make d listen on bp. 592 */ 593 static void 594 bpf_attachd(struct bpf_d *d, struct bpf_if *bp) 595 { 596 int op_w; 597 598 BPF_LOCK_ASSERT(); 599 600 /* 601 * Save sysctl value to protect from sysctl change 602 * between reads 603 */ 604 op_w = V_bpf_optimize_writers; 605 606 if (d->bd_bif != NULL) 607 bpf_detachd_locked(d); 608 /* 609 * Point d at bp, and add d to the interface's list. 610 * Since there are many applicaiotns using BPF for 611 * sending raw packets only (dhcpd, cdpd are good examples) 612 * we can delay adding d to the list of active listeners until 613 * some filter is configured. 614 */ 615 616 BPFIF_WLOCK(bp); 617 BPFD_LOCK(d); 618 619 d->bd_bif = bp; 620 621 if (op_w != 0) { 622 /* Add to writers-only list */ 623 LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next); 624 /* 625 * We decrement bd_writer on every filter set operation. 626 * First BIOCSETF is done by pcap_open_live() to set up 627 * snap length. After that appliation usually sets its own filter 628 */ 629 d->bd_writer = 2; 630 } else 631 LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next); 632 633 BPFD_UNLOCK(d); 634 BPFIF_WUNLOCK(bp); 635 636 bpf_bpfd_cnt++; 637 638 CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list", 639 __func__, d->bd_pid, d->bd_writer ? "writer" : "active"); 640 641 if (op_w == 0) 642 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1); 643 } 644 645 /* 646 * Add d to the list of active bp filters. 647 * Reuqires bpf_attachd() to be called before 648 */ 649 static void 650 bpf_upgraded(struct bpf_d *d) 651 { 652 struct bpf_if *bp; 653 654 BPF_LOCK_ASSERT(); 655 656 bp = d->bd_bif; 657 658 /* 659 * Filter can be set several times without specifying interface. 660 * Mark d as reader and exit. 661 */ 662 if (bp == NULL) { 663 BPFD_LOCK(d); 664 d->bd_writer = 0; 665 BPFD_UNLOCK(d); 666 return; 667 } 668 669 BPFIF_WLOCK(bp); 670 BPFD_LOCK(d); 671 672 /* Remove from writers-only list */ 673 LIST_REMOVE(d, bd_next); 674 LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next); 675 /* Mark d as reader */ 676 d->bd_writer = 0; 677 678 BPFD_UNLOCK(d); 679 BPFIF_WUNLOCK(bp); 680 681 CTR2(KTR_NET, "%s: upgrade required by pid %d", __func__, d->bd_pid); 682 683 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1); 684 } 685 686 /* 687 * Detach a file from its interface. 688 */ 689 static void 690 bpf_detachd(struct bpf_d *d) 691 { 692 BPF_LOCK(); 693 bpf_detachd_locked(d); 694 BPF_UNLOCK(); 695 } 696 697 static void 698 bpf_detachd_locked(struct bpf_d *d) 699 { 700 int error; 701 struct bpf_if *bp; 702 struct ifnet *ifp; 703 704 CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid); 705 706 BPF_LOCK_ASSERT(); 707 708 /* Check if descriptor is attached */ 709 if ((bp = d->bd_bif) == NULL) 710 return; 711 712 BPFIF_WLOCK(bp); 713 BPFD_LOCK(d); 714 715 /* Save bd_writer value */ 716 error = d->bd_writer; 717 718 /* 719 * Remove d from the interface's descriptor list. 720 */ 721 LIST_REMOVE(d, bd_next); 722 723 ifp = bp->bif_ifp; 724 d->bd_bif = NULL; 725 BPFD_UNLOCK(d); 726 BPFIF_WUNLOCK(bp); 727 728 bpf_bpfd_cnt--; 729 730 /* Call event handler iff d is attached */ 731 if (error == 0) 732 EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0); 733 734 /* 735 * Check if this descriptor had requested promiscuous mode. 736 * If so, turn it off. 737 */ 738 if (d->bd_promisc) { 739 d->bd_promisc = 0; 740 CURVNET_SET(ifp->if_vnet); 741 error = ifpromisc(ifp, 0); 742 CURVNET_RESTORE(); 743 if (error != 0 && error != ENXIO) { 744 /* 745 * ENXIO can happen if a pccard is unplugged 746 * Something is really wrong if we were able to put 747 * the driver into promiscuous mode, but can't 748 * take it out. 749 */ 750 if_printf(bp->bif_ifp, 751 "bpf_detach: ifpromisc failed (%d)\n", error); 752 } 753 } 754 } 755 756 /* 757 * Close the descriptor by detaching it from its interface, 758 * deallocating its buffers, and marking it free. 759 */ 760 static void 761 bpf_dtor(void *data) 762 { 763 struct bpf_d *d = data; 764 765 BPFD_LOCK(d); 766 if (d->bd_state == BPF_WAITING) 767 callout_stop(&d->bd_callout); 768 d->bd_state = BPF_IDLE; 769 BPFD_UNLOCK(d); 770 funsetown(&d->bd_sigio); 771 bpf_detachd(d); 772 #ifdef MAC 773 mac_bpfdesc_destroy(d); 774 #endif /* MAC */ 775 seldrain(&d->bd_sel); 776 knlist_destroy(&d->bd_sel.si_note); 777 callout_drain(&d->bd_callout); 778 bpf_freed(d); 779 free(d, M_BPF); 780 } 781 782 /* 783 * Open ethernet device. Returns ENXIO for illegal minor device number, 784 * EBUSY if file is open by another process. 785 */ 786 /* ARGSUSED */ 787 static int 788 bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td) 789 { 790 struct bpf_d *d; 791 int error, size; 792 793 d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO); 794 error = devfs_set_cdevpriv(d, bpf_dtor); 795 if (error != 0) { 796 free(d, M_BPF); 797 return (error); 798 } 799 800 /* 801 * For historical reasons, perform a one-time initialization call to 802 * the buffer routines, even though we're not yet committed to a 803 * particular buffer method. 804 */ 805 bpf_buffer_init(d); 806 d->bd_hbuf_in_use = 0; 807 d->bd_bufmode = BPF_BUFMODE_BUFFER; 808 d->bd_sig = SIGIO; 809 d->bd_direction = BPF_D_INOUT; 810 BPF_PID_REFRESH(d, td); 811 #ifdef MAC 812 mac_bpfdesc_init(d); 813 mac_bpfdesc_create(td->td_ucred, d); 814 #endif 815 mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF); 816 callout_init_mtx(&d->bd_callout, &d->bd_lock, 0); 817 knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock); 818 819 /* Allocate default buffers */ 820 size = d->bd_bufsize; 821 bpf_buffer_ioctl_sblen(d, &size); 822 823 return (0); 824 } 825 826 /* 827 * bpfread - read next chunk of packets from buffers 828 */ 829 static int 830 bpfread(struct cdev *dev, struct uio *uio, int ioflag) 831 { 832 struct bpf_d *d; 833 int error; 834 int non_block; 835 int timed_out; 836 837 error = devfs_get_cdevpriv((void **)&d); 838 if (error != 0) 839 return (error); 840 841 /* 842 * Restrict application to use a buffer the same size as 843 * as kernel buffers. 844 */ 845 if (uio->uio_resid != d->bd_bufsize) 846 return (EINVAL); 847 848 non_block = ((ioflag & O_NONBLOCK) != 0); 849 850 BPFD_LOCK(d); 851 BPF_PID_REFRESH_CUR(d); 852 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) { 853 BPFD_UNLOCK(d); 854 return (EOPNOTSUPP); 855 } 856 if (d->bd_state == BPF_WAITING) 857 callout_stop(&d->bd_callout); 858 timed_out = (d->bd_state == BPF_TIMED_OUT); 859 d->bd_state = BPF_IDLE; 860 while (d->bd_hbuf_in_use) { 861 error = mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, 862 PRINET|PCATCH, "bd_hbuf", 0); 863 if (error != 0) { 864 BPFD_UNLOCK(d); 865 return (error); 866 } 867 } 868 /* 869 * If the hold buffer is empty, then do a timed sleep, which 870 * ends when the timeout expires or when enough packets 871 * have arrived to fill the store buffer. 872 */ 873 while (d->bd_hbuf == NULL) { 874 if (d->bd_slen != 0) { 875 /* 876 * A packet(s) either arrived since the previous 877 * read or arrived while we were asleep. 878 */ 879 if (d->bd_immediate || non_block || timed_out) { 880 /* 881 * Rotate the buffers and return what's here 882 * if we are in immediate mode, non-blocking 883 * flag is set, or this descriptor timed out. 884 */ 885 ROTATE_BUFFERS(d); 886 break; 887 } 888 } 889 890 /* 891 * No data is available, check to see if the bpf device 892 * is still pointed at a real interface. If not, return 893 * ENXIO so that the userland process knows to rebind 894 * it before using it again. 895 */ 896 if (d->bd_bif == NULL) { 897 BPFD_UNLOCK(d); 898 return (ENXIO); 899 } 900 901 if (non_block) { 902 BPFD_UNLOCK(d); 903 return (EWOULDBLOCK); 904 } 905 error = msleep(d, &d->bd_lock, PRINET|PCATCH, 906 "bpf", d->bd_rtout); 907 if (error == EINTR || error == ERESTART) { 908 BPFD_UNLOCK(d); 909 return (error); 910 } 911 if (error == EWOULDBLOCK) { 912 /* 913 * On a timeout, return what's in the buffer, 914 * which may be nothing. If there is something 915 * in the store buffer, we can rotate the buffers. 916 */ 917 if (d->bd_hbuf) 918 /* 919 * We filled up the buffer in between 920 * getting the timeout and arriving 921 * here, so we don't need to rotate. 922 */ 923 break; 924 925 if (d->bd_slen == 0) { 926 BPFD_UNLOCK(d); 927 return (0); 928 } 929 ROTATE_BUFFERS(d); 930 break; 931 } 932 } 933 /* 934 * At this point, we know we have something in the hold slot. 935 */ 936 d->bd_hbuf_in_use = 1; 937 BPFD_UNLOCK(d); 938 939 /* 940 * Move data from hold buffer into user space. 941 * We know the entire buffer is transferred since 942 * we checked above that the read buffer is bpf_bufsize bytes. 943 * 944 * We do not have to worry about simultaneous reads because 945 * we waited for sole access to the hold buffer above. 946 */ 947 error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio); 948 949 BPFD_LOCK(d); 950 KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf")); 951 d->bd_fbuf = d->bd_hbuf; 952 d->bd_hbuf = NULL; 953 d->bd_hlen = 0; 954 bpf_buf_reclaimed(d); 955 d->bd_hbuf_in_use = 0; 956 wakeup(&d->bd_hbuf_in_use); 957 BPFD_UNLOCK(d); 958 959 return (error); 960 } 961 962 /* 963 * If there are processes sleeping on this descriptor, wake them up. 964 */ 965 static __inline void 966 bpf_wakeup(struct bpf_d *d) 967 { 968 969 BPFD_LOCK_ASSERT(d); 970 if (d->bd_state == BPF_WAITING) { 971 callout_stop(&d->bd_callout); 972 d->bd_state = BPF_IDLE; 973 } 974 wakeup(d); 975 if (d->bd_async && d->bd_sig && d->bd_sigio) 976 pgsigio(&d->bd_sigio, d->bd_sig, 0); 977 978 selwakeuppri(&d->bd_sel, PRINET); 979 KNOTE_LOCKED(&d->bd_sel.si_note, 0); 980 } 981 982 static void 983 bpf_timed_out(void *arg) 984 { 985 struct bpf_d *d = (struct bpf_d *)arg; 986 987 BPFD_LOCK_ASSERT(d); 988 989 if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout)) 990 return; 991 if (d->bd_state == BPF_WAITING) { 992 d->bd_state = BPF_TIMED_OUT; 993 if (d->bd_slen != 0) 994 bpf_wakeup(d); 995 } 996 } 997 998 static int 999 bpf_ready(struct bpf_d *d) 1000 { 1001 1002 BPFD_LOCK_ASSERT(d); 1003 1004 if (!bpf_canfreebuf(d) && d->bd_hlen != 0) 1005 return (1); 1006 if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) && 1007 d->bd_slen != 0) 1008 return (1); 1009 return (0); 1010 } 1011 1012 static int 1013 bpfwrite(struct cdev *dev, struct uio *uio, int ioflag) 1014 { 1015 struct bpf_d *d; 1016 struct ifnet *ifp; 1017 struct mbuf *m, *mc; 1018 struct sockaddr dst; 1019 int error, hlen; 1020 1021 error = devfs_get_cdevpriv((void **)&d); 1022 if (error != 0) 1023 return (error); 1024 1025 BPF_PID_REFRESH_CUR(d); 1026 d->bd_wcount++; 1027 /* XXX: locking required */ 1028 if (d->bd_bif == NULL) { 1029 d->bd_wdcount++; 1030 return (ENXIO); 1031 } 1032 1033 ifp = d->bd_bif->bif_ifp; 1034 1035 if ((ifp->if_flags & IFF_UP) == 0) { 1036 d->bd_wdcount++; 1037 return (ENETDOWN); 1038 } 1039 1040 if (uio->uio_resid == 0) { 1041 d->bd_wdcount++; 1042 return (0); 1043 } 1044 1045 bzero(&dst, sizeof(dst)); 1046 m = NULL; 1047 hlen = 0; 1048 /* XXX: bpf_movein() can sleep */ 1049 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp, 1050 &m, &dst, &hlen, d->bd_wfilter); 1051 if (error) { 1052 d->bd_wdcount++; 1053 return (error); 1054 } 1055 d->bd_wfcount++; 1056 if (d->bd_hdrcmplt) 1057 dst.sa_family = pseudo_AF_HDRCMPLT; 1058 1059 if (d->bd_feedback) { 1060 mc = m_dup(m, M_NOWAIT); 1061 if (mc != NULL) 1062 mc->m_pkthdr.rcvif = ifp; 1063 /* Set M_PROMISC for outgoing packets to be discarded. */ 1064 if (d->bd_direction == BPF_D_INOUT) 1065 m->m_flags |= M_PROMISC; 1066 } else 1067 mc = NULL; 1068 1069 m->m_pkthdr.len -= hlen; 1070 m->m_len -= hlen; 1071 m->m_data += hlen; /* XXX */ 1072 1073 CURVNET_SET(ifp->if_vnet); 1074 #ifdef MAC 1075 BPFD_LOCK(d); 1076 mac_bpfdesc_create_mbuf(d, m); 1077 if (mc != NULL) 1078 mac_bpfdesc_create_mbuf(d, mc); 1079 BPFD_UNLOCK(d); 1080 #endif 1081 1082 error = (*ifp->if_output)(ifp, m, &dst, NULL); 1083 if (error) 1084 d->bd_wdcount++; 1085 1086 if (mc != NULL) { 1087 if (error == 0) 1088 (*ifp->if_input)(ifp, mc); 1089 else 1090 m_freem(mc); 1091 } 1092 CURVNET_RESTORE(); 1093 1094 return (error); 1095 } 1096 1097 /* 1098 * Reset a descriptor by flushing its packet buffer and clearing the receive 1099 * and drop counts. This is doable for kernel-only buffers, but with 1100 * zero-copy buffers, we can't write to (or rotate) buffers that are 1101 * currently owned by userspace. It would be nice if we could encapsulate 1102 * this logic in the buffer code rather than here. 1103 */ 1104 static void 1105 reset_d(struct bpf_d *d) 1106 { 1107 1108 BPFD_LOCK_ASSERT(d); 1109 1110 while (d->bd_hbuf_in_use) 1111 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET, 1112 "bd_hbuf", 0); 1113 if ((d->bd_hbuf != NULL) && 1114 (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) { 1115 /* Free the hold buffer. */ 1116 d->bd_fbuf = d->bd_hbuf; 1117 d->bd_hbuf = NULL; 1118 d->bd_hlen = 0; 1119 bpf_buf_reclaimed(d); 1120 } 1121 if (bpf_canwritebuf(d)) 1122 d->bd_slen = 0; 1123 d->bd_rcount = 0; 1124 d->bd_dcount = 0; 1125 d->bd_fcount = 0; 1126 d->bd_wcount = 0; 1127 d->bd_wfcount = 0; 1128 d->bd_wdcount = 0; 1129 d->bd_zcopy = 0; 1130 } 1131 1132 /* 1133 * FIONREAD Check for read packet available. 1134 * SIOCGIFADDR Get interface address - convenient hook to driver. 1135 * BIOCGBLEN Get buffer len [for read()]. 1136 * BIOCSETF Set read filter. 1137 * BIOCSETFNR Set read filter without resetting descriptor. 1138 * BIOCSETWF Set write filter. 1139 * BIOCFLUSH Flush read packet buffer. 1140 * BIOCPROMISC Put interface into promiscuous mode. 1141 * BIOCGDLT Get link layer type. 1142 * BIOCGETIF Get interface name. 1143 * BIOCSETIF Set interface. 1144 * BIOCSRTIMEOUT Set read timeout. 1145 * BIOCGRTIMEOUT Get read timeout. 1146 * BIOCGSTATS Get packet stats. 1147 * BIOCIMMEDIATE Set immediate mode. 1148 * BIOCVERSION Get filter language version. 1149 * BIOCGHDRCMPLT Get "header already complete" flag 1150 * BIOCSHDRCMPLT Set "header already complete" flag 1151 * BIOCGDIRECTION Get packet direction flag 1152 * BIOCSDIRECTION Set packet direction flag 1153 * BIOCGTSTAMP Get time stamp format and resolution. 1154 * BIOCSTSTAMP Set time stamp format and resolution. 1155 * BIOCLOCK Set "locked" flag 1156 * BIOCFEEDBACK Set packet feedback mode. 1157 * BIOCSETZBUF Set current zero-copy buffer locations. 1158 * BIOCGETZMAX Get maximum zero-copy buffer size. 1159 * BIOCROTZBUF Force rotation of zero-copy buffer 1160 * BIOCSETBUFMODE Set buffer mode. 1161 * BIOCGETBUFMODE Get current buffer mode. 1162 */ 1163 /* ARGSUSED */ 1164 static int 1165 bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, 1166 struct thread *td) 1167 { 1168 struct bpf_d *d; 1169 int error; 1170 1171 error = devfs_get_cdevpriv((void **)&d); 1172 if (error != 0) 1173 return (error); 1174 1175 /* 1176 * Refresh PID associated with this descriptor. 1177 */ 1178 BPFD_LOCK(d); 1179 BPF_PID_REFRESH(d, td); 1180 if (d->bd_state == BPF_WAITING) 1181 callout_stop(&d->bd_callout); 1182 d->bd_state = BPF_IDLE; 1183 BPFD_UNLOCK(d); 1184 1185 if (d->bd_locked == 1) { 1186 switch (cmd) { 1187 case BIOCGBLEN: 1188 case BIOCFLUSH: 1189 case BIOCGDLT: 1190 case BIOCGDLTLIST: 1191 #ifdef COMPAT_FREEBSD32 1192 case BIOCGDLTLIST32: 1193 #endif 1194 case BIOCGETIF: 1195 case BIOCGRTIMEOUT: 1196 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1197 case BIOCGRTIMEOUT32: 1198 #endif 1199 case BIOCGSTATS: 1200 case BIOCVERSION: 1201 case BIOCGRSIG: 1202 case BIOCGHDRCMPLT: 1203 case BIOCSTSTAMP: 1204 case BIOCFEEDBACK: 1205 case FIONREAD: 1206 case BIOCLOCK: 1207 case BIOCSRTIMEOUT: 1208 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1209 case BIOCSRTIMEOUT32: 1210 #endif 1211 case BIOCIMMEDIATE: 1212 case TIOCGPGRP: 1213 case BIOCROTZBUF: 1214 break; 1215 default: 1216 return (EPERM); 1217 } 1218 } 1219 #ifdef COMPAT_FREEBSD32 1220 /* 1221 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so 1222 * that it will get 32-bit packet headers. 1223 */ 1224 switch (cmd) { 1225 case BIOCSETF32: 1226 case BIOCSETFNR32: 1227 case BIOCSETWF32: 1228 case BIOCGDLTLIST32: 1229 case BIOCGRTIMEOUT32: 1230 case BIOCSRTIMEOUT32: 1231 BPFD_LOCK(d); 1232 d->bd_compat32 = 1; 1233 BPFD_UNLOCK(d); 1234 } 1235 #endif 1236 1237 CURVNET_SET(TD_TO_VNET(td)); 1238 switch (cmd) { 1239 1240 default: 1241 error = EINVAL; 1242 break; 1243 1244 /* 1245 * Check for read packet available. 1246 */ 1247 case FIONREAD: 1248 { 1249 int n; 1250 1251 BPFD_LOCK(d); 1252 n = d->bd_slen; 1253 while (d->bd_hbuf_in_use) 1254 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, 1255 PRINET, "bd_hbuf", 0); 1256 if (d->bd_hbuf) 1257 n += d->bd_hlen; 1258 BPFD_UNLOCK(d); 1259 1260 *(int *)addr = n; 1261 break; 1262 } 1263 1264 case SIOCGIFADDR: 1265 { 1266 struct ifnet *ifp; 1267 1268 if (d->bd_bif == NULL) 1269 error = EINVAL; 1270 else { 1271 ifp = d->bd_bif->bif_ifp; 1272 error = (*ifp->if_ioctl)(ifp, cmd, addr); 1273 } 1274 break; 1275 } 1276 1277 /* 1278 * Get buffer len [for read()]. 1279 */ 1280 case BIOCGBLEN: 1281 BPFD_LOCK(d); 1282 *(u_int *)addr = d->bd_bufsize; 1283 BPFD_UNLOCK(d); 1284 break; 1285 1286 /* 1287 * Set buffer length. 1288 */ 1289 case BIOCSBLEN: 1290 error = bpf_ioctl_sblen(d, (u_int *)addr); 1291 break; 1292 1293 /* 1294 * Set link layer read filter. 1295 */ 1296 case BIOCSETF: 1297 case BIOCSETFNR: 1298 case BIOCSETWF: 1299 #ifdef COMPAT_FREEBSD32 1300 case BIOCSETF32: 1301 case BIOCSETFNR32: 1302 case BIOCSETWF32: 1303 #endif 1304 error = bpf_setf(d, (struct bpf_program *)addr, cmd); 1305 break; 1306 1307 /* 1308 * Flush read packet buffer. 1309 */ 1310 case BIOCFLUSH: 1311 BPFD_LOCK(d); 1312 reset_d(d); 1313 BPFD_UNLOCK(d); 1314 break; 1315 1316 /* 1317 * Put interface into promiscuous mode. 1318 */ 1319 case BIOCPROMISC: 1320 if (d->bd_bif == NULL) { 1321 /* 1322 * No interface attached yet. 1323 */ 1324 error = EINVAL; 1325 break; 1326 } 1327 if (d->bd_promisc == 0) { 1328 error = ifpromisc(d->bd_bif->bif_ifp, 1); 1329 if (error == 0) 1330 d->bd_promisc = 1; 1331 } 1332 break; 1333 1334 /* 1335 * Get current data link type. 1336 */ 1337 case BIOCGDLT: 1338 BPF_LOCK(); 1339 if (d->bd_bif == NULL) 1340 error = EINVAL; 1341 else 1342 *(u_int *)addr = d->bd_bif->bif_dlt; 1343 BPF_UNLOCK(); 1344 break; 1345 1346 /* 1347 * Get a list of supported data link types. 1348 */ 1349 #ifdef COMPAT_FREEBSD32 1350 case BIOCGDLTLIST32: 1351 { 1352 struct bpf_dltlist32 *list32; 1353 struct bpf_dltlist dltlist; 1354 1355 list32 = (struct bpf_dltlist32 *)addr; 1356 dltlist.bfl_len = list32->bfl_len; 1357 dltlist.bfl_list = PTRIN(list32->bfl_list); 1358 BPF_LOCK(); 1359 if (d->bd_bif == NULL) 1360 error = EINVAL; 1361 else { 1362 error = bpf_getdltlist(d, &dltlist); 1363 if (error == 0) 1364 list32->bfl_len = dltlist.bfl_len; 1365 } 1366 BPF_UNLOCK(); 1367 break; 1368 } 1369 #endif 1370 1371 case BIOCGDLTLIST: 1372 BPF_LOCK(); 1373 if (d->bd_bif == NULL) 1374 error = EINVAL; 1375 else 1376 error = bpf_getdltlist(d, (struct bpf_dltlist *)addr); 1377 BPF_UNLOCK(); 1378 break; 1379 1380 /* 1381 * Set data link type. 1382 */ 1383 case BIOCSDLT: 1384 BPF_LOCK(); 1385 if (d->bd_bif == NULL) 1386 error = EINVAL; 1387 else 1388 error = bpf_setdlt(d, *(u_int *)addr); 1389 BPF_UNLOCK(); 1390 break; 1391 1392 /* 1393 * Get interface name. 1394 */ 1395 case BIOCGETIF: 1396 BPF_LOCK(); 1397 if (d->bd_bif == NULL) 1398 error = EINVAL; 1399 else { 1400 struct ifnet *const ifp = d->bd_bif->bif_ifp; 1401 struct ifreq *const ifr = (struct ifreq *)addr; 1402 1403 strlcpy(ifr->ifr_name, ifp->if_xname, 1404 sizeof(ifr->ifr_name)); 1405 } 1406 BPF_UNLOCK(); 1407 break; 1408 1409 /* 1410 * Set interface. 1411 */ 1412 case BIOCSETIF: 1413 BPF_LOCK(); 1414 error = bpf_setif(d, (struct ifreq *)addr); 1415 BPF_UNLOCK(); 1416 break; 1417 1418 /* 1419 * Set read timeout. 1420 */ 1421 case BIOCSRTIMEOUT: 1422 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1423 case BIOCSRTIMEOUT32: 1424 #endif 1425 { 1426 struct timeval *tv = (struct timeval *)addr; 1427 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1428 struct timeval32 *tv32; 1429 struct timeval tv64; 1430 1431 if (cmd == BIOCSRTIMEOUT32) { 1432 tv32 = (struct timeval32 *)addr; 1433 tv = &tv64; 1434 tv->tv_sec = tv32->tv_sec; 1435 tv->tv_usec = tv32->tv_usec; 1436 } else 1437 #endif 1438 tv = (struct timeval *)addr; 1439 1440 /* 1441 * Subtract 1 tick from tvtohz() since this isn't 1442 * a one-shot timer. 1443 */ 1444 if ((error = itimerfix(tv)) == 0) 1445 d->bd_rtout = tvtohz(tv) - 1; 1446 break; 1447 } 1448 1449 /* 1450 * Get read timeout. 1451 */ 1452 case BIOCGRTIMEOUT: 1453 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1454 case BIOCGRTIMEOUT32: 1455 #endif 1456 { 1457 struct timeval *tv; 1458 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1459 struct timeval32 *tv32; 1460 struct timeval tv64; 1461 1462 if (cmd == BIOCGRTIMEOUT32) 1463 tv = &tv64; 1464 else 1465 #endif 1466 tv = (struct timeval *)addr; 1467 1468 tv->tv_sec = d->bd_rtout / hz; 1469 tv->tv_usec = (d->bd_rtout % hz) * tick; 1470 #if defined(COMPAT_FREEBSD32) && !defined(__mips__) 1471 if (cmd == BIOCGRTIMEOUT32) { 1472 tv32 = (struct timeval32 *)addr; 1473 tv32->tv_sec = tv->tv_sec; 1474 tv32->tv_usec = tv->tv_usec; 1475 } 1476 #endif 1477 1478 break; 1479 } 1480 1481 /* 1482 * Get packet stats. 1483 */ 1484 case BIOCGSTATS: 1485 { 1486 struct bpf_stat *bs = (struct bpf_stat *)addr; 1487 1488 /* XXXCSJP overflow */ 1489 bs->bs_recv = d->bd_rcount; 1490 bs->bs_drop = d->bd_dcount; 1491 break; 1492 } 1493 1494 /* 1495 * Set immediate mode. 1496 */ 1497 case BIOCIMMEDIATE: 1498 BPFD_LOCK(d); 1499 d->bd_immediate = *(u_int *)addr; 1500 BPFD_UNLOCK(d); 1501 break; 1502 1503 case BIOCVERSION: 1504 { 1505 struct bpf_version *bv = (struct bpf_version *)addr; 1506 1507 bv->bv_major = BPF_MAJOR_VERSION; 1508 bv->bv_minor = BPF_MINOR_VERSION; 1509 break; 1510 } 1511 1512 /* 1513 * Get "header already complete" flag 1514 */ 1515 case BIOCGHDRCMPLT: 1516 BPFD_LOCK(d); 1517 *(u_int *)addr = d->bd_hdrcmplt; 1518 BPFD_UNLOCK(d); 1519 break; 1520 1521 /* 1522 * Set "header already complete" flag 1523 */ 1524 case BIOCSHDRCMPLT: 1525 BPFD_LOCK(d); 1526 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0; 1527 BPFD_UNLOCK(d); 1528 break; 1529 1530 /* 1531 * Get packet direction flag 1532 */ 1533 case BIOCGDIRECTION: 1534 BPFD_LOCK(d); 1535 *(u_int *)addr = d->bd_direction; 1536 BPFD_UNLOCK(d); 1537 break; 1538 1539 /* 1540 * Set packet direction flag 1541 */ 1542 case BIOCSDIRECTION: 1543 { 1544 u_int direction; 1545 1546 direction = *(u_int *)addr; 1547 switch (direction) { 1548 case BPF_D_IN: 1549 case BPF_D_INOUT: 1550 case BPF_D_OUT: 1551 BPFD_LOCK(d); 1552 d->bd_direction = direction; 1553 BPFD_UNLOCK(d); 1554 break; 1555 default: 1556 error = EINVAL; 1557 } 1558 } 1559 break; 1560 1561 /* 1562 * Get packet timestamp format and resolution. 1563 */ 1564 case BIOCGTSTAMP: 1565 BPFD_LOCK(d); 1566 *(u_int *)addr = d->bd_tstamp; 1567 BPFD_UNLOCK(d); 1568 break; 1569 1570 /* 1571 * Set packet timestamp format and resolution. 1572 */ 1573 case BIOCSTSTAMP: 1574 { 1575 u_int func; 1576 1577 func = *(u_int *)addr; 1578 if (BPF_T_VALID(func)) 1579 d->bd_tstamp = func; 1580 else 1581 error = EINVAL; 1582 } 1583 break; 1584 1585 case BIOCFEEDBACK: 1586 BPFD_LOCK(d); 1587 d->bd_feedback = *(u_int *)addr; 1588 BPFD_UNLOCK(d); 1589 break; 1590 1591 case BIOCLOCK: 1592 BPFD_LOCK(d); 1593 d->bd_locked = 1; 1594 BPFD_UNLOCK(d); 1595 break; 1596 1597 case FIONBIO: /* Non-blocking I/O */ 1598 break; 1599 1600 case FIOASYNC: /* Send signal on receive packets */ 1601 BPFD_LOCK(d); 1602 d->bd_async = *(int *)addr; 1603 BPFD_UNLOCK(d); 1604 break; 1605 1606 case FIOSETOWN: 1607 /* 1608 * XXX: Add some sort of locking here? 1609 * fsetown() can sleep. 1610 */ 1611 error = fsetown(*(int *)addr, &d->bd_sigio); 1612 break; 1613 1614 case FIOGETOWN: 1615 BPFD_LOCK(d); 1616 *(int *)addr = fgetown(&d->bd_sigio); 1617 BPFD_UNLOCK(d); 1618 break; 1619 1620 /* This is deprecated, FIOSETOWN should be used instead. */ 1621 case TIOCSPGRP: 1622 error = fsetown(-(*(int *)addr), &d->bd_sigio); 1623 break; 1624 1625 /* This is deprecated, FIOGETOWN should be used instead. */ 1626 case TIOCGPGRP: 1627 *(int *)addr = -fgetown(&d->bd_sigio); 1628 break; 1629 1630 case BIOCSRSIG: /* Set receive signal */ 1631 { 1632 u_int sig; 1633 1634 sig = *(u_int *)addr; 1635 1636 if (sig >= NSIG) 1637 error = EINVAL; 1638 else { 1639 BPFD_LOCK(d); 1640 d->bd_sig = sig; 1641 BPFD_UNLOCK(d); 1642 } 1643 break; 1644 } 1645 case BIOCGRSIG: 1646 BPFD_LOCK(d); 1647 *(u_int *)addr = d->bd_sig; 1648 BPFD_UNLOCK(d); 1649 break; 1650 1651 case BIOCGETBUFMODE: 1652 BPFD_LOCK(d); 1653 *(u_int *)addr = d->bd_bufmode; 1654 BPFD_UNLOCK(d); 1655 break; 1656 1657 case BIOCSETBUFMODE: 1658 /* 1659 * Allow the buffering mode to be changed as long as we 1660 * haven't yet committed to a particular mode. Our 1661 * definition of commitment, for now, is whether or not a 1662 * buffer has been allocated or an interface attached, since 1663 * that's the point where things get tricky. 1664 */ 1665 switch (*(u_int *)addr) { 1666 case BPF_BUFMODE_BUFFER: 1667 break; 1668 1669 case BPF_BUFMODE_ZBUF: 1670 if (bpf_zerocopy_enable) 1671 break; 1672 /* FALLSTHROUGH */ 1673 1674 default: 1675 CURVNET_RESTORE(); 1676 return (EINVAL); 1677 } 1678 1679 BPFD_LOCK(d); 1680 if (d->bd_sbuf != NULL || d->bd_hbuf != NULL || 1681 d->bd_fbuf != NULL || d->bd_bif != NULL) { 1682 BPFD_UNLOCK(d); 1683 CURVNET_RESTORE(); 1684 return (EBUSY); 1685 } 1686 d->bd_bufmode = *(u_int *)addr; 1687 BPFD_UNLOCK(d); 1688 break; 1689 1690 case BIOCGETZMAX: 1691 error = bpf_ioctl_getzmax(td, d, (size_t *)addr); 1692 break; 1693 1694 case BIOCSETZBUF: 1695 error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr); 1696 break; 1697 1698 case BIOCROTZBUF: 1699 error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr); 1700 break; 1701 } 1702 CURVNET_RESTORE(); 1703 return (error); 1704 } 1705 1706 /* 1707 * Set d's packet filter program to fp. If this file already has a filter, 1708 * free it and replace it. Returns EINVAL for bogus requests. 1709 * 1710 * Note we need global lock here to serialize bpf_setf() and bpf_setif() calls 1711 * since reading d->bd_bif can't be protected by d or interface lock due to 1712 * lock order. 1713 * 1714 * Additionally, we have to acquire interface write lock due to bpf_mtap() uses 1715 * interface read lock to read all filers. 1716 * 1717 */ 1718 static int 1719 bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd) 1720 { 1721 #ifdef COMPAT_FREEBSD32 1722 struct bpf_program fp_swab; 1723 struct bpf_program32 *fp32; 1724 #endif 1725 struct bpf_insn *fcode, *old; 1726 #ifdef BPF_JITTER 1727 bpf_jit_filter *jfunc, *ofunc; 1728 #endif 1729 size_t size; 1730 u_int flen; 1731 int need_upgrade; 1732 1733 #ifdef COMPAT_FREEBSD32 1734 switch (cmd) { 1735 case BIOCSETF32: 1736 case BIOCSETWF32: 1737 case BIOCSETFNR32: 1738 fp32 = (struct bpf_program32 *)fp; 1739 fp_swab.bf_len = fp32->bf_len; 1740 fp_swab.bf_insns = (struct bpf_insn *)(uintptr_t)fp32->bf_insns; 1741 fp = &fp_swab; 1742 switch (cmd) { 1743 case BIOCSETF32: 1744 cmd = BIOCSETF; 1745 break; 1746 case BIOCSETWF32: 1747 cmd = BIOCSETWF; 1748 break; 1749 } 1750 break; 1751 } 1752 #endif 1753 1754 fcode = NULL; 1755 #ifdef BPF_JITTER 1756 jfunc = ofunc = NULL; 1757 #endif 1758 need_upgrade = 0; 1759 1760 /* 1761 * Check new filter validness before acquiring any locks. 1762 * Allocate memory for new filter, if needed. 1763 */ 1764 flen = fp->bf_len; 1765 if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0)) 1766 return (EINVAL); 1767 size = flen * sizeof(*fp->bf_insns); 1768 if (size > 0) { 1769 /* We're setting up new filter. Copy and check actual data. */ 1770 fcode = malloc(size, M_BPF, M_WAITOK); 1771 if (copyin(fp->bf_insns, fcode, size) != 0 || 1772 !bpf_validate(fcode, flen)) { 1773 free(fcode, M_BPF); 1774 return (EINVAL); 1775 } 1776 #ifdef BPF_JITTER 1777 /* Filter is copied inside fcode and is perfectly valid. */ 1778 jfunc = bpf_jitter(fcode, flen); 1779 #endif 1780 } 1781 1782 BPF_LOCK(); 1783 1784 /* 1785 * Set up new filter. 1786 * Protect filter change by interface lock. 1787 * Additionally, we are protected by global lock here. 1788 */ 1789 if (d->bd_bif != NULL) 1790 BPFIF_WLOCK(d->bd_bif); 1791 BPFD_LOCK(d); 1792 if (cmd == BIOCSETWF) { 1793 old = d->bd_wfilter; 1794 d->bd_wfilter = fcode; 1795 } else { 1796 old = d->bd_rfilter; 1797 d->bd_rfilter = fcode; 1798 #ifdef BPF_JITTER 1799 ofunc = d->bd_bfilter; 1800 d->bd_bfilter = jfunc; 1801 #endif 1802 if (cmd == BIOCSETF) 1803 reset_d(d); 1804 1805 if (fcode != NULL) { 1806 /* 1807 * Do not require upgrade by first BIOCSETF 1808 * (used to set snaplen) by pcap_open_live(). 1809 */ 1810 if (d->bd_writer != 0 && --d->bd_writer == 0) 1811 need_upgrade = 1; 1812 CTR4(KTR_NET, "%s: filter function set by pid %d, " 1813 "bd_writer counter %d, need_upgrade %d", 1814 __func__, d->bd_pid, d->bd_writer, need_upgrade); 1815 } 1816 } 1817 BPFD_UNLOCK(d); 1818 if (d->bd_bif != NULL) 1819 BPFIF_WUNLOCK(d->bd_bif); 1820 if (old != NULL) 1821 free(old, M_BPF); 1822 #ifdef BPF_JITTER 1823 if (ofunc != NULL) 1824 bpf_destroy_jit_filter(ofunc); 1825 #endif 1826 1827 /* Move d to active readers list. */ 1828 if (need_upgrade) 1829 bpf_upgraded(d); 1830 1831 BPF_UNLOCK(); 1832 return (0); 1833 } 1834 1835 /* 1836 * Detach a file from its current interface (if attached at all) and attach 1837 * to the interface indicated by the name stored in ifr. 1838 * Return an errno or 0. 1839 */ 1840 static int 1841 bpf_setif(struct bpf_d *d, struct ifreq *ifr) 1842 { 1843 struct bpf_if *bp; 1844 struct ifnet *theywant; 1845 1846 BPF_LOCK_ASSERT(); 1847 1848 theywant = ifunit(ifr->ifr_name); 1849 if (theywant == NULL || theywant->if_bpf == NULL) 1850 return (ENXIO); 1851 1852 bp = theywant->if_bpf; 1853 1854 /* Check if interface is not being detached from BPF */ 1855 BPFIF_RLOCK(bp); 1856 if (bp->flags & BPFIF_FLAG_DYING) { 1857 BPFIF_RUNLOCK(bp); 1858 return (ENXIO); 1859 } 1860 BPFIF_RUNLOCK(bp); 1861 1862 /* 1863 * Behavior here depends on the buffering model. If we're using 1864 * kernel memory buffers, then we can allocate them here. If we're 1865 * using zero-copy, then the user process must have registered 1866 * buffers by the time we get here. If not, return an error. 1867 */ 1868 switch (d->bd_bufmode) { 1869 case BPF_BUFMODE_BUFFER: 1870 case BPF_BUFMODE_ZBUF: 1871 if (d->bd_sbuf == NULL) 1872 return (EINVAL); 1873 break; 1874 1875 default: 1876 panic("bpf_setif: bufmode %d", d->bd_bufmode); 1877 } 1878 if (bp != d->bd_bif) 1879 bpf_attachd(d, bp); 1880 BPFD_LOCK(d); 1881 reset_d(d); 1882 BPFD_UNLOCK(d); 1883 return (0); 1884 } 1885 1886 /* 1887 * Support for select() and poll() system calls 1888 * 1889 * Return true iff the specific operation will not block indefinitely. 1890 * Otherwise, return false but make a note that a selwakeup() must be done. 1891 */ 1892 static int 1893 bpfpoll(struct cdev *dev, int events, struct thread *td) 1894 { 1895 struct bpf_d *d; 1896 int revents; 1897 1898 if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL) 1899 return (events & 1900 (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM)); 1901 1902 /* 1903 * Refresh PID associated with this descriptor. 1904 */ 1905 revents = events & (POLLOUT | POLLWRNORM); 1906 BPFD_LOCK(d); 1907 BPF_PID_REFRESH(d, td); 1908 if (events & (POLLIN | POLLRDNORM)) { 1909 if (bpf_ready(d)) 1910 revents |= events & (POLLIN | POLLRDNORM); 1911 else { 1912 selrecord(td, &d->bd_sel); 1913 /* Start the read timeout if necessary. */ 1914 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) { 1915 callout_reset(&d->bd_callout, d->bd_rtout, 1916 bpf_timed_out, d); 1917 d->bd_state = BPF_WAITING; 1918 } 1919 } 1920 } 1921 BPFD_UNLOCK(d); 1922 return (revents); 1923 } 1924 1925 /* 1926 * Support for kevent() system call. Register EVFILT_READ filters and 1927 * reject all others. 1928 */ 1929 int 1930 bpfkqfilter(struct cdev *dev, struct knote *kn) 1931 { 1932 struct bpf_d *d; 1933 1934 if (devfs_get_cdevpriv((void **)&d) != 0 || 1935 kn->kn_filter != EVFILT_READ) 1936 return (1); 1937 1938 /* 1939 * Refresh PID associated with this descriptor. 1940 */ 1941 BPFD_LOCK(d); 1942 BPF_PID_REFRESH_CUR(d); 1943 kn->kn_fop = &bpfread_filtops; 1944 kn->kn_hook = d; 1945 knlist_add(&d->bd_sel.si_note, kn, 1); 1946 BPFD_UNLOCK(d); 1947 1948 return (0); 1949 } 1950 1951 static void 1952 filt_bpfdetach(struct knote *kn) 1953 { 1954 struct bpf_d *d = (struct bpf_d *)kn->kn_hook; 1955 1956 knlist_remove(&d->bd_sel.si_note, kn, 0); 1957 } 1958 1959 static int 1960 filt_bpfread(struct knote *kn, long hint) 1961 { 1962 struct bpf_d *d = (struct bpf_d *)kn->kn_hook; 1963 int ready; 1964 1965 BPFD_LOCK_ASSERT(d); 1966 ready = bpf_ready(d); 1967 if (ready) { 1968 kn->kn_data = d->bd_slen; 1969 while (d->bd_hbuf_in_use) 1970 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, 1971 PRINET, "bd_hbuf", 0); 1972 if (d->bd_hbuf) 1973 kn->kn_data += d->bd_hlen; 1974 } else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) { 1975 callout_reset(&d->bd_callout, d->bd_rtout, 1976 bpf_timed_out, d); 1977 d->bd_state = BPF_WAITING; 1978 } 1979 1980 return (ready); 1981 } 1982 1983 #define BPF_TSTAMP_NONE 0 1984 #define BPF_TSTAMP_FAST 1 1985 #define BPF_TSTAMP_NORMAL 2 1986 #define BPF_TSTAMP_EXTERN 3 1987 1988 static int 1989 bpf_ts_quality(int tstype) 1990 { 1991 1992 if (tstype == BPF_T_NONE) 1993 return (BPF_TSTAMP_NONE); 1994 if ((tstype & BPF_T_FAST) != 0) 1995 return (BPF_TSTAMP_FAST); 1996 1997 return (BPF_TSTAMP_NORMAL); 1998 } 1999 2000 static int 2001 bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m) 2002 { 2003 struct m_tag *tag; 2004 int quality; 2005 2006 quality = bpf_ts_quality(tstype); 2007 if (quality == BPF_TSTAMP_NONE) 2008 return (quality); 2009 2010 if (m != NULL) { 2011 tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL); 2012 if (tag != NULL) { 2013 *bt = *(struct bintime *)(tag + 1); 2014 return (BPF_TSTAMP_EXTERN); 2015 } 2016 } 2017 if (quality == BPF_TSTAMP_NORMAL) 2018 binuptime(bt); 2019 else 2020 getbinuptime(bt); 2021 2022 return (quality); 2023 } 2024 2025 /* 2026 * Incoming linkage from device drivers. Process the packet pkt, of length 2027 * pktlen, which is stored in a contiguous buffer. The packet is parsed 2028 * by each process' filter, and if accepted, stashed into the corresponding 2029 * buffer. 2030 */ 2031 void 2032 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) 2033 { 2034 struct bintime bt; 2035 struct bpf_d *d; 2036 #ifdef BPF_JITTER 2037 bpf_jit_filter *bf; 2038 #endif 2039 u_int slen; 2040 int gottime; 2041 2042 gottime = BPF_TSTAMP_NONE; 2043 2044 BPFIF_RLOCK(bp); 2045 2046 LIST_FOREACH(d, &bp->bif_dlist, bd_next) { 2047 /* 2048 * We are not using any locks for d here because: 2049 * 1) any filter change is protected by interface 2050 * write lock 2051 * 2) destroying/detaching d is protected by interface 2052 * write lock, too 2053 */ 2054 2055 /* XXX: Do not protect counter for the sake of performance. */ 2056 ++d->bd_rcount; 2057 /* 2058 * NB: We dont call BPF_CHECK_DIRECTION() here since there is no 2059 * way for the caller to indiciate to us whether this packet 2060 * is inbound or outbound. In the bpf_mtap() routines, we use 2061 * the interface pointers on the mbuf to figure it out. 2062 */ 2063 #ifdef BPF_JITTER 2064 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL; 2065 if (bf != NULL) 2066 slen = (*(bf->func))(pkt, pktlen, pktlen); 2067 else 2068 #endif 2069 slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen); 2070 if (slen != 0) { 2071 /* 2072 * Filter matches. Let's to acquire write lock. 2073 */ 2074 BPFD_LOCK(d); 2075 2076 d->bd_fcount++; 2077 if (gottime < bpf_ts_quality(d->bd_tstamp)) 2078 gottime = bpf_gettime(&bt, d->bd_tstamp, NULL); 2079 #ifdef MAC 2080 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0) 2081 #endif 2082 catchpacket(d, pkt, pktlen, slen, 2083 bpf_append_bytes, &bt); 2084 BPFD_UNLOCK(d); 2085 } 2086 } 2087 BPFIF_RUNLOCK(bp); 2088 } 2089 2090 #define BPF_CHECK_DIRECTION(d, r, i) \ 2091 (((d)->bd_direction == BPF_D_IN && (r) != (i)) || \ 2092 ((d)->bd_direction == BPF_D_OUT && (r) == (i))) 2093 2094 /* 2095 * Incoming linkage from device drivers, when packet is in an mbuf chain. 2096 * Locking model is explained in bpf_tap(). 2097 */ 2098 void 2099 bpf_mtap(struct bpf_if *bp, struct mbuf *m) 2100 { 2101 struct bintime bt; 2102 struct bpf_d *d; 2103 #ifdef BPF_JITTER 2104 bpf_jit_filter *bf; 2105 #endif 2106 u_int pktlen, slen; 2107 int gottime; 2108 2109 /* Skip outgoing duplicate packets. */ 2110 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) { 2111 m->m_flags &= ~M_PROMISC; 2112 return; 2113 } 2114 2115 pktlen = m_length(m, NULL); 2116 gottime = BPF_TSTAMP_NONE; 2117 2118 BPFIF_RLOCK(bp); 2119 2120 LIST_FOREACH(d, &bp->bif_dlist, bd_next) { 2121 if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp)) 2122 continue; 2123 ++d->bd_rcount; 2124 #ifdef BPF_JITTER 2125 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL; 2126 /* XXX We cannot handle multiple mbufs. */ 2127 if (bf != NULL && m->m_next == NULL) 2128 slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen); 2129 else 2130 #endif 2131 slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0); 2132 if (slen != 0) { 2133 BPFD_LOCK(d); 2134 2135 d->bd_fcount++; 2136 if (gottime < bpf_ts_quality(d->bd_tstamp)) 2137 gottime = bpf_gettime(&bt, d->bd_tstamp, m); 2138 #ifdef MAC 2139 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0) 2140 #endif 2141 catchpacket(d, (u_char *)m, pktlen, slen, 2142 bpf_append_mbuf, &bt); 2143 BPFD_UNLOCK(d); 2144 } 2145 } 2146 BPFIF_RUNLOCK(bp); 2147 } 2148 2149 /* 2150 * Incoming linkage from device drivers, when packet is in 2151 * an mbuf chain and to be prepended by a contiguous header. 2152 */ 2153 void 2154 bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m) 2155 { 2156 struct bintime bt; 2157 struct mbuf mb; 2158 struct bpf_d *d; 2159 u_int pktlen, slen; 2160 int gottime; 2161 2162 /* Skip outgoing duplicate packets. */ 2163 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) { 2164 m->m_flags &= ~M_PROMISC; 2165 return; 2166 } 2167 2168 pktlen = m_length(m, NULL); 2169 /* 2170 * Craft on-stack mbuf suitable for passing to bpf_filter. 2171 * Note that we cut corners here; we only setup what's 2172 * absolutely needed--this mbuf should never go anywhere else. 2173 */ 2174 mb.m_next = m; 2175 mb.m_data = data; 2176 mb.m_len = dlen; 2177 pktlen += dlen; 2178 2179 gottime = BPF_TSTAMP_NONE; 2180 2181 BPFIF_RLOCK(bp); 2182 2183 LIST_FOREACH(d, &bp->bif_dlist, bd_next) { 2184 if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp)) 2185 continue; 2186 ++d->bd_rcount; 2187 slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0); 2188 if (slen != 0) { 2189 BPFD_LOCK(d); 2190 2191 d->bd_fcount++; 2192 if (gottime < bpf_ts_quality(d->bd_tstamp)) 2193 gottime = bpf_gettime(&bt, d->bd_tstamp, m); 2194 #ifdef MAC 2195 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0) 2196 #endif 2197 catchpacket(d, (u_char *)&mb, pktlen, slen, 2198 bpf_append_mbuf, &bt); 2199 BPFD_UNLOCK(d); 2200 } 2201 } 2202 BPFIF_RUNLOCK(bp); 2203 } 2204 2205 #undef BPF_CHECK_DIRECTION 2206 2207 #undef BPF_TSTAMP_NONE 2208 #undef BPF_TSTAMP_FAST 2209 #undef BPF_TSTAMP_NORMAL 2210 #undef BPF_TSTAMP_EXTERN 2211 2212 static int 2213 bpf_hdrlen(struct bpf_d *d) 2214 { 2215 int hdrlen; 2216 2217 hdrlen = d->bd_bif->bif_hdrlen; 2218 #ifndef BURN_BRIDGES 2219 if (d->bd_tstamp == BPF_T_NONE || 2220 BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME) 2221 #ifdef COMPAT_FREEBSD32 2222 if (d->bd_compat32) 2223 hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32); 2224 else 2225 #endif 2226 hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr); 2227 else 2228 #endif 2229 hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr); 2230 #ifdef COMPAT_FREEBSD32 2231 if (d->bd_compat32) 2232 hdrlen = BPF_WORDALIGN32(hdrlen); 2233 else 2234 #endif 2235 hdrlen = BPF_WORDALIGN(hdrlen); 2236 2237 return (hdrlen - d->bd_bif->bif_hdrlen); 2238 } 2239 2240 static void 2241 bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype) 2242 { 2243 struct bintime bt2; 2244 struct timeval tsm; 2245 struct timespec tsn; 2246 2247 if ((tstype & BPF_T_MONOTONIC) == 0) { 2248 bt2 = *bt; 2249 bintime_add(&bt2, &boottimebin); 2250 bt = &bt2; 2251 } 2252 switch (BPF_T_FORMAT(tstype)) { 2253 case BPF_T_MICROTIME: 2254 bintime2timeval(bt, &tsm); 2255 ts->bt_sec = tsm.tv_sec; 2256 ts->bt_frac = tsm.tv_usec; 2257 break; 2258 case BPF_T_NANOTIME: 2259 bintime2timespec(bt, &tsn); 2260 ts->bt_sec = tsn.tv_sec; 2261 ts->bt_frac = tsn.tv_nsec; 2262 break; 2263 case BPF_T_BINTIME: 2264 ts->bt_sec = bt->sec; 2265 ts->bt_frac = bt->frac; 2266 break; 2267 } 2268 } 2269 2270 /* 2271 * Move the packet data from interface memory (pkt) into the 2272 * store buffer. "cpfn" is the routine called to do the actual data 2273 * transfer. bcopy is passed in to copy contiguous chunks, while 2274 * bpf_append_mbuf is passed in to copy mbuf chains. In the latter case, 2275 * pkt is really an mbuf. 2276 */ 2277 static void 2278 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen, 2279 void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int), 2280 struct bintime *bt) 2281 { 2282 struct bpf_xhdr hdr; 2283 #ifndef BURN_BRIDGES 2284 struct bpf_hdr hdr_old; 2285 #ifdef COMPAT_FREEBSD32 2286 struct bpf_hdr32 hdr32_old; 2287 #endif 2288 #endif 2289 int caplen, curlen, hdrlen, totlen; 2290 int do_wakeup = 0; 2291 int do_timestamp; 2292 int tstype; 2293 2294 BPFD_LOCK_ASSERT(d); 2295 2296 /* 2297 * Detect whether user space has released a buffer back to us, and if 2298 * so, move it from being a hold buffer to a free buffer. This may 2299 * not be the best place to do it (for example, we might only want to 2300 * run this check if we need the space), but for now it's a reliable 2301 * spot to do it. 2302 */ 2303 if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) { 2304 while (d->bd_hbuf_in_use) 2305 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, 2306 PRINET, "bd_hbuf", 0); 2307 d->bd_fbuf = d->bd_hbuf; 2308 d->bd_hbuf = NULL; 2309 d->bd_hlen = 0; 2310 bpf_buf_reclaimed(d); 2311 } 2312 2313 /* 2314 * Figure out how many bytes to move. If the packet is 2315 * greater or equal to the snapshot length, transfer that 2316 * much. Otherwise, transfer the whole packet (unless 2317 * we hit the buffer size limit). 2318 */ 2319 hdrlen = bpf_hdrlen(d); 2320 totlen = hdrlen + min(snaplen, pktlen); 2321 if (totlen > d->bd_bufsize) 2322 totlen = d->bd_bufsize; 2323 2324 /* 2325 * Round up the end of the previous packet to the next longword. 2326 * 2327 * Drop the packet if there's no room and no hope of room 2328 * If the packet would overflow the storage buffer or the storage 2329 * buffer is considered immutable by the buffer model, try to rotate 2330 * the buffer and wakeup pending processes. 2331 */ 2332 #ifdef COMPAT_FREEBSD32 2333 if (d->bd_compat32) 2334 curlen = BPF_WORDALIGN32(d->bd_slen); 2335 else 2336 #endif 2337 curlen = BPF_WORDALIGN(d->bd_slen); 2338 if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) { 2339 if (d->bd_fbuf == NULL) { 2340 /* 2341 * There's no room in the store buffer, and no 2342 * prospect of room, so drop the packet. Notify the 2343 * buffer model. 2344 */ 2345 bpf_buffull(d); 2346 ++d->bd_dcount; 2347 return; 2348 } 2349 while (d->bd_hbuf_in_use) 2350 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, 2351 PRINET, "bd_hbuf", 0); 2352 ROTATE_BUFFERS(d); 2353 do_wakeup = 1; 2354 curlen = 0; 2355 } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) 2356 /* 2357 * Immediate mode is set, or the read timeout has already 2358 * expired during a select call. A packet arrived, so the 2359 * reader should be woken up. 2360 */ 2361 do_wakeup = 1; 2362 caplen = totlen - hdrlen; 2363 tstype = d->bd_tstamp; 2364 do_timestamp = tstype != BPF_T_NONE; 2365 #ifndef BURN_BRIDGES 2366 if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) { 2367 struct bpf_ts ts; 2368 if (do_timestamp) 2369 bpf_bintime2ts(bt, &ts, tstype); 2370 #ifdef COMPAT_FREEBSD32 2371 if (d->bd_compat32) { 2372 bzero(&hdr32_old, sizeof(hdr32_old)); 2373 if (do_timestamp) { 2374 hdr32_old.bh_tstamp.tv_sec = ts.bt_sec; 2375 hdr32_old.bh_tstamp.tv_usec = ts.bt_frac; 2376 } 2377 hdr32_old.bh_datalen = pktlen; 2378 hdr32_old.bh_hdrlen = hdrlen; 2379 hdr32_old.bh_caplen = caplen; 2380 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old, 2381 sizeof(hdr32_old)); 2382 goto copy; 2383 } 2384 #endif 2385 bzero(&hdr_old, sizeof(hdr_old)); 2386 if (do_timestamp) { 2387 hdr_old.bh_tstamp.tv_sec = ts.bt_sec; 2388 hdr_old.bh_tstamp.tv_usec = ts.bt_frac; 2389 } 2390 hdr_old.bh_datalen = pktlen; 2391 hdr_old.bh_hdrlen = hdrlen; 2392 hdr_old.bh_caplen = caplen; 2393 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old, 2394 sizeof(hdr_old)); 2395 goto copy; 2396 } 2397 #endif 2398 2399 /* 2400 * Append the bpf header. Note we append the actual header size, but 2401 * move forward the length of the header plus padding. 2402 */ 2403 bzero(&hdr, sizeof(hdr)); 2404 if (do_timestamp) 2405 bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype); 2406 hdr.bh_datalen = pktlen; 2407 hdr.bh_hdrlen = hdrlen; 2408 hdr.bh_caplen = caplen; 2409 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr)); 2410 2411 /* 2412 * Copy the packet data into the store buffer and update its length. 2413 */ 2414 #ifndef BURN_BRIDGES 2415 copy: 2416 #endif 2417 (*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen); 2418 d->bd_slen = curlen + totlen; 2419 2420 if (do_wakeup) 2421 bpf_wakeup(d); 2422 } 2423 2424 /* 2425 * Free buffers currently in use by a descriptor. 2426 * Called on close. 2427 */ 2428 static void 2429 bpf_freed(struct bpf_d *d) 2430 { 2431 2432 /* 2433 * We don't need to lock out interrupts since this descriptor has 2434 * been detached from its interface and it yet hasn't been marked 2435 * free. 2436 */ 2437 bpf_free(d); 2438 if (d->bd_rfilter != NULL) { 2439 free((caddr_t)d->bd_rfilter, M_BPF); 2440 #ifdef BPF_JITTER 2441 if (d->bd_bfilter != NULL) 2442 bpf_destroy_jit_filter(d->bd_bfilter); 2443 #endif 2444 } 2445 if (d->bd_wfilter != NULL) 2446 free((caddr_t)d->bd_wfilter, M_BPF); 2447 mtx_destroy(&d->bd_lock); 2448 } 2449 2450 /* 2451 * Attach an interface to bpf. dlt is the link layer type; hdrlen is the 2452 * fixed size of the link header (variable length headers not yet supported). 2453 */ 2454 void 2455 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) 2456 { 2457 2458 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf); 2459 } 2460 2461 /* 2462 * Attach an interface to bpf. ifp is a pointer to the structure 2463 * defining the interface to be attached, dlt is the link layer type, 2464 * and hdrlen is the fixed size of the link header (variable length 2465 * headers are not yet supporrted). 2466 */ 2467 void 2468 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 2469 { 2470 struct bpf_if *bp; 2471 2472 bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO); 2473 if (bp == NULL) 2474 panic("bpfattach"); 2475 2476 LIST_INIT(&bp->bif_dlist); 2477 LIST_INIT(&bp->bif_wlist); 2478 bp->bif_ifp = ifp; 2479 bp->bif_dlt = dlt; 2480 rw_init(&bp->bif_lock, "bpf interface lock"); 2481 KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized")); 2482 *driverp = bp; 2483 2484 BPF_LOCK(); 2485 LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next); 2486 BPF_UNLOCK(); 2487 2488 bp->bif_hdrlen = hdrlen; 2489 2490 if (bootverbose) 2491 if_printf(ifp, "bpf attached\n"); 2492 } 2493 2494 /* 2495 * Detach bpf from an interface. This involves detaching each descriptor 2496 * associated with the interface. Notify each descriptor as it's detached 2497 * so that any sleepers wake up and get ENXIO. 2498 */ 2499 void 2500 bpfdetach(struct ifnet *ifp) 2501 { 2502 struct bpf_if *bp, *bp_temp; 2503 struct bpf_d *d; 2504 int ndetached; 2505 2506 ndetached = 0; 2507 2508 BPF_LOCK(); 2509 /* Find all bpf_if struct's which reference ifp and detach them. */ 2510 LIST_FOREACH_SAFE(bp, &bpf_iflist, bif_next, bp_temp) { 2511 if (ifp != bp->bif_ifp) 2512 continue; 2513 2514 LIST_REMOVE(bp, bif_next); 2515 /* Add to to-be-freed list */ 2516 LIST_INSERT_HEAD(&bpf_freelist, bp, bif_next); 2517 2518 ndetached++; 2519 /* 2520 * Delay freeing bp till interface is detached 2521 * and all routes through this interface are removed. 2522 * Mark bp as detached to restrict new consumers. 2523 */ 2524 BPFIF_WLOCK(bp); 2525 bp->flags |= BPFIF_FLAG_DYING; 2526 BPFIF_WUNLOCK(bp); 2527 2528 CTR4(KTR_NET, "%s: sheduling free for encap %d (%p) for if %p", 2529 __func__, bp->bif_dlt, bp, ifp); 2530 2531 /* Free common descriptors */ 2532 while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) { 2533 bpf_detachd_locked(d); 2534 BPFD_LOCK(d); 2535 bpf_wakeup(d); 2536 BPFD_UNLOCK(d); 2537 } 2538 2539 /* Free writer-only descriptors */ 2540 while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) { 2541 bpf_detachd_locked(d); 2542 BPFD_LOCK(d); 2543 bpf_wakeup(d); 2544 BPFD_UNLOCK(d); 2545 } 2546 } 2547 BPF_UNLOCK(); 2548 2549 #ifdef INVARIANTS 2550 if (ndetached == 0) 2551 printf("bpfdetach: %s was not attached\n", ifp->if_xname); 2552 #endif 2553 } 2554 2555 /* 2556 * Interface departure handler. 2557 * Note departure event does not guarantee interface is going down. 2558 * Interface renaming is currently done via departure/arrival event set. 2559 * 2560 * Departure handled is called after all routes pointing to 2561 * given interface are removed and interface is in down state 2562 * restricting any packets to be sent/received. We assume it is now safe 2563 * to free data allocated by BPF. 2564 */ 2565 static void 2566 bpf_ifdetach(void *arg __unused, struct ifnet *ifp) 2567 { 2568 struct bpf_if *bp, *bp_temp; 2569 int nmatched = 0; 2570 2571 BPF_LOCK(); 2572 /* 2573 * Find matching entries in free list. 2574 * Nothing should be found if bpfdetach() was not called. 2575 */ 2576 LIST_FOREACH_SAFE(bp, &bpf_freelist, bif_next, bp_temp) { 2577 if (ifp != bp->bif_ifp) 2578 continue; 2579 2580 CTR3(KTR_NET, "%s: freeing BPF instance %p for interface %p", 2581 __func__, bp, ifp); 2582 2583 LIST_REMOVE(bp, bif_next); 2584 2585 rw_destroy(&bp->bif_lock); 2586 free(bp, M_BPF); 2587 2588 nmatched++; 2589 } 2590 BPF_UNLOCK(); 2591 2592 /* 2593 * Note that we cannot zero other pointers to 2594 * custom DLTs possibly used by given interface. 2595 */ 2596 if (nmatched != 0) 2597 ifp->if_bpf = NULL; 2598 } 2599 2600 /* 2601 * Get a list of available data link type of the interface. 2602 */ 2603 static int 2604 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl) 2605 { 2606 int n, error; 2607 struct ifnet *ifp; 2608 struct bpf_if *bp; 2609 2610 BPF_LOCK_ASSERT(); 2611 2612 ifp = d->bd_bif->bif_ifp; 2613 n = 0; 2614 error = 0; 2615 LIST_FOREACH(bp, &bpf_iflist, bif_next) { 2616 if (bp->bif_ifp != ifp) 2617 continue; 2618 if (bfl->bfl_list != NULL) { 2619 if (n >= bfl->bfl_len) 2620 return (ENOMEM); 2621 error = copyout(&bp->bif_dlt, 2622 bfl->bfl_list + n, sizeof(u_int)); 2623 } 2624 n++; 2625 } 2626 bfl->bfl_len = n; 2627 return (error); 2628 } 2629 2630 /* 2631 * Set the data link type of a BPF instance. 2632 */ 2633 static int 2634 bpf_setdlt(struct bpf_d *d, u_int dlt) 2635 { 2636 int error, opromisc; 2637 struct ifnet *ifp; 2638 struct bpf_if *bp; 2639 2640 BPF_LOCK_ASSERT(); 2641 2642 if (d->bd_bif->bif_dlt == dlt) 2643 return (0); 2644 ifp = d->bd_bif->bif_ifp; 2645 2646 LIST_FOREACH(bp, &bpf_iflist, bif_next) { 2647 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) 2648 break; 2649 } 2650 2651 if (bp != NULL) { 2652 opromisc = d->bd_promisc; 2653 bpf_attachd(d, bp); 2654 BPFD_LOCK(d); 2655 reset_d(d); 2656 BPFD_UNLOCK(d); 2657 if (opromisc) { 2658 error = ifpromisc(bp->bif_ifp, 1); 2659 if (error) 2660 if_printf(bp->bif_ifp, 2661 "bpf_setdlt: ifpromisc failed (%d)\n", 2662 error); 2663 else 2664 d->bd_promisc = 1; 2665 } 2666 } 2667 return (bp == NULL ? EINVAL : 0); 2668 } 2669 2670 static void 2671 bpf_drvinit(void *unused) 2672 { 2673 struct cdev *dev; 2674 2675 mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF); 2676 LIST_INIT(&bpf_iflist); 2677 LIST_INIT(&bpf_freelist); 2678 2679 dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf"); 2680 /* For compatibility */ 2681 make_dev_alias(dev, "bpf0"); 2682 2683 /* Register interface departure handler */ 2684 bpf_ifdetach_cookie = EVENTHANDLER_REGISTER( 2685 ifnet_departure_event, bpf_ifdetach, NULL, 2686 EVENTHANDLER_PRI_ANY); 2687 } 2688 2689 /* 2690 * Zero out the various packet counters associated with all of the bpf 2691 * descriptors. At some point, we will probably want to get a bit more 2692 * granular and allow the user to specify descriptors to be zeroed. 2693 */ 2694 static void 2695 bpf_zero_counters(void) 2696 { 2697 struct bpf_if *bp; 2698 struct bpf_d *bd; 2699 2700 BPF_LOCK(); 2701 LIST_FOREACH(bp, &bpf_iflist, bif_next) { 2702 BPFIF_RLOCK(bp); 2703 LIST_FOREACH(bd, &bp->bif_dlist, bd_next) { 2704 BPFD_LOCK(bd); 2705 bd->bd_rcount = 0; 2706 bd->bd_dcount = 0; 2707 bd->bd_fcount = 0; 2708 bd->bd_wcount = 0; 2709 bd->bd_wfcount = 0; 2710 bd->bd_zcopy = 0; 2711 BPFD_UNLOCK(bd); 2712 } 2713 BPFIF_RUNLOCK(bp); 2714 } 2715 BPF_UNLOCK(); 2716 } 2717 2718 /* 2719 * Fill filter statistics 2720 */ 2721 static void 2722 bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd) 2723 { 2724 2725 bzero(d, sizeof(*d)); 2726 BPFD_LOCK_ASSERT(bd); 2727 d->bd_structsize = sizeof(*d); 2728 /* XXX: reading should be protected by global lock */ 2729 d->bd_immediate = bd->bd_immediate; 2730 d->bd_promisc = bd->bd_promisc; 2731 d->bd_hdrcmplt = bd->bd_hdrcmplt; 2732 d->bd_direction = bd->bd_direction; 2733 d->bd_feedback = bd->bd_feedback; 2734 d->bd_async = bd->bd_async; 2735 d->bd_rcount = bd->bd_rcount; 2736 d->bd_dcount = bd->bd_dcount; 2737 d->bd_fcount = bd->bd_fcount; 2738 d->bd_sig = bd->bd_sig; 2739 d->bd_slen = bd->bd_slen; 2740 d->bd_hlen = bd->bd_hlen; 2741 d->bd_bufsize = bd->bd_bufsize; 2742 d->bd_pid = bd->bd_pid; 2743 strlcpy(d->bd_ifname, 2744 bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ); 2745 d->bd_locked = bd->bd_locked; 2746 d->bd_wcount = bd->bd_wcount; 2747 d->bd_wdcount = bd->bd_wdcount; 2748 d->bd_wfcount = bd->bd_wfcount; 2749 d->bd_zcopy = bd->bd_zcopy; 2750 d->bd_bufmode = bd->bd_bufmode; 2751 } 2752 2753 /* 2754 * Handle `netstat -B' stats request 2755 */ 2756 static int 2757 bpf_stats_sysctl(SYSCTL_HANDLER_ARGS) 2758 { 2759 struct xbpf_d *xbdbuf, *xbd, zerostats; 2760 int index, error; 2761 struct bpf_if *bp; 2762 struct bpf_d *bd; 2763 2764 /* 2765 * XXX This is not technically correct. It is possible for non 2766 * privileged users to open bpf devices. It would make sense 2767 * if the users who opened the devices were able to retrieve 2768 * the statistics for them, too. 2769 */ 2770 error = priv_check(req->td, PRIV_NET_BPF); 2771 if (error) 2772 return (error); 2773 /* 2774 * Check to see if the user is requesting that the counters be 2775 * zeroed out. Explicitly check that the supplied data is zeroed, 2776 * as we aren't allowing the user to set the counters currently. 2777 */ 2778 if (req->newptr != NULL) { 2779 if (req->newlen != sizeof(zerostats)) 2780 return (EINVAL); 2781 bzero(&zerostats, sizeof(zerostats)); 2782 xbd = req->newptr; 2783 if (bcmp(xbd, &zerostats, sizeof(*xbd)) != 0) 2784 return (EINVAL); 2785 bpf_zero_counters(); 2786 return (0); 2787 } 2788 if (req->oldptr == NULL) 2789 return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd))); 2790 if (bpf_bpfd_cnt == 0) 2791 return (SYSCTL_OUT(req, 0, 0)); 2792 xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK); 2793 BPF_LOCK(); 2794 if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) { 2795 BPF_UNLOCK(); 2796 free(xbdbuf, M_BPF); 2797 return (ENOMEM); 2798 } 2799 index = 0; 2800 LIST_FOREACH(bp, &bpf_iflist, bif_next) { 2801 BPFIF_RLOCK(bp); 2802 /* Send writers-only first */ 2803 LIST_FOREACH(bd, &bp->bif_wlist, bd_next) { 2804 xbd = &xbdbuf[index++]; 2805 BPFD_LOCK(bd); 2806 bpfstats_fill_xbpf(xbd, bd); 2807 BPFD_UNLOCK(bd); 2808 } 2809 LIST_FOREACH(bd, &bp->bif_dlist, bd_next) { 2810 xbd = &xbdbuf[index++]; 2811 BPFD_LOCK(bd); 2812 bpfstats_fill_xbpf(xbd, bd); 2813 BPFD_UNLOCK(bd); 2814 } 2815 BPFIF_RUNLOCK(bp); 2816 } 2817 BPF_UNLOCK(); 2818 error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd)); 2819 free(xbdbuf, M_BPF); 2820 return (error); 2821 } 2822 2823 SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE,bpf_drvinit,NULL); 2824 2825 #else /* !DEV_BPF && !NETGRAPH_BPF */ 2826 /* 2827 * NOP stubs to allow bpf-using drivers to load and function. 2828 * 2829 * A 'better' implementation would allow the core bpf functionality 2830 * to be loaded at runtime. 2831 */ 2832 static struct bpf_if bp_null; 2833 2834 void 2835 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) 2836 { 2837 } 2838 2839 void 2840 bpf_mtap(struct bpf_if *bp, struct mbuf *m) 2841 { 2842 } 2843 2844 void 2845 bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m) 2846 { 2847 } 2848 2849 void 2850 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) 2851 { 2852 2853 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf); 2854 } 2855 2856 void 2857 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 2858 { 2859 2860 *driverp = &bp_null; 2861 } 2862 2863 void 2864 bpfdetach(struct ifnet *ifp) 2865 { 2866 } 2867 2868 u_int 2869 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen) 2870 { 2871 return -1; /* "no filter" behaviour */ 2872 } 2873 2874 int 2875 bpf_validate(const struct bpf_insn *f, int len) 2876 { 2877 return 0; /* false */ 2878 } 2879 2880 #endif /* !DEV_BPF && !NETGRAPH_BPF */ 2881