/*-
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)bpf.c	8.4 (Berkeley) 1/9/95
 *
 * $FreeBSD$
 */

#include "opt_bpf.h"
#include "opt_mac.h"
#include "opt_netgraph.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/uio.h>

#include <sys/event.h>
#include <sys/file.h>
#include <sys/poll.h>

#include <sys/socket.h>

#include <net/if.h>
#include <net/bpf.h>
#ifdef BPF_JITTER
#include <net/bpf_jitter.h>
#endif
#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <net80211/ieee80211_freebsd.h>

#include <security/mac/mac_framework.h>

static MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

#define PRINET  26			/* interruptible */

#define	M_SKIP_BPF	M_SKIP_FIREWALL

/*
 * bpf_iflist is a list of BPF interface structures, each corresponding to a
 * specific DLT.  The same network interface might have several BPF interface
 * structures registered by different layers in the stack (e.g., 802.11
 * frames, Ethernet frames, etc.).
 */
static LIST_HEAD(, bpf_if)	bpf_iflist;
static struct mtx	bpf_mtx;		/* bpf global lock */
static int		bpf_bpfd_cnt;

static void	bpf_allocbufs(struct bpf_d *);
static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static void	bpf_mcopy(const void *, void *, size_t);
static int	bpf_movein(struct uio *, int, int, struct mbuf **,
		    struct sockaddr *, int *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void	bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t),
		    struct timeval *);
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	filt_bpfdetach(struct knote *);
static int	filt_bpfread(struct knote *, long);
static void	bpf_drvinit(void *);
static void	bpf_clone(void *, struct ucred *, char *, int, struct cdev **);
static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
static int bpf_bufsize = 4096;
SYSCTL_INT(_net_bpf, OID_AUTO, bufsize, CTLFLAG_RW,
    &bpf_bufsize, 0, "Default bpf buffer size");
static int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_net_bpf, OID_AUTO, maxbufsize, CTLFLAG_RW,
    &bpf_maxbufsize, 0, "Maximum bpf buffer size");
static int bpf_maxinsns = BPF_MAXINSNS;
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
    &bpf_maxinsns, 0, "Maximum bpf program instructions");
SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_RW,
    bpf_stats_sysctl, "bpf statistics portal");

static	d_open_t	bpfopen;
static	d_close_t	bpfclose;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;
static	d_kqfilter_t	bpfkqfilter;

static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	bpfopen,
	.d_close =	bpfclose,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_kqfilter =	bpfkqfilter,
};

static struct filterops bpfread_filtops =
	{ 1, NULL, filt_bpfdetach, filt_bpfread };
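
/*
 * Copy a packet from userland into a newly allocated mbuf, building a
 * link-level sockaddr for the output routine along the way.  The DLT of
 * the attached interface determines both the address family placed in
 * *sockp and the number of link header bytes (*hdrlen) copied out of the
 * user buffer into sockp->sa_data.  The packet is also run through the
 * descriptor's write filter (wfilter); a reject there fails the write
 * with EPERM.
 */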
static int
bpf_movein(struct uio *uio, int linktype, int mtu, struct mbuf **mp,
    struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
{
	const struct ieee80211_bpf_params *p;
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_NULL:
		/*
		 * DLT_NULL interfaces require a 4-byte pseudo header
		 * that carries the address family of the packet.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * The en(4) ATM driver requires a 4-byte ATM pseudo header.
		 * Though it isn't standard, the VPI:VCI needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;

	if (len - hlen > mtu)
		return (EMSGSIZE);

	if ((unsigned)len > MCLBYTES)
		return (EIO);

	if (len > MHLEN) {
		m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
	} else {
		MGETHDR(m, M_TRYWAIT, MT_DATA);
	}
	if (m == NULL)
		return (ENOBUFS);
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/*
	 * Make room for the link header, and copy it to the sockaddr.
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     zero.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(m->m_data, sockp->sa_data, hlen);
	}
	*hdrlen = hlen;

	return (0);
bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	BPFIF_LOCK(bp);
	d->bd_bif = bp;
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	bpf_bpfd_cnt++;
	BPFIF_UNLOCK(bp);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	bp = d->bd_bif;
	BPFIF_LOCK(bp);
	BPFD_LOCK(d);
	ifp = d->bd_bif->bif_ifp;

	/*
	 * Remove d from the interface's descriptor list.
	 */
	LIST_REMOVE(d, bd_next);

	bpf_bpfd_cnt--;
	d->bd_bif = NULL;
	BPFD_UNLOCK(d);
	BPFIF_UNLOCK(bp);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged.
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
				"bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
}

/*
 * Open the bpf device.  Returns ENXIO for an illegal minor device number,
 * EBUSY if the file is already open by another process.
 */
/* ARGSUSED */
static	int
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d;

	mtx_lock(&bpf_mtx);
	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d != NULL) {
		mtx_unlock(&bpf_mtx);
		return (EBUSY);
	}
	dev->si_drv1 = (struct bpf_d *)~0;	/* mark device in use */
	mtx_unlock(&bpf_mtx);

	if ((dev->si_flags & SI_NAMED) == 0)
		make_dev(&bpf_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600,
		    "bpf%d", dev2unit(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_direction = BPF_D_INOUT;
	d->bd_pid = td->td_proc->p_pid;
#ifdef MAC
	mac_init_bpfdesc(d);
	mac_create_bpfdesc(td->td_ucred, d);
#endif
	mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init(&d->bd_callout, NET_CALLOUT_MPSAFE);
	knlist_init(&d->bd_sel.si_note, &d->bd_mtx, NULL, NULL, NULL);

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static	int
bpfclose(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d = dev->si_drv1;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);
	mtx_lock(&bpf_mtx);
	if (d->bd_bif)
		bpf_detachd(d);
	mtx_unlock(&bpf_mtx);
	selwakeuppri(&d->bd_sel, PRINET);
#ifdef MAC
	mac_destroy_bpfdesc(d);
#endif /* MAC */
	knlist_destroy(&d->bd_sel.si_note);
	bpf_freed(d);
	dev->si_drv1 = NULL;
	free(d, M_BPF);

	return (0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;
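
/*
 * For reference, a minimal userland read loop against this buffering
 * model might look like the sketch below (illustrative only; error
 * handling omitted, and "em0" is an example interface name).  read()
 * must be handed a buffer of exactly the kernel buffer size and returns
 * a whole store buffer, which the reader then walks record by record
 * using the bpf_hdr lengths:
 *
 *	int fd = open("/dev/bpf0", O_RDONLY);
 *	struct ifreq ifr;
 *	u_int blen;
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);
 *	ioctl(fd, BIOCGBLEN, &blen);
 *	char *buf = malloc(blen);
 *	ssize_t n = read(fd, buf, blen);	-- must be exactly blen
 *	char *p = buf;
 *	while (p < buf + n) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		-- packet data: p + bh->bh_hdrlen, bh->bh_caplen bytes
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */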

/*
 * bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * One or more packets arrived since the previous
			 * read or while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (ioflag & O_NONBLOCK) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
		     "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bd_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	BPFD_LOCK(d);
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	BPFD_UNLOCK(d);

	return (error);
}

/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
}
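
/*
 * Callout handler for the read timeout armed by bpfpoll() and
 * filt_bpfread().  If packets have accumulated in the store buffer by
 * the time the timeout fires, wake up any waiting reader.
 */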
static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	BPFD_UNLOCK(d);
}
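
/*
 * Transmit a packet supplied by userland on the attached interface.
 * The data is pulled in with bpf_movein(), optionally looped back to
 * the input path when feedback mode (BIOCFEEDBACK) is enabled, and
 * handed to the interface's if_output routine.
 */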
static int
bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d = dev->si_drv1;
	struct ifnet *ifp;
	struct mbuf *m, *mc;
	struct sockaddr dst;
	int error, hlen;

	if (d->bd_bif == NULL)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if ((ifp->if_flags & IFF_UP) == 0)
		return (ENETDOWN);

	if (uio->uio_resid == 0)
		return (0);

	bzero(&dst, sizeof(dst));
	m = NULL;
	hlen = 0;
	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu,
	    &m, &dst, &hlen, d->bd_wfilter);
	if (error)
		return (error);

	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	if (d->bd_feedback) {
		mc = m_dup(m, M_DONTWAIT);
		if (mc != NULL)
			mc->m_pkthdr.rcvif = ifp;
		/* XXX Do not return the same packet twice. */
		if (d->bd_direction == BPF_D_INOUT)
			m->m_flags |= M_SKIP_BPF;
	} else
		mc = NULL;

	m->m_pkthdr.len -= hlen;
	m->m_len -= hlen;
	m->m_data += hlen;	/* XXX */

#ifdef MAC
	BPFD_LOCK(d);
	mac_create_mbuf_from_bpfdesc(d, m);
	if (mc != NULL)
		mac_create_mbuf_from_bpfdesc(d, mc);
	BPFD_UNLOCK(d);
#endif

	NET_LOCK_GIANT();
	error = (*ifp->if_output)(ifp, m, &dst, NULL);
	NET_UNLOCK_GIANT();

	if (mc != NULL) {
		if (error == 0) {
			NET_LOCK_GIANT();
			(*ifp->if_input)(ifp, mc);
			NET_UNLOCK_GIANT();
		} else
			m_freem(mc);
	}

	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive, drop, and match counts.
 */
static void
reset_d(struct bpf_d *d)
{

	mtx_assert(&d->bd_mtx, MA_OWNED);
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
	d->bd_fcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCSETWF		Set ethernet write filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag.
 *  BIOCSHDRCMPLT	Set "header already complete" flag.
 *  BIOCGDIRECTION	Get packet direction flag.
 *  BIOCSDIRECTION	Set packet direction flag.
 *  BIOCLOCK		Set "locked" flag.
 *  BIOCFEEDBACK	Set packet feedback mode.
 */
/* ARGSUSED */
static	int
bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	d->bd_pid = td->td_proc->p_pid;
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);

	if (d->bd_locked == 1) {
		switch (cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case BIOCFEEDBACK:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
			break;
		default:
			return (EPERM);
		}
	}
	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_LOCK(d);
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_UNLOCK(d);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				NET_LOCK_GIANT();
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
				NET_UNLOCK_GIANT();
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != NULL)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read or write filter.
	 */
	case BIOCSETF:
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)addr, cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			NET_LOCK_GIANT();
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			NET_UNLOCK_GIANT();
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag.
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag.
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get packet direction flag.
	 */
	case BIOCGDIRECTION:
		*(u_int *)addr = d->bd_direction;
		break;

	/*
	 * Set packet direction flag.
	 */
	case BIOCSDIRECTION:
		{
			u_int	direction;

			direction = *(u_int *)addr;
			switch (direction) {
			case BPF_D_IN:
			case BPF_D_INOUT:
			case BPF_D_OUT:
				d->bd_direction = direction;
				break;
			default:
				error = EINVAL;
			}
		}
		break;

	case BIOCFEEDBACK:
		d->bd_feedback = *(u_int *)addr;
		break;

	case BIOCLOCK:
		d->bd_locked = 1;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}
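
/*
 * For reference, installing a read filter from userland is a matter of
 * handing BIOCSETF a struct bpf_program.  A minimal sketch (illustrative
 * only; this particular program accepts every packet in full):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, (u_int)-1),
 *	};
 *	struct bpf_program prog = {
 *		sizeof(insns) / sizeof(insns[0]),
 *		insns,
 *	};
 *	ioctl(fd, BIOCSETF, &prog);
 *
 * bpf_setf() below copies the program in, validates it with
 * bpf_validate(), and swaps it into place under the descriptor lock.
 */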

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;
#ifdef BPF_JITTER
	bpf_jit_filter *ofunc;
#endif

	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
#ifdef BPF_JITTER
		ofunc = NULL;
#endif
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
#ifdef BPF_JITTER
		ofunc = d->bd_bfilter;
#endif
	}
	if (fp->bf_insns == NULL) {
		if (fp->bf_len != 0)
			return (EINVAL);
		BPFD_LOCK(d);
		if (wfilter)
			d->bd_wfilter = NULL;
		else {
			d->bd_rfilter = NULL;
#ifdef BPF_JITTER
			d->bd_bfilter = NULL;
#endif
		}
		reset_d(d);
		BPFD_UNLOCK(d);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif
		return (0);
	}
	flen = fp->bf_len;
	if (flen > bpf_maxinsns)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		BPFD_LOCK(d);
		if (wfilter)
			d->bd_wfilter = fcode;
		else {
			d->bd_rfilter = fcode;
#ifdef BPF_JITTER
			d->bd_bfilter = bpf_jitter(fcode, flen);
#endif
		}
		reset_d(d);
		BPFD_UNLOCK(d);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL || theywant->if_bpf == NULL)
		return (ENXIO);

	bp = theywant->if_bpf;
	/*
	 * Allocate the packet buffers if we need to.
	 * If we're already attached to requested interface,
	 * just flush the buffer.
	 */
	if (d->bd_sbuf == NULL)
		bpf_allocbufs(d);
	if (bp != d->bd_bif) {
		if (d->bd_bif)
			/*
			 * Detach if attached to something else.
			 */
			bpf_detachd(d);

		bpf_attachd(d, bp);
	}
	BPFD_LOCK(d);
	reset_d(d);
	BPFD_UNLOCK(d);
	return (0);
}

/*
 * Support for select() and poll() system calls.
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct cdev *dev, int events, struct thread *td)
{
	struct bpf_d *d;
	int revents;

	d = dev->si_drv1;
	if (d->bd_bif == NULL)
		return (ENXIO);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_LOCK(d);
	d->bd_pid = td->td_proc->p_pid;
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}

/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int
bpfkqfilter(struct cdev *dev, struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)dev->si_drv1;

	if (kn->kn_filter != EVFILT_READ)
		return (1);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	d->bd_pid = curthread->td_proc->p_pid;
	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	knlist_add(&d->bd_sel.si_note, kn, 1);
	BPFD_UNLOCK(d);

	return (0);
}
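
/*
 * Knote filter routines backing bpfread_filtops: filt_bpfdetach() is run
 * when the knote is torn down, and filt_bpfread() reports whether
 * buffered packet data is ready, arming the read timeout when it isn't.
 */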
static void
filt_bpfdetach(struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	knlist_remove(&d->bd_sel.si_note, kn, 0);
}

static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK_ASSERT(d);
	ready = bpf_ready(d);
	if (ready) {
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}

	return (ready);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
	u_int slen;
	int gottime;
	struct timeval tv;

	gottime = 0;
	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		BPFD_LOCK(d);
		++d->bd_rcount;
#ifdef BPF_JITTER
		if (bpf_jitter_enable != 0 && d->bd_bfilter != NULL)
			slen = (*(d->bd_bfilter->func))(pkt, pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen, bcopy, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == NULL)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}
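
/*
 * Check a packet against a descriptor's direction setting.  The macro
 * expands to an if-statement, so the statement that follows it runs when
 * the packet should be skipped: an inbound-only descriptor seeing an
 * outbound packet, or vice versa, judged by whether m_pkthdr.rcvif is
 * set.
 */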
#define	BPF_CHECK_DIRECTION(d, m) \
	if (((d)->bd_direction == BPF_D_IN && (m)->m_pkthdr.rcvif == NULL) || \
	    ((d)->bd_direction == BPF_D_OUT && (m)->m_pkthdr.rcvif != NULL))

/*
 * Incoming linkage from device drivers, when the packet is in an mbuf chain.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	if (m->m_flags & M_SKIP_BPF) {
		m->m_flags &= ~M_SKIP_BPF;
		return;
	}

	gottime = 0;

	pktlen = m_length(m, NULL);

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		BPF_CHECK_DIRECTION(d, m)
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
#ifdef BPF_JITTER
		/* XXX We cannot handle multiple mbufs. */
		if (bpf_jitter_enable != 0 && d->bd_bfilter != NULL &&
		    m->m_next == NULL)
			slen = (*(d->bd_bfilter->func))(mtod(m, u_char *),
			    pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_mcopy, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * Incoming linkage from device drivers, when the packet is in an mbuf
 * chain and is to be prepended by a contiguous header.
 */
void
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	if (m->m_flags & M_SKIP_BPF) {
		m->m_flags &= ~M_SKIP_BPF;
		return;
	}

	gottime = 0;

	pktlen = m_length(m, NULL);
	/*
	 * Craft an on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only set up what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		BPF_CHECK_DIRECTION(d, m)
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_mcopy, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

#undef	BPF_CHECK_DIRECTION

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "cpfn" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void (*cpfn)(const void *, void *, size_t), struct timeval *tv)
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	int do_wakeup = 0;

	BPFD_LOCK_ASSERT(d);
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		do_wakeup = 1;

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	hp->bh_tstamp = *tv;
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;

	if (do_wakeup)
		bpf_wakeup(d);
}

/*
 * Allocate the packet buffers for a descriptor.
 */
static void
bpf_allocbufs(struct bpf_d *d)
{

	KASSERT(d->bd_fbuf == NULL, ("bpf_allocbufs: bd_fbuf != NULL"));
	KASSERT(d->bd_sbuf == NULL, ("bpf_allocbufs: bd_sbuf != NULL"));
	KASSERT(d->bd_hbuf == NULL, ("bpf_allocbufs: bd_hbuf != NULL"));

	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
	d->bd_slen = 0;
	d->bd_hlen = 0;
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and has not yet been marked
	 * free.
	 */
	if (d->bd_sbuf != NULL) {
		free(d->bd_sbuf, M_BPF);
		if (d->bd_hbuf != NULL)
			free(d->bd_hbuf, M_BPF);
		if (d->bd_fbuf != NULL)
			free(d->bd_fbuf, M_BPF);
	}
	if (d->bd_rfilter) {
		free((caddr_t)d->bd_rfilter, M_BPF);
#ifdef BPF_JITTER
		bpf_destroy_jit_filter(d->bd_bfilter);
#endif
	}
	if (d->bd_wfilter)
		free((caddr_t)d->bd_wfilter, M_BPF);
	mtx_destroy(&d->bd_mtx);
}

/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
	struct bpf_if *bp;

	bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
	if (bp == NULL)
		panic("bpfattach");

	LIST_INIT(&bp->bif_dlist);
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);
	KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
	*driverp = bp;

	mtx_lock(&bpf_mtx);
	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
	mtx_unlock(&bpf_mtx);

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	if (bootverbose)
		if_printf(ifp, "bpf attached\n");
}

/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if	*bp;
	struct bpf_d	*d;

	/* Locate BPF interface information */
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (ifp == bp->bif_ifp)
			break;
	}

	/* Interface wasn't attached */
	if ((bp == NULL) || (bp->bif_ifp == NULL)) {
		mtx_unlock(&bpf_mtx);
		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
		return;
	}

	LIST_REMOVE(bp, bif_next);
	mtx_unlock(&bpf_mtx);

	while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
		bpf_detachd(d);
		BPFD_LOCK(d);
		bpf_wakeup(d);
		BPFD_UNLOCK(d);
	}

	mtx_destroy(&bp->bif_mtx);
	free(bp, M_BPF);
}

/*
 * Get the list of available data link types for the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len) {
				mtx_unlock(&bpf_mtx);
				return (ENOMEM);
			}
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	mtx_unlock(&bpf_mtx);
	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	mtx_unlock(&bpf_mtx);
	if (bp != NULL) {
		opromisc = d->bd_promisc;
		bpf_detachd(d);
		bpf_attachd(d, bp);
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error)
				if_printf(bp->bif_ifp,
				    "bpf_setdlt: ifpromisc failed (%d)\n",
				    error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}
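
/*
 * Clone handler for /dev/bpf: create a fresh device node on demand when
 * a unit that does not exist yet is opened.
 */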
static void
bpf_clone(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
	int u;

	if (*dev != NULL)
		return;
	if (dev_stdclone(name, NULL, "bpf", &u) != 1)
		return;
	*dev = make_dev(&bpf_cdevsw, unit2minor(u), UID_ROOT, GID_WHEEL, 0600,
	    "bpf%d", u);
	dev_ref(*dev);
	(*dev)->si_flags |= SI_CHEAPCLONE;
}

static void
bpf_drvinit(void *unused)
{

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	LIST_INIT(&bpf_iflist);
	EVENTHANDLER_REGISTER(dev_clone, bpf_clone, 0, 1000);
}
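
/*
 * Fill in the externally visible statistics structure (struct xbpf_d)
 * from a kernel descriptor, for export via the net.bpf.stats sysctl.
 */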
static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{

	bzero(d, sizeof(*d));
	BPFD_LOCK_ASSERT(bd);
	d->bd_immediate = bd->bd_immediate;
	d->bd_promisc = bd->bd_promisc;
	d->bd_hdrcmplt = bd->bd_hdrcmplt;
	d->bd_direction = bd->bd_direction;
	d->bd_feedback = bd->bd_feedback;
	d->bd_async = bd->bd_async;
	d->bd_rcount = bd->bd_rcount;
	d->bd_dcount = bd->bd_dcount;
	d->bd_fcount = bd->bd_fcount;
	d->bd_sig = bd->bd_sig;
	d->bd_slen = bd->bd_slen;
	d->bd_hlen = bd->bd_hlen;
	d->bd_bufsize = bd->bd_bufsize;
	d->bd_pid = bd->bd_pid;
	strlcpy(d->bd_ifname,
	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
	d->bd_locked = bd->bd_locked;
}

static int
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct xbpf_d *xbdbuf, *xbd;
	int index, error;
	struct bpf_if *bp;
	struct bpf_d *bd;

	/*
	 * XXX This is not technically correct.  It is possible for
	 * non-privileged users to open bpf devices.  It would make sense
	 * if the users who opened the devices were able to retrieve
	 * the statistics for them, too.
	 */
	error = priv_check(req->td, PRIV_NET_BPF);
	if (error)
		return (error);
	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
	if (bpf_bpfd_cnt == 0)
		return (SYSCTL_OUT(req, 0, 0));
	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
	mtx_lock(&bpf_mtx);
	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
		mtx_unlock(&bpf_mtx);
		free(xbdbuf, M_BPF);
		return (ENOMEM);
	}
	index = 0;
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_LOCK(bp);
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_LOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_UNLOCK(bd);
		}
		BPFIF_UNLOCK(bp);
	}
	mtx_unlock(&bpf_mtx);
	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
	free(xbdbuf, M_BPF);
	return (error);
}

SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL)

#else /* !DEV_BPF && !NETGRAPH_BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */
static struct bpf_if bp_null;

void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}

void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}

void
bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
{
}

void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{

	*driverp = &bp_null;
}

void
bpfdetach(struct ifnet *ifp)
{
}

u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
	return -1;	/* "no filter" behaviour */
}

int
bpf_validate(const struct bpf_insn *f, int len)
{
	return 0;	/* false */
}

#endif /* !DEV_BPF && !NETGRAPH_BPF */