/*-
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)bpf.c	8.4 (Berkeley) 1/9/95
 *
 * $FreeBSD$
 */

#include "opt_bpf.h"
#include "opt_mac.h"
#include "opt_netgraph.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/uio.h>

#include <sys/event.h>
#include <sys/file.h>
#include <sys/poll.h>

#include <sys/socket.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

static MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

#define PRINET	26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
static int bpf_bufsize = 4096;
SYSCTL_INT(_net_bpf, OID_AUTO, bufsize, CTLFLAG_RW,
    &bpf_bufsize, 0, "Default bpf buffer size");
static int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_net_bpf, OID_AUTO, maxbufsize, CTLFLAG_RW,
    &bpf_maxbufsize, 0, "Maximum bpf buffer size");

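/*
 * Both knobs are exported as read-write sysctls, so (assuming the usual
 * sysctl(8) naming for this module) a sketch of tuning them from userland
 * might look like:
 *
 *	# sysctl net.bpf.bufsize=65536
 *	# sysctl net.bpf.maxbufsize=524288
 *
 * A larger default reduces per-read overhead on busy interfaces at the
 * cost of wired kernel memory; maxbufsize bounds what BIOCSBLEN may ask
 * for below.
 */
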
/*
 * bpf_iflist is a list of BPF interface structures, each corresponding to a
 * specific DLT.  The same network interface might have several BPF interface
 * structures registered by different layers in the stack (e.g., 802.11
 * frames, ethernet frames, etc).
 */
static LIST_HEAD(, bpf_if)	bpf_iflist;
static struct mtx	bpf_mtx;		/* bpf global lock */

static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
static void	bpf_detachd(struct bpf_d *d);
static void	bpf_freed(struct bpf_d *);
static void	bpf_mcopy(const void *, void *, size_t);
static int	bpf_movein(struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void
		bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t));
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	filt_bpfdetach(struct knote *);
static int	filt_bpfread(struct knote *, long);
static void	bpf_drvinit(void *);
static void	bpf_clone(void *, char *, int, struct cdev **);

static d_open_t		bpfopen;
static d_close_t	bpfclose;
static d_read_t		bpfread;
static d_write_t	bpfwrite;
static d_ioctl_t	bpfioctl;
static d_poll_t		bpfpoll;
static d_kqfilter_t	bpfkqfilter;

static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	bpfopen,
	.d_close =	bpfclose,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_kqfilter =	bpfkqfilter,
};

static struct filterops bpfread_filtops =
	{ 1, NULL, filt_bpfdetach, filt_bpfread };

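/*
 * Userland reaches this driver through the clone device /dev/bpf<n>.  A
 * minimal sketch of acquiring a descriptor (error handling elided):
 *
 *	int fd = open("/dev/bpf0", O_RDWR);
 *
 * Each unit may be open by only one process at a time, so a typical
 * consumer walks /dev/bpf0, /dev/bpf1, ... until open() stops failing
 * with EBUSY.
 */
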
static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	struct uio *uio;
	int linktype, *datlen;
	struct mbuf **mp;
	struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * The en(4) ATM driver requires a 4-byte ATM pseudo header.
		 * Though it isn't standard, the VPI:VCI needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	if (len > MHLEN) {
		m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
	} else {
		MGETHDR(m, M_TRYWAIT, MT_DATA);
	}
	if (m == NULL)
		return (ENOBUFS);
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	/*
	 * Make room for the link header.
	 */
	if (hlen != 0) {
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = uiomove(sockp->sa_data, hlen, uio);
		if (error)
			goto bad;
	}
	error = uiomove(mtod(m, void *), len - hlen, uio);
	if (!error)
		return (0);
bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	BPFIF_LOCK(bp);
	d->bd_bif = bp;
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	*bp->bif_driverp = bp;
	BPFIF_UNLOCK(bp);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	bp = d->bd_bif;
	BPFIF_LOCK(bp);
	BPFD_LOCK(d);
	ifp = d->bd_bif->bif_ifp;

	/*
	 * Remove d from the interface's descriptor list.
	 */
	LIST_REMOVE(d, bd_next);

	/*
	 * Let the driver know that there are no more listeners.
	 */
	if (LIST_EMPTY(&bp->bif_dlist))
		*bp->bif_driverp = NULL;

	d->bd_bif = NULL;
	BPFD_UNLOCK(d);
	BPFIF_UNLOCK(bp);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged.
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
			    "bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
}

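/*
 * The bif_driverp cookie set and cleared above is what keeps an idle bpf
 * nearly free: a driver only calls into bpf when the pointer is non-NULL.
 * The canonical per-packet hook in a network driver looks roughly like
 * the following (net/bpf.h wraps the same test in the BPF_MTAP() macro):
 *
 *	if (ifp->if_bpf != NULL)
 *		bpf_mtap(ifp->if_bpf, m);
 */
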
/*
 * Open the bpf device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(dev, flags, fmt, td)
	struct cdev *dev;
	int flags;
	int fmt;
	struct thread *td;
{
	struct bpf_d *d;

	mtx_lock(&bpf_mtx);
	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d != NULL) {
		mtx_unlock(&bpf_mtx);
		return (EBUSY);
	}
	dev->si_drv1 = (struct bpf_d *)~0;	/* mark device in use */
	mtx_unlock(&bpf_mtx);

	if ((dev->si_flags & SI_NAMED) == 0)
		make_dev(&bpf_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600,
		    "bpf%d", dev2unit(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;
#ifdef MAC
	mac_init_bpfdesc(d);
	mac_create_bpfdesc(td->td_ucred, d);
#endif
	mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init(&d->bd_callout, NET_CALLOUT_MPSAFE);
	knlist_init(&d->bd_sel.si_note, &d->bd_mtx);

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpfclose(dev, flags, fmt, td)
	struct cdev *dev;
	int flags;
	int fmt;
	struct thread *td;
{
	struct bpf_d *d = dev->si_drv1;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);
	mtx_lock(&bpf_mtx);
	if (d->bd_bif)
		bpf_detachd(d);
	mtx_unlock(&bpf_mtx);
	selwakeuppri(&d->bd_sel, PRINET);
#ifdef MAC
	mac_destroy_bpfdesc(d);
#endif /* MAC */
	knlist_destroy(&d->bd_sel.si_note);
	bpf_freed(d);
	dev->si_drv1 = NULL;
	free(d, M_BPF);

	return (0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;

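/*
 * The three buffers cycle through the roles free -> store -> hold.  The
 * store buffer (bd_sbuf) accumulates arriving packets; a rotation hands
 * it to the reader as the hold buffer (bd_hbuf), promotes the free
 * buffer (bd_fbuf) to store, and leaves bd_fbuf NULL until bpfread()
 * returns the drained hold buffer to the free slot.  Callers must hold
 * the descriptor lock and may only rotate while bd_fbuf is non-NULL;
 * catchpacket() counts a drop instead when no free buffer is available.
 */
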
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(dev, uio, ioflag)
	struct cdev *dev;
	struct uio *uio;
	int ioflag;
{
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * One or more packets arrived since the previous
			 * read or while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (ioflag & O_NONBLOCK) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
		    "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	BPFD_LOCK(d);
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	BPFD_UNLOCK(d);

	return (error);
}

/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(d)
	struct bpf_d *d;
{

	BPFD_LOCK_ASSERT(d);
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
}

static void
bpf_timed_out(arg)
	void *arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	BPFD_UNLOCK(d);
}

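/*
 * A read() returns a whole buffer of concatenated records, each headed by
 * a struct bpf_hdr and padded out with BPF_WORDALIGN().  A minimal
 * userland consumption sketch (buffer size obtained earlier with
 * BIOCGBLEN, names illustrative):
 *
 *	u_char *p, *buf = malloc(blen);
 *	ssize_t n = read(fd, buf, blen);
 *
 *	for (p = buf; p < buf + n; ) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		process(p + bh->bh_hdrlen, bh->bh_caplen);
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */
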
static int
bpfwrite(dev, uio, ioflag)
	struct cdev *dev;
	struct uio *uio;
	int ioflag;
{
	struct bpf_d *d = dev->si_drv1;
	struct ifnet *ifp;
	struct mbuf *m;
	int error;
	struct sockaddr dst;
	int datlen;

	if (d->bd_bif == NULL)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if ((ifp->if_flags & IFF_UP) == 0)
		return (ENETDOWN);

	if (uio->uio_resid == 0)
		return (0);

	bzero(&dst, sizeof(dst));
	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
	if (error)
		return (error);

	if (datlen > ifp->if_mtu) {
		m_freem(m);
		return (EMSGSIZE);
	}

	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

#ifdef MAC
	BPFD_LOCK(d);
	mac_create_mbuf_from_bpfdesc(d, m);
	BPFD_UNLOCK(d);
#endif
	NET_LOCK_GIANT();
	error = (*ifp->if_output)(ifp, m, &dst, NULL);
	NET_UNLOCK_GIANT();
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.
 */
static void
reset_d(d)
	struct bpf_d *d;
{

	mtx_assert(&d->bd_mtx, MA_OWNED);
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

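/*
 * bpfwrite() turns one write() into one transmitted frame.  A hedged
 * userland sketch for DLT_EN10MB with "header already complete" set, so
 * that the source MAC in the caller-built Ethernet header is preserved
 * (frame contents assumed valid, error handling elided):
 *
 *	u_int one = 1;
 *
 *	ioctl(fd, BIOCSHDRCMPLT, &one);
 *	write(fd, frame, framelen);	(full link header plus payload)
 *
 * Note the limits enforced above: the whole write must fit in a cluster
 * (MCLBYTES), and the payload may not exceed the interface MTU.
 */
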
/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSBLEN		Set buffer len.
 *  BIOCSETF		Set read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGDLTLIST	Get list of supported link layer types.
 *  BIOCSDLT		Set link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag.
 *  BIOCSHDRCMPLT	Set "header already complete" flag.
 *  BIOCGSEESENT	Get "see packets sent" flag.
 *  BIOCSSEESENT	Set "see packets sent" flag.
 */
/* ARGSUSED */
static int
bpfioctl(dev, cmd, addr, flags, td)
	struct cdev *dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct thread *td;
{
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_LOCK(d);
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_UNLOCK(d);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != NULL)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			mtx_lock(&Giant);
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			mtx_unlock(&Giant);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag.
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag.
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag.
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag.
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}

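/*
 * Typical descriptor setup is a short ioctl sequence after open().  A
 * hedged sketch binding to an interface and enabling immediate mode
 * ("em0" is only an example name):
 *
 *	struct ifreq ifr;
 *	u_int blen, imm = 1;
 *
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);	(attach to em0)
 *	ioctl(fd, BIOCGBLEN, &blen);	(learn the buffer size for read)
 *	ioctl(fd, BIOCIMMEDIATE, &imm);	(deliver packets as they arrive)
 *
 * BIOCSBLEN, if wanted, must come before BIOCSETIF: once an interface is
 * bound the buffers are allocated and the size can no longer change.
 */
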
/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;

	if (fp->bf_insns == NULL) {
		if (fp->bf_len != 0)
			return (EINVAL);
		BPFD_LOCK(d);
		old = d->bd_filter;
		d->bd_filter = NULL;
		reset_d(d);
		BPFD_UNLOCK(d);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		BPFD_LOCK(d);
		old = d->bd_filter;
		d->bd_filter = fcode;
		reset_d(d);
		BPFD_UNLOCK(d);
		if (old != NULL)
			free((caddr_t)old, M_BPF);

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}

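/*
 * Filters are classic BPF bytecode, validated by bpf_validate() before
 * installation.  An illustrative program that accepts IPv4 frames on an
 * Ethernet interface (capturing the whole packet) and rejects everything
 * else; 0x0800 is ETHERTYPE_IP at absolute offset 12:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_LD+BPF_H+BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0x0800, 0, 1),
 *		BPF_STMT(BPF_RET+BPF_K, (u_int)-1),
 *		BPF_STMT(BPF_RET+BPF_K, 0),
 *	};
 *	struct bpf_program prog = { 4, insns };
 *
 *	ioctl(fd, BIOCSETF, &prog);
 *
 * A filter return value of 0 drops the packet; any other value is the
 * snapshot length in bytes, with (u_int)-1 meaning "no limit".
 */
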
/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL)
		return (ENXIO);

	/*
	 * Look through attached interfaces for the named one.
	 */
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == NULL || ifp != theywant)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != (struct bpf_if **)&ifp->if_bpf)
			continue;

		mtx_unlock(&bpf_mtx);
		/*
		 * We found the requested interface.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to the requested interface,
		 * just flush the buffer.
		 */
		if (d->bd_sbuf == NULL) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		return (0);
	}
	mtx_unlock(&bpf_mtx);
	/* Not found. */
	return (ENXIO);
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(dev, events, td)
	struct cdev *dev;
	int events;
	struct thread *td;
{
	struct bpf_d *d;
	int revents;

	d = dev->si_drv1;
	if (d->bd_bif == NULL)
		return (ENXIO);

	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_LOCK(d);
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}

/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int
bpfkqfilter(dev, kn)
	struct cdev *dev;
	struct knote *kn;
{
	struct bpf_d *d = (struct bpf_d *)dev->si_drv1;

	if (kn->kn_filter != EVFILT_READ)
		return (1);

	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	knlist_add(&d->bd_sel.si_note, kn, 0);

	return (0);
}

static void
filt_bpfdetach(kn)
	struct knote *kn;
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	knlist_remove(&d->bd_sel.si_note, kn, 0);
}

static int
filt_bpfread(kn, hint)
	struct knote *kn;
	long hint;
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK_ASSERT(d);
	ready = bpf_ready(d);
	if (ready) {
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}

	return (ready);
}

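/*
 * The knote machinery above makes a bpf descriptor usable with kqueue.
 * A hedged userland sketch waiting for readable data (error handling
 * elided):
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	(blocks; ev.data is the byte
 *						 count filt_bpfread reported)
 */
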
/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(bp, pkt, pktlen)
	struct bpf_if *bp;
	u_char *pkt;
	u_int pktlen;
{
	struct bpf_d *d;
	u_int slen;

	/*
	 * Lockless read to avoid cost of locking the interface if there are
	 * no descriptors attached.
	 */
	if (LIST_EMPTY(&bp->bif_dlist))
		return;

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0) {
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen, bcopy);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	size_t len;
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == NULL)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(bp, m)
	struct bpf_if *bp;
	struct mbuf *m;
{
	struct bpf_d *d;
	u_int pktlen, slen;

	/*
	 * Lockless read to avoid cost of locking the interface if there are
	 * no descriptors attached.
	 */
	if (LIST_EMPTY(&bp->bif_dlist))
		return;

	pktlen = m_length(m, NULL);
	if (pktlen == m->m_len) {
		bpf_tap(bp, mtod(m, u_char *), pktlen);
		return;
	}

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_mcopy);
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(bp, data, dlen, m)
	struct bpf_if *bp;
	void *data;
	u_int dlen;
	struct mbuf *m;
{
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;

	/*
	 * Lockless read to avoid cost of locking the interface if there are
	 * no descriptors attached.
	 */
	if (LIST_EMPTY(&bp->bif_dlist))
		return;

	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only set up what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)&mb, pktlen, 0);
		if (slen != 0)
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_mcopy);
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

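/*
 * bpf_mtap2() exists for drivers whose capture format prepends a pseudo
 * header that never travels in the mbuf itself.  The loopback interface
 * is the classic caller: DLT_NULL frames begin with a host-order address
 * family word, roughly (a sketch, not the verbatim if_loop.c code):
 *
 *	u_int32_t af = dst->sa_family;
 *
 *	if (ifp->if_bpf != NULL)
 *		bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m);
 */
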
/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "cpfn" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	struct bpf_d *d;
	u_char *pkt;
	u_int pktlen, snaplen;
	void (*cpfn)(const void *, void *, size_t);
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	int do_wakeup = 0;

	BPFD_LOCK_ASSERT(d);
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wake up any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		do_wakeup = 1;

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	microtime(&hp->bh_tstamp);
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;

	if (do_wakeup)
		bpf_wakeup(d);
}

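/*
 * A worked example of the arithmetic above, assuming 32-bit longs: for
 * DLT_EN10MB, bif_hdrlen is 18 (see bpfattach2()), so a 60-byte packet
 * captured in full gives totlen = 78.  If bd_slen was 101, BPF_WORDALIGN()
 * bumps curlen to 104; the bpf_hdr lands at sbuf + 104, the packet data at
 * sbuf + 104 + bh_hdrlen, and bd_slen becomes 182.  bh_datalen always
 * records the full on-the-wire length, even when bh_caplen was truncated
 * by the snapshot length or the buffer size.
 */
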
/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
	struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
	if (d->bd_fbuf == NULL)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
	if (d->bd_sbuf == NULL) {
		free(d->bd_fbuf, M_BPF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != NULL) {
		free(d->bd_sbuf, M_BPF);
		if (d->bd_hbuf != NULL)
			free(d->bd_hbuf, M_BPF);
		if (d->bd_fbuf != NULL)
			free(d->bd_fbuf, M_BPF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_BPF);
	mtx_destroy(&d->bd_mtx);
}

/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach2(ifp, dlt, hdrlen, driverp)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
	struct bpf_if **driverp;
{
	struct bpf_if *bp;

	bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
	if (bp == NULL)
		panic("bpfattach");

	LIST_INIT(&bp->bif_dlist);
	bp->bif_driverp = driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);

	mtx_lock(&bpf_mtx);
	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
	mtx_unlock(&bpf_mtx);

	*bp->bif_driverp = NULL;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	if (bootverbose)
		if_printf(ifp, "bpf attached\n");
}

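/*
 * Network drivers register with bpf once at interface attach time.  For
 * Ethernet devices ether_ifattach() does this on the driver's behalf,
 * effectively:
 *
 *	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
 *
 * A driver exposing a second framing of the same interface (say, raw
 * 802.11 in addition to Ethernet) would call bpfattach2() again with a
 * different dlt and its own struct bpf_if pointer.
 */
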
/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(ifp)
	struct ifnet *ifp;
{
	struct bpf_if *bp;
	struct bpf_d *d;

	/* Locate BPF interface information */
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (ifp == bp->bif_ifp)
			break;
	}

	/* Interface wasn't attached */
	if ((bp == NULL) || (bp->bif_ifp == NULL)) {
		mtx_unlock(&bpf_mtx);
		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
		return;
	}

	LIST_REMOVE(bp, bif_next);
	mtx_unlock(&bpf_mtx);

	while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
		bpf_detachd(d);
		BPFD_LOCK(d);
		bpf_wakeup(d);
		BPFD_UNLOCK(d);
	}

	mtx_destroy(&bp->bif_mtx);
	free(bp, M_BPF);
}

/*
 * Get a list of available data link types of the interface.
 */
static int
bpf_getdltlist(d, bfl)
	struct bpf_d *d;
	struct bpf_dltlist *bfl;
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len) {
				mtx_unlock(&bpf_mtx);
				return (ENOMEM);
			}
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	mtx_unlock(&bpf_mtx);
	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(d, dlt)
	struct bpf_d *d;
	u_int dlt;
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	mtx_unlock(&bpf_mtx);
	if (bp != NULL) {
		opromisc = d->bd_promisc;
		bpf_detachd(d);
		bpf_attachd(d, bp);
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error)
				if_printf(bp->bif_ifp,
				    "bpf_setdlt: ifpromisc failed (%d)\n",
				    error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}

static void
bpf_clone(arg, name, namelen, dev)
	void *arg;
	char *name;
	int namelen;
	struct cdev **dev;
{
	int u;

	if (*dev != NULL)
		return;
	if (dev_stdclone(name, NULL, "bpf", &u) != 1)
		return;
	*dev = make_dev(&bpf_cdevsw, unit2minor(u), UID_ROOT, GID_WHEEL, 0600,
	    "bpf%d", u);
	dev_ref(*dev);
	(*dev)->si_flags |= SI_CHEAPCLONE;
}

static void
bpf_drvinit(unused)
	void *unused;
{

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	LIST_INIT(&bpf_iflist);
	EVENTHANDLER_REGISTER(dev_clone, bpf_clone, 0, 1000);
}

SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL)

#else /* !DEV_BPF && !NETGRAPH_BPF */

/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */

void
bpf_tap(bp, pkt, pktlen)
	struct bpf_if *bp;
	u_char *pkt;
	u_int pktlen;
{
}

void
bpf_mtap(bp, m)
	struct bpf_if *bp;
	struct mbuf *m;
{
}

void
bpf_mtap2(bp, d, l, m)
	struct bpf_if *bp;
	void *d;
	u_int l;
	struct mbuf *m;
{
}

void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
}

void
bpfattach2(ifp, dlt, hdrlen, driverp)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
	struct bpf_if **driverp;
{
}

void
bpfdetach(ifp)
	struct ifnet *ifp;
{
}

u_int
bpf_filter(pc, p, wirelen, buflen)
	const struct bpf_insn *pc;
	u_char *p;
	u_int wirelen;
	u_int buflen;
{
	return (-1);	/* "no filter" behaviour */
}

int
bpf_validate(f, len)
	const struct bpf_insn *f;
	int len;
{
	return (0);	/* false */
}

#endif /* !DEV_BPF && !NETGRAPH_BPF */