1 /*- 2 * Copyright (c) 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)bpf.c 8.4 (Berkeley) 1/9/95 35 * 36 * $FreeBSD$ 37 */ 38 39 #include "opt_bpf.h" 40 #include "opt_mac.h" 41 #include "opt_netgraph.h" 42 43 #include <sys/types.h> 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/conf.h> 47 #include <sys/fcntl.h> 48 #include <sys/mac.h> 49 #include <sys/malloc.h> 50 #include <sys/mbuf.h> 51 #include <sys/time.h> 52 #include <sys/proc.h> 53 #include <sys/signalvar.h> 54 #include <sys/filio.h> 55 #include <sys/sockio.h> 56 #include <sys/ttycom.h> 57 #include <sys/uio.h> 58 59 #include <sys/event.h> 60 #include <sys/file.h> 61 #include <sys/poll.h> 62 #include <sys/proc.h> 63 64 #include <sys/socket.h> 65 66 #include <net/if.h> 67 #include <net/bpf.h> 68 #ifdef BPF_JITTER 69 #include <net/bpf_jitter.h> 70 #endif 71 #include <net/bpfdesc.h> 72 73 #include <netinet/in.h> 74 #include <netinet/if_ether.h> 75 #include <sys/kernel.h> 76 #include <sys/sysctl.h> 77 78 static MALLOC_DEFINE(M_BPF, "BPF", "BPF data"); 79 80 #if defined(DEV_BPF) || defined(NETGRAPH_BPF) 81 82 #define PRINET 26 /* interruptible */ 83 84 /* 85 * bpf_iflist is a list of BPF interface structures, each corresponding to a 86 * specific DLT. The same network interface might have several BPF interface 87 * structures registered by different layers in the stack (i.e., 802.11 88 * frames, ethernet frames, etc). 89 */ 90 static LIST_HEAD(, bpf_if) bpf_iflist; 91 static struct mtx bpf_mtx; /* bpf global lock */ 92 static int bpf_bpfd_cnt; 93 94 static int bpf_allocbufs(struct bpf_d *); 95 static void bpf_attachd(struct bpf_d *d, struct bpf_if *bp); 96 static void bpf_detachd(struct bpf_d *d); 97 static void bpf_freed(struct bpf_d *); 98 static void bpf_mcopy(const void *, void *, size_t); 99 static int bpf_movein(struct uio *, int, int, 100 struct mbuf **, struct sockaddr *, struct bpf_insn *); 101 static int bpf_setif(struct bpf_d *, struct ifreq *); 102 static void bpf_timed_out(void *); 103 static __inline void 104 bpf_wakeup(struct bpf_d *); 105 static void catchpacket(struct bpf_d *, u_char *, u_int, 106 u_int, void (*)(const void *, void *, size_t)); 107 static void reset_d(struct bpf_d *); 108 static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd); 109 static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *); 110 static int bpf_setdlt(struct bpf_d *, u_int); 111 static void filt_bpfdetach(struct knote *); 112 static int filt_bpfread(struct knote *, long); 113 static void bpf_drvinit(void *); 114 static void bpf_clone(void *, struct ucred *, char *, int, struct cdev **); 115 static int bpf_stats_sysctl(SYSCTL_HANDLER_ARGS); 116 117 /* 118 * The default read buffer size is patchable. 119 */ 120 SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl"); 121 static int bpf_bufsize = 4096; 122 SYSCTL_INT(_net_bpf, OID_AUTO, bufsize, CTLFLAG_RW, 123 &bpf_bufsize, 0, ""); 124 static int bpf_maxbufsize = BPF_MAXBUFSIZE; 125 SYSCTL_INT(_net_bpf, OID_AUTO, maxbufsize, CTLFLAG_RW, 126 &bpf_maxbufsize, 0, ""); 127 static int bpf_maxinsns = BPF_MAXINSNS; 128 SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW, 129 &bpf_maxinsns, 0, "Maximum bpf program instructions"); 130 SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_RW, 131 bpf_stats_sysctl, "bpf statistics portal"); 132 133 static d_open_t bpfopen; 134 static d_close_t bpfclose; 135 static d_read_t bpfread; 136 static d_write_t bpfwrite; 137 static d_ioctl_t bpfioctl; 138 static d_poll_t bpfpoll; 139 static d_kqfilter_t bpfkqfilter; 140 141 static struct cdevsw bpf_cdevsw = { 142 .d_version = D_VERSION, 143 .d_flags = D_NEEDGIANT, 144 .d_open = bpfopen, 145 .d_close = bpfclose, 146 .d_read = bpfread, 147 .d_write = bpfwrite, 148 .d_ioctl = bpfioctl, 149 .d_poll = bpfpoll, 150 .d_name = "bpf", 151 .d_kqfilter = bpfkqfilter, 152 }; 153 154 static struct filterops bpfread_filtops = 155 { 1, NULL, filt_bpfdetach, filt_bpfread }; 156 157 static int 158 bpf_movein(uio, linktype, mtu, mp, sockp, wfilter) 159 struct uio *uio; 160 int linktype; 161 int mtu; 162 struct mbuf **mp; 163 struct sockaddr *sockp; 164 struct bpf_insn *wfilter; 165 { 166 struct mbuf *m; 167 int error; 168 int len; 169 int hlen; 170 int slen; 171 172 /* 173 * Build a sockaddr based on the data link layer type. 174 * We do this at this level because the ethernet header 175 * is copied directly into the data field of the sockaddr. 176 * In the case of SLIP, there is no header and the packet 177 * is forwarded as is. 178 * Also, we are careful to leave room at the front of the mbuf 179 * for the link level header. 180 */ 181 switch (linktype) { 182 183 case DLT_SLIP: 184 sockp->sa_family = AF_INET; 185 hlen = 0; 186 break; 187 188 case DLT_EN10MB: 189 sockp->sa_family = AF_UNSPEC; 190 /* XXX Would MAXLINKHDR be better? */ 191 hlen = ETHER_HDR_LEN; 192 break; 193 194 case DLT_FDDI: 195 sockp->sa_family = AF_IMPLINK; 196 hlen = 0; 197 break; 198 199 case DLT_RAW: 200 sockp->sa_family = AF_UNSPEC; 201 hlen = 0; 202 break; 203 204 case DLT_NULL: 205 /* 206 * null interface types require a 4 byte pseudo header which 207 * corresponds to the address family of the packet. 208 */ 209 sockp->sa_family = AF_UNSPEC; 210 hlen = 4; 211 break; 212 213 case DLT_ATM_RFC1483: 214 /* 215 * en atm driver requires 4-byte atm pseudo header. 216 * though it isn't standard, vpi:vci needs to be 217 * specified anyway. 218 */ 219 sockp->sa_family = AF_UNSPEC; 220 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */ 221 break; 222 223 case DLT_PPP: 224 sockp->sa_family = AF_UNSPEC; 225 hlen = 4; /* This should match PPP_HDRLEN */ 226 break; 227 228 default: 229 return (EIO); 230 } 231 232 len = uio->uio_resid; 233 234 if (len - hlen > mtu) 235 return (EMSGSIZE); 236 237 if ((unsigned)len > MCLBYTES) 238 return (EIO); 239 240 if (len > MHLEN) { 241 m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR); 242 } else { 243 MGETHDR(m, M_TRYWAIT, MT_DATA); 244 } 245 if (m == NULL) 246 return (ENOBUFS); 247 m->m_pkthdr.len = m->m_len = len; 248 m->m_pkthdr.rcvif = NULL; 249 *mp = m; 250 251 if (m->m_len < hlen) { 252 error = EPERM; 253 goto bad; 254 } 255 256 error = uiomove(mtod(m, u_char *), len, uio); 257 if (error) 258 goto bad; 259 260 slen = bpf_filter(wfilter, mtod(m, u_char *), len, len); 261 if (slen == 0) { 262 error = EPERM; 263 goto bad; 264 } 265 266 /* 267 * Make room for link header, and copy it to sockaddr 268 */ 269 if (hlen != 0) { 270 bcopy(m->m_data, sockp->sa_data, hlen); 271 m->m_pkthdr.len -= hlen; 272 m->m_len -= hlen; 273 #if BSD >= 199103 274 m->m_data += hlen; /* XXX */ 275 #else 276 m->m_off += hlen; 277 #endif 278 } 279 280 return (0); 281 bad: 282 m_freem(m); 283 return (error); 284 } 285 286 /* 287 * Attach file to the bpf interface, i.e. make d listen on bp. 288 */ 289 static void 290 bpf_attachd(d, bp) 291 struct bpf_d *d; 292 struct bpf_if *bp; 293 { 294 /* 295 * Point d at bp, and add d to the interface's list of listeners. 296 * Finally, point the driver's bpf cookie at the interface so 297 * it will divert packets to bpf. 298 */ 299 BPFIF_LOCK(bp); 300 d->bd_bif = bp; 301 LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next); 302 303 bpf_bpfd_cnt++; 304 BPFIF_UNLOCK(bp); 305 } 306 307 /* 308 * Detach a file from its interface. 309 */ 310 static void 311 bpf_detachd(d) 312 struct bpf_d *d; 313 { 314 int error; 315 struct bpf_if *bp; 316 struct ifnet *ifp; 317 318 bp = d->bd_bif; 319 BPFIF_LOCK(bp); 320 BPFD_LOCK(d); 321 ifp = d->bd_bif->bif_ifp; 322 323 /* 324 * Remove d from the interface's descriptor list. 325 */ 326 LIST_REMOVE(d, bd_next); 327 328 bpf_bpfd_cnt--; 329 d->bd_bif = NULL; 330 BPFD_UNLOCK(d); 331 BPFIF_UNLOCK(bp); 332 333 /* 334 * Check if this descriptor had requested promiscuous mode. 335 * If so, turn it off. 336 */ 337 if (d->bd_promisc) { 338 d->bd_promisc = 0; 339 error = ifpromisc(ifp, 0); 340 if (error != 0 && error != ENXIO) { 341 /* 342 * ENXIO can happen if a pccard is unplugged 343 * Something is really wrong if we were able to put 344 * the driver into promiscuous mode, but can't 345 * take it out. 346 */ 347 if_printf(bp->bif_ifp, 348 "bpf_detach: ifpromisc failed (%d)\n", error); 349 } 350 } 351 } 352 353 /* 354 * Open ethernet device. Returns ENXIO for illegal minor device number, 355 * EBUSY if file is open by another process. 356 */ 357 /* ARGSUSED */ 358 static int 359 bpfopen(dev, flags, fmt, td) 360 struct cdev *dev; 361 int flags; 362 int fmt; 363 struct thread *td; 364 { 365 struct bpf_d *d; 366 367 mtx_lock(&bpf_mtx); 368 d = dev->si_drv1; 369 /* 370 * Each minor can be opened by only one process. If the requested 371 * minor is in use, return EBUSY. 372 */ 373 if (d != NULL) { 374 mtx_unlock(&bpf_mtx); 375 return (EBUSY); 376 } 377 dev->si_drv1 = (struct bpf_d *)~0; /* mark device in use */ 378 mtx_unlock(&bpf_mtx); 379 380 if ((dev->si_flags & SI_NAMED) == 0) 381 make_dev(&bpf_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600, 382 "bpf%d", dev2unit(dev)); 383 MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO); 384 dev->si_drv1 = d; 385 d->bd_bufsize = bpf_bufsize; 386 d->bd_sig = SIGIO; 387 d->bd_seesent = 1; 388 d->bd_pid = td->td_proc->p_pid; 389 #ifdef MAC 390 mac_init_bpfdesc(d); 391 mac_create_bpfdesc(td->td_ucred, d); 392 #endif 393 mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF); 394 callout_init(&d->bd_callout, NET_CALLOUT_MPSAFE); 395 knlist_init(&d->bd_sel.si_note, &d->bd_mtx, NULL, NULL, NULL); 396 397 return (0); 398 } 399 400 /* 401 * Close the descriptor by detaching it from its interface, 402 * deallocating its buffers, and marking it free. 403 */ 404 /* ARGSUSED */ 405 static int 406 bpfclose(dev, flags, fmt, td) 407 struct cdev *dev; 408 int flags; 409 int fmt; 410 struct thread *td; 411 { 412 struct bpf_d *d = dev->si_drv1; 413 414 BPFD_LOCK(d); 415 if (d->bd_state == BPF_WAITING) 416 callout_stop(&d->bd_callout); 417 d->bd_state = BPF_IDLE; 418 BPFD_UNLOCK(d); 419 funsetown(&d->bd_sigio); 420 mtx_lock(&bpf_mtx); 421 if (d->bd_bif) 422 bpf_detachd(d); 423 mtx_unlock(&bpf_mtx); 424 selwakeuppri(&d->bd_sel, PRINET); 425 #ifdef MAC 426 mac_destroy_bpfdesc(d); 427 #endif /* MAC */ 428 knlist_destroy(&d->bd_sel.si_note); 429 bpf_freed(d); 430 dev->si_drv1 = NULL; 431 free(d, M_BPF); 432 433 return (0); 434 } 435 436 437 /* 438 * Rotate the packet buffers in descriptor d. Move the store buffer 439 * into the hold slot, and the free buffer into the store slot. 440 * Zero the length of the new store buffer. 441 */ 442 #define ROTATE_BUFFERS(d) \ 443 (d)->bd_hbuf = (d)->bd_sbuf; \ 444 (d)->bd_hlen = (d)->bd_slen; \ 445 (d)->bd_sbuf = (d)->bd_fbuf; \ 446 (d)->bd_slen = 0; \ 447 (d)->bd_fbuf = NULL; 448 /* 449 * bpfread - read next chunk of packets from buffers 450 */ 451 static int 452 bpfread(dev, uio, ioflag) 453 struct cdev *dev; 454 struct uio *uio; 455 int ioflag; 456 { 457 struct bpf_d *d = dev->si_drv1; 458 int timed_out; 459 int error; 460 461 /* 462 * Restrict application to use a buffer the same size as 463 * as kernel buffers. 464 */ 465 if (uio->uio_resid != d->bd_bufsize) 466 return (EINVAL); 467 468 BPFD_LOCK(d); 469 if (d->bd_state == BPF_WAITING) 470 callout_stop(&d->bd_callout); 471 timed_out = (d->bd_state == BPF_TIMED_OUT); 472 d->bd_state = BPF_IDLE; 473 /* 474 * If the hold buffer is empty, then do a timed sleep, which 475 * ends when the timeout expires or when enough packets 476 * have arrived to fill the store buffer. 477 */ 478 while (d->bd_hbuf == NULL) { 479 if ((d->bd_immediate || timed_out) && d->bd_slen != 0) { 480 /* 481 * A packet(s) either arrived since the previous 482 * read or arrived while we were asleep. 483 * Rotate the buffers and return what's here. 484 */ 485 ROTATE_BUFFERS(d); 486 break; 487 } 488 489 /* 490 * No data is available, check to see if the bpf device 491 * is still pointed at a real interface. If not, return 492 * ENXIO so that the userland process knows to rebind 493 * it before using it again. 494 */ 495 if (d->bd_bif == NULL) { 496 BPFD_UNLOCK(d); 497 return (ENXIO); 498 } 499 500 if (ioflag & O_NONBLOCK) { 501 BPFD_UNLOCK(d); 502 return (EWOULDBLOCK); 503 } 504 error = msleep(d, &d->bd_mtx, PRINET|PCATCH, 505 "bpf", d->bd_rtout); 506 if (error == EINTR || error == ERESTART) { 507 BPFD_UNLOCK(d); 508 return (error); 509 } 510 if (error == EWOULDBLOCK) { 511 /* 512 * On a timeout, return what's in the buffer, 513 * which may be nothing. If there is something 514 * in the store buffer, we can rotate the buffers. 515 */ 516 if (d->bd_hbuf) 517 /* 518 * We filled up the buffer in between 519 * getting the timeout and arriving 520 * here, so we don't need to rotate. 521 */ 522 break; 523 524 if (d->bd_slen == 0) { 525 BPFD_UNLOCK(d); 526 return (0); 527 } 528 ROTATE_BUFFERS(d); 529 break; 530 } 531 } 532 /* 533 * At this point, we know we have something in the hold slot. 534 */ 535 BPFD_UNLOCK(d); 536 537 /* 538 * Move data from hold buffer into user space. 539 * We know the entire buffer is transferred since 540 * we checked above that the read buffer is bpf_bufsize bytes. 541 */ 542 error = uiomove(d->bd_hbuf, d->bd_hlen, uio); 543 544 BPFD_LOCK(d); 545 d->bd_fbuf = d->bd_hbuf; 546 d->bd_hbuf = NULL; 547 d->bd_hlen = 0; 548 BPFD_UNLOCK(d); 549 550 return (error); 551 } 552 553 554 /* 555 * If there are processes sleeping on this descriptor, wake them up. 556 */ 557 static __inline void 558 bpf_wakeup(d) 559 struct bpf_d *d; 560 { 561 562 BPFD_LOCK_ASSERT(d); 563 if (d->bd_state == BPF_WAITING) { 564 callout_stop(&d->bd_callout); 565 d->bd_state = BPF_IDLE; 566 } 567 wakeup(d); 568 if (d->bd_async && d->bd_sig && d->bd_sigio) 569 pgsigio(&d->bd_sigio, d->bd_sig, 0); 570 571 selwakeuppri(&d->bd_sel, PRINET); 572 KNOTE_LOCKED(&d->bd_sel.si_note, 0); 573 } 574 575 static void 576 bpf_timed_out(arg) 577 void *arg; 578 { 579 struct bpf_d *d = (struct bpf_d *)arg; 580 581 BPFD_LOCK(d); 582 if (d->bd_state == BPF_WAITING) { 583 d->bd_state = BPF_TIMED_OUT; 584 if (d->bd_slen != 0) 585 bpf_wakeup(d); 586 } 587 BPFD_UNLOCK(d); 588 } 589 590 static int 591 bpfwrite(dev, uio, ioflag) 592 struct cdev *dev; 593 struct uio *uio; 594 int ioflag; 595 { 596 struct bpf_d *d = dev->si_drv1; 597 struct ifnet *ifp; 598 struct mbuf *m; 599 int error; 600 struct sockaddr dst; 601 602 if (d->bd_bif == NULL) 603 return (ENXIO); 604 605 ifp = d->bd_bif->bif_ifp; 606 607 if ((ifp->if_flags & IFF_UP) == 0) 608 return (ENETDOWN); 609 610 if (uio->uio_resid == 0) 611 return (0); 612 613 bzero(&dst, sizeof(dst)); 614 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu, 615 &m, &dst, d->bd_wfilter); 616 if (error) 617 return (error); 618 619 if (d->bd_hdrcmplt) 620 dst.sa_family = pseudo_AF_HDRCMPLT; 621 622 #ifdef MAC 623 BPFD_LOCK(d); 624 mac_create_mbuf_from_bpfdesc(d, m); 625 BPFD_UNLOCK(d); 626 #endif 627 NET_LOCK_GIANT(); 628 error = (*ifp->if_output)(ifp, m, &dst, NULL); 629 NET_UNLOCK_GIANT(); 630 /* 631 * The driver frees the mbuf. 632 */ 633 return (error); 634 } 635 636 /* 637 * Reset a descriptor by flushing its packet buffer and clearing the 638 * receive and drop counts. 639 */ 640 static void 641 reset_d(d) 642 struct bpf_d *d; 643 { 644 645 mtx_assert(&d->bd_mtx, MA_OWNED); 646 if (d->bd_hbuf) { 647 /* Free the hold buffer. */ 648 d->bd_fbuf = d->bd_hbuf; 649 d->bd_hbuf = NULL; 650 } 651 d->bd_slen = 0; 652 d->bd_hlen = 0; 653 d->bd_rcount = 0; 654 d->bd_dcount = 0; 655 d->bd_fcount = 0; 656 } 657 658 /* 659 * FIONREAD Check for read packet available. 660 * SIOCGIFADDR Get interface address - convenient hook to driver. 661 * BIOCGBLEN Get buffer len [for read()]. 662 * BIOCSETF Set ethernet read filter. 663 * BIOCSETWF Set ethernet write filter. 664 * BIOCFLUSH Flush read packet buffer. 665 * BIOCPROMISC Put interface into promiscuous mode. 666 * BIOCGDLT Get link layer type. 667 * BIOCGETIF Get interface name. 668 * BIOCSETIF Set interface. 669 * BIOCSRTIMEOUT Set read timeout. 670 * BIOCGRTIMEOUT Get read timeout. 671 * BIOCGSTATS Get packet stats. 672 * BIOCIMMEDIATE Set immediate mode. 673 * BIOCVERSION Get filter language version. 674 * BIOCGHDRCMPLT Get "header already complete" flag 675 * BIOCSHDRCMPLT Set "header already complete" flag 676 * BIOCGSEESENT Get "see packets sent" flag 677 * BIOCSSEESENT Set "see packets sent" flag 678 * BIOCLOCK Set "locked" flag 679 */ 680 /* ARGSUSED */ 681 static int 682 bpfioctl(dev, cmd, addr, flags, td) 683 struct cdev *dev; 684 u_long cmd; 685 caddr_t addr; 686 int flags; 687 struct thread *td; 688 { 689 struct bpf_d *d = dev->si_drv1; 690 int error = 0; 691 692 /* 693 * Refresh PID associated with this descriptor. 694 */ 695 BPFD_LOCK(d); 696 d->bd_pid = td->td_proc->p_pid; 697 if (d->bd_state == BPF_WAITING) 698 callout_stop(&d->bd_callout); 699 d->bd_state = BPF_IDLE; 700 BPFD_UNLOCK(d); 701 702 if (d->bd_locked == 1) { 703 switch (cmd) { 704 case BIOCGBLEN: 705 case BIOCFLUSH: 706 case BIOCGDLT: 707 case BIOCGDLTLIST: 708 case BIOCGETIF: 709 case BIOCGRTIMEOUT: 710 case BIOCGSTATS: 711 case BIOCVERSION: 712 case BIOCGRSIG: 713 case BIOCGHDRCMPLT: 714 case FIONREAD: 715 case BIOCLOCK: 716 case BIOCSRTIMEOUT: 717 case BIOCIMMEDIATE: 718 case TIOCGPGRP: 719 break; 720 default: 721 return (EPERM); 722 } 723 } 724 switch (cmd) { 725 726 default: 727 error = EINVAL; 728 break; 729 730 /* 731 * Check for read packet available. 732 */ 733 case FIONREAD: 734 { 735 int n; 736 737 BPFD_LOCK(d); 738 n = d->bd_slen; 739 if (d->bd_hbuf) 740 n += d->bd_hlen; 741 BPFD_UNLOCK(d); 742 743 *(int *)addr = n; 744 break; 745 } 746 747 case SIOCGIFADDR: 748 { 749 struct ifnet *ifp; 750 751 if (d->bd_bif == NULL) 752 error = EINVAL; 753 else { 754 ifp = d->bd_bif->bif_ifp; 755 error = (*ifp->if_ioctl)(ifp, cmd, addr); 756 } 757 break; 758 } 759 760 /* 761 * Get buffer len [for read()]. 762 */ 763 case BIOCGBLEN: 764 *(u_int *)addr = d->bd_bufsize; 765 break; 766 767 /* 768 * Set buffer length. 769 */ 770 case BIOCSBLEN: 771 if (d->bd_bif != NULL) 772 error = EINVAL; 773 else { 774 u_int size = *(u_int *)addr; 775 776 if (size > bpf_maxbufsize) 777 *(u_int *)addr = size = bpf_maxbufsize; 778 else if (size < BPF_MINBUFSIZE) 779 *(u_int *)addr = size = BPF_MINBUFSIZE; 780 d->bd_bufsize = size; 781 } 782 break; 783 784 /* 785 * Set link layer read filter. 786 */ 787 case BIOCSETF: 788 case BIOCSETWF: 789 error = bpf_setf(d, (struct bpf_program *)addr, cmd); 790 break; 791 792 /* 793 * Flush read packet buffer. 794 */ 795 case BIOCFLUSH: 796 BPFD_LOCK(d); 797 reset_d(d); 798 BPFD_UNLOCK(d); 799 break; 800 801 /* 802 * Put interface into promiscuous mode. 803 */ 804 case BIOCPROMISC: 805 if (d->bd_bif == NULL) { 806 /* 807 * No interface attached yet. 808 */ 809 error = EINVAL; 810 break; 811 } 812 if (d->bd_promisc == 0) { 813 mtx_lock(&Giant); 814 error = ifpromisc(d->bd_bif->bif_ifp, 1); 815 mtx_unlock(&Giant); 816 if (error == 0) 817 d->bd_promisc = 1; 818 } 819 break; 820 821 /* 822 * Get current data link type. 823 */ 824 case BIOCGDLT: 825 if (d->bd_bif == NULL) 826 error = EINVAL; 827 else 828 *(u_int *)addr = d->bd_bif->bif_dlt; 829 break; 830 831 /* 832 * Get a list of supported data link types. 833 */ 834 case BIOCGDLTLIST: 835 if (d->bd_bif == NULL) 836 error = EINVAL; 837 else 838 error = bpf_getdltlist(d, (struct bpf_dltlist *)addr); 839 break; 840 841 /* 842 * Set data link type. 843 */ 844 case BIOCSDLT: 845 if (d->bd_bif == NULL) 846 error = EINVAL; 847 else 848 error = bpf_setdlt(d, *(u_int *)addr); 849 break; 850 851 /* 852 * Get interface name. 853 */ 854 case BIOCGETIF: 855 if (d->bd_bif == NULL) 856 error = EINVAL; 857 else { 858 struct ifnet *const ifp = d->bd_bif->bif_ifp; 859 struct ifreq *const ifr = (struct ifreq *)addr; 860 861 strlcpy(ifr->ifr_name, ifp->if_xname, 862 sizeof(ifr->ifr_name)); 863 } 864 break; 865 866 /* 867 * Set interface. 868 */ 869 case BIOCSETIF: 870 error = bpf_setif(d, (struct ifreq *)addr); 871 break; 872 873 /* 874 * Set read timeout. 875 */ 876 case BIOCSRTIMEOUT: 877 { 878 struct timeval *tv = (struct timeval *)addr; 879 880 /* 881 * Subtract 1 tick from tvtohz() since this isn't 882 * a one-shot timer. 883 */ 884 if ((error = itimerfix(tv)) == 0) 885 d->bd_rtout = tvtohz(tv) - 1; 886 break; 887 } 888 889 /* 890 * Get read timeout. 891 */ 892 case BIOCGRTIMEOUT: 893 { 894 struct timeval *tv = (struct timeval *)addr; 895 896 tv->tv_sec = d->bd_rtout / hz; 897 tv->tv_usec = (d->bd_rtout % hz) * tick; 898 break; 899 } 900 901 /* 902 * Get packet stats. 903 */ 904 case BIOCGSTATS: 905 { 906 struct bpf_stat *bs = (struct bpf_stat *)addr; 907 908 bs->bs_recv = d->bd_rcount; 909 bs->bs_drop = d->bd_dcount; 910 break; 911 } 912 913 /* 914 * Set immediate mode. 915 */ 916 case BIOCIMMEDIATE: 917 d->bd_immediate = *(u_int *)addr; 918 break; 919 920 case BIOCVERSION: 921 { 922 struct bpf_version *bv = (struct bpf_version *)addr; 923 924 bv->bv_major = BPF_MAJOR_VERSION; 925 bv->bv_minor = BPF_MINOR_VERSION; 926 break; 927 } 928 929 /* 930 * Get "header already complete" flag 931 */ 932 case BIOCGHDRCMPLT: 933 *(u_int *)addr = d->bd_hdrcmplt; 934 break; 935 936 case BIOCLOCK: 937 d->bd_locked = 1; 938 break; 939 /* 940 * Set "header already complete" flag 941 */ 942 case BIOCSHDRCMPLT: 943 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0; 944 break; 945 946 /* 947 * Get "see sent packets" flag 948 */ 949 case BIOCGSEESENT: 950 *(u_int *)addr = d->bd_seesent; 951 break; 952 953 /* 954 * Set "see sent packets" flag 955 */ 956 case BIOCSSEESENT: 957 d->bd_seesent = *(u_int *)addr; 958 break; 959 960 case FIONBIO: /* Non-blocking I/O */ 961 break; 962 963 case FIOASYNC: /* Send signal on receive packets */ 964 d->bd_async = *(int *)addr; 965 break; 966 967 case FIOSETOWN: 968 error = fsetown(*(int *)addr, &d->bd_sigio); 969 break; 970 971 case FIOGETOWN: 972 *(int *)addr = fgetown(&d->bd_sigio); 973 break; 974 975 /* This is deprecated, FIOSETOWN should be used instead. */ 976 case TIOCSPGRP: 977 error = fsetown(-(*(int *)addr), &d->bd_sigio); 978 break; 979 980 /* This is deprecated, FIOGETOWN should be used instead. */ 981 case TIOCGPGRP: 982 *(int *)addr = -fgetown(&d->bd_sigio); 983 break; 984 985 case BIOCSRSIG: /* Set receive signal */ 986 { 987 u_int sig; 988 989 sig = *(u_int *)addr; 990 991 if (sig >= NSIG) 992 error = EINVAL; 993 else 994 d->bd_sig = sig; 995 break; 996 } 997 case BIOCGRSIG: 998 *(u_int *)addr = d->bd_sig; 999 break; 1000 } 1001 return (error); 1002 } 1003 1004 /* 1005 * Set d's packet filter program to fp. If this file already has a filter, 1006 * free it and replace it. Returns EINVAL for bogus requests. 1007 */ 1008 static int 1009 bpf_setf(d, fp, cmd) 1010 struct bpf_d *d; 1011 struct bpf_program *fp; 1012 u_long cmd; 1013 { 1014 struct bpf_insn *fcode, *old; 1015 u_int wfilter, flen, size; 1016 #ifdef BPF_JITTER 1017 bpf_jit_filter *ofunc; 1018 #endif 1019 1020 if (cmd == BIOCSETWF) { 1021 old = d->bd_wfilter; 1022 wfilter = 1; 1023 #ifdef BPF_JITTER 1024 ofunc = NULL; 1025 #endif 1026 } else { 1027 wfilter = 0; 1028 old = d->bd_rfilter; 1029 #ifdef BPF_JITTER 1030 ofunc = d->bd_bfilter; 1031 #endif 1032 } 1033 if (fp->bf_insns == NULL) { 1034 if (fp->bf_len != 0) 1035 return (EINVAL); 1036 BPFD_LOCK(d); 1037 if (wfilter) 1038 d->bd_wfilter = NULL; 1039 else { 1040 d->bd_rfilter = NULL; 1041 #ifdef BPF_JITTER 1042 d->bd_bfilter = NULL; 1043 #endif 1044 } 1045 reset_d(d); 1046 BPFD_UNLOCK(d); 1047 if (old != NULL) 1048 free((caddr_t)old, M_BPF); 1049 #ifdef BPF_JITTER 1050 if (ofunc != NULL) 1051 bpf_destroy_jit_filter(ofunc); 1052 #endif 1053 return (0); 1054 } 1055 flen = fp->bf_len; 1056 if (flen > bpf_maxinsns) 1057 return (EINVAL); 1058 1059 size = flen * sizeof(*fp->bf_insns); 1060 fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK); 1061 if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 && 1062 bpf_validate(fcode, (int)flen)) { 1063 BPFD_LOCK(d); 1064 if (wfilter) 1065 d->bd_wfilter = fcode; 1066 else { 1067 d->bd_rfilter = fcode; 1068 #ifdef BPF_JITTER 1069 d->bd_bfilter = bpf_jitter(fcode, flen); 1070 #endif 1071 } 1072 reset_d(d); 1073 BPFD_UNLOCK(d); 1074 if (old != NULL) 1075 free((caddr_t)old, M_BPF); 1076 #ifdef BPF_JITTER 1077 if (ofunc != NULL) 1078 bpf_destroy_jit_filter(ofunc); 1079 #endif 1080 1081 return (0); 1082 } 1083 free((caddr_t)fcode, M_BPF); 1084 return (EINVAL); 1085 } 1086 1087 /* 1088 * Detach a file from its current interface (if attached at all) and attach 1089 * to the interface indicated by the name stored in ifr. 1090 * Return an errno or 0. 1091 */ 1092 static int 1093 bpf_setif(d, ifr) 1094 struct bpf_d *d; 1095 struct ifreq *ifr; 1096 { 1097 struct bpf_if *bp; 1098 int error; 1099 struct ifnet *theywant; 1100 1101 theywant = ifunit(ifr->ifr_name); 1102 if (theywant == NULL || theywant->if_bpf == NULL) 1103 return (ENXIO); 1104 1105 bp = theywant->if_bpf; 1106 /* 1107 * Allocate the packet buffers if we need to. 1108 * If we're already attached to requested interface, 1109 * just flush the buffer. 1110 */ 1111 if (d->bd_sbuf == NULL) { 1112 error = bpf_allocbufs(d); 1113 if (error != 0) 1114 return (error); 1115 } 1116 if (bp != d->bd_bif) { 1117 if (d->bd_bif) 1118 /* 1119 * Detach if attached to something else. 1120 */ 1121 bpf_detachd(d); 1122 1123 bpf_attachd(d, bp); 1124 } 1125 BPFD_LOCK(d); 1126 reset_d(d); 1127 BPFD_UNLOCK(d); 1128 return (0); 1129 } 1130 1131 /* 1132 * Support for select() and poll() system calls 1133 * 1134 * Return true iff the specific operation will not block indefinitely. 1135 * Otherwise, return false but make a note that a selwakeup() must be done. 1136 */ 1137 static int 1138 bpfpoll(dev, events, td) 1139 struct cdev *dev; 1140 int events; 1141 struct thread *td; 1142 { 1143 struct bpf_d *d; 1144 int revents; 1145 1146 d = dev->si_drv1; 1147 if (d->bd_bif == NULL) 1148 return (ENXIO); 1149 1150 /* 1151 * Refresh PID associated with this descriptor. 1152 */ 1153 revents = events & (POLLOUT | POLLWRNORM); 1154 BPFD_LOCK(d); 1155 d->bd_pid = td->td_proc->p_pid; 1156 if (events & (POLLIN | POLLRDNORM)) { 1157 if (bpf_ready(d)) 1158 revents |= events & (POLLIN | POLLRDNORM); 1159 else { 1160 selrecord(td, &d->bd_sel); 1161 /* Start the read timeout if necessary. */ 1162 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) { 1163 callout_reset(&d->bd_callout, d->bd_rtout, 1164 bpf_timed_out, d); 1165 d->bd_state = BPF_WAITING; 1166 } 1167 } 1168 } 1169 BPFD_UNLOCK(d); 1170 return (revents); 1171 } 1172 1173 /* 1174 * Support for kevent() system call. Register EVFILT_READ filters and 1175 * reject all others. 1176 */ 1177 int 1178 bpfkqfilter(dev, kn) 1179 struct cdev *dev; 1180 struct knote *kn; 1181 { 1182 struct bpf_d *d = (struct bpf_d *)dev->si_drv1; 1183 1184 if (kn->kn_filter != EVFILT_READ) 1185 return (1); 1186 1187 /* 1188 * Refresh PID associated with this descriptor. 1189 */ 1190 BPFD_LOCK(d); 1191 d->bd_pid = curthread->td_proc->p_pid; 1192 kn->kn_fop = &bpfread_filtops; 1193 kn->kn_hook = d; 1194 knlist_add(&d->bd_sel.si_note, kn, 0); 1195 BPFD_UNLOCK(d); 1196 1197 return (0); 1198 } 1199 1200 static void 1201 filt_bpfdetach(kn) 1202 struct knote *kn; 1203 { 1204 struct bpf_d *d = (struct bpf_d *)kn->kn_hook; 1205 1206 BPFD_LOCK(d); 1207 knlist_remove(&d->bd_sel.si_note, kn, 0); 1208 BPFD_UNLOCK(d); 1209 } 1210 1211 static int 1212 filt_bpfread(kn, hint) 1213 struct knote *kn; 1214 long hint; 1215 { 1216 struct bpf_d *d = (struct bpf_d *)kn->kn_hook; 1217 int ready; 1218 1219 BPFD_LOCK_ASSERT(d); 1220 ready = bpf_ready(d); 1221 if (ready) { 1222 kn->kn_data = d->bd_slen; 1223 if (d->bd_hbuf) 1224 kn->kn_data += d->bd_hlen; 1225 } 1226 else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) { 1227 callout_reset(&d->bd_callout, d->bd_rtout, 1228 bpf_timed_out, d); 1229 d->bd_state = BPF_WAITING; 1230 } 1231 1232 return (ready); 1233 } 1234 1235 /* 1236 * Incoming linkage from device drivers. Process the packet pkt, of length 1237 * pktlen, which is stored in a contiguous buffer. The packet is parsed 1238 * by each process' filter, and if accepted, stashed into the corresponding 1239 * buffer. 1240 */ 1241 void 1242 bpf_tap(bp, pkt, pktlen) 1243 struct bpf_if *bp; 1244 u_char *pkt; 1245 u_int pktlen; 1246 { 1247 struct bpf_d *d; 1248 u_int slen; 1249 1250 BPFIF_LOCK(bp); 1251 LIST_FOREACH(d, &bp->bif_dlist, bd_next) { 1252 BPFD_LOCK(d); 1253 ++d->bd_rcount; 1254 #ifdef BPF_JITTER 1255 if (bpf_jitter_enable != 0 && d->bd_bfilter != NULL) 1256 slen = (*(d->bd_bfilter->func))(pkt, pktlen, pktlen); 1257 else 1258 #endif 1259 slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen); 1260 if (slen != 0) { 1261 d->bd_fcount++; 1262 #ifdef MAC 1263 if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0) 1264 #endif 1265 catchpacket(d, pkt, pktlen, slen, bcopy); 1266 } 1267 BPFD_UNLOCK(d); 1268 } 1269 BPFIF_UNLOCK(bp); 1270 } 1271 1272 /* 1273 * Copy data from an mbuf chain into a buffer. This code is derived 1274 * from m_copydata in sys/uipc_mbuf.c. 1275 */ 1276 static void 1277 bpf_mcopy(src_arg, dst_arg, len) 1278 const void *src_arg; 1279 void *dst_arg; 1280 size_t len; 1281 { 1282 const struct mbuf *m; 1283 u_int count; 1284 u_char *dst; 1285 1286 m = src_arg; 1287 dst = dst_arg; 1288 while (len > 0) { 1289 if (m == NULL) 1290 panic("bpf_mcopy"); 1291 count = min(m->m_len, len); 1292 bcopy(mtod(m, void *), dst, count); 1293 m = m->m_next; 1294 dst += count; 1295 len -= count; 1296 } 1297 } 1298 1299 /* 1300 * Incoming linkage from device drivers, when packet is in an mbuf chain. 1301 */ 1302 void 1303 bpf_mtap(bp, m) 1304 struct bpf_if *bp; 1305 struct mbuf *m; 1306 { 1307 struct bpf_d *d; 1308 u_int pktlen, slen; 1309 1310 pktlen = m_length(m, NULL); 1311 1312 BPFIF_LOCK(bp); 1313 LIST_FOREACH(d, &bp->bif_dlist, bd_next) { 1314 if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL)) 1315 continue; 1316 BPFD_LOCK(d); 1317 ++d->bd_rcount; 1318 #ifdef BPF_JITTER 1319 /* XXX We cannot handle multiple mbufs. */ 1320 if (bpf_jitter_enable != 0 && d->bd_bfilter != NULL && 1321 m->m_next == NULL) 1322 slen = (*(d->bd_bfilter->func))(mtod(m, u_char *), 1323 pktlen, pktlen); 1324 else 1325 #endif 1326 slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0); 1327 if (slen != 0) { 1328 d->bd_fcount++; 1329 #ifdef MAC 1330 if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0) 1331 #endif 1332 catchpacket(d, (u_char *)m, pktlen, slen, 1333 bpf_mcopy); 1334 } 1335 BPFD_UNLOCK(d); 1336 } 1337 BPFIF_UNLOCK(bp); 1338 } 1339 1340 /* 1341 * Incoming linkage from device drivers, when packet is in 1342 * an mbuf chain and to be prepended by a contiguous header. 1343 */ 1344 void 1345 bpf_mtap2(bp, data, dlen, m) 1346 struct bpf_if *bp; 1347 void *data; 1348 u_int dlen; 1349 struct mbuf *m; 1350 { 1351 struct mbuf mb; 1352 struct bpf_d *d; 1353 u_int pktlen, slen; 1354 1355 pktlen = m_length(m, NULL); 1356 /* 1357 * Craft on-stack mbuf suitable for passing to bpf_filter. 1358 * Note that we cut corners here; we only setup what's 1359 * absolutely needed--this mbuf should never go anywhere else. 1360 */ 1361 mb.m_next = m; 1362 mb.m_data = data; 1363 mb.m_len = dlen; 1364 pktlen += dlen; 1365 1366 BPFIF_LOCK(bp); 1367 LIST_FOREACH(d, &bp->bif_dlist, bd_next) { 1368 if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL)) 1369 continue; 1370 BPFD_LOCK(d); 1371 ++d->bd_rcount; 1372 slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0); 1373 if (slen != 0) { 1374 d->bd_fcount++; 1375 #ifdef MAC 1376 if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0) 1377 #endif 1378 catchpacket(d, (u_char *)&mb, pktlen, slen, 1379 bpf_mcopy); 1380 } 1381 BPFD_UNLOCK(d); 1382 } 1383 BPFIF_UNLOCK(bp); 1384 } 1385 1386 /* 1387 * Move the packet data from interface memory (pkt) into the 1388 * store buffer. "cpfn" is the routine called to do the actual data 1389 * transfer. bcopy is passed in to copy contiguous chunks, while 1390 * bpf_mcopy is passed in to copy mbuf chains. In the latter case, 1391 * pkt is really an mbuf. 1392 */ 1393 static void 1394 catchpacket(d, pkt, pktlen, snaplen, cpfn) 1395 struct bpf_d *d; 1396 u_char *pkt; 1397 u_int pktlen, snaplen; 1398 void (*cpfn)(const void *, void *, size_t); 1399 { 1400 struct bpf_hdr *hp; 1401 int totlen, curlen; 1402 int hdrlen = d->bd_bif->bif_hdrlen; 1403 int do_wakeup = 0; 1404 1405 BPFD_LOCK_ASSERT(d); 1406 /* 1407 * Figure out how many bytes to move. If the packet is 1408 * greater or equal to the snapshot length, transfer that 1409 * much. Otherwise, transfer the whole packet (unless 1410 * we hit the buffer size limit). 1411 */ 1412 totlen = hdrlen + min(snaplen, pktlen); 1413 if (totlen > d->bd_bufsize) 1414 totlen = d->bd_bufsize; 1415 1416 /* 1417 * Round up the end of the previous packet to the next longword. 1418 */ 1419 curlen = BPF_WORDALIGN(d->bd_slen); 1420 if (curlen + totlen > d->bd_bufsize) { 1421 /* 1422 * This packet will overflow the storage buffer. 1423 * Rotate the buffers if we can, then wakeup any 1424 * pending reads. 1425 */ 1426 if (d->bd_fbuf == NULL) { 1427 /* 1428 * We haven't completed the previous read yet, 1429 * so drop the packet. 1430 */ 1431 ++d->bd_dcount; 1432 return; 1433 } 1434 ROTATE_BUFFERS(d); 1435 do_wakeup = 1; 1436 curlen = 0; 1437 } 1438 else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) 1439 /* 1440 * Immediate mode is set, or the read timeout has 1441 * already expired during a select call. A packet 1442 * arrived, so the reader should be woken up. 1443 */ 1444 do_wakeup = 1; 1445 1446 /* 1447 * Append the bpf header. 1448 */ 1449 hp = (struct bpf_hdr *)(d->bd_sbuf + curlen); 1450 microtime(&hp->bh_tstamp); 1451 hp->bh_datalen = pktlen; 1452 hp->bh_hdrlen = hdrlen; 1453 /* 1454 * Copy the packet data into the store buffer and update its length. 1455 */ 1456 (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen)); 1457 d->bd_slen = curlen + totlen; 1458 1459 if (do_wakeup) 1460 bpf_wakeup(d); 1461 } 1462 1463 /* 1464 * Initialize all nonzero fields of a descriptor. 1465 */ 1466 static int 1467 bpf_allocbufs(d) 1468 struct bpf_d *d; 1469 { 1470 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK); 1471 if (d->bd_fbuf == NULL) 1472 return (ENOBUFS); 1473 1474 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK); 1475 if (d->bd_sbuf == NULL) { 1476 free(d->bd_fbuf, M_BPF); 1477 return (ENOBUFS); 1478 } 1479 d->bd_slen = 0; 1480 d->bd_hlen = 0; 1481 return (0); 1482 } 1483 1484 /* 1485 * Free buffers currently in use by a descriptor. 1486 * Called on close. 1487 */ 1488 static void 1489 bpf_freed(d) 1490 struct bpf_d *d; 1491 { 1492 /* 1493 * We don't need to lock out interrupts since this descriptor has 1494 * been detached from its interface and it yet hasn't been marked 1495 * free. 1496 */ 1497 if (d->bd_sbuf != NULL) { 1498 free(d->bd_sbuf, M_BPF); 1499 if (d->bd_hbuf != NULL) 1500 free(d->bd_hbuf, M_BPF); 1501 if (d->bd_fbuf != NULL) 1502 free(d->bd_fbuf, M_BPF); 1503 } 1504 if (d->bd_rfilter) { 1505 free((caddr_t)d->bd_rfilter, M_BPF); 1506 #ifdef BPF_JITTER 1507 bpf_destroy_jit_filter(d->bd_bfilter); 1508 #endif 1509 } 1510 if (d->bd_wfilter) 1511 free((caddr_t)d->bd_wfilter, M_BPF); 1512 mtx_destroy(&d->bd_mtx); 1513 } 1514 1515 /* 1516 * Attach an interface to bpf. dlt is the link layer type; hdrlen is the 1517 * fixed size of the link header (variable length headers not yet supported). 1518 */ 1519 void 1520 bpfattach(ifp, dlt, hdrlen) 1521 struct ifnet *ifp; 1522 u_int dlt, hdrlen; 1523 { 1524 1525 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf); 1526 } 1527 1528 /* 1529 * Attach an interface to bpf. ifp is a pointer to the structure 1530 * defining the interface to be attached, dlt is the link layer type, 1531 * and hdrlen is the fixed size of the link header (variable length 1532 * headers are not yet supporrted). 1533 */ 1534 void 1535 bpfattach2(ifp, dlt, hdrlen, driverp) 1536 struct ifnet *ifp; 1537 u_int dlt, hdrlen; 1538 struct bpf_if **driverp; 1539 { 1540 struct bpf_if *bp; 1541 bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO); 1542 if (bp == NULL) 1543 panic("bpfattach"); 1544 1545 LIST_INIT(&bp->bif_dlist); 1546 bp->bif_ifp = ifp; 1547 bp->bif_dlt = dlt; 1548 mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF); 1549 KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized")); 1550 *driverp = bp; 1551 1552 mtx_lock(&bpf_mtx); 1553 LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next); 1554 mtx_unlock(&bpf_mtx); 1555 1556 /* 1557 * Compute the length of the bpf header. This is not necessarily 1558 * equal to SIZEOF_BPF_HDR because we want to insert spacing such 1559 * that the network layer header begins on a longword boundary (for 1560 * performance reasons and to alleviate alignment restrictions). 1561 */ 1562 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; 1563 1564 if (bootverbose) 1565 if_printf(ifp, "bpf attached\n"); 1566 } 1567 1568 /* 1569 * Detach bpf from an interface. This involves detaching each descriptor 1570 * associated with the interface, and leaving bd_bif NULL. Notify each 1571 * descriptor as it's detached so that any sleepers wake up and get 1572 * ENXIO. 1573 */ 1574 void 1575 bpfdetach(ifp) 1576 struct ifnet *ifp; 1577 { 1578 struct bpf_if *bp; 1579 struct bpf_d *d; 1580 1581 /* Locate BPF interface information */ 1582 mtx_lock(&bpf_mtx); 1583 LIST_FOREACH(bp, &bpf_iflist, bif_next) { 1584 if (ifp == bp->bif_ifp) 1585 break; 1586 } 1587 1588 /* Interface wasn't attached */ 1589 if ((bp == NULL) || (bp->bif_ifp == NULL)) { 1590 mtx_unlock(&bpf_mtx); 1591 printf("bpfdetach: %s was not attached\n", ifp->if_xname); 1592 return; 1593 } 1594 1595 LIST_REMOVE(bp, bif_next); 1596 mtx_unlock(&bpf_mtx); 1597 1598 while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) { 1599 bpf_detachd(d); 1600 BPFD_LOCK(d); 1601 bpf_wakeup(d); 1602 BPFD_UNLOCK(d); 1603 } 1604 1605 mtx_destroy(&bp->bif_mtx); 1606 free(bp, M_BPF); 1607 } 1608 1609 /* 1610 * Get a list of available data link type of the interface. 1611 */ 1612 static int 1613 bpf_getdltlist(d, bfl) 1614 struct bpf_d *d; 1615 struct bpf_dltlist *bfl; 1616 { 1617 int n, error; 1618 struct ifnet *ifp; 1619 struct bpf_if *bp; 1620 1621 ifp = d->bd_bif->bif_ifp; 1622 n = 0; 1623 error = 0; 1624 mtx_lock(&bpf_mtx); 1625 LIST_FOREACH(bp, &bpf_iflist, bif_next) { 1626 if (bp->bif_ifp != ifp) 1627 continue; 1628 if (bfl->bfl_list != NULL) { 1629 if (n >= bfl->bfl_len) { 1630 mtx_unlock(&bpf_mtx); 1631 return (ENOMEM); 1632 } 1633 error = copyout(&bp->bif_dlt, 1634 bfl->bfl_list + n, sizeof(u_int)); 1635 } 1636 n++; 1637 } 1638 mtx_unlock(&bpf_mtx); 1639 bfl->bfl_len = n; 1640 return (error); 1641 } 1642 1643 /* 1644 * Set the data link type of a BPF instance. 1645 */ 1646 static int 1647 bpf_setdlt(d, dlt) 1648 struct bpf_d *d; 1649 u_int dlt; 1650 { 1651 int error, opromisc; 1652 struct ifnet *ifp; 1653 struct bpf_if *bp; 1654 1655 if (d->bd_bif->bif_dlt == dlt) 1656 return (0); 1657 ifp = d->bd_bif->bif_ifp; 1658 mtx_lock(&bpf_mtx); 1659 LIST_FOREACH(bp, &bpf_iflist, bif_next) { 1660 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) 1661 break; 1662 } 1663 mtx_unlock(&bpf_mtx); 1664 if (bp != NULL) { 1665 opromisc = d->bd_promisc; 1666 bpf_detachd(d); 1667 bpf_attachd(d, bp); 1668 BPFD_LOCK(d); 1669 reset_d(d); 1670 BPFD_UNLOCK(d); 1671 if (opromisc) { 1672 error = ifpromisc(bp->bif_ifp, 1); 1673 if (error) 1674 if_printf(bp->bif_ifp, 1675 "bpf_setdlt: ifpromisc failed (%d)\n", 1676 error); 1677 else 1678 d->bd_promisc = 1; 1679 } 1680 } 1681 return (bp == NULL ? EINVAL : 0); 1682 } 1683 1684 static void 1685 bpf_clone(arg, cred, name, namelen, dev) 1686 void *arg; 1687 struct ucred *cred; 1688 char *name; 1689 int namelen; 1690 struct cdev **dev; 1691 { 1692 int u; 1693 1694 if (*dev != NULL) 1695 return; 1696 if (dev_stdclone(name, NULL, "bpf", &u) != 1) 1697 return; 1698 *dev = make_dev(&bpf_cdevsw, unit2minor(u), UID_ROOT, GID_WHEEL, 0600, 1699 "bpf%d", u); 1700 dev_ref(*dev); 1701 (*dev)->si_flags |= SI_CHEAPCLONE; 1702 return; 1703 } 1704 1705 static void 1706 bpf_drvinit(unused) 1707 void *unused; 1708 { 1709 1710 mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF); 1711 LIST_INIT(&bpf_iflist); 1712 EVENTHANDLER_REGISTER(dev_clone, bpf_clone, 0, 1000); 1713 } 1714 1715 static void 1716 bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd) 1717 { 1718 1719 bzero(d, sizeof(*d)); 1720 BPFD_LOCK_ASSERT(bd); 1721 d->bd_immediate = bd->bd_immediate; 1722 d->bd_promisc = bd->bd_promisc; 1723 d->bd_hdrcmplt = bd->bd_hdrcmplt; 1724 d->bd_seesent = bd->bd_seesent; 1725 d->bd_async = bd->bd_async; 1726 d->bd_rcount = bd->bd_rcount; 1727 d->bd_dcount = bd->bd_dcount; 1728 d->bd_fcount = bd->bd_fcount; 1729 d->bd_sig = bd->bd_sig; 1730 d->bd_slen = bd->bd_slen; 1731 d->bd_hlen = bd->bd_hlen; 1732 d->bd_bufsize = bd->bd_bufsize; 1733 d->bd_pid = bd->bd_pid; 1734 strlcpy(d->bd_ifname, 1735 bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ); 1736 d->bd_locked = bd->bd_locked; 1737 } 1738 1739 static int 1740 bpf_stats_sysctl(SYSCTL_HANDLER_ARGS) 1741 { 1742 struct xbpf_d *xbdbuf, *xbd; 1743 int index, error; 1744 struct bpf_if *bp; 1745 struct bpf_d *bd; 1746 1747 /* 1748 * XXX This is not technically correct. It is possible for non 1749 * privileged users to open bpf devices. It would make sense 1750 * if the users who opened the devices were able to retrieve 1751 * the statistics for them, too. 1752 */ 1753 error = suser(req->td); 1754 if (error) 1755 return (error); 1756 if (req->oldptr == NULL) 1757 return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd))); 1758 if (bpf_bpfd_cnt == 0) 1759 return (SYSCTL_OUT(req, 0, 0)); 1760 xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK); 1761 mtx_lock(&bpf_mtx); 1762 if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) { 1763 mtx_unlock(&bpf_mtx); 1764 free(xbdbuf, M_BPF); 1765 return (ENOMEM); 1766 } 1767 index = 0; 1768 LIST_FOREACH(bp, &bpf_iflist, bif_next) { 1769 BPFIF_LOCK(bp); 1770 LIST_FOREACH(bd, &bp->bif_dlist, bd_next) { 1771 xbd = &xbdbuf[index++]; 1772 BPFD_LOCK(bd); 1773 bpfstats_fill_xbpf(xbd, bd); 1774 BPFD_UNLOCK(bd); 1775 } 1776 BPFIF_UNLOCK(bp); 1777 } 1778 mtx_unlock(&bpf_mtx); 1779 error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd)); 1780 free(xbdbuf, M_BPF); 1781 return (error); 1782 } 1783 1784 SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE,bpf_drvinit,NULL) 1785 1786 #else /* !DEV_BPF && !NETGRAPH_BPF */ 1787 /* 1788 * NOP stubs to allow bpf-using drivers to load and function. 1789 * 1790 * A 'better' implementation would allow the core bpf functionality 1791 * to be loaded at runtime. 1792 */ 1793 1794 void 1795 bpf_tap(bp, pkt, pktlen) 1796 struct bpf_if *bp; 1797 u_char *pkt; 1798 u_int pktlen; 1799 { 1800 } 1801 1802 void 1803 bpf_mtap(bp, m) 1804 struct bpf_if *bp; 1805 struct mbuf *m; 1806 { 1807 } 1808 1809 void 1810 bpf_mtap2(bp, d, l, m) 1811 struct bpf_if *bp; 1812 void *d; 1813 u_int l; 1814 struct mbuf *m; 1815 { 1816 } 1817 1818 void 1819 bpfattach(ifp, dlt, hdrlen) 1820 struct ifnet *ifp; 1821 u_int dlt, hdrlen; 1822 { 1823 } 1824 1825 void 1826 bpfattach2(ifp, dlt, hdrlen, driverp) 1827 struct ifnet *ifp; 1828 u_int dlt, hdrlen; 1829 struct bpf_if **driverp; 1830 { 1831 } 1832 1833 void 1834 bpfdetach(ifp) 1835 struct ifnet *ifp; 1836 { 1837 } 1838 1839 u_int 1840 bpf_filter(pc, p, wirelen, buflen) 1841 const struct bpf_insn *pc; 1842 u_char *p; 1843 u_int wirelen; 1844 u_int buflen; 1845 { 1846 return -1; /* "no filter" behaviour */ 1847 } 1848 1849 int 1850 bpf_validate(f, len) 1851 const struct bpf_insn *f; 1852 int len; 1853 { 1854 return 0; /* false */ 1855 } 1856 1857 #endif /* !DEV_BPF && !NETGRAPH_BPF */ 1858