/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $FreeBSD$
 */

#include "opt_bpf.h"
#include "opt_netgraph.h"

#ifndef __GNUC__
#define inline
#else
#define inline __inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>

#if defined(sparc) && BSD < 199103
#include <sys/stream.h>
#endif
#include <sys/poll.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

static MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET	26		/* interruptible */

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");
static int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
	&bpf_maxbufsize, 0, "");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 */
static struct bpf_if	*bpf_iflist;

static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void	bpf_detachd __P((struct bpf_d *d));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_mcopy __P((const void *, void *, size_t));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t)));
static void	reset_d __P((struct bpf_d *));
static int	bpf_setf __P((struct bpf_d *, struct bpf_program *));

static	d_open_t	bpfopen;
static	d_close_t	bpfclose;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;

#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw = {
	/* open */	bpfopen,
	/* close */	bpfclose,
	/* read */	bpfread,
	/* write */	bpfwrite,
	/* ioctl */	bpfioctl,
	/* poll */	bpfpoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"bpf",
	/* maj */	CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};


static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
#if defined(__FreeBSD__) || defined(__bsdi__)
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
#else
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
#endif
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

#ifdef __FreeBSD__
	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;
#endif

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGETHDR(m, M_TRYWAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MHLEN) {
#if BSD >= 199103
		MCLGET(m, M_TRYWAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	bp->bif_ifp->if_bpf = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	int error;
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(bp->bif_ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged.
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			printf("%s%d: ifpromisc failed %d\n",
			    bp->bif_ifp->if_name, bp->bif_ifp->if_unit, error);
		}
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		d->bd_bif->bif_ifp->if_bpf = 0;
	d->bd_bif = 0;
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static	int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	if (p->p_prison)
		return (EPERM);

	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d)
		return (EBUSY);
	if ((dev->si_flags & SI_NAMED) == 0)
		make_dev(&bpf_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600,
		    "bpf%d", dev2unit(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static	int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d = dev->si_drv1;
	register int s;

	funsetown(d->bd_sigio);
	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);
	dev->si_drv1 = 0;
	FREE(d, M_BPF);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;

	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 *  bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(dev, uio, ioflag)
	dev_t dev;
	register struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = dev->si_drv1;
	int error;
	int s;

	/*
	 * Restrict the application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			splx(s);
			return (ENXIO);
		}

		if (ioflag & IO_NDELAY) {
			splx(s);
			return (EWOULDBLOCK);
		}
		error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
				  d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

#if BSD >= 199103
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}

static	int
bpfwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = dev->si_drv1;
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;
	int datlen;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
	if (error)
		return (error);

	if (datlen > ifp->if_mtu) {
		/* Don't leak the mbuf on an oversized write. */
		m_freem(m);
		return (EMSGSIZE);
	}

	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	s = splnet();
#if BSD >= 199103
	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
#else
	error = (*ifp->if_output)(ifp, m, &dst);
#endif
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 * FIONREAD		Check for read packet available.
 * SIOCGIFADDR		Get interface address - convenient hook to driver.
 * BIOCGBLEN		Get buffer len [for read()].
 * BIOCSETF		Set ethernet read filter.
 * BIOCFLUSH		Flush read packet buffer.
 * BIOCPROMISC		Put interface into promiscuous mode.
 * BIOCGDLT		Get link layer type.
 * BIOCGETIF		Get interface name.
 * BIOCSETIF		Set interface.
 * BIOCSRTIMEOUT	Set read timeout.
 * BIOCGRTIMEOUT	Get read timeout.
 * BIOCGSTATS		Get packet stats.
 * BIOCIMMEDIATE	Set immediate mode.
 * BIOCVERSION		Get filter language version.
 * BIOCGHDRCMPLT	Get "header already complete" flag
 * BIOCSHDRCMPLT	Set "header already complete" flag
 * BIOCGSEESENT		Get "see packets sent" flag
 * BIOCSSEESENT		Set "see packets sent" flag
 */
/* ARGSUSED */
static	int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d = dev->si_drv1;
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			snprintf(ifr->ifr_name, sizeof(ifr->ifr_name),
			    "%s%d", ifp->if_name, ifp->if_unit);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_BPF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_BPF);

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpfpoll(dev, events, p)
	register dev_t dev;
	int events;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;
	int revents = 0;

	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = dev->si_drv1;

	if (d->bd_bif == NULL)
		return (ENXIO);

	s = splimp();
	if (events & (POLLIN | POLLRDNORM)) {
		if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &d->bd_sel);
	}
	splx(s);
	return (revents);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = ifp->if_bpf;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	register size_t len;
{
	register const struct mbuf *m;
	register u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
	struct bpf_if *bp = ifp->if_bpf;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer, waking up any pending reads if a buffer fills.
 * "cpfn" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn) __P((const void *, void *, size_t));
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_BPF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and has not yet been marked free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_BPF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_BPF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_BPF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_BPF);
}

/*
 * Attach an interface to bpf.  ifp is the interface to be attached;
 * dlt is the link layer type; hdrlen is the fixed size of the link
 * header (variable length headers not yet supported).
 */
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;

	bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_NOWAIT);
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	bp->bif_ifp->if_bpf = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	if (bootverbose)
		printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}
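
/*
 * Illustrative sketch (not part of this file's code): how a network driver,
 * or the generic layer acting on its behalf, is expected to hook into the
 * attach/detach and tap entry points in this file.  The DLT value and header
 * length shown are the usual Ethernet choices and the mbuf variable is an
 * assumption for the example.
 *
 *	at attach time:		bpfattach(ifp, DLT_EN10MB,
 *					  sizeof(struct ether_header));
 *	per packet (rx/tx):	if (ifp->if_bpf)
 *					bpf_mtap(ifp, m);
 *	at detach time:		bpfdetach(ifp);
 *
 * The if_bpf test keeps the no-listener case cheap: bpf_attachd() sets
 * if_bpf only while at least one descriptor is listening on the interface,
 * and bpf_detachd() clears it again when the last listener goes away.
 */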

/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(ifp)
	struct ifnet *ifp;
{
	struct bpf_if	*bp, *bp_prev;
	struct bpf_d	*d;
	int	s;

	s = splimp();

	/* Locate BPF interface information */
	bp_prev = NULL;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (ifp == bp->bif_ifp)
			break;
		bp_prev = bp;
	}

	/* Interface wasn't attached */
	if (bp == NULL) {
		splx(s);
		printf("bpfdetach: %s%d was not attached\n", ifp->if_name,
		    ifp->if_unit);
		return;
	}

	while ((d = bp->bif_dlist) != NULL) {
		bpf_detachd(d);
		bpf_wakeup(d);
	}

	if (bp_prev) {
		bp_prev->bif_next = bp->bif_next;
	} else {
		bpf_iflist = bp->bif_next;
	}

	free(bp, M_BPF);

	splx(s);
}

static void bpf_drvinit __P((void *unused));

static void bpf_clone __P((void *arg, char *name, int namelen, dev_t *dev));

static void
bpf_clone(arg, name, namelen, dev)
	void *arg;
	char *name;
	int namelen;
	dev_t *dev;
{
	int u;

	if (*dev != NODEV)
		return;
	if (dev_stdclone(name, NULL, "bpf", &u) != 1)
		return;
	*dev = make_dev(&bpf_cdevsw, unit2minor(u), UID_ROOT, GID_WHEEL, 0600,
	    "bpf%d", u);
	(*dev)->si_flags |= SI_CHEAPCLONE;
	return;
}

static void
bpf_drvinit(unused)
	void *unused;
{

	EVENTHANDLER_REGISTER(dev_clone, bpf_clone, 0, 1000);
	cdevsw_add(&bpf_cdevsw);
}

SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)

#else /* !DEV_BPF && !NETGRAPH_BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */

void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
}

void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
}

void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
}

void
bpfdetach(ifp)
	struct ifnet *ifp;
{
}

u_int
bpf_filter(pc, p, wirelen, buflen)
	register const struct bpf_insn *pc;
	register u_char *p;
	u_int wirelen;
	register u_int buflen;
{
	return -1;	/* "no filter" behaviour */
}

int
bpf_validate(f, len)
	const struct bpf_insn *f;
	int len;
{
	return 0;	/* false */
}

#endif /* !DEV_BPF && !NETGRAPH_BPF */
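
/*
 * Illustrative userland sketch (not compiled here): the read-side use of the
 * ioctls documented above.  The device node, interface name ("ed0"), and the
 * omitted error handling are assumptions for the sake of the example; real
 * programs normally go through libpcap rather than driving /dev/bpf directly.
 *
 *	int fd = open("/dev/bpf0", O_RDONLY);
 *	u_int bufsize, on = 1;
 *	struct ifreq ifr;
 *
 *	ioctl(fd, BIOCGBLEN, &bufsize);		each read() must use exactly
 *						this many bytes
 *	strncpy(ifr.ifr_name, "ed0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);		bind descriptor to interface
 *	ioctl(fd, BIOCIMMEDIATE, &on);		wake reads as packets arrive
 *
 * Each read() then returns a buffer of BPF_WORDALIGNed records, each
 * beginning with a struct bpf_hdr followed by bh_caplen bytes of packet data.
 */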