/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $FreeBSD$
 */

#include "bpf.h"

#ifndef __GNUC__
#define inline
#else
#define inline __inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>

#if defined(sparc) && BSD < 199103
#include <sys/stream.h>
#endif
#include <sys/poll.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if NBPF > 0

/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET	26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");
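
/*
 * Example: the default can be changed at run time through the sysctl
 * declared above, e.g. "sysctl -w debug.bpf_bufsize=32768".  The new
 * value applies to descriptors opened afterwards; an already-open
 * descriptor may still resize its own buffers with BIOCSBLEN, but only
 * before it is attached to an interface with BIOCSETIF.
 */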

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 */
static struct bpf_if	*bpf_iflist;

static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void	bpf_detachd __P((struct bpf_d *d));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_ifname __P((struct ifnet *, struct ifreq *));
static void	bpf_mcopy __P((const void *, void *, size_t));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t)));
static void	reset_d __P((struct bpf_d *));
static int	bpf_setf __P((struct bpf_d *, struct bpf_program *));

static	d_open_t	bpfopen;
static	d_close_t	bpfclose;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;

#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw = {
	/* open */	bpfopen,
	/* close */	bpfclose,
	/* read */	bpfread,
	/* write */	bpfwrite,
	/* ioctl */	bpfioctl,
	/* stop */	nostop,
	/* reset */	noreset,
	/* devtotty */	nodevtotty,
	/* poll */	bpfpoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"bpf",
	/* parms */	noparms,
	/* maj */	CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* maxio */	0,
	/* bmaj */	-1
};


static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
#if defined(__FreeBSD__) || defined(__bsdi__)
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
#else
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
#endif
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

#ifdef __FreeBSD__
	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;
#endif

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGETHDR(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MHLEN) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}
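
/*
 * For example, a write(2) on a descriptor attached to an Ethernet
 * interface (DLT_EN10MB) must supply the complete frame: a 14-byte
 * Ethernet header immediately followed by the payload.  bpf_movein()
 * above copies that header into sockp->sa_data and hands back an mbuf
 * containing only the payload, so the interface output routine can
 * prepend it again at transmit time (AF_UNSPEC destinations carry a
 * pre-built link-level header in sa_data).
 */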

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	bp->bif_ifp->if_bpf = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		d->bd_bif->bif_ifp->if_bpf = 0;
	d->bd_bif = 0;
}

/*
 * Open the bpf device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static	int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	if (p->p_prison)
		return (EPERM);

	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d)
		return (EBUSY);
	make_dev(&bpf_cdevsw, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK);
	bzero(d, sizeof(*d));
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;

	return (0);
}
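
/*
 * Because each minor can be claimed by only one process at a time, a
 * capturing program typically probes /dev/bpf0, /dev/bpf1, ... in turn
 * and uses the first open(2) that does not fail with EBUSY.
 */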

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static	int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d = dev->si_drv1;
	register int s;

	funsetown(d->bd_sigio);
	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);
	dev->si_drv1 = 0;
	FREE(d, M_BPF);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
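
/*
 * Each descriptor cycles three equally sized buffers:
 *
 *	free (bd_fbuf)  -->  store (bd_sbuf)  -->  hold (bd_hbuf)  -->  read(2)
 *
 * Packets are appended to the store buffer; a rotation moves it to the
 * hold slot (where bpfread() drains it) and recycles the free buffer as
 * the new store buffer.  If the hold buffer has not been read yet when
 * the store buffer fills, bd_fbuf is zero and catchpacket() drops the
 * packet, counting it in bd_dcount.
 */
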
/*
 * bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(dev, uio, ioflag)
	dev_t dev;
	register struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = dev->si_drv1;
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * One or more packets arrived since the previous
			 * read or while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		if (ioflag & IO_NDELAY)
			error = EWOULDBLOCK;
		else
			error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
					  d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

#if BSD >= 199103
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}

static	int
bpfwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = dev->si_drv1;
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;
	int datlen;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
	if (error)
		return (error);

	if (datlen > ifp->if_mtu)
		return (EMSGSIZE);

	s = splnet();
#if BSD >= 199103
	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
#else
	error = (*ifp->if_output)(ifp, m, &dst);
#endif
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set link layer read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 */
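
/*
 * A minimal capture loop, as seen from userland (an illustrative sketch;
 * error handling is omitted and the interface name "ed0" is only an
 * example):
 *
 *	struct ifreq ifr;
 *	struct bpf_hdr *bh;
 *	u_int blen, imm = 1;
 *	char *buf, *p;
 *	int fd, n;
 *
 *	fd = open("/dev/bpf0", O_RDONLY);
 *	ioctl(fd, BIOCGBLEN, &blen);		read() must use this size
 *	buf = malloc(blen);
 *	strncpy(ifr.ifr_name, "ed0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);		attach to the interface
 *	ioctl(fd, BIOCIMMEDIATE, &imm);		wake reads as packets arrive
 *	while ((n = read(fd, buf, blen)) > 0)
 *		for (p = buf; p < buf + n;
 *		    p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen)) {
 *			bh = (struct bpf_hdr *)p;
 *			... captured data is at p + bh->bh_hdrlen ...
 *		}
 */
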
/* ARGSUSED */
static	int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d = dev->si_drv1;
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}
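
/*
 * The read timeout is stored in clock ticks.  For example, with hz = 100
 * (tick = 10000 microseconds), BIOCSRTIMEOUT with a timeval of 2.5 seconds
 * stores roughly 250 ticks in bd_rtout, and BIOCGRTIMEOUT converts that
 * back as 250 / 100 = 2 seconds plus (250 % 100) * 10000 = 500000
 * microseconds.
 */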

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_BPF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_BPF);

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Convert an interface name plus unit number of an ifp to a single
 * name which is returned in the ifr.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{
	char *s = ifp->if_name;
	char *d = ifr->ifr_name;

	while ((*d++ = *s++) != 0)
		continue;
	d--; /* back to the null */
	/* XXX Assume that unit number is less than 10. */
	*d++ = ifp->if_unit + '0';
	*d = '\0';
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpfpoll(dev, events, p)
	register dev_t dev;
	int events;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;
	int revents = 0;

	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = dev->si_drv1;

	s = splimp();
	if (events & (POLLIN | POLLRDNORM)) {
		if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &d->bd_sel);
	}
	splx(s);
	return (revents);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = ifp->if_bpf;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	register size_t len;
{
	register const struct mbuf *m;
	register u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
	struct bpf_if *bp = ifp->if_bpf;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}
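
/*
 * Passing a buffer length of zero to bpf_filter() above is what tells the
 * filter machine that the "packet" pointer is really an mbuf chain, so
 * loads walk the chain instead of indexing a contiguous buffer; bpf_tap()
 * passes pktlen because its packet really is contiguous.  For the same
 * reason catchpacket() is handed bpf_mcopy here rather than bcopy.
 */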

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn) __P((const void *, void *, size_t));
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_BPF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_BPF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_BPF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_BPF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_BPF);
}

/*
 * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).
 */
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_DONTWAIT);
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	bp->bif_ifp->if_bpf = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
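
	/*
	 * For example (assuming SIZEOF_BPF_HDR is 18): an Ethernet
	 * attachment with hdrlen = 14 gets bif_hdrlen =
	 * BPF_WORDALIGN(14 + 18) - 14 = 32 - 14 = 18, so the captured
	 * link header ends at offset 32 and the network layer header
	 * that follows it starts on a longword boundary.
	 */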

	if (bootverbose)
		printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}

static void bpf_drvinit __P((void *unused));

static void
bpf_drvinit(unused)
	void *unused;
{

	cdevsw_add(&bpf_cdevsw);
}

SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)

#else /* !BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */

void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
}

void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
}

void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
}

u_int
bpf_filter(pc, p, wirelen, buflen)
	register struct bpf_insn *pc;
	register u_char *p;
	u_int wirelen;
	register u_int buflen;
{
	return -1;	/* "no filter" behaviour */
}

#endif /* !BPF */