/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $FreeBSD$
 */

#include "bpf.h"

#ifndef __GNUC__
#define inline
#else
#define inline __inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>

#if defined(sparc) && BSD < 199103
#include <sys/stream.h>
#endif
#include <sys/poll.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if NBPF > 0

/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET	26		/* interruptible */
/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");
static int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
	&bpf_maxbufsize, 0, "");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 */
static struct bpf_if	*bpf_iflist;

static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void	bpf_detachd __P((struct bpf_d *d));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_mcopy __P((const void *, void *, size_t));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t)));
static void	reset_d __P((struct bpf_d *));
static int	bpf_setf __P((struct bpf_d *, struct bpf_program *));

static	d_open_t	bpfopen;
static	d_close_t	bpfclose;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;

#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw = {
	/* open */	bpfopen,
	/* close */	bpfclose,
	/* read */	bpfread,
	/* write */	bpfwrite,
	/* ioctl */	bpfioctl,
	/* poll */	bpfpoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"bpf",
	/* maj */	CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};
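/*
 * Illustrative userland sketch (not part of the original file): the entry
 * points in bpf_cdevsw above are typically exercised like this.  The
 * interface name "ed0" and the variable names are examples only.
 *
 *	int fd = open("/dev/bpf0", O_RDONLY);
 *	struct ifreq ifr;
 *	u_int bufsize;
 *
 *	strncpy(ifr.ifr_name, "ed0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);		(reaches bpf_setif() below)
 *	ioctl(fd, BIOCGBLEN, &bufsize);		(read() must pass exactly this)
 *	n = read(fd, buf, bufsize);		(reaches bpfread() below)
 *
 * Each captured packet in the returned buffer is prefixed by a struct
 * bpf_hdr; the next packet starts BPF_WORDALIGN(bh_hdrlen + bh_caplen)
 * bytes further on.
 */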
static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
#if defined(__FreeBSD__) || defined(__bsdi__)
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
#else
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
#endif
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

#ifdef __FreeBSD__
	case DLT_ATM_RFC1483:
		/*
		 * The en ATM driver requires a 4-byte ATM pseudo header.
		 * Though it isn't standard, the VPI:VCI needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;
#endif

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGETHDR(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MHLEN) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	bp->bif_ifp->if_bpf = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		d->bd_bif->bif_ifp->if_bpf = 0;
	d->bd_bif = 0;
}
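/*
 * Note on bpf_detachd() above: the removal loop walks a pointer to the
 * link being examined (p) rather than the element itself, so the head of
 * bif_dlist and interior descriptors are unlinked by the same assignment,
 * *p = (*p)->bd_next, with no special case for the first entry.
 */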
/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static	int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	if (p->p_prison)
		return (EPERM);

	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d)
		return (EBUSY);
	make_dev(&bpf_cdevsw, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK);
	bzero(d, sizeof(*d));
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static	int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d = dev->si_drv1;
	register int s;

	funsetown(d->bd_sigio);
	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);
	dev->si_drv1 = 0;
	FREE(d, M_BPF);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
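/*
 * Each descriptor cycles three equally sized buffers: bd_sbuf (the store
 * buffer filled at interrupt time), bd_hbuf (the hold buffer a reader is
 * draining), and bd_fbuf (a spare).  ROTATE_BUFFERS() moves store -> hold
 * and free -> store, leaving the free slot empty until bpfread() finishes
 * and returns the old hold buffer to bd_fbuf.  If the store buffer fills
 * while the hold buffer is still owned by a reader, catchpacket() has no
 * free buffer to rotate in and drops the packet, counting it in bd_dcount.
 */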
/*
 *  bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(dev, uio, ioflag)
	dev_t dev;
	register struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = dev->si_drv1;
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		if (ioflag & IO_NDELAY)
			error = EWOULDBLOCK;
		else
			error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
					  d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

#if BSD >= 199103
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}

static	int
bpfwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = dev->si_drv1;
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;
	int datlen;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
	if (error)
		return (error);

	if (datlen > ifp->if_mtu) {
		/* The mbuf is ours until if_output is called; free it. */
		m_freem(m);
		return (EMSGSIZE);
	}

	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	s = splnet();
#if BSD >= 199103
	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
#else
	error = (*ifp->if_output)(ifp, m, &dst);
#endif
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}
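/*
 * Illustrative userland sketch (not part of the original file): bpfwrite()
 * above injects one frame per write(2).  For DLT_EN10MB the caller writes
 * the complete Ethernet frame; bpf_movein() peels the first 14 bytes off
 * into the sockaddr handed to the interface's if_output routine, and only
 * the remaining payload is checked against the MTU.
 *
 *	u_int one = 1;
 *	u_char frame[64];			(dst, src, type, payload)
 *
 *	ioctl(fd, BIOCSHDRCMPLT, &one);		(keep the source address as given)
 *	write(fd, frame, sizeof(frame));
 */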
/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSBLEN		Set buffer len.
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag.
 *  BIOCSHDRCMPLT	Set "header already complete" flag.
 *  BIOCSRSIG		Set receive signal.
 *  BIOCGRSIG		Get receive signal.
 */
/* ARGSUSED */
static	int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d = dev->si_drv1;
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			snprintf(ifr->ifr_name, sizeof(ifr->ifr_name),
			    "%s%d", ifp->if_name, ifp->if_unit);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag.
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag.
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}
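/*
 * Illustrative userland sketch (not part of the original file): a minimal
 * filter program for BIOCSETF/bpf_setf() below that accepts IP over
 * Ethernet (ethertype 0x0800) and rejects everything else.  A return
 * value of (u_int)-1 captures the whole packet; 0 discards it.
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_LD+BPF_H+BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0x0800, 0, 1),
 *		BPF_STMT(BPF_RET+BPF_K, (u_int)-1),
 *		BPF_STMT(BPF_RET+BPF_K, 0),
 *	};
 *	struct bpf_program prog = { 4, insns };
 *
 *	ioctl(fd, BIOCSETF, &prog);
 */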
/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_BPF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_BPF);

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}
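/*
 * Illustrative userland sketch (not part of the original file): waiting for
 * captured data with poll(2), serviced by bpfpoll() below.  The descriptor
 * only polls readable once data has reached the hold buffer, or, with
 * BIOCIMMEDIATE set, as soon as anything is in the store buffer; the
 * BIOCSRTIMEOUT read timeout is not applied here.
 *
 *	struct pollfd pfd;
 *
 *	pfd.fd = fd;
 *	pfd.events = POLLIN;
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(fd, buf, bufsize);
 */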
/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpfpoll(dev, events, p)
	register dev_t dev;
	int events;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;
	int revents = 0;

	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = dev->si_drv1;

	s = splimp();
	if (events & (POLLIN | POLLRDNORM)) {
		if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &d->bd_sel);
	}
	splx(s);
	return (revents);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here would be if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = ifp->if_bpf;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	register size_t len;
{
	register const struct mbuf *m;
	register u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
	struct bpf_if *bp = ifp->if_bpf;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}
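/*
 * Note on bpf_mtap() above: the filter is handed the head mbuf itself with
 * a buffer length of zero, which tells bpf_filter() to use its mbuf-aware
 * load routines instead of treating the argument as contiguous memory, and
 * catchpacket() is passed bpf_mcopy so the copy walks the whole chain.
 */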
/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  A listener is woken up when the store buffer fills
 * or, in immediate mode, whenever a packet is captured.  "copy" is the
 * routine called to do the actual data transfer.  bcopy is passed in to
 * copy contiguous chunks, while bpf_mcopy is passed in to copy mbuf
 * chains.  In the latter case, pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn) __P((const void *, void *, size_t));
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_BPF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_BPF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_BPF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_BPF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_BPF);
}

/*
 * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).
 */
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_DONTWAIT);
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	bp->bif_ifp->if_bpf = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	if (bootverbose)
		printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}
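/*
 * Worked example of the padding computed above (illustrative; assumes a
 * platform where SIZEOF_BPF_HDR is 18 and BPF_WORDALIGN rounds to 4 bytes):
 * for Ethernet, hdrlen is 14, so bif_hdrlen = BPF_WORDALIGN(14 + 18) - 14
 * = 32 - 14 = 18.  Each capture then carries an 18-byte bpf header followed
 * by the 14-byte link header, ending on a longword boundary so that the
 * network layer header that follows is aligned.
 */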
static void	bpf_drvinit __P((void *unused));

static void
bpf_drvinit(unused)
	void *unused;
{

	cdevsw_add(&bpf_cdevsw);
}

SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)

#else /* !BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */

void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
}

void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
}

void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
}

u_int
bpf_filter(pc, p, wirelen, buflen)
	register const struct bpf_insn *pc;
	register u_char *p;
	u_int wirelen;
	register u_int buflen;
{
	return -1;	/* "no filter" behaviour */
}

#endif /* !BPF */