1 /* 2 * Copyright (c) 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)bpf.c 8.2 (Berkeley) 3/28/94 39 * 40 * $Id: bpf.c,v 1.34 1997/09/16 11:43:42 bde Exp $ 41 */ 42 43 #include "bpfilter.h" 44 45 #if NBPFILTER > 0 46 47 #ifndef __GNUC__ 48 #define inline 49 #else 50 #define inline __inline 51 #endif 52 53 #include <sys/param.h> 54 #include <sys/systm.h> 55 #include <sys/conf.h> 56 #include <sys/malloc.h> 57 #include <sys/mbuf.h> 58 #include <sys/buf.h> 59 #include <sys/time.h> 60 #include <sys/proc.h> 61 #include <sys/signalvar.h> 62 #include <sys/filio.h> 63 #include <sys/sockio.h> 64 #include <sys/ttycom.h> 65 66 #include <sys/fcntl.h> 67 #if defined(sparc) && BSD < 199103 68 #include <sys/stream.h> 69 #endif 70 #include <sys/uio.h> 71 #include <sys/poll.h> 72 73 #include <sys/socket.h> 74 #include <sys/socketvar.h> 75 #include <sys/protosw.h> 76 #include <net/if.h> 77 78 #include <net/bpf.h> 79 #include <net/bpfdesc.h> 80 81 #include <sys/errno.h> 82 83 #include <netinet/in.h> 84 #include <netinet/if_ether.h> 85 #include <sys/kernel.h> 86 #include <sys/sysctl.h> 87 #include <sys/conf.h> 88 #ifdef DEVFS 89 #include <sys/devfsext.h> 90 #endif /*DEVFS*/ 91 92 93 /* 94 * Older BSDs don't have kernel malloc. 
95 */ 96 #if BSD < 199103 97 extern bcopy(); 98 static caddr_t bpf_alloc(); 99 #include <net/bpf_compat.h> 100 #define BPF_BUFSIZE (MCLBYTES-8) 101 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio) 102 #else 103 #define BPF_BUFSIZE 4096 104 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) 105 #endif 106 107 #define PRINET 26 /* interruptible */ 108 109 /* 110 * The default read buffer size is patchable. 111 */ 112 static int bpf_bufsize = BPF_BUFSIZE; 113 SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW, 114 &bpf_bufsize, 0, ""); 115 116 /* 117 * bpf_iflist is the list of interfaces; each corresponds to an ifnet 118 * bpf_dtab holds the descriptors, indexed by minor device # 119 */ 120 static struct bpf_if *bpf_iflist; 121 static struct bpf_d bpf_dtab[NBPFILTER]; 122 static int bpf_dtab_init; 123 124 static int bpf_allocbufs __P((struct bpf_d *)); 125 static void bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp)); 126 static void bpf_detachd __P((struct bpf_d *d)); 127 static void bpf_freed __P((struct bpf_d *)); 128 static void bpf_ifname __P((struct ifnet *, struct ifreq *)); 129 static void bpf_mcopy __P((const void *, void *, u_int)); 130 static int bpf_movein __P((struct uio *, int, 131 struct mbuf **, struct sockaddr *, int *)); 132 static int bpf_setif __P((struct bpf_d *, struct ifreq *)); 133 static inline void 134 bpf_wakeup __P((struct bpf_d *)); 135 static void catchpacket __P((struct bpf_d *, u_char *, u_int, 136 u_int, void (*)(const void *, void *, u_int))); 137 static void reset_d __P((struct bpf_d *)); 138 static int bpf_setf __P((struct bpf_d *, struct bpf_program *)); 139 140 static d_open_t bpfopen; 141 static d_close_t bpfclose; 142 static d_read_t bpfread; 143 static d_write_t bpfwrite; 144 static d_ioctl_t bpfioctl; 145 static d_poll_t bpfpoll; 146 147 #define CDEV_MAJOR 23 148 static struct cdevsw bpf_cdevsw = 149 { bpfopen, bpfclose, bpfread, bpfwrite, /*23*/ 150 bpfioctl, nostop, nullreset, nodevtotty,/* bpf 
 */
	bpfpoll,	nommap,		NULL,	"bpf",	NULL,	-1 };


/*
 * Copy a packet from user space (uio) into a freshly allocated mbuf
 * chain and build a sockaddr describing the link-layer destination
 * for the interface output routine.  On success, *mp holds the mbuf
 * chain and *datlen the payload length (excluding any link header,
 * which is copied into sockp->sa_data instead of the mbuf).
 * Returns 0 or an errno: EIO for unsupported link types or oversize
 * packets, ENOBUFS if mbuf/cluster allocation fails.
 */
static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
#if defined(__FreeBSD__) || defined(__bsdi__)
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
#else
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
#endif
		break;

	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	/*
	 * NOTE(review): *datlen is stored before the MCLBYTES bound
	 * check below; callers must ignore it when an error is returned.
	 */
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGETHDR(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MHLEN) {
		/* Packet won't fit in the header mbuf; add a cluster. */
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		/* The link header goes into sockp->sa_data, not the mbuf. */
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	bp->bif_ifp->if_bpf = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		d->bd_bif->bif_ifp->if_bpf = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d))
		return (EBUSY);

	/* Mark "free" and do most initialization. */
	/* NOTE(review): bzero() leaves bd_next == 0, i.e. D_MARKUSED. */
	bzero((char *)d, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	register int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
/* One-shot timer handler: flag the descriptor and wake the sleeper. */
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

/*
 * Emulate tsleep() with sleep()/timeout():  returns 0 on a normal
 * wakeup, EWOULDBLOCK if the read timeout fired first, EINTR if the
 * sleep was interrupted by a signal.
 */
int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(dev, uio, ioflag)
	dev_t dev;
	register struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * as kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		/* bd_rtout == -1 is the FIONBIO "non-blocking" marker. */
		if (d->bd_rtout != -1)
			error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
					  d->bd_rtout);
		else
			error = EWOULDBLOCK; /* User requested non-blocking I/O */
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	/* Recycle the emptied hold buffer as the new free buffer. */
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
523 */ 524 static inline void 525 bpf_wakeup(d) 526 register struct bpf_d *d; 527 { 528 struct proc *p; 529 530 wakeup((caddr_t)d); 531 if (d->bd_async && d->bd_sig) 532 if (d->bd_pgid > 0) 533 gsignal (d->bd_pgid, d->bd_sig); 534 else if (p = pfind (-d->bd_pgid)) 535 psignal (p, d->bd_sig); 536 537 #if BSD >= 199103 538 selwakeup(&d->bd_sel); 539 /* XXX */ 540 d->bd_sel.si_pid = 0; 541 #else 542 if (d->bd_selproc) { 543 selwakeup(d->bd_selproc, (int)d->bd_selcoll); 544 d->bd_selcoll = 0; 545 d->bd_selproc = 0; 546 } 547 #endif 548 } 549 550 static int 551 bpfwrite(dev, uio, ioflag) 552 dev_t dev; 553 struct uio *uio; 554 int ioflag; 555 { 556 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 557 struct ifnet *ifp; 558 struct mbuf *m; 559 int error, s; 560 static struct sockaddr dst; 561 int datlen; 562 563 if (d->bd_bif == 0) 564 return (ENXIO); 565 566 ifp = d->bd_bif->bif_ifp; 567 568 if (uio->uio_resid == 0) 569 return (0); 570 571 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen); 572 if (error) 573 return (error); 574 575 if (datlen > ifp->if_mtu) 576 return (EMSGSIZE); 577 578 s = splnet(); 579 #if BSD >= 199103 580 error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0); 581 #else 582 error = (*ifp->if_output)(ifp, m, &dst); 583 #endif 584 splx(s); 585 /* 586 * The driver frees the mbuf. 587 */ 588 return (error); 589 } 590 591 /* 592 * Reset a descriptor by flushing its packet buffer and clearing the 593 * receive and drop counts. Should be called at splimp. 594 */ 595 static void 596 reset_d(d) 597 struct bpf_d *d; 598 { 599 if (d->bd_hbuf) { 600 /* Free the hold buffer. */ 601 d->bd_fbuf = d->bd_hbuf; 602 d->bd_hbuf = 0; 603 } 604 d->bd_slen = 0; 605 d->bd_hlen = 0; 606 d->bd_rcount = 0; 607 d->bd_dcount = 0; 608 } 609 610 /* 611 * FIONREAD Check for read packet available. 612 * SIOCGIFADDR Get interface address - convenient hook to driver. 613 * BIOCGBLEN Get buffer len [for read()]. 614 * BIOCSETF Set ethernet read filter. 
 * BIOCFLUSH		Flush read packet buffer.
 * BIOCPROMISC		Put interface into promiscuous mode.
 * BIOCGDLT		Get link layer type.
 * BIOCGETIF		Get interface name.
 * BIOCSETIF		Set interface.
 * BIOCSRTIMEOUT	Set read timeout.
 * BIOCGRTIMEOUT	Get read timeout.
 * BIOCGSTATS		Get packet stats.
 * BIOCIMMEDIATE	Set immediate mode.
 * BIOCVERSION		Get filter language version.
 */
/* ARGSUSED */
static int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	int cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			/* Count store-buffer bytes plus any held buffer. */
			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			/* Pass the request through to the attached driver. */
			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is
	 * attached (i.e. before the buffers are allocated); the value
	 * is clamped to [BPF_MINBUFSIZE, BPF_MAXBUFSIZE] and the
	 * clamped size is written back to the caller.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec;

			/* Compute number of milliseconds. */
			msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
			/* Scale milliseconds to ticks.  Assume hard
			   clock has millisecond or greater resolution
			   (i.e. tick >= 1000).  For 10ms hardclock,
			   tick/1000 = 10, so rtout<-msec/10. */
			d->bd_rtout = msec / (tick / 1000);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec = d->bd_rtout;

			/* Inverse of the BIOCSRTIMEOUT conversion. */
			msec *= tick / 1000;
			tv->tv_sec = msec / 1000;
			tv->tv_usec = msec % 1000;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}


	case FIONBIO:		/* Non-blocking I/O */
		/* bd_rtout == -1 is tested by bpfread() to avoid sleeping. */
		if (*(int *)addr)
			d->bd_rtout = -1;
		else
			d->bd_rtout = 0;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

/* N.B.  ioctl (FIOSETOWN) and fcntl (F_SETOWN) both end up doing the
   equivalent of a TIOCSPGRP and hence end up here.  *However* TIOCSPGRP's arg
   is a process group if it's positive and a process id if it's negative.  This
   is exactly the opposite of what the other two functions want!  Therefore
   there is code in ioctl and fcntl to negate the arg before calling here. */

	case TIOCSPGRP:		/* Process or group to send signals to */
		d->bd_pgid = *(int *)addr;
		break;

	case TIOCGPGRP:
		*(int *)addr = d->bd_pgid;
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		/* A null program means "remove the current filter". */
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	/*
	 * Install the new program only after it has been copied in and
	 * validated; the old one is swapped out at splimp and freed.
	 * NOTE(review): a copyin() fault is reported as EINVAL here,
	 * not EFAULT.
	 */
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Convert an interface name plus unit number of an ifp to a single
 * name which is returned in the ifr.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{
	char *s = ifp->if_name;
	char *d = ifr->ifr_name;

	/* Copy the name, then append the unit digit in place of the NUL. */
	while (*d++ = *s++)
		continue;
	d--; /* back to the null */
	/* XXX Assume that unit number is less than 10. */
	*d++ = ifp->if_unit + '0';
	*d = '\0';
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpfpoll(dev, events, p)
	register dev_t dev;
	int events;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;
	int revents = 0;

	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	s = splimp();
	/* Readable if the hold buffer, or (in immediate mode) the store
	   buffer, has data; otherwise record interest for selwakeup(). */
	if (events & (POLLIN | POLLRDNORM))
		if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &d->bd_sel);

	splx(s);
	return (revents);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
1032 */ 1033 void 1034 bpf_tap(ifp, pkt, pktlen) 1035 struct ifnet *ifp; 1036 register u_char *pkt; 1037 register u_int pktlen; 1038 { 1039 struct bpf_if *bp; 1040 register struct bpf_d *d; 1041 register u_int slen; 1042 /* 1043 * Note that the ipl does not have to be raised at this point. 1044 * The only problem that could arise here is that if two different 1045 * interfaces shared any data. This is not the case. 1046 */ 1047 bp = ifp->if_bpf; 1048 for (d = bp->bif_dlist; d != 0; d = d->bd_next) { 1049 ++d->bd_rcount; 1050 slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen); 1051 if (slen != 0) 1052 catchpacket(d, pkt, pktlen, slen, bcopy); 1053 } 1054 } 1055 1056 /* 1057 * Copy data from an mbuf chain into a buffer. This code is derived 1058 * from m_copydata in sys/uipc_mbuf.c. 1059 */ 1060 static void 1061 bpf_mcopy(src_arg, dst_arg, len) 1062 const void *src_arg; 1063 void *dst_arg; 1064 register u_int len; 1065 { 1066 register const struct mbuf *m; 1067 register u_int count; 1068 u_char *dst; 1069 1070 m = src_arg; 1071 dst = dst_arg; 1072 while (len > 0) { 1073 if (m == 0) 1074 panic("bpf_mcopy"); 1075 count = min(m->m_len, len); 1076 bcopy(mtod(m, void *), dst, count); 1077 m = m->m_next; 1078 dst += count; 1079 len -= count; 1080 } 1081 } 1082 1083 /* 1084 * Incoming linkage from device drivers, when packet is in an mbuf chain. 1085 */ 1086 void 1087 bpf_mtap(ifp, m) 1088 struct ifnet *ifp; 1089 struct mbuf *m; 1090 { 1091 struct bpf_if *bp = ifp->if_bpf; 1092 struct bpf_d *d; 1093 u_int pktlen, slen; 1094 struct mbuf *m0; 1095 1096 pktlen = 0; 1097 for (m0 = m; m0 != 0; m0 = m0->m_next) 1098 pktlen += m0->m_len; 1099 1100 for (d = bp->bif_dlist; d != 0; d = d->bd_next) { 1101 ++d->bd_rcount; 1102 slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0); 1103 if (slen != 0) 1104 catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy); 1105 } 1106 } 1107 1108 /* 1109 * Move the packet data from interface memory (pkt) into the 1110 * store buffer. 
Return 1 if it's time to wakeup a listener (buffer full), 1111 * otherwise 0. "copy" is the routine called to do the actual data 1112 * transfer. bcopy is passed in to copy contiguous chunks, while 1113 * bpf_mcopy is passed in to copy mbuf chains. In the latter case, 1114 * pkt is really an mbuf. 1115 */ 1116 static void 1117 catchpacket(d, pkt, pktlen, snaplen, cpfn) 1118 register struct bpf_d *d; 1119 register u_char *pkt; 1120 register u_int pktlen, snaplen; 1121 register void (*cpfn) __P((const void *, void *, u_int)); 1122 { 1123 register struct bpf_hdr *hp; 1124 register int totlen, curlen; 1125 register int hdrlen = d->bd_bif->bif_hdrlen; 1126 /* 1127 * Figure out how many bytes to move. If the packet is 1128 * greater or equal to the snapshot length, transfer that 1129 * much. Otherwise, transfer the whole packet (unless 1130 * we hit the buffer size limit). 1131 */ 1132 totlen = hdrlen + min(snaplen, pktlen); 1133 if (totlen > d->bd_bufsize) 1134 totlen = d->bd_bufsize; 1135 1136 /* 1137 * Round up the end of the previous packet to the next longword. 1138 */ 1139 curlen = BPF_WORDALIGN(d->bd_slen); 1140 if (curlen + totlen > d->bd_bufsize) { 1141 /* 1142 * This packet will overflow the storage buffer. 1143 * Rotate the buffers if we can, then wakeup any 1144 * pending reads. 1145 */ 1146 if (d->bd_fbuf == 0) { 1147 /* 1148 * We haven't completed the previous read yet, 1149 * so drop the packet. 1150 */ 1151 ++d->bd_dcount; 1152 return; 1153 } 1154 ROTATE_BUFFERS(d); 1155 bpf_wakeup(d); 1156 curlen = 0; 1157 } 1158 else if (d->bd_immediate) 1159 /* 1160 * Immediate mode is set. A packet arrived so any 1161 * reads should be woken up. 1162 */ 1163 bpf_wakeup(d); 1164 1165 /* 1166 * Append the bpf header. 
1167 */ 1168 hp = (struct bpf_hdr *)(d->bd_sbuf + curlen); 1169 #if BSD >= 199103 1170 microtime(&hp->bh_tstamp); 1171 #elif defined(sun) 1172 uniqtime(&hp->bh_tstamp); 1173 #else 1174 hp->bh_tstamp = time; 1175 #endif 1176 hp->bh_datalen = pktlen; 1177 hp->bh_hdrlen = hdrlen; 1178 /* 1179 * Copy the packet data into the store buffer and update its length. 1180 */ 1181 (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen)); 1182 d->bd_slen = curlen + totlen; 1183 } 1184 1185 /* 1186 * Initialize all nonzero fields of a descriptor. 1187 */ 1188 static int 1189 bpf_allocbufs(d) 1190 register struct bpf_d *d; 1191 { 1192 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK); 1193 if (d->bd_fbuf == 0) 1194 return (ENOBUFS); 1195 1196 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK); 1197 if (d->bd_sbuf == 0) { 1198 free(d->bd_fbuf, M_DEVBUF); 1199 return (ENOBUFS); 1200 } 1201 d->bd_slen = 0; 1202 d->bd_hlen = 0; 1203 return (0); 1204 } 1205 1206 /* 1207 * Free buffers currently in use by a descriptor. 1208 * Called on close. 1209 */ 1210 static void 1211 bpf_freed(d) 1212 register struct bpf_d *d; 1213 { 1214 /* 1215 * We don't need to lock out interrupts since this descriptor has 1216 * been detached from its interface and it yet hasn't been marked 1217 * free. 1218 */ 1219 if (d->bd_sbuf != 0) { 1220 free(d->bd_sbuf, M_DEVBUF); 1221 if (d->bd_hbuf != 0) 1222 free(d->bd_hbuf, M_DEVBUF); 1223 if (d->bd_fbuf != 0) 1224 free(d->bd_fbuf, M_DEVBUF); 1225 } 1226 if (d->bd_filter) 1227 free((caddr_t)d->bd_filter, M_DEVBUF); 1228 1229 D_MARKFREE(d); 1230 } 1231 1232 /* 1233 * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *) 1234 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed 1235 * size of the link header (variable length headers not yet supported). 
1236 */ 1237 void 1238 bpfattach(ifp, dlt, hdrlen) 1239 struct ifnet *ifp; 1240 u_int dlt, hdrlen; 1241 { 1242 struct bpf_if *bp; 1243 int i; 1244 bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT); 1245 if (bp == 0) 1246 panic("bpfattach"); 1247 1248 bp->bif_dlist = 0; 1249 bp->bif_ifp = ifp; 1250 bp->bif_dlt = dlt; 1251 1252 bp->bif_next = bpf_iflist; 1253 bpf_iflist = bp; 1254 1255 bp->bif_ifp->if_bpf = 0; 1256 1257 /* 1258 * Compute the length of the bpf header. This is not necessarily 1259 * equal to SIZEOF_BPF_HDR because we want to insert spacing such 1260 * that the network layer header begins on a longword boundary (for 1261 * performance reasons and to alleviate alignment restrictions). 1262 */ 1263 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; 1264 1265 /* 1266 * Mark all the descriptors free if this hasn't been done. 1267 */ 1268 if (!bpf_dtab_init) { 1269 for (i = 0; i < NBPFILTER; ++i) 1270 D_MARKFREE(&bpf_dtab[i]); 1271 bpf_dtab_init = 1; 1272 } 1273 1274 if (bootverbose) 1275 printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit); 1276 } 1277 1278 #ifdef DEVFS 1279 static void *bpf_devfs_token[NBPFILTER]; 1280 #endif 1281 1282 static bpf_devsw_installed = 0; 1283 1284 static void bpf_drvinit __P((void *unused)); 1285 static void 1286 bpf_drvinit(unused) 1287 void *unused; 1288 { 1289 dev_t dev; 1290 #ifdef DEVFS 1291 int i; 1292 #endif 1293 1294 if( ! bpf_devsw_installed ) { 1295 dev = makedev(CDEV_MAJOR, 0); 1296 cdevsw_add(&dev,&bpf_cdevsw, NULL); 1297 bpf_devsw_installed = 1; 1298 #ifdef DEVFS 1299 1300 for ( i = 0 ; i < NBPFILTER ; i++ ) { 1301 bpf_devfs_token[i] = 1302 devfs_add_devswf(&bpf_cdevsw, i, DV_CHR, 0, 0, 1303 0600, "bpf%d", i); 1304 } 1305 #endif 1306 } 1307 } 1308 1309 SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL) 1310 1311 #endif 1312