/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $Id: bpf.c,v 1.43 1998/10/04 23:04:48 alex Exp $
 */

#include "bpfilter.h"

#if NBPFILTER > 0

#ifndef __GNUC__
#define inline
#else
#define inline __inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>

#if defined(sparc) && BSD < 199103
#include <sys/stream.h>
#endif
#include <sys/poll.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include "opt_devfs.h"

#ifdef DEVFS
#include <sys/devfsext.h>
#endif /*DEVFS*/


/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET	26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds the descriptors, indexed by minor device #
 */
static struct bpf_if	*bpf_iflist;
static struct bpf_d	bpf_dtab[NBPFILTER];
static int		bpf_dtab_init;

static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void	bpf_detachd __P((struct bpf_d *d));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_ifname __P((struct ifnet *, struct ifreq *));
static void	bpf_mcopy __P((const void *, void *, size_t));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t)));
static void	reset_d __P((struct bpf_d *));
static int	bpf_setf __P((struct bpf_d *, struct bpf_program *));

static	d_open_t	bpfopen;
static	d_close_t	bpfclose;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;

#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw =
	{ bpfopen,	bpfclose,	bpfread,	bpfwrite,	/*23*/
	  bpfioctl,	nostop,		nullreset,	nodevtotty,	/* bpf */
	  bpfpoll,	nommap,		NULL,	"bpf",	NULL,	-1 };


static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
#if defined(__FreeBSD__) || defined(__bsdi__)
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
#else
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
#endif
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

#ifdef __FreeBSD__
	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;
#endif

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGETHDR(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MHLEN) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	bp->bif_ifp->if_bpf = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		d->bd_bif->bif_ifp->if_bpf = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d))
		return (EBUSY);

	/* Mark it in use (the bzero does D_MARKUSED) and do most initialization. */
	bzero((char *)d, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	register int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
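
/*
 * Buffering model: each descriptor cycles three equally sized buffers.
 * The store buffer (bd_sbuf/bd_slen) is the one catchpacket() appends to,
 * the hold buffer (bd_hbuf/bd_hlen) is the one bpfread() copies out to the
 * user, and the free buffer (bd_fbuf) is the spare that becomes the next
 * store buffer on rotation.  If the store buffer fills while the hold
 * buffer is still unread (bd_fbuf == 0), newly arriving packets are
 * dropped and bd_dcount is incremented.
 */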
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(dev, uio, ioflag)
	dev_t dev;
	register struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		if (ioflag & IO_NDELAY)
			error = EWOULDBLOCK;
		else
			error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
					  d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	struct proc *p;

	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig) {
		if (d->bd_pgid > 0)
			gsignal(d->bd_pgid, d->bd_sig);
		else if ((p = pfind(-d->bd_pgid)) != NULL)
			psignal(p, d->bd_sig);
	}

#if BSD >= 199103
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}

static int
bpfwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;
	int datlen;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
	if (error)
		return (error);

	if (datlen > ifp->if_mtu) {
		/* Don't leak the mbuf chain allocated by bpf_movein(). */
		m_freem(m);
		return (EMSGSIZE);
	}

	s = splnet();
#if BSD >= 199103
	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
#else
	error = (*ifp->if_output)(ifp, m, &dst);
#endif
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 */
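
/*
 * A minimal sketch of how a user process typically drives this device
 * (illustrative only; assumes a /dev/bpf0 node and an interface "ed0"):
 *
 *	int fd = open("/dev/bpf0", O_RDONLY);
 *	u_int bufsize;
 *	struct ifreq ifr;
 *
 *	ioctl(fd, BIOCGBLEN, &bufsize);		read() length must equal this
 *	strncpy(ifr.ifr_name, "ed0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);		attach to the interface
 *	ioctl(fd, BIOCPROMISC, 0);		optionally go promiscuous
 *	n = read(fd, buf, bufsize);
 *
 * Each read() returns zero or more records; every record begins with a
 * struct bpf_hdr, and the next one starts BPF_WORDALIGN(bh_hdrlen +
 * bh_caplen) bytes past the start of the current one.
 */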
/* ARGSUSED */
static int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

/* N.B.  ioctl (FIOSETOWN) and fcntl (F_SETOWN) both end up doing the
   equivalent of a TIOCSPGRP and hence end up here.  *However* TIOCSPGRP's arg
   is a process group if it's positive and a process id if it's negative.  This
   is exactly the opposite of what the other two functions want!
   Therefore there is code in ioctl and fcntl to negate the arg before
   calling here. */

	case TIOCSPGRP:		/* Process or group to send signals to */
		d->bd_pgid = *(int *)addr;
		break;

	case TIOCGPGRP:
		*(int *)addr = d->bd_pgid;
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Convert an interface name plus unit number of an ifp to a single
 * name which is returned in the ifr.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{
	char *s = ifp->if_name;
	char *d = ifr->ifr_name;

	while (*d++ = *s++)
		continue;
	d--;	/* back to the null */
	/* XXX Assume that unit number is less than 10. */
	*d++ = ifp->if_unit + '0';
	*d = '\0';
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpfpoll(dev, events, p)
	register dev_t dev;
	int events;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;
	int revents = 0;

	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	s = splimp();
	if (events & (POLLIN | POLLRDNORM))
		if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &d->bd_sel);

	splx(s);
	return (revents);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = ifp->if_bpf;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	register size_t len;
{
	register const struct mbuf *m;
	register u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
	struct bpf_if *bp = ifp->if_bpf;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Wake up pending reads if the buffer fills up or
 * immediate mode is set.  "cpfn" is the routine called to do the
 * actual data transfer.  bcopy is passed in to copy contiguous chunks,
 * while bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn) __P((const void *, void *, size_t));
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_DEVBUF);

	D_MARKFREE(d);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * describing the interface to be attached; dlt is the link layer type;
 * hdrlen is the fixed size of the link header (variable length headers
 * are not yet supported).
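 * For example, an Ethernet driver would typically call
 * bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header)), i.e. a fixed
 * 14-byte link header.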
 */
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	int i;

	bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	bp->bif_ifp->if_bpf = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	/*
	 * Mark all the descriptors free if this hasn't been done.
	 */
	if (!bpf_dtab_init) {
		for (i = 0; i < NBPFILTER; ++i)
			D_MARKFREE(&bpf_dtab[i]);
		bpf_dtab_init = 1;
	}

	if (bootverbose)
		printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}

#ifdef DEVFS
static	void *bpf_devfs_token[NBPFILTER];
#endif

static	int bpf_devsw_installed;

static void bpf_drvinit __P((void *unused));

static void
bpf_drvinit(unused)
	void *unused;
{
	dev_t dev;
#ifdef DEVFS
	int i;
#endif

	if (!bpf_devsw_installed) {
		dev = makedev(CDEV_MAJOR, 0);
		cdevsw_add(&dev, &bpf_cdevsw, NULL);
		bpf_devsw_installed = 1;
#ifdef DEVFS

		for (i = 0; i < NBPFILTER; i++) {
			bpf_devfs_token[i] =
				devfs_add_devswf(&bpf_cdevsw, i, DV_CHR, 0, 0,
						 0600, "bpf%d", i);
		}
#endif
	}
}

SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, bpf_drvinit, NULL)

#endif