/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $Id: bpf.c,v 1.30 1997/03/23 03:37:14 bde Exp $
 */

#include "bpfilter.h"

#if NBPFILTER > 0

#ifndef __GNUC__
#define inline
#else
#define inline __inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>

#include <sys/fcntl.h>
#if defined(sparc) && BSD < 199103
#include <sys/stream.h>
#endif
#include <sys/uio.h>

#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <net/if.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/errno.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/conf.h>
#ifdef DEVFS
#include <sys/devfsext.h>
#endif /*DEVFS*/


/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET	26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds the descriptors, indexed by minor device #
 */
static struct bpf_if	*bpf_iflist;
static struct bpf_d	bpf_dtab[NBPFILTER];

static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void	bpf_detachd __P((struct bpf_d *d));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_ifname __P((struct ifnet *, struct ifreq *));
static void	bpf_mcopy __P((const void *, void *, u_int));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, u_int)));
static void	reset_d __P((struct bpf_d *));
static int	bpf_setf __P((struct bpf_d *, struct bpf_program *));

static	d_open_t	bpfopen;
static	d_close_t	bpfclose;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_select_t	bpfselect;

#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw =
	{ bpfopen,	bpfclose,	bpfread,	bpfwrite,	/*23*/
	  bpfioctl,	nostop,		nullreset,	nodevtotty,	/* bpf */
	  bpfselect,	nommap,		NULL,	"bpf",	NULL,	-1 };


static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
#if defined(__FreeBSD__) || defined(__bsdi__)
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
#else
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
#endif
		break;

	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGETHDR(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MHLEN) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	bp->bif_ifp->if_bpf = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		d->bd_bif->bif_ifp->if_bpf = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)

/*
 * Open the bpf device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static	int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d))
		return (EBUSY);

	/* Mark "free" and do most initialization. */
	bzero((char *)d, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static	int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	register int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 * bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(dev, uio, ioflag)
	dev_t dev;
	register struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * One or more packets arrived since the previous
			 * read or while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		if (d->bd_rtout != -1)
			error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
					  d->bd_rtout);
		else
			error = EWOULDBLOCK; /* User requested non-blocking I/O */
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	struct proc *p;

	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig)
		if (d->bd_pgid > 0)
			gsignal (d->bd_pgid, d->bd_sig);
		else if (p = pfind (-d->bd_pgid))
			psignal (p, d->bd_sig);

#if BSD >= 199103
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}

static	int
bpfwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;
	int datlen;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
	if (error)
		return (error);

	if (datlen > ifp->if_mtu) {
		/* bpf_movein() allocated the mbuf; free it before failing. */
		m_freem(m);
		return (EMSGSIZE);
	}

	s = splnet();
#if BSD >= 199103
	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
#else
	error = (*ifp->if_output)(ifp, m, &dst);
#endif
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 */
/* ARGSUSED */
static	int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	int cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec;

			/* Compute number of milliseconds. */
			msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
			/* Scale milliseconds to ticks.  Assume hard
			   clock has millisecond or greater resolution
			   (i.e. tick >= 1000).  For 10ms hardclock,
			   tick/1000 = 10, so rtout<-msec/10. */
			d->bd_rtout = msec / (tick / 1000);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec = d->bd_rtout;

			msec *= tick / 1000;
			tv->tv_sec = msec / 1000;
			tv->tv_usec = msec % 1000;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}


	case FIONBIO:		/* Non-blocking I/O */
		if (*(int *)addr)
			d->bd_rtout = -1;
		else
			d->bd_rtout = 0;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

/* N.B.  ioctl (FIOSETOWN) and fcntl (F_SETOWN) both end up doing the
   equivalent of a TIOCSPGRP and hence end up here.  *However* TIOCSPGRP's arg
   is a process group if it's positive and a process id if it's negative.  This
   is exactly the opposite of what the other two functions want!  Therefore
   there is code in ioctl and fcntl to negate the arg before calling here. */

	case TIOCSPGRP:		/* Process or group to send signals to */
		d->bd_pgid = *(int *)addr;
		break;

	case TIOCGPGRP:
		*(int *)addr = d->bd_pgid;
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Convert an interface name plus unit number of an ifp to a single
 * name which is returned in the ifr.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{
	char *s = ifp->if_name;
	char *d = ifr->ifr_name;

	while (*d++ = *s++)
		continue;
	d--;	/* back to the null */
	/* XXX Assume that unit number is less than 10. */
	*d++ = ifp->if_unit + '0';
	*d = '\0';
}

/*
 * The new select interface passes down the proc pointer; the old select
 * stubs had to grab it out of the user struct.  This glue allows either case.
 */
#if BSD >= 199103
#define bpf_select bpfselect
#else
static int
bpfselect(dev, rw)
	register dev_t dev;
	int rw;
{
	return (bpf_select(dev, rw, u.u_procp));
}
#endif

/*
 * Support for select() system call
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpf_select(dev, rw, p)
	register dev_t dev;
	int rw;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;

	if (rw != FREAD)
		return (0);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	s = splimp();
	if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) {
		/*
		 * There is data waiting.
		 */
		splx(s);
		return (1);
	}
#if BSD >= 199103
	selrecord(p, &d->bd_sel);
#else
	/*
	 * No data ready.  If there's already a select() waiting on this
	 * minor device then this is a collision.  This shouldn't happen
	 * because minors really should not be shared, but if a process
	 * forks while one of these is open, it is possible that both
	 * processes could select on the same descriptor.
	 */
	if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
		d->bd_selcoll = 1;
	else
		d->bd_selproc = p;
#endif
	splx(s);
	return (0);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here would be if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = ifp->if_bpf;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	register u_int len;
{
	register const struct mbuf *m;
	register u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
	struct bpf_if *bp = ifp->if_bpf;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer, waking up any pending reads if the buffer fills
 * or immediate mode is set.  "copy" is the routine called to do
 * the actual data transfer.  bcopy is passed in to copy contiguous
 * chunks, while bpf_mcopy is passed in to copy mbuf chains.  In the
 * latter case, pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn)(const void *, void *, u_int);
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it has not yet been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_DEVBUF);

	D_MARKFREE(d);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached; dlt is the link layer type;
 * hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	int i;
	bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	bp->bif_ifp->if_bpf = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	/*
	 * Mark all the descriptors free if this hasn't been done.
	 */
	if (!D_ISFREE(&bpf_dtab[0]))
		for (i = 0; i < NBPFILTER; ++i)
			D_MARKFREE(&bpf_dtab[i]);

	if (bootverbose)
		printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}

#ifdef DEVFS
static	void *bpf_devfs_token[NBPFILTER];
#endif

static	int bpf_devsw_installed = 0;

static void bpf_drvinit(void *unused)
{
	dev_t dev;
#ifdef DEVFS
	int i;
#endif

	if (!bpf_devsw_installed) {
		dev = makedev(CDEV_MAJOR, 0);
		cdevsw_add(&dev, &bpf_cdevsw, NULL);
		bpf_devsw_installed = 1;
#ifdef DEVFS

		for (i = 0; i < NBPFILTER; i++) {
			bpf_devfs_token[i] =
				devfs_add_devswf(&bpf_cdevsw, i, DV_CHR, 0, 0,
						 0600, "bpf%d", i);
		}
#endif
	}
}

SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)

#endif