/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $Id: bpf.c,v 1.21 1995/12/14 09:53:10 phk Exp $
 */

#include "bpfilter.h"

#if NBPFILTER > 0

#ifndef __GNUC__
#define inline
#else
#define inline __inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <machine/cpu.h> /* for bootverbose */
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/ioctl.h>

#include <sys/file.h>
#if defined(sparc) && BSD < 199103
#include <sys/stream.h>
#endif
#include <sys/uio.h>

#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <net/if.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/errno.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/conf.h>
#ifdef DEVFS
#include <sys/devfsext.h>
#endif /*DEVFS*/

/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET	26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds the descriptors, indexed by minor device #
 */
static struct bpf_if	*bpf_iflist;
static struct bpf_d	bpf_dtab[NBPFILTER];

static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void	bpf_detachd __P((struct bpf_d *d));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_ifname __P((struct ifnet *, struct ifreq *));
static void	bpf_mcopy __P((const void *, void *, u_int));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, u_int)));
static void	reset_d __P((struct bpf_d *));
static int	bpf_setf __P((struct bpf_d *, struct bpf_program *));

static	d_open_t	bpfopen;
static	d_close_t	bpfclose;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_select_t	bpfselect;

#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw =
	{ bpfopen,	bpfclose,	bpfread,	bpfwrite,	/*23*/
	  bpfioctl,	nostop,		nullreset,	nodevtotty,	/* bpf */
	  bpfselect,	nommap,		NULL,		"bpf",	NULL,	-1 };


static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
#if defined(__FreeBSD__) || defined(__bsdi__)
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
#else
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
#endif
		break;

	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGETHDR(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MHLEN) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	bp->bif_ifp->if_bpf = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		d->bd_bif->bif_ifp->if_bpf = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)
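
/*
 * Illustration (a clarifying sketch, not part of the original source):
 * the lifecycle of a descriptor slot in terms of the macros above,
 * where d is an element of bpf_dtab[]:
 *
 *	D_MARKFREE(d);		bd_next == d, so D_ISFREE(d) is true
 *	D_MARKUSED(d);		bd_next == 0, so D_ISFREE(d) is false
 *	bpf_attachd(d, bp);	bd_next points into bp's listener list,
 *				which also reads as "not free"
 *
 * bpfopen() relies on bzero() leaving bd_next == 0, which is equivalent
 * to D_MARKUSED(d).
 */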

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d))
		return (EBUSY);

	/* Mark "in use" and do most initialization. */
	bzero((char *)d, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	register int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(dev, uio, ioflag)
	dev_t dev;
	register struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		if (d->bd_rtout != -1)
			error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
					  d->bd_rtout);
		else
			error = EWOULDBLOCK; /* User requested non-blocking I/O */
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	struct proc *p;

	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig)
		if (d->bd_pgid > 0)
			gsignal (d->bd_pgid, d->bd_sig);
		else if (p = pfind (-d->bd_pgid))
			psignal (p, d->bd_sig);

#if BSD >= 199103
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}

static int
bpfwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;
	int datlen;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
	if (error)
		return (error);

	if (datlen > ifp->if_mtu) {
		/* Don't leak the mbuf chain built by bpf_movein(). */
		m_freem(m);
		return (EMSGSIZE);
	}

	s = splnet();
#if BSD >= 199103
	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
#else
	error = (*ifp->if_output)(ifp, m, &dst);
#endif
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 */
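
/*
 * For illustration only (not part of the driver): a minimal sketch of how
 * a user process typically drives these ioctls.  The device path and the
 * interface name are assumptions, not something this file defines.
 *
 *	struct ifreq ifr;
 *	u_int bufsize, one = 1;
 *	int fd = open("/dev/bpf0", O_RDONLY);
 *
 *	ioctl(fd, BIOCGBLEN, &bufsize);		learn the kernel buffer size
 *	strncpy(ifr.ifr_name, "ed0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);		attach to an interface
 *	ioctl(fd, BIOCIMMEDIATE, &one);		optional: wake reads per packet
 *	read(fd, buf, bufsize);			must read exactly bufsize bytes
 *
 * Note that BIOCSBLEN has to be issued before BIOCSETIF, since bpfioctl()
 * rejects it once an interface is attached, and bpfread() insists that the
 * read size equal the kernel buffer size.
 */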
620 */ 621 /* ARGSUSED */ 622 static int 623 bpfioctl(dev, cmd, addr, flags, p) 624 dev_t dev; 625 int cmd; 626 caddr_t addr; 627 int flags; 628 struct proc *p; 629 { 630 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 631 int s, error = 0; 632 633 switch (cmd) { 634 635 default: 636 error = EINVAL; 637 break; 638 639 /* 640 * Check for read packet available. 641 */ 642 case FIONREAD: 643 { 644 int n; 645 646 s = splimp(); 647 n = d->bd_slen; 648 if (d->bd_hbuf) 649 n += d->bd_hlen; 650 splx(s); 651 652 *(int *)addr = n; 653 break; 654 } 655 656 case SIOCGIFADDR: 657 { 658 struct ifnet *ifp; 659 660 if (d->bd_bif == 0) 661 error = EINVAL; 662 else { 663 ifp = d->bd_bif->bif_ifp; 664 error = (*ifp->if_ioctl)(ifp, cmd, addr); 665 } 666 break; 667 } 668 669 /* 670 * Get buffer len [for read()]. 671 */ 672 case BIOCGBLEN: 673 *(u_int *)addr = d->bd_bufsize; 674 break; 675 676 /* 677 * Set buffer length. 678 */ 679 case BIOCSBLEN: 680 #if BSD < 199103 681 error = EINVAL; 682 #else 683 if (d->bd_bif != 0) 684 error = EINVAL; 685 else { 686 register u_int size = *(u_int *)addr; 687 688 if (size > BPF_MAXBUFSIZE) 689 *(u_int *)addr = size = BPF_MAXBUFSIZE; 690 else if (size < BPF_MINBUFSIZE) 691 *(u_int *)addr = size = BPF_MINBUFSIZE; 692 d->bd_bufsize = size; 693 } 694 #endif 695 break; 696 697 /* 698 * Set link layer read filter. 699 */ 700 case BIOCSETF: 701 error = bpf_setf(d, (struct bpf_program *)addr); 702 break; 703 704 /* 705 * Flush read packet buffer. 706 */ 707 case BIOCFLUSH: 708 s = splimp(); 709 reset_d(d); 710 splx(s); 711 break; 712 713 /* 714 * Put interface into promiscuous mode. 715 */ 716 case BIOCPROMISC: 717 if (d->bd_bif == 0) { 718 /* 719 * No interface attached yet. 720 */ 721 error = EINVAL; 722 break; 723 } 724 s = splimp(); 725 if (d->bd_promisc == 0) { 726 error = ifpromisc(d->bd_bif->bif_ifp, 1); 727 if (error == 0) 728 d->bd_promisc = 1; 729 } 730 splx(s); 731 break; 732 733 /* 734 * Get device parameters. 735 */ 736 case BIOCGDLT: 737 if (d->bd_bif == 0) 738 error = EINVAL; 739 else 740 *(u_int *)addr = d->bd_bif->bif_dlt; 741 break; 742 743 /* 744 * Set interface name. 745 */ 746 case BIOCGETIF: 747 if (d->bd_bif == 0) 748 error = EINVAL; 749 else 750 bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr); 751 break; 752 753 /* 754 * Set interface. 755 */ 756 case BIOCSETIF: 757 error = bpf_setif(d, (struct ifreq *)addr); 758 break; 759 760 /* 761 * Set read timeout. 762 */ 763 case BIOCSRTIMEOUT: 764 { 765 struct timeval *tv = (struct timeval *)addr; 766 u_long msec; 767 768 /* Compute number of milliseconds. */ 769 msec = tv->tv_sec * 1000 + tv->tv_usec / 1000; 770 /* Scale milliseconds to ticks. Assume hard 771 clock has millisecond or greater resolution 772 (i.e. tick >= 1000). For 10ms hardclock, 773 tick/1000 = 10, so rtout<-msec/10. */ 774 d->bd_rtout = msec / (tick / 1000); 775 break; 776 } 777 778 /* 779 * Get read timeout. 780 */ 781 case BIOCGRTIMEOUT: 782 { 783 struct timeval *tv = (struct timeval *)addr; 784 u_long msec = d->bd_rtout; 785 786 msec *= tick / 1000; 787 tv->tv_sec = msec / 1000; 788 tv->tv_usec = msec % 1000; 789 break; 790 } 791 792 /* 793 * Get packet stats. 794 */ 795 case BIOCGSTATS: 796 { 797 struct bpf_stat *bs = (struct bpf_stat *)addr; 798 799 bs->bs_recv = d->bd_rcount; 800 bs->bs_drop = d->bd_dcount; 801 break; 802 } 803 804 /* 805 * Set immediate mode. 
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}


	case FIONBIO:		/* Non-blocking I/O */
		if (*(int *)addr)
			d->bd_rtout = -1;
		else
			d->bd_rtout = 0;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

/* N.B.  ioctl (FIOSETOWN) and fcntl (F_SETOWN) both end up doing the
   equivalent of a TIOCSPGRP and hence end up here.  *However* TIOCSPGRP's arg
   is a process group if it's positive and a process id if it's negative.  This
   is exactly the opposite of what the other two functions want!  Therefore
   there is code in ioctl and fcntl to negate the arg before calling here. */

	case TIOCSPGRP:		/* Process or group to send signals to */
		d->bd_pgid = *(int *)addr;
		break;

	case TIOCGPGRP:
		*(int *)addr = d->bd_pgid;
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
943 */ 944 if ((ifp->if_flags & IFF_UP) == 0) 945 return (ENETDOWN); 946 947 if (d->bd_sbuf == 0) { 948 error = bpf_allocbufs(d); 949 if (error != 0) 950 return (error); 951 } 952 s = splimp(); 953 if (bp != d->bd_bif) { 954 if (d->bd_bif) 955 /* 956 * Detach if attached to something else. 957 */ 958 bpf_detachd(d); 959 960 bpf_attachd(d, bp); 961 } 962 reset_d(d); 963 splx(s); 964 return (0); 965 } 966 /* Not found. */ 967 return (ENXIO); 968 } 969 970 /* 971 * Convert an interface name plus unit number of an ifp to a single 972 * name which is returned in the ifr. 973 */ 974 static void 975 bpf_ifname(ifp, ifr) 976 struct ifnet *ifp; 977 struct ifreq *ifr; 978 { 979 char *s = ifp->if_name; 980 char *d = ifr->ifr_name; 981 982 while (*d++ = *s++) 983 continue; 984 /* XXX Assume that unit number is less than 10. */ 985 *d++ = ifp->if_unit + '0'; 986 *d = '\0'; 987 } 988 989 /* 990 * The new select interface passes down the proc pointer; the old select 991 * stubs had to grab it out of the user struct. This glue allows either case. 992 */ 993 #if BSD >= 199103 994 #define bpf_select bpfselect 995 #else 996 static int 997 bpfselect(dev, rw) 998 register dev_t dev; 999 int rw; 1000 { 1001 return (bpf_select(dev, rw, u.u_procp)); 1002 } 1003 #endif 1004 1005 /* 1006 * Support for select() system call 1007 * 1008 * Return true iff the specific operation will not block indefinitely. 1009 * Otherwise, return false but make a note that a selwakeup() must be done. 1010 */ 1011 int 1012 bpf_select(dev, rw, p) 1013 register dev_t dev; 1014 int rw; 1015 struct proc *p; 1016 { 1017 register struct bpf_d *d; 1018 register int s; 1019 1020 if (rw != FREAD) 1021 return (0); 1022 /* 1023 * An imitation of the FIONREAD ioctl code. 1024 */ 1025 d = &bpf_dtab[minor(dev)]; 1026 1027 s = splimp(); 1028 if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) { 1029 /* 1030 * There is data waiting. 1031 */ 1032 splx(s); 1033 return (1); 1034 } 1035 #if BSD >= 199103 1036 selrecord(p, &d->bd_sel); 1037 #else 1038 /* 1039 * No data ready. If there's already a select() waiting on this 1040 * minor device then this is a collision. This shouldn't happen 1041 * because minors really should not be shared, but if a process 1042 * forks while one of these is open, it is possible that both 1043 * processes could select on the same descriptor. 1044 */ 1045 if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait) 1046 d->bd_selcoll = 1; 1047 else 1048 d->bd_selproc = p; 1049 #endif 1050 splx(s); 1051 return (0); 1052 } 1053 1054 /* 1055 * Incoming linkage from device drivers. Process the packet pkt, of length 1056 * pktlen, which is stored in a contiguous buffer. The packet is parsed 1057 * by each process' filter, and if accepted, stashed into the corresponding 1058 * buffer. 1059 */ 1060 void 1061 bpf_tap(ifp, pkt, pktlen) 1062 struct ifnet *ifp; 1063 register u_char *pkt; 1064 register u_int pktlen; 1065 { 1066 struct bpf_if *bp; 1067 register struct bpf_d *d; 1068 register u_int slen; 1069 /* 1070 * Note that the ipl does not have to be raised at this point. 1071 * The only problem that could arise here is that if two different 1072 * interfaces shared any data. This is not the case. 1073 */ 1074 bp = ifp->if_bpf; 1075 for (d = bp->bif_dlist; d != 0; d = d->bd_next) { 1076 ++d->bd_rcount; 1077 slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen); 1078 if (slen != 0) 1079 catchpacket(d, pkt, pktlen, slen, bcopy); 1080 } 1081 } 1082 1083 /* 1084 * Copy data from an mbuf chain into a buffer. 

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	register u_int len;
{
	register const struct mbuf *m;
	register u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		(void)memcpy((caddr_t)dst, mtod(m, caddr_t), count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
	struct bpf_if *bp = ifp->if_bpf;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn)(const void *, void *, u_int);
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
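	/*
	 * Clarifying note: bh_caplen (set below) is the number of packet
	 * bytes actually stored, while bh_datalen above holds the packet's
	 * original length, so a reader can tell when a capture was
	 * truncated by the snap length or by the buffer size limit.
	 */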
1207 */ 1208 (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen)); 1209 d->bd_slen = curlen + totlen; 1210 } 1211 1212 /* 1213 * Initialize all nonzero fields of a descriptor. 1214 */ 1215 static int 1216 bpf_allocbufs(d) 1217 register struct bpf_d *d; 1218 { 1219 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK); 1220 if (d->bd_fbuf == 0) 1221 return (ENOBUFS); 1222 1223 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK); 1224 if (d->bd_sbuf == 0) { 1225 free(d->bd_fbuf, M_DEVBUF); 1226 return (ENOBUFS); 1227 } 1228 d->bd_slen = 0; 1229 d->bd_hlen = 0; 1230 return (0); 1231 } 1232 1233 /* 1234 * Free buffers currently in use by a descriptor. 1235 * Called on close. 1236 */ 1237 static void 1238 bpf_freed(d) 1239 register struct bpf_d *d; 1240 { 1241 /* 1242 * We don't need to lock out interrupts since this descriptor has 1243 * been detached from its interface and it yet hasn't been marked 1244 * free. 1245 */ 1246 if (d->bd_sbuf != 0) { 1247 free(d->bd_sbuf, M_DEVBUF); 1248 if (d->bd_hbuf != 0) 1249 free(d->bd_hbuf, M_DEVBUF); 1250 if (d->bd_fbuf != 0) 1251 free(d->bd_fbuf, M_DEVBUF); 1252 } 1253 if (d->bd_filter) 1254 free((caddr_t)d->bd_filter, M_DEVBUF); 1255 1256 D_MARKFREE(d); 1257 } 1258 1259 /* 1260 * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *) 1261 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed 1262 * size of the link header (variable length headers not yet supported). 1263 */ 1264 void 1265 bpfattach(ifp, dlt, hdrlen) 1266 struct ifnet *ifp; 1267 u_int dlt, hdrlen; 1268 { 1269 struct bpf_if *bp; 1270 int i; 1271 bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT); 1272 if (bp == 0) 1273 panic("bpfattach"); 1274 1275 bp->bif_dlist = 0; 1276 bp->bif_ifp = ifp; 1277 bp->bif_dlt = dlt; 1278 1279 bp->bif_next = bpf_iflist; 1280 bpf_iflist = bp; 1281 1282 bp->bif_ifp->if_bpf = 0; 1283 1284 /* 1285 * Compute the length of the bpf header. This is not necessarily 1286 * equal to SIZEOF_BPF_HDR because we want to insert spacing such 1287 * that the network layer header begins on a longword boundary (for 1288 * performance reasons and to alleviate alignment restrictions). 1289 */ 1290 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; 1291 1292 /* 1293 * Mark all the descriptors free if this hasn't been done. 1294 */ 1295 if (!D_ISFREE(&bpf_dtab[0])) 1296 for (i = 0; i < NBPFILTER; ++i) 1297 D_MARKFREE(&bpf_dtab[i]); 1298 1299 if (bootverbose) 1300 printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit); 1301 } 1302 1303 static void *bpf_devfs_token[NBPFILTER]; 1304 1305 static bpf_devsw_installed = 0; 1306 1307 static void bpf_drvinit(void *unused) 1308 { 1309 dev_t dev; 1310 int i; 1311 char name[32]; 1312 1313 if( ! bpf_devsw_installed ) { 1314 dev = makedev(CDEV_MAJOR, 0); 1315 cdevsw_add(&dev,&bpf_cdevsw, NULL); 1316 bpf_devsw_installed = 1; 1317 #ifdef DEVFS 1318 for ( i = 0 ; i < NBPFILTER ; i++ ) { 1319 sprintf(name,"bpf%d",i); 1320 bpf_devfs_token[i] = 1321 devfs_add_devsw( "/", name, 1322 &bpf_cdevsw, i, DV_CHR, 0, 0, 0600); 1323 } 1324 #endif 1325 } 1326 } 1327 1328 SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL) 1329 1330 #endif 1331