1 /* 2 * Copyright (c) 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)bpf.c 8.2 (Berkeley) 3/28/94 39 * 40 * $Id: bpf.c,v 1.12 1995/09/20 20:48:29 wollman Exp $ 41 */ 42 43 #include "bpfilter.h" 44 45 #if NBPFILTER > 0 46 47 #ifndef __GNUC__ 48 #define inline 49 #else 50 #define inline __inline 51 #endif 52 53 #include <sys/param.h> 54 #include <sys/systm.h> 55 #include <machine/cpu.h> /* for bootverbose */ 56 #include <sys/mbuf.h> 57 #include <sys/buf.h> 58 #include <sys/time.h> 59 #include <sys/proc.h> 60 #include <sys/user.h> 61 #include <sys/ioctl.h> 62 63 #include <sys/file.h> 64 #if defined(sparc) && BSD < 199103 65 #include <sys/stream.h> 66 #endif 67 #include <sys/uio.h> 68 69 #include <sys/protosw.h> 70 #include <sys/socket.h> 71 #include <net/if.h> 72 73 #include <net/bpf.h> 74 #include <net/bpfdesc.h> 75 76 #include <sys/errno.h> 77 78 #include <netinet/in.h> 79 #include <netinet/if_ether.h> 80 #include <sys/kernel.h> 81 82 /* 83 * Older BSDs don't have kernel malloc. 84 */ 85 #if BSD < 199103 86 extern bcopy(); 87 static caddr_t bpf_alloc(); 88 #include <net/bpf_compat.h> 89 #define BPF_BUFSIZE (MCLBYTES-8) 90 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio) 91 #else 92 #define BPF_BUFSIZE 4096 93 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) 94 #endif 95 96 #define PRINET 26 /* interruptible */ 97 98 /* 99 * The default read buffer size is patchable. 
 */
int bpf_bufsize = BPF_BUFSIZE;

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds the descriptors, indexed by minor device #
 */
struct bpf_if *bpf_iflist;
struct bpf_d bpf_dtab[NBPFILTER];

#if BSD >= 199207
/*
 * bpfilterattach() is called at boot time in new systems.  We do
 * nothing here since old systems will not call this.
 */
/* ARGSUSED */
void
bpfilterattach(n)
	int n;
{
}
#endif

static int bpf_allocbufs __P((struct bpf_d *));
static void bpf_freed __P((struct bpf_d *));
static void bpf_ifname __P((struct ifnet *, struct ifreq *));
static void bpf_mcopy __P((const void *, void *, u_int));
static int bpf_movein __P((struct uio *, int,
			   struct mbuf **, struct sockaddr *, int *));
static int bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
	bpf_wakeup __P((struct bpf_d *));
static void catchpacket __P((struct bpf_d *, u_char *, u_int,
			     u_int, void (*)(const void *, void *, u_int)));
static void reset_d __P((struct bpf_d *));

/*
 * Copy a packet being written from user space (uio) into a freshly
 * allocated mbuf, and build a link-level sockaddr appropriate for the
 * interface's data link type.  On success *mp holds the mbuf and
 * *datlen the payload length (excluding the link header, which is
 * copied into sockp->sa_data).  Returns 0 or an errno; the mbuf is
 * freed on every failure path after allocation.
 */
static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
#if defined(__FreeBSD__) || defined(__bsdi__)
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
#else
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
#endif
		break;

	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	default:
		/* Unknown link type: refuse the write. */
		return (EIO);
	}

	len = uio->uio_resid;
	/*
	 * NOTE(review): *datlen goes negative if the user supplies fewer
	 * than hlen bytes; the caller only uses it for an MTU check, but
	 * confirm no caller treats it as unsigned.
	 */
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGETHDR(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	/* Packet too big for a plain header mbuf: attach a cluster. */
	if (len > MHLEN) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;
	/*
	 * Make room for link header: the header bytes go into the
	 * sockaddr, not the mbuf data, so skip past them here.
	 */
	if (hlen != 0) {
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	/* Copy the payload proper into the mbuf. */
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	/* Callers (bpfclose, bpf_setif) invoke this at splimp. */
	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d))
		return (EBUSY);

	/*
	 * Do most initialization.  Note: the bzero sets bd_next to 0,
	 * which is exactly D_MARKUSED(d), so this marks the descriptor
	 * busy (despite the original comment saying "free").
	 */
	bzero((char *)d, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	register int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	/* Frees buffers and filter, then D_MARKFREE()s the slot. */
	bpf_freed(d);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

/*
 * Emulate tsleep() with sleep()+timeout(): arms a timer for bd_rtout
 * ticks, sleeps, and maps the outcome to 0 / EWOULDBLOCK / EINTR the
 * way tsleep would.
 */
int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
406 */ 407 #define ROTATE_BUFFERS(d) \ 408 (d)->bd_hbuf = (d)->bd_sbuf; \ 409 (d)->bd_hlen = (d)->bd_slen; \ 410 (d)->bd_sbuf = (d)->bd_fbuf; \ 411 (d)->bd_slen = 0; \ 412 (d)->bd_fbuf = 0; 413 /* 414 * bpfread - read next chunk of packets from buffers 415 */ 416 int 417 bpfread(dev, uio, ioflag) 418 dev_t dev; 419 register struct uio *uio; 420 int ioflag; 421 { 422 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 423 int error; 424 int s; 425 426 /* 427 * Restrict application to use a buffer the same size as 428 * as kernel buffers. 429 */ 430 if (uio->uio_resid != d->bd_bufsize) 431 return (EINVAL); 432 433 s = splimp(); 434 /* 435 * If the hold buffer is empty, then do a timed sleep, which 436 * ends when the timeout expires or when enough packets 437 * have arrived to fill the store buffer. 438 */ 439 while (d->bd_hbuf == 0) { 440 if (d->bd_immediate && d->bd_slen != 0) { 441 /* 442 * A packet(s) either arrived since the previous 443 * read or arrived while we were asleep. 444 * Rotate the buffers and return what's here. 445 */ 446 ROTATE_BUFFERS(d); 447 break; 448 } 449 if (d->bd_rtout != -1) 450 error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf", 451 d->bd_rtout); 452 else 453 error = EWOULDBLOCK; /* User requested non-blocking I/O */ 454 if (error == EINTR || error == ERESTART) { 455 splx(s); 456 return (error); 457 } 458 if (error == EWOULDBLOCK) { 459 /* 460 * On a timeout, return what's in the buffer, 461 * which may be nothing. If there is something 462 * in the store buffer, we can rotate the buffers. 463 */ 464 if (d->bd_hbuf) 465 /* 466 * We filled up the buffer in between 467 * getting the timeout and arriving 468 * here, so we don't need to rotate. 469 */ 470 break; 471 472 if (d->bd_slen == 0) { 473 splx(s); 474 return (0); 475 } 476 ROTATE_BUFFERS(d); 477 break; 478 } 479 } 480 /* 481 * At this point, we know we have something in the hold slot. 482 */ 483 splx(s); 484 485 /* 486 * Move data from hold buffer into user space. 
487 * We know the entire buffer is transferred since 488 * we checked above that the read buffer is bpf_bufsize bytes. 489 */ 490 error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio); 491 492 s = splimp(); 493 d->bd_fbuf = d->bd_hbuf; 494 d->bd_hbuf = 0; 495 d->bd_hlen = 0; 496 splx(s); 497 498 return (error); 499 } 500 501 502 /* 503 * If there are processes sleeping on this descriptor, wake them up. 504 */ 505 static inline void 506 bpf_wakeup(d) 507 register struct bpf_d *d; 508 { 509 struct proc *p; 510 511 wakeup((caddr_t)d); 512 if (d->bd_async && d->bd_sig) 513 if (d->bd_pgid > 0) 514 gsignal (d->bd_pgid, d->bd_sig); 515 else if (p = pfind (-d->bd_pgid)) 516 psignal (p, d->bd_sig); 517 518 #if BSD >= 199103 519 selwakeup(&d->bd_sel); 520 /* XXX */ 521 d->bd_sel.si_pid = 0; 522 #else 523 if (d->bd_selproc) { 524 selwakeup(d->bd_selproc, (int)d->bd_selcoll); 525 d->bd_selcoll = 0; 526 d->bd_selproc = 0; 527 } 528 #endif 529 } 530 531 int 532 bpfwrite(dev, uio, ioflag) 533 dev_t dev; 534 struct uio *uio; 535 int ioflag; 536 { 537 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 538 struct ifnet *ifp; 539 struct mbuf *m; 540 int error, s; 541 static struct sockaddr dst; 542 int datlen; 543 544 if (d->bd_bif == 0) 545 return (ENXIO); 546 547 ifp = d->bd_bif->bif_ifp; 548 549 if (uio->uio_resid == 0) 550 return (0); 551 552 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen); 553 if (error) 554 return (error); 555 556 if (datlen > ifp->if_mtu) 557 return (EMSGSIZE); 558 559 s = splnet(); 560 #if BSD >= 199103 561 error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0); 562 #else 563 error = (*ifp->if_output)(ifp, m, &dst); 564 #endif 565 splx(s); 566 /* 567 * The driver frees the mbuf. 568 */ 569 return (error); 570 } 571 572 /* 573 * Reset a descriptor by flushing its packet buffer and clearing the 574 * receive and drop counts. Should be called at splimp. 
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	int cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			/* splimp so bd_slen/bd_hlen are read consistently. */
			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				/* Pass straight through to the driver. */
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is
	 * attached (the buffers are allocated at BIOCSETIF time);
	 * the clamped size is written back so the caller sees it.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec;

			/* Compute number of milliseconds. */
			msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
			/* Scale milliseconds to ticks.  Assume hard
			   clock has millisecond or greater resolution
			   (i.e. tick >= 1000).  For 10ms hardclock,
			   tick/1000 = 10, so rtout<-msec/10.
			   NOTE(review): divides by zero if tick < 1000 —
			   assumption holds on the platforms of this era. */
			d->bd_rtout = msec / (tick / 1000);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec = d->bd_rtout;

			/* Inverse of the BIOCSRTIMEOUT conversion. */
			msec *= tick / 1000;
			tv->tv_sec = msec / 1000;
			tv->tv_usec = msec % 1000;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}


	case FIONBIO:		/* Non-blocking I/O */
		/* bd_rtout == -1 is the non-blocking sentinel (see bpfread). */
		if (*(int *)addr)
			d->bd_rtout = -1;
		else
			d->bd_rtout = 0;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	/* N.B.  ioctl (FIOSETOWN) and fcntl (F_SETOWN) both end up doing the
	   equivalent of a TIOCSPGRP and hence end up here.  *However* TIOCSPGRP's arg
	   is a process group if it's positive and a process id if it's negative.  This
	   is exactly the opposite of what the other two functions want!  Therefore
	   there is code in ioctl and fcntl to negate the arg before calling here. */

	case TIOCSPGRP:		/* Process or group to send signals to */
		d->bd_pgid = *(int *)addr;
		break;

	case TIOCGPGRP:
		*(int *)addr = d->bd_pgid;
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	/* A null insns pointer with zero length means "remove filter". */
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	/*
	 * Copy the program in from user space and validate it before
	 * installing; the swap happens at splimp so the tap never sees
	 * a half-installed filter.
	 */
	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	char *cp;
	int unit, s, error;

	/*
	 * Separate string into name part and unit number.  Put a null
	 * byte at the end of the name part, and compute the number.
	 * If the a unit number is unspecified, the default is 0,
	 * as initialized above.  XXX This should be common code.
	 * NOTE(review): the loop tests *cp after the increment, so the
	 * first character is never treated as a digit — fine for names
	 * like "ed0", but an all-digit name would misparse; confirm no
	 * such interface names exist.
	 */
	unit = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';
	while (*cp++) {
		if (*cp >= '0' && *cp <= '9') {
			unit = *cp - '0';
			*cp++ = '\0';
			while (*cp)
				unit = 10 * unit + *cp++ - '0';
			break;
		}
	}
	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || unit != ifp->if_unit
		    || strcmp(ifp->if_name, ifr->ifr_name) != 0)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Convert an interface name plus unit number of an ifp to a single
 * name which is returned in the ifr.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{
	char *s = ifp->if_name;
	char *d = ifr->ifr_name;

	/* Copy the name, then overwrite its NUL with the unit digit. */
	while ((*d++ = *s++))
		continue;
	/* XXX Assume that unit number is less than 10. */
	*d++ = ifp->if_unit + '0';
	*d = '\0';
}

/*
 * The new select interface passes down the proc pointer; the old select
 * stubs had to grab it out of the user struct.  This glue allows either case.
 */
#if BSD >= 199103
#define bpf_select bpfselect
#else
int
bpfselect(dev, rw)
	register dev_t dev;
	int rw;
{
	return (bpf_select(dev, rw, u.u_procp));
}
#endif

/*
 * Support for select() system call
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpf_select(dev, rw, p)
	register dev_t dev;
	int rw;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;

	/* bpf is only selectable for reading. */
	if (rw != FREAD)
		return (0);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	s = splimp();
	if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) {
		/*
		 * There is data waiting.
		 */
		splx(s);
		return (1);
	}
#if BSD >= 199103
	selrecord(p, &d->bd_sel);
#else
	/*
	 * No data ready.  If there's already a select() waiting on this
	 * minor device then this is a collision.  This shouldn't happen
	 * because minors really should not be shared, but if a process
	 * forks while one of these is open, it is possible that both
	 * processes could select on the same descriptor.
	 */
	if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
		d->bd_selcoll = 1;
	else
		d->bd_selproc = p;
#endif
	splx(s);
	return (0);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(arg, pkt, pktlen)
	caddr_t arg;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = (struct bpf_if *)arg;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		/* slen is the filter's snapshot length; 0 means reject. */
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	register u_int len;
{
	register const struct mbuf *m;
	register u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			/* Chain shorter than requested length: caller bug. */
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		(void)memcpy((caddr_t)dst, mtod(m, caddr_t), count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(arg, m)
	caddr_t arg;
	struct mbuf *m;
{
	struct bpf_if *bp = (struct bpf_if *)arg;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	/* Total packet length is the sum over the chain. */
	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		/* buflen 0 tells bpf_filter the "packet" is an mbuf chain. */
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn)(const void *, void *, u_int);
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 * cpfn is bcopy for contiguous packets, bpf_mcopy when pkt is an
	 * mbuf chain (see bpf_tap/bpf_mtap).
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	/* Allocate the free and store buffers; hold starts empty. */
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		/*
		 * NOTE(review): bd_fbuf is left dangling here, but since
		 * bd_sbuf stays 0 neither bpf_freed() nor a bpf_setif()
		 * retry will double-free it.
		 */
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it yet hasn't been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_DEVBUF);

	/* Make the slot reusable by bpfopen(). */
	D_MARKFREE(d);
}

/*
 * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).
 */
void
bpfattach(driverp, ifp, dlt, hdrlen)
	caddr_t *driverp;
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	int i;
#if BSD < 199103
	/* Old systems have no kernel malloc: carve from a static pool. */
	static struct bpf_if bpf_ifs[NBPFILTER];
	static int bpfifno;

	bp = (bpfifno < NBPFILTER) ? &bpf_ifs[bpfifno++] : 0;
#else
	bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
#endif
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_driverp = (struct bpf_if **)driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	/* No listeners yet: keep the driver's tap cookie cleared. */
	*bp->bif_driverp = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	/*
	 * Mark all the descriptors free if this hasn't been done.
	 */
	if (!D_ISFREE(&bpf_dtab[0]))
		for (i = 0; i < NBPFILTER; ++i)
			D_MARKFREE(&bpf_dtab[i]);

	if (bootverbose)
		printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}
#endif