/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $Id: bpf.c,v 1.3 1994/08/20 03:48:55 davidg Exp $
 */

#include "bpfilter.h"

#if NBPFILTER > 0

#ifndef __GNUC__
#define inline
#else
#define inline __inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/ioctl.h>

#include <sys/file.h>
#if defined(sparc) && BSD < 199103
#include <sys/stream.h>
#endif
#include <sys/tty.h>
#include <sys/uio.h>

#include <sys/protosw.h>
#include <sys/socket.h>
#include <net/if.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/errno.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>

/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET	26		/* interruptible */

/*
 * The default read buffer size is patchable.
 */
int bpf_bufsize = BPF_BUFSIZE;

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds the descriptors, indexed by minor device #
 */
struct bpf_if	*bpf_iflist;
struct bpf_d	bpf_dtab[NBPFILTER];

#if BSD >= 199207
/*
 * bpfilterattach() is called at boot time in new systems.  We do
 * nothing here since old systems will not call this.
 */
/* ARGSUSED */
void
bpfilterattach(n)
	int n;
{
}
#endif

static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_ifname __P((struct ifnet *, struct ifreq *));
static void	bpf_mcopy __P((const void *, void *, u_int));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, u_int)));
static void	reset_d __P((struct bpf_d *));

static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
		break;

	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGET(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MLEN) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_len = len;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d)	((d) == (d)->bd_next)
#define D_MARKFREE(d)	((d)->bd_next = (d))
#define D_MARKUSED(d)	((d)->bd_next = 0)

/*
 * Open the bpf device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
int
bpfopen(dev, flag)
	dev_t dev;
	int flag;
{
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d))
		return (EBUSY);

	/* Mark "in use" and do most initialization. */
	bzero((char *)d, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
int
bpfclose(dev, flag)
	dev_t dev;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	register int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

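/*
 * Each descriptor cycles packets through three equal-size buffers:
 * the store buffer (bd_sbuf) is filled by the tap routines, the hold
 * buffer (bd_hbuf) holds a completed buffer waiting to be read(), and
 * the free buffer (bd_fbuf) is the spare that becomes the next store
 * buffer.  ROTATE_BUFFERS below advances this cycle; callers only
 * rotate when the hold slot is empty (they check bd_hbuf or bd_fbuf
 * first), so a full buffer is never overwritten before it is read.
 */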
/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 * bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev, uio)
	dev_t dev;
	register struct uio *uio;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
				  d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
#if BSD >= 199103
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}

int
bpfwrite(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;
	int datlen;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
	if (error)
		return (error);

	if (datlen > ifp->if_mtu)
		return (EMSGSIZE);

	s = splnet();
#if BSD >= 199103
	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
#else
	error = (*ifp->if_output)(ifp, m, &dst);
#endif
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

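/*
 * Illustrative sketch (not part of this file, and not compiled) of how a
 * user process typically drives the read side of this device.  The
 * interface name and variable names below are hypothetical.
 *
 *	fd = open("/dev/bpf0", O_RDONLY);
 *	strcpy(ifr.ifr_name, "le0");
 *	ioctl(fd, BIOCSETIF, &ifr);	(attach descriptor to an interface)
 *	ioctl(fd, BIOCGBLEN, &bufsize);	(learn the kernel buffer size)
 *	buf = malloc(bufsize);
 *	n = read(fd, buf, bufsize);	(read() must ask for exactly bufsize)
 *
 * A successful read() returns an entire hold buffer; see catchpacket()
 * below for the format of the records stored in that buffer.
 */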
/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flag)
	dev_t dev;
	int cmd;
	caddr_t addr;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec;

			/* Compute number of milliseconds. */
			msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
			/*
			 * Scale milliseconds to ticks.  Assume hard
			 * clock has millisecond or greater resolution
			 * (i.e. tick >= 1000).  For 10ms hardclock,
			 * tick/1000 = 10, so rtout <- msec/10.
			 */
			d->bd_rtout = msec / (tick / 1000);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec = d->bd_rtout;

			msec *= tick / 1000;
			tv->tv_sec = msec / 1000;
			tv->tv_usec = msec % 1000;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}

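/*
 * For illustration only (this fragment is user-level code, not part of
 * the kernel): the smallest useful filter program is a single BPF_RET
 * instruction whose operand is the snapshot length, e.g. an
 * "accept everything, capture up to 68 bytes" program:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET+BPF_K, 68),
 *	};
 *	struct bpf_program prog = { 1, insns };
 *	ioctl(fd, BIOCSETF, &prog);
 *
 * bpf_setf() above copies the instructions in with copyin(), checks them
 * with bpf_validate(), and only then installs them on the descriptor.
 */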
/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	char *cp;
	int unit, s, error;

	/*
	 * Separate string into name part and unit number.  Put a null
	 * byte at the end of the name part, and compute the number.
	 * If a unit number is unspecified, the default is 0,
	 * as initialized above.  XXX This should be common code.
	 */
	unit = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';
	while (*cp++) {
		if (*cp >= '0' && *cp <= '9') {
			unit = *cp - '0';
			*cp++ = '\0';
			while (*cp)
				unit = 10 * unit + *cp++ - '0';
			break;
		}
	}
	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || unit != ifp->if_unit
		    || strcmp(ifp->if_name, ifr->ifr_name) != 0)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Convert an interface's name plus unit number into a single string,
 * which is returned in the ifr.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{
	char *s = ifp->if_name;
	char *d = ifr->ifr_name;

	while (*d++ = *s++)
		continue;
	/* XXX Assume that unit number is less than 10. */
	*d++ = ifp->if_unit + '0';
	*d = '\0';
}

/*
 * The new select interface passes down the proc pointer; the old select
 * stubs had to grab it out of the user struct.  This glue allows either case.
 */
#if BSD >= 199103
#define bpf_select bpfselect
#else
int
bpfselect(dev, rw)
	register dev_t dev;
	int rw;
{
	return (bpf_select(dev, rw, u.u_procp));
}
#endif

/*
 * Support for select() system call
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpf_select(dev, rw, p)
	register dev_t dev;
	int rw;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;

	if (rw != FREAD)
		return (0);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	s = splimp();
	if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) {
		/*
		 * There is data waiting.
		 */
		splx(s);
		return (1);
	}
#if BSD >= 199103
	selrecord(p, &d->bd_sel);
#else
	/*
	 * No data ready.  If there's already a select() waiting on this
	 * minor device then this is a collision.  This shouldn't happen
	 * because minors really should not be shared, but if a process
	 * forks while one of these is open, it is possible that both
	 * processes could select on the same descriptor.
	 */
	if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
		d->bd_selcoll = 1;
	else
		d->bd_selproc = p;
#endif
	splx(s);
	return (0);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(arg, pkt, pktlen)
	caddr_t arg;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = (struct bpf_if *)arg;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

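/*
 * A driver with the packet in a contiguous buffer calls bpf_tap() from
 * its receive path, passing the cookie that bpfattach() filled in.  A
 * hypothetical sketch (the softc field name sc_bpf is illustrative only):
 *
 *	if (sc->sc_bpf)
 *		bpf_tap(sc->sc_bpf, buf, len);
 *
 * Drivers whose packets live in mbuf chains use bpf_mtap() below instead.
 */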
/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	register u_int len;
{
	register const struct mbuf *m;
	register u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(arg, m)
	caddr_t arg;
	struct mbuf *m;
{
	struct bpf_if *bp = (struct bpf_if *)arg;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Wake up pending reads when the buffer fills or the
 * descriptor is in immediate mode.  "cpfn" is the routine called to do
 * the actual data transfer.  bcopy is passed in to copy contiguous
 * chunks, while bpf_mcopy is passed in to copy mbuf chains.  In the
 * latter case, pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn)(const void *, void *, u_int);
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wake up any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

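/*
 * Layout of the records that catchpacket() stores and read() returns:
 * each record is a struct bpf_hdr (bh_tstamp, bh_caplen, bh_datalen,
 * bh_hdrlen) followed by bh_caplen bytes of packet data, and successive
 * records start on BPF_WORDALIGN boundaries.  A reader steps from one
 * record to the next with
 *
 *	p += BPF_WORDALIGN(hp->bh_hdrlen + hp->bh_caplen);
 *
 * where hp is the header at the current position p.
 */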
/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_DEVBUF);

	D_MARKFREE(d);
}

/*
 * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).
 */
void
bpfattach(driverp, ifp, dlt, hdrlen)
	caddr_t *driverp;
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	int i;
#if BSD < 199103
	static struct bpf_if bpf_ifs[NBPFILTER];
	static int bpfifno;

	bp = (bpfifno < NBPFILTER) ? &bpf_ifs[bpfifno++] : 0;
#else
	bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
#endif
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_driverp = (struct bpf_if **)driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	*bp->bif_driverp = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	/*
	 * Mark all the descriptors free if this hasn't been done.
	 */
	if (!D_ISFREE(&bpf_dtab[0]))
		for (i = 0; i < NBPFILTER; ++i)
			D_MARKFREE(&bpf_dtab[i]);

	printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}

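/*
 * A hypothetical example of the attach-time call a network driver makes
 * (the softc layout shown is illustrative, not from any particular driver):
 *
 *	bpfattach(&sc->sc_bpf, &sc->sc_if, DLT_EN10MB,
 *	    sizeof(struct ether_header));
 *
 * The first argument is the address of the driver's bpf cookie;
 * bpf_attachd() and bpf_detachd() above set and clear it as listeners come
 * and go, so the driver's receive path can test it cheaply before calling
 * bpf_tap() or bpf_mtap().
 */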
#if BSD >= 199103
/* XXX This routine belongs in net/if.c. */
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * "on" request actually has an effect, as does the final "off" request.
 * Results are undefined if the "off" and "on" requests are not matched.
 */
int
ifpromisc(ifp, pswitch)
	struct ifnet *ifp;
	int pswitch;
{
	struct ifreq ifr;
	/*
	 * If the device is not configured up, we cannot put it in
	 * promiscuous mode.
	 */
	if ((ifp->if_flags & IFF_UP) == 0)
		return (ENETDOWN);

	if (pswitch) {
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
	} else {
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
	}
	ifr.ifr_flags = ifp->if_flags;
	return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr));
}
#endif

#if BSD < 199103
/*
 * Allocate some memory for bpf.  This is temporary SunOS support, and
 * is admittedly a hack.
 * If resources unavailable, return 0.
 */
static caddr_t
bpf_alloc(size, canwait)
	register int size;
	register int canwait;
{
	register struct mbuf *m;

	if ((unsigned)size > (MCLBYTES-8))
		return 0;

	MGET(m, canwait, MT_DATA);
	if (m == 0)
		return 0;
	if ((unsigned)size > (MLEN-8)) {
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
			m_freem(m);
			return 0;
		}
	}
	*mtod(m, struct mbuf **) = m;
	return mtod(m, caddr_t) + 8;
}
#endif
#endif