Lines Matching +full:ts +full:- +full:attached

1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
111 struct bpfd_list bif_wlist; /* writer-only list */
134 (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))
144 * 32-bit version of structure prepended to each packet. We use this header
145 * instead of the standard one for 32-bit streams. We mark a stream as
146 * 32-bit the first time we see a 32-bit compat ioctl request.
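The comment at lines 144-146 above describes the compat header prepended to packets on 32-bit streams. As a rough sketch only (the layout is assumed to mirror the regular struct bpf_hdr, and the field names are inferred from the bh_hdrlen use at line 134 and the bh_tstamp assignments later in this listing, not copied from the header itself), the structure is along these lines:

	/* Assumed shape of the 32-bit compat capture header. */
	struct bpf_hdr32 {
		struct timeval32 bh_tstamp;	/* 32-bit seconds/microseconds timestamp */
		uint32_t	 bh_caplen;	/* length of the captured portion */
		uint32_t	 bh_datalen;	/* original length of the packet */
		uint16_t	 bh_hdrlen;	/* this header plus alignment padding */
	};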
224 &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
309 if_rele(bp->bif_ifp); in bpfif_free()
317 refcount_acquire(&bp->bif_refcnt); in bpfif_ref()
324 if (!refcount_release(&bp->bif_refcnt)) in bpfif_rele()
326 NET_EPOCH_CALL(bpfif_free, &bp->epoch_ctx); in bpfif_rele()
333 refcount_acquire(&d->bd_refcnt); in bpfd_ref()
340 if (!refcount_release(&d->bd_refcnt)) in bpfd_rele()
342 NET_EPOCH_CALL(bpfd_free, &d->epoch_ctx); in bpfd_rele()
360 if (ptr->func != NULL) in bpf_program_buffer_free()
361 bpf_destroy_jit_filter(ptr->func); in bpf_program_buffer_free()
378 switch (d->bd_bufmode) { in bpf_append_bytes()
383 counter_u64_add(d->bd_zcopy, 1); in bpf_append_bytes()
398 switch (d->bd_bufmode) { in bpf_append_mbuf()
403 counter_u64_add(d->bd_zcopy, 1); in bpf_append_mbuf()
412 * This function gets called when the free buffer is re-assigned.
420 switch (d->bd_bufmode) { in bpf_buf_reclaimed()
444 switch (d->bd_bufmode) { in bpf_canfreebuf()
461 switch (d->bd_bufmode) { in bpf_canwritebuf()
479 switch (d->bd_bufmode) { in bpf_buffull()
495 switch (d->bd_bufmode) { in bpf_bufheld()
506 switch (d->bd_bufmode) { in bpf_free()
522 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) in bpf_uiomove()
531 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) in bpf_ioctl_sblen()
540 if (d->bd_bufmode != BPF_BUFMODE_ZBUF) in bpf_ioctl_getzmax()
549 if (d->bd_bufmode != BPF_BUFMODE_ZBUF) in bpf_ioctl_rotzbuf()
558 if (d->bd_bufmode != BPF_BUFMODE_ZBUF) in bpf_ioctl_setzbuf()
589 sockp->sa_family = AF_INET; in bpf_movein()
594 sockp->sa_family = AF_UNSPEC; in bpf_movein()
600 sockp->sa_family = AF_IMPLINK; in bpf_movein()
605 sockp->sa_family = AF_UNSPEC; in bpf_movein()
614 sockp->sa_family = AF_UNSPEC; in bpf_movein()
620 * en atm driver requires 4-byte atm pseudo header. in bpf_movein()
624 sockp->sa_family = AF_UNSPEC; in bpf_movein()
629 sockp->sa_family = AF_UNSPEC; in bpf_movein()
634 sockp->sa_family = AF_IEEE80211; in bpf_movein()
639 sockp->sa_family = AF_IEEE80211; in bpf_movein()
640 sockp->sa_len = 12; /* XXX != 0 */ in bpf_movein()
648 len = uio->uio_resid; in bpf_movein()
649 if (len < hlen || len - hlen > ifp->if_mtu) in bpf_movein()
656 m->m_pkthdr.len = m->m_len = len; in bpf_movein()
663 slen = bpf_filter(d->bd_wfilter, mtod(m, u_char *), len, len); in bpf_movein()
673 if (ETHER_IS_MULTICAST(eh->ether_dhost)) { in bpf_movein()
674 if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost, in bpf_movein()
676 m->m_flags |= M_BCAST; in bpf_movein()
678 m->m_flags |= M_MCAST; in bpf_movein()
680 if (d->bd_hdrcmplt == 0) { in bpf_movein()
681 memcpy(eh->ether_shost, IF_LLADDR(ifp), in bpf_movein()
682 sizeof(eh->ether_shost)); in bpf_movein()
691 if (sockp->sa_family == AF_IEEE80211) { in bpf_movein()
702 hlen = p->ibp_len; in bpf_movein()
703 if (hlen > sizeof(sockp->sa_data)) { in bpf_movein()
708 bcopy(mtod(m, const void *), sockp->sa_data, hlen); in bpf_movein()
733 op_w = V_bpf_optimize_writers || d->bd_writer; in bpf_attachd()
735 if (d->bd_bif != NULL) in bpf_attachd()
750 d->bd_bif = bp; in bpf_attachd()
752 /* Add to writers-only list */ in bpf_attachd()
753 CK_LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next); in bpf_attachd()
760 d->bd_writer = 2; in bpf_attachd()
762 CK_LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next); in bpf_attachd()
773 __func__, d->bd_pid, d->bd_writer ? "writer" : "active"); in bpf_attachd()
776 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1); in bpf_attachd()
780 * Check if we need to upgrade our descriptor @d from write-only mode.
791 if (d->bd_writer == 0 || fcode == NULL) in bpf_check_upgrade()
800 * while pcap_open_live() definitely sets to non-zero value, in bpf_check_upgrade()
823 if (--d->bd_writer == 0) { in bpf_check_upgrade()
826 * been set. This is probably catch-all in bpf_check_upgrade()
836 __func__, d->bd_pid, d->bd_writer, in bpf_check_upgrade()
861 CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid); in bpf_detachd_locked()
863 /* Check if descriptor is attached */ in bpf_detachd_locked()
864 if ((bp = d->bd_bif) == NULL) in bpf_detachd_locked()
871 error = d->bd_writer; in bpf_detachd_locked()
872 ifp = bp->bif_ifp; in bpf_detachd_locked()
873 d->bd_bif = NULL; in bpf_detachd_locked()
882 bpf_bpfd_cnt--; in bpf_detachd_locked()
884 /* Call event handler iff d is attached */ in bpf_detachd_locked()
886 EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0); in bpf_detachd_locked()
892 if (d->bd_promisc && !detached_ifp) { in bpf_detachd_locked()
893 d->bd_promisc = 0; in bpf_detachd_locked()
894 CURVNET_SET(ifp->if_vnet); in bpf_detachd_locked()
904 if_printf(bp->bif_ifp, in bpf_detachd_locked()
921 if (d->bd_state == BPF_WAITING) in bpf_dtor()
922 callout_stop(&d->bd_callout); in bpf_dtor()
923 d->bd_state = BPF_IDLE; in bpf_dtor()
925 funsetown(&d->bd_sigio); in bpf_dtor()
930 seldrain(&d->bd_sel); in bpf_dtor()
931 knlist_destroy(&d->bd_sel.si_note); in bpf_dtor()
932 callout_drain(&d->bd_callout); in bpf_dtor()
955 d->bd_rcount = counter_u64_alloc(M_WAITOK); in bpfopen()
956 d->bd_dcount = counter_u64_alloc(M_WAITOK); in bpfopen()
957 d->bd_fcount = counter_u64_alloc(M_WAITOK); in bpfopen()
958 d->bd_wcount = counter_u64_alloc(M_WAITOK); in bpfopen()
959 d->bd_wfcount = counter_u64_alloc(M_WAITOK); in bpfopen()
960 d->bd_wdcount = counter_u64_alloc(M_WAITOK); in bpfopen()
961 d->bd_zcopy = counter_u64_alloc(M_WAITOK); in bpfopen()
964 * For historical reasons, perform a one-time initialization call to in bpfopen()
970 d->bd_writer = 2; in bpfopen()
971 d->bd_hbuf_in_use = 0; in bpfopen()
972 d->bd_bufmode = BPF_BUFMODE_BUFFER; in bpfopen()
973 d->bd_sig = SIGIO; in bpfopen()
974 d->bd_direction = BPF_D_INOUT; in bpfopen()
975 refcount_init(&d->bd_refcnt, 1); in bpfopen()
979 mac_bpfdesc_create(td->td_ucred, d); in bpfopen()
981 mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF); in bpfopen()
982 callout_init_mtx(&d->bd_callout, &d->bd_lock, 0); in bpfopen()
983 knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock); in bpfopen()
986 d->bd_pcp = 0; in bpfopen()
992 * bpfread - read next chunk of packets from buffers
1010 if (uio->uio_resid != d->bd_bufsize) in bpfread()
1017 if (d->bd_bufmode != BPF_BUFMODE_BUFFER) { in bpfread()
1021 if (d->bd_state == BPF_WAITING) in bpfread()
1022 callout_stop(&d->bd_callout); in bpfread()
1023 timed_out = (d->bd_state == BPF_TIMED_OUT); in bpfread()
1024 d->bd_state = BPF_IDLE; in bpfread()
1025 while (d->bd_hbuf_in_use) { in bpfread()
1026 error = mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, in bpfread()
1038 while (d->bd_hbuf == NULL) { in bpfread()
1039 if (d->bd_slen != 0) { in bpfread()
1044 if (d->bd_immediate || non_block || timed_out) { in bpfread()
1047 * if we are in immediate mode, non-blocking in bpfread()
1061 if (d->bd_bif == NULL) { in bpfread()
1070 error = msleep(d, &d->bd_lock, PRINET | PCATCH, in bpfread()
1071 "bpf", d->bd_rtout); in bpfread()
1082 if (d->bd_hbuf) in bpfread()
1090 if (d->bd_slen == 0) { in bpfread()
1101 d->bd_hbuf_in_use = 1; in bpfread()
1112 error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio); in bpfread()
1115 if (d->bd_hbuf_in_use) { in bpfread()
1116 KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf")); in bpfread()
1117 d->bd_fbuf = d->bd_hbuf; in bpfread()
1118 d->bd_hbuf = NULL; in bpfread()
1119 d->bd_hlen = 0; in bpfread()
1121 d->bd_hbuf_in_use = 0; in bpfread()
1122 wakeup(&d->bd_hbuf_in_use); in bpfread()
1137 if (d->bd_state == BPF_WAITING) { in bpf_wakeup()
1138 callout_stop(&d->bd_callout); in bpf_wakeup()
1139 d->bd_state = BPF_IDLE; in bpf_wakeup()
1142 if (d->bd_async && d->bd_sig && d->bd_sigio) in bpf_wakeup()
1143 pgsigio(&d->bd_sigio, d->bd_sig, 0); in bpf_wakeup()
1145 selwakeuppri(&d->bd_sel, PRINET); in bpf_wakeup()
1146 KNOTE_LOCKED(&d->bd_sel.si_note, 0); in bpf_wakeup()
1156 if (callout_pending(&d->bd_callout) || in bpf_timed_out()
1157 !callout_active(&d->bd_callout)) in bpf_timed_out()
1159 if (d->bd_state == BPF_WAITING) { in bpf_timed_out()
1160 d->bd_state = BPF_TIMED_OUT; in bpf_timed_out()
1161 if (d->bd_slen != 0) in bpf_timed_out()
1172 if (!bpf_canfreebuf(d) && d->bd_hlen != 0) in bpf_ready()
1174 if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) && in bpf_ready()
1175 d->bd_slen != 0) in bpf_ready()
1199 counter_u64_add(d->bd_wcount, 1); in bpfwrite()
1200 if ((bp = d->bd_bif) == NULL) { in bpfwrite()
1205 ifp = bp->bif_ifp; in bpfwrite()
1206 if ((ifp->if_flags & IFF_UP) == 0) { in bpfwrite()
1211 if (uio->uio_resid == 0) in bpfwrite()
1226 error = bpf_movein(uio, (int)bp->bif_dlt, ifp, in bpfwrite()
1230 counter_u64_add(d->bd_wdcount, 1); in bpfwrite()
1237 * Check that descriptor is still attached to the interface. in bpfwrite()
1241 if (d->bd_bif == NULL) { in bpfwrite()
1242 counter_u64_add(d->bd_wdcount, 1); in bpfwrite()
1248 counter_u64_add(d->bd_wfcount, 1); in bpfwrite()
1249 if (d->bd_hdrcmplt) in bpfwrite()
1252 if (d->bd_feedback) { in bpfwrite()
1255 mc->m_pkthdr.rcvif = ifp; in bpfwrite()
1257 if (d->bd_direction == BPF_D_INOUT) in bpfwrite()
1258 m->m_flags |= M_PROMISC; in bpfwrite()
1262 m->m_pkthdr.len -= hlen; in bpfwrite()
1263 m->m_len -= hlen; in bpfwrite()
1264 m->m_data += hlen; /* XXX */ in bpfwrite()
1266 CURVNET_SET(ifp->if_vnet); in bpfwrite()
1280 if (d->bd_pcp != 0) in bpfwrite()
1281 vlan_set_pcp(m, d->bd_pcp); in bpfwrite()
1286 error = (*ifp->if_output)(ifp, m, &dst, &ro); in bpfwrite()
1288 counter_u64_add(d->bd_wdcount, 1); in bpfwrite()
1292 (*ifp->if_input)(ifp, mc); in bpfwrite()
1302 counter_u64_add(d->bd_wdcount, 1); in bpfwrite()
1310 * and drop counts. This is doable for kernel-only buffers, but with
1311 * zero-copy buffers, we can't write to (or rotate) buffers that are
1321 while (d->bd_hbuf_in_use) in reset_d()
1322 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET, in reset_d()
1324 if ((d->bd_hbuf != NULL) && in reset_d()
1325 (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) { in reset_d()
1327 d->bd_fbuf = d->bd_hbuf; in reset_d()
1328 d->bd_hbuf = NULL; in reset_d()
1329 d->bd_hlen = 0; in reset_d()
1333 d->bd_slen = 0; in reset_d()
1334 counter_u64_zero(d->bd_rcount); in reset_d()
1335 counter_u64_zero(d->bd_dcount); in reset_d()
1336 counter_u64_zero(d->bd_fcount); in reset_d()
1337 counter_u64_zero(d->bd_wcount); in reset_d()
1338 counter_u64_zero(d->bd_wfcount); in reset_d()
1339 counter_u64_zero(d->bd_wdcount); in reset_d()
1340 counter_u64_zero(d->bd_zcopy); in reset_d()
1367 * BIOCSETZBUF Set current zero-copy buffer locations.
1368 * BIOCGETZMAX Get maximum zero-copy buffer size.
1369 * BIOCROTZBUF Force rotation of zero-copy buffer
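Lines 1367-1369 list the zero-copy buffer ioctls. A hedged userland sketch of how a consumer might switch a descriptor into zero-copy mode and hand the kernel two buffers; exact alignment and sizing constraints are per bpf(4), and the helper name and error handling here are illustrative:

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <net/bpf.h>

	/* fd is an open, not-yet-attached /dev/bpf descriptor (assumed). */
	static int
	setup_zerocopy(int fd)
	{
		u_int mode = BPF_BUFMODE_ZBUF;
		size_t zmax;
		struct bpf_zbuf zb;

		if (ioctl(fd, BIOCSETBUFMODE, &mode) == -1)	/* default is BPF_BUFMODE_BUFFER */
			return (-1);
		if (ioctl(fd, BIOCGETZMAX, &zmax) == -1)	/* largest buffer the kernel accepts */
			return (-1);
		zb.bz_buflen = zmax;
		zb.bz_bufa = mmap(NULL, zmax, PROT_READ | PROT_WRITE, MAP_ANON, -1, 0);
		zb.bz_bufb = mmap(NULL, zmax, PROT_READ | PROT_WRITE, MAP_ANON, -1, 0);
		if (zb.bz_bufa == MAP_FAILED || zb.bz_bufb == MAP_FAILED)
			return (-1);
		return (ioctl(fd, BIOCSETZBUF, &zb));		/* share both buffers with the kernel */
	}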
1391 if (d->bd_state == BPF_WAITING) in bpfioctl()
1392 callout_stop(&d->bd_callout); in bpfioctl()
1393 d->bd_state = BPF_IDLE; in bpfioctl()
1396 if (d->bd_locked == 1) { in bpfioctl()
1432 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so in bpfioctl()
1433 * that it will get 32-bit packet headers. in bpfioctl()
1442 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { in bpfioctl()
1444 d->bd_compat32 = 1; in bpfioctl()
1464 n = d->bd_slen; in bpfioctl()
1465 while (d->bd_hbuf_in_use) in bpfioctl()
1466 mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, in bpfioctl()
1468 if (d->bd_hbuf) in bpfioctl()
1469 n += d->bd_hlen; in bpfioctl()
1481 *(u_int *)addr = d->bd_bufsize; in bpfioctl()
1520 if (d->bd_bif == NULL) { in bpfioctl()
1522 * No interface attached yet. in bpfioctl()
1525 } else if (d->bd_promisc == 0) { in bpfioctl()
1526 error = ifpromisc(d->bd_bif->bif_ifp, 1); in bpfioctl()
1528 d->bd_promisc = 1; in bpfioctl()
1538 if (d->bd_bif == NULL) in bpfioctl()
1541 *(u_int *)addr = d->bd_bif->bif_dlt; in bpfioctl()
1555 dltlist.bfl_len = list32->bfl_len; in bpfioctl()
1556 dltlist.bfl_list = PTRIN(list32->bfl_list); in bpfioctl()
1558 if (d->bd_bif == NULL) in bpfioctl()
1563 list32->bfl_len = dltlist.bfl_len; in bpfioctl()
1572 if (d->bd_bif == NULL) in bpfioctl()
1584 if (d->bd_bif == NULL) in bpfioctl()
1596 if (d->bd_bif == NULL) in bpfioctl()
1599 struct ifnet *const ifp = d->bd_bif->bif_ifp; in bpfioctl()
1602 strlcpy(ifr->ifr_name, ifp->if_xname, in bpfioctl()
1603 sizeof(ifr->ifr_name)); in bpfioctl()
1618 * allocate them here. If we're using zero-copy, in bpfioctl()
1624 if (d->bd_bufmode == BPF_BUFMODE_BUFFER && in bpfioctl()
1625 d->bd_sbuf == NULL) in bpfioctl()
1629 size = d->bd_bufsize; in bpfioctl()
1656 tv->tv_sec = tv32->tv_sec; in bpfioctl()
1657 tv->tv_usec = tv32->tv_usec; in bpfioctl()
1664 * a one-shot timer. in bpfioctl()
1667 d->bd_rtout = tvtohz(tv) - 1; in bpfioctl()
1690 tv->tv_sec = d->bd_rtout / hz; in bpfioctl()
1691 tv->tv_usec = (d->bd_rtout % hz) * tick; in bpfioctl()
1695 tv32->tv_sec = tv->tv_sec; in bpfioctl()
1696 tv32->tv_usec = tv->tv_usec; in bpfioctl()
1711 bs->bs_recv = (u_int)counter_u64_fetch(d->bd_rcount); in bpfioctl()
1712 bs->bs_drop = (u_int)counter_u64_fetch(d->bd_dcount); in bpfioctl()
1721 d->bd_immediate = *(u_int *)addr; in bpfioctl()
1729 bv->bv_major = BPF_MAJOR_VERSION; in bpfioctl()
1730 bv->bv_minor = BPF_MINOR_VERSION; in bpfioctl()
1739 *(u_int *)addr = d->bd_hdrcmplt; in bpfioctl()
1748 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0; in bpfioctl()
1757 *(u_int *)addr = d->bd_direction; in bpfioctl()
1774 d->bd_direction = direction; in bpfioctl()
1788 *(u_int *)addr = d->bd_tstamp; in bpfioctl()
1801 d->bd_tstamp = func; in bpfioctl()
1809 d->bd_feedback = *(u_int *)addr; in bpfioctl()
1815 d->bd_locked = 1; in bpfioctl()
1819 case FIONBIO: /* Non-blocking I/O */ in bpfioctl()
1824 d->bd_async = *(int *)addr; in bpfioctl()
1833 error = fsetown(*(int *)addr, &d->bd_sigio); in bpfioctl()
1838 *(int *)addr = fgetown(&d->bd_sigio); in bpfioctl()
1844 error = fsetown(-(*(int *)addr), &d->bd_sigio); in bpfioctl()
1849 *(int *)addr = -fgetown(&d->bd_sigio); in bpfioctl()
1862 d->bd_sig = sig; in bpfioctl()
1869 *(u_int *)addr = d->bd_sig; in bpfioctl()
1875 *(u_int *)addr = d->bd_bufmode; in bpfioctl()
1884 * buffer has been allocated or an interface attached, since in bpfioctl()
1902 if (d->bd_sbuf != NULL || d->bd_hbuf != NULL || in bpfioctl()
1903 d->bd_fbuf != NULL || d->bd_bif != NULL) { in bpfioctl()
1908 d->bd_bufmode = *(u_int *)addr; in bpfioctl()
1933 d->bd_pcp = pcp; in bpfioctl()
1970 fp_swab.bf_len = fp32->bf_len; in bpf_setf()
1972 (struct bpf_insn *)(uintptr_t)fp32->bf_insns; in bpf_setf()
1994 flen = fp->bf_len; in bpf_setf()
1995 if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0)) in bpf_setf()
1997 size = flen * sizeof(*fp->bf_insns); in bpf_setf()
2001 filter = (struct bpf_insn *)fcode->buffer; in bpf_setf()
2002 if (copyin(fp->bf_insns, filter, size) != 0 || in bpf_setf()
2025 if (d->bd_wfilter != NULL) { in bpf_setf()
2026 fcode = __containerof((void *)d->bd_wfilter, in bpf_setf()
2029 fcode->func = NULL; in bpf_setf()
2032 d->bd_wfilter = filter; in bpf_setf()
2034 if (d->bd_rfilter != NULL) { in bpf_setf()
2035 fcode = __containerof((void *)d->bd_rfilter, in bpf_setf()
2038 fcode->func = d->bd_bfilter; in bpf_setf()
2041 d->bd_rfilter = filter; in bpf_setf()
2043 d->bd_bfilter = jfunc; in bpf_setf()
2054 d->bd_writer = 0; in bpf_setf()
2055 if (d->bd_bif != NULL) { in bpf_setf()
2057 * Remove descriptor from writers-only list in bpf_setf()
2061 CK_LIST_INSERT_HEAD(&d->bd_bif->bif_dlist, in bpf_setf()
2065 __func__, d->bd_pid); in bpf_setf()
2073 NET_EPOCH_CALL(bpf_program_buffer_free, &fcode->epoch_ctx); in bpf_setf()
2077 d->bd_bif->bif_ifp, d->bd_bif->bif_dlt, 1); in bpf_setf()
2084 * Detach a file from its current interface (if attached at all) and attach
2096 theywant = ifunit(ifr->ifr_name); in bpf_setif()
2100 * Look through attached interfaces for the named one. in bpf_setif()
2103 if (bp->bif_ifp == theywant && in bpf_setif()
2104 bp->bif_bpf == &theywant->if_bpf) in bpf_setif()
2110 MPASS(bp == theywant->if_bpf); in bpf_setif()
2115 switch (d->bd_bufmode) { in bpf_setif()
2118 if (d->bd_sbuf == NULL) in bpf_setif()
2123 panic("bpf_setif: bufmode %d", d->bd_bufmode); in bpf_setif()
2125 if (bp != d->bd_bif) in bpf_setif()
2147 if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL) in bpfpoll()
2161 selrecord(td, &d->bd_sel); in bpfpoll()
2163 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) { in bpfpoll()
2164 callout_reset(&d->bd_callout, d->bd_rtout, in bpfpoll()
2166 d->bd_state = BPF_WAITING; in bpfpoll()
2186 switch (kn->kn_filter) { in bpfkqfilter()
2188 kn->kn_fop = &bpfread_filtops; in bpfkqfilter()
2192 kn->kn_fop = &bpfwrite_filtops; in bpfkqfilter()
2204 kn->kn_hook = d; in bpfkqfilter()
2205 knlist_add(&d->bd_sel.si_note, kn, 1); in bpfkqfilter()
2214 struct bpf_d *d = (struct bpf_d *)kn->kn_hook; in filt_bpfdetach()
2216 knlist_remove(&d->bd_sel.si_note, kn, 0); in filt_bpfdetach()
2222 struct bpf_d *d = (struct bpf_d *)kn->kn_hook; in filt_bpfread()
2228 kn->kn_data = d->bd_slen; in filt_bpfread()
2232 if (!d->bd_hbuf_in_use && d->bd_hbuf) in filt_bpfread()
2233 kn->kn_data += d->bd_hlen; in filt_bpfread()
2234 } else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) { in filt_bpfread()
2235 callout_reset(&d->bd_callout, d->bd_rtout, in filt_bpfread()
2237 d->bd_state = BPF_WAITING; in filt_bpfread()
2246 struct bpf_d *d = (struct bpf_d *)kn->kn_hook; in filt_bpfwrite()
2250 if (d->bd_bif == NULL) { in filt_bpfwrite()
2251 kn->kn_data = 0; in filt_bpfwrite()
2254 kn->kn_data = d->bd_bif->bif_ifp->if_mtu; in filt_bpfwrite()
2279 struct timespec ts; in bpf_gettime() local
2288 if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR | M_TSTMP)) { in bpf_gettime()
2289 mbuf_tstmp2timespec(m, &ts); in bpf_gettime()
2290 timespec2bintime(&ts, bt); in bpf_gettime()
2327 CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) { in bpf_tap()
2328 counter_u64_add(d->bd_rcount, 1); in bpf_tap()
2336 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL; in bpf_tap()
2338 slen = (*(bf->func))(pkt, pktlen, pktlen); in bpf_tap()
2341 slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen); in bpf_tap()
2347 counter_u64_add(d->bd_fcount, 1); in bpf_tap()
2348 if (gottime < bpf_ts_quality(d->bd_tstamp)) in bpf_tap()
2349 gottime = bpf_gettime(&bt, d->bd_tstamp, in bpf_tap()
2352 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0) in bpf_tap()
2365 if (bpf_peers_present(ifp->if_bpf)) in bpf_tap_if()
2366 bpf_tap(ifp->if_bpf, pkt, pktlen); in bpf_tap_if()
2370 (((d)->bd_direction == BPF_D_IN && (r) != (i)) || \
2371 ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
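The BPF_CHECK_DIRECTION() macro above (lines 2370-2371) acts on the bd_direction value set through the BIOCSDIRECTION ioctl seen earlier in this listing. A minimal, assumed userland counterpart:

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <net/bpf.h>

	/* fd is an open, attached /dev/bpf descriptor (assumed). */
	static int
	capture_inbound_only(int fd)
	{
		u_int dir = BPF_D_IN;	/* have the macro above skip packets this host transmits */

		return (ioctl(fd, BIOCSDIRECTION, &dir));
	}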
2390 if ((m->m_flags & M_PROMISC) != 0 && m_rcvif(m) == NULL) { in bpf_mtap()
2391 m->m_flags &= ~M_PROMISC; in bpf_mtap()
2399 CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) { in bpf_mtap()
2400 if (BPF_CHECK_DIRECTION(d, m_rcvif(m), bp->bif_ifp)) in bpf_mtap()
2402 counter_u64_add(d->bd_rcount, 1); in bpf_mtap()
2404 bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL; in bpf_mtap()
2406 if (bf != NULL && m->m_next == NULL) in bpf_mtap()
2407 slen = (*(bf->func))(mtod(m, u_char *), pktlen, in bpf_mtap()
2411 slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0); in bpf_mtap()
2415 counter_u64_add(d->bd_fcount, 1); in bpf_mtap()
2416 if (gottime < bpf_ts_quality(d->bd_tstamp)) in bpf_mtap()
2417 gottime = bpf_gettime(&bt, d->bd_tstamp, m); in bpf_mtap()
2419 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0) in bpf_mtap()
2432 if (bpf_peers_present(ifp->if_bpf)) { in bpf_mtap_if()
2434 bpf_mtap(ifp->if_bpf, m); in bpf_mtap_if()
2453 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) { in bpf_mtap2()
2454 m->m_flags &= ~M_PROMISC; in bpf_mtap2()
2460 * Craft on-stack mbuf suitable for passing to bpf_filter. in bpf_mtap2()
2462 * absolutely needed--this mbuf should never go anywhere else. in bpf_mtap2()
2473 CK_LIST_FOREACH(d, &bp->bif_dlist, bd_next) { in bpf_mtap2()
2474 if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp)) in bpf_mtap2()
2476 counter_u64_add(d->bd_rcount, 1); in bpf_mtap2()
2477 slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0); in bpf_mtap2()
2481 counter_u64_add(d->bd_fcount, 1); in bpf_mtap2()
2482 if (gottime < bpf_ts_quality(d->bd_tstamp)) in bpf_mtap2()
2483 gottime = bpf_gettime(&bt, d->bd_tstamp, m); in bpf_mtap2()
2485 if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0) in bpf_mtap2()
2498 if (bpf_peers_present(ifp->if_bpf)) { in bpf_mtap2_if()
2500 bpf_mtap2(ifp->if_bpf, data, dlen, m); in bpf_mtap2_if()
2515 hdrlen = d->bd_bif->bif_hdrlen; in bpf_hdrlen()
2517 if (d->bd_tstamp == BPF_T_NONE || in bpf_hdrlen()
2518 BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME) in bpf_hdrlen()
2520 if (d->bd_compat32) in bpf_hdrlen()
2529 if (d->bd_compat32) in bpf_hdrlen()
2535 return (hdrlen - d->bd_bif->bif_hdrlen); in bpf_hdrlen()
2539 bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype) in bpf_bintime2ts() argument
2554 ts->bt_sec = tsm.tv_sec; in bpf_bintime2ts()
2555 ts->bt_frac = tsm.tv_usec; in bpf_bintime2ts()
2559 ts->bt_sec = tsn.tv_sec; in bpf_bintime2ts()
2560 ts->bt_frac = tsn.tv_nsec; in bpf_bintime2ts()
2563 ts->bt_sec = bt->sec; in bpf_bintime2ts()
2564 ts->bt_frac = bt->frac; in bpf_bintime2ts()
2595 if (d->bd_bif == NULL) { in catchpacket()
2597 counter_u64_add(d->bd_dcount, 1); in catchpacket()
2608 if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) { in catchpacket()
2609 d->bd_fbuf = d->bd_hbuf; in catchpacket()
2610 d->bd_hbuf = NULL; in catchpacket()
2611 d->bd_hlen = 0; in catchpacket()
2623 if (totlen > d->bd_bufsize) in catchpacket()
2624 totlen = d->bd_bufsize; in catchpacket()
2635 if (d->bd_compat32) in catchpacket()
2636 curlen = BPF_WORDALIGN32(d->bd_slen); in catchpacket()
2639 curlen = BPF_WORDALIGN(d->bd_slen); in catchpacket()
2640 if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) { in catchpacket()
2641 if (d->bd_fbuf == NULL) { in catchpacket()
2648 counter_u64_add(d->bd_dcount, 1); in catchpacket()
2651 KASSERT(!d->bd_hbuf_in_use, ("hold buffer is in use")); in catchpacket()
2656 if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) { in catchpacket()
2664 pad = curlen - d->bd_slen; in catchpacket()
2669 bpf_append_bytes(d, d->bd_sbuf, d->bd_slen, zeroes, in catchpacket()
2674 caplen = totlen - hdrlen; in catchpacket()
2675 tstype = d->bd_tstamp; in catchpacket()
2679 struct bpf_ts ts; in catchpacket() local
2681 bpf_bintime2ts(bt, &ts, tstype); in catchpacket()
2683 if (d->bd_compat32) { in catchpacket()
2686 hdr32_old.bh_tstamp.tv_sec = ts.bt_sec; in catchpacket()
2687 hdr32_old.bh_tstamp.tv_usec = ts.bt_frac; in catchpacket()
2692 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old, in catchpacket()
2699 hdr_old.bh_tstamp.tv_sec = ts.bt_sec; in catchpacket()
2700 hdr_old.bh_tstamp.tv_usec = ts.bt_frac; in catchpacket()
2705 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old, in catchpacket()
2721 bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr)); in catchpacket()
2729 (*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen); in catchpacket()
2730 d->bd_slen = curlen + totlen; in catchpacket()
2753 if (d->bd_rfilter != NULL) { in bpfd_free()
2754 p = __containerof((void *)d->bd_rfilter, in bpfd_free()
2757 p->func = d->bd_bfilter; in bpfd_free()
2759 bpf_program_buffer_free(&p->epoch_ctx); in bpfd_free()
2761 if (d->bd_wfilter != NULL) { in bpfd_free()
2762 p = __containerof((void *)d->bd_wfilter, in bpfd_free()
2765 p->func = NULL; in bpfd_free()
2767 bpf_program_buffer_free(&p->epoch_ctx); in bpfd_free()
2770 mtx_destroy(&d->bd_lock); in bpfd_free()
2771 counter_u64_free(d->bd_rcount); in bpfd_free()
2772 counter_u64_free(d->bd_dcount); in bpfd_free()
2773 counter_u64_free(d->bd_fcount); in bpfd_free()
2774 counter_u64_free(d->bd_wcount); in bpfd_free()
2775 counter_u64_free(d->bd_wfcount); in bpfd_free()
2776 counter_u64_free(d->bd_wdcount); in bpfd_free()
2777 counter_u64_free(d->bd_zcopy); in bpfd_free()
2789 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf); in bpfattach()
2794 * defining the interface to be attached, dlt is the link layer type,
2809 CK_LIST_INIT(&bp->bif_dlist); in bpfattach2()
2810 CK_LIST_INIT(&bp->bif_wlist); in bpfattach2()
2811 bp->bif_ifp = ifp; in bpfattach2()
2812 bp->bif_dlt = dlt; in bpfattach2()
2813 bp->bif_hdrlen = hdrlen; in bpfattach2()
2814 bp->bif_bpf = driverp; in bpfattach2()
2815 refcount_init(&bp->bif_refcnt, 1); in bpfattach2()
2827 if_printf(ifp, "bpf attached\n"); in bpfattach2()
2833 * query the dlt and hdrlen before detach so we can re-attach the if_bpf
2848 *bif_dlt = bp->bif_dlt; in bpf_get_bp_params()
2850 *bif_hdrlen = bp->bif_hdrlen; in bpf_get_bp_params()
2866 if (bp->bif_ifp != ifp) in bpf_ifdetach()
2870 while ((d = CK_LIST_FIRST(&bp->bif_dlist)) != NULL) { in bpf_ifdetach()
2874 /* Detach writer-only descriptors */ in bpf_ifdetach()
2875 while ((d = CK_LIST_FIRST(&bp->bif_wlist)) != NULL) { in bpf_ifdetach()
2897 if (ifp != bp->bif_ifp) in bpfdetach()
2901 *bp->bif_bpf = __DECONST(struct bpf_if *, &dead_bpf_if); in bpfdetach()
2905 __func__, bp->bif_dlt, bp, ifp); in bpfdetach()
2908 while ((d = CK_LIST_FIRST(&bp->bif_dlist)) != NULL) { in bpfdetach()
2912 /* Detach writer-only descriptors */ in bpfdetach()
2913 while ((d = CK_LIST_FIRST(&bp->bif_wlist)) != NULL) { in bpfdetach()
2924 return (bpf_peers_present(ifp->if_bpf)); in bpf_peers_present_if()
2940 ifp = d->bd_bif->bif_ifp; in bpf_getdltlist()
2943 if (bp->bif_ifp == ifp) in bpf_getdltlist()
2946 if (bfl->bfl_list == NULL) { in bpf_getdltlist()
2947 bfl->bfl_len = n1; in bpf_getdltlist()
2950 if (n1 > bfl->bfl_len) in bpf_getdltlist()
2956 if (bp->bif_ifp != ifp) in bpf_getdltlist()
2958 lst[n++] = bp->bif_dlt; in bpf_getdltlist()
2960 error = copyout(lst, bfl->bfl_list, sizeof(u_int) * n); in bpf_getdltlist()
2962 bfl->bfl_len = n; in bpf_getdltlist()
2977 MPASS(d->bd_bif != NULL); in bpf_setdlt()
2983 if (d->bd_bif->bif_dlt == dlt) in bpf_setdlt()
2986 ifp = d->bd_bif->bif_ifp; in bpf_setdlt()
2988 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) in bpf_setdlt()
2994 opromisc = d->bd_promisc; in bpf_setdlt()
2997 error = ifpromisc(bp->bif_ifp, 1); in bpf_setdlt()
2999 if_printf(bp->bif_ifp, "%s: ifpromisc failed (%d)\n", in bpf_setdlt()
3002 d->bd_promisc = 1; in bpf_setdlt()
3035 CK_LIST_FOREACH(bd, &bp->bif_dlist, bd_next) { in bpf_zero_counters()
3036 counter_u64_zero(bd->bd_rcount); in bpf_zero_counters()
3037 counter_u64_zero(bd->bd_dcount); in bpf_zero_counters()
3038 counter_u64_zero(bd->bd_fcount); in bpf_zero_counters()
3039 counter_u64_zero(bd->bd_wcount); in bpf_zero_counters()
3040 counter_u64_zero(bd->bd_wfcount); in bpf_zero_counters()
3041 counter_u64_zero(bd->bd_zcopy); in bpf_zero_counters()
3056 d->bd_structsize = sizeof(*d); in bpfstats_fill_xbpf()
3057 d->bd_immediate = bd->bd_immediate; in bpfstats_fill_xbpf()
3058 d->bd_promisc = bd->bd_promisc; in bpfstats_fill_xbpf()
3059 d->bd_hdrcmplt = bd->bd_hdrcmplt; in bpfstats_fill_xbpf()
3060 d->bd_direction = bd->bd_direction; in bpfstats_fill_xbpf()
3061 d->bd_feedback = bd->bd_feedback; in bpfstats_fill_xbpf()
3062 d->bd_async = bd->bd_async; in bpfstats_fill_xbpf()
3063 d->bd_rcount = counter_u64_fetch(bd->bd_rcount); in bpfstats_fill_xbpf()
3064 d->bd_dcount = counter_u64_fetch(bd->bd_dcount); in bpfstats_fill_xbpf()
3065 d->bd_fcount = counter_u64_fetch(bd->bd_fcount); in bpfstats_fill_xbpf()
3066 d->bd_sig = bd->bd_sig; in bpfstats_fill_xbpf()
3067 d->bd_slen = bd->bd_slen; in bpfstats_fill_xbpf()
3068 d->bd_hlen = bd->bd_hlen; in bpfstats_fill_xbpf()
3069 d->bd_bufsize = bd->bd_bufsize; in bpfstats_fill_xbpf()
3070 d->bd_pid = bd->bd_pid; in bpfstats_fill_xbpf()
3071 strlcpy(d->bd_ifname, in bpfstats_fill_xbpf()
3072 bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ); in bpfstats_fill_xbpf()
3073 d->bd_locked = bd->bd_locked; in bpfstats_fill_xbpf()
3074 d->bd_wcount = counter_u64_fetch(bd->bd_wcount); in bpfstats_fill_xbpf()
3075 d->bd_wdcount = counter_u64_fetch(bd->bd_wdcount); in bpfstats_fill_xbpf()
3076 d->bd_wfcount = counter_u64_fetch(bd->bd_wfcount); in bpfstats_fill_xbpf()
3077 d->bd_zcopy = counter_u64_fetch(bd->bd_zcopy); in bpfstats_fill_xbpf()
3078 d->bd_bufmode = bd->bd_bufmode; in bpfstats_fill_xbpf()
3082 * Handle `netstat -B' stats request
3099 error = priv_check(req->td, PRIV_NET_BPF); in bpf_stats_sysctl()
3107 if (req->newptr != NULL) { in bpf_stats_sysctl()
3108 if (req->newlen != sizeof(tempstats)) in bpf_stats_sysctl()
3119 if (req->oldptr == NULL) in bpf_stats_sysctl()
3123 xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK); in bpf_stats_sysctl()
3125 if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) { in bpf_stats_sysctl()
3132 /* Send writers-only first */ in bpf_stats_sysctl()
3133 CK_LIST_FOREACH(bd, &bp->bif_wlist, bd_next) { in bpf_stats_sysctl()
3137 CK_LIST_FOREACH(bd, &bp->bif_dlist, bd_next) { in bpf_stats_sysctl()
3153 * NOP stubs to allow bpf-using drivers to load and function.
3193 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf); in bpfattach()
3217 return (-1); /* "no filter" behaviour */ in bpf_filter()
3236 #define BPF_DB_PRINTF(f, e) db_printf(" %s = " f "\n", #e, bpf_if->e); in bpf_show_bpf_if()
3245 BPF_DB_PRINTF_RAW("%u", refcount_load(&bpf_if->bif_refcnt)); in bpf_show_bpf_if()
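Taken together, the matched functions above (bpfopen, bpf_setif/bpf_attachd, bpfread, catchpacket) implement the usual open/attach/read cycle. A hedged end-to-end sketch from the userland side; the interface name is an assumption and error handling is minimal:

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <net/bpf.h>
	#include <err.h>
	#include <fcntl.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	int
	main(void)
	{
		struct ifreq ifr;
		struct bpf_hdr *bh;
		u_int imm = 1, buflen;
		char *buf, *p;
		ssize_t n;
		int fd;

		if ((fd = open("/dev/bpf", O_RDONLY)) == -1)		/* bpfopen() */
			err(1, "open");
		memset(&ifr, 0, sizeof(ifr));
		strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));	/* assumed interface */
		if (ioctl(fd, BIOCSETIF, &ifr) == -1)			/* bpf_setif() -> bpf_attachd() */
			err(1, "BIOCSETIF");
		if (ioctl(fd, BIOCIMMEDIATE, &imm) == -1)		/* deliver packets as they arrive */
			err(1, "BIOCIMMEDIATE");
		if (ioctl(fd, BIOCGBLEN, &buflen) == -1)		/* bpfread() requires reads of this size */
			err(1, "BIOCGBLEN");
		if ((buf = malloc(buflen)) == NULL)
			err(1, "malloc");
		while ((n = read(fd, buf, buflen)) > 0) {		/* one store buffer per read */
			p = buf;
			while (p < buf + n) {
				bh = (struct bpf_hdr *)(void *)p;
				/* captured frame: p + bh->bh_hdrlen, bh->bh_caplen bytes long */
				p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
			}
		}
		free(buf);
		close(fd);
		return (0);
	}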