/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)bpf.c	8.4 (Berkeley) 1/9/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bpf.h"
#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_netgraph.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/uio.h>
#include <sys/sysent.h>

#include <sys/event.h>
#include <sys/file.h>
#include <sys/poll.h>
#include <sys/proc.h>

#include <sys/socket.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/bpf.h>
#include <net/bpf_buffer.h>
#ifdef BPF_JITTER
#include <net/bpf_jitter.h>
#endif
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <net80211/ieee80211_freebsd.h>

#include <security/mac/mac_framework.h>

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

struct bpf_if {
#define	bif_next	bif_ext.bif_next
#define	bif_dlist	bif_ext.bif_dlist
	struct bpf_if_ext bif_ext;	/* public members */
	u_int		bif_dlt;	/* link layer type */
	u_int		bif_hdrlen;	/* length of link header */
	struct ifnet	*bif_ifp;	/* corresponding interface */
	struct rwlock	bif_lock;	/* interface lock */
	LIST_HEAD(, bpf_d) bif_wlist;	/* writer-only list */
	int		bif_flags;	/* Interface flags */
	struct bpf_if	**bif_bpf;	/* Pointer to pointer to us */
};

CTASSERT(offsetof(struct bpf_if, bif_ext) == 0);

#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

#define PRINET  26			/* interruptible */

#define	SIZEOF_BPF_HDR(type)	\
    (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>
#define BPF_ALIGNMENT32 sizeof(int32_t)
#define	BPF_WORDALIGN32(x) roundup2(x, BPF_ALIGNMENT32)

#ifndef BURN_BRIDGES
/*
 * 32-bit version of structure prepended to each packet.  We use this header
 * instead of the standard one for 32-bit streams.  We mark a stream as
 * 32-bit the first time we see a 32-bit compat ioctl request.
 */
struct bpf_hdr32 {
	struct timeval32 bh_tstamp;	/* time stamp */
	uint32_t	bh_caplen;	/* length of captured portion */
	uint32_t	bh_datalen;	/* original length of packet */
	uint16_t	bh_hdrlen;	/* length of bpf header (this struct
					   plus alignment padding) */
};
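
/*
 * Worked example of the SIZEOF_BPF_HDR() macro above (an illustrative note,
 * not part of the original source): for struct bpf_hdr32, bh_hdrlen sits at
 * offset 8 (timeval32) + 4 (bh_caplen) + 4 (bh_datalen) = 16 and is 2 bytes
 * wide, so
 *
 *	SIZEOF_BPF_HDR(struct bpf_hdr32) == 18
 *
 * i.e. the header length without any trailing structure padding, which is
 * what ends up in bh_hdrlen before word alignment is applied.
 */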
#endif

struct bpf_program32 {
	u_int bf_len;
	uint32_t bf_insns;
};

struct bpf_dltlist32 {
	u_int	bfl_len;
	u_int	bfl_list;
};

#define	BIOCSETF32	_IOW('B', 103, struct bpf_program32)
#define	BIOCSRTIMEOUT32	_IOW('B', 109, struct timeval32)
#define	BIOCGRTIMEOUT32	_IOR('B', 110, struct timeval32)
#define	BIOCGDLTLIST32	_IOWR('B', 121, struct bpf_dltlist32)
#define	BIOCSETWF32	_IOW('B', 123, struct bpf_program32)
#define	BIOCSETFNR32	_IOW('B', 130, struct bpf_program32)
#endif

/*
 * bpf_iflist is a list of BPF interface structures, each corresponding to a
 * specific DLT.  The same network interface might have several BPF interface
 * structures registered by different layers in the stack (e.g., 802.11
 * frames, Ethernet frames, etc.).
 */
static LIST_HEAD(, bpf_if)	bpf_iflist, bpf_freelist;
static struct mtx	bpf_mtx;		/* bpf global lock */
static int		bpf_bpfd_cnt;

static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static void	bpf_detachd_locked(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static int	bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
		    struct sockaddr *, int *, struct bpf_d *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void
		bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
		    void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
		    struct bintime *);
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	filt_bpfdetach(struct knote *);
static int	filt_bpfread(struct knote *, long);
static void	bpf_drvinit(void *);
static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
int bpf_maxinsns = BPF_MAXINSNS;
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
    &bpf_maxinsns, 0, "Maximum bpf program instructions");
static int bpf_zerocopy_enable = 0;
SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
    &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
    bpf_stats_sysctl, "bpf statistics portal");

static VNET_DEFINE(int, bpf_optimize_writers) = 0;
#define	V_bpf_optimize_writers VNET(bpf_optimize_writers)
SYSCTL_INT(_net_bpf, OID_AUTO, optimize_writers, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(bpf_optimize_writers), 0,
    "Do not send packets until BPF program is set");

static	d_open_t	bpfopen;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;
static	d_kqfilter_t	bpfkqfilter;

static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	bpfopen,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_kqfilter =	bpfkqfilter,
};

static struct filterops bpfread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_bpfdetach,
	.f_event = filt_bpfread,
};

eventhandler_tag	bpf_ifdetach_cookie = NULL;

/*
 * LOCKING MODEL USED BY BPF:
 * Locks:
 * 1) global lock (BPF_LOCK).  A mutex, used to protect interface
 *    addition/removal, some global counters and every bpf_if reference.
 * 2) Interface lock.  An rwlock, used to protect the list of BPF descriptors
 *    and their filters.
 * 3) Descriptor lock.  A mutex, used to protect the BPF buffers and various
 *    structure fields used by the bpf_mtap code.
 *
 * Lock order:
 *
 * Global lock, interface lock, descriptor lock
 *
 * We have to acquire the interface lock before the descriptor main lock
 * because of the BPF_MTAP[2] working model.  In many places (like
 * bpf_detachd) we start with a BPF descriptor (and we need at least to
 * rlock it to get a reliable interface pointer).  This gives us a potential
 * LOR.  As a result, we use the global lock to protect against bpf_if
 * changes in every such place.
 *
 * Changing d->bd_bif is protected by 1) the global lock, 2) the interface
 * lock and 3) the descriptor main wlock.
 * Reading bd_bif can be protected by any of these locks, typically the
 * global lock.
 *
 * Changing the read/write BPF filter is protected by the same three locks;
 * the same applies to reading.
 *
 * Sleeping under the global lock is not allowed since bpfdetach() uses it.
 */
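
/*
 * Illustrative sketch of the lock order above (not part of the original
 * source); it mirrors what bpf_attachd() and bpf_detachd_locked() below
 * actually do when rebinding a descriptor:
 *
 *	BPF_LOCK();		// 1) global lock
 *	BPFIF_WLOCK(bp);	// 2) interface lock
 *	BPFD_LOCK(d);		// 3) descriptor lock
 *	d->bd_bif = bp;		// safe: all three locks held
 *	BPFD_UNLOCK(d);
 *	BPFIF_WUNLOCK(bp);
 *	BPF_UNLOCK();
 */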

/*
 * Wrapper functions for various buffering methods.  If the set of buffer
 * modes expands, we will probably want to introduce a switch data structure
 * similar to protosw, etc.
 */
static void
bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_bytes(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		counter_u64_add(d->bd_zcopy, 1);
		return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_bytes");
	}
}

static void
bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_mbuf(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		counter_u64_add(d->bd_zcopy, 1);
		return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_mbuf");
	}
}

/*
 * This function gets called when the free buffer is re-assigned.
 */
static void
bpf_buf_reclaimed(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return;

	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buf_reclaimed(d);
		return;

	default:
		panic("bpf_buf_reclaimed");
	}
}

/*
 * If the buffer mechanism has a way to decide that a held buffer can be made
 * free, then it is exposed via the bpf_canfreebuf() interface.  (1) is
 * returned if the buffer can be discarded, (0) is returned if it cannot.
 */
static int
bpf_canfreebuf(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canfreebuf(d));
	}
	return (0);
}

/*
 * Allow the buffer model to indicate that the current store buffer is
 * immutable, regardless of the appearance of space.  Return (1) if the
 * buffer is writable, and (0) if not.
 */
static int
bpf_canwritebuf(struct bpf_d *d)
{
	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canwritebuf(d));
	}
	return (1);
}

/*
 * Notify buffer model that an attempt to write to the store buffer has
 * resulted in a dropped packet, in which case the buffer may be considered
 * full.
 */
static void
bpf_buffull(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buffull(d);
		break;
	}
}

/*
 * Notify the buffer model that a buffer has moved into the hold position.
 */
void
bpf_bufheld(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_bufheld(d);
		break;
	}
}

static void
bpf_free(struct bpf_d *d)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_free(d));

	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_free(d));

	default:
		panic("bpf_buf_free");
	}
}

static int
bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_uiomove(d, buf, len, uio));
}

static int
bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_ioctl_sblen(d, i));
}

static int
bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_getzmax(td, d, i));
}

static int
bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
}

static int
bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
}

/*
 * General BPF functions.
 */
static int
bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
    struct sockaddr *sockp, int *hdrlen, struct bpf_d *d)
{
	const struct ieee80211_bpf_params *p;
	struct ether_header *eh;
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_NULL:
		/*
		 * null interface types require a 4 byte pseudo header which
		 * corresponds to the address family of the packet.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	if (len < hlen || len - hlen > ifp->if_mtu)
		return (EMSGSIZE);

	m = m_get2(len, M_WAITOK, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (EIO);
	m->m_pkthdr.len = m->m_len = len;
	*mp = m;

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	slen = bpf_filter(d->bd_wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/* Check for multicast destination */
	switch (linktype) {
	case DLT_EN10MB:
		eh = mtod(m, struct ether_header *);
		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
			    ETHER_ADDR_LEN) == 0)
				m->m_flags |= M_BCAST;
			else
				m->m_flags |= M_MCAST;
		}
		if (d->bd_hdrcmplt == 0) {
			memcpy(eh->ether_shost, IF_LLADDR(ifp),
			    sizeof(eh->ether_shost));
		}
		break;
	}

	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     zero.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(mtod(m, const void *), sockp->sa_data, hlen);
	}
	*hdrlen = hlen;

	return (0);
bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e., make d listen on bp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	int op_w;

	BPF_LOCK_ASSERT();

	/*
	 * Save the sysctl value to protect against a sysctl change
	 * between reads.
	 */
	op_w = V_bpf_optimize_writers || d->bd_writer;

	if (d->bd_bif != NULL)
		bpf_detachd_locked(d);
	/*
	 * Point d at bp, and add d to the interface's list.
	 * Since many applications use BPF for sending raw packets only
	 * (dhcpd and cdpd are good examples), we can delay adding d to the
	 * list of active listeners until some filter is configured.
	 */

	BPFIF_WLOCK(bp);
	BPFD_LOCK(d);

	d->bd_bif = bp;

	if (op_w != 0) {
		/* Add to writers-only list */
		LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
		/*
		 * We decrement bd_writer on every filter set operation.
		 * The first BIOCSETF is done by pcap_open_live() to set up
		 * the snap length.  After that the application usually sets
		 * its own filter.
		 */
		d->bd_writer = 2;
	} else
		LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	BPFD_UNLOCK(d);
	BPFIF_WUNLOCK(bp);

	bpf_bpfd_cnt++;

	CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
	    __func__, d->bd_pid, d->bd_writer ? "writer" : "active");

	if (op_w == 0)
		EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
}

/*
 * Check if we need to upgrade our descriptor @d from write-only mode.
 */
static int
bpf_check_upgrade(u_long cmd, struct bpf_d *d, struct bpf_insn *fcode, int flen)
{
	int is_snap, need_upgrade;

	/*
	 * Check if we've already upgraded or the new filter is empty.
	 */
	if (d->bd_writer == 0 || fcode == NULL)
		return (0);

	need_upgrade = 0;

	/*
	 * Check if cmd looks like a snaplen setting from
	 * pcap_bpf.c:pcap_open_live().
	 * Note we're not checking the .k value here:
	 * while pcap_open_live() definitely sets it to a non-zero value,
	 * we'd prefer to treat the k=0 (deny ALL) case the same way, i.e.,
	 * not consider upgrading immediately.
	 */
	if (cmd == BIOCSETF && flen == 1 && fcode[0].code == (BPF_RET | BPF_K))
		is_snap = 1;
	else
		is_snap = 0;

	if (is_snap == 0) {
		/*
		 * We're setting the first filter and it doesn't look like
		 * a snaplen setting.  We're probably using bpf directly.
		 * Upgrade immediately.
		 */
		need_upgrade = 1;
	} else {
		/*
		 * Do not require an upgrade on the first BIOCSETF
		 * (used by pcap_open_live() to set the snaplen).
		 */

		if (--d->bd_writer == 0) {
			/*
			 * The first snaplen filter has already
			 * been set.  This is probably a catch-all
			 * filter.
			 */
			need_upgrade = 1;
		}
	}

	CTR5(KTR_NET,
	    "%s: filter function set by pid %d, "
	    "bd_writer counter %d, snap %d upgrade %d",
	    __func__, d->bd_pid, d->bd_writer,
	    is_snap, need_upgrade);

	return (need_upgrade);
}
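
/*
 * Hedged userland sketch (not part of the original source) of the two-step
 * filter sequence the heuristic above is tuned for.  pcap_open_live() first
 * issues a one-instruction "return snaplen" program, and only a subsequent
 * BIOCSETF upgrades the descriptor to an active reader:
 *
 *	struct bpf_insn snap = BPF_STMT(BPF_RET | BPF_K, 65535);
 *	struct bpf_program p = { 1, &snap };
 *	ioctl(fd, BIOCSETF, &p);	// bd_writer: 2 -> 1, still writer-only
 *	ioctl(fd, BIOCSETF, &p);	// bd_writer: 1 -> 0, upgraded
 */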

/*
 * Add d to the list of active bp filters.
 * Requires bpf_attachd() to be called before.
 */
static void
bpf_upgraded(struct bpf_d *d)
{
	struct bpf_if *bp;

	BPF_LOCK_ASSERT();

	bp = d->bd_bif;

	/*
	 * The filter can be set several times without specifying an
	 * interface.  Mark d as a reader and exit.
	 */
	if (bp == NULL) {
		BPFD_LOCK(d);
		d->bd_writer = 0;
		BPFD_UNLOCK(d);
		return;
	}

	BPFIF_WLOCK(bp);
	BPFD_LOCK(d);

	/* Remove from writers-only list */
	LIST_REMOVE(d, bd_next);
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
	/* Mark d as a reader */
	d->bd_writer = 0;

	BPFD_UNLOCK(d);
	BPFIF_WUNLOCK(bp);

	CTR2(KTR_NET, "%s: upgrade required by pid %d", __func__, d->bd_pid);

	EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	BPF_LOCK();
	bpf_detachd_locked(d);
	BPF_UNLOCK();
}

static void
bpf_detachd_locked(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);

	BPF_LOCK_ASSERT();

	/* Check if descriptor is attached */
	if ((bp = d->bd_bif) == NULL)
		return;

	BPFIF_WLOCK(bp);
	BPFD_LOCK(d);

	/* Save bd_writer value */
	error = d->bd_writer;

	/*
	 * Remove d from the interface's descriptor list.
	 */
	LIST_REMOVE(d, bd_next);

	ifp = bp->bif_ifp;
	d->bd_bif = NULL;
	BPFD_UNLOCK(d);
	BPFIF_WUNLOCK(bp);

	bpf_bpfd_cnt--;

	/* Call event handler iff d is attached */
	if (error == 0)
		EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		CURVNET_SET(ifp->if_vnet);
		error = ifpromisc(ifp, 0);
		CURVNET_RESTORE();
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged.
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
				"bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
static void
bpf_dtor(void *data)
{
	struct bpf_d *d = data;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);
	bpf_detachd(d);
#ifdef MAC
	mac_bpfdesc_destroy(d);
#endif /* MAC */
	seldrain(&d->bd_sel);
	knlist_destroy(&d->bd_sel.si_note);
	callout_drain(&d->bd_callout);
	bpf_freed(d);
	free(d, M_BPF);
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static	int
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d;
	int error;

	d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(d, bpf_dtor);
	if (error != 0) {
		free(d, M_BPF);
		return (error);
	}

	/* Setup counters */
	d->bd_rcount = counter_u64_alloc(M_WAITOK);
	d->bd_dcount = counter_u64_alloc(M_WAITOK);
	d->bd_fcount = counter_u64_alloc(M_WAITOK);
	d->bd_wcount = counter_u64_alloc(M_WAITOK);
	d->bd_wfcount = counter_u64_alloc(M_WAITOK);
	d->bd_wdcount = counter_u64_alloc(M_WAITOK);
	d->bd_zcopy = counter_u64_alloc(M_WAITOK);

	/*
	 * For historical reasons, perform a one-time initialization call to
	 * the buffer routines, even though we're not yet committed to a
	 * particular buffer method.
	 */
	bpf_buffer_init(d);
	if ((flags & FREAD) == 0)
		d->bd_writer = 2;
	d->bd_hbuf_in_use = 0;
	d->bd_bufmode = BPF_BUFMODE_BUFFER;
	d->bd_sig = SIGIO;
	d->bd_direction = BPF_D_INOUT;
	BPF_PID_REFRESH(d, td);
#ifdef MAC
	mac_bpfdesc_init(d);
	mac_bpfdesc_create(td->td_ucred, d);
#endif
	mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
	knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);

	return (0);
}
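
/*
 * Hedged userland sketch (not part of the original source) of the typical
 * open/attach sequence this entry point serves; the interface name is
 * illustrative:
 *
 *	int fd = open("/dev/bpf", O_RDWR);	// devfs clone device
 *	u_int blen;
 *	ioctl(fd, BIOCGBLEN, &blen);		// kernel buffer size
 *	struct ifreq ifr = { .ifr_name = "em0" };
 *	ioctl(fd, BIOCSETIF, &ifr);		// bind to an interface
 *	ioctl(fd, BIOCIMMEDIATE, &(u_int){1});	// deliver packets as they arrive
 */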

/*
 *  bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	int error;
	int non_block;
	int timed_out;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Restrict the application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	non_block = ((ioflag & O_NONBLOCK) != 0);

	BPFD_LOCK(d);
	BPF_PID_REFRESH_CUR(d);
	if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
		BPFD_UNLOCK(d);
		return (EOPNOTSUPP);
	}
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	while (d->bd_hbuf_in_use) {
		error = mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
		    PRINET|PCATCH, "bd_hbuf", 0);
		if (error != 0) {
			BPFD_UNLOCK(d);
			return (error);
		}
	}
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if (d->bd_slen != 0) {
			/*
			 * One or more packets arrived either since the
			 * previous read or while we were asleep.
			 */
			if (d->bd_immediate || non_block || timed_out) {
				/*
				 * Rotate the buffers and return what's here
				 * if we are in immediate mode, non-blocking
				 * flag is set, or this descriptor timed out.
				 */
				ROTATE_BUFFERS(d);
				break;
			}
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (non_block) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		error = msleep(d, &d->bd_lock, PRINET|PCATCH,
		     "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	d->bd_hbuf_in_use = 1;
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 *
	 * We do not have to worry about simultaneous reads because
	 * we waited for sole access to the hold buffer above.
	 */
	error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);

	BPFD_LOCK(d);
	KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf"));
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	bpf_buf_reclaimed(d);
	d->bd_hbuf_in_use = 0;
	wakeup(&d->bd_hbuf_in_use);
	BPFD_UNLOCK(d);

	return (error);
}
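
/*
 * Hedged userland sketch (not part of the original source) of consuming one
 * read() worth of packets; blen is the size obtained via BIOCGBLEN and
 * handle() is a hypothetical placeholder.  Each record is a bpf_hdr followed
 * by the capture, padded with BPF_WORDALIGN() between records:
 *
 *	char *buf = malloc(blen), *p;
 *	ssize_t n = read(fd, buf, blen);	// buffer must be blen bytes
 *	for (p = buf; p < buf + n; ) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		handle(p + bh->bh_hdrlen, bh->bh_caplen);
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */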

/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
}

static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	BPFD_LOCK_ASSERT(d);

	if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout))
		return;
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
}

static int
bpf_ready(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
		return (1);
	if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
	    d->bd_slen != 0)
		return (1);
	return (0);
}

static int
bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	struct ifnet *ifp;
	struct mbuf *m, *mc;
	struct sockaddr dst;
	struct route ro;
	int error, hlen;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	BPF_PID_REFRESH_CUR(d);
	counter_u64_add(d->bd_wcount, 1);
	/* XXX: locking required */
	if (d->bd_bif == NULL) {
		counter_u64_add(d->bd_wdcount, 1);
		return (ENXIO);
	}

	ifp = d->bd_bif->bif_ifp;

	if ((ifp->if_flags & IFF_UP) == 0) {
		counter_u64_add(d->bd_wdcount, 1);
		return (ENETDOWN);
	}

	if (uio->uio_resid == 0) {
		counter_u64_add(d->bd_wdcount, 1);
		return (0);
	}

	bzero(&dst, sizeof(dst));
	m = NULL;
	hlen = 0;
	/* XXX: bpf_movein() can sleep */
	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
	    &m, &dst, &hlen, d);
	if (error) {
		counter_u64_add(d->bd_wdcount, 1);
		return (error);
	}
	counter_u64_add(d->bd_wfcount, 1);
	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	if (d->bd_feedback) {
		mc = m_dup(m, M_NOWAIT);
		if (mc != NULL)
			mc->m_pkthdr.rcvif = ifp;
		/* Set M_PROMISC for outgoing packets to be discarded. */
		if (d->bd_direction == BPF_D_INOUT)
			m->m_flags |= M_PROMISC;
	} else
		mc = NULL;

	m->m_pkthdr.len -= hlen;
	m->m_len -= hlen;
	m->m_data += hlen;	/* XXX */

	CURVNET_SET(ifp->if_vnet);
#ifdef MAC
	BPFD_LOCK(d);
	mac_bpfdesc_create_mbuf(d, m);
	if (mc != NULL)
		mac_bpfdesc_create_mbuf(d, mc);
	BPFD_UNLOCK(d);
#endif

	bzero(&ro, sizeof(ro));
	if (hlen != 0) {
		ro.ro_prepend = (u_char *)&dst.sa_data;
		ro.ro_plen = hlen;
		ro.ro_flags = RT_HAS_HEADER;
	}

	error = (*ifp->if_output)(ifp, m, &dst, &ro);
	if (error)
		counter_u64_add(d->bd_wdcount, 1);

	if (mc != NULL) {
		if (error == 0)
			(*ifp->if_input)(ifp, mc);
		else
			m_freem(mc);
	}
	CURVNET_RESTORE();

	return (error);
}
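
/*
 * Hedged userland sketch (not part of the original source) of the write
 * path above for a DLT_EN10MB descriptor; the frame contents are
 * illustrative:
 *
 *	struct {
 *		struct ether_header eh;
 *		char payload[46];
 *	} __packed frame = { ... };
 *	write(fd, &frame, sizeof(frame));	// injected via if_output()
 *
 * With BIOCSHDRCMPLT unset, the kernel overwrites the source MAC address
 * with the interface's own (see the DLT_EN10MB case in bpf_movein()).
 */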

/*
 * Reset a descriptor by flushing its packet buffer and clearing the receive
 * and drop counts.  This is doable for kernel-only buffers, but with
 * zero-copy buffers, we can't write to (or rotate) buffers that are
 * currently owned by userspace.  It would be nice if we could encapsulate
 * this logic in the buffer code rather than here.
 */
static void
reset_d(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	while (d->bd_hbuf_in_use)
		mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET,
		    "bd_hbuf", 0);
	if ((d->bd_hbuf != NULL) &&
	    (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}
	if (bpf_canwritebuf(d))
		d->bd_slen = 0;
	counter_u64_zero(d->bd_rcount);
	counter_u64_zero(d->bd_dcount);
	counter_u64_zero(d->bd_fcount);
	counter_u64_zero(d->bd_wcount);
	counter_u64_zero(d->bd_wfcount);
	counter_u64_zero(d->bd_wdcount);
	counter_u64_zero(d->bd_zcopy);
}

/*
 *  FIONREAD		Check for read packet available.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set read filter.
 *  BIOCSETFNR		Set read filter without resetting descriptor.
 *  BIOCSETWF		Set write filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGDIRECTION	Get packet direction flag
 *  BIOCSDIRECTION	Set packet direction flag
 *  BIOCGTSTAMP		Get time stamp format and resolution.
 *  BIOCSTSTAMP		Set time stamp format and resolution.
 *  BIOCLOCK		Set "locked" flag
 *  BIOCFEEDBACK	Set packet feedback mode.
 *  BIOCSETZBUF		Set current zero-copy buffer locations.
 *  BIOCGETZMAX		Get maximum zero-copy buffer size.
 *  BIOCROTZBUF		Force rotation of zero-copy buffer
 *  BIOCSETBUFMODE	Set buffer mode.
 *  BIOCGETBUFMODE	Get current buffer mode.
 */
/* ARGSUSED */
static	int
bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct bpf_d *d;
	int error;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	BPF_PID_REFRESH(d, td);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);

	if (d->bd_locked == 1) {
		switch (cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
#ifdef COMPAT_FREEBSD32
		case BIOCGDLTLIST32:
#endif
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && defined(__amd64__)
		case BIOCGRTIMEOUT32:
#endif
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case BIOCSTSTAMP:
		case BIOCFEEDBACK:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && defined(__amd64__)
		case BIOCSRTIMEOUT32:
#endif
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
		case BIOCROTZBUF:
			break;
		default:
			return (EPERM);
		}
	}
#ifdef COMPAT_FREEBSD32
	/*
	 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
	 * that it will get 32-bit packet headers.
	 */
	switch (cmd) {
	case BIOCSETF32:
	case BIOCSETFNR32:
	case BIOCSETWF32:
	case BIOCGDLTLIST32:
	case BIOCGRTIMEOUT32:
	case BIOCSRTIMEOUT32:
		if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
			BPFD_LOCK(d);
			d->bd_compat32 = 1;
			BPFD_UNLOCK(d);
		}
	}
#endif

	CURVNET_SET(TD_TO_VNET(td));
	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_LOCK(d);
			n = d->bd_slen;
			while (d->bd_hbuf_in_use)
				mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
				    PRINET, "bd_hbuf", 0);
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_UNLOCK(d);

			*(int *)addr = n;
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		BPFD_LOCK(d);
		*(u_int *)addr = d->bd_bufsize;
		BPFD_UNLOCK(d);
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		error = bpf_ioctl_sblen(d, (u_int *)addr);
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETFNR:
	case BIOCSETWF:
#ifdef COMPAT_FREEBSD32
	case BIOCSETF32:
	case BIOCSETFNR32:
	case BIOCSETWF32:
#endif
		error = bpf_setf(d, (struct bpf_program *)addr, cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		BPF_LOCK();
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		BPF_UNLOCK();
		break;

	/*
	 * Get a list of supported data link types.
	 */
#ifdef COMPAT_FREEBSD32
	case BIOCGDLTLIST32:
		{
			struct bpf_dltlist32 *list32;
			struct bpf_dltlist dltlist;

			list32 = (struct bpf_dltlist32 *)addr;
			dltlist.bfl_len = list32->bfl_len;
			dltlist.bfl_list = PTRIN(list32->bfl_list);
			BPF_LOCK();
			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				error = bpf_getdltlist(d, &dltlist);
				if (error == 0)
					list32->bfl_len = dltlist.bfl_len;
			}
			BPF_UNLOCK();
			break;
		}
#endif

	case BIOCGDLTLIST:
		BPF_LOCK();
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		BPF_UNLOCK();
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		BPF_LOCK();
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		BPF_UNLOCK();
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		BPF_LOCK();
		if (d->bd_bif == NULL)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		BPF_UNLOCK();
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		{
			int alloc_buf, size;

			/*
			 * Behavior here depends on the buffering model.  If
			 * we're using kernel memory buffers, then we can
			 * allocate them here.  If we're using zero-copy,
			 * then the user process must have registered buffers
			 * by the time we get here.
			 */
			alloc_buf = 0;
			BPFD_LOCK(d);
			if (d->bd_bufmode == BPF_BUFMODE_BUFFER &&
			    d->bd_sbuf == NULL)
				alloc_buf = 1;
			BPFD_UNLOCK(d);
			if (alloc_buf) {
				size = d->bd_bufsize;
				error = bpf_buffer_ioctl_sblen(d, &size);
				if (error != 0)
					break;
			}
			BPF_LOCK();
			error = bpf_setif(d, (struct ifreq *)addr);
			BPF_UNLOCK();
			break;
		}

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && defined(__amd64__)
	case BIOCSRTIMEOUT32:
#endif
		{
			struct timeval *tv = (struct timeval *)addr;
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
			struct timeval32 *tv32;
			struct timeval tv64;

			if (cmd == BIOCSRTIMEOUT32) {
				tv32 = (struct timeval32 *)addr;
				tv = &tv64;
				tv->tv_sec = tv32->tv_sec;
				tv->tv_usec = tv32->tv_usec;
			} else
#endif
				tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && defined(__amd64__)
	case BIOCGRTIMEOUT32:
#endif
		{
			struct timeval *tv;
#if defined(COMPAT_FREEBSD32) && defined(__amd64__)
			struct timeval32 *tv32;
			struct timeval tv64;

			if (cmd == BIOCGRTIMEOUT32)
				tv = &tv64;
			else
#endif
				tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
#if defined(COMPAT_FREEBSD32) && defined(__amd64__)
			if (cmd == BIOCGRTIMEOUT32) {
				tv32 = (struct timeval32 *)addr;
				tv32->tv_sec = tv->tv_sec;
				tv32->tv_usec = tv->tv_usec;
			}
#endif

			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			/* XXXCSJP overflow */
			bs->bs_recv = (u_int)counter_u64_fetch(d->bd_rcount);
			bs->bs_drop = (u_int)counter_u64_fetch(d->bd_dcount);
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		BPFD_LOCK(d);
		d->bd_immediate = *(u_int *)addr;
		BPFD_UNLOCK(d);
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		BPFD_LOCK(d);
		*(u_int *)addr = d->bd_hdrcmplt;
		BPFD_UNLOCK(d);
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		BPFD_LOCK(d);
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		BPFD_UNLOCK(d);
		break;

	/*
	 * Get packet direction flag
	 */
	case BIOCGDIRECTION:
		BPFD_LOCK(d);
		*(u_int *)addr = d->bd_direction;
		BPFD_UNLOCK(d);
		break;

	/*
	 * Set packet direction flag
	 */
	case BIOCSDIRECTION:
		{
			u_int	direction;

			direction = *(u_int *)addr;
			switch (direction) {
			case BPF_D_IN:
			case BPF_D_INOUT:
			case BPF_D_OUT:
				BPFD_LOCK(d);
				d->bd_direction = direction;
				BPFD_UNLOCK(d);
				break;
			default:
				error = EINVAL;
			}
		}
		break;

	/*
	 * Get packet timestamp format and resolution.
	 */
	case BIOCGTSTAMP:
		BPFD_LOCK(d);
		*(u_int *)addr = d->bd_tstamp;
		BPFD_UNLOCK(d);
		break;

	/*
	 * Set packet timestamp format and resolution.
	 */
	case BIOCSTSTAMP:
		{
			u_int	func;

			func = *(u_int *)addr;
			if (BPF_T_VALID(func))
				d->bd_tstamp = func;
			else
				error = EINVAL;
		}
		break;

	case BIOCFEEDBACK:
		BPFD_LOCK(d);
		d->bd_feedback = *(u_int *)addr;
		BPFD_UNLOCK(d);
		break;

	case BIOCLOCK:
		BPFD_LOCK(d);
		d->bd_locked = 1;
		BPFD_UNLOCK(d);
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		BPFD_LOCK(d);
		d->bd_async = *(int *)addr;
		BPFD_UNLOCK(d);
		break;

	case FIOSETOWN:
		/*
		 * XXX: Add some sort of locking here?
		 * fsetown() can sleep.
		 */
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		BPFD_LOCK(d);
		*(int *)addr = fgetown(&d->bd_sigio);
		BPFD_UNLOCK(d);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else {
				BPFD_LOCK(d);
				d->bd_sig = sig;
				BPFD_UNLOCK(d);
			}
			break;
		}
	case BIOCGRSIG:
		BPFD_LOCK(d);
		*(u_int *)addr = d->bd_sig;
		BPFD_UNLOCK(d);
		break;

	case BIOCGETBUFMODE:
		BPFD_LOCK(d);
		*(u_int *)addr = d->bd_bufmode;
		BPFD_UNLOCK(d);
		break;

	case BIOCSETBUFMODE:
		/*
		 * Allow the buffering mode to be changed as long as we
		 * haven't yet committed to a particular mode.  Our
		 * definition of commitment, for now, is whether or not a
		 * buffer has been allocated or an interface attached, since
		 * that's the point where things get tricky.
		 */
		switch (*(u_int *)addr) {
		case BPF_BUFMODE_BUFFER:
			break;

		case BPF_BUFMODE_ZBUF:
			if (bpf_zerocopy_enable)
				break;
			/* FALLTHROUGH */

		default:
			CURVNET_RESTORE();
			return (EINVAL);
		}

		BPFD_LOCK(d);
		if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
		    d->bd_fbuf != NULL || d->bd_bif != NULL) {
			BPFD_UNLOCK(d);
			CURVNET_RESTORE();
			return (EBUSY);
		}
		d->bd_bufmode = *(u_int *)addr;
		BPFD_UNLOCK(d);
		break;

	case BIOCGETZMAX:
		error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
		break;

	case BIOCSETZBUF:
		error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
		break;

	case BIOCROTZBUF:
		error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
		break;
	}
	CURVNET_RESTORE();
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 *
 * Note we need the global lock here to serialize bpf_setf() and bpf_setif()
 * calls since reading d->bd_bif can't be protected by the descriptor or
 * interface lock due to lock order.
 *
 * Additionally, we have to acquire the interface write lock because
 * bpf_mtap() uses the interface read lock to read all filters.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
#ifdef COMPAT_FREEBSD32
	struct bpf_program fp_swab;
	struct bpf_program32 *fp32;
#endif
	struct bpf_insn *fcode, *old;
#ifdef BPF_JITTER
	bpf_jit_filter *jfunc, *ofunc;
#endif
	size_t size;
	u_int flen;
	int need_upgrade;

#ifdef COMPAT_FREEBSD32
	switch (cmd) {
	case BIOCSETF32:
	case BIOCSETWF32:
	case BIOCSETFNR32:
		fp32 = (struct bpf_program32 *)fp;
		fp_swab.bf_len = fp32->bf_len;
		fp_swab.bf_insns = (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
		fp = &fp_swab;
		switch (cmd) {
		case BIOCSETF32:
			cmd = BIOCSETF;
			break;
		case BIOCSETWF32:
			cmd = BIOCSETWF;
			break;
		}
		break;
	}
#endif

	fcode = NULL;
#ifdef BPF_JITTER
	jfunc = ofunc = NULL;
#endif
	need_upgrade = 0;

	/*
	 * Check the new filter's validity before acquiring any locks.
	 * Allocate memory for the new filter, if needed.
	 */
	flen = fp->bf_len;
	if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0))
		return (EINVAL);
	size = flen * sizeof(*fp->bf_insns);
	if (size > 0) {
		/* We're setting up a new filter.  Copy and check the actual data. */
		fcode = malloc(size, M_BPF, M_WAITOK);
		if (copyin(fp->bf_insns, fcode, size) != 0 ||
		    !bpf_validate(fcode, flen)) {
			free(fcode, M_BPF);
			return (EINVAL);
		}
#ifdef BPF_JITTER
		/* The filter is copied inside fcode and is perfectly valid. */
		jfunc = bpf_jitter(fcode, flen);
#endif
	}

	BPF_LOCK();

	/*
	 * Set up the new filter.  The filter change is protected by the
	 * interface lock; additionally, we are protected by the global
	 * lock here.
	 */
	if (d->bd_bif != NULL)
		BPFIF_WLOCK(d->bd_bif);
	BPFD_LOCK(d);
	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		d->bd_wfilter = fcode;
	} else {
		old = d->bd_rfilter;
		d->bd_rfilter = fcode;
#ifdef BPF_JITTER
		ofunc = d->bd_bfilter;
		d->bd_bfilter = jfunc;
#endif
		if (cmd == BIOCSETF)
			reset_d(d);

		need_upgrade = bpf_check_upgrade(cmd, d, fcode, flen);
	}
	BPFD_UNLOCK(d);
	if (d->bd_bif != NULL)
		BPFIF_WUNLOCK(d->bd_bif);
	if (old != NULL)
		free(old, M_BPF);
#ifdef BPF_JITTER
	if (ofunc != NULL)
		bpf_destroy_jit_filter(ofunc);
#endif

	/* Move d to active readers list. */
	if (need_upgrade != 0)
		bpf_upgraded(d);

	BPF_UNLOCK();
	return (0);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	struct ifnet *theywant;

	BPF_LOCK_ASSERT();

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL || theywant->if_bpf == NULL)
		return (ENXIO);

	bp = theywant->if_bpf;

	/* Check if interface is not being detached from BPF */
	BPFIF_RLOCK(bp);
	if (bp->bif_flags & BPFIF_FLAG_DYING) {
		BPFIF_RUNLOCK(bp);
		return (ENXIO);
	}
	BPFIF_RUNLOCK(bp);

	/*
	 * At this point, we expect the buffer is already allocated.  If not,
	 * return an error.
	 */
	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
	case BPF_BUFMODE_ZBUF:
		if (d->bd_sbuf == NULL)
			return (EINVAL);
		break;

	default:
		panic("bpf_setif: bufmode %d", d->bd_bufmode);
	}
	if (bp != d->bd_bif)
		bpf_attachd(d, bp);
	BPFD_LOCK(d);
	reset_d(d);
	BPFD_UNLOCK(d);
	return (0);
}
1988 
1989 /*
1990  * Support for select() and poll() system calls
1991  *
1992  * Return true iff the specific operation will not block indefinitely.
1993  * Otherwise, return false but make a note that a selwakeup() must be done.
1994  */
1995 static int
1996 bpfpoll(struct cdev *dev, int events, struct thread *td)
1997 {
1998 	struct bpf_d *d;
1999 	int revents;
2000 
2001 	if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
2002 		return (events &
2003 		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
2004 
2005 	/*
2006 	 * Refresh PID associated with this descriptor.
2007 	 */
2008 	revents = events & (POLLOUT | POLLWRNORM);
2009 	BPFD_LOCK(d);
2010 	BPF_PID_REFRESH(d, td);
2011 	if (events & (POLLIN | POLLRDNORM)) {
2012 		if (bpf_ready(d))
2013 			revents |= events & (POLLIN | POLLRDNORM);
2014 		else {
2015 			selrecord(td, &d->bd_sel);
2016 			/* Start the read timeout if necessary. */
2017 			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
2018 				callout_reset(&d->bd_callout, d->bd_rtout,
2019 				    bpf_timed_out, d);
2020 				d->bd_state = BPF_WAITING;
2021 			}
2022 		}
2023 	}
2024 	BPFD_UNLOCK(d);
2025 	return (revents);
2026 }
2027 
2028 /*
2029  * Support for kevent() system call.  Register EVFILT_READ filters and
2030  * reject all others.
2031  */
2032 int
2033 bpfkqfilter(struct cdev *dev, struct knote *kn)
2034 {
2035 	struct bpf_d *d;
2036 
2037 	if (devfs_get_cdevpriv((void **)&d) != 0 ||
2038 	    kn->kn_filter != EVFILT_READ)
2039 		return (1);
2040 
2041 	/*
2042 	 * Refresh PID associated with this descriptor.
2043 	 */
2044 	BPFD_LOCK(d);
2045 	BPF_PID_REFRESH_CUR(d);
2046 	kn->kn_fop = &bpfread_filtops;
2047 	kn->kn_hook = d;
2048 	knlist_add(&d->bd_sel.si_note, kn, 1);
2049 	BPFD_UNLOCK(d);
2050 
2051 	return (0);
2052 }
2053 
2054 static void
2055 filt_bpfdetach(struct knote *kn)
2056 {
2057 	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
2058 
2059 	knlist_remove(&d->bd_sel.si_note, kn, 0);
2060 }
2061 
2062 static int
2063 filt_bpfread(struct knote *kn, long hint)
2064 {
2065 	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
2066 	int ready;
2067 
2068 	BPFD_LOCK_ASSERT(d);
2069 	ready = bpf_ready(d);
2070 	if (ready) {
2071 		kn->kn_data = d->bd_slen;
2072 		/*
2073 		 * Ignore the hold buffer if it is being copied to user space.
2074 		 */
2075 		if (!d->bd_hbuf_in_use && d->bd_hbuf)
2076 			kn->kn_data += d->bd_hlen;
2077 	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
2078 		callout_reset(&d->bd_callout, d->bd_rtout,
2079 		    bpf_timed_out, d);
2080 		d->bd_state = BPF_WAITING;
2081 	}
2082 
2083 	return (ready);
2084 }
2085 
2086 #define	BPF_TSTAMP_NONE		0
2087 #define	BPF_TSTAMP_FAST		1
2088 #define	BPF_TSTAMP_NORMAL	2
2089 #define	BPF_TSTAMP_EXTERN	3
2090 
2091 static int
2092 bpf_ts_quality(int tstype)
2093 {
2094 
2095 	if (tstype == BPF_T_NONE)
2096 		return (BPF_TSTAMP_NONE);
2097 	if ((tstype & BPF_T_FAST) != 0)
2098 		return (BPF_TSTAMP_FAST);
2099 
2100 	return (BPF_TSTAMP_NORMAL);
2101 }
2102 
2103 static int
2104 bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m)
2105 {
2106 	struct m_tag *tag;
2107 	int quality;
2108 
2109 	quality = bpf_ts_quality(tstype);
2110 	if (quality == BPF_TSTAMP_NONE)
2111 		return (quality);
2112 
2113 	if (m != NULL) {
2114 		tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL);
2115 		if (tag != NULL) {
2116 			*bt = *(struct bintime *)(tag + 1);
2117 			return (BPF_TSTAMP_EXTERN);
2118 		}
2119 	}
2120 	if (quality == BPF_TSTAMP_NORMAL)
2121 		binuptime(bt);
2122 	else
2123 		getbinuptime(bt);
2124 
2125 	return (quality);
2126 }
2127 
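/*
 * A hedged userland sketch of choosing the timestamp quality implemented by
 * bpf_gettime() above: BPF_T_FAST selects the cheaper, lower-resolution
 * getbinuptime() path, while the default uses binuptime().  Error handling
 * is illustrative:
 *
 *	u_int tstype = BPF_T_NANOTIME | BPF_T_FAST;
 *
 *	if (ioctl(fd, BIOCSTSTAMP, &tstype) < 0)
 *		err(1, "BIOCSTSTAMP");
 */
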
2128 /*
2129  * Incoming linkage from device drivers.  Process the packet pkt, of length
2130  * pktlen, which is stored in a contiguous buffer.  The packet is parsed
2131  * by each process' filter, and if accepted, stashed into the corresponding
2132  * buffer.
2133  */
2134 void
2135 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2136 {
2137 	struct bintime bt;
2138 	struct bpf_d *d;
2139 #ifdef BPF_JITTER
2140 	bpf_jit_filter *bf;
2141 #endif
2142 	u_int slen;
2143 	int gottime;
2144 
2145 	gottime = BPF_TSTAMP_NONE;
2146 
2147 	BPFIF_RLOCK(bp);
2148 
2149 	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2150 		/*
2151 		 * We are not taking any locks on d here because:
2152 		 * 1) any filter change is protected by the interface
2153 		 * write lock;
2154 		 * 2) destroying/detaching d is protected by the interface
2155 		 * write lock, too.
2156 		 */
2157 
2158 		counter_u64_add(d->bd_rcount, 1);
2159 		/*
2160 		 * NB: We don't call BPF_CHECK_DIRECTION() here since there is no
2161 		 * way for the caller to indicate to us whether this packet
2162 		 * is inbound or outbound.  In the bpf_mtap() routines, we use
2163 		 * the interface pointers on the mbuf to figure it out.
2164 		 */
2165 #ifdef BPF_JITTER
2166 		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2167 		if (bf != NULL)
2168 			slen = (*(bf->func))(pkt, pktlen, pktlen);
2169 		else
2170 #endif
2171 		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
2172 		if (slen != 0) {
2173 			/*
2174 			 * Filter matches.  Acquire the descriptor lock.
2175 			 */
2176 			BPFD_LOCK(d);
2177 
2178 			counter_u64_add(d->bd_fcount, 1);
2179 			if (gottime < bpf_ts_quality(d->bd_tstamp))
2180 				gottime = bpf_gettime(&bt, d->bd_tstamp, NULL);
2181 #ifdef MAC
2182 			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2183 #endif
2184 				catchpacket(d, pkt, pktlen, slen,
2185 				    bpf_append_bytes, &bt);
2186 			BPFD_UNLOCK(d);
2187 		}
2188 	}
2189 	BPFIF_RUNLOCK(bp);
2190 }
2191 
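/*
 * A hedged driver-side sketch: a driver holding a received frame in a
 * contiguous buffer would normally guard the call with bpf_peers_present()
 * (or use the equivalent BPF_TAP() macro from <net/bpf.h>) so that the
 * no-listener case costs only a list check; rxbuf/rxlen are illustrative
 * names:
 *
 *	if (bpf_peers_present(ifp->if_bpf))
 *		bpf_tap(ifp->if_bpf, rxbuf, rxlen);
 */
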
2192 #define	BPF_CHECK_DIRECTION(d, r, i)				\
2193 	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
2194 	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
2195 
2196 /*
2197  * Incoming linkage from device drivers, when packet is in an mbuf chain.
2198  * Locking model is explained in bpf_tap().
2199  */
2200 void
2201 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2202 {
2203 	struct bintime bt;
2204 	struct bpf_d *d;
2205 #ifdef BPF_JITTER
2206 	bpf_jit_filter *bf;
2207 #endif
2208 	u_int pktlen, slen;
2209 	int gottime;
2210 
2211 	/* Skip outgoing duplicate packets. */
2212 	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2213 		m->m_flags &= ~M_PROMISC;
2214 		return;
2215 	}
2216 
2217 	pktlen = m_length(m, NULL);
2218 	gottime = BPF_TSTAMP_NONE;
2219 
2220 	BPFIF_RLOCK(bp);
2221 
2222 	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2223 		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2224 			continue;
2225 		counter_u64_add(d->bd_rcount, 1);
2226 #ifdef BPF_JITTER
2227 		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2228 		/* XXX We cannot handle multiple mbufs. */
2229 		if (bf != NULL && m->m_next == NULL)
2230 			slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
2231 		else
2232 #endif
2233 		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
2234 		if (slen != 0) {
2235 			BPFD_LOCK(d);
2236 
2237 			counter_u64_add(d->bd_fcount, 1);
2238 			if (gottime < bpf_ts_quality(d->bd_tstamp))
2239 				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2240 #ifdef MAC
2241 			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2242 #endif
2243 				catchpacket(d, (u_char *)m, pktlen, slen,
2244 				    bpf_append_mbuf, &bt);
2245 			BPFD_UNLOCK(d);
2246 		}
2247 	}
2248 	BPFIF_RUNLOCK(bp);
2249 }
2250 
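/*
 * A minimal userland sketch of restricting the capture direction that
 * BPF_CHECK_DIRECTION() above tests by comparing the mbuf's receive
 * interface against the tap point (error handling illustrative):
 *
 *	u_int dir = BPF_D_IN;		(also: BPF_D_INOUT, BPF_D_OUT)
 *
 *	if (ioctl(fd, BIOCSDIRECTION, &dir) < 0)
 *		err(1, "BIOCSDIRECTION");
 */
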
2251 /*
2252  * Incoming linkage from device drivers, when packet is in
2253  * an mbuf chain and to be prepended by a contiguous header.
2254  */
2255 void
2256 bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
2257 {
2258 	struct bintime bt;
2259 	struct mbuf mb;
2260 	struct bpf_d *d;
2261 	u_int pktlen, slen;
2262 	int gottime;
2263 
2264 	/* Skip outgoing duplicate packets. */
2265 	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2266 		m->m_flags &= ~M_PROMISC;
2267 		return;
2268 	}
2269 
2270 	pktlen = m_length(m, NULL);
2271 	/*
2272 	 * Craft on-stack mbuf suitable for passing to bpf_filter.
2273 	 * Note that we cut corners here; we only set up what's
2274 	 * absolutely needed--this mbuf should never go anywhere else.
2275 	 */
2276 	mb.m_next = m;
2277 	mb.m_data = data;
2278 	mb.m_len = dlen;
2279 	pktlen += dlen;
2280 
2281 	gottime = BPF_TSTAMP_NONE;
2282 
2283 	BPFIF_RLOCK(bp);
2284 
2285 	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2286 		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2287 			continue;
2288 		counter_u64_add(d->bd_rcount, 1);
2289 		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
2290 		if (slen != 0) {
2291 			BPFD_LOCK(d);
2292 
2293 			counter_u64_add(d->bd_fcount, 1);
2294 			if (gottime < bpf_ts_quality(d->bd_tstamp))
2295 				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2296 #ifdef MAC
2297 			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2298 #endif
2299 				catchpacket(d, (u_char *)&mb, pktlen, slen,
2300 				    bpf_append_mbuf, &bt);
2301 			BPFD_UNLOCK(d);
2302 		}
2303 	}
2304 	BPFIF_RUNLOCK(bp);
2305 }
2306 
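/*
 * A hedged driver-side sketch of bpf_mtap2(): the caller supplies a
 * contiguous pseudo-header that is logically prepended to the mbuf chain,
 * in the style of net80211's radiotap taps.  The softc field and header
 * type are illustrative names for a bpfattach2()-style tap point:
 *
 *	struct drv_rx_radiotap_header th;
 *
 *	(fill in th)
 *	if (bpf_peers_present(sc->sc_drvbpf))
 *		bpf_mtap2(sc->sc_drvbpf, &th, sizeof(th), m);
 */
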
2307 #undef	BPF_CHECK_DIRECTION
2308 
2309 #undef	BPF_TSTAMP_NONE
2310 #undef	BPF_TSTAMP_FAST
2311 #undef	BPF_TSTAMP_NORMAL
2312 #undef	BPF_TSTAMP_EXTERN
2313 
2314 static int
2315 bpf_hdrlen(struct bpf_d *d)
2316 {
2317 	int hdrlen;
2318 
2319 	hdrlen = d->bd_bif->bif_hdrlen;
2320 #ifndef BURN_BRIDGES
2321 	if (d->bd_tstamp == BPF_T_NONE ||
2322 	    BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
2323 #ifdef COMPAT_FREEBSD32
2324 		if (d->bd_compat32)
2325 			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32);
2326 		else
2327 #endif
2328 			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr);
2329 	else
2330 #endif
2331 		hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr);
2332 #ifdef COMPAT_FREEBSD32
2333 	if (d->bd_compat32)
2334 		hdrlen = BPF_WORDALIGN32(hdrlen);
2335 	else
2336 #endif
2337 		hdrlen = BPF_WORDALIGN(hdrlen);
2338 
2339 	return (hdrlen - d->bd_bif->bif_hdrlen);
2340 }
2341 
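/*
 * The padding computed above keeps successive capture records aligned, so
 * a reader can walk a buffer returned by read(2) as in this minimal sketch
 * (assuming the default BPF_T_MICROTIME mode, i.e. struct bpf_hdr records;
 * buf/nread are illustrative):
 *
 *	u_char *p = buf;
 *
 *	while (p < buf + nread) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		u_char *payload = p + bh->bh_hdrlen;
 *
 *		(consume bh->bh_caplen captured bytes at payload)
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */
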
2342 static void
2343 bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
2344 {
2345 	struct bintime bt2, boottimebin;
2346 	struct timeval tsm;
2347 	struct timespec tsn;
2348 
2349 	if ((tstype & BPF_T_MONOTONIC) == 0) {
2350 		bt2 = *bt;
2351 		getboottimebin(&boottimebin);
2352 		bintime_add(&bt2, &boottimebin);
2353 		bt = &bt2;
2354 	}
2355 	switch (BPF_T_FORMAT(tstype)) {
2356 	case BPF_T_MICROTIME:
2357 		bintime2timeval(bt, &tsm);
2358 		ts->bt_sec = tsm.tv_sec;
2359 		ts->bt_frac = tsm.tv_usec;
2360 		break;
2361 	case BPF_T_NANOTIME:
2362 		bintime2timespec(bt, &tsn);
2363 		ts->bt_sec = tsn.tv_sec;
2364 		ts->bt_frac = tsn.tv_nsec;
2365 		break;
2366 	case BPF_T_BINTIME:
2367 		ts->bt_sec = bt->sec;
2368 		ts->bt_frac = bt->frac;
2369 		break;
2370 	}
2371 }
2372 
2373 /*
2374  * Move the packet data from interface memory (pkt) into the
2375  * store buffer.  "cpfn" is the routine called to do the actual data
2376  * transfer.  bpf_append_bytes is passed in to copy contiguous chunks,
2377  * while bpf_append_mbuf is passed in to copy mbuf chains.  In the
2378  * latter case, pkt is really an mbuf.
2379  */
2380 static void
2381 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
2382     void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
2383     struct bintime *bt)
2384 {
2385 	struct bpf_xhdr hdr;
2386 #ifndef BURN_BRIDGES
2387 	struct bpf_hdr hdr_old;
2388 #ifdef COMPAT_FREEBSD32
2389 	struct bpf_hdr32 hdr32_old;
2390 #endif
2391 #endif
2392 	int caplen, curlen, hdrlen, totlen;
2393 	int do_wakeup = 0;
2394 	int do_timestamp;
2395 	int tstype;
2396 
2397 	BPFD_LOCK_ASSERT(d);
2398 
2399 	/*
2400 	 * Detect whether user space has released a buffer back to us, and if
2401 	 * so, move it from being a hold buffer to a free buffer.  This may
2402 	 * not be the best place to do it (for example, we might only want to
2403 	 * run this check if we need the space), but for now it's a reliable
2404 	 * spot to do it.
2405 	 */
2406 	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
2407 		d->bd_fbuf = d->bd_hbuf;
2408 		d->bd_hbuf = NULL;
2409 		d->bd_hlen = 0;
2410 		bpf_buf_reclaimed(d);
2411 	}
2412 
2413 	/*
2414 	 * Figure out how many bytes to move.  If the packet is
2415 	 * greater or equal to the snapshot length, transfer that
2416 	 * much.  Otherwise, transfer the whole packet (unless
2417 	 * we hit the buffer size limit).
2418 	 */
2419 	hdrlen = bpf_hdrlen(d);
2420 	totlen = hdrlen + min(snaplen, pktlen);
2421 	if (totlen > d->bd_bufsize)
2422 		totlen = d->bd_bufsize;
2423 
2424 	/*
2425 	 * Round up the end of the previous packet to the next longword.
2426 	 *
2427 	 * Drop the packet if there is no room and no hope of room.  If the
2428 	 * packet would overflow the storage buffer, or the storage buffer is
2429 	 * considered immutable by the buffer model, try to rotate the
2430 	 * buffers and wake up pending processes.
2431 	 */
2432 #ifdef COMPAT_FREEBSD32
2433 	if (d->bd_compat32)
2434 		curlen = BPF_WORDALIGN32(d->bd_slen);
2435 	else
2436 #endif
2437 		curlen = BPF_WORDALIGN(d->bd_slen);
2438 	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
2439 		if (d->bd_fbuf == NULL) {
2440 			/*
2441 			 * There's no room in the store buffer, and no
2442 			 * prospect of room, so drop the packet.  Notify the
2443 			 * buffer model.
2444 			 */
2445 			bpf_buffull(d);
2446 			counter_u64_add(d->bd_dcount, 1);
2447 			return;
2448 		}
2449 		KASSERT(!d->bd_hbuf_in_use, ("hold buffer is in use"));
2450 		ROTATE_BUFFERS(d);
2451 		do_wakeup = 1;
2452 		curlen = 0;
2453 	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
2454 		/*
2455 		 * Immediate mode is set, or the read timeout has already
2456 		 * expired during a select call.  A packet arrived, so the
2457 		 * reader should be woken up.
2458 		 */
2459 		do_wakeup = 1;
2460 	caplen = totlen - hdrlen;
2461 	tstype = d->bd_tstamp;
2462 	do_timestamp = tstype != BPF_T_NONE;
2463 #ifndef BURN_BRIDGES
2464 	if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) {
2465 		struct bpf_ts ts;
2466 		if (do_timestamp)
2467 			bpf_bintime2ts(bt, &ts, tstype);
2468 #ifdef COMPAT_FREEBSD32
2469 		if (d->bd_compat32) {
2470 			bzero(&hdr32_old, sizeof(hdr32_old));
2471 			if (do_timestamp) {
2472 				hdr32_old.bh_tstamp.tv_sec = ts.bt_sec;
2473 				hdr32_old.bh_tstamp.tv_usec = ts.bt_frac;
2474 			}
2475 			hdr32_old.bh_datalen = pktlen;
2476 			hdr32_old.bh_hdrlen = hdrlen;
2477 			hdr32_old.bh_caplen = caplen;
2478 			bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
2479 			    sizeof(hdr32_old));
2480 			goto copy;
2481 		}
2482 #endif
2483 		bzero(&hdr_old, sizeof(hdr_old));
2484 		if (do_timestamp) {
2485 			hdr_old.bh_tstamp.tv_sec = ts.bt_sec;
2486 			hdr_old.bh_tstamp.tv_usec = ts.bt_frac;
2487 		}
2488 		hdr_old.bh_datalen = pktlen;
2489 		hdr_old.bh_hdrlen = hdrlen;
2490 		hdr_old.bh_caplen = caplen;
2491 		bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
2492 		    sizeof(hdr_old));
2493 		goto copy;
2494 	}
2495 #endif
2496 
2497 	/*
2498 	 * Append the bpf header.  Note we append the actual header size, but
2499 	 * move forward the length of the header plus padding.
2500 	 */
2501 	bzero(&hdr, sizeof(hdr));
2502 	if (do_timestamp)
2503 		bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype);
2504 	hdr.bh_datalen = pktlen;
2505 	hdr.bh_hdrlen = hdrlen;
2506 	hdr.bh_caplen = caplen;
2507 	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));
2508 
2509 	/*
2510 	 * Copy the packet data into the store buffer and update its length.
2511 	 */
2512 #ifndef BURN_BRIDGES
2513 copy:
2514 #endif
2515 	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
2516 	d->bd_slen = curlen + totlen;
2517 
2518 	if (do_wakeup)
2519 		bpf_wakeup(d);
2520 }
2521 
2522 /*
2523  * Free buffers currently in use by a descriptor.
2524  * Called on close.
2525  */
2526 static void
2527 bpf_freed(struct bpf_d *d)
2528 {
2529 
2530 	/*
2531 	 * We don't need to lock out interrupts since this descriptor has
2532 	 * been detached from its interface and it hasn't yet been marked
2533 	 * free.
2534 	 */
2535 	bpf_free(d);
2536 	if (d->bd_rfilter != NULL) {
2537 		free((caddr_t)d->bd_rfilter, M_BPF);
2538 #ifdef BPF_JITTER
2539 		if (d->bd_bfilter != NULL)
2540 			bpf_destroy_jit_filter(d->bd_bfilter);
2541 #endif
2542 	}
2543 	if (d->bd_wfilter != NULL)
2544 		free((caddr_t)d->bd_wfilter, M_BPF);
2545 	mtx_destroy(&d->bd_lock);
2546 
2547 	counter_u64_free(d->bd_rcount);
2548 	counter_u64_free(d->bd_dcount);
2549 	counter_u64_free(d->bd_fcount);
2550 	counter_u64_free(d->bd_wcount);
2551 	counter_u64_free(d->bd_wfcount);
2552 	counter_u64_free(d->bd_wdcount);
2553 	counter_u64_free(d->bd_zcopy);
2554 
2555 }
2556 
2557 /*
2558  * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
2559  * fixed size of the link header (variable length headers not yet supported).
2560  */
2561 void
2562 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2563 {
2564 
2565 	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2566 }
2567 
2568 /*
2569  * Attach an interface to bpf.  ifp is a pointer to the structure
2570  * defining the interface to be attached, dlt is the link layer type,
2571  * and hdrlen is the fixed size of the link header (variable length
2572  * headers are not yet supported).
2573  */
2574 void
2575 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2576 {
2577 	struct bpf_if *bp;
2578 
2579 	bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
2580 	if (bp == NULL)
2581 		panic("bpfattach");
2582 
2583 	LIST_INIT(&bp->bif_dlist);
2584 	LIST_INIT(&bp->bif_wlist);
2585 	bp->bif_ifp = ifp;
2586 	bp->bif_dlt = dlt;
2587 	rw_init(&bp->bif_lock, "bpf interface lock");
2588 	KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
2589 	bp->bif_bpf = driverp;
2590 	*driverp = bp;
2591 
2592 	BPF_LOCK();
2593 	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
2594 	BPF_UNLOCK();
2595 
2596 	bp->bif_hdrlen = hdrlen;
2597 
2598 	if (bootverbose && IS_DEFAULT_VNET(curvnet))
2599 		if_printf(ifp, "bpf attached\n");
2600 }
2601 
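/*
 * For reference, Ethernet drivers pick this up via ether_ifattach(), which
 * effectively does:
 *
 *	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
 *
 * Drivers exporting an additional link-layer view (e.g. net80211's
 * radiotap) attach a second tap point with bpfattach2() and a separate
 * bpf_if pointer.
 */
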
2602 #ifdef VIMAGE
2603 /*
2604  * When moving interfaces between vnet instances we need a way to
2605  * query the dlt and hdrlen before detach so we can re-attach the if_bpf
2606  * after the vmove.  We unfortunately have no device driver infrastructure
2607  * to query the interface for these values after creation/attach, thus
2608  * add this as a workaround.
2609  */
2610 int
2611 bpf_get_bp_params(struct bpf_if *bp, u_int *bif_dlt, u_int *bif_hdrlen)
2612 {
2613 
2614 	if (bp == NULL)
2615 		return (ENXIO);
2616 	if (bif_dlt == NULL && bif_hdrlen == NULL)
2617 		return (0);
2618 
2619 	if (bif_dlt != NULL)
2620 		*bif_dlt = bp->bif_dlt;
2621 	if (bif_hdrlen != NULL)
2622 		*bif_hdrlen = bp->bif_hdrlen;
2623 
2624 	return (0);
2625 }
2626 #endif
2627 
2628 /*
2629  * Detach bpf from an interface. This involves detaching each descriptor
2630  * associated with the interface. Notify each descriptor as it's detached
2631  * so that any sleepers wake up and get ENXIO.
2632  */
2633 void
2634 bpfdetach(struct ifnet *ifp)
2635 {
2636 	struct bpf_if	*bp, *bp_temp;
2637 	struct bpf_d	*d;
2638 	int ndetached;
2639 
2640 	ndetached = 0;
2641 
2642 	BPF_LOCK();
2643 	/* Find all bpf_if structs which reference ifp and detach them. */
2644 	LIST_FOREACH_SAFE(bp, &bpf_iflist, bif_next, bp_temp) {
2645 		if (ifp != bp->bif_ifp)
2646 			continue;
2647 
2648 		LIST_REMOVE(bp, bif_next);
2649 		/* Add to to-be-freed list */
2650 		LIST_INSERT_HEAD(&bpf_freelist, bp, bif_next);
2651 
2652 		ndetached++;
2653 		/*
2654 		 * Delay freeing bp till interface is detached
2655 		 * and all routes through this interface are removed.
2656 		 * Mark bp as detached to restrict new consumers.
2657 		 */
2658 		BPFIF_WLOCK(bp);
2659 		bp->bif_flags |= BPFIF_FLAG_DYING;
2660 		*bp->bif_bpf = NULL;
2661 		BPFIF_WUNLOCK(bp);
2662 
2663 		CTR4(KTR_NET, "%s: scheduling free for encap %d (%p) for if %p",
2664 		    __func__, bp->bif_dlt, bp, ifp);
2665 
2666 		/* Free common descriptors */
2667 		while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
2668 			bpf_detachd_locked(d);
2669 			BPFD_LOCK(d);
2670 			bpf_wakeup(d);
2671 			BPFD_UNLOCK(d);
2672 		}
2673 
2674 		/* Free writer-only descriptors */
2675 		while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) {
2676 			bpf_detachd_locked(d);
2677 			BPFD_LOCK(d);
2678 			bpf_wakeup(d);
2679 			BPFD_UNLOCK(d);
2680 		}
2681 	}
2682 	BPF_UNLOCK();
2683 
2684 #ifdef INVARIANTS
2685 	if (ndetached == 0)
2686 		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
2687 #endif
2688 }
2689 
2690 /*
2691  * Interface departure handler.
2692  * Note that a departure event does not guarantee the interface is
2693  * going down; interface renaming is currently done via a
2694  * departure/arrival event pair.
2695  *
2696  * The departure handler runs after all routes through the given
2697  * interface have been removed and it is down.  We assume it is now safe
2698  * to free data allocated by BPF.
2699  */
2700 static void
2701 bpf_ifdetach(void *arg __unused, struct ifnet *ifp)
2702 {
2703 	struct bpf_if *bp, *bp_temp;
2704 	int nmatched = 0;
2705 
2706 	/* Ignore ifnet renaming. */
2707 	if (ifp->if_flags & IFF_RENAMING)
2708 		return;
2709 
2710 	BPF_LOCK();
2711 	/*
2712 	 * Find matching entries in free list.
2713 	 * Nothing should be found if bpfdetach() was not called.
2714 	 */
2715 	LIST_FOREACH_SAFE(bp, &bpf_freelist, bif_next, bp_temp) {
2716 		if (ifp != bp->bif_ifp)
2717 			continue;
2718 
2719 		CTR3(KTR_NET, "%s: freeing BPF instance %p for interface %p",
2720 		    __func__, bp, ifp);
2721 
2722 		LIST_REMOVE(bp, bif_next);
2723 
2724 		rw_destroy(&bp->bif_lock);
2725 		free(bp, M_BPF);
2726 
2727 		nmatched++;
2728 	}
2729 	BPF_UNLOCK();
2730 }
2731 
2732 /*
2733  * Get a list of the data link types available on the interface.
2734  */
2735 static int
2736 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
2737 {
2738 	struct ifnet *ifp;
2739 	struct bpf_if *bp;
2740 	u_int *lst;
2741 	int error, n, n1;
2742 
2743 	BPF_LOCK_ASSERT();
2744 
2745 	ifp = d->bd_bif->bif_ifp;
2746 again:
2747 	n1 = 0;
2748 	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2749 		if (bp->bif_ifp == ifp)
2750 			n1++;
2751 	}
2752 	if (bfl->bfl_list == NULL) {
2753 		bfl->bfl_len = n1;
2754 		return (0);
2755 	}
2756 	if (n1 > bfl->bfl_len)
2757 		return (ENOMEM);
2758 	BPF_UNLOCK();
2759 	lst = malloc(n1 * sizeof(u_int), M_TEMP, M_WAITOK);
2760 	n = 0;
2761 	BPF_LOCK();
2762 	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2763 		if (bp->bif_ifp != ifp)
2764 			continue;
2765 		if (n >= n1) {
2766 			free(lst, M_TEMP);
2767 			goto again;
2768 		}
2769 		lst[n] = bp->bif_dlt;
2770 		n++;
2771 	}
2772 	BPF_UNLOCK();
2773 	error = copyout(lst, bfl->bfl_list, sizeof(u_int) * n);
2774 	free(lst, M_TEMP);
2775 	BPF_LOCK();
2776 	bfl->bfl_len = n;
2777 	return (error);
2778 }
2779 
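/*
 * A minimal userland sketch of the two-call BIOCGDLTLIST pattern served by
 * bpf_getdltlist() above (the first call sizes the list, the second fills
 * it), followed by switching DLTs via bpf_setdlt() below; allocation and
 * error handling are illustrative:
 *
 *	struct bpf_dltlist dl;
 *	u_int dlt;
 *
 *	dl.bfl_list = NULL;
 *	ioctl(fd, BIOCGDLTLIST, &dl);		(sets dl.bfl_len)
 *	dl.bfl_list = calloc(dl.bfl_len, sizeof(u_int));
 *	ioctl(fd, BIOCGDLTLIST, &dl);
 *	dlt = dl.bfl_list[0];
 *	ioctl(fd, BIOCSDLT, &dlt);
 */
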
2780 /*
2781  * Set the data link type of a BPF instance.
2782  */
2783 static int
2784 bpf_setdlt(struct bpf_d *d, u_int dlt)
2785 {
2786 	int error, opromisc;
2787 	struct ifnet *ifp;
2788 	struct bpf_if *bp;
2789 
2790 	BPF_LOCK_ASSERT();
2791 
2792 	if (d->bd_bif->bif_dlt == dlt)
2793 		return (0);
2794 	ifp = d->bd_bif->bif_ifp;
2795 
2796 	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2797 		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
2798 			break;
2799 	}
2800 
2801 	if (bp != NULL) {
2802 		opromisc = d->bd_promisc;
2803 		bpf_attachd(d, bp);
2804 		BPFD_LOCK(d);
2805 		reset_d(d);
2806 		BPFD_UNLOCK(d);
2807 		if (opromisc) {
2808 			error = ifpromisc(bp->bif_ifp, 1);
2809 			if (error)
2810 				if_printf(bp->bif_ifp,
2811 					"bpf_setdlt: ifpromisc failed (%d)\n",
2812 					error);
2813 			else
2814 				d->bd_promisc = 1;
2815 		}
2816 	}
2817 	return (bp == NULL ? EINVAL : 0);
2818 }
2819 
2820 static void
2821 bpf_drvinit(void *unused)
2822 {
2823 	struct cdev *dev;
2824 
2825 	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
2826 	LIST_INIT(&bpf_iflist);
2827 	LIST_INIT(&bpf_freelist);
2828 
2829 	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
2830 	/* For compatibility */
2831 	make_dev_alias(dev, "bpf0");
2832 
2833 	/* Register interface departure handler */
2834 	bpf_ifdetach_cookie = EVENTHANDLER_REGISTER(
2835 		    ifnet_departure_event, bpf_ifdetach, NULL,
2836 		    EVENTHANDLER_PRI_ANY);
2837 }
2838 
2839 /*
2840  * Zero out the various packet counters associated with all of the bpf
2841  * descriptors.  At some point, we will probably want to get a bit more
2842  * granular and allow the user to specify descriptors to be zeroed.
2843  */
2844 static void
2845 bpf_zero_counters(void)
2846 {
2847 	struct bpf_if *bp;
2848 	struct bpf_d *bd;
2849 
2850 	BPF_LOCK();
2851 	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2852 		BPFIF_RLOCK(bp);
2853 		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2854 			BPFD_LOCK(bd);
2855 			counter_u64_zero(bd->bd_rcount);
2856 			counter_u64_zero(bd->bd_dcount);
2857 			counter_u64_zero(bd->bd_fcount);
2858 			counter_u64_zero(bd->bd_wcount);
2859 			counter_u64_zero(bd->bd_wfcount);
			counter_u64_zero(bd->bd_wdcount);
2860 			counter_u64_zero(bd->bd_zcopy);
2861 			BPFD_UNLOCK(bd);
2862 		}
2863 		BPFIF_RUNLOCK(bp);
2864 	}
2865 	BPF_UNLOCK();
2866 }
2867 
2868 /*
2869  * Fill filter statistics
2870  */
2871 static void
2872 bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
2873 {
2874 
2875 	bzero(d, sizeof(*d));
2876 	BPFD_LOCK_ASSERT(bd);
2877 	d->bd_structsize = sizeof(*d);
2878 	/* XXX: reading should be protected by global lock */
2879 	d->bd_immediate = bd->bd_immediate;
2880 	d->bd_promisc = bd->bd_promisc;
2881 	d->bd_hdrcmplt = bd->bd_hdrcmplt;
2882 	d->bd_direction = bd->bd_direction;
2883 	d->bd_feedback = bd->bd_feedback;
2884 	d->bd_async = bd->bd_async;
2885 	d->bd_rcount = counter_u64_fetch(bd->bd_rcount);
2886 	d->bd_dcount = counter_u64_fetch(bd->bd_dcount);
2887 	d->bd_fcount = counter_u64_fetch(bd->bd_fcount);
2888 	d->bd_sig = bd->bd_sig;
2889 	d->bd_slen = bd->bd_slen;
2890 	d->bd_hlen = bd->bd_hlen;
2891 	d->bd_bufsize = bd->bd_bufsize;
2892 	d->bd_pid = bd->bd_pid;
2893 	strlcpy(d->bd_ifname,
2894 	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
2895 	d->bd_locked = bd->bd_locked;
2896 	d->bd_wcount = counter_u64_fetch(bd->bd_wcount);
2897 	d->bd_wdcount = counter_u64_fetch(bd->bd_wdcount);
2898 	d->bd_wfcount = counter_u64_fetch(bd->bd_wfcount);
2899 	d->bd_zcopy = counter_u64_fetch(bd->bd_zcopy);
2900 	d->bd_bufmode = bd->bd_bufmode;
2901 }
2902 
2903 /*
2904  * Handle `netstat -B' stats request
2905  */
2906 static int
2907 bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
2908 {
2909 	static const struct xbpf_d zerostats;
2910 	struct xbpf_d *xbdbuf, *xbd, tempstats;
2911 	int index, error;
2912 	struct bpf_if *bp;
2913 	struct bpf_d *bd;
2914 
2915 	/*
2916 	 * XXX This is not technically correct.  It is possible for
2917 	 * non-privileged users to open bpf devices.  It would make sense
2918 	 * if the users who opened the devices were able to retrieve
2919 	 * the statistics for them, too.
2920 	 */
2921 	error = priv_check(req->td, PRIV_NET_BPF);
2922 	if (error)
2923 		return (error);
2924 	/*
2925 	 * Check to see if the user is requesting that the counters be
2926 	 * zeroed out.  Explicitly check that the supplied data is zeroed,
2927 	 * as we aren't allowing the user to set the counters currently.
2928 	 */
2929 	if (req->newptr != NULL) {
2930 		if (req->newlen != sizeof(tempstats))
2931 			return (EINVAL);
2932 		memset(&tempstats, 0, sizeof(tempstats));
2933 		error = SYSCTL_IN(req, &tempstats, sizeof(tempstats));
2934 		if (error)
2935 			return (error);
2936 		if (bcmp(&tempstats, &zerostats, sizeof(tempstats)) != 0)
2937 			return (EINVAL);
2938 		bpf_zero_counters();
2939 		return (0);
2940 	}
2941 	if (req->oldptr == NULL)
2942 		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
2943 	if (bpf_bpfd_cnt == 0)
2944 		return (SYSCTL_OUT(req, 0, 0));
2945 	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
2946 	BPF_LOCK();
2947 	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
2948 		BPF_UNLOCK();
2949 		free(xbdbuf, M_BPF);
2950 		return (ENOMEM);
2951 	}
2952 	index = 0;
2953 	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2954 		BPFIF_RLOCK(bp);
2955 		/* Send writers-only first */
2956 		LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
2957 			xbd = &xbdbuf[index++];
2958 			BPFD_LOCK(bd);
2959 			bpfstats_fill_xbpf(xbd, bd);
2960 			BPFD_UNLOCK(bd);
2961 		}
2962 		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2963 			xbd = &xbdbuf[index++];
2964 			BPFD_LOCK(bd);
2965 			bpfstats_fill_xbpf(xbd, bd);
2966 			BPFD_UNLOCK(bd);
2967 		}
2968 		BPFIF_RUNLOCK(bp);
2969 	}
2970 	BPF_UNLOCK();
2971 	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
2972 	free(xbdbuf, M_BPF);
2973 	return (error);
2974 }
2975 
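/*
 * A hedged userland sketch of consuming this handler the way `netstat -B'
 * does; the OID name net.bpf.stats and the sizing dance are assumptions
 * here, and real consumers over-allocate since the count can change
 * between calls:
 *
 *	struct xbpf_d *xbd;
 *	size_t len;
 *
 *	sysctlbyname("net.bpf.stats", NULL, &len, NULL, 0);
 *	xbd = malloc(len);
 *	sysctlbyname("net.bpf.stats", xbd, &len, NULL, 0);
 *	for (i = 0; i < len / sizeof(*xbd); i++)
 *		printf("%s: recv %ju drop %ju\n", xbd[i].bd_ifname,
 *		    (uintmax_t)xbd[i].bd_rcount, (uintmax_t)xbd[i].bd_dcount);
 *
 * Writing an all-zero struct xbpf_d back instead zeroes the counters via
 * bpf_zero_counters() above.
 */
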
2976 SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);
2977 
2978 #else /* !DEV_BPF && !NETGRAPH_BPF */
2979 /*
2980  * NOP stubs to allow bpf-using drivers to load and function.
2981  *
2982  * A 'better' implementation would allow the core bpf functionality
2983  * to be loaded at runtime.
2984  */
2985 static struct bpf_if bp_null;
2986 
2987 void
2988 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2989 {
2990 }
2991 
2992 void
2993 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2994 {
2995 }
2996 
2997 void
2998 bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
2999 {
3000 }
3001 
3002 void
3003 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
3004 {
3005 
3006 	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
3007 }
3008 
3009 void
3010 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
3011 {
3012 
3013 	*driverp = &bp_null;
3014 }
3015 
3016 void
3017 bpfdetach(struct ifnet *ifp)
3018 {
3019 }
3020 
3021 u_int
3022 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
3023 {
3024 	return (-1);	/* "no filter" behaviour */
3025 }
3026 
3027 int
3028 bpf_validate(const struct bpf_insn *f, int len)
3029 {
3030 	return (0);		/* false */
3031 }
3032 
3033 #endif /* !DEV_BPF && !NETGRAPH_BPF */
3034 
3035 #ifdef DDB
3036 static void
3037 bpf_show_bpf_if(struct bpf_if *bpf_if)
3038 {
3039 
3040 	if (bpf_if == NULL)
3041 		return;
3042 	db_printf("%p:\n", bpf_if);
3043 #define	BPF_DB_PRINTF(f, e)	db_printf("   %s = " f "\n", #e, bpf_if->e);
3044 	/* bif_ext.bif_next */
3045 	/* bif_ext.bif_dlist */
3046 	BPF_DB_PRINTF("%#x", bif_dlt);
3047 	BPF_DB_PRINTF("%u", bif_hdrlen);
3048 	BPF_DB_PRINTF("%p", bif_ifp);
3049 	/* bif_lock */
3050 	/* bif_wlist */
3051 	BPF_DB_PRINTF("%#x", bif_flags);
3052 }
3053 
3054 DB_SHOW_COMMAND(bpf_if, db_show_bpf_if)
3055 {
3056 
3057 	if (!have_addr) {
3058 		db_printf("usage: show bpf_if <struct bpf_if *>\n");
3059 		return;
3060 	}
3061 
3062 	bpf_show_bpf_if((struct bpf_if *)addr);
3063 }
3064 #endif
3065