xref: /freebsd/sys/net/bpf.c (revision 4ce386ff25d77954b8cfa11534f632172e848244)
1 /*-
2  * Copyright (c) 1990, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from the Stanford/CMU enet packet filter,
6  * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7  * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8  * Berkeley Laboratory.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *      @(#)bpf.c	8.4 (Berkeley) 1/9/95
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include "opt_bpf.h"
41 #include "opt_compat.h"
42 #include "opt_netgraph.h"
43 
44 #include <sys/types.h>
45 #include <sys/param.h>
46 #include <sys/lock.h>
47 #include <sys/rwlock.h>
48 #include <sys/systm.h>
49 #include <sys/conf.h>
50 #include <sys/fcntl.h>
51 #include <sys/jail.h>
52 #include <sys/malloc.h>
53 #include <sys/mbuf.h>
54 #include <sys/time.h>
55 #include <sys/priv.h>
56 #include <sys/proc.h>
57 #include <sys/signalvar.h>
58 #include <sys/filio.h>
59 #include <sys/sockio.h>
60 #include <sys/ttycom.h>
61 #include <sys/uio.h>
62 
63 #include <sys/event.h>
64 #include <sys/file.h>
65 #include <sys/poll.h>
66 #include <sys/proc.h>
67 
68 #include <sys/socket.h>
69 
70 #include <net/if.h>
71 #include <net/if_var.h>
72 #define	BPF_INTERNAL
73 #include <net/bpf.h>
74 #include <net/bpf_buffer.h>
75 #ifdef BPF_JITTER
76 #include <net/bpf_jitter.h>
77 #endif
78 #include <net/bpf_zerocopy.h>
79 #include <net/bpfdesc.h>
80 #include <net/vnet.h>
81 
82 #include <netinet/in.h>
83 #include <netinet/if_ether.h>
84 #include <sys/kernel.h>
85 #include <sys/sysctl.h>
86 
87 #include <net80211/ieee80211_freebsd.h>
88 
89 #include <security/mac/mac_framework.h>
90 
91 MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
92 
93 #if defined(DEV_BPF) || defined(NETGRAPH_BPF)
94 
95 #define PRINET  26			/* interruptible */
96 
97 #define	SIZEOF_BPF_HDR(type)	\
98     (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))
99 
100 #ifdef COMPAT_FREEBSD32
101 #include <sys/mount.h>
102 #include <compat/freebsd32/freebsd32.h>
103 #define BPF_ALIGNMENT32 sizeof(int32_t)
104 #define BPF_WORDALIGN32(x) (((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1))
105 
106 #ifndef BURN_BRIDGES
107 /*
108  * 32-bit version of structure prepended to each packet.  We use this header
109  * instead of the standard one for 32-bit streams.  We mark a stream as
110  * 32-bit the first time we see a 32-bit compat ioctl request.
111  */
112 struct bpf_hdr32 {
113 	struct timeval32 bh_tstamp;	/* time stamp */
114 	uint32_t	bh_caplen;	/* length of captured portion */
115 	uint32_t	bh_datalen;	/* original length of packet */
116 	uint16_t	bh_hdrlen;	/* length of bpf header (this struct
117 					   plus alignment padding) */
118 };
119 #endif
120 
121 struct bpf_program32 {
122 	u_int bf_len;
123 	uint32_t bf_insns;
124 };
125 
126 struct bpf_dltlist32 {
127 	u_int	bfl_len;
128 	u_int	bfl_list;
129 };
130 
131 #define	BIOCSETF32	_IOW('B', 103, struct bpf_program32)
132 #define	BIOCSRTIMEOUT32	_IOW('B', 109, struct timeval32)
133 #define	BIOCGRTIMEOUT32	_IOR('B', 110, struct timeval32)
134 #define	BIOCGDLTLIST32	_IOWR('B', 121, struct bpf_dltlist32)
135 #define	BIOCSETWF32	_IOW('B', 123, struct bpf_program32)
136 #define	BIOCSETFNR32	_IOW('B', 130, struct bpf_program32)
137 #endif
138 
139 /*
140  * bpf_iflist is a list of BPF interface structures, each corresponding to a
141  * specific DLT.  The same network interface might have several BPF interface
142  * structures registered by different layers in the stack (e.g., 802.11
143  * frames, Ethernet frames).
144  */
145 static LIST_HEAD(, bpf_if)	bpf_iflist, bpf_freelist;
146 static struct mtx	bpf_mtx;		/* bpf global lock */
147 static int		bpf_bpfd_cnt;
148 
149 static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
150 static void	bpf_detachd(struct bpf_d *);
151 static void	bpf_detachd_locked(struct bpf_d *);
152 static void	bpf_freed(struct bpf_d *);
153 static int	bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
154 		    struct sockaddr *, int *, struct bpf_insn *);
155 static int	bpf_setif(struct bpf_d *, struct ifreq *);
156 static void	bpf_timed_out(void *);
157 static __inline void
158 		bpf_wakeup(struct bpf_d *);
159 static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
160 		    void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
161 		    struct bintime *);
162 static void	reset_d(struct bpf_d *);
163 static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
164 static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
165 static int	bpf_setdlt(struct bpf_d *, u_int);
166 static void	filt_bpfdetach(struct knote *);
167 static int	filt_bpfread(struct knote *, long);
168 static void	bpf_drvinit(void *);
169 static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);
170 
171 SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
172 int bpf_maxinsns = BPF_MAXINSNS;
173 SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
174     &bpf_maxinsns, 0, "Maximum bpf program instructions");
175 static int bpf_zerocopy_enable = 0;
176 SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
177     &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
178 static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
179     bpf_stats_sysctl, "bpf statistics portal");
180 
181 static VNET_DEFINE(int, bpf_optimize_writers) = 0;
182 #define	V_bpf_optimize_writers VNET(bpf_optimize_writers)
183 SYSCTL_INT(_net_bpf, OID_AUTO, optimize_writers, CTLFLAG_VNET | CTLFLAG_RW,
184     &VNET_NAME(bpf_optimize_writers), 0,
185     "Do not send packets until BPF program is set");
186 
187 static	d_open_t	bpfopen;
188 static	d_read_t	bpfread;
189 static	d_write_t	bpfwrite;
190 static	d_ioctl_t	bpfioctl;
191 static	d_poll_t	bpfpoll;
192 static	d_kqfilter_t	bpfkqfilter;
193 
194 static struct cdevsw bpf_cdevsw = {
195 	.d_version =	D_VERSION,
196 	.d_open =	bpfopen,
197 	.d_read =	bpfread,
198 	.d_write =	bpfwrite,
199 	.d_ioctl =	bpfioctl,
200 	.d_poll =	bpfpoll,
201 	.d_name =	"bpf",
202 	.d_kqfilter =	bpfkqfilter,
203 };
204 
205 static struct filterops bpfread_filtops = {
206 	.f_isfd = 1,
207 	.f_detach = filt_bpfdetach,
208 	.f_event = filt_bpfread,
209 };
210 
211 eventhandler_tag	bpf_ifdetach_cookie = NULL;
212 
213 /*
214  * LOCKING MODEL USED BY BPF:
215  * Locks:
216  * 1) global lock (BPF_LOCK). Mutex, used to protect interface addition/removal,
217  * some global counters and every bpf_if reference.
218  * 2) Interface lock. Rwlock, used to protect list of BPF descriptors and their filters.
219  * 3) Descriptor lock. Mutex, used to protect BPF buffers and various structure fields
220  *   used by bpf_mtap code.
221  *
222  * Lock order:
223  *
224  * Global lock, interface lock, descriptor lock
225  *
226  * We have to acquire the interface lock before the descriptor main lock because
227  * of the BPF_MTAP[2] working model.  In many places (like bpf_detachd) we start
228  * with the BPF descriptor (and we need to at least rlock it to get a reliable
229  * interface pointer).  This gives us a potential LOR.  As a result, we use the
230  * global lock to protect against a bpf_if change in every such place.
231  *
232  * Changing d->bd_bif is protected by 1) global lock, 2) interface lock and
233  * 3) descriptor main wlock.
234  * Reading bd_bif can be protected by any of these locks, typically global lock.
235  *
236  * Changing read/write BPF filter is protected by the same three locks,
237  * the same applies for reading.
238  *
239  * Sleeping while holding the global lock is not allowed, since bpfdetach() uses it.
240  */
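
/*
 * Illustrative sketch (not additional driver code): the acquisition order
 * described above, as followed by functions such as bpf_attachd() and
 * bpf_detachd_locked().  Locks are taken global -> interface -> descriptor
 * and released in reverse:
 *
 *	BPF_LOCK();			(1) global lock
 *	BPFIF_WLOCK(bp);		(2) interface lock
 *	BPFD_LOCK(d);			(3) descriptor lock
 *	... change d->bd_bif and/or the descriptor lists ...
 *	BPFD_UNLOCK(d);
 *	BPFIF_WUNLOCK(bp);
 *	BPF_UNLOCK();
 */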
241 
242 /*
243  * Wrapper functions for various buffering methods.  If the set of buffer
244  * modes expands, we will probably want to introduce a switch data structure
245  * similar to protosw, etc.
246  */
247 static void
248 bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
249     u_int len)
250 {
251 
252 	BPFD_LOCK_ASSERT(d);
253 
254 	switch (d->bd_bufmode) {
255 	case BPF_BUFMODE_BUFFER:
256 		return (bpf_buffer_append_bytes(d, buf, offset, src, len));
257 
258 	case BPF_BUFMODE_ZBUF:
259 		d->bd_zcopy++;
260 		return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
261 
262 	default:
263 		panic("bpf_buf_append_bytes");
264 	}
265 }
266 
267 static void
268 bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
269     u_int len)
270 {
271 
272 	BPFD_LOCK_ASSERT(d);
273 
274 	switch (d->bd_bufmode) {
275 	case BPF_BUFMODE_BUFFER:
276 		return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
277 
278 	case BPF_BUFMODE_ZBUF:
279 		d->bd_zcopy++;
280 		return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
281 
282 	default:
283 		panic("bpf_buf_append_mbuf");
284 	}
285 }
286 
287 /*
288  * This function gets called when the free buffer is re-assigned.
289  */
290 static void
291 bpf_buf_reclaimed(struct bpf_d *d)
292 {
293 
294 	BPFD_LOCK_ASSERT(d);
295 
296 	switch (d->bd_bufmode) {
297 	case BPF_BUFMODE_BUFFER:
298 		return;
299 
300 	case BPF_BUFMODE_ZBUF:
301 		bpf_zerocopy_buf_reclaimed(d);
302 		return;
303 
304 	default:
305 		panic("bpf_buf_reclaimed");
306 	}
307 }
308 
309 /*
310  * If the buffer mechanism has a way to decide that a held buffer can be made
311  * free, then it is exposed via the bpf_canfreebuf() interface.  (1) is
312  * returned if the buffer can be discarded, (0) is returned if it cannot.
313  */
314 static int
315 bpf_canfreebuf(struct bpf_d *d)
316 {
317 
318 	BPFD_LOCK_ASSERT(d);
319 
320 	switch (d->bd_bufmode) {
321 	case BPF_BUFMODE_ZBUF:
322 		return (bpf_zerocopy_canfreebuf(d));
323 	}
324 	return (0);
325 }
326 
327 /*
328  * Allow the buffer model to indicate that the current store buffer is
329  * immutable, regardless of the appearance of space.  Return (1) if the
330  * buffer is writable, and (0) if not.
331  */
332 static int
333 bpf_canwritebuf(struct bpf_d *d)
334 {
335 	BPFD_LOCK_ASSERT(d);
336 
337 	switch (d->bd_bufmode) {
338 	case BPF_BUFMODE_ZBUF:
339 		return (bpf_zerocopy_canwritebuf(d));
340 	}
341 	return (1);
342 }
343 
344 /*
345  * Notify buffer model that an attempt to write to the store buffer has
346  * resulted in a dropped packet, in which case the buffer may be considered
347  * full.
348  */
349 static void
350 bpf_buffull(struct bpf_d *d)
351 {
352 
353 	BPFD_LOCK_ASSERT(d);
354 
355 	switch (d->bd_bufmode) {
356 	case BPF_BUFMODE_ZBUF:
357 		bpf_zerocopy_buffull(d);
358 		break;
359 	}
360 }
361 
362 /*
363  * Notify the buffer model that a buffer has moved into the hold position.
364  */
365 void
366 bpf_bufheld(struct bpf_d *d)
367 {
368 
369 	BPFD_LOCK_ASSERT(d);
370 
371 	switch (d->bd_bufmode) {
372 	case BPF_BUFMODE_ZBUF:
373 		bpf_zerocopy_bufheld(d);
374 		break;
375 	}
376 }
377 
378 static void
379 bpf_free(struct bpf_d *d)
380 {
381 
382 	switch (d->bd_bufmode) {
383 	case BPF_BUFMODE_BUFFER:
384 		return (bpf_buffer_free(d));
385 
386 	case BPF_BUFMODE_ZBUF:
387 		return (bpf_zerocopy_free(d));
388 
389 	default:
390 		panic("bpf_buf_free");
391 	}
392 }
393 
394 static int
395 bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
396 {
397 
398 	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
399 		return (EOPNOTSUPP);
400 	return (bpf_buffer_uiomove(d, buf, len, uio));
401 }
402 
403 static int
404 bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
405 {
406 
407 	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
408 		return (EOPNOTSUPP);
409 	return (bpf_buffer_ioctl_sblen(d, i));
410 }
411 
412 static int
413 bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
414 {
415 
416 	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
417 		return (EOPNOTSUPP);
418 	return (bpf_zerocopy_ioctl_getzmax(td, d, i));
419 }
420 
421 static int
422 bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
423 {
424 
425 	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
426 		return (EOPNOTSUPP);
427 	return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
428 }
429 
430 static int
431 bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
432 {
433 
434 	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
435 		return (EOPNOTSUPP);
436 	return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
437 }
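
/*
 * Userland sketch of the sequence these wrappers back (assumptions: an
 * already-opened bpf descriptor "fd" and two page-aligned shared buffers
 * "bufa"/"bufb"; error handling omitted).  Zero-copy buffers must be
 * registered before BIOCSETIF, since bpf_setif() rejects a descriptor
 * with no store buffer:
 *
 *	u_int mode = BPF_BUFMODE_ZBUF;
 *	size_t zmax;
 *	struct bpf_zbuf bz;
 *
 *	ioctl(fd, BIOCSETBUFMODE, &mode);
 *	ioctl(fd, BIOCGETZMAX, &zmax);	// upper bound for bz_buflen
 *	bz.bz_bufa = bufa;
 *	bz.bz_bufb = bufb;
 *	bz.bz_buflen = buflen;		// <= zmax
 *	ioctl(fd, BIOCSETZBUF, &bz);
 */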
438 
439 /*
440  * General BPF functions.
441  */
442 static int
443 bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
444     struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
445 {
446 	const struct ieee80211_bpf_params *p;
447 	struct ether_header *eh;
448 	struct mbuf *m;
449 	int error;
450 	int len;
451 	int hlen;
452 	int slen;
453 
454 	/*
455 	 * Build a sockaddr based on the data link layer type.
456 	 * We do this at this level because the ethernet header
457 	 * is copied directly into the data field of the sockaddr.
458 	 * In the case of SLIP, there is no header and the packet
459 	 * is forwarded as is.
460 	 * Also, we are careful to leave room at the front of the mbuf
461 	 * for the link level header.
462 	 */
463 	switch (linktype) {
464 
465 	case DLT_SLIP:
466 		sockp->sa_family = AF_INET;
467 		hlen = 0;
468 		break;
469 
470 	case DLT_EN10MB:
471 		sockp->sa_family = AF_UNSPEC;
472 		/* XXX Would MAXLINKHDR be better? */
473 		hlen = ETHER_HDR_LEN;
474 		break;
475 
476 	case DLT_FDDI:
477 		sockp->sa_family = AF_IMPLINK;
478 		hlen = 0;
479 		break;
480 
481 	case DLT_RAW:
482 		sockp->sa_family = AF_UNSPEC;
483 		hlen = 0;
484 		break;
485 
486 	case DLT_NULL:
487 		/*
488 		 * Null interface types require a 4-byte pseudo header, which
489 		 * corresponds to the address family of the packet.
490 		 */
491 		sockp->sa_family = AF_UNSPEC;
492 		hlen = 4;
493 		break;
494 
495 	case DLT_ATM_RFC1483:
496 		/*
497 		 * The en(4) ATM driver requires a 4-byte ATM pseudo header.
498 		 * Though it isn't standard, the VPI:VCI needs to be
499 		 * specified anyway.
500 		 */
501 		sockp->sa_family = AF_UNSPEC;
502 		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
503 		break;
504 
505 	case DLT_PPP:
506 		sockp->sa_family = AF_UNSPEC;
507 		hlen = 4;	/* This should match PPP_HDRLEN */
508 		break;
509 
510 	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
511 		sockp->sa_family = AF_IEEE80211;
512 		hlen = 0;
513 		break;
514 
515 	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
516 		sockp->sa_family = AF_IEEE80211;
517 		sockp->sa_len = 12;	/* XXX != 0 */
518 		hlen = sizeof(struct ieee80211_bpf_params);
519 		break;
520 
521 	default:
522 		return (EIO);
523 	}
524 
525 	len = uio->uio_resid;
526 	if (len < hlen || len - hlen > ifp->if_mtu)
527 		return (EMSGSIZE);
528 
529 	m = m_get2(len, M_WAITOK, MT_DATA, M_PKTHDR);
530 	if (m == NULL)
531 		return (EIO);
532 	m->m_pkthdr.len = m->m_len = len;
533 	*mp = m;
534 
535 	error = uiomove(mtod(m, u_char *), len, uio);
536 	if (error)
537 		goto bad;
538 
539 	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
540 	if (slen == 0) {
541 		error = EPERM;
542 		goto bad;
543 	}
544 
545 	/* Check for multicast destination */
546 	switch (linktype) {
547 	case DLT_EN10MB:
548 		eh = mtod(m, struct ether_header *);
549 		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
550 			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
551 			    ETHER_ADDR_LEN) == 0)
552 				m->m_flags |= M_BCAST;
553 			else
554 				m->m_flags |= M_MCAST;
555 		}
556 		break;
557 	}
558 
559 	/*
560 	 * Make room for link header, and copy it to sockaddr
561 	 */
562 	if (hlen != 0) {
563 		if (sockp->sa_family == AF_IEEE80211) {
564 			/*
565 			 * Collect true length from the parameter header
566 			 * NB: sockp is known to be zero'd so if we do a
567 			 *     short copy unspecified parameters will be
568 			 *     zero.
569 			 * NB: packet may not be aligned after stripping
570 			 *     bpf params
571 			 * XXX check ibp_vers
572 			 */
573 			p = mtod(m, const struct ieee80211_bpf_params *);
574 			hlen = p->ibp_len;
575 			if (hlen > sizeof(sockp->sa_data)) {
576 				error = EINVAL;
577 				goto bad;
578 			}
579 		}
580 		bcopy(mtod(m, const void *), sockp->sa_data, hlen);
581 	}
582 	*hdrlen = hlen;
583 
584 	return (0);
585 bad:
586 	m_freem(m);
587 	return (error);
588 }
589 
590 /*
591  * Attach file to the bpf interface, i.e. make d listen on bp.
592  */
593 static void
594 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
595 {
596 	int op_w;
597 
598 	BPF_LOCK_ASSERT();
599 
600 	/*
601 	 * Save the sysctl value to protect against a sysctl change
602 	 * between reads.
603 	 */
604 	op_w = V_bpf_optimize_writers;
605 
606 	if (d->bd_bif != NULL)
607 		bpf_detachd_locked(d);
608 	/*
609 	 * Point d at bp, and add d to the interface's list.
610 	 * Since many applications use BPF only for sending raw
611 	 * packets (dhcpd and cdpd are good examples), we can delay
612 	 * adding d to the list of active listeners until some
613 	 * filter is configured.
614 	 */
615 
616 	BPFIF_WLOCK(bp);
617 	BPFD_LOCK(d);
618 
619 	d->bd_bif = bp;
620 
621 	if (op_w != 0) {
622 		/* Add to writers-only list */
623 		LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
624 		/*
625 		 * We decrement bd_writer on every filter set operation.
626 		 * The first BIOCSETF is done by pcap_open_live() to set the
627 		 * snap length.  After that, the application usually sets its own filter.
628 		 */
629 		d->bd_writer = 2;
630 	} else
631 		LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
632 
633 	BPFD_UNLOCK(d);
634 	BPFIF_WUNLOCK(bp);
635 
636 	bpf_bpfd_cnt++;
637 
638 	CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
639 	    __func__, d->bd_pid, d->bd_writer ? "writer" : "active");
640 
641 	if (op_w == 0)
642 		EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
643 }
644 
645 /*
646  * Check if we need to upgrade our descriptor @d from write-only mode.
647  */
648 static int
649 bpf_check_upgrade(u_long cmd, struct bpf_d *d, struct bpf_insn *fcode, int flen)
650 {
651 	int is_snap, need_upgrade;
652 
653 	/*
654 	 * Check if we've already upgraded or the new filter is empty.
655 	 */
656 	if (d->bd_writer == 0 || fcode == NULL)
657 		return (0);
658 
659 	need_upgrade = 0;
660 
661 	/*
662 	 * Check if cmd looks like snaplen setting from
663 	 * pcap_bpf.c:pcap_open_live().
664 	 * Note we're not checking .k value here:
665 	 * while pcap_open_live() definitely sets it to a non-zero value,
666 	 * we'd prefer to treat the k=0 (deny ALL) case the same way, i.e.,
667 	 * we do not consider upgrading immediately.
668 	 */
669 	if (cmd == BIOCSETF && flen == 1 && fcode[0].code == (BPF_RET | BPF_K))
670 		is_snap = 1;
671 	else
672 		is_snap = 0;
673 
674 	if (is_snap == 0) {
675 		/*
676 		 * We're setting the first filter and it doesn't look like
677 		 * a snaplen setting.  We're probably using bpf directly.
678 		 * Upgrade immediately.
679 		 */
680 		need_upgrade = 1;
681 	} else {
682 		/*
683 		 * Do not require an upgrade on the first BIOCSETF
684 		 * (used by pcap_open_live() to set the snaplen).
685 		 */
686 
687 		if (--d->bd_writer == 0) {
688 			/*
689 			 * The first (snaplen) filter has already
690 			 * been set.  This one is probably a
691 			 * catch-all filter.
692 			 */
693 			need_upgrade = 1;
694 		}
695 	}
696 
697 	CTR5(KTR_NET,
698 	    "%s: filter function set by pid %d, "
699 	    "bd_writer counter %d, snap %d upgrade %d",
700 	    __func__, d->bd_pid, d->bd_writer,
701 	    is_snap, need_upgrade);
702 
703 	return (need_upgrade);
704 }
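
/*
 * For reference, the single-instruction snaplen program installed first by
 * pcap_open_live(), which the is_snap test above matches, looks like this
 * in userland (the snaplen value is illustrative):
 *
 *	struct bpf_insn insn = BPF_STMT(BPF_RET | BPF_K, 65535);
 *	struct bpf_program prog = { 1, &insn };
 *
 *	ioctl(fd, BIOCSETF, &prog);	// flen == 1, code == BPF_RET|BPF_K
 */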
705 
706 /*
707  * Add d to the list of active bp filters.
708  * Requires bpf_attachd() to have been called beforehand.
709  */
710 static void
711 bpf_upgraded(struct bpf_d *d)
712 {
713 	struct bpf_if *bp;
714 
715 	BPF_LOCK_ASSERT();
716 
717 	bp = d->bd_bif;
718 
719 	/*
720 	 * The filter can be set several times without specifying an interface.
721 	 * Mark d as a reader and exit.
722 	 */
723 	if (bp == NULL) {
724 		BPFD_LOCK(d);
725 		d->bd_writer = 0;
726 		BPFD_UNLOCK(d);
727 		return;
728 	}
729 
730 	BPFIF_WLOCK(bp);
731 	BPFD_LOCK(d);
732 
733 	/* Remove from writers-only list */
734 	LIST_REMOVE(d, bd_next);
735 	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
736 	/* Mark d as reader */
737 	d->bd_writer = 0;
738 
739 	BPFD_UNLOCK(d);
740 	BPFIF_WUNLOCK(bp);
741 
742 	CTR2(KTR_NET, "%s: upgrade required by pid %d", __func__, d->bd_pid);
743 
744 	EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
745 }
746 
747 /*
748  * Detach a file from its interface.
749  */
750 static void
751 bpf_detachd(struct bpf_d *d)
752 {
753 	BPF_LOCK();
754 	bpf_detachd_locked(d);
755 	BPF_UNLOCK();
756 }
757 
758 static void
759 bpf_detachd_locked(struct bpf_d *d)
760 {
761 	int error;
762 	struct bpf_if *bp;
763 	struct ifnet *ifp;
764 
765 	CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);
766 
767 	BPF_LOCK_ASSERT();
768 
769 	/* Check if descriptor is attached */
770 	if ((bp = d->bd_bif) == NULL)
771 		return;
772 
773 	BPFIF_WLOCK(bp);
774 	BPFD_LOCK(d);
775 
776 	/* Save bd_writer value */
777 	error = d->bd_writer;
778 
779 	/*
780 	 * Remove d from the interface's descriptor list.
781 	 */
782 	LIST_REMOVE(d, bd_next);
783 
784 	ifp = bp->bif_ifp;
785 	d->bd_bif = NULL;
786 	BPFD_UNLOCK(d);
787 	BPFIF_WUNLOCK(bp);
788 
789 	bpf_bpfd_cnt--;
790 
791 	/* Call event handler iff d is attached */
792 	if (error == 0)
793 		EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);
794 
795 	/*
796 	 * Check if this descriptor had requested promiscuous mode.
797 	 * If so, turn it off.
798 	 */
799 	if (d->bd_promisc) {
800 		d->bd_promisc = 0;
801 		CURVNET_SET(ifp->if_vnet);
802 		error = ifpromisc(ifp, 0);
803 		CURVNET_RESTORE();
804 		if (error != 0 && error != ENXIO) {
805 			/*
806 			 * ENXIO can happen if a pccard is unplugged.
807 			 * Something is really wrong if we were able to put
808 			 * the driver into promiscuous mode, but can't
809 			 * take it out.
810 			 */
811 			if_printf(bp->bif_ifp,
812 				"bpf_detach: ifpromisc failed (%d)\n", error);
813 		}
814 	}
815 }
816 
817 /*
818  * Close the descriptor by detaching it from its interface,
819  * deallocating its buffers, and marking it free.
820  */
821 static void
822 bpf_dtor(void *data)
823 {
824 	struct bpf_d *d = data;
825 
826 	BPFD_LOCK(d);
827 	if (d->bd_state == BPF_WAITING)
828 		callout_stop(&d->bd_callout);
829 	d->bd_state = BPF_IDLE;
830 	BPFD_UNLOCK(d);
831 	funsetown(&d->bd_sigio);
832 	bpf_detachd(d);
833 #ifdef MAC
834 	mac_bpfdesc_destroy(d);
835 #endif /* MAC */
836 	seldrain(&d->bd_sel);
837 	knlist_destroy(&d->bd_sel.si_note);
838 	callout_drain(&d->bd_callout);
839 	bpf_freed(d);
840 	free(d, M_BPF);
841 }
842 
843 /*
844  * Open the BPF device.  Returns ENXIO for an illegal minor device number,
845  * EBUSY if the file is open by another process.
846  */
847 /* ARGSUSED */
848 static	int
849 bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
850 {
851 	struct bpf_d *d;
852 	int error, size;
853 
854 	d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
855 	error = devfs_set_cdevpriv(d, bpf_dtor);
856 	if (error != 0) {
857 		free(d, M_BPF);
858 		return (error);
859 	}
860 
861 	/*
862 	 * For historical reasons, perform a one-time initialization call to
863 	 * the buffer routines, even though we're not yet committed to a
864 	 * particular buffer method.
865 	 */
866 	bpf_buffer_init(d);
867 	d->bd_hbuf_in_use = 0;
868 	d->bd_bufmode = BPF_BUFMODE_BUFFER;
869 	d->bd_sig = SIGIO;
870 	d->bd_direction = BPF_D_INOUT;
871 	BPF_PID_REFRESH(d, td);
872 #ifdef MAC
873 	mac_bpfdesc_init(d);
874 	mac_bpfdesc_create(td->td_ucred, d);
875 #endif
876 	mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
877 	callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
878 	knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);
879 
880 	/* Allocate default buffers */
881 	size = d->bd_bufsize;
882 	bpf_buffer_ioctl_sblen(d, &size);
883 
884 	return (0);
885 }
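
/*
 * Userland sketch of the open path this routine serves (the interface
 * name is hypothetical; error handling omitted).  Each open() of the
 * clone device gets a private struct bpf_d, which is later bound to an
 * interface with BIOCSETIF:
 *
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <net/bpf.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);	// handled by bpf_setif() below
 */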
886 
887 /*
888  *  bpfread - read next chunk of packets from buffers
889  */
890 static	int
891 bpfread(struct cdev *dev, struct uio *uio, int ioflag)
892 {
893 	struct bpf_d *d;
894 	int error;
895 	int non_block;
896 	int timed_out;
897 
898 	error = devfs_get_cdevpriv((void **)&d);
899 	if (error != 0)
900 		return (error);
901 
902 	/*
903 	 * Restrict the application to use a buffer the same size
904 	 * as the kernel buffers.
905 	 */
906 	if (uio->uio_resid != d->bd_bufsize)
907 		return (EINVAL);
908 
909 	non_block = ((ioflag & O_NONBLOCK) != 0);
910 
911 	BPFD_LOCK(d);
912 	BPF_PID_REFRESH_CUR(d);
913 	if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
914 		BPFD_UNLOCK(d);
915 		return (EOPNOTSUPP);
916 	}
917 	if (d->bd_state == BPF_WAITING)
918 		callout_stop(&d->bd_callout);
919 	timed_out = (d->bd_state == BPF_TIMED_OUT);
920 	d->bd_state = BPF_IDLE;
921 	while (d->bd_hbuf_in_use) {
922 		error = mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
923 		    PRINET|PCATCH, "bd_hbuf", 0);
924 		if (error != 0) {
925 			BPFD_UNLOCK(d);
926 			return (error);
927 		}
928 	}
929 	/*
930 	 * If the hold buffer is empty, then do a timed sleep, which
931 	 * ends when the timeout expires or when enough packets
932 	 * have arrived to fill the store buffer.
933 	 */
934 	while (d->bd_hbuf == NULL) {
935 		if (d->bd_slen != 0) {
936 			/*
937 			 * One or more packets either arrived since the previous
938 			 * read or arrived while we were asleep.
939 			 */
940 			if (d->bd_immediate || non_block || timed_out) {
941 				/*
942 				 * Rotate the buffers and return what's here
943 				 * if we are in immediate mode, non-blocking
944 				 * flag is set, or this descriptor timed out.
945 				 */
946 				ROTATE_BUFFERS(d);
947 				break;
948 			}
949 		}
950 
951 		/*
952 		 * No data is available, check to see if the bpf device
953 		 * is still pointed at a real interface.  If not, return
954 		 * ENXIO so that the userland process knows to rebind
955 		 * it before using it again.
956 		 */
957 		if (d->bd_bif == NULL) {
958 			BPFD_UNLOCK(d);
959 			return (ENXIO);
960 		}
961 
962 		if (non_block) {
963 			BPFD_UNLOCK(d);
964 			return (EWOULDBLOCK);
965 		}
966 		error = msleep(d, &d->bd_lock, PRINET|PCATCH,
967 		     "bpf", d->bd_rtout);
968 		if (error == EINTR || error == ERESTART) {
969 			BPFD_UNLOCK(d);
970 			return (error);
971 		}
972 		if (error == EWOULDBLOCK) {
973 			/*
974 			 * On a timeout, return what's in the buffer,
975 			 * which may be nothing.  If there is something
976 			 * in the store buffer, we can rotate the buffers.
977 			 */
978 			if (d->bd_hbuf)
979 				/*
980 				 * We filled up the buffer in between
981 				 * getting the timeout and arriving
982 				 * here, so we don't need to rotate.
983 				 */
984 				break;
985 
986 			if (d->bd_slen == 0) {
987 				BPFD_UNLOCK(d);
988 				return (0);
989 			}
990 			ROTATE_BUFFERS(d);
991 			break;
992 		}
993 	}
994 	/*
995 	 * At this point, we know we have something in the hold slot.
996 	 */
997 	d->bd_hbuf_in_use = 1;
998 	BPFD_UNLOCK(d);
999 
1000 	/*
1001 	 * Move data from hold buffer into user space.
1002 	 * We know the entire buffer is transferred since
1003 	 * we checked above that the read buffer is bpf_bufsize bytes.
1004   	 *
1005 	 * We do not have to worry about simultaneous reads because
1006 	 * we waited for sole access to the hold buffer above.
1007 	 */
1008 	error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);
1009 
1010 	BPFD_LOCK(d);
1011 	KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf"));
1012 	d->bd_fbuf = d->bd_hbuf;
1013 	d->bd_hbuf = NULL;
1014 	d->bd_hlen = 0;
1015 	bpf_buf_reclaimed(d);
1016 	d->bd_hbuf_in_use = 0;
1017 	wakeup(&d->bd_hbuf_in_use);
1018 	BPFD_UNLOCK(d);
1019 
1020 	return (error);
1021 }
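
/*
 * Userland sketch of the consumer this routine pairs with (assumes a
 * configured descriptor "fd"; error handling omitted).  read() must be
 * issued with exactly bd_bufsize bytes, and the returned buffer is walked
 * one BPF_WORDALIGN()ed bpf_hdr record at a time:
 *
 *	u_int blen;
 *	ioctl(fd, BIOCGBLEN, &blen);
 *	char *buf = malloc(blen);
 *	ssize_t n = read(fd, buf, blen);
 *
 *	for (char *p = buf; p < buf + n; ) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		// packet bytes: p + bh->bh_hdrlen, length bh->bh_caplen
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */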
1022 
1023 /*
1024  * If there are processes sleeping on this descriptor, wake them up.
1025  */
1026 static __inline void
1027 bpf_wakeup(struct bpf_d *d)
1028 {
1029 
1030 	BPFD_LOCK_ASSERT(d);
1031 	if (d->bd_state == BPF_WAITING) {
1032 		callout_stop(&d->bd_callout);
1033 		d->bd_state = BPF_IDLE;
1034 	}
1035 	wakeup(d);
1036 	if (d->bd_async && d->bd_sig && d->bd_sigio)
1037 		pgsigio(&d->bd_sigio, d->bd_sig, 0);
1038 
1039 	selwakeuppri(&d->bd_sel, PRINET);
1040 	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
1041 }
1042 
1043 static void
1044 bpf_timed_out(void *arg)
1045 {
1046 	struct bpf_d *d = (struct bpf_d *)arg;
1047 
1048 	BPFD_LOCK_ASSERT(d);
1049 
1050 	if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout))
1051 		return;
1052 	if (d->bd_state == BPF_WAITING) {
1053 		d->bd_state = BPF_TIMED_OUT;
1054 		if (d->bd_slen != 0)
1055 			bpf_wakeup(d);
1056 	}
1057 }
1058 
1059 static int
1060 bpf_ready(struct bpf_d *d)
1061 {
1062 
1063 	BPFD_LOCK_ASSERT(d);
1064 
1065 	if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
1066 		return (1);
1067 	if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
1068 	    d->bd_slen != 0)
1069 		return (1);
1070 	return (0);
1071 }
1072 
1073 static int
1074 bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
1075 {
1076 	struct bpf_d *d;
1077 	struct ifnet *ifp;
1078 	struct mbuf *m, *mc;
1079 	struct sockaddr dst;
1080 	int error, hlen;
1081 
1082 	error = devfs_get_cdevpriv((void **)&d);
1083 	if (error != 0)
1084 		return (error);
1085 
1086 	BPF_PID_REFRESH_CUR(d);
1087 	d->bd_wcount++;
1088 	/* XXX: locking required */
1089 	if (d->bd_bif == NULL) {
1090 		d->bd_wdcount++;
1091 		return (ENXIO);
1092 	}
1093 
1094 	ifp = d->bd_bif->bif_ifp;
1095 
1096 	if ((ifp->if_flags & IFF_UP) == 0) {
1097 		d->bd_wdcount++;
1098 		return (ENETDOWN);
1099 	}
1100 
1101 	if (uio->uio_resid == 0) {
1102 		d->bd_wdcount++;
1103 		return (0);
1104 	}
1105 
1106 	bzero(&dst, sizeof(dst));
1107 	m = NULL;
1108 	hlen = 0;
1109 	/* XXX: bpf_movein() can sleep */
1110 	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
1111 	    &m, &dst, &hlen, d->bd_wfilter);
1112 	if (error) {
1113 		d->bd_wdcount++;
1114 		return (error);
1115 	}
1116 	d->bd_wfcount++;
1117 	if (d->bd_hdrcmplt)
1118 		dst.sa_family = pseudo_AF_HDRCMPLT;
1119 
1120 	if (d->bd_feedback) {
1121 		mc = m_dup(m, M_NOWAIT);
1122 		if (mc != NULL)
1123 			mc->m_pkthdr.rcvif = ifp;
1124 		/* Set M_PROMISC for outgoing packets to be discarded. */
1125 		if (d->bd_direction == BPF_D_INOUT)
1126 			m->m_flags |= M_PROMISC;
1127 	} else
1128 		mc = NULL;
1129 
1130 	m->m_pkthdr.len -= hlen;
1131 	m->m_len -= hlen;
1132 	m->m_data += hlen;	/* XXX */
1133 
1134 	CURVNET_SET(ifp->if_vnet);
1135 #ifdef MAC
1136 	BPFD_LOCK(d);
1137 	mac_bpfdesc_create_mbuf(d, m);
1138 	if (mc != NULL)
1139 		mac_bpfdesc_create_mbuf(d, mc);
1140 	BPFD_UNLOCK(d);
1141 #endif
1142 
1143 	error = (*ifp->if_output)(ifp, m, &dst, NULL);
1144 	if (error)
1145 		d->bd_wdcount++;
1146 
1147 	if (mc != NULL) {
1148 		if (error == 0)
1149 			(*ifp->if_input)(ifp, mc);
1150 		else
1151 			m_freem(mc);
1152 	}
1153 	CURVNET_RESTORE();
1154 
1155 	return (error);
1156 }
1157 
1158 /*
1159  * Reset a descriptor by flushing its packet buffer and clearing the receive
1160  * and drop counts.  This is doable for kernel-only buffers, but with
1161  * zero-copy buffers, we can't write to (or rotate) buffers that are
1162  * currently owned by userspace.  It would be nice if we could encapsulate
1163  * this logic in the buffer code rather than here.
1164  */
1165 static void
1166 reset_d(struct bpf_d *d)
1167 {
1168 
1169 	BPFD_LOCK_ASSERT(d);
1170 
1171 	while (d->bd_hbuf_in_use)
1172 		mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET,
1173 		    "bd_hbuf", 0);
1174 	if ((d->bd_hbuf != NULL) &&
1175 	    (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
1176 		/* Free the hold buffer. */
1177 		d->bd_fbuf = d->bd_hbuf;
1178 		d->bd_hbuf = NULL;
1179 		d->bd_hlen = 0;
1180 		bpf_buf_reclaimed(d);
1181 	}
1182 	if (bpf_canwritebuf(d))
1183 		d->bd_slen = 0;
1184 	d->bd_rcount = 0;
1185 	d->bd_dcount = 0;
1186 	d->bd_fcount = 0;
1187 	d->bd_wcount = 0;
1188 	d->bd_wfcount = 0;
1189 	d->bd_wdcount = 0;
1190 	d->bd_zcopy = 0;
1191 }
1192 
1193 /*
1194  *  FIONREAD		Check for read packet available.
1195  *  BIOCGBLEN		Get buffer len [for read()].
1196  *  BIOCSETF		Set read filter.
1197  *  BIOCSETFNR		Set read filter without resetting descriptor.
1198  *  BIOCSETWF		Set write filter.
1199  *  BIOCFLUSH		Flush read packet buffer.
1200  *  BIOCPROMISC		Put interface into promiscuous mode.
1201  *  BIOCGDLT		Get link layer type.
1202  *  BIOCGETIF		Get interface name.
1203  *  BIOCSETIF		Set interface.
1204  *  BIOCSRTIMEOUT	Set read timeout.
1205  *  BIOCGRTIMEOUT	Get read timeout.
1206  *  BIOCGSTATS		Get packet stats.
1207  *  BIOCIMMEDIATE	Set immediate mode.
1208  *  BIOCVERSION		Get filter language version.
1209  *  BIOCGHDRCMPLT	Get "header already complete" flag
1210  *  BIOCSHDRCMPLT	Set "header already complete" flag
1211  *  BIOCGDIRECTION	Get packet direction flag
1212  *  BIOCSDIRECTION	Set packet direction flag
1213  *  BIOCGTSTAMP		Get time stamp format and resolution.
1214  *  BIOCSTSTAMP		Set time stamp format and resolution.
1215  *  BIOCLOCK		Set "locked" flag
1216  *  BIOCFEEDBACK	Set packet feedback mode.
1217  *  BIOCSETZBUF		Set current zero-copy buffer locations.
1218  *  BIOCGETZMAX		Get maximum zero-copy buffer size.
1219  *  BIOCROTZBUF		Force rotation of zero-copy buffer
1220  *  BIOCSETBUFMODE	Set buffer mode.
1221  *  BIOCGETBUFMODE	Get current buffer mode.
1222  */
1223 /* ARGSUSED */
1224 static	int
1225 bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
1226     struct thread *td)
1227 {
1228 	struct bpf_d *d;
1229 	int error;
1230 
1231 	error = devfs_get_cdevpriv((void **)&d);
1232 	if (error != 0)
1233 		return (error);
1234 
1235 	/*
1236 	 * Refresh PID associated with this descriptor.
1237 	 */
1238 	BPFD_LOCK(d);
1239 	BPF_PID_REFRESH(d, td);
1240 	if (d->bd_state == BPF_WAITING)
1241 		callout_stop(&d->bd_callout);
1242 	d->bd_state = BPF_IDLE;
1243 	BPFD_UNLOCK(d);
1244 
1245 	if (d->bd_locked == 1) {
1246 		switch (cmd) {
1247 		case BIOCGBLEN:
1248 		case BIOCFLUSH:
1249 		case BIOCGDLT:
1250 		case BIOCGDLTLIST:
1251 #ifdef COMPAT_FREEBSD32
1252 		case BIOCGDLTLIST32:
1253 #endif
1254 		case BIOCGETIF:
1255 		case BIOCGRTIMEOUT:
1256 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1257 		case BIOCGRTIMEOUT32:
1258 #endif
1259 		case BIOCGSTATS:
1260 		case BIOCVERSION:
1261 		case BIOCGRSIG:
1262 		case BIOCGHDRCMPLT:
1263 		case BIOCSTSTAMP:
1264 		case BIOCFEEDBACK:
1265 		case FIONREAD:
1266 		case BIOCLOCK:
1267 		case BIOCSRTIMEOUT:
1268 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1269 		case BIOCSRTIMEOUT32:
1270 #endif
1271 		case BIOCIMMEDIATE:
1272 		case TIOCGPGRP:
1273 		case BIOCROTZBUF:
1274 			break;
1275 		default:
1276 			return (EPERM);
1277 		}
1278 	}
1279 #ifdef COMPAT_FREEBSD32
1280 	/*
1281 	 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
1282 	 * that it will get 32-bit packet headers.
1283 	 */
1284 	switch (cmd) {
1285 	case BIOCSETF32:
1286 	case BIOCSETFNR32:
1287 	case BIOCSETWF32:
1288 	case BIOCGDLTLIST32:
1289 	case BIOCGRTIMEOUT32:
1290 	case BIOCSRTIMEOUT32:
1291 		BPFD_LOCK(d);
1292 		d->bd_compat32 = 1;
1293 		BPFD_UNLOCK(d);
1294 	}
1295 #endif
1296 
1297 	CURVNET_SET(TD_TO_VNET(td));
1298 	switch (cmd) {
1299 
1300 	default:
1301 		error = EINVAL;
1302 		break;
1303 
1304 	/*
1305 	 * Check for read packet available.
1306 	 */
1307 	case FIONREAD:
1308 		{
1309 			int n;
1310 
1311 			BPFD_LOCK(d);
1312 			n = d->bd_slen;
1313 			while (d->bd_hbuf_in_use)
1314 				mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
1315 				    PRINET, "bd_hbuf", 0);
1316 			if (d->bd_hbuf)
1317 				n += d->bd_hlen;
1318 			BPFD_UNLOCK(d);
1319 
1320 			*(int *)addr = n;
1321 			break;
1322 		}
1323 
1324 	/*
1325 	 * Get buffer len [for read()].
1326 	 */
1327 	case BIOCGBLEN:
1328 		BPFD_LOCK(d);
1329 		*(u_int *)addr = d->bd_bufsize;
1330 		BPFD_UNLOCK(d);
1331 		break;
1332 
1333 	/*
1334 	 * Set buffer length.
1335 	 */
1336 	case BIOCSBLEN:
1337 		error = bpf_ioctl_sblen(d, (u_int *)addr);
1338 		break;
1339 
1340 	/*
1341 	 * Set link layer read filter.
1342 	 */
1343 	case BIOCSETF:
1344 	case BIOCSETFNR:
1345 	case BIOCSETWF:
1346 #ifdef COMPAT_FREEBSD32
1347 	case BIOCSETF32:
1348 	case BIOCSETFNR32:
1349 	case BIOCSETWF32:
1350 #endif
1351 		error = bpf_setf(d, (struct bpf_program *)addr, cmd);
1352 		break;
1353 
1354 	/*
1355 	 * Flush read packet buffer.
1356 	 */
1357 	case BIOCFLUSH:
1358 		BPFD_LOCK(d);
1359 		reset_d(d);
1360 		BPFD_UNLOCK(d);
1361 		break;
1362 
1363 	/*
1364 	 * Put interface into promiscuous mode.
1365 	 */
1366 	case BIOCPROMISC:
1367 		if (d->bd_bif == NULL) {
1368 			/*
1369 			 * No interface attached yet.
1370 			 */
1371 			error = EINVAL;
1372 			break;
1373 		}
1374 		if (d->bd_promisc == 0) {
1375 			error = ifpromisc(d->bd_bif->bif_ifp, 1);
1376 			if (error == 0)
1377 				d->bd_promisc = 1;
1378 		}
1379 		break;
1380 
1381 	/*
1382 	 * Get current data link type.
1383 	 */
1384 	case BIOCGDLT:
1385 		BPF_LOCK();
1386 		if (d->bd_bif == NULL)
1387 			error = EINVAL;
1388 		else
1389 			*(u_int *)addr = d->bd_bif->bif_dlt;
1390 		BPF_UNLOCK();
1391 		break;
1392 
1393 	/*
1394 	 * Get a list of supported data link types.
1395 	 */
1396 #ifdef COMPAT_FREEBSD32
1397 	case BIOCGDLTLIST32:
1398 		{
1399 			struct bpf_dltlist32 *list32;
1400 			struct bpf_dltlist dltlist;
1401 
1402 			list32 = (struct bpf_dltlist32 *)addr;
1403 			dltlist.bfl_len = list32->bfl_len;
1404 			dltlist.bfl_list = PTRIN(list32->bfl_list);
1405 			BPF_LOCK();
1406 			if (d->bd_bif == NULL)
1407 				error = EINVAL;
1408 			else {
1409 				error = bpf_getdltlist(d, &dltlist);
1410 				if (error == 0)
1411 					list32->bfl_len = dltlist.bfl_len;
1412 			}
1413 			BPF_UNLOCK();
1414 			break;
1415 		}
1416 #endif
1417 
1418 	case BIOCGDLTLIST:
1419 		BPF_LOCK();
1420 		if (d->bd_bif == NULL)
1421 			error = EINVAL;
1422 		else
1423 			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
1424 		BPF_UNLOCK();
1425 		break;
1426 
1427 	/*
1428 	 * Set data link type.
1429 	 */
1430 	case BIOCSDLT:
1431 		BPF_LOCK();
1432 		if (d->bd_bif == NULL)
1433 			error = EINVAL;
1434 		else
1435 			error = bpf_setdlt(d, *(u_int *)addr);
1436 		BPF_UNLOCK();
1437 		break;
1438 
1439 	/*
1440 	 * Get interface name.
1441 	 */
1442 	case BIOCGETIF:
1443 		BPF_LOCK();
1444 		if (d->bd_bif == NULL)
1445 			error = EINVAL;
1446 		else {
1447 			struct ifnet *const ifp = d->bd_bif->bif_ifp;
1448 			struct ifreq *const ifr = (struct ifreq *)addr;
1449 
1450 			strlcpy(ifr->ifr_name, ifp->if_xname,
1451 			    sizeof(ifr->ifr_name));
1452 		}
1453 		BPF_UNLOCK();
1454 		break;
1455 
1456 	/*
1457 	 * Set interface.
1458 	 */
1459 	case BIOCSETIF:
1460 		BPF_LOCK();
1461 		error = bpf_setif(d, (struct ifreq *)addr);
1462 		BPF_UNLOCK();
1463 		break;
1464 
1465 	/*
1466 	 * Set read timeout.
1467 	 */
1468 	case BIOCSRTIMEOUT:
1469 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1470 	case BIOCSRTIMEOUT32:
1471 #endif
1472 		{
1473 			struct timeval *tv = (struct timeval *)addr;
1474 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1475 			struct timeval32 *tv32;
1476 			struct timeval tv64;
1477 
1478 			if (cmd == BIOCSRTIMEOUT32) {
1479 				tv32 = (struct timeval32 *)addr;
1480 				tv = &tv64;
1481 				tv->tv_sec = tv32->tv_sec;
1482 				tv->tv_usec = tv32->tv_usec;
1483 			} else
1484 #endif
1485 				tv = (struct timeval *)addr;
1486 
1487 			/*
1488 			 * Subtract 1 tick from tvtohz() since this isn't
1489 			 * a one-shot timer.
1490 			 */
1491 			if ((error = itimerfix(tv)) == 0)
1492 				d->bd_rtout = tvtohz(tv) - 1;
1493 			break;
1494 		}
1495 
1496 	/*
1497 	 * Get read timeout.
1498 	 */
1499 	case BIOCGRTIMEOUT:
1500 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1501 	case BIOCGRTIMEOUT32:
1502 #endif
1503 		{
1504 			struct timeval *tv;
1505 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1506 			struct timeval32 *tv32;
1507 			struct timeval tv64;
1508 
1509 			if (cmd == BIOCGRTIMEOUT32)
1510 				tv = &tv64;
1511 			else
1512 #endif
1513 				tv = (struct timeval *)addr;
1514 
1515 			tv->tv_sec = d->bd_rtout / hz;
1516 			tv->tv_usec = (d->bd_rtout % hz) * tick;
1517 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1518 			if (cmd == BIOCGRTIMEOUT32) {
1519 				tv32 = (struct timeval32 *)addr;
1520 				tv32->tv_sec = tv->tv_sec;
1521 				tv32->tv_usec = tv->tv_usec;
1522 			}
1523 #endif
1524 
1525 			break;
1526 		}
1527 
1528 	/*
1529 	 * Get packet stats.
1530 	 */
1531 	case BIOCGSTATS:
1532 		{
1533 			struct bpf_stat *bs = (struct bpf_stat *)addr;
1534 
1535 			/* XXXCSJP overflow */
1536 			bs->bs_recv = d->bd_rcount;
1537 			bs->bs_drop = d->bd_dcount;
1538 			break;
1539 		}
1540 
1541 	/*
1542 	 * Set immediate mode.
1543 	 */
1544 	case BIOCIMMEDIATE:
1545 		BPFD_LOCK(d);
1546 		d->bd_immediate = *(u_int *)addr;
1547 		BPFD_UNLOCK(d);
1548 		break;
1549 
1550 	case BIOCVERSION:
1551 		{
1552 			struct bpf_version *bv = (struct bpf_version *)addr;
1553 
1554 			bv->bv_major = BPF_MAJOR_VERSION;
1555 			bv->bv_minor = BPF_MINOR_VERSION;
1556 			break;
1557 		}
1558 
1559 	/*
1560 	 * Get "header already complete" flag
1561 	 */
1562 	case BIOCGHDRCMPLT:
1563 		BPFD_LOCK(d);
1564 		*(u_int *)addr = d->bd_hdrcmplt;
1565 		BPFD_UNLOCK(d);
1566 		break;
1567 
1568 	/*
1569 	 * Set "header already complete" flag
1570 	 */
1571 	case BIOCSHDRCMPLT:
1572 		BPFD_LOCK(d);
1573 		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
1574 		BPFD_UNLOCK(d);
1575 		break;
1576 
1577 	/*
1578 	 * Get packet direction flag
1579 	 */
1580 	case BIOCGDIRECTION:
1581 		BPFD_LOCK(d);
1582 		*(u_int *)addr = d->bd_direction;
1583 		BPFD_UNLOCK(d);
1584 		break;
1585 
1586 	/*
1587 	 * Set packet direction flag
1588 	 */
1589 	case BIOCSDIRECTION:
1590 		{
1591 			u_int	direction;
1592 
1593 			direction = *(u_int *)addr;
1594 			switch (direction) {
1595 			case BPF_D_IN:
1596 			case BPF_D_INOUT:
1597 			case BPF_D_OUT:
1598 				BPFD_LOCK(d);
1599 				d->bd_direction = direction;
1600 				BPFD_UNLOCK(d);
1601 				break;
1602 			default:
1603 				error = EINVAL;
1604 			}
1605 		}
1606 		break;
1607 
1608 	/*
1609 	 * Get packet timestamp format and resolution.
1610 	 */
1611 	case BIOCGTSTAMP:
1612 		BPFD_LOCK(d);
1613 		*(u_int *)addr = d->bd_tstamp;
1614 		BPFD_UNLOCK(d);
1615 		break;
1616 
1617 	/*
1618 	 * Set packet timestamp format and resolution.
1619 	 */
1620 	case BIOCSTSTAMP:
1621 		{
1622 			u_int	func;
1623 
1624 			func = *(u_int *)addr;
1625 			if (BPF_T_VALID(func))
1626 				d->bd_tstamp = func;
1627 			else
1628 				error = EINVAL;
1629 		}
1630 		break;
1631 
1632 	case BIOCFEEDBACK:
1633 		BPFD_LOCK(d);
1634 		d->bd_feedback = *(u_int *)addr;
1635 		BPFD_UNLOCK(d);
1636 		break;
1637 
1638 	case BIOCLOCK:
1639 		BPFD_LOCK(d);
1640 		d->bd_locked = 1;
1641 		BPFD_UNLOCK(d);
1642 		break;
1643 
1644 	case FIONBIO:		/* Non-blocking I/O */
1645 		break;
1646 
1647 	case FIOASYNC:		/* Send signal on receive packets */
1648 		BPFD_LOCK(d);
1649 		d->bd_async = *(int *)addr;
1650 		BPFD_UNLOCK(d);
1651 		break;
1652 
1653 	case FIOSETOWN:
1654 		/*
1655 		 * XXX: Add some sort of locking here?
1656 		 * fsetown() can sleep.
1657 		 */
1658 		error = fsetown(*(int *)addr, &d->bd_sigio);
1659 		break;
1660 
1661 	case FIOGETOWN:
1662 		BPFD_LOCK(d);
1663 		*(int *)addr = fgetown(&d->bd_sigio);
1664 		BPFD_UNLOCK(d);
1665 		break;
1666 
1667 	/* This is deprecated, FIOSETOWN should be used instead. */
1668 	case TIOCSPGRP:
1669 		error = fsetown(-(*(int *)addr), &d->bd_sigio);
1670 		break;
1671 
1672 	/* This is deprecated, FIOGETOWN should be used instead. */
1673 	case TIOCGPGRP:
1674 		*(int *)addr = -fgetown(&d->bd_sigio);
1675 		break;
1676 
1677 	case BIOCSRSIG:		/* Set receive signal */
1678 		{
1679 			u_int sig;
1680 
1681 			sig = *(u_int *)addr;
1682 
1683 			if (sig >= NSIG)
1684 				error = EINVAL;
1685 			else {
1686 				BPFD_LOCK(d);
1687 				d->bd_sig = sig;
1688 				BPFD_UNLOCK(d);
1689 			}
1690 			break;
1691 		}
1692 	case BIOCGRSIG:
1693 		BPFD_LOCK(d);
1694 		*(u_int *)addr = d->bd_sig;
1695 		BPFD_UNLOCK(d);
1696 		break;
1697 
1698 	case BIOCGETBUFMODE:
1699 		BPFD_LOCK(d);
1700 		*(u_int *)addr = d->bd_bufmode;
1701 		BPFD_UNLOCK(d);
1702 		break;
1703 
1704 	case BIOCSETBUFMODE:
1705 		/*
1706 		 * Allow the buffering mode to be changed as long as we
1707 		 * haven't yet committed to a particular mode.  Our
1708 		 * definition of commitment, for now, is whether or not a
1709 		 * buffer has been allocated or an interface attached, since
1710 		 * that's the point where things get tricky.
1711 		 */
1712 		switch (*(u_int *)addr) {
1713 		case BPF_BUFMODE_BUFFER:
1714 			break;
1715 
1716 		case BPF_BUFMODE_ZBUF:
1717 			if (bpf_zerocopy_enable)
1718 				break;
1719 			/* FALLTHROUGH */
1720 
1721 		default:
1722 			CURVNET_RESTORE();
1723 			return (EINVAL);
1724 		}
1725 
1726 		BPFD_LOCK(d);
1727 		if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
1728 		    d->bd_fbuf != NULL || d->bd_bif != NULL) {
1729 			BPFD_UNLOCK(d);
1730 			CURVNET_RESTORE();
1731 			return (EBUSY);
1732 		}
1733 		d->bd_bufmode = *(u_int *)addr;
1734 		BPFD_UNLOCK(d);
1735 		break;
1736 
1737 	case BIOCGETZMAX:
1738 		error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
1739 		break;
1740 
1741 	case BIOCSETZBUF:
1742 		error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
1743 		break;
1744 
1745 	case BIOCROTZBUF:
1746 		error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
1747 		break;
1748 	}
1749 	CURVNET_RESTORE();
1750 	return (error);
1751 }
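
/*
 * Userland sketch of two of the simpler requests dispatched above
 * (assumes a bound descriptor "fd"; error handling omitted): restrict
 * capture to inbound packets, then fetch the receive/drop counters:
 *
 *	u_int dir = BPF_D_IN;
 *	struct bpf_stat bs;
 *
 *	ioctl(fd, BIOCSDIRECTION, &dir);
 *	ioctl(fd, BIOCGSTATS, &bs);
 *	printf("recv %u drop %u\n", bs.bs_recv, bs.bs_drop);
 */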
1752 
1753 /*
1754  * Set d's packet filter program to fp.  If this file already has a filter,
1755  * free it and replace it.  Returns EINVAL for bogus requests.
1756  *
1757  * Note we need the global lock here to serialize bpf_setf() and bpf_setif()
1758  * calls, since reading d->bd_bif can't be protected by the descriptor or
1759  * interface lock due to lock order.
1760  *
1761  * Additionally, we have to acquire the interface write lock, because
1762  * bpf_mtap() uses the interface read lock to read all filters.
1763  *
1764  */
1765 static int
1766 bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
1767 {
1768 #ifdef COMPAT_FREEBSD32
1769 	struct bpf_program fp_swab;
1770 	struct bpf_program32 *fp32;
1771 #endif
1772 	struct bpf_insn *fcode, *old;
1773 #ifdef BPF_JITTER
1774 	bpf_jit_filter *jfunc, *ofunc;
1775 #endif
1776 	size_t size;
1777 	u_int flen;
1778 	int need_upgrade;
1779 
1780 #ifdef COMPAT_FREEBSD32
1781 	switch (cmd) {
1782 	case BIOCSETF32:
1783 	case BIOCSETWF32:
1784 	case BIOCSETFNR32:
1785 		fp32 = (struct bpf_program32 *)fp;
1786 		fp_swab.bf_len = fp32->bf_len;
1787 		fp_swab.bf_insns = (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
1788 		fp = &fp_swab;
1789 		switch (cmd) {
1790 		case BIOCSETF32:
1791 			cmd = BIOCSETF;
1792 			break;
1793 		case BIOCSETWF32:
1794 			cmd = BIOCSETWF;
1795 			break;
1796 		}
1797 		break;
1798 	}
1799 #endif
1800 
1801 	fcode = NULL;
1802 #ifdef BPF_JITTER
1803 	jfunc = ofunc = NULL;
1804 #endif
1805 	need_upgrade = 0;
1806 
1807 	/*
1808 	 * Check the new filter's validity before acquiring any locks.
1809 	 * Allocate memory for new filter, if needed.
1810 	 */
1811 	flen = fp->bf_len;
1812 	if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0))
1813 		return (EINVAL);
1814 	size = flen * sizeof(*fp->bf_insns);
1815 	if (size > 0) {
1816 		/* We're setting up a new filter.  Copy and check the actual data. */
1817 		fcode = malloc(size, M_BPF, M_WAITOK);
1818 		if (copyin(fp->bf_insns, fcode, size) != 0 ||
1819 		    !bpf_validate(fcode, flen)) {
1820 			free(fcode, M_BPF);
1821 			return (EINVAL);
1822 		}
1823 #ifdef BPF_JITTER
1824 		/* Filter is copied inside fcode and is perfectly valid. */
1825 		jfunc = bpf_jitter(fcode, flen);
1826 #endif
1827 	}
1828 
1829 	BPF_LOCK();
1830 
1831 	/*
1832 	 * Set up the new filter.
1833 	 * Protect the filter change with the interface lock.
1834 	 * Additionally, we are protected by the global lock here.
1835 	 */
1836 	if (d->bd_bif != NULL)
1837 		BPFIF_WLOCK(d->bd_bif);
1838 	BPFD_LOCK(d);
1839 	if (cmd == BIOCSETWF) {
1840 		old = d->bd_wfilter;
1841 		d->bd_wfilter = fcode;
1842 	} else {
1843 		old = d->bd_rfilter;
1844 		d->bd_rfilter = fcode;
1845 #ifdef BPF_JITTER
1846 		ofunc = d->bd_bfilter;
1847 		d->bd_bfilter = jfunc;
1848 #endif
1849 		if (cmd == BIOCSETF)
1850 			reset_d(d);
1851 
1852 		need_upgrade = bpf_check_upgrade(cmd, d, fcode, flen);
1853 	}
1854 	BPFD_UNLOCK(d);
1855 	if (d->bd_bif != NULL)
1856 		BPFIF_WUNLOCK(d->bd_bif);
1857 	if (old != NULL)
1858 		free(old, M_BPF);
1859 #ifdef BPF_JITTER
1860 	if (ofunc != NULL)
1861 		bpf_destroy_jit_filter(ofunc);
1862 #endif
1863 
1864 	/* Move d to active readers list. */
1865 	if (need_upgrade != 0)
1866 		bpf_upgraded(d);
1867 
1868 	BPF_UNLOCK();
1869 	return (0);
1870 }
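
/*
 * Userland sketch of a program this function would accept: the classic
 * "IPv4 only" filter for DLT_EN10MB, built with the BPF_STMT and BPF_JUMP
 * macros from <net/bpf.h> (illustrative only):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),	// load ethertype
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, (u_int)-1),	// accept
 *		BPF_STMT(BPF_RET | BPF_K, 0),		// reject
 *	};
 *	struct bpf_program prog = { 4, insns };
 *
 *	ioctl(fd, BIOCSETF, &prog);	// copyin + bpf_validate() above
 */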
1871 
1872 /*
1873  * Detach a file from its current interface (if attached at all) and attach
1874  * to the interface indicated by the name stored in ifr.
1875  * Return an errno or 0.
1876  */
1877 static int
1878 bpf_setif(struct bpf_d *d, struct ifreq *ifr)
1879 {
1880 	struct bpf_if *bp;
1881 	struct ifnet *theywant;
1882 
1883 	BPF_LOCK_ASSERT();
1884 
1885 	theywant = ifunit(ifr->ifr_name);
1886 	if (theywant == NULL || theywant->if_bpf == NULL)
1887 		return (ENXIO);
1888 
1889 	bp = theywant->if_bpf;
1890 
1891 	/* Check that the interface is not being detached from BPF */
1892 	BPFIF_RLOCK(bp);
1893 	if (bp->flags & BPFIF_FLAG_DYING) {
1894 		BPFIF_RUNLOCK(bp);
1895 		return (ENXIO);
1896 	}
1897 	BPFIF_RUNLOCK(bp);
1898 
1899 	/*
1900 	 * Behavior here depends on the buffering model.  If we're using
1901 	 * kernel memory buffers, then we can allocate them here.  If we're
1902 	 * using zero-copy, then the user process must have registered
1903 	 * buffers by the time we get here.  If not, return an error.
1904 	 */
1905 	switch (d->bd_bufmode) {
1906 	case BPF_BUFMODE_BUFFER:
1907 	case BPF_BUFMODE_ZBUF:
1908 		if (d->bd_sbuf == NULL)
1909 			return (EINVAL);
1910 		break;
1911 
1912 	default:
1913 		panic("bpf_setif: bufmode %d", d->bd_bufmode);
1914 	}
1915 	if (bp != d->bd_bif)
1916 		bpf_attachd(d, bp);
1917 	BPFD_LOCK(d);
1918 	reset_d(d);
1919 	BPFD_UNLOCK(d);
1920 	return (0);
1921 }
1922 
1923 /*
1924  * Support for select() and poll() system calls
1925  *
1926  * Return true iff the specific operation will not block indefinitely.
1927  * Otherwise, return false but make a note that a selwakeup() must be done.
1928  */
1929 static int
1930 bpfpoll(struct cdev *dev, int events, struct thread *td)
1931 {
1932 	struct bpf_d *d;
1933 	int revents;
1934 
1935 	if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
1936 		return (events &
1937 		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
1938 
1939 	/*
1940 	 * Refresh PID associated with this descriptor.
1941 	 */
1942 	revents = events & (POLLOUT | POLLWRNORM);
1943 	BPFD_LOCK(d);
1944 	BPF_PID_REFRESH(d, td);
1945 	if (events & (POLLIN | POLLRDNORM)) {
1946 		if (bpf_ready(d))
1947 			revents |= events & (POLLIN | POLLRDNORM);
1948 		else {
1949 			selrecord(td, &d->bd_sel);
1950 			/* Start the read timeout if necessary. */
1951 			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1952 				callout_reset(&d->bd_callout, d->bd_rtout,
1953 				    bpf_timed_out, d);
1954 				d->bd_state = BPF_WAITING;
1955 			}
1956 		}
1957 	}
1958 	BPFD_UNLOCK(d);
1959 	return (revents);
1960 }
1961 
1962 /*
1963  * Support for kevent() system call.  Register EVFILT_READ filters and
1964  * reject all others.
1965  */
1966 int
1967 bpfkqfilter(struct cdev *dev, struct knote *kn)
1968 {
1969 	struct bpf_d *d;
1970 
1971 	if (devfs_get_cdevpriv((void **)&d) != 0 ||
1972 	    kn->kn_filter != EVFILT_READ)
1973 		return (1);
1974 
1975 	/*
1976 	 * Refresh PID associated with this descriptor.
1977 	 */
1978 	BPFD_LOCK(d);
1979 	BPF_PID_REFRESH_CUR(d);
1980 	kn->kn_fop = &bpfread_filtops;
1981 	kn->kn_hook = d;
1982 	knlist_add(&d->bd_sel.si_note, kn, 1);
1983 	BPFD_UNLOCK(d);
1984 
1985 	return (0);
1986 }
1987 
1988 static void
1989 filt_bpfdetach(struct knote *kn)
1990 {
1991 	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1992 
1993 	knlist_remove(&d->bd_sel.si_note, kn, 0);
1994 }
1995 
1996 static int
1997 filt_bpfread(struct knote *kn, long hint)
1998 {
1999 	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
2000 	int ready;
2001 
2002 	BPFD_LOCK_ASSERT(d);
2003 	ready = bpf_ready(d);
2004 	if (ready) {
2005 		kn->kn_data = d->bd_slen;
2006 		while (d->bd_hbuf_in_use)
2007 			mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
2008 			    PRINET, "bd_hbuf", 0);
2009 		if (d->bd_hbuf)
2010 			kn->kn_data += d->bd_hlen;
2011 	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
2012 		callout_reset(&d->bd_callout, d->bd_rtout,
2013 		    bpf_timed_out, d);
2014 		d->bd_state = BPF_WAITING;
2015 	}
2016 
2017 	return (ready);
2018 }
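
/*
 * Userland sketch of registering for the EVFILT_READ filter implemented
 * by bpfkqfilter()/filt_bpfread() above (assumes an open bpf descriptor
 * "fd"; error handling omitted):
 *
 *	#include <sys/event.h>
 *
 *	int kq = kqueue();
 *	struct kevent ev;
 *
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 *	// on wakeup, the readable byte count (kn_data) is reported in ev.data
 */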
2019 
2020 #define	BPF_TSTAMP_NONE		0
2021 #define	BPF_TSTAMP_FAST		1
2022 #define	BPF_TSTAMP_NORMAL	2
2023 #define	BPF_TSTAMP_EXTERN	3
2024 
2025 static int
2026 bpf_ts_quality(int tstype)
2027 {
2028 
2029 	if (tstype == BPF_T_NONE)
2030 		return (BPF_TSTAMP_NONE);
2031 	if ((tstype & BPF_T_FAST) != 0)
2032 		return (BPF_TSTAMP_FAST);
2033 
2034 	return (BPF_TSTAMP_NORMAL);
2035 }
2036 
2037 static int
2038 bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m)
2039 {
2040 	struct m_tag *tag;
2041 	int quality;
2042 
2043 	quality = bpf_ts_quality(tstype);
2044 	if (quality == BPF_TSTAMP_NONE)
2045 		return (quality);
2046 
2047 	if (m != NULL) {
2048 		tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL);
2049 		if (tag != NULL) {
2050 			*bt = *(struct bintime *)(tag + 1);
2051 			return (BPF_TSTAMP_EXTERN);
2052 		}
2053 	}
2054 	if (quality == BPF_TSTAMP_NORMAL)
2055 		binuptime(bt);
2056 	else
2057 		getbinuptime(bt);
2058 
2059 	return (quality);
2060 }
2061 
2062 /*
2063  * Incoming linkage from device drivers.  Process the packet pkt, of length
2064  * pktlen, which is stored in a contiguous buffer.  The packet is parsed
2065  * by each process' filter, and if accepted, stashed into the corresponding
2066  * buffer.
2067  */
2068 void
2069 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2070 {
2071 	struct bintime bt;
2072 	struct bpf_d *d;
2073 #ifdef BPF_JITTER
2074 	bpf_jit_filter *bf;
2075 #endif
2076 	u_int slen;
2077 	int gottime;
2078 
2079 	gottime = BPF_TSTAMP_NONE;
2080 
2081 	BPFIF_RLOCK(bp);
2082 
2083 	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2084 		/*
2085 		 * We are not using any locks for d here because:
2086 		 * 1) any filter change is protected by interface
2087 		 * write lock
2088 		 * 2) destroying/detaching d is protected by interface
2089 		 * write lock, too
2090 		 */
2091 
2092 		/* XXX: Do not protect counter for the sake of performance. */
2093 		++d->bd_rcount;
2094 		/*
2095 		 * NB: We don't call BPF_CHECK_DIRECTION() here since there is no
2096 		 * way for the caller to indicate to us whether this packet
2097 		 * is inbound or outbound.  In the bpf_mtap() routines, we use
2098 		 * the interface pointers on the mbuf to figure it out.
2099 		 */
2100 #ifdef BPF_JITTER
2101 		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2102 		if (bf != NULL)
2103 			slen = (*(bf->func))(pkt, pktlen, pktlen);
2104 		else
2105 #endif
2106 		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
2107 		if (slen != 0) {
2108 			/*
2109 			 * Filter matches; acquire the descriptor lock.
2110 			 */
2111 			BPFD_LOCK(d);
2112 
2113 			d->bd_fcount++;
2114 			if (gottime < bpf_ts_quality(d->bd_tstamp))
2115 				gottime = bpf_gettime(&bt, d->bd_tstamp, NULL);
2116 #ifdef MAC
2117 			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2118 #endif
2119 				catchpacket(d, pkt, pktlen, slen,
2120 				    bpf_append_bytes, &bt);
2121 			BPFD_UNLOCK(d);
2122 		}
2123 	}
2124 	BPFIF_RUNLOCK(bp);
2125 }
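
/*
 * Illustrative driver-side usage (a sketch): a driver holding a received
 * frame in one contiguous buffer taps it roughly as follows; the
 * BPF_TAP() convenience macro in net/bpf.h wraps the same test and call.
 *
 *	if (bpf_peers_present(ifp->if_bpf))
 *		bpf_tap(ifp->if_bpf, buf, buflen);
 */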
2126 
2127 #define	BPF_CHECK_DIRECTION(d, r, i)				\
2128 	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
2129 	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
2130 
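/*
 * Worked example (a sketch): with bd_direction == BPF_D_IN, a packet whose
 * receive interface (r) is not the tapped interface (i) is treated as
 * outbound and skipped; with BPF_D_OUT, r == i marks a received packet,
 * which is skipped.  BPF_D_INOUT matches neither arm, so everything
 * passes.
 */
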
2131 /*
2132  * Incoming linkage from device drivers, when packet is in an mbuf chain.
2133  * Locking model is explained in bpf_tap().
2134  */
2135 void
2136 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2137 {
2138 	struct bintime bt;
2139 	struct bpf_d *d;
2140 #ifdef BPF_JITTER
2141 	bpf_jit_filter *bf;
2142 #endif
2143 	u_int pktlen, slen;
2144 	int gottime;
2145 
2146 	/* Skip outgoing duplicate packets. */
2147 	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2148 		m->m_flags &= ~M_PROMISC;
2149 		return;
2150 	}
2151 
2152 	pktlen = m_length(m, NULL);
2153 	gottime = BPF_TSTAMP_NONE;
2154 
2155 	BPFIF_RLOCK(bp);
2156 
2157 	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2158 		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2159 			continue;
2160 		++d->bd_rcount;
2161 #ifdef BPF_JITTER
2162 		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2163 		/* XXX We cannot handle multiple mbufs. */
2164 		if (bf != NULL && m->m_next == NULL)
2165 			slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
2166 		else
2167 #endif
2168 		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
2169 		if (slen != 0) {
2170 			BPFD_LOCK(d);
2171 
2172 			d->bd_fcount++;
2173 			if (gottime < bpf_ts_quality(d->bd_tstamp))
2174 				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2175 #ifdef MAC
2176 			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2177 #endif
2178 				catchpacket(d, (u_char *)m, pktlen, slen,
2179 				    bpf_append_mbuf, &bt);
2180 			BPFD_UNLOCK(d);
2181 		}
2182 	}
2183 	BPFIF_RUNLOCK(bp);
2184 }
2185 
2186 /*
2187  * Incoming linkage from device drivers, when packet is in
2188  * an mbuf chain and to be prepended by a contiguous header.
2189  */
2190 void
2191 bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
2192 {
2193 	struct bintime bt;
2194 	struct mbuf mb;
2195 	struct bpf_d *d;
2196 	u_int pktlen, slen;
2197 	int gottime;
2198 
2199 	/* Skip outgoing duplicate packets. */
2200 	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2201 		m->m_flags &= ~M_PROMISC;
2202 		return;
2203 	}
2204 
2205 	pktlen = m_length(m, NULL);
2206 	/*
2207 	 * Craft on-stack mbuf suitable for passing to bpf_filter.
2208 	 * Note that we cut corners here; we only set up what's
2209 	 * absolutely needed--this mbuf should never go anywhere else.
2210 	 */
2211 	mb.m_next = m;
2212 	mb.m_data = data;
2213 	mb.m_len = dlen;
2214 	pktlen += dlen;
2215 
2216 	gottime = BPF_TSTAMP_NONE;
2217 
2218 	BPFIF_RLOCK(bp);
2219 
2220 	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2221 		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2222 			continue;
2223 		++d->bd_rcount;
2224 		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
2225 		if (slen != 0) {
2226 			BPFD_LOCK(d);
2227 
2228 			d->bd_fcount++;
2229 			if (gottime < bpf_ts_quality(d->bd_tstamp))
2230 				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2231 #ifdef MAC
2232 			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2233 #endif
2234 				catchpacket(d, (u_char *)&mb, pktlen, slen,
2235 				    bpf_append_mbuf, &bt);
2236 			BPFD_UNLOCK(d);
2237 		}
2238 	}
2239 	BPFIF_RUNLOCK(bp);
2240 }
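
/*
 * Illustrative caller (a sketch): pseudo-header taps such as 802.11
 * radiotap hand their out-of-mbuf header to bpf_mtap2() roughly as:
 *
 *	bpf_mtap2(sc->sc_drvbpf, &sc->sc_tx_th, sc->sc_tx_th_len, m);
 *
 * where the sc_* names are hypothetical softc fields holding the BPF
 * attachment, the radiotap header, and its length.
 */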
2241 
2242 #undef	BPF_CHECK_DIRECTION
2243 
2244 #undef	BPF_TSTAMP_NONE
2245 #undef	BPF_TSTAMP_FAST
2246 #undef	BPF_TSTAMP_NORMAL
2247 #undef	BPF_TSTAMP_EXTERN
2248 
2249 static int
2250 bpf_hdrlen(struct bpf_d *d)
2251 {
2252 	int hdrlen;
2253 
2254 	hdrlen = d->bd_bif->bif_hdrlen;
2255 #ifndef BURN_BRIDGES
2256 	if (d->bd_tstamp == BPF_T_NONE ||
2257 	    BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
2258 #ifdef COMPAT_FREEBSD32
2259 		if (d->bd_compat32)
2260 			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32);
2261 		else
2262 #endif
2263 			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr);
2264 	else
2265 #endif
2266 		hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr);
2267 #ifdef COMPAT_FREEBSD32
2268 	if (d->bd_compat32)
2269 		hdrlen = BPF_WORDALIGN32(hdrlen);
2270 	else
2271 #endif
2272 		hdrlen = BPF_WORDALIGN(hdrlen);
2273 
2274 	return (hdrlen - d->bd_bif->bif_hdrlen);
2275 }
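
/*
 * Worked example (a sketch, assuming an LP64 platform, an Ethernet
 * attachment with bif_hdrlen == 14, no bd_compat32, and the default
 * microtime timestamp): SIZEOF_BPF_HDR(struct bpf_hdr) is
 * offsetof(struct bpf_hdr, bh_hdrlen) + sizeof(u_short) = 24 + 2 = 26,
 * BPF_WORDALIGN(14 + 26) = 40, and the function returns 40 - 14 = 26:
 * the space reserved in front of each packet's link header in the
 * store buffer.
 */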
2276 
2277 static void
2278 bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
2279 {
2280 	struct bintime bt2;
2281 	struct timeval tsm;
2282 	struct timespec tsn;
2283 
2284 	if ((tstype & BPF_T_MONOTONIC) == 0) {
2285 		bt2 = *bt;
2286 		bintime_add(&bt2, &boottimebin);
2287 		bt = &bt2;
2288 	}
2289 	switch (BPF_T_FORMAT(tstype)) {
2290 	case BPF_T_MICROTIME:
2291 		bintime2timeval(bt, &tsm);
2292 		ts->bt_sec = tsm.tv_sec;
2293 		ts->bt_frac = tsm.tv_usec;
2294 		break;
2295 	case BPF_T_NANOTIME:
2296 		bintime2timespec(bt, &tsn);
2297 		ts->bt_sec = tsn.tv_sec;
2298 		ts->bt_frac = tsn.tv_nsec;
2299 		break;
2300 	case BPF_T_BINTIME:
2301 		ts->bt_sec = bt->sec;
2302 		ts->bt_frac = bt->frac;
2303 		break;
2304 	}
2305 }
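
/*
 * For example, a descriptor that set BPF_T_NANOTIME | BPF_T_MONOTONIC
 * skips the boottimebin addition above and converts with
 * bintime2timespec(), so bt_sec/bt_frac carry seconds and nanoseconds
 * of uptime rather than wall-clock time.
 */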
2306 
2307 /*
2308  * Move the packet data from interface memory (pkt) into the
2309  * store buffer.  "cpfn" is the routine called to do the actual data
2310  * transfer.  bpf_append_bytes is passed in to copy contiguous chunks,
2311  * while bpf_append_mbuf is passed in to copy mbuf chains.  In the
2312  * latter case, pkt is really an mbuf.
2313  */
2314 static void
2315 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
2316     void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
2317     struct bintime *bt)
2318 {
2319 	struct bpf_xhdr hdr;
2320 #ifndef BURN_BRIDGES
2321 	struct bpf_hdr hdr_old;
2322 #ifdef COMPAT_FREEBSD32
2323 	struct bpf_hdr32 hdr32_old;
2324 #endif
2325 #endif
2326 	int caplen, curlen, hdrlen, totlen;
2327 	int do_wakeup = 0;
2328 	int do_timestamp;
2329 	int tstype;
2330 
2331 	BPFD_LOCK_ASSERT(d);
2332 
2333 	/*
2334 	 * Detect whether user space has released a buffer back to us, and if
2335 	 * so, move it from being a hold buffer to a free buffer.  This may
2336 	 * not be the best place to do it (for example, we might only want to
2337 	 * run this check if we need the space), but for now it's a reliable
2338 	 * spot to do it.
2339 	 */
2340 	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
2341 		while (d->bd_hbuf_in_use)
2342 			mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
2343 			    PRINET, "bd_hbuf", 0);
2344 		d->bd_fbuf = d->bd_hbuf;
2345 		d->bd_hbuf = NULL;
2346 		d->bd_hlen = 0;
2347 		bpf_buf_reclaimed(d);
2348 	}
2349 
2350 	/*
2351 	 * Figure out how many bytes to move.  If the packet is
2352 	 * greater or equal to the snapshot length, transfer that
2353 	 * greater than or equal to the snapshot length, transfer that
2354 	 * we hit the buffer size limit).
2355 	 */
2356 	hdrlen = bpf_hdrlen(d);
2357 	totlen = hdrlen + min(snaplen, pktlen);
2358 	if (totlen > d->bd_bufsize)
2359 		totlen = d->bd_bufsize;
2360 
2361 	/*
2362 	 * Round up the end of the previous packet to the next longword.
2363 	 *
2364 	 * Drop the packet if there's no room and no hope of room.
2365 	 * If the packet would overflow the storage buffer or the storage
2366 	 * buffer is considered immutable by the buffer model, try to rotate
2367 	 * the buffer and wake up pending processes.
2368 	 */
2369 #ifdef COMPAT_FREEBSD32
2370 	if (d->bd_compat32)
2371 		curlen = BPF_WORDALIGN32(d->bd_slen);
2372 	else
2373 #endif
2374 		curlen = BPF_WORDALIGN(d->bd_slen);
2375 	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
2376 		if (d->bd_fbuf == NULL) {
2377 			/*
2378 			 * There's no room in the store buffer, and no
2379 			 * prospect of room, so drop the packet.  Notify the
2380 			 * buffer model.
2381 			 */
2382 			bpf_buffull(d);
2383 			++d->bd_dcount;
2384 			return;
2385 		}
2386 		while (d->bd_hbuf_in_use)
2387 			mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
2388 			    PRINET, "bd_hbuf", 0);
2389 		ROTATE_BUFFERS(d);
2390 		do_wakeup = 1;
2391 		curlen = 0;
2392 	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
2393 		/*
2394 		 * Immediate mode is set, or the read timeout has already
2395 		 * expired during a select call.  A packet arrived, so the
2396 		 * reader should be woken up.
2397 		 */
2398 		do_wakeup = 1;
2399 	caplen = totlen - hdrlen;
2400 	tstype = d->bd_tstamp;
2401 	do_timestamp = tstype != BPF_T_NONE;
2402 #ifndef BURN_BRIDGES
2403 	if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) {
2404 		struct bpf_ts ts;
2405 		if (do_timestamp)
2406 			bpf_bintime2ts(bt, &ts, tstype);
2407 #ifdef COMPAT_FREEBSD32
2408 		if (d->bd_compat32) {
2409 			bzero(&hdr32_old, sizeof(hdr32_old));
2410 			if (do_timestamp) {
2411 				hdr32_old.bh_tstamp.tv_sec = ts.bt_sec;
2412 				hdr32_old.bh_tstamp.tv_usec = ts.bt_frac;
2413 			}
2414 			hdr32_old.bh_datalen = pktlen;
2415 			hdr32_old.bh_hdrlen = hdrlen;
2416 			hdr32_old.bh_caplen = caplen;
2417 			bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
2418 			    sizeof(hdr32_old));
2419 			goto copy;
2420 		}
2421 #endif
2422 		bzero(&hdr_old, sizeof(hdr_old));
2423 		if (do_timestamp) {
2424 			hdr_old.bh_tstamp.tv_sec = ts.bt_sec;
2425 			hdr_old.bh_tstamp.tv_usec = ts.bt_frac;
2426 		}
2427 		hdr_old.bh_datalen = pktlen;
2428 		hdr_old.bh_hdrlen = hdrlen;
2429 		hdr_old.bh_caplen = caplen;
2430 		bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
2431 		    sizeof(hdr_old));
2432 		goto copy;
2433 	}
2434 #endif
2435 
2436 	/*
2437 	 * Append the bpf header.  Note we append the actual header size, but
2438 	 * move forward the length of the header plus padding.
2439 	 */
2440 	bzero(&hdr, sizeof(hdr));
2441 	if (do_timestamp)
2442 		bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype);
2443 	hdr.bh_datalen = pktlen;
2444 	hdr.bh_hdrlen = hdrlen;
2445 	hdr.bh_caplen = caplen;
2446 	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));
2447 
2448 	/*
2449 	 * Copy the packet data into the store buffer and update its length.
2450 	 */
2451 #ifndef BURN_BRIDGES
2452 copy:
2453 #endif
2454 	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
2455 	d->bd_slen = curlen + totlen;
2456 
2457 	if (do_wakeup)
2458 		bpf_wakeup(d);
2459 }
2460 
2461 /*
2462  * Free buffers currently in use by a descriptor.
2463  * Called on close.
2464  */
2465 static void
2466 bpf_freed(struct bpf_d *d)
2467 {
2468 
2469 	/*
2470 	 * We don't need to lock out interrupts since this descriptor has
2471 	 * been detached from its interface and it hasn't yet been marked
2472 	 * free.
2473 	 */
2474 	bpf_free(d);
2475 	if (d->bd_rfilter != NULL) {
2476 		free((caddr_t)d->bd_rfilter, M_BPF);
2477 #ifdef BPF_JITTER
2478 		if (d->bd_bfilter != NULL)
2479 			bpf_destroy_jit_filter(d->bd_bfilter);
2480 #endif
2481 	}
2482 	if (d->bd_wfilter != NULL)
2483 		free((caddr_t)d->bd_wfilter, M_BPF);
2484 	mtx_destroy(&d->bd_lock);
2485 }
2486 
2487 /*
2488  * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
2489  * fixed size of the link header (variable length headers not yet supported).
2490  */
2491 void
2492 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2493 {
2494 
2495 	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2496 }
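
/*
 * Typical usage (a sketch): Ethernet drivers get this call via
 * ether_ifattach(), which does roughly:
 *
 *	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
 *
 * A driver with custom framing passes its own DLT_* constant and fixed
 * link-header length instead.
 */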
2497 
2498 /*
2499  * Attach an interface to bpf.  ifp is a pointer to the structure
2500  * defining the interface to be attached, dlt is the link layer type,
2501  * and hdrlen is the fixed size of the link header (variable length
2502  * headers are not yet supporrted).
2503  * headers are not yet supported).
2504 void
2505 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2506 {
2507 	struct bpf_if *bp;
2508 
2509 	bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
2510 	if (bp == NULL)
2511 		panic("bpfattach");
2512 
2513 	LIST_INIT(&bp->bif_dlist);
2514 	LIST_INIT(&bp->bif_wlist);
2515 	bp->bif_ifp = ifp;
2516 	bp->bif_dlt = dlt;
2517 	rw_init(&bp->bif_lock, "bpf interface lock");
2518 	KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
2519 	*driverp = bp;
2520 
2521 	BPF_LOCK();
2522 	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
2523 	BPF_UNLOCK();
2524 
2525 	bp->bif_hdrlen = hdrlen;
2526 
2527 	if (bootverbose)
2528 		if_printf(ifp, "bpf attached\n");
2529 }
2530 
2531 /*
2532  * Detach bpf from an interface. This involves detaching each descriptor
2533  * associated with the interface. Notify each descriptor as it's detached
2534  * so that any sleepers wake up and get ENXIO.
2535  */
2536 void
2537 bpfdetach(struct ifnet *ifp)
2538 {
2539 	struct bpf_if	*bp, *bp_temp;
2540 	struct bpf_d	*d;
2541 	int ndetached;
2542 
2543 	ndetached = 0;
2544 
2545 	BPF_LOCK();
2546 	/* Find all bpf_if struct's which reference ifp and detach them. */
2547 	/* Find all bpf_if structs that reference ifp and detach them. */
2548 		if (ifp != bp->bif_ifp)
2549 			continue;
2550 
2551 		LIST_REMOVE(bp, bif_next);
2552 		/* Add to to-be-freed list */
2553 		LIST_INSERT_HEAD(&bpf_freelist, bp, bif_next);
2554 
2555 		ndetached++;
2556 		/*
2557 		 * Delay freeing bp till interface is detached
2558 		 * and all routes through this interface are removed.
2559 		 * Mark bp as detached to restrict new consumers.
2560 		 */
2561 		BPFIF_WLOCK(bp);
2562 		bp->flags |= BPFIF_FLAG_DYING;
2563 		BPFIF_WUNLOCK(bp);
2564 
2565 		CTR4(KTR_NET, "%s: scheduling free for encap %d (%p) for if %p",
2566 		    __func__, bp->bif_dlt, bp, ifp);
2567 
2568 		/* Free common descriptors */
2569 		while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
2570 			bpf_detachd_locked(d);
2571 			BPFD_LOCK(d);
2572 			bpf_wakeup(d);
2573 			BPFD_UNLOCK(d);
2574 		}
2575 
2576 		/* Free writer-only descriptors */
2577 		while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) {
2578 			bpf_detachd_locked(d);
2579 			BPFD_LOCK(d);
2580 			bpf_wakeup(d);
2581 			BPFD_UNLOCK(d);
2582 		}
2583 	}
2584 	BPF_UNLOCK();
2585 
2586 #ifdef INVARIANTS
2587 	if (ndetached == 0)
2588 		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
2589 #endif
2590 }
2591 
2592 /*
2593  * Interface departure handler.
2594  * Note that a departure event does not guarantee the interface is going down.
2595  * Interface renaming is currently done via a departure/arrival event pair.
2596  *
2597  * The departure handler is called after all routes pointing to the
2598  * given interface have been removed and the interface is in the down
2599  * state, so no packets can be sent or received.  We assume it is now safe
2600  * to free data allocated by BPF.
2601  */
2602 static void
2603 bpf_ifdetach(void *arg __unused, struct ifnet *ifp)
2604 {
2605 	struct bpf_if *bp, *bp_temp;
2606 	int nmatched = 0;
2607 
2608 	BPF_LOCK();
2609 	/*
2610 	 * Find matching entries in free list.
2611 	 * Nothing should be found if bpfdetach() was not called.
2612 	 */
2613 	LIST_FOREACH_SAFE(bp, &bpf_freelist, bif_next, bp_temp) {
2614 		if (ifp != bp->bif_ifp)
2615 			continue;
2616 
2617 		CTR3(KTR_NET, "%s: freeing BPF instance %p for interface %p",
2618 		    __func__, bp, ifp);
2619 
2620 		LIST_REMOVE(bp, bif_next);
2621 
2622 		rw_destroy(&bp->bif_lock);
2623 		free(bp, M_BPF);
2624 
2625 		nmatched++;
2626 	}
2627 	BPF_UNLOCK();
2628 
2629 	/*
2630 	 * Note that we cannot zero other pointers to
2631 	 * custom DLTs possibly used by the given interface.
2632 	 */
2633 	if (nmatched != 0)
2634 		ifp->if_bpf = NULL;
2635 }
2636 
2637 /*
2638  * Get the list of available data link types for the interface.
2639  */
2640 static int
2641 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
2642 {
2643 	int n, error;
2644 	struct ifnet *ifp;
2645 	struct bpf_if *bp;
2646 
2647 	BPF_LOCK_ASSERT();
2648 
2649 	ifp = d->bd_bif->bif_ifp;
2650 	n = 0;
2651 	error = 0;
2652 	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2653 		if (bp->bif_ifp != ifp)
2654 			continue;
2655 		if (bfl->bfl_list != NULL) {
2656 			if (n >= bfl->bfl_len)
2657 				return (ENOMEM);
2658 			error = copyout(&bp->bif_dlt,
2659 			    bfl->bfl_list + n, sizeof(u_int));
2660 		}
2661 		n++;
2662 	}
2663 	bfl->bfl_len = n;
2664 	return (error);
2665 }
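
/*
 * Illustrative userland counterpart (a sketch): BIOCGDLTLIST is commonly
 * issued twice, first with bfl_list == NULL so this function only counts
 * the DLTs (bfl_len is written back either way), then with a buffer:
 *
 *	#include <sys/ioctl.h>
 *	#include <net/bpf.h>
 *	#include <err.h>
 *	#include <stdlib.h>
 *
 *	struct bpf_dltlist bfl;
 *
 *	bfl.bfl_list = NULL;
 *	if (ioctl(fd, BIOCGDLTLIST, &bfl) == -1)
 *		err(1, "BIOCGDLTLIST");
 *	if ((bfl.bfl_list = calloc(bfl.bfl_len, sizeof(u_int))) == NULL)
 *		err(1, "calloc");
 *	if (ioctl(fd, BIOCGDLTLIST, &bfl) == -1)
 *		err(1, "BIOCGDLTLIST");
 */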
2666 
2667 /*
2668  * Set the data link type of a BPF instance.
2669  */
2670 static int
2671 bpf_setdlt(struct bpf_d *d, u_int dlt)
2672 {
2673 	int error, opromisc;
2674 	struct ifnet *ifp;
2675 	struct bpf_if *bp;
2676 
2677 	BPF_LOCK_ASSERT();
2678 
2679 	if (d->bd_bif->bif_dlt == dlt)
2680 		return (0);
2681 	ifp = d->bd_bif->bif_ifp;
2682 
2683 	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2684 		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
2685 			break;
2686 	}
2687 
2688 	if (bp != NULL) {
2689 		opromisc = d->bd_promisc;
2690 		bpf_attachd(d, bp);
2691 		BPFD_LOCK(d);
2692 		reset_d(d);
2693 		BPFD_UNLOCK(d);
2694 		if (opromisc) {
2695 			error = ifpromisc(bp->bif_ifp, 1);
2696 			if (error)
2697 				if_printf(bp->bif_ifp,
2698 					"bpf_setdlt: ifpromisc failed (%d)\n",
2699 					error);
2700 			else
2701 				d->bd_promisc = 1;
2702 		}
2703 	}
2704 	return (bp == NULL ? EINVAL : 0);
2705 }
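
/*
 * Illustrative userland counterpart (a sketch): the link type is switched
 * with the BIOCSDLT ioctl, which lands here:
 *
 *	u_int dlt = DLT_EN10MB;
 *
 *	if (ioctl(fd, BIOCSDLT, &dlt) == -1)
 *		err(1, "BIOCSDLT");
 */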
2706 
2707 static void
2708 bpf_drvinit(void *unused)
2709 {
2710 	struct cdev *dev;
2711 
2712 	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
2713 	LIST_INIT(&bpf_iflist);
2714 	LIST_INIT(&bpf_freelist);
2715 
2716 	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
2717 	/* For compatibility */
2718 	make_dev_alias(dev, "bpf0");
2719 
2720 	/* Register interface departure handler */
2721 	bpf_ifdetach_cookie = EVENTHANDLER_REGISTER(
2722 		    ifnet_departure_event, bpf_ifdetach, NULL,
2723 		    EVENTHANDLER_PRI_ANY);
2724 }
2725 
2726 /*
2727  * Zero out the various packet counters associated with all of the bpf
2728  * descriptors.  At some point, we will probably want to get a bit more
2729  * granular and allow the user to specify descriptors to be zeroed.
2730  */
2731 static void
2732 bpf_zero_counters(void)
2733 {
2734 	struct bpf_if *bp;
2735 	struct bpf_d *bd;
2736 
2737 	BPF_LOCK();
2738 	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2739 		BPFIF_RLOCK(bp);
2740 		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2741 			BPFD_LOCK(bd);
2742 			bd->bd_rcount = 0;
2743 			bd->bd_dcount = 0;
2744 			bd->bd_fcount = 0;
2745 			bd->bd_wcount = 0;
2746 			bd->bd_wfcount = 0;
2747 			bd->bd_zcopy = 0;
2748 			BPFD_UNLOCK(bd);
2749 		}
2750 		BPFIF_RUNLOCK(bp);
2751 	}
2752 	BPF_UNLOCK();
2753 }
2754 
2755 /*
2756  * Fill filter statistics
2757  */
2758 static void
2759 bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
2760 {
2761 
2762 	bzero(d, sizeof(*d));
2763 	BPFD_LOCK_ASSERT(bd);
2764 	d->bd_structsize = sizeof(*d);
2765 	/* XXX: reading should be protected by global lock */
2766 	d->bd_immediate = bd->bd_immediate;
2767 	d->bd_promisc = bd->bd_promisc;
2768 	d->bd_hdrcmplt = bd->bd_hdrcmplt;
2769 	d->bd_direction = bd->bd_direction;
2770 	d->bd_feedback = bd->bd_feedback;
2771 	d->bd_async = bd->bd_async;
2772 	d->bd_rcount = bd->bd_rcount;
2773 	d->bd_dcount = bd->bd_dcount;
2774 	d->bd_fcount = bd->bd_fcount;
2775 	d->bd_sig = bd->bd_sig;
2776 	d->bd_slen = bd->bd_slen;
2777 	d->bd_hlen = bd->bd_hlen;
2778 	d->bd_bufsize = bd->bd_bufsize;
2779 	d->bd_pid = bd->bd_pid;
2780 	strlcpy(d->bd_ifname,
2781 	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
2782 	d->bd_locked = bd->bd_locked;
2783 	d->bd_wcount = bd->bd_wcount;
2784 	d->bd_wdcount = bd->bd_wdcount;
2785 	d->bd_wfcount = bd->bd_wfcount;
2786 	d->bd_zcopy = bd->bd_zcopy;
2787 	d->bd_bufmode = bd->bd_bufmode;
2788 }
2789 
2790 /*
2791  * Handle `netstat -B' stats request
2792  */
2793 static int
2794 bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
2795 {
2796 	static const struct xbpf_d zerostats;
2797 	struct xbpf_d *xbdbuf, *xbd, tempstats;
2798 	int index, error;
2799 	struct bpf_if *bp;
2800 	struct bpf_d *bd;
2801 
2802 	/*
2803 	 * XXX This is not technically correct.  It is possible for
2804 	 * unprivileged users to open bpf devices.  It would make sense
2805 	 * if the users who opened the devices were able to retrieve
2806 	 * the statistics for them, too.
2807 	 */
2808 	error = priv_check(req->td, PRIV_NET_BPF);
2809 	if (error)
2810 		return (error);
2811 	/*
2812 	 * Check to see if the user is requesting that the counters be
2813 	 * zeroed out.  Explicitly check that the supplied data is zeroed,
2814 	 * as we aren't allowing the user to set the counters currently.
2815 	 */
2816 	if (req->newptr != NULL) {
2817 		if (req->newlen != sizeof(tempstats))
2818 			return (EINVAL);
2819 		memset(&tempstats, 0, sizeof(tempstats));
2820 		error = SYSCTL_IN(req, &tempstats, sizeof(tempstats));
2821 		if (error)
2822 			return (error);
2823 		if (bcmp(&tempstats, &zerostats, sizeof(tempstats)) != 0)
2824 			return (EINVAL);
2825 		bpf_zero_counters();
2826 		return (0);
2827 	}
2828 	if (req->oldptr == NULL)
2829 		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
2830 	if (bpf_bpfd_cnt == 0)
2831 		return (SYSCTL_OUT(req, 0, 0));
2832 	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
2833 	BPF_LOCK();
2834 	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
2835 		BPF_UNLOCK();
2836 		free(xbdbuf, M_BPF);
2837 		return (ENOMEM);
2838 	}
2839 	index = 0;
2840 	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2841 		BPFIF_RLOCK(bp);
2842 		/* Send writers-only first */
2843 		LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
2844 			xbd = &xbdbuf[index++];
2845 			BPFD_LOCK(bd);
2846 			bpfstats_fill_xbpf(xbd, bd);
2847 			BPFD_UNLOCK(bd);
2848 		}
2849 		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2850 			xbd = &xbdbuf[index++];
2851 			BPFD_LOCK(bd);
2852 			bpfstats_fill_xbpf(xbd, bd);
2853 			BPFD_UNLOCK(bd);
2854 		}
2855 		BPFIF_RUNLOCK(bp);
2856 	}
2857 	BPF_UNLOCK();
2858 	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
2859 	free(xbdbuf, M_BPF);
2860 	return (error);
2861 }
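
/*
 * Illustrative userland counterpart (a sketch, assuming this handler is
 * attached as the net.bpf.stats sysctl): readers use the usual
 * size-then-fetch pattern to get one struct xbpf_d per descriptor, and
 * writing an all-zero struct xbpf_d zeroes the counters via
 * bpf_zero_counters() above.
 *
 *	size_t len;
 *	struct xbpf_d *buf;
 *
 *	if (sysctlbyname("net.bpf.stats", NULL, &len, NULL, 0) == -1)
 *		err(1, "net.bpf.stats");
 *	if ((buf = malloc(len)) == NULL)
 *		err(1, "malloc");
 *	if (sysctlbyname("net.bpf.stats", buf, &len, NULL, 0) == -1)
 *		err(1, "net.bpf.stats");
 */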
2862 
2863 SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);
2864 
2865 #else /* !DEV_BPF && !NETGRAPH_BPF */
2866 /*
2867  * NOP stubs to allow bpf-using drivers to load and function.
2868  *
2869  * A 'better' implementation would allow the core bpf functionality
2870  * to be loaded at runtime.
2871  */
2872 static struct bpf_if bp_null;
2873 
2874 void
2875 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2876 {
2877 }
2878 
2879 void
2880 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2881 {
2882 }
2883 
2884 void
2885 bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
2886 {
2887 }
2888 
2889 void
2890 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2891 {
2892 
2893 	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2894 }
2895 
2896 void
2897 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2898 {
2899 
2900 	*driverp = &bp_null;
2901 }
2902 
2903 void
2904 bpfdetach(struct ifnet *ifp)
2905 {
2906 }
2907 
2908 u_int
2909 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
2910 {
2911 	return (-1);	/* "no filter" behaviour */
2912 }
2913 
2914 int
2915 bpf_validate(const struct bpf_insn *f, int len)
2916 {
2917 	return (0);		/* false */
2918 }
2919 
2920 #endif /* !DEV_BPF && !NETGRAPH_BPF */
2921