xref: /freebsd/sys/net/bpf.c (revision 595e514d0df2bac5b813d35f83e32875dbf16a83)
1 /*-
2  * Copyright (c) 1990, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from the Stanford/CMU enet packet filter,
6  * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7  * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8  * Berkeley Laboratory.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *      @(#)bpf.c	8.4 (Berkeley) 1/9/95
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include "opt_bpf.h"
41 #include "opt_compat.h"
42 #include "opt_netgraph.h"
43 
44 #include <sys/types.h>
45 #include <sys/param.h>
46 #include <sys/lock.h>
47 #include <sys/rwlock.h>
48 #include <sys/systm.h>
49 #include <sys/conf.h>
50 #include <sys/fcntl.h>
51 #include <sys/jail.h>
52 #include <sys/malloc.h>
53 #include <sys/mbuf.h>
54 #include <sys/time.h>
55 #include <sys/priv.h>
56 #include <sys/proc.h>
57 #include <sys/signalvar.h>
58 #include <sys/filio.h>
59 #include <sys/sockio.h>
60 #include <sys/ttycom.h>
61 #include <sys/uio.h>
62 
63 #include <sys/event.h>
64 #include <sys/file.h>
65 #include <sys/poll.h>
66 #include <sys/proc.h>
67 
68 #include <sys/socket.h>
69 
70 #include <net/if.h>
71 #define	BPF_INTERNAL
72 #include <net/bpf.h>
73 #include <net/bpf_buffer.h>
74 #ifdef BPF_JITTER
75 #include <net/bpf_jitter.h>
76 #endif
77 #include <net/bpf_zerocopy.h>
78 #include <net/bpfdesc.h>
79 #include <net/vnet.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/if_ether.h>
83 #include <sys/kernel.h>
84 #include <sys/sysctl.h>
85 
86 #include <net80211/ieee80211_freebsd.h>
87 
88 #include <security/mac/mac_framework.h>
89 
90 MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
91 
92 #if defined(DEV_BPF) || defined(NETGRAPH_BPF)
93 
94 #define PRINET  26			/* interruptible */
95 
96 #define	SIZEOF_BPF_HDR(type)	\
97     (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))
98 
99 #ifdef COMPAT_FREEBSD32
100 #include <sys/mount.h>
101 #include <compat/freebsd32/freebsd32.h>
102 #define BPF_ALIGNMENT32 sizeof(int32_t)
103 #define BPF_WORDALIGN32(x) (((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1))
104 
105 #ifndef BURN_BRIDGES
106 /*
107  * 32-bit version of structure prepended to each packet.  We use this header
108  * instead of the standard one for 32-bit streams.  We mark the a stream as
109  * instead of the standard one for 32-bit streams.  We mark a stream as
110  */
111 struct bpf_hdr32 {
112 	struct timeval32 bh_tstamp;	/* time stamp */
113 	uint32_t	bh_caplen;	/* length of captured portion */
114 	uint32_t	bh_datalen;	/* original length of packet */
115 	uint16_t	bh_hdrlen;	/* length of bpf header (this struct
116 					   plus alignment padding) */
117 };
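/*
 * Illustrative arithmetic (an assumption about the usual layout, not part of
 * the original code): with an 8-byte timeval32 and two 32-bit lengths,
 * offsetof(struct bpf_hdr32, bh_hdrlen) is 16, so SIZEOF_BPF_HDR(struct
 * bpf_hdr32) evaluates to 18, whereas sizeof(struct bpf_hdr32) would be 20
 * because of trailing padding.  SIZEOF_BPF_HDR therefore measures the header
 * up to the last byte actually written, which is what bpf_hdrlen() wants
 * before word-aligning the result.
 */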
118 #endif
119 
120 struct bpf_program32 {
121 	u_int bf_len;
122 	uint32_t bf_insns;
123 };
124 
125 struct bpf_dltlist32 {
126 	u_int	bfl_len;
127 	u_int	bfl_list;
128 };
129 
130 #define	BIOCSETF32	_IOW('B', 103, struct bpf_program32)
131 #define	BIOCSRTIMEOUT32	_IOW('B', 109, struct timeval32)
132 #define	BIOCGRTIMEOUT32	_IOR('B', 110, struct timeval32)
133 #define	BIOCGDLTLIST32	_IOWR('B', 121, struct bpf_dltlist32)
134 #define	BIOCSETWF32	_IOW('B', 123, struct bpf_program32)
135 #define	BIOCSETFNR32	_IOW('B', 130, struct bpf_program32)
136 #endif
137 
138 /*
139  * bpf_iflist is a list of BPF interface structures, each corresponding to a
140  * specific DLT.  The same network interface might have several BPF interface
141  * structures registered by different layers in the stack (e.g., 802.11
142  * frames, Ethernet frames, etc.).
143  */
144 static LIST_HEAD(, bpf_if)	bpf_iflist, bpf_freelist;
145 static struct mtx	bpf_mtx;		/* bpf global lock */
146 static int		bpf_bpfd_cnt;
147 
148 static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
149 static void	bpf_detachd(struct bpf_d *);
150 static void	bpf_detachd_locked(struct bpf_d *);
151 static void	bpf_freed(struct bpf_d *);
152 static int	bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
153 		    struct sockaddr *, int *, struct bpf_insn *);
154 static int	bpf_setif(struct bpf_d *, struct ifreq *);
155 static void	bpf_timed_out(void *);
156 static __inline void
157 		bpf_wakeup(struct bpf_d *);
158 static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
159 		    void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
160 		    struct bintime *);
161 static void	reset_d(struct bpf_d *);
162 static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
163 static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
164 static int	bpf_setdlt(struct bpf_d *, u_int);
165 static void	filt_bpfdetach(struct knote *);
166 static int	filt_bpfread(struct knote *, long);
167 static void	bpf_drvinit(void *);
168 static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);
169 
170 SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
171 int bpf_maxinsns = BPF_MAXINSNS;
172 SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
173     &bpf_maxinsns, 0, "Maximum bpf program instructions");
174 static int bpf_zerocopy_enable = 0;
175 SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
176     &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
177 static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
178     bpf_stats_sysctl, "bpf statistics portal");
179 
180 static VNET_DEFINE(int, bpf_optimize_writers) = 0;
181 #define	V_bpf_optimize_writers VNET(bpf_optimize_writers)
182 SYSCTL_VNET_INT(_net_bpf, OID_AUTO, optimize_writers,
183     CTLFLAG_RW, &VNET_NAME(bpf_optimize_writers), 0,
184     "Do not send packets until BPF program is set");
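/*
 * Illustrative usage (not from the original source): the knobs declared
 * above live under the net.bpf sysctl tree and would typically be toggled
 * from userland as, e.g.:
 *
 *	sysctl net.bpf.zerocopy_enable=1
 *	sysctl net.bpf.optimize_writers=1
 */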
185 
186 static	d_open_t	bpfopen;
187 static	d_read_t	bpfread;
188 static	d_write_t	bpfwrite;
189 static	d_ioctl_t	bpfioctl;
190 static	d_poll_t	bpfpoll;
191 static	d_kqfilter_t	bpfkqfilter;
192 
193 static struct cdevsw bpf_cdevsw = {
194 	.d_version =	D_VERSION,
195 	.d_open =	bpfopen,
196 	.d_read =	bpfread,
197 	.d_write =	bpfwrite,
198 	.d_ioctl =	bpfioctl,
199 	.d_poll =	bpfpoll,
200 	.d_name =	"bpf",
201 	.d_kqfilter =	bpfkqfilter,
202 };
203 
204 static struct filterops bpfread_filtops = {
205 	.f_isfd = 1,
206 	.f_detach = filt_bpfdetach,
207 	.f_event = filt_bpfread,
208 };
209 
210 eventhandler_tag	bpf_ifdetach_cookie = NULL;
211 
212 /*
213  * LOCKING MODEL USED BY BPF:
214  * Locks:
215  * 1) global lock (BPF_LOCK). Mutex, used to protect interface addition/removal,
216  * some global counters and every bpf_if reference.
217  * 2) Interface lock. Rwlock, used to protect list of BPF descriptors and their filters.
218  * 3) Descriptor lock. Mutex, used to protect BPF buffers and various structure fields
219  *   used by bpf_mtap code.
220  *
221  * Lock order:
222  *
223  * Global lock, interface lock, descriptor lock
224  *
225  * We have to acquire the interface lock before the descriptor main lock because
226  * of the BPF_MTAP[2] working model. In many places (like bpf_detachd) we start with
227  * a BPF descriptor (and we need to at least rlock it to get a reliable interface
228  * pointer). This gives us a potential lock order reversal (LOR). As a result, we use
229  * the global lock to protect against bpf_if changes in every such place.
230  *
231  * Changing d->bd_bif is protected by 1) global lock, 2) interface lock and
232  * 3) descriptor main wlock.
233  * Reading bd_bif can be protected by any of these locks, typically global lock.
234  *
235  * Changing the read/write BPF filters is protected by the same three locks;
236  * the same applies to reading them.
237  *
238  * Sleeping while holding the global lock is not allowed, since bpfdetach() uses it.
239  */
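/*
 * A minimal sketch of the two lock sequences described above (purely
 * illustrative; these are not additional code paths, just the order in which
 * the existing BPF_LOCK/BPFIF_*LOCK/BPFD_LOCK macros are taken):
 *
 *	Control path (e.g. bpf_setf(), bpf_setif()):
 *		BPF_LOCK();
 *		BPFIF_WLOCK(bp);
 *		BPFD_LOCK(d);
 *		... change d->bd_bif or the filters ...
 *		BPFD_UNLOCK(d);
 *		BPFIF_WUNLOCK(bp);
 *		BPF_UNLOCK();
 *
 *	Data path (bpf_tap(), bpf_mtap()):
 *		BPFIF_RLOCK(bp);
 *		... run the filters without the descriptor lock ...
 *		BPFD_LOCK(d);		(only once a filter matches)
 *		catchpacket(...);
 *		BPFD_UNLOCK(d);
 *		BPFIF_RUNLOCK(bp);
 */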
240 
241 /*
242  * Wrapper functions for various buffering methods.  If the set of buffer
243  * modes expands, we will probably want to introduce a switch data structure
244  * similar to protosw, etc.
245  */
246 static void
247 bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
248     u_int len)
249 {
250 
251 	BPFD_LOCK_ASSERT(d);
252 
253 	switch (d->bd_bufmode) {
254 	case BPF_BUFMODE_BUFFER:
255 		return (bpf_buffer_append_bytes(d, buf, offset, src, len));
256 
257 	case BPF_BUFMODE_ZBUF:
258 		d->bd_zcopy++;
259 		return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
260 
261 	default:
262 		panic("bpf_buf_append_bytes");
263 	}
264 }
265 
266 static void
267 bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
268     u_int len)
269 {
270 
271 	BPFD_LOCK_ASSERT(d);
272 
273 	switch (d->bd_bufmode) {
274 	case BPF_BUFMODE_BUFFER:
275 		return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
276 
277 	case BPF_BUFMODE_ZBUF:
278 		d->bd_zcopy++;
279 		return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
280 
281 	default:
282 		panic("bpf_buf_append_mbuf");
283 	}
284 }
285 
286 /*
287  * This function gets called when the free buffer is re-assigned.
288  */
289 static void
290 bpf_buf_reclaimed(struct bpf_d *d)
291 {
292 
293 	BPFD_LOCK_ASSERT(d);
294 
295 	switch (d->bd_bufmode) {
296 	case BPF_BUFMODE_BUFFER:
297 		return;
298 
299 	case BPF_BUFMODE_ZBUF:
300 		bpf_zerocopy_buf_reclaimed(d);
301 		return;
302 
303 	default:
304 		panic("bpf_buf_reclaimed");
305 	}
306 }
307 
308 /*
309  * If the buffer mechanism has a way to decide that a held buffer can be made
310  * free, then it is exposed via the bpf_canfreebuf() interface.  (1) is
311  * returned if the buffer can be discarded, (0) is returned if it cannot.
312  */
313 static int
314 bpf_canfreebuf(struct bpf_d *d)
315 {
316 
317 	BPFD_LOCK_ASSERT(d);
318 
319 	switch (d->bd_bufmode) {
320 	case BPF_BUFMODE_ZBUF:
321 		return (bpf_zerocopy_canfreebuf(d));
322 	}
323 	return (0);
324 }
325 
326 /*
327  * Allow the buffer model to indicate that the current store buffer is
328  * immutable, regardless of the appearance of space.  Return (1) if the
329  * buffer is writable, and (0) if not.
330  */
331 static int
332 bpf_canwritebuf(struct bpf_d *d)
333 {
334 	BPFD_LOCK_ASSERT(d);
335 
336 	switch (d->bd_bufmode) {
337 	case BPF_BUFMODE_ZBUF:
338 		return (bpf_zerocopy_canwritebuf(d));
339 	}
340 	return (1);
341 }
342 
343 /*
344  * Notify buffer model that an attempt to write to the store buffer has
345  * resulted in a dropped packet, in which case the buffer may be considered
346  * full.
347  */
348 static void
349 bpf_buffull(struct bpf_d *d)
350 {
351 
352 	BPFD_LOCK_ASSERT(d);
353 
354 	switch (d->bd_bufmode) {
355 	case BPF_BUFMODE_ZBUF:
356 		bpf_zerocopy_buffull(d);
357 		break;
358 	}
359 }
360 
361 /*
362  * Notify the buffer model that a buffer has moved into the hold position.
363  */
364 void
365 bpf_bufheld(struct bpf_d *d)
366 {
367 
368 	BPFD_LOCK_ASSERT(d);
369 
370 	switch (d->bd_bufmode) {
371 	case BPF_BUFMODE_ZBUF:
372 		bpf_zerocopy_bufheld(d);
373 		break;
374 	}
375 }
376 
377 static void
378 bpf_free(struct bpf_d *d)
379 {
380 
381 	switch (d->bd_bufmode) {
382 	case BPF_BUFMODE_BUFFER:
383 		return (bpf_buffer_free(d));
384 
385 	case BPF_BUFMODE_ZBUF:
386 		return (bpf_zerocopy_free(d));
387 
388 	default:
389 		panic("bpf_buf_free");
390 	}
391 }
392 
393 static int
394 bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
395 {
396 
397 	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
398 		return (EOPNOTSUPP);
399 	return (bpf_buffer_uiomove(d, buf, len, uio));
400 }
401 
402 static int
403 bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
404 {
405 
406 	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
407 		return (EOPNOTSUPP);
408 	return (bpf_buffer_ioctl_sblen(d, i));
409 }
410 
411 static int
412 bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
413 {
414 
415 	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
416 		return (EOPNOTSUPP);
417 	return (bpf_zerocopy_ioctl_getzmax(td, d, i));
418 }
419 
420 static int
421 bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
422 {
423 
424 	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
425 		return (EOPNOTSUPP);
426 	return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
427 }
428 
429 static int
430 bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
431 {
432 
433 	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
434 		return (EOPNOTSUPP);
435 	return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
436 }
437 
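/*
 * Userland side of the zero-copy handshake served by the wrappers above
 * (a hedged sketch only; error handling and the bpf_zbuf_header
 * generation-count protocol are omitted, and the exact size/alignment
 * constraints are enforced in bpf_zerocopy.c, not here):
 *
 *	u_int mode = BPF_BUFMODE_ZBUF;
 *	size_t zmax;
 *	struct bpf_zbuf zb;
 *
 *	ioctl(fd, BIOCSETBUFMODE, &mode);
 *	ioctl(fd, BIOCGETZMAX, &zmax);
 *	zb.bz_buflen = buflen;			(page-aligned, at most zmax)
 *	zb.bz_bufa = mmap(NULL, buflen, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	zb.bz_bufb = mmap(NULL, buflen, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	ioctl(fd, BIOCSETZBUF, &zb);
 *	ioctl(fd, BIOCSETIF, &ifr);		(buffers must exist first)
 */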
438 /*
439  * General BPF functions.
440  */
441 static int
442 bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
443     struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
444 {
445 	const struct ieee80211_bpf_params *p;
446 	struct ether_header *eh;
447 	struct mbuf *m;
448 	int error;
449 	int len;
450 	int hlen;
451 	int slen;
452 
453 	/*
454 	 * Build a sockaddr based on the data link layer type.
455 	 * We do this at this level because the ethernet header
456 	 * is copied directly into the data field of the sockaddr.
457 	 * In the case of SLIP, there is no header and the packet
458 	 * is forwarded as is.
459 	 * Also, we are careful to leave room at the front of the mbuf
460 	 * for the link level header.
461 	 */
462 	switch (linktype) {
463 
464 	case DLT_SLIP:
465 		sockp->sa_family = AF_INET;
466 		hlen = 0;
467 		break;
468 
469 	case DLT_EN10MB:
470 		sockp->sa_family = AF_UNSPEC;
471 		/* XXX Would MAXLINKHDR be better? */
472 		hlen = ETHER_HDR_LEN;
473 		break;
474 
475 	case DLT_FDDI:
476 		sockp->sa_family = AF_IMPLINK;
477 		hlen = 0;
478 		break;
479 
480 	case DLT_RAW:
481 		sockp->sa_family = AF_UNSPEC;
482 		hlen = 0;
483 		break;
484 
485 	case DLT_NULL:
486 		/*
487 		 * null interface types require a 4-byte pseudo header which
488 		 * corresponds to the address family of the packet.
489 		 */
490 		sockp->sa_family = AF_UNSPEC;
491 		hlen = 4;
492 		break;
493 
494 	case DLT_ATM_RFC1483:
495 		/*
496 		 * The en ATM driver requires a 4-byte ATM pseudo header.
497 		 * Though it isn't standard, vpi:vci needs to be
498 		 * specified anyway.
499 		 */
500 		sockp->sa_family = AF_UNSPEC;
501 		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
502 		break;
503 
504 	case DLT_PPP:
505 		sockp->sa_family = AF_UNSPEC;
506 		hlen = 4;	/* This should match PPP_HDRLEN */
507 		break;
508 
509 	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
510 		sockp->sa_family = AF_IEEE80211;
511 		hlen = 0;
512 		break;
513 
514 	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
515 		sockp->sa_family = AF_IEEE80211;
516 		sockp->sa_len = 12;	/* XXX != 0 */
517 		hlen = sizeof(struct ieee80211_bpf_params);
518 		break;
519 
520 	default:
521 		return (EIO);
522 	}
523 
524 	len = uio->uio_resid;
525 	if (len < hlen || len - hlen > ifp->if_mtu)
526 		return (EMSGSIZE);
527 
528 	m = m_get2(len, M_WAITOK, MT_DATA, M_PKTHDR);
529 	if (m == NULL)
530 		return (EIO);
531 	m->m_pkthdr.len = m->m_len = len;
532 	*mp = m;
533 
534 	error = uiomove(mtod(m, u_char *), len, uio);
535 	if (error)
536 		goto bad;
537 
538 	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
539 	if (slen == 0) {
540 		error = EPERM;
541 		goto bad;
542 	}
543 
544 	/* Check for multicast destination */
545 	switch (linktype) {
546 	case DLT_EN10MB:
547 		eh = mtod(m, struct ether_header *);
548 		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
549 			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
550 			    ETHER_ADDR_LEN) == 0)
551 				m->m_flags |= M_BCAST;
552 			else
553 				m->m_flags |= M_MCAST;
554 		}
555 		break;
556 	}
557 
558 	/*
559 	 * Make room for link header, and copy it to sockaddr
560 	 */
561 	if (hlen != 0) {
562 		if (sockp->sa_family == AF_IEEE80211) {
563 			/*
564 			 * Collect true length from the parameter header
565 			 * NB: sockp is known to be zero'd so if we do a
566 			 *     short copy unspecified parameters will be
567 			 *     zero.
568 			 * NB: packet may not be aligned after stripping
569 			 *     bpf params
570 			 * XXX check ibp_vers
571 			 */
572 			p = mtod(m, const struct ieee80211_bpf_params *);
573 			hlen = p->ibp_len;
574 			if (hlen > sizeof(sockp->sa_data)) {
575 				error = EINVAL;
576 				goto bad;
577 			}
578 		}
579 		bcopy(m->m_data, sockp->sa_data, hlen);
580 	}
581 	*hdrlen = hlen;
582 
583 	return (0);
584 bad:
585 	m_freem(m);
586 	return (error);
587 }
588 
589 /*
590  * Attach file to the bpf interface, i.e. make d listen on bp.
591  */
592 static void
593 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
594 {
595 	int op_w;
596 
597 	BPF_LOCK_ASSERT();
598 
599 	/*
600 	 * Save the sysctl value to guard against the sysctl changing
601 	 * between reads.
602 	 */
603 	op_w = V_bpf_optimize_writers;
604 
605 	if (d->bd_bif != NULL)
606 		bpf_detachd_locked(d);
607 	/*
608 	 * Point d at bp, and add d to the interface's list.
609 	 * Since there are many applications using BPF for
610 	 * sending raw packets only (dhcpd and cdpd are good examples),
611 	 * we can delay adding d to the list of active listeners until
612 	 * some filter is configured.
613 	 */
614 
615 	BPFIF_WLOCK(bp);
616 	BPFD_LOCK(d);
617 
618 	d->bd_bif = bp;
619 
620 	if (op_w != 0) {
621 		/* Add to writers-only list */
622 		LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
623 		/*
624 		 * We decrement bd_writer on every filter set operation.
625 		 * The first BIOCSETF is done by pcap_open_live() to set up the
626 		 * snap length.  After that the application usually sets its own filter.
627 		 */
628 		d->bd_writer = 2;
629 	} else
630 		LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
631 
632 	BPFD_UNLOCK(d);
633 	BPFIF_WUNLOCK(bp);
634 
635 	bpf_bpfd_cnt++;
636 
637 	CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
638 	    __func__, d->bd_pid, d->bd_writer ? "writer" : "active");
639 
640 	if (op_w == 0)
641 		EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
642 }
643 
644 /*
645  * Add d to the list of active filters on bp.
646  * Requires bpf_attachd() to have been called first.
647  */
648 static void
649 bpf_upgraded(struct bpf_d *d)
650 {
651 	struct bpf_if *bp;
652 
653 	BPF_LOCK_ASSERT();
654 
655 	bp = d->bd_bif;
656 
657 	/*
658 	 * Filter can be set several times without specifying interface.
659 	 * Mark d as reader and exit.
660 	 */
661 	if (bp == NULL) {
662 		BPFD_LOCK(d);
663 		d->bd_writer = 0;
664 		BPFD_UNLOCK(d);
665 		return;
666 	}
667 
668 	BPFIF_WLOCK(bp);
669 	BPFD_LOCK(d);
670 
671 	/* Remove from writers-only list */
672 	LIST_REMOVE(d, bd_next);
673 	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
674 	/* Mark d as reader */
675 	d->bd_writer = 0;
676 
677 	BPFD_UNLOCK(d);
678 	BPFIF_WUNLOCK(bp);
679 
680 	CTR2(KTR_NET, "%s: upgrade required by pid %d", __func__, d->bd_pid);
681 
682 	EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
683 }
684 
685 /*
686  * Detach a file from its interface.
687  */
688 static void
689 bpf_detachd(struct bpf_d *d)
690 {
691 	BPF_LOCK();
692 	bpf_detachd_locked(d);
693 	BPF_UNLOCK();
694 }
695 
696 static void
697 bpf_detachd_locked(struct bpf_d *d)
698 {
699 	int error;
700 	struct bpf_if *bp;
701 	struct ifnet *ifp;
702 
703 	CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);
704 
705 	BPF_LOCK_ASSERT();
706 
707 	/* Check if descriptor is attached */
708 	if ((bp = d->bd_bif) == NULL)
709 		return;
710 
711 	BPFIF_WLOCK(bp);
712 	BPFD_LOCK(d);
713 
714 	/* Save bd_writer value */
715 	error = d->bd_writer;
716 
717 	/*
718 	 * Remove d from the interface's descriptor list.
719 	 */
720 	LIST_REMOVE(d, bd_next);
721 
722 	ifp = bp->bif_ifp;
723 	d->bd_bif = NULL;
724 	BPFD_UNLOCK(d);
725 	BPFIF_WUNLOCK(bp);
726 
727 	bpf_bpfd_cnt--;
728 
729 	/* Call event handler iff d is attached */
730 	if (error == 0)
731 		EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);
732 
733 	/*
734 	 * Check if this descriptor had requested promiscuous mode.
735 	 * If so, turn it off.
736 	 */
737 	if (d->bd_promisc) {
738 		d->bd_promisc = 0;
739 		CURVNET_SET(ifp->if_vnet);
740 		error = ifpromisc(ifp, 0);
741 		CURVNET_RESTORE();
742 		if (error != 0 && error != ENXIO) {
743 			 * ENXIO can happen if a pccard is unplugged.
744 			 * ENXIO can happen if a pccard is unplugged
745 			 * Something is really wrong if we were able to put
746 			 * the driver into promiscuous mode, but can't
747 			 * take it out.
748 			 */
749 			if_printf(bp->bif_ifp,
750 				"bpf_detach: ifpromisc failed (%d)\n", error);
751 		}
752 	}
753 }
754 
755 /*
756  * Close the descriptor by detaching it from its interface,
757  * deallocating its buffers, and marking it free.
758  */
759 static void
760 bpf_dtor(void *data)
761 {
762 	struct bpf_d *d = data;
763 
764 	BPFD_LOCK(d);
765 	if (d->bd_state == BPF_WAITING)
766 		callout_stop(&d->bd_callout);
767 	d->bd_state = BPF_IDLE;
768 	BPFD_UNLOCK(d);
769 	funsetown(&d->bd_sigio);
770 	bpf_detachd(d);
771 #ifdef MAC
772 	mac_bpfdesc_destroy(d);
773 #endif /* MAC */
774 	seldrain(&d->bd_sel);
775 	knlist_destroy(&d->bd_sel.si_note);
776 	callout_drain(&d->bd_callout);
777 	bpf_freed(d);
778 	free(d, M_BPF);
779 }
780 
781 /*
782  * Open ethernet device.  Returns ENXIO for illegal minor device number,
783  * EBUSY if file is open by another process.
784  */
785 /* ARGSUSED */
786 static	int
787 bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
788 {
789 	struct bpf_d *d;
790 	int error, size;
791 
792 	d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
793 	error = devfs_set_cdevpriv(d, bpf_dtor);
794 	if (error != 0) {
795 		free(d, M_BPF);
796 		return (error);
797 	}
798 
799 	/*
800 	 * For historical reasons, perform a one-time initialization call to
801 	 * the buffer routines, even though we're not yet committed to a
802 	 * particular buffer method.
803 	 */
804 	bpf_buffer_init(d);
805 	d->bd_hbuf_in_use = 0;
806 	d->bd_bufmode = BPF_BUFMODE_BUFFER;
807 	d->bd_sig = SIGIO;
808 	d->bd_direction = BPF_D_INOUT;
809 	BPF_PID_REFRESH(d, td);
810 #ifdef MAC
811 	mac_bpfdesc_init(d);
812 	mac_bpfdesc_create(td->td_ucred, d);
813 #endif
814 	mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
815 	callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
816 	knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);
817 
818 	/* Allocate default buffers */
819 	size = d->bd_bufsize;
820 	bpf_buffer_ioctl_sblen(d, &size);
821 
822 	return (0);
823 }
824 
825 /*
826  *  bpfread - read next chunk of packets from buffers
827  */
828 static	int
829 bpfread(struct cdev *dev, struct uio *uio, int ioflag)
830 {
831 	struct bpf_d *d;
832 	int error;
833 	int non_block;
834 	int timed_out;
835 
836 	error = devfs_get_cdevpriv((void **)&d);
837 	if (error != 0)
838 		return (error);
839 
840 	/*
841 	 * Restrict the application to use a buffer the same size as
842 	 * the kernel buffers.
843 	 */
844 	if (uio->uio_resid != d->bd_bufsize)
845 		return (EINVAL);
846 
847 	non_block = ((ioflag & O_NONBLOCK) != 0);
848 
849 	BPFD_LOCK(d);
850 	BPF_PID_REFRESH_CUR(d);
851 	if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
852 		BPFD_UNLOCK(d);
853 		return (EOPNOTSUPP);
854 	}
855 	if (d->bd_state == BPF_WAITING)
856 		callout_stop(&d->bd_callout);
857 	timed_out = (d->bd_state == BPF_TIMED_OUT);
858 	d->bd_state = BPF_IDLE;
859 	while (d->bd_hbuf_in_use) {
860 		error = mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
861 		    PRINET|PCATCH, "bd_hbuf", 0);
862 		if (error != 0) {
863 			BPFD_UNLOCK(d);
864 			return (error);
865 		}
866 	}
867 	/*
868 	 * If the hold buffer is empty, then do a timed sleep, which
869 	 * ends when the timeout expires or when enough packets
870 	 * have arrived to fill the store buffer.
871 	 */
872 	while (d->bd_hbuf == NULL) {
873 		if (d->bd_slen != 0) {
874 			/*
875 			 * One or more packets either arrived since the previous
876 			 * read or arrived while we were asleep.
877 			 */
878 			if (d->bd_immediate || non_block || timed_out) {
879 				/*
880 				 * Rotate the buffers and return what's here
881 				 * if we are in immediate mode, non-blocking
882 				 * flag is set, or this descriptor timed out.
883 				 */
884 				ROTATE_BUFFERS(d);
885 				break;
886 			}
887 		}
888 
889 		/*
890 		 * No data is available; check to see if the bpf device
891 		 * is still pointed at a real interface.  If not, return
892 		 * ENXIO so that the userland process knows to rebind
893 		 * it before using it again.
894 		 */
895 		if (d->bd_bif == NULL) {
896 			BPFD_UNLOCK(d);
897 			return (ENXIO);
898 		}
899 
900 		if (non_block) {
901 			BPFD_UNLOCK(d);
902 			return (EWOULDBLOCK);
903 		}
904 		error = msleep(d, &d->bd_lock, PRINET|PCATCH,
905 		     "bpf", d->bd_rtout);
906 		if (error == EINTR || error == ERESTART) {
907 			BPFD_UNLOCK(d);
908 			return (error);
909 		}
910 		if (error == EWOULDBLOCK) {
911 			/*
912 			 * On a timeout, return what's in the buffer,
913 			 * which may be nothing.  If there is something
914 			 * in the store buffer, we can rotate the buffers.
915 			 */
916 			if (d->bd_hbuf)
917 				/*
918 				 * We filled up the buffer in between
919 				 * getting the timeout and arriving
920 				 * here, so we don't need to rotate.
921 				 */
922 				break;
923 
924 			if (d->bd_slen == 0) {
925 				BPFD_UNLOCK(d);
926 				return (0);
927 			}
928 			ROTATE_BUFFERS(d);
929 			break;
930 		}
931 	}
932 	/*
933 	 * At this point, we know we have something in the hold slot.
934 	 */
935 	d->bd_hbuf_in_use = 1;
936 	BPFD_UNLOCK(d);
937 
938 	/*
939 	 * Move data from hold buffer into user space.
940 	 * We know the entire buffer is transferred since
941 	 * we checked above that the read buffer is d->bd_bufsize bytes.
942 	 *
943 	 * We do not have to worry about simultaneous reads because
944 	 * we waited for sole access to the hold buffer above.
945 	 */
946 	error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);
947 
948 	BPFD_LOCK(d);
949 	KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf"));
950 	d->bd_fbuf = d->bd_hbuf;
951 	d->bd_hbuf = NULL;
952 	d->bd_hlen = 0;
953 	bpf_buf_reclaimed(d);
954 	d->bd_hbuf_in_use = 0;
955 	wakeup(&d->bd_hbuf_in_use);
956 	BPFD_UNLOCK(d);
957 
958 	return (error);
959 }
960 
961 /*
962  * If there are processes sleeping on this descriptor, wake them up.
963  */
964 static __inline void
965 bpf_wakeup(struct bpf_d *d)
966 {
967 
968 	BPFD_LOCK_ASSERT(d);
969 	if (d->bd_state == BPF_WAITING) {
970 		callout_stop(&d->bd_callout);
971 		d->bd_state = BPF_IDLE;
972 	}
973 	wakeup(d);
974 	if (d->bd_async && d->bd_sig && d->bd_sigio)
975 		pgsigio(&d->bd_sigio, d->bd_sig, 0);
976 
977 	selwakeuppri(&d->bd_sel, PRINET);
978 	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
979 }
980 
981 static void
982 bpf_timed_out(void *arg)
983 {
984 	struct bpf_d *d = (struct bpf_d *)arg;
985 
986 	BPFD_LOCK_ASSERT(d);
987 
988 	if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout))
989 		return;
990 	if (d->bd_state == BPF_WAITING) {
991 		d->bd_state = BPF_TIMED_OUT;
992 		if (d->bd_slen != 0)
993 			bpf_wakeup(d);
994 	}
995 }
996 
997 static int
998 bpf_ready(struct bpf_d *d)
999 {
1000 
1001 	BPFD_LOCK_ASSERT(d);
1002 
1003 	if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
1004 		return (1);
1005 	if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
1006 	    d->bd_slen != 0)
1007 		return (1);
1008 	return (0);
1009 }
1010 
1011 static int
1012 bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
1013 {
1014 	struct bpf_d *d;
1015 	struct ifnet *ifp;
1016 	struct mbuf *m, *mc;
1017 	struct sockaddr dst;
1018 	int error, hlen;
1019 
1020 	error = devfs_get_cdevpriv((void **)&d);
1021 	if (error != 0)
1022 		return (error);
1023 
1024 	BPF_PID_REFRESH_CUR(d);
1025 	d->bd_wcount++;
1026 	/* XXX: locking required */
1027 	if (d->bd_bif == NULL) {
1028 		d->bd_wdcount++;
1029 		return (ENXIO);
1030 	}
1031 
1032 	ifp = d->bd_bif->bif_ifp;
1033 
1034 	if ((ifp->if_flags & IFF_UP) == 0) {
1035 		d->bd_wdcount++;
1036 		return (ENETDOWN);
1037 	}
1038 
1039 	if (uio->uio_resid == 0) {
1040 		d->bd_wdcount++;
1041 		return (0);
1042 	}
1043 
1044 	bzero(&dst, sizeof(dst));
1045 	m = NULL;
1046 	hlen = 0;
1047 	/* XXX: bpf_movein() can sleep */
1048 	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
1049 	    &m, &dst, &hlen, d->bd_wfilter);
1050 	if (error) {
1051 		d->bd_wdcount++;
1052 		return (error);
1053 	}
1054 	d->bd_wfcount++;
1055 	if (d->bd_hdrcmplt)
1056 		dst.sa_family = pseudo_AF_HDRCMPLT;
1057 
1058 	if (d->bd_feedback) {
1059 		mc = m_dup(m, M_NOWAIT);
1060 		if (mc != NULL)
1061 			mc->m_pkthdr.rcvif = ifp;
1062 		/* Set M_PROMISC for outgoing packets to be discarded. */
1063 		if (d->bd_direction == BPF_D_INOUT)
1064 			m->m_flags |= M_PROMISC;
1065 	} else
1066 		mc = NULL;
1067 
1068 	m->m_pkthdr.len -= hlen;
1069 	m->m_len -= hlen;
1070 	m->m_data += hlen;	/* XXX */
1071 
1072 	CURVNET_SET(ifp->if_vnet);
1073 #ifdef MAC
1074 	BPFD_LOCK(d);
1075 	mac_bpfdesc_create_mbuf(d, m);
1076 	if (mc != NULL)
1077 		mac_bpfdesc_create_mbuf(d, mc);
1078 	BPFD_UNLOCK(d);
1079 #endif
1080 
1081 	error = (*ifp->if_output)(ifp, m, &dst, NULL);
1082 	if (error)
1083 		d->bd_wdcount++;
1084 
1085 	if (mc != NULL) {
1086 		if (error == 0)
1087 			(*ifp->if_input)(ifp, mc);
1088 		else
1089 			m_freem(mc);
1090 	}
1091 	CURVNET_RESTORE();
1092 
1093 	return (error);
1094 }
1095 
1096 /*
1097  * Reset a descriptor by flushing its packet buffer and clearing the receive
1098  * and drop counts.  This is doable for kernel-only buffers, but with
1099  * zero-copy buffers, we can't write to (or rotate) buffers that are
1100  * currently owned by userspace.  It would be nice if we could encapsulate
1101  * this logic in the buffer code rather than here.
1102  */
1103 static void
1104 reset_d(struct bpf_d *d)
1105 {
1106 
1107 	BPFD_LOCK_ASSERT(d);
1108 
1109 	while (d->bd_hbuf_in_use)
1110 		mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET,
1111 		    "bd_hbuf", 0);
1112 	if ((d->bd_hbuf != NULL) &&
1113 	    (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
1114 		/* Free the hold buffer. */
1115 		d->bd_fbuf = d->bd_hbuf;
1116 		d->bd_hbuf = NULL;
1117 		d->bd_hlen = 0;
1118 		bpf_buf_reclaimed(d);
1119 	}
1120 	if (bpf_canwritebuf(d))
1121 		d->bd_slen = 0;
1122 	d->bd_rcount = 0;
1123 	d->bd_dcount = 0;
1124 	d->bd_fcount = 0;
1125 	d->bd_wcount = 0;
1126 	d->bd_wfcount = 0;
1127 	d->bd_wdcount = 0;
1128 	d->bd_zcopy = 0;
1129 }
1130 
1131 /*
1132  *  FIONREAD		Check for read packet available.
1133  *  SIOCGIFADDR		Get interface address - convenient hook to driver.
1134  *  BIOCGBLEN		Get buffer len [for read()].
1135  *  BIOCSETF		Set read filter.
1136  *  BIOCSETFNR		Set read filter without resetting descriptor.
1137  *  BIOCSETWF		Set write filter.
1138  *  BIOCFLUSH		Flush read packet buffer.
1139  *  BIOCPROMISC		Put interface into promiscuous mode.
1140  *  BIOCGDLT		Get link layer type.
1141  *  BIOCGETIF		Get interface name.
1142  *  BIOCSETIF		Set interface.
1143  *  BIOCSRTIMEOUT	Set read timeout.
1144  *  BIOCGRTIMEOUT	Get read timeout.
1145  *  BIOCGSTATS		Get packet stats.
1146  *  BIOCIMMEDIATE	Set immediate mode.
1147  *  BIOCVERSION		Get filter language version.
1148  *  BIOCGHDRCMPLT	Get "header already complete" flag
1149  *  BIOCSHDRCMPLT	Set "header already complete" flag
1150  *  BIOCGDIRECTION	Get packet direction flag
1151  *  BIOCSDIRECTION	Set packet direction flag
1152  *  BIOCGTSTAMP		Get time stamp format and resolution.
1153  *  BIOCSTSTAMP		Set time stamp format and resolution.
1154  *  BIOCLOCK		Set "locked" flag
1155  *  BIOCFEEDBACK	Set packet feedback mode.
1156  *  BIOCSETZBUF		Set current zero-copy buffer locations.
1157  *  BIOCGETZMAX		Get maximum zero-copy buffer size.
1158  *  BIOCROTZBUF		Force rotation of zero-copy buffer
1159  *  BIOCSETBUFMODE	Set buffer mode.
1160  *  BIOCGETBUFMODE	Get current buffer mode.
1161  */
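/*
 * Typical userland sequence driving the ioctls listed above (an illustrative
 * sketch only; "em0" and the bpf_program contents are placeholders, every
 * return value must be checked in real code, and libpcap performs essentially
 * these steps):
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr;
 *	u_int blen, imm = 1;
 *	struct bpf_program prog;	(filled with BPF instructions)
 *
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);	(bind to an interface)
 *	ioctl(fd, BIOCGBLEN, &blen);	(learn the buffer size)
 *	ioctl(fd, BIOCIMMEDIATE, &imm);	(deliver packets as they arrive)
 *	ioctl(fd, BIOCSETF, &prog);	(install the read filter)
 *
 *	char *buf = malloc(blen);
 *	read(fd, buf, blen);		(read() length must equal BIOCGBLEN)
 */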
1162 /* ARGSUSED */
1163 static	int
1164 bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
1165     struct thread *td)
1166 {
1167 	struct bpf_d *d;
1168 	int error;
1169 
1170 	error = devfs_get_cdevpriv((void **)&d);
1171 	if (error != 0)
1172 		return (error);
1173 
1174 	/*
1175 	 * Refresh PID associated with this descriptor.
1176 	 */
1177 	BPFD_LOCK(d);
1178 	BPF_PID_REFRESH(d, td);
1179 	if (d->bd_state == BPF_WAITING)
1180 		callout_stop(&d->bd_callout);
1181 	d->bd_state = BPF_IDLE;
1182 	BPFD_UNLOCK(d);
1183 
1184 	if (d->bd_locked == 1) {
1185 		switch (cmd) {
1186 		case BIOCGBLEN:
1187 		case BIOCFLUSH:
1188 		case BIOCGDLT:
1189 		case BIOCGDLTLIST:
1190 #ifdef COMPAT_FREEBSD32
1191 		case BIOCGDLTLIST32:
1192 #endif
1193 		case BIOCGETIF:
1194 		case BIOCGRTIMEOUT:
1195 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1196 		case BIOCGRTIMEOUT32:
1197 #endif
1198 		case BIOCGSTATS:
1199 		case BIOCVERSION:
1200 		case BIOCGRSIG:
1201 		case BIOCGHDRCMPLT:
1202 		case BIOCSTSTAMP:
1203 		case BIOCFEEDBACK:
1204 		case FIONREAD:
1205 		case BIOCLOCK:
1206 		case BIOCSRTIMEOUT:
1207 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1208 		case BIOCSRTIMEOUT32:
1209 #endif
1210 		case BIOCIMMEDIATE:
1211 		case TIOCGPGRP:
1212 		case BIOCROTZBUF:
1213 			break;
1214 		default:
1215 			return (EPERM);
1216 		}
1217 	}
1218 #ifdef COMPAT_FREEBSD32
1219 	/*
1220 	 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
1221 	 * that it will get 32-bit packet headers.
1222 	 */
1223 	switch (cmd) {
1224 	case BIOCSETF32:
1225 	case BIOCSETFNR32:
1226 	case BIOCSETWF32:
1227 	case BIOCGDLTLIST32:
1228 	case BIOCGRTIMEOUT32:
1229 	case BIOCSRTIMEOUT32:
1230 		BPFD_LOCK(d);
1231 		d->bd_compat32 = 1;
1232 		BPFD_UNLOCK(d);
1233 	}
1234 #endif
1235 
1236 	CURVNET_SET(TD_TO_VNET(td));
1237 	switch (cmd) {
1238 
1239 	default:
1240 		error = EINVAL;
1241 		break;
1242 
1243 	/*
1244 	 * Check for read packet available.
1245 	 */
1246 	case FIONREAD:
1247 		{
1248 			int n;
1249 
1250 			BPFD_LOCK(d);
1251 			n = d->bd_slen;
1252 			while (d->bd_hbuf_in_use)
1253 				mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
1254 				    PRINET, "bd_hbuf", 0);
1255 			if (d->bd_hbuf)
1256 				n += d->bd_hlen;
1257 			BPFD_UNLOCK(d);
1258 
1259 			*(int *)addr = n;
1260 			break;
1261 		}
1262 
1263 	case SIOCGIFADDR:
1264 		{
1265 			struct ifnet *ifp;
1266 
1267 			if (d->bd_bif == NULL)
1268 				error = EINVAL;
1269 			else {
1270 				ifp = d->bd_bif->bif_ifp;
1271 				error = (*ifp->if_ioctl)(ifp, cmd, addr);
1272 			}
1273 			break;
1274 		}
1275 
1276 	/*
1277 	 * Get buffer len [for read()].
1278 	 */
1279 	case BIOCGBLEN:
1280 		BPFD_LOCK(d);
1281 		*(u_int *)addr = d->bd_bufsize;
1282 		BPFD_UNLOCK(d);
1283 		break;
1284 
1285 	/*
1286 	 * Set buffer length.
1287 	 */
1288 	case BIOCSBLEN:
1289 		error = bpf_ioctl_sblen(d, (u_int *)addr);
1290 		break;
1291 
1292 	/*
1293 	 * Set link layer read filter.
1294 	 */
1295 	case BIOCSETF:
1296 	case BIOCSETFNR:
1297 	case BIOCSETWF:
1298 #ifdef COMPAT_FREEBSD32
1299 	case BIOCSETF32:
1300 	case BIOCSETFNR32:
1301 	case BIOCSETWF32:
1302 #endif
1303 		error = bpf_setf(d, (struct bpf_program *)addr, cmd);
1304 		break;
1305 
1306 	/*
1307 	 * Flush read packet buffer.
1308 	 */
1309 	case BIOCFLUSH:
1310 		BPFD_LOCK(d);
1311 		reset_d(d);
1312 		BPFD_UNLOCK(d);
1313 		break;
1314 
1315 	/*
1316 	 * Put interface into promiscuous mode.
1317 	 */
1318 	case BIOCPROMISC:
1319 		if (d->bd_bif == NULL) {
1320 			/*
1321 			 * No interface attached yet.
1322 			 */
1323 			error = EINVAL;
1324 			break;
1325 		}
1326 		if (d->bd_promisc == 0) {
1327 			error = ifpromisc(d->bd_bif->bif_ifp, 1);
1328 			if (error == 0)
1329 				d->bd_promisc = 1;
1330 		}
1331 		break;
1332 
1333 	/*
1334 	 * Get current data link type.
1335 	 */
1336 	case BIOCGDLT:
1337 		BPF_LOCK();
1338 		if (d->bd_bif == NULL)
1339 			error = EINVAL;
1340 		else
1341 			*(u_int *)addr = d->bd_bif->bif_dlt;
1342 		BPF_UNLOCK();
1343 		break;
1344 
1345 	/*
1346 	 * Get a list of supported data link types.
1347 	 */
1348 #ifdef COMPAT_FREEBSD32
1349 	case BIOCGDLTLIST32:
1350 		{
1351 			struct bpf_dltlist32 *list32;
1352 			struct bpf_dltlist dltlist;
1353 
1354 			list32 = (struct bpf_dltlist32 *)addr;
1355 			dltlist.bfl_len = list32->bfl_len;
1356 			dltlist.bfl_list = PTRIN(list32->bfl_list);
1357 			BPF_LOCK();
1358 			if (d->bd_bif == NULL)
1359 				error = EINVAL;
1360 			else {
1361 				error = bpf_getdltlist(d, &dltlist);
1362 				if (error == 0)
1363 					list32->bfl_len = dltlist.bfl_len;
1364 			}
1365 			BPF_UNLOCK();
1366 			break;
1367 		}
1368 #endif
1369 
1370 	case BIOCGDLTLIST:
1371 		BPF_LOCK();
1372 		if (d->bd_bif == NULL)
1373 			error = EINVAL;
1374 		else
1375 			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
1376 		BPF_UNLOCK();
1377 		break;
1378 
1379 	/*
1380 	 * Set data link type.
1381 	 */
1382 	case BIOCSDLT:
1383 		BPF_LOCK();
1384 		if (d->bd_bif == NULL)
1385 			error = EINVAL;
1386 		else
1387 			error = bpf_setdlt(d, *(u_int *)addr);
1388 		BPF_UNLOCK();
1389 		break;
1390 
1391 	/*
1392 	 * Get interface name.
1393 	 */
1394 	case BIOCGETIF:
1395 		BPF_LOCK();
1396 		if (d->bd_bif == NULL)
1397 			error = EINVAL;
1398 		else {
1399 			struct ifnet *const ifp = d->bd_bif->bif_ifp;
1400 			struct ifreq *const ifr = (struct ifreq *)addr;
1401 
1402 			strlcpy(ifr->ifr_name, ifp->if_xname,
1403 			    sizeof(ifr->ifr_name));
1404 		}
1405 		BPF_UNLOCK();
1406 		break;
1407 
1408 	/*
1409 	 * Set interface.
1410 	 */
1411 	case BIOCSETIF:
1412 		BPF_LOCK();
1413 		error = bpf_setif(d, (struct ifreq *)addr);
1414 		BPF_UNLOCK();
1415 		break;
1416 
1417 	/*
1418 	 * Set read timeout.
1419 	 */
1420 	case BIOCSRTIMEOUT:
1421 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1422 	case BIOCSRTIMEOUT32:
1423 #endif
1424 		{
1425 			struct timeval *tv = (struct timeval *)addr;
1426 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1427 			struct timeval32 *tv32;
1428 			struct timeval tv64;
1429 
1430 			if (cmd == BIOCSRTIMEOUT32) {
1431 				tv32 = (struct timeval32 *)addr;
1432 				tv = &tv64;
1433 				tv->tv_sec = tv32->tv_sec;
1434 				tv->tv_usec = tv32->tv_usec;
1435 			} else
1436 #endif
1437 				tv = (struct timeval *)addr;
1438 
1439 			/*
1440 			 * Subtract 1 tick from tvtohz() since this isn't
1441 			 * a one-shot timer.
1442 			 */
1443 			if ((error = itimerfix(tv)) == 0)
1444 				d->bd_rtout = tvtohz(tv) - 1;
1445 			break;
1446 		}
1447 
1448 	/*
1449 	 * Get read timeout.
1450 	 */
1451 	case BIOCGRTIMEOUT:
1452 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1453 	case BIOCGRTIMEOUT32:
1454 #endif
1455 		{
1456 			struct timeval *tv;
1457 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1458 			struct timeval32 *tv32;
1459 			struct timeval tv64;
1460 
1461 			if (cmd == BIOCGRTIMEOUT32)
1462 				tv = &tv64;
1463 			else
1464 #endif
1465 				tv = (struct timeval *)addr;
1466 
1467 			tv->tv_sec = d->bd_rtout / hz;
1468 			tv->tv_usec = (d->bd_rtout % hz) * tick;
1469 #if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1470 			if (cmd == BIOCGRTIMEOUT32) {
1471 				tv32 = (struct timeval32 *)addr;
1472 				tv32->tv_sec = tv->tv_sec;
1473 				tv32->tv_usec = tv->tv_usec;
1474 			}
1475 #endif
1476 
1477 			break;
1478 		}
1479 
1480 	/*
1481 	 * Get packet stats.
1482 	 */
1483 	case BIOCGSTATS:
1484 		{
1485 			struct bpf_stat *bs = (struct bpf_stat *)addr;
1486 
1487 			/* XXXCSJP overflow */
1488 			bs->bs_recv = d->bd_rcount;
1489 			bs->bs_drop = d->bd_dcount;
1490 			break;
1491 		}
1492 
1493 	/*
1494 	 * Set immediate mode.
1495 	 */
1496 	case BIOCIMMEDIATE:
1497 		BPFD_LOCK(d);
1498 		d->bd_immediate = *(u_int *)addr;
1499 		BPFD_UNLOCK(d);
1500 		break;
1501 
1502 	case BIOCVERSION:
1503 		{
1504 			struct bpf_version *bv = (struct bpf_version *)addr;
1505 
1506 			bv->bv_major = BPF_MAJOR_VERSION;
1507 			bv->bv_minor = BPF_MINOR_VERSION;
1508 			break;
1509 		}
1510 
1511 	/*
1512 	 * Get "header already complete" flag
1513 	 */
1514 	case BIOCGHDRCMPLT:
1515 		BPFD_LOCK(d);
1516 		*(u_int *)addr = d->bd_hdrcmplt;
1517 		BPFD_UNLOCK(d);
1518 		break;
1519 
1520 	/*
1521 	 * Set "header already complete" flag
1522 	 */
1523 	case BIOCSHDRCMPLT:
1524 		BPFD_LOCK(d);
1525 		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
1526 		BPFD_UNLOCK(d);
1527 		break;
1528 
1529 	/*
1530 	 * Get packet direction flag
1531 	 */
1532 	case BIOCGDIRECTION:
1533 		BPFD_LOCK(d);
1534 		*(u_int *)addr = d->bd_direction;
1535 		BPFD_UNLOCK(d);
1536 		break;
1537 
1538 	/*
1539 	 * Set packet direction flag
1540 	 */
1541 	case BIOCSDIRECTION:
1542 		{
1543 			u_int	direction;
1544 
1545 			direction = *(u_int *)addr;
1546 			switch (direction) {
1547 			case BPF_D_IN:
1548 			case BPF_D_INOUT:
1549 			case BPF_D_OUT:
1550 				BPFD_LOCK(d);
1551 				d->bd_direction = direction;
1552 				BPFD_UNLOCK(d);
1553 				break;
1554 			default:
1555 				error = EINVAL;
1556 			}
1557 		}
1558 		break;
1559 
1560 	/*
1561 	 * Get packet timestamp format and resolution.
1562 	 */
1563 	case BIOCGTSTAMP:
1564 		BPFD_LOCK(d);
1565 		*(u_int *)addr = d->bd_tstamp;
1566 		BPFD_UNLOCK(d);
1567 		break;
1568 
1569 	/*
1570 	 * Set packet timestamp format and resolution.
1571 	 */
1572 	case BIOCSTSTAMP:
1573 		{
1574 			u_int	func;
1575 
1576 			func = *(u_int *)addr;
1577 			if (BPF_T_VALID(func))
1578 				d->bd_tstamp = func;
1579 			else
1580 				error = EINVAL;
1581 		}
1582 		break;
1583 
1584 	case BIOCFEEDBACK:
1585 		BPFD_LOCK(d);
1586 		d->bd_feedback = *(u_int *)addr;
1587 		BPFD_UNLOCK(d);
1588 		break;
1589 
1590 	case BIOCLOCK:
1591 		BPFD_LOCK(d);
1592 		d->bd_locked = 1;
1593 		BPFD_UNLOCK(d);
1594 		break;
1595 
1596 	case FIONBIO:		/* Non-blocking I/O */
1597 		break;
1598 
1599 	case FIOASYNC:		/* Send signal on receive packets */
1600 		BPFD_LOCK(d);
1601 		d->bd_async = *(int *)addr;
1602 		BPFD_UNLOCK(d);
1603 		break;
1604 
1605 	case FIOSETOWN:
1606 		/*
1607 		 * XXX: Add some sort of locking here?
1608 		 * fsetown() can sleep.
1609 		 */
1610 		error = fsetown(*(int *)addr, &d->bd_sigio);
1611 		break;
1612 
1613 	case FIOGETOWN:
1614 		BPFD_LOCK(d);
1615 		*(int *)addr = fgetown(&d->bd_sigio);
1616 		BPFD_UNLOCK(d);
1617 		break;
1618 
1619 	/* This is deprecated, FIOSETOWN should be used instead. */
1620 	case TIOCSPGRP:
1621 		error = fsetown(-(*(int *)addr), &d->bd_sigio);
1622 		break;
1623 
1624 	/* This is deprecated, FIOGETOWN should be used instead. */
1625 	case TIOCGPGRP:
1626 		*(int *)addr = -fgetown(&d->bd_sigio);
1627 		break;
1628 
1629 	case BIOCSRSIG:		/* Set receive signal */
1630 		{
1631 			u_int sig;
1632 
1633 			sig = *(u_int *)addr;
1634 
1635 			if (sig >= NSIG)
1636 				error = EINVAL;
1637 			else {
1638 				BPFD_LOCK(d);
1639 				d->bd_sig = sig;
1640 				BPFD_UNLOCK(d);
1641 			}
1642 			break;
1643 		}
1644 	case BIOCGRSIG:
1645 		BPFD_LOCK(d);
1646 		*(u_int *)addr = d->bd_sig;
1647 		BPFD_UNLOCK(d);
1648 		break;
1649 
1650 	case BIOCGETBUFMODE:
1651 		BPFD_LOCK(d);
1652 		*(u_int *)addr = d->bd_bufmode;
1653 		BPFD_UNLOCK(d);
1654 		break;
1655 
1656 	case BIOCSETBUFMODE:
1657 		/*
1658 		 * Allow the buffering mode to be changed as long as we
1659 		 * haven't yet committed to a particular mode.  Our
1660 		 * definition of commitment, for now, is whether or not a
1661 		 * buffer has been allocated or an interface attached, since
1662 		 * that's the point where things get tricky.
1663 		 */
1664 		switch (*(u_int *)addr) {
1665 		case BPF_BUFMODE_BUFFER:
1666 			break;
1667 
1668 		case BPF_BUFMODE_ZBUF:
1669 			if (bpf_zerocopy_enable)
1670 				break;
1671 			/* FALLTHROUGH */
1672 
1673 		default:
1674 			CURVNET_RESTORE();
1675 			return (EINVAL);
1676 		}
1677 
1678 		BPFD_LOCK(d);
1679 		if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
1680 		    d->bd_fbuf != NULL || d->bd_bif != NULL) {
1681 			BPFD_UNLOCK(d);
1682 			CURVNET_RESTORE();
1683 			return (EBUSY);
1684 		}
1685 		d->bd_bufmode = *(u_int *)addr;
1686 		BPFD_UNLOCK(d);
1687 		break;
1688 
1689 	case BIOCGETZMAX:
1690 		error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
1691 		break;
1692 
1693 	case BIOCSETZBUF:
1694 		error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
1695 		break;
1696 
1697 	case BIOCROTZBUF:
1698 		error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
1699 		break;
1700 	}
1701 	CURVNET_RESTORE();
1702 	return (error);
1703 }
1704 
1705 /*
1706  * Set d's packet filter program to fp.  If this file already has a filter,
1707  * free it and replace it.  Returns EINVAL for bogus requests.
1708  *
1709  * Note we need global lock here to serialize bpf_setf() and bpf_setif() calls
1710  * since reading d->bd_bif can't be protected by d or interface lock due to
1711  * lock order.
1712  *
1713  * Additionally, we have to acquire the interface write lock because bpf_mtap()
1714  * uses the interface read lock to read all filters.
1715  *
1716  */
1717 static int
1718 bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
1719 {
1720 #ifdef COMPAT_FREEBSD32
1721 	struct bpf_program fp_swab;
1722 	struct bpf_program32 *fp32;
1723 #endif
1724 	struct bpf_insn *fcode, *old;
1725 #ifdef BPF_JITTER
1726 	bpf_jit_filter *jfunc, *ofunc;
1727 #endif
1728 	size_t size;
1729 	u_int flen;
1730 	int need_upgrade;
1731 
1732 #ifdef COMPAT_FREEBSD32
1733 	switch (cmd) {
1734 	case BIOCSETF32:
1735 	case BIOCSETWF32:
1736 	case BIOCSETFNR32:
1737 		fp32 = (struct bpf_program32 *)fp;
1738 		fp_swab.bf_len = fp32->bf_len;
1739 		fp_swab.bf_insns = (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
1740 		fp = &fp_swab;
1741 		switch (cmd) {
1742 		case BIOCSETF32:
1743 			cmd = BIOCSETF;
1744 			break;
1745 		case BIOCSETWF32:
1746 			cmd = BIOCSETWF;
1747 			break;
1748 		}
1749 		break;
1750 	}
1751 #endif
1752 
1753 	fcode = NULL;
1754 #ifdef BPF_JITTER
1755 	jfunc = ofunc = NULL;
1756 #endif
1757 	need_upgrade = 0;
1758 
1759 	/*
1760 	 * Check the new filter's validity before acquiring any locks.
1761 	 * Allocate memory for new filter, if needed.
1762 	 */
1763 	flen = fp->bf_len;
1764 	if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0))
1765 		return (EINVAL);
1766 	size = flen * sizeof(*fp->bf_insns);
1767 	if (size > 0) {
1768 		/* We're setting up a new filter.  Copy and check the actual data. */
1769 		fcode = malloc(size, M_BPF, M_WAITOK);
1770 		if (copyin(fp->bf_insns, fcode, size) != 0 ||
1771 		    !bpf_validate(fcode, flen)) {
1772 			free(fcode, M_BPF);
1773 			return (EINVAL);
1774 		}
1775 #ifdef BPF_JITTER
1776 		/* Filter is copied inside fcode and is perfectly valid. */
1777 		jfunc = bpf_jitter(fcode, flen);
1778 #endif
1779 	}
1780 
1781 	BPF_LOCK();
1782 
1783 	/*
1784 	 * Set up new filter.
1785 	 * Protect filter change by interface lock.
1786 	 * Additionally, we are protected by global lock here.
1787 	 */
1788 	if (d->bd_bif != NULL)
1789 		BPFIF_WLOCK(d->bd_bif);
1790 	BPFD_LOCK(d);
1791 	if (cmd == BIOCSETWF) {
1792 		old = d->bd_wfilter;
1793 		d->bd_wfilter = fcode;
1794 	} else {
1795 		old = d->bd_rfilter;
1796 		d->bd_rfilter = fcode;
1797 #ifdef BPF_JITTER
1798 		ofunc = d->bd_bfilter;
1799 		d->bd_bfilter = jfunc;
1800 #endif
1801 		if (cmd == BIOCSETF)
1802 			reset_d(d);
1803 
1804 		if (fcode != NULL) {
1805 			/*
1806 			 * Do not require an upgrade on the first BIOCSETF,
1807 			 * which pcap_open_live() uses to set the snap length.
1808 			 */
1809 			if (d->bd_writer != 0 && --d->bd_writer == 0)
1810 				need_upgrade = 1;
1811 			CTR4(KTR_NET, "%s: filter function set by pid %d, "
1812 			    "bd_writer counter %d, need_upgrade %d",
1813 			    __func__, d->bd_pid, d->bd_writer, need_upgrade);
1814 		}
1815 	}
1816 	BPFD_UNLOCK(d);
1817 	if (d->bd_bif != NULL)
1818 		BPFIF_WUNLOCK(d->bd_bif);
1819 	if (old != NULL)
1820 		free(old, M_BPF);
1821 #ifdef BPF_JITTER
1822 	if (ofunc != NULL)
1823 		bpf_destroy_jit_filter(ofunc);
1824 #endif
1825 
1826 	/* Move d to active readers list. */
1827 	if (need_upgrade)
1828 		bpf_upgraded(d);
1829 
1830 	BPF_UNLOCK();
1831 	return (0);
1832 }
1833 
1834 /*
1835  * Detach a file from its current interface (if attached at all) and attach
1836  * to the interface indicated by the name stored in ifr.
1837  * Return an errno or 0.
1838  */
1839 static int
1840 bpf_setif(struct bpf_d *d, struct ifreq *ifr)
1841 {
1842 	struct bpf_if *bp;
1843 	struct ifnet *theywant;
1844 
1845 	BPF_LOCK_ASSERT();
1846 
1847 	theywant = ifunit(ifr->ifr_name);
1848 	if (theywant == NULL || theywant->if_bpf == NULL)
1849 		return (ENXIO);
1850 
1851 	bp = theywant->if_bpf;
1852 
1853 	/* Check that the interface is not being detached from BPF */
1854 	BPFIF_RLOCK(bp);
1855 	if (bp->flags & BPFIF_FLAG_DYING) {
1856 		BPFIF_RUNLOCK(bp);
1857 		return (ENXIO);
1858 	}
1859 	BPFIF_RUNLOCK(bp);
1860 
1861 	/*
1862 	 * Behavior here depends on the buffering model.  If we're using
1863 	 * kernel memory buffers, then we can allocate them here.  If we're
1864 	 * using zero-copy, then the user process must have registered
1865 	 * buffers by the time we get here.  If not, return an error.
1866 	 */
1867 	switch (d->bd_bufmode) {
1868 	case BPF_BUFMODE_BUFFER:
1869 	case BPF_BUFMODE_ZBUF:
1870 		if (d->bd_sbuf == NULL)
1871 			return (EINVAL);
1872 		break;
1873 
1874 	default:
1875 		panic("bpf_setif: bufmode %d", d->bd_bufmode);
1876 	}
1877 	if (bp != d->bd_bif)
1878 		bpf_attachd(d, bp);
1879 	BPFD_LOCK(d);
1880 	reset_d(d);
1881 	BPFD_UNLOCK(d);
1882 	return (0);
1883 }
1884 
1885 /*
1886  * Support for select() and poll() system calls
1887  *
1888  * Return true iff the specific operation will not block indefinitely.
1889  * Otherwise, return false but make a note that a selwakeup() must be done.
1890  */
1891 static int
1892 bpfpoll(struct cdev *dev, int events, struct thread *td)
1893 {
1894 	struct bpf_d *d;
1895 	int revents;
1896 
1897 	if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
1898 		return (events &
1899 		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
1900 
1901 	/*
1902 	 * Refresh PID associated with this descriptor.
1903 	 */
1904 	revents = events & (POLLOUT | POLLWRNORM);
1905 	BPFD_LOCK(d);
1906 	BPF_PID_REFRESH(d, td);
1907 	if (events & (POLLIN | POLLRDNORM)) {
1908 		if (bpf_ready(d))
1909 			revents |= events & (POLLIN | POLLRDNORM);
1910 		else {
1911 			selrecord(td, &d->bd_sel);
1912 			/* Start the read timeout if necessary. */
1913 			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1914 				callout_reset(&d->bd_callout, d->bd_rtout,
1915 				    bpf_timed_out, d);
1916 				d->bd_state = BPF_WAITING;
1917 			}
1918 		}
1919 	}
1920 	BPFD_UNLOCK(d);
1921 	return (revents);
1922 }
1923 
1924 /*
1925  * Support for kevent() system call.  Register EVFILT_READ filters and
1926  * reject all others.
1927  */
1928 int
1929 bpfkqfilter(struct cdev *dev, struct knote *kn)
1930 {
1931 	struct bpf_d *d;
1932 
1933 	if (devfs_get_cdevpriv((void **)&d) != 0 ||
1934 	    kn->kn_filter != EVFILT_READ)
1935 		return (1);
1936 
1937 	/*
1938 	 * Refresh PID associated with this descriptor.
1939 	 */
1940 	BPFD_LOCK(d);
1941 	BPF_PID_REFRESH_CUR(d);
1942 	kn->kn_fop = &bpfread_filtops;
1943 	kn->kn_hook = d;
1944 	knlist_add(&d->bd_sel.si_note, kn, 1);
1945 	BPFD_UNLOCK(d);
1946 
1947 	return (0);
1948 }
1949 
1950 static void
1951 filt_bpfdetach(struct knote *kn)
1952 {
1953 	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1954 
1955 	knlist_remove(&d->bd_sel.si_note, kn, 0);
1956 }
1957 
1958 static int
1959 filt_bpfread(struct knote *kn, long hint)
1960 {
1961 	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1962 	int ready;
1963 
1964 	BPFD_LOCK_ASSERT(d);
1965 	ready = bpf_ready(d);
1966 	if (ready) {
1967 		kn->kn_data = d->bd_slen;
1968 		while (d->bd_hbuf_in_use)
1969 			mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
1970 			    PRINET, "bd_hbuf", 0);
1971 		if (d->bd_hbuf)
1972 			kn->kn_data += d->bd_hlen;
1973 	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1974 		callout_reset(&d->bd_callout, d->bd_rtout,
1975 		    bpf_timed_out, d);
1976 		d->bd_state = BPF_WAITING;
1977 	}
1978 
1979 	return (ready);
1980 }
1981 
1982 #define	BPF_TSTAMP_NONE		0
1983 #define	BPF_TSTAMP_FAST		1
1984 #define	BPF_TSTAMP_NORMAL	2
1985 #define	BPF_TSTAMP_EXTERN	3
1986 
1987 static int
1988 bpf_ts_quality(int tstype)
1989 {
1990 
1991 	if (tstype == BPF_T_NONE)
1992 		return (BPF_TSTAMP_NONE);
1993 	if ((tstype & BPF_T_FAST) != 0)
1994 		return (BPF_TSTAMP_FAST);
1995 
1996 	return (BPF_TSTAMP_NORMAL);
1997 }
1998 
1999 static int
2000 bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m)
2001 {
2002 	struct m_tag *tag;
2003 	int quality;
2004 
2005 	quality = bpf_ts_quality(tstype);
2006 	if (quality == BPF_TSTAMP_NONE)
2007 		return (quality);
2008 
2009 	if (m != NULL) {
2010 		tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL);
2011 		if (tag != NULL) {
2012 			*bt = *(struct bintime *)(tag + 1);
2013 			return (BPF_TSTAMP_EXTERN);
2014 		}
2015 	}
2016 	if (quality == BPF_TSTAMP_NORMAL)
2017 		binuptime(bt);
2018 	else
2019 		getbinuptime(bt);
2020 
2021 	return (quality);
2022 }
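/*
 * A driver that already has a precise (e.g. hardware) timestamp can hand it
 * to bpf_gettime() above by tagging the mbuf before the tap runs.  This is a
 * hedged sketch of that convention, using the same MTAG_BPF /
 * MTAG_BPF_TIMESTAMP pair consumed by m_tag_locate() above; "hw_bt" is a
 * placeholder for the driver's own struct bintime value:
 *
 *	struct m_tag *tag;
 *
 *	tag = m_tag_alloc(MTAG_BPF, MTAG_BPF_TIMESTAMP,
 *	    sizeof(struct bintime), M_NOWAIT);
 *	if (tag != NULL) {
 *		*(struct bintime *)(tag + 1) = hw_bt;
 *		m_tag_prepend(m, tag);
 *	}
 */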
2023 
2024 /*
2025  * Incoming linkage from device drivers.  Process the packet pkt, of length
2026  * pktlen, which is stored in a contiguous buffer.  The packet is parsed
2027  * by each process' filter, and if accepted, stashed into the corresponding
2028  * buffer.
2029  */
2030 void
2031 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2032 {
2033 	struct bintime bt;
2034 	struct bpf_d *d;
2035 #ifdef BPF_JITTER
2036 	bpf_jit_filter *bf;
2037 #endif
2038 	u_int slen;
2039 	int gottime;
2040 
2041 	gottime = BPF_TSTAMP_NONE;
2042 
2043 	BPFIF_RLOCK(bp);
2044 
2045 	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2046 		/*
2047 		 * We are not using any locks for d here because:
2048 		 * 1) any filter change is protected by the interface
2049 		 * write lock;
2050 		 * 2) destroying/detaching d is protected by the interface
2051 		 * write lock, too.
2052 		 */
2053 
2054 		/* XXX: Do not protect counter for the sake of performance. */
2055 		++d->bd_rcount;
2056 		/*
2057 		 * NB: We don't call BPF_CHECK_DIRECTION() here since there is no
2058 		 * way for the caller to indicate to us whether this packet
2059 		 * is inbound or outbound.  In the bpf_mtap() routines, we use
2060 		 * the interface pointers on the mbuf to figure it out.
2061 		 */
2062 #ifdef BPF_JITTER
2063 		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2064 		if (bf != NULL)
2065 			slen = (*(bf->func))(pkt, pktlen, pktlen);
2066 		else
2067 #endif
2068 		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
2069 		if (slen != 0) {
2070 			/*
2071 			 * Filter matches.  Acquire the descriptor lock.
2072 			 */
2073 			BPFD_LOCK(d);
2074 
2075 			d->bd_fcount++;
2076 			if (gottime < bpf_ts_quality(d->bd_tstamp))
2077 				gottime = bpf_gettime(&bt, d->bd_tstamp, NULL);
2078 #ifdef MAC
2079 			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2080 #endif
2081 				catchpacket(d, pkt, pktlen, slen,
2082 				    bpf_append_bytes, &bt);
2083 			BPFD_UNLOCK(d);
2084 		}
2085 	}
2086 	BPFIF_RUNLOCK(bp);
2087 }
2088 
2089 #define	BPF_CHECK_DIRECTION(d, r, i)				\
2090 	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
2091 	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
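/*
 * The macro above evaluates true when a packet must be skipped: with
 * BPF_D_IN, a packet whose rcvif (r) is not the tapped interface (i) is
 * treated as outbound and dropped; with BPF_D_OUT, a packet received on the
 * tapped interface (r == i) is inbound and dropped.  BPF_D_INOUT matches
 * neither clause, so both directions are captured.
 */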
2092 
2093 /*
2094  * Incoming linkage from device drivers, when packet is in an mbuf chain.
2095  * Locking model is explained in bpf_tap().
2096  */
2097 void
2098 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2099 {
2100 	struct bintime bt;
2101 	struct bpf_d *d;
2102 #ifdef BPF_JITTER
2103 	bpf_jit_filter *bf;
2104 #endif
2105 	u_int pktlen, slen;
2106 	int gottime;
2107 
2108 	/* Skip outgoing duplicate packets. */
2109 	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2110 		m->m_flags &= ~M_PROMISC;
2111 		return;
2112 	}
2113 
2114 	pktlen = m_length(m, NULL);
2115 	gottime = BPF_TSTAMP_NONE;
2116 
2117 	BPFIF_RLOCK(bp);
2118 
2119 	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2120 		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2121 			continue;
2122 		++d->bd_rcount;
2123 #ifdef BPF_JITTER
2124 		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2125 		/* XXX We cannot handle multiple mbufs. */
2126 		if (bf != NULL && m->m_next == NULL)
2127 			slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
2128 		else
2129 #endif
2130 		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
2131 		if (slen != 0) {
2132 			BPFD_LOCK(d);
2133 
2134 			d->bd_fcount++;
2135 			if (gottime < bpf_ts_quality(d->bd_tstamp))
2136 				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2137 #ifdef MAC
2138 			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2139 #endif
2140 				catchpacket(d, (u_char *)m, pktlen, slen,
2141 				    bpf_append_mbuf, &bt);
2142 			BPFD_UNLOCK(d);
2143 		}
2144 	}
2145 	BPFIF_RUNLOCK(bp);
2146 }
2147 
2148 /*
2149  * Incoming linkage from device drivers, when the packet is in an mbuf
2150  * chain and a separate, contiguous header is to be prepended to it.
2151  */
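/*
 * Typical callers are pseudo-interfaces that need to export a header which
 * is not part of the mbuf chain itself; if_loop(4), for example, passes the
 * address family this way.  (Illustrative note; see the individual drivers
 * for the authoritative usage.)
 */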
2152 void
2153 bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
2154 {
2155 	struct bintime bt;
2156 	struct mbuf mb;
2157 	struct bpf_d *d;
2158 	u_int pktlen, slen;
2159 	int gottime;
2160 
2161 	/* Skip outgoing duplicate packets. */
2162 	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2163 		m->m_flags &= ~M_PROMISC;
2164 		return;
2165 	}
2166 
2167 	pktlen = m_length(m, NULL);
2168 	/*
2169 	 * Craft on-stack mbuf suitable for passing to bpf_filter.
2170 	 * Note that we cut corners here; we only set up what is
2171 	 * absolutely needed, and this mbuf must never go anywhere else.
2172 	 */
2173 	mb.m_next = m;
2174 	mb.m_data = data;
2175 	mb.m_len = dlen;
2176 	pktlen += dlen;
2177 
2178 	gottime = BPF_TSTAMP_NONE;
2179 
2180 	BPFIF_RLOCK(bp);
2181 
2182 	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2183 		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2184 			continue;
2185 		++d->bd_rcount;
2186 		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
2187 		if (slen != 0) {
2188 			BPFD_LOCK(d);
2189 
2190 			d->bd_fcount++;
2191 			if (gottime < bpf_ts_quality(d->bd_tstamp))
2192 				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2193 #ifdef MAC
2194 			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2195 #endif
2196 				catchpacket(d, (u_char *)&mb, pktlen, slen,
2197 				    bpf_append_mbuf, &bt);
2198 			BPFD_UNLOCK(d);
2199 		}
2200 	}
2201 	BPFIF_RUNLOCK(bp);
2202 }
2203 
2204 #undef	BPF_CHECK_DIRECTION
2205 
2206 #undef	BPF_TSTAMP_NONE
2207 #undef	BPF_TSTAMP_FAST
2208 #undef	BPF_TSTAMP_NORMAL
2209 #undef	BPF_TSTAMP_EXTERN
2210 
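/*
 * Compute the number of bytes, including alignment padding, that the BPF
 * header will occupy in front of a captured packet for this descriptor.
 * The padding is chosen so that the captured link-layer header ends on a
 * word boundary, keeping the payload that follows it aligned.
 */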
2211 static int
2212 bpf_hdrlen(struct bpf_d *d)
2213 {
2214 	int hdrlen;
2215 
2216 	hdrlen = d->bd_bif->bif_hdrlen;
2217 #ifndef BURN_BRIDGES
2218 	if (d->bd_tstamp == BPF_T_NONE ||
2219 	    BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
2220 #ifdef COMPAT_FREEBSD32
2221 		if (d->bd_compat32)
2222 			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32);
2223 		else
2224 #endif
2225 			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr);
2226 	else
2227 #endif
2228 		hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr);
2229 #ifdef COMPAT_FREEBSD32
2230 	if (d->bd_compat32)
2231 		hdrlen = BPF_WORDALIGN32(hdrlen);
2232 	else
2233 #endif
2234 		hdrlen = BPF_WORDALIGN(hdrlen);
2235 
2236 	return (hdrlen - d->bd_bif->bif_hdrlen);
2237 }
2238 
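/*
 * Convert a monotonic bintime into the representation requested by tstype:
 * adjusted to wall-clock time unless BPF_T_MONOTONIC is set, and expressed
 * as microseconds, nanoseconds or raw bintime fractions according to
 * BPF_T_FORMAT(tstype).
 */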
2239 static void
2240 bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
2241 {
2242 	struct bintime bt2;
2243 	struct timeval tsm;
2244 	struct timespec tsn;
2245 
2246 	if ((tstype & BPF_T_MONOTONIC) == 0) {
2247 		bt2 = *bt;
2248 		bintime_add(&bt2, &boottimebin);
2249 		bt = &bt2;
2250 	}
2251 	switch (BPF_T_FORMAT(tstype)) {
2252 	case BPF_T_MICROTIME:
2253 		bintime2timeval(bt, &tsm);
2254 		ts->bt_sec = tsm.tv_sec;
2255 		ts->bt_frac = tsm.tv_usec;
2256 		break;
2257 	case BPF_T_NANOTIME:
2258 		bintime2timespec(bt, &tsn);
2259 		ts->bt_sec = tsn.tv_sec;
2260 		ts->bt_frac = tsn.tv_nsec;
2261 		break;
2262 	case BPF_T_BINTIME:
2263 		ts->bt_sec = bt->sec;
2264 		ts->bt_frac = bt->frac;
2265 		break;
2266 	}
2267 }
2268 
2269 /*
2270  * Move the packet data from interface memory (pkt) into the
2271  * store buffer.  "cpfn" is the routine called to do the actual data
2272  * transfer.  bpf_append_bytes is passed in to copy contiguous chunks,
2273  * while bpf_append_mbuf is passed in to copy mbuf chains.  In the latter
2274  * case, pkt is really an mbuf.
2275  */
2276 static void
2277 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
2278     void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
2279     struct bintime *bt)
2280 {
2281 	struct bpf_xhdr hdr;
2282 #ifndef BURN_BRIDGES
2283 	struct bpf_hdr hdr_old;
2284 #ifdef COMPAT_FREEBSD32
2285 	struct bpf_hdr32 hdr32_old;
2286 #endif
2287 #endif
2288 	int caplen, curlen, hdrlen, totlen;
2289 	int do_wakeup = 0;
2290 	int do_timestamp;
2291 	int tstype;
2292 
2293 	BPFD_LOCK_ASSERT(d);
2294 
2295 	/*
2296 	 * Detect whether user space has released a buffer back to us, and if
2297 	 * so, move it from being a hold buffer to a free buffer.  This may
2298 	 * not be the best place to do it (for example, we might only want to
2299 	 * run this check if we need the space), but for now it's a reliable
2300 	 * spot to do it.
2301 	 */
2302 	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
2303 		while (d->bd_hbuf_in_use)
2304 			mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
2305 			    PRINET, "bd_hbuf", 0);
2306 		d->bd_fbuf = d->bd_hbuf;
2307 		d->bd_hbuf = NULL;
2308 		d->bd_hlen = 0;
2309 		bpf_buf_reclaimed(d);
2310 	}
2311 
2312 	/*
2313 	 * Figure out how many bytes to move.  If the packet is
2314 	 * greater or equal to the snapshot length, transfer that
2315 	 * much.  Otherwise, transfer the whole packet (unless
2316 	 * we hit the buffer size limit).
2317 	 */
2318 	hdrlen = bpf_hdrlen(d);
2319 	totlen = hdrlen + min(snaplen, pktlen);
2320 	if (totlen > d->bd_bufsize)
2321 		totlen = d->bd_bufsize;
2322 
2323 	/*
2324 	 * Round up the end of the previous packet to the next longword.
2325 	 *
2326 	 * Drop the packet if there's no room and no hope of room.
2327 	 * If the packet would overflow the storage buffer or the storage
2328 	 * buffer is considered immutable by the buffer model, try to rotate
2329 	 * the buffer and wake up pending processes.
2330 	 */
2331 #ifdef COMPAT_FREEBSD32
2332 	if (d->bd_compat32)
2333 		curlen = BPF_WORDALIGN32(d->bd_slen);
2334 	else
2335 #endif
2336 		curlen = BPF_WORDALIGN(d->bd_slen);
2337 	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
2338 		if (d->bd_fbuf == NULL) {
2339 			/*
2340 			 * There's no room in the store buffer, and no
2341 			 * prospect of room, so drop the packet.  Notify the
2342 			 * buffer model.
2343 			 */
2344 			bpf_buffull(d);
2345 			++d->bd_dcount;
2346 			return;
2347 		}
2348 		while (d->bd_hbuf_in_use)
2349 			mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
2350 			    PRINET, "bd_hbuf", 0);
2351 		ROTATE_BUFFERS(d);
2352 		do_wakeup = 1;
2353 		curlen = 0;
2354 	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
2355 		/*
2356 		 * Immediate mode is set, or the read timeout has already
2357 		 * expired during a select call.  A packet arrived, so the
2358 		 * reader should be woken up.
2359 		 */
2360 		do_wakeup = 1;
2361 	caplen = totlen - hdrlen;
2362 	tstype = d->bd_tstamp;
2363 	do_timestamp = tstype != BPF_T_NONE;
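	/*
	 * Unless the kernel is built with BURN_BRIDGES, descriptors that asked
	 * for the traditional microsecond format (or no timestamp at all) get
	 * the legacy struct bpf_hdr layout; everything else falls through to
	 * struct bpf_xhdr below.
	 */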
2364 #ifndef BURN_BRIDGES
2365 	if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) {
2366 		struct bpf_ts ts;
2367 		if (do_timestamp)
2368 			bpf_bintime2ts(bt, &ts, tstype);
2369 #ifdef COMPAT_FREEBSD32
2370 		if (d->bd_compat32) {
2371 			bzero(&hdr32_old, sizeof(hdr32_old));
2372 			if (do_timestamp) {
2373 				hdr32_old.bh_tstamp.tv_sec = ts.bt_sec;
2374 				hdr32_old.bh_tstamp.tv_usec = ts.bt_frac;
2375 			}
2376 			hdr32_old.bh_datalen = pktlen;
2377 			hdr32_old.bh_hdrlen = hdrlen;
2378 			hdr32_old.bh_caplen = caplen;
2379 			bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
2380 			    sizeof(hdr32_old));
2381 			goto copy;
2382 		}
2383 #endif
2384 		bzero(&hdr_old, sizeof(hdr_old));
2385 		if (do_timestamp) {
2386 			hdr_old.bh_tstamp.tv_sec = ts.bt_sec;
2387 			hdr_old.bh_tstamp.tv_usec = ts.bt_frac;
2388 		}
2389 		hdr_old.bh_datalen = pktlen;
2390 		hdr_old.bh_hdrlen = hdrlen;
2391 		hdr_old.bh_caplen = caplen;
2392 		bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
2393 		    sizeof(hdr_old));
2394 		goto copy;
2395 	}
2396 #endif
2397 
2398 	/*
2399 	 * Append the bpf header.  Note we append the actual header size, but
2400 	 * move forward the length of the header plus padding.
2401 	 */
2402 	bzero(&hdr, sizeof(hdr));
2403 	if (do_timestamp)
2404 		bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype);
2405 	hdr.bh_datalen = pktlen;
2406 	hdr.bh_hdrlen = hdrlen;
2407 	hdr.bh_caplen = caplen;
2408 	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));
2409 
2410 	/*
2411 	 * Copy the packet data into the store buffer and update its length.
2412 	 */
2413 #ifndef BURN_BRIDGES
2414 copy:
2415 #endif
2416 	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
2417 	d->bd_slen = curlen + totlen;
2418 
2419 	if (do_wakeup)
2420 		bpf_wakeup(d);
2421 }
2422 
2423 /*
2424  * Free buffers currently in use by a descriptor.
2425  * Called on close.
2426  */
2427 static void
2428 bpf_freed(struct bpf_d *d)
2429 {
2430 
2431 	/*
2432 	 * We don't need to lock out interrupts since this descriptor has
2433 	 * been detached from its interface and it hasn't yet been marked
2434 	 * free.
2435 	 */
2436 	bpf_free(d);
2437 	if (d->bd_rfilter != NULL) {
2438 		free((caddr_t)d->bd_rfilter, M_BPF);
2439 #ifdef BPF_JITTER
2440 		if (d->bd_bfilter != NULL)
2441 			bpf_destroy_jit_filter(d->bd_bfilter);
2442 #endif
2443 	}
2444 	if (d->bd_wfilter != NULL)
2445 		free((caddr_t)d->bd_wfilter, M_BPF);
2446 	mtx_destroy(&d->bd_lock);
2447 }
2448 
2449 /*
2450  * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
2451  * fixed size of the link header (variable length headers not yet supported).
2452  */
2453 void
2454 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2455 {
2456 
2457 	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2458 }
2459 
2460 /*
2461  * Attach an interface to bpf.  ifp is a pointer to the structure
2462  * defining the interface to be attached, dlt is the link layer type,
2463  * and hdrlen is the fixed size of the link header (variable length
2464  * headers are not yet supported).
2465  */
2466 void
2467 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2468 {
2469 	struct bpf_if *bp;
2470 
2471 	bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
2472 	if (bp == NULL)
2473 		panic("bpfattach");
2474 
2475 	LIST_INIT(&bp->bif_dlist);
2476 	LIST_INIT(&bp->bif_wlist);
2477 	bp->bif_ifp = ifp;
2478 	bp->bif_dlt = dlt;
2479 	rw_init(&bp->bif_lock, "bpf interface lock");
2480 	KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
2481 	*driverp = bp;
2482 
2483 	BPF_LOCK();
2484 	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
2485 	BPF_UNLOCK();
2486 
2487 	bp->bif_hdrlen = hdrlen;
2488 
2489 	if (bootverbose)
2490 		if_printf(ifp, "bpf attached\n");
2491 }
2492 
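/*
 * A minimal driver-side usage sketch (illustrative only; for Ethernet
 * interfaces these calls are normally made on the driver's behalf by
 * ether_ifattach()/ether_ifdetach()):
 *
 *	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
 *	...
 *	BPF_MTAP(ifp, m);	(the macro in <net/bpf.h>; it checks
 *				 bpf_peers_present() before calling bpf_mtap())
 *	...
 *	bpfdetach(ifp);
 */
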
2493 /*
2494  * Detach bpf from an interface. This involves detaching each descriptor
2495  * associated with the interface. Notify each descriptor as it's detached
2496  * so that any sleepers wake up and get ENXIO.
2497  */
2498 void
2499 bpfdetach(struct ifnet *ifp)
2500 {
2501 	struct bpf_if	*bp, *bp_temp;
2502 	struct bpf_d	*d;
2503 	int ndetached;
2504 
2505 	ndetached = 0;
2506 
2507 	BPF_LOCK();
2508 	/* Find all bpf_if structs which reference ifp and detach them. */
2509 	LIST_FOREACH_SAFE(bp, &bpf_iflist, bif_next, bp_temp) {
2510 		if (ifp != bp->bif_ifp)
2511 			continue;
2512 
2513 		LIST_REMOVE(bp, bif_next);
2514 		/* Add to to-be-freed list */
2515 		LIST_INSERT_HEAD(&bpf_freelist, bp, bif_next);
2516 
2517 		ndetached++;
2518 		/*
2519 		 * Delay freeing bp until the interface is detached and
2520 		 * all routes through this interface have been removed.
2521 		 * Mark bp as detached to restrict new consumers.
2522 		 */
2523 		BPFIF_WLOCK(bp);
2524 		bp->flags |= BPFIF_FLAG_DYING;
2525 		BPFIF_WUNLOCK(bp);
2526 
2527 		CTR4(KTR_NET, "%s: scheduling free for encap %d (%p) for if %p",
2528 		    __func__, bp->bif_dlt, bp, ifp);
2529 
2530 		/* Detach common (reader) descriptors */
2531 		while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
2532 			bpf_detachd_locked(d);
2533 			BPFD_LOCK(d);
2534 			bpf_wakeup(d);
2535 			BPFD_UNLOCK(d);
2536 		}
2537 
2538 		/* Detach writer-only descriptors */
2539 		while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) {
2540 			bpf_detachd_locked(d);
2541 			BPFD_LOCK(d);
2542 			bpf_wakeup(d);
2543 			BPFD_UNLOCK(d);
2544 		}
2545 	}
2546 	BPF_UNLOCK();
2547 
2548 #ifdef INVARIANTS
2549 	if (ndetached == 0)
2550 		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
2551 #endif
2552 }
2553 
2554 /*
2555  * Interface departure handler.
2556  * Note that a departure event does not guarantee that the interface is
2557  * going down; interface renaming is currently done via a departure and
2558  * arrival event pair.
2559  *
2560  * The departure handler runs after all routes pointing to the given
2561  * interface have been removed and the interface is down, so no packets
2562  * can be sent or received.  We assume it is now safe to free BPF's data.
2563  */
2564 static void
2565 bpf_ifdetach(void *arg __unused, struct ifnet *ifp)
2566 {
2567 	struct bpf_if *bp, *bp_temp;
2568 	int nmatched = 0;
2569 
2570 	BPF_LOCK();
2571 	/*
2572 	 * Find matching entries in free list.
2573 	 * Nothing should be found if bpfdetach() was not called.
2574 	 */
2575 	LIST_FOREACH_SAFE(bp, &bpf_freelist, bif_next, bp_temp) {
2576 		if (ifp != bp->bif_ifp)
2577 			continue;
2578 
2579 		CTR3(KTR_NET, "%s: freeing BPF instance %p for interface %p",
2580 		    __func__, bp, ifp);
2581 
2582 		LIST_REMOVE(bp, bif_next);
2583 
2584 		rw_destroy(&bp->bif_lock);
2585 		free(bp, M_BPF);
2586 
2587 		nmatched++;
2588 	}
2589 	BPF_UNLOCK();
2590 
2591 	/*
2592 	 * Note that we cannot zero other pointers to
2593 	 * custom DLTs possibly used by the given interface.
2594 	 */
2595 	if (nmatched != 0)
2596 		ifp->if_bpf = NULL;
2597 }
2598 
2599 /*
2600  * Get a list of available data link type of the interface.
2601  * Get a list of the data link types available on the interface.
2602 static int
2603 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
2604 {
2605 	int n, error;
2606 	struct ifnet *ifp;
2607 	struct bpf_if *bp;
2608 
2609 	BPF_LOCK_ASSERT();
2610 
2611 	ifp = d->bd_bif->bif_ifp;
2612 	n = 0;
2613 	error = 0;
2614 	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2615 		if (bp->bif_ifp != ifp)
2616 			continue;
2617 		if (bfl->bfl_list != NULL) {
2618 			if (n >= bfl->bfl_len)
2619 				return (ENOMEM);
2620 			error = copyout(&bp->bif_dlt,
2621 			    bfl->bfl_list + n, sizeof(u_int));
2622 		}
2623 		n++;
2624 	}
2625 	bfl->bfl_len = n;
2626 	return (error);
2627 }
2628 
2629 /*
2630  * Set the data link type of a BPF instance.
2631  */
2632 static int
2633 bpf_setdlt(struct bpf_d *d, u_int dlt)
2634 {
2635 	int error, opromisc;
2636 	struct ifnet *ifp;
2637 	struct bpf_if *bp;
2638 
2639 	BPF_LOCK_ASSERT();
2640 
2641 	if (d->bd_bif->bif_dlt == dlt)
2642 		return (0);
2643 	ifp = d->bd_bif->bif_ifp;
2644 
2645 	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2646 		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
2647 			break;
2648 	}
2649 
2650 	if (bp != NULL) {
2651 		opromisc = d->bd_promisc;
2652 		bpf_attachd(d, bp);
2653 		BPFD_LOCK(d);
2654 		reset_d(d);
2655 		BPFD_UNLOCK(d);
2656 		if (opromisc) {
2657 			error = ifpromisc(bp->bif_ifp, 1);
2658 			if (error)
2659 				if_printf(bp->bif_ifp,
2660 					"bpf_setdlt: ifpromisc failed (%d)\n",
2661 					error);
2662 			else
2663 				d->bd_promisc = 1;
2664 		}
2665 	}
2666 	return (bp == NULL ? EINVAL : 0);
2667 }
2668 
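/*
 * Boot-time initialization: set up the global lock and lists, create
 * /dev/bpf (plus a legacy /dev/bpf0 alias) and register the interface
 * departure handler.  Run once via the SYSINIT() below.
 */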
2669 static void
2670 bpf_drvinit(void *unused)
2671 {
2672 	struct cdev *dev;
2673 
2674 	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
2675 	LIST_INIT(&bpf_iflist);
2676 	LIST_INIT(&bpf_freelist);
2677 
2678 	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
2679 	/* For compatibility */
2680 	make_dev_alias(dev, "bpf0");
2681 
2682 	/* Register interface departure handler */
2683 	bpf_ifdetach_cookie = EVENTHANDLER_REGISTER(
2684 		    ifnet_departure_event, bpf_ifdetach, NULL,
2685 		    EVENTHANDLER_PRI_ANY);
2686 }
2687 
2688 /*
2689  * Zero out the various packet counters associated with all of the bpf
2690  * descriptors.  At some point, we will probably want to get a bit more
2691  * granular and allow the user to specify descriptors to be zeroed.
2692  */
2693 static void
2694 bpf_zero_counters(void)
2695 {
2696 	struct bpf_if *bp;
2697 	struct bpf_d *bd;
2698 
2699 	BPF_LOCK();
2700 	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2701 		BPFIF_RLOCK(bp);
2702 		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2703 			BPFD_LOCK(bd);
2704 			bd->bd_rcount = 0;
2705 			bd->bd_dcount = 0;
2706 			bd->bd_fcount = 0;
2707 			bd->bd_wcount = 0;
2708 			bd->bd_wfcount = 0;
2709 			bd->bd_zcopy = 0;
2710 			BPFD_UNLOCK(bd);
2711 		}
2712 		BPFIF_RUNLOCK(bp);
2713 	}
2714 	BPF_UNLOCK();
2715 }
2716 
2717 /*
2718  * Fill filter statistics
2719  */
2720 static void
2721 bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
2722 {
2723 
2724 	bzero(d, sizeof(*d));
2725 	BPFD_LOCK_ASSERT(bd);
2726 	d->bd_structsize = sizeof(*d);
2727 	/* XXX: reading should be protected by global lock */
2728 	d->bd_immediate = bd->bd_immediate;
2729 	d->bd_promisc = bd->bd_promisc;
2730 	d->bd_hdrcmplt = bd->bd_hdrcmplt;
2731 	d->bd_direction = bd->bd_direction;
2732 	d->bd_feedback = bd->bd_feedback;
2733 	d->bd_async = bd->bd_async;
2734 	d->bd_rcount = bd->bd_rcount;
2735 	d->bd_dcount = bd->bd_dcount;
2736 	d->bd_fcount = bd->bd_fcount;
2737 	d->bd_sig = bd->bd_sig;
2738 	d->bd_slen = bd->bd_slen;
2739 	d->bd_hlen = bd->bd_hlen;
2740 	d->bd_bufsize = bd->bd_bufsize;
2741 	d->bd_pid = bd->bd_pid;
2742 	strlcpy(d->bd_ifname,
2743 	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
2744 	d->bd_locked = bd->bd_locked;
2745 	d->bd_wcount = bd->bd_wcount;
2746 	d->bd_wdcount = bd->bd_wdcount;
2747 	d->bd_wfcount = bd->bd_wfcount;
2748 	d->bd_zcopy = bd->bd_zcopy;
2749 	d->bd_bufmode = bd->bd_bufmode;
2750 }
2751 
2752 /*
2753  * Handle `netstat -B' stats request
2754  */
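/*
 * The handler exports one struct xbpf_d for every descriptor attached to an
 * interface, listing each interface's writer-only descriptors first.
 * Writing an all-zero struct xbpf_d through the same sysctl zeroes the
 * counters instead of setting them.
 */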
2755 static int
2756 bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
2757 {
2758 	struct xbpf_d *xbdbuf, *xbd, zerostats;
2759 	int index, error;
2760 	struct bpf_if *bp;
2761 	struct bpf_d *bd;
2762 
2763 	/*
2764 	 * XXX This is not technically correct.  It is possible for
2765 	 * unprivileged users to open bpf devices.  It would make sense
2766 	 * if the users who opened the devices were able to retrieve
2767 	 * the statistics for them, too.
2768 	 */
2769 	error = priv_check(req->td, PRIV_NET_BPF);
2770 	if (error)
2771 		return (error);
2772 	/*
2773 	 * Check to see if the user is requesting that the counters be
2774 	 * zeroed out.  Explicitly check that the supplied data is zeroed,
2775 	 * as we aren't allowing the user to set the counters currently.
2776 	 */
2777 	if (req->newptr != NULL) {
2778 		if (req->newlen != sizeof(zerostats))
2779 			return (EINVAL);
2780 		bzero(&zerostats, sizeof(zerostats));
2781 		xbd = req->newptr;
2782 		if (bcmp(xbd, &zerostats, sizeof(*xbd)) != 0)
2783 			return (EINVAL);
2784 		bpf_zero_counters();
2785 		return (0);
2786 	}
2787 	if (req->oldptr == NULL)
2788 		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
2789 	if (bpf_bpfd_cnt == 0)
2790 		return (SYSCTL_OUT(req, 0, 0));
2791 	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
2792 	BPF_LOCK();
2793 	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
2794 		BPF_UNLOCK();
2795 		free(xbdbuf, M_BPF);
2796 		return (ENOMEM);
2797 	}
2798 	index = 0;
2799 	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2800 		BPFIF_RLOCK(bp);
2801 		/* Send writers-only first */
2802 		LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
2803 			xbd = &xbdbuf[index++];
2804 			BPFD_LOCK(bd);
2805 			bpfstats_fill_xbpf(xbd, bd);
2806 			BPFD_UNLOCK(bd);
2807 		}
2808 		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2809 			xbd = &xbdbuf[index++];
2810 			BPFD_LOCK(bd);
2811 			bpfstats_fill_xbpf(xbd, bd);
2812 			BPFD_UNLOCK(bd);
2813 		}
2814 		BPFIF_RUNLOCK(bp);
2815 	}
2816 	BPF_UNLOCK();
2817 	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
2818 	free(xbdbuf, M_BPF);
2819 	return (error);
2820 }
2821 
2822 SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);
2823 
2824 #else /* !DEV_BPF && !NETGRAPH_BPF */
2825 /*
2826  * NOP stubs to allow bpf-using drivers to load and function.
2827  *
2828  * A 'better' implementation would allow the core bpf functionality
2829  * to be loaded at runtime.
2830  */
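/*
 * bp_null is a zeroed placeholder handed out by the stub bpfattach2() so
 * that ifp->if_bpf is never left NULL even when BPF support is compiled out.
 */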
2831 static struct bpf_if bp_null;
2832 
2833 void
2834 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2835 {
2836 }
2837 
2838 void
2839 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2840 {
2841 }
2842 
2843 void
2844 bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
2845 {
2846 }
2847 
2848 void
2849 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2850 {
2851 
2852 	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2853 }
2854 
2855 void
2856 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2857 {
2858 
2859 	*driverp = &bp_null;
2860 }
2861 
2862 void
2863 bpfdetach(struct ifnet *ifp)
2864 {
2865 }
2866 
2867 u_int
2868 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
2869 {
2870 	return (-1);	/* "no filter" behaviour */
2871 }
2872 
2873 int
2874 bpf_validate(const struct bpf_insn *f, int len)
2875 {
2876 	return (0);		/* false */
2877 }
2878 
2879 #endif /* !DEV_BPF && !NETGRAPH_BPF */
2880