xref: /freebsd/sys/net/bpf.c (revision 0f8f86b71f022b803e99151c19db81b280f245dc)
1 /*
2  * Copyright (c) 1990, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from the Stanford/CMU enet packet filter,
6  * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7  * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8  * Berkeley Laboratory.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *      @(#)bpf.c	8.4 (Berkeley) 1/9/95
39  *
40  * $FreeBSD$
41  */
42 
43 #include "opt_bpf.h"
44 #include "opt_mac.h"
45 #include "opt_netgraph.h"
46 
47 #include <sys/types.h>
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/conf.h>
51 #include <sys/mac.h>
52 #include <sys/malloc.h>
53 #include <sys/mbuf.h>
54 #include <sys/time.h>
55 #include <sys/proc.h>
56 #include <sys/signalvar.h>
57 #include <sys/filio.h>
58 #include <sys/sockio.h>
59 #include <sys/ttycom.h>
60 #include <sys/filedesc.h>
61 
62 #include <sys/event.h>
63 #include <sys/file.h>
64 #include <sys/poll.h>
65 #include <sys/proc.h>
66 
67 #include <sys/socket.h>
68 #include <sys/vnode.h>
69 
70 #include <net/if.h>
71 #include <net/bpf.h>
72 #include <net/bpfdesc.h>
73 
74 #include <netinet/in.h>
75 #include <netinet/if_ether.h>
76 #include <sys/kernel.h>
77 #include <sys/sysctl.h>
78 
79 static MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
80 
81 #if defined(DEV_BPF) || defined(NETGRAPH_BPF)
82 
83 #define PRINET  26			/* interruptible */
84 
85 /*
86  * The default read buffer size is patchable.
87  */
88 static int bpf_bufsize = 4096;
89 SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
90 	&bpf_bufsize, 0, "");
91 static int bpf_maxbufsize = BPF_MAXBUFSIZE;
92 SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
93 	&bpf_maxbufsize, 0, "");
94 
95 /*
96  *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
97  */
98 static struct bpf_if	*bpf_iflist;
99 static struct mtx	bpf_mtx;		/* bpf global lock */
100 
101 static int	bpf_allocbufs(struct bpf_d *);
102 static void	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
103 static void	bpf_detachd(struct bpf_d *d);
104 static void	bpf_freed(struct bpf_d *);
105 static void	bpf_mcopy(const void *, void *, size_t);
106 static int	bpf_movein(struct uio *, int,
107 		    struct mbuf **, struct sockaddr *, int *);
108 static int	bpf_setif(struct bpf_d *, struct ifreq *);
109 static void	bpf_timed_out(void *);
110 static __inline void
111 		bpf_wakeup(struct bpf_d *);
112 static void	catchpacket(struct bpf_d *, u_char *, u_int,
113 		    u_int, void (*)(const void *, void *, size_t));
114 static void	reset_d(struct bpf_d *);
115 static int	 bpf_setf(struct bpf_d *, struct bpf_program *);
116 static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
117 static int	bpf_setdlt(struct bpf_d *, u_int);
118 static void	filt_bpfdetach(struct knote *);
119 static int	filt_bpfread(struct knote *, long);
120 
121 static	d_open_t	bpfopen;
122 static	d_close_t	bpfclose;
123 static	d_read_t	bpfread;
124 static	d_write_t	bpfwrite;
125 static	d_ioctl_t	bpfioctl;
126 static	d_poll_t	bpfpoll;
127 static	d_kqfilter_t	bpfkqfilter;
128 
/*
 * Character-device entry points for the bpf minors.  D_NEEDGIANT: all
 * entry points run under the Giant lock.
 */
static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	bpfopen,
	.d_close =	bpfclose,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_kqfilter =	bpfkqfilter,
};
141 
/*
 * kqueue EVFILT_READ filter ops.  The leading 1 presumably marks the
 * filter as fd-based (f_isfd) -- confirm against sys/event.h.
 */
static struct filterops bpfread_filtops =
	{ 1, NULL, filt_bpfdetach, filt_bpfread };
144 
145 static int
146 bpf_movein(uio, linktype, mp, sockp, datlen)
147 	struct uio *uio;
148 	int linktype, *datlen;
149 	struct mbuf **mp;
150 	struct sockaddr *sockp;
151 {
152 	struct mbuf *m;
153 	int error;
154 	int len;
155 	int hlen;
156 
157 	/*
158 	 * Build a sockaddr based on the data link layer type.
159 	 * We do this at this level because the ethernet header
160 	 * is copied directly into the data field of the sockaddr.
161 	 * In the case of SLIP, there is no header and the packet
162 	 * is forwarded as is.
163 	 * Also, we are careful to leave room at the front of the mbuf
164 	 * for the link level header.
165 	 */
166 	switch (linktype) {
167 
168 	case DLT_SLIP:
169 		sockp->sa_family = AF_INET;
170 		hlen = 0;
171 		break;
172 
173 	case DLT_EN10MB:
174 		sockp->sa_family = AF_UNSPEC;
175 		/* XXX Would MAXLINKHDR be better? */
176 		hlen = ETHER_HDR_LEN;
177 		break;
178 
179 	case DLT_FDDI:
180 		sockp->sa_family = AF_IMPLINK;
181 		hlen = 0;
182 		break;
183 
184 	case DLT_RAW:
185 	case DLT_NULL:
186 		sockp->sa_family = AF_UNSPEC;
187 		hlen = 0;
188 		break;
189 
190 	case DLT_ATM_RFC1483:
191 		/*
192 		 * en atm driver requires 4-byte atm pseudo header.
193 		 * though it isn't standard, vpi:vci needs to be
194 		 * specified anyway.
195 		 */
196 		sockp->sa_family = AF_UNSPEC;
197 		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
198 		break;
199 
200 	case DLT_PPP:
201 		sockp->sa_family = AF_UNSPEC;
202 		hlen = 4;	/* This should match PPP_HDRLEN */
203 		break;
204 
205 	default:
206 		return (EIO);
207 	}
208 
209 	len = uio->uio_resid;
210 	*datlen = len - hlen;
211 	if ((unsigned)len > MCLBYTES)
212 		return (EIO);
213 
214 	if (len > MHLEN) {
215 		m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
216 	} else {
217 		MGETHDR(m, M_TRYWAIT, MT_DATA);
218 	}
219 	if (m == NULL)
220 		return (ENOBUFS);
221 	m->m_pkthdr.len = m->m_len = len;
222 	m->m_pkthdr.rcvif = NULL;
223 	*mp = m;
224 
225 	/*
226 	 * Make room for link header.
227 	 */
228 	if (hlen != 0) {
229 		m->m_pkthdr.len -= hlen;
230 		m->m_len -= hlen;
231 #if BSD >= 199103
232 		m->m_data += hlen; /* XXX */
233 #else
234 		m->m_off += hlen;
235 #endif
236 		error = uiomove(sockp->sa_data, hlen, uio);
237 		if (error)
238 			goto bad;
239 	}
240 	error = uiomove(mtod(m, void *), len - hlen, uio);
241 	if (!error)
242 		return (0);
243 bad:
244 	m_freem(m);
245 	return (error);
246 }
247 
248 /*
249  * Attach file to the bpf interface, i.e. make d listen on bp.
250  */
251 static void
252 bpf_attachd(d, bp)
253 	struct bpf_d *d;
254 	struct bpf_if *bp;
255 {
256 	/*
257 	 * Point d at bp, and add d to the interface's list of listeners.
258 	 * Finally, point the driver's bpf cookie at the interface so
259 	 * it will divert packets to bpf.
260 	 */
261 	BPFIF_LOCK(bp);
262 	d->bd_bif = bp;
263 	d->bd_next = bp->bif_dlist;
264 	bp->bif_dlist = d;
265 
266 	*bp->bif_driverp = bp;
267 	BPFIF_UNLOCK(bp);
268 }
269 
/*
 * Detach a file from its interface: drop promiscuous mode if this
 * descriptor had enabled it, then unlink d from the interface's
 * listener list.  If d was the last listener, clear the driver's bpf
 * cookie so the driver stops diverting packets.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	int error;
	struct bpf_d **p;
	struct bpf_if *bp;

	/* XXX locking */
	bp = d->bd_bif;
	d->bd_bif = 0;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(bp->bif_ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
				"bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
	/* Remove d from the interface's descriptor list. */
	BPFIF_LOCK(bp);
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*bp->bif_driverp = 0;
	BPFIF_UNLOCK(bp);
}
318 
/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static	int
bpfopen(dev, flags, fmt, td)
	dev_t dev;
	int flags;
	int fmt;
	struct thread *td;
{
	struct bpf_d *d;

	mtx_lock(&bpf_mtx);
	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d) {
		mtx_unlock(&bpf_mtx);
		return (EBUSY);
	}
	/*
	 * Reserve the minor with a sentinel before dropping the global
	 * lock so a racing open sees it busy; the real descriptor
	 * pointer is installed below.
	 */
	dev->si_drv1 = (struct bpf_d *)~0;	/* mark device in use */
	mtx_unlock(&bpf_mtx);

	/* Create the device node on first open if it isn't named yet. */
	if ((dev->si_flags & SI_NAMED) == 0)
		make_dev(&bpf_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600,
		    "bpf%d", dev2unit(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;	/* default; changeable via BIOCSBLEN */
	d->bd_sig = SIGIO;		/* default async-mode signal */
	d->bd_seesent = 1;		/* see transmitted packets by default */
#ifdef MAC
	mac_init_bpfdesc(d);
	mac_create_bpfdesc(td->td_ucred, d);
#endif
	mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init(&d->bd_callout, CALLOUT_MPSAFE);

	return (0);
}
363 
/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static	int
bpfclose(dev, flags, fmt, td)
	dev_t dev;
	int flags;
	int fmt;
	struct thread *td;
{
	struct bpf_d *d = dev->si_drv1;

	/* Cancel any pending read timeout before tearing down. */
	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);	/* drop SIGIO ownership */
	mtx_lock(&bpf_mtx);
	if (d->bd_bif)
		bpf_detachd(d);
	mtx_unlock(&bpf_mtx);
#ifdef MAC
	mac_destroy_bpfdesc(d);
#endif /* MAC */
	bpf_freed(d);
	dev->si_drv1 = 0;	/* minor becomes openable again */
	free(d, M_BPF);

	return (0);
}
397 
398 
/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and is safe inside unbraced if/else bodies.
 */
#define ROTATE_BUFFERS(d) do { \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0; \
} while (0)
/*
 * bpfread - read next chunk of packets from buffers
 *
 * Sleeps until the hold buffer fills (or the read timeout fires), then
 * copies the whole hold buffer to user space and recycles it as the
 * new free buffer.  The user buffer must be exactly bd_bufsize bytes.
 */
static	int
bpfread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;

	/*
	 * Restrict application to use a buffer the same size as
	 * as kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (ioflag & IO_NDELAY) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		/* Sleep until a wakeup from catchpacket() or bd_rtout expiry. */
		error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
		     "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 *
	 * NOTE(review): the lock is dropped across uiomove() (it may
	 * sleep); this assumes bd_hbuf is stable while a read is in
	 * flight -- confirm against concurrent readers.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	BPFD_LOCK(d);
	/* Recycle the hold buffer as the new free buffer. */
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	BPFD_UNLOCK(d);

	return (error);
}
514 
515 
/*
 * If there are processes sleeping on this descriptor, wake them up.
 * Also posts SIGIO when async mode was requested (FIOASYNC), wakes
 * select/poll waiters, and fires attached kevent read filters.
 */
static __inline void
bpf_wakeup(d)
	struct bpf_d *d;
{
	/* A wakeup satisfies any pending read timeout. */
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);	/* rouse msleep()ers in bpfread() */
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE(&d->bd_sel.si_note, 0);
}
534 
/*
 * Read-timeout callout handler: if the descriptor is still waiting,
 * mark it timed out and, if there is buffered data, wake the reader.
 */
static void
bpf_timed_out(arg)
	void *arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	BPFD_UNLOCK(d);
}
549 
550 static	int
551 bpfwrite(dev, uio, ioflag)
552 	dev_t dev;
553 	struct uio *uio;
554 	int ioflag;
555 {
556 	struct bpf_d *d = dev->si_drv1;
557 	struct ifnet *ifp;
558 	struct mbuf *m;
559 	int error;
560 	static struct sockaddr dst;
561 	int datlen;
562 
563 	if (d->bd_bif == 0)
564 		return (ENXIO);
565 
566 	ifp = d->bd_bif->bif_ifp;
567 
568 	if (uio->uio_resid == 0)
569 		return (0);
570 
571 	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
572 	if (error)
573 		return (error);
574 
575 	if (datlen > ifp->if_mtu)
576 		return (EMSGSIZE);
577 
578 	if (d->bd_hdrcmplt)
579 		dst.sa_family = pseudo_AF_HDRCMPLT;
580 
581 #ifdef MAC
582 	BPFD_LOCK(d);
583 	mac_create_mbuf_from_bpfdesc(d, m);
584 	BPFD_UNLOCK(d);
585 #endif
586 	mtx_lock(&Giant);
587 	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
588 	mtx_unlock(&Giant);
589 	/*
590 	 * The driver frees the mbuf.
591 	 */
592 	return (error);
593 }
594 
595 /*
596  * Reset a descriptor by flushing its packet buffer and clearing the
597  * receive and drop counts.
598  */
599 static void
600 reset_d(d)
601 	struct bpf_d *d;
602 {
603 
604 	mtx_assert(&d->bd_mtx, MA_OWNED);
605 	if (d->bd_hbuf) {
606 		/* Free the hold buffer. */
607 		d->bd_fbuf = d->bd_hbuf;
608 		d->bd_hbuf = 0;
609 	}
610 	d->bd_slen = 0;
611 	d->bd_hlen = 0;
612 	d->bd_rcount = 0;
613 	d->bd_dcount = 0;
614 }
615 
616 /*
617  *  FIONREAD		Check for read packet available.
618  *  SIOCGIFADDR		Get interface address - convenient hook to driver.
619  *  BIOCGBLEN		Get buffer len [for read()].
620  *  BIOCSETF		Set ethernet read filter.
621  *  BIOCFLUSH		Flush read packet buffer.
622  *  BIOCPROMISC		Put interface into promiscuous mode.
623  *  BIOCGDLT		Get link layer type.
624  *  BIOCGETIF		Get interface name.
625  *  BIOCSETIF		Set interface.
626  *  BIOCSRTIMEOUT	Set read timeout.
627  *  BIOCGRTIMEOUT	Get read timeout.
628  *  BIOCGSTATS		Get packet stats.
629  *  BIOCIMMEDIATE	Set immediate mode.
630  *  BIOCVERSION		Get filter language version.
631  *  BIOCGHDRCMPLT	Get "header already complete" flag
632  *  BIOCSHDRCMPLT	Set "header already complete" flag
633  *  BIOCGSEESENT	Get "see packets sent" flag
634  *  BIOCSSEESENT	Set "see packets sent" flag
635  */
/* ARGSUSED */
static	int
bpfioctl(dev, cmd, addr, flags, td)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct thread *td;
{
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	/* Any ioctl cancels a pending read timeout. */
	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_LOCK(d);
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_UNLOCK(d);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				/* Forward to the bound interface's ioctl routine. */
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is
	 * attached; the size is clamped to [BPF_MINBUFSIZE,
	 * bpf_maxbufsize] and the clamped value is copied back out.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		/* Only the first request per descriptor bumps promisc. */
		if (d->bd_promisc == 0) {
			mtx_lock(&Giant);
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			mtx_unlock(&Giant);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/* Convert ticks back to a timeval. */
			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 * NOTE(review): counters are read without the descriptor lock,
	 * so the snapshot may be slightly stale.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}
929 
930 /*
931  * Set d's packet filter program to fp.  If this file already has a filter,
932  * free it and replace it.  Returns EINVAL for bogus requests.
933  */
934 static int
935 bpf_setf(d, fp)
936 	struct bpf_d *d;
937 	struct bpf_program *fp;
938 {
939 	struct bpf_insn *fcode, *old;
940 	u_int flen, size;
941 
942 	old = d->bd_filter;
943 	if (fp->bf_insns == 0) {
944 		if (fp->bf_len != 0)
945 			return (EINVAL);
946 		BPFD_LOCK(d);
947 		d->bd_filter = 0;
948 		reset_d(d);
949 		BPFD_UNLOCK(d);
950 		if (old != 0)
951 			free((caddr_t)old, M_BPF);
952 		return (0);
953 	}
954 	flen = fp->bf_len;
955 	if (flen > BPF_MAXINSNS)
956 		return (EINVAL);
957 
958 	size = flen * sizeof(*fp->bf_insns);
959 	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
960 	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
961 	    bpf_validate(fcode, (int)flen)) {
962 		BPFD_LOCK(d);
963 		d->bd_filter = fcode;
964 		reset_d(d);
965 		BPFD_UNLOCK(d);
966 		if (old != 0)
967 			free((caddr_t)old, M_BPF);
968 
969 		return (0);
970 	}
971 	free((caddr_t)fcode, M_BPF);
972 	return (EINVAL);
973 }
974 
/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	mtx_lock(&bpf_mtx);
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != (struct bpf_if **)&ifp->if_bpf)
			continue;

		/*
		 * NOTE(review): the global list lock is released here
		 * while bp is still used below -- relies on the
		 * interface not being detached concurrently; confirm.
		 */
		mtx_unlock(&bpf_mtx);
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		return (0);
	}
	mtx_unlock(&bpf_mtx);
	/* Not found. */
	return (ENXIO);
}
1040 
/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(dev, events, td)
	dev_t dev;
	int events;
	struct thread *td;
{
	struct bpf_d *d;
	int revents;

	d = dev->si_drv1;
	/*
	 * NOTE(review): returning an errno value from a d_poll handler
	 * (instead of revents bits) is unusual -- confirm how the
	 * caller interprets this when no interface is attached.
	 */
	if (d->bd_bif == NULL)
		return (ENXIO);

	/* Writes never block on bpf, so always report writability. */
	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_LOCK(d);
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}
1078 
1079 /*
1080  * Support for kevent() system call.  Register EVFILT_READ filters and
1081  * reject all others.
1082  */
1083 int
1084 bpfkqfilter(dev, kn)
1085 	dev_t dev;
1086 	struct knote *kn;
1087 {
1088 	struct bpf_d *d = (struct bpf_d *)dev->si_drv1;
1089 
1090 	if (kn->kn_filter != EVFILT_READ)
1091 		return (1);
1092 
1093 	kn->kn_fop = &bpfread_filtops;
1094 	kn->kn_hook = d;
1095 	BPFD_LOCK(d);
1096 	SLIST_INSERT_HEAD(&d->bd_sel.si_note, kn, kn_selnext);
1097 	BPFD_UNLOCK(d);
1098 
1099 	return (0);
1100 }
1101 
/*
 * Knote detach routine: unhook a kevent read filter from the
 * descriptor's note list (reverse of bpfkqfilter()).
 */
static void
filt_bpfdetach(kn)
	struct knote *kn;
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	BPFD_LOCK(d);
	SLIST_REMOVE(&d->bd_sel.si_note, kn, knote, kn_selnext);
	BPFD_UNLOCK(d);
}
1112 
/*
 * Kevent read filter: report whether buffered packet data is
 * available, setting kn_data to the readable byte count.  If not
 * ready, arm the read timeout (mirrors the logic in bpfpoll()).
 */
static int
filt_bpfread(kn, hint)
	struct knote *kn;
	long hint;
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK(d);
	ready = bpf_ready(d);
	if (ready) {
		/* Readable bytes: store buffer plus hold buffer if present. */
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	}
	else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		/* Start the read timeout if necessary. */
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}
	BPFD_UNLOCK(d);

	return (ready);
}
1137 
/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(bp, pkt, pktlen)
	struct bpf_if *bp;
	u_char *pkt;
	u_int pktlen;
{
	struct bpf_d *d;
	u_int slen;

	BPFIF_LOCK(bp);
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		BPFD_LOCK(d);
		++d->bd_rcount;
		/* slen == 0 means the filter rejected the packet;
		 * otherwise it bounds the bytes captured below. */
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0) {
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen, bcopy);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
1168 
1169 /*
1170  * Copy data from an mbuf chain into a buffer.  This code is derived
1171  * from m_copydata in sys/uipc_mbuf.c.
1172  */
1173 static void
1174 bpf_mcopy(src_arg, dst_arg, len)
1175 	const void *src_arg;
1176 	void *dst_arg;
1177 	size_t len;
1178 {
1179 	const struct mbuf *m;
1180 	u_int count;
1181 	u_char *dst;
1182 
1183 	m = src_arg;
1184 	dst = dst_arg;
1185 	while (len > 0) {
1186 		if (m == 0)
1187 			panic("bpf_mcopy");
1188 		count = min(m->m_len, len);
1189 		bcopy(mtod(m, void *), dst, count);
1190 		m = m->m_next;
1191 		dst += count;
1192 		len -= count;
1193 	}
1194 }
1195 
/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(bp, m)
	struct bpf_if *bp;
	struct mbuf *m;
{
	struct bpf_d *d;
	u_int pktlen, slen;

	pktlen = m_length(m, NULL);
	/* A packet contained in a single mbuf takes the contiguous path. */
	if (pktlen == m->m_len) {
		bpf_tap(bp, mtod(m, u_char *), pktlen);
		return;
	}

	BPFIF_LOCK(bp);
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		/*
		 * rcvif == NULL marks a packet with no receiving
		 * interface; skip it unless the listener asked to see
		 * sent packets (BIOCSSEESENT).
		 */
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		/* buflen 0 presumably signals an mbuf chain to bpf_filter()
		 * -- confirm against the bpf_filter() implementation. */
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_mcopy);
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
1230 
/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(bp, data, dlen, m)
	struct bpf_if *bp;
	void *data;
	u_int dlen;
	struct mbuf *m;
{
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;

	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;	/* total length includes the prepended header */

	BPFIF_LOCK(bp);
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		/* Skip sent packets unless the listener wants them. */
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)&mb, pktlen, 0);
		if (slen != 0)
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_mcopy);
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
1274 
1275 /*
1276  * Move the packet data from interface memory (pkt) into the
1277  * store buffer.  "cpfn" is the routine called to do the actual data
1278  * transfer.  bcopy is passed in to copy contiguous chunks, while
1279  * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
1280  * pkt is really an mbuf.
1281  */
1282 static void
1283 catchpacket(d, pkt, pktlen, snaplen, cpfn)
1284 	struct bpf_d *d;
1285 	u_char *pkt;
1286 	u_int pktlen, snaplen;
1287 	void (*cpfn)(const void *, void *, size_t);
1288 {
1289 	struct bpf_hdr *hp;
1290 	int totlen, curlen;
1291 	int hdrlen = d->bd_bif->bif_hdrlen;
1292 
1293 	/*
1294 	 * Figure out how many bytes to move.  If the packet is
1295 	 * greater or equal to the snapshot length, transfer that
1296 	 * much.  Otherwise, transfer the whole packet (unless
1297 	 * we hit the buffer size limit).
1298 	 */
1299 	totlen = hdrlen + min(snaplen, pktlen);
1300 	if (totlen > d->bd_bufsize)
1301 		totlen = d->bd_bufsize;
1302 
1303 	/*
1304 	 * Round up the end of the previous packet to the next longword.
1305 	 */
1306 	curlen = BPF_WORDALIGN(d->bd_slen);
1307 	if (curlen + totlen > d->bd_bufsize) {
1308 		/*
1309 		 * This packet will overflow the storage buffer.
1310 		 * Rotate the buffers if we can, then wakeup any
1311 		 * pending reads.
1312 		 */
1313 		if (d->bd_fbuf == 0) {
1314 			/*
1315 			 * We haven't completed the previous read yet,
1316 			 * so drop the packet.
1317 			 */
1318 			++d->bd_dcount;
1319 			return;
1320 		}
1321 		ROTATE_BUFFERS(d);
1322 		bpf_wakeup(d);
1323 		curlen = 0;
1324 	}
1325 	else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
1326 		/*
1327 		 * Immediate mode is set, or the read timeout has
1328 		 * already expired during a select call.  A packet
1329 		 * arrived, so the reader should be woken up.
1330 		 */
1331 		bpf_wakeup(d);
1332 
1333 	/*
1334 	 * Append the bpf header.
1335 	 */
1336 	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
1337 	microtime(&hp->bh_tstamp);
1338 	hp->bh_datalen = pktlen;
1339 	hp->bh_hdrlen = hdrlen;
1340 	/*
1341 	 * Copy the packet data into the store buffer and update its length.
1342 	 */
1343 	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
1344 	d->bd_slen = curlen + totlen;
1345 }
1346 
1347 /*
1348  * Initialize all nonzero fields of a descriptor.
1349  */
1350 static int
1351 bpf_allocbufs(d)
1352 	struct bpf_d *d;
1353 {
1354 	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1355 	if (d->bd_fbuf == 0)
1356 		return (ENOBUFS);
1357 
1358 	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1359 	if (d->bd_sbuf == 0) {
1360 		free(d->bd_fbuf, M_BPF);
1361 		return (ENOBUFS);
1362 	}
1363 	d->bd_slen = 0;
1364 	d->bd_hlen = 0;
1365 	return (0);
1366 }
1367 
1368 /*
1369  * Free buffers currently in use by a descriptor.
1370  * Called on close.
1371  */
1372 static void
1373 bpf_freed(d)
1374 	struct bpf_d *d;
1375 {
1376 	/*
1377 	 * We don't need to lock out interrupts since this descriptor has
1378 	 * been detached from its interface and it yet hasn't been marked
1379 	 * free.
1380 	 */
1381 	if (d->bd_sbuf != 0) {
1382 		free(d->bd_sbuf, M_BPF);
1383 		if (d->bd_hbuf != 0)
1384 			free(d->bd_hbuf, M_BPF);
1385 		if (d->bd_fbuf != 0)
1386 			free(d->bd_fbuf, M_BPF);
1387 	}
1388 	if (d->bd_filter)
1389 		free((caddr_t)d->bd_filter, M_BPF);
1390 	mtx_destroy(&d->bd_mtx);
1391 }
1392 
1393 /*
1394  * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
1395  * fixed size of the link header (variable length headers not yet supported).
1396  */
1397 void
1398 bpfattach(ifp, dlt, hdrlen)
1399 	struct ifnet *ifp;
1400 	u_int dlt, hdrlen;
1401 {
1402 
1403 	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
1404 }
1405 
1406 /*
1407  * Attach an interface to bpf.  ifp is a pointer to the structure
1408  * defining the interface to be attached, dlt is the link layer type,
1409  * and hdrlen is the fixed size of the link header (variable length
1410  * headers are not yet supporrted).
1411  */
1412 void
1413 bpfattach2(ifp, dlt, hdrlen, driverp)
1414 	struct ifnet *ifp;
1415 	u_int dlt, hdrlen;
1416 	struct bpf_if **driverp;
1417 {
1418 	struct bpf_if *bp;
1419 	bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
1420 	if (bp == 0)
1421 		panic("bpfattach");
1422 
1423 	bp->bif_dlist = 0;
1424 	bp->bif_driverp = driverp;
1425 	bp->bif_ifp = ifp;
1426 	bp->bif_dlt = dlt;
1427 	mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);
1428 
1429 	mtx_lock(&bpf_mtx);
1430 	bp->bif_next = bpf_iflist;
1431 	bpf_iflist = bp;
1432 	mtx_unlock(&bpf_mtx);
1433 
1434 	*bp->bif_driverp = 0;
1435 
1436 	/*
1437 	 * Compute the length of the bpf header.  This is not necessarily
1438 	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1439 	 * that the network layer header begins on a longword boundary (for
1440 	 * performance reasons and to alleviate alignment restrictions).
1441 	 */
1442 	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1443 
1444 	if (bootverbose)
1445 		if_printf(ifp, "bpf attached\n");
1446 }
1447 
1448 /*
1449  * Detach bpf from an interface.  This involves detaching each descriptor
1450  * associated with the interface, and leaving bd_bif NULL.  Notify each
1451  * descriptor as it's detached so that any sleepers wake up and get
1452  * ENXIO.
1453  */
1454 void
1455 bpfdetach(ifp)
1456 	struct ifnet *ifp;
1457 {
1458 	struct bpf_if	*bp, *bp_prev;
1459 	struct bpf_d	*d;
1460 
1461 	/* Locate BPF interface information */
1462 	bp_prev = NULL;
1463 
1464 	mtx_lock(&bpf_mtx);
1465 	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1466 		if (ifp == bp->bif_ifp)
1467 			break;
1468 		bp_prev = bp;
1469 	}
1470 
1471 	/* Interface wasn't attached */
1472 	if ((bp == NULL) || (bp->bif_ifp == NULL)) {
1473 		mtx_unlock(&bpf_mtx);
1474 		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
1475 		return;
1476 	}
1477 
1478 	if (bp_prev) {
1479 		bp_prev->bif_next = bp->bif_next;
1480 	} else {
1481 		bpf_iflist = bp->bif_next;
1482 	}
1483 	mtx_unlock(&bpf_mtx);
1484 
1485 	while ((d = bp->bif_dlist) != NULL) {
1486 		bpf_detachd(d);
1487 		BPFD_LOCK(d);
1488 		bpf_wakeup(d);
1489 		BPFD_UNLOCK(d);
1490 	}
1491 
1492 	mtx_destroy(&bp->bif_mtx);
1493 	free(bp, M_BPF);
1494 }
1495 
1496 /*
1497  * Get a list of available data link type of the interface.
1498  */
1499 static int
1500 bpf_getdltlist(d, bfl)
1501 	struct bpf_d *d;
1502 	struct bpf_dltlist *bfl;
1503 {
1504 	int n, error;
1505 	struct ifnet *ifp;
1506 	struct bpf_if *bp;
1507 
1508 	ifp = d->bd_bif->bif_ifp;
1509 	n = 0;
1510 	error = 0;
1511 	mtx_lock(&bpf_mtx);
1512 	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1513 		if (bp->bif_ifp != ifp)
1514 			continue;
1515 		if (bfl->bfl_list != NULL) {
1516 			if (n >= bfl->bfl_len) {
1517 				mtx_unlock(&bpf_mtx);
1518 				return (ENOMEM);
1519 			}
1520 			error = copyout(&bp->bif_dlt,
1521 			    bfl->bfl_list + n, sizeof(u_int));
1522 		}
1523 		n++;
1524 	}
1525 	mtx_unlock(&bpf_mtx);
1526 	bfl->bfl_len = n;
1527 	return (error);
1528 }
1529 
1530 /*
1531  * Set the data link type of a BPF instance.
1532  */
1533 static int
1534 bpf_setdlt(d, dlt)
1535 	struct bpf_d *d;
1536 	u_int dlt;
1537 {
1538 	int error, opromisc;
1539 	struct ifnet *ifp;
1540 	struct bpf_if *bp;
1541 
1542 	if (d->bd_bif->bif_dlt == dlt)
1543 		return (0);
1544 	ifp = d->bd_bif->bif_ifp;
1545 	mtx_lock(&bpf_mtx);
1546 	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1547 		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
1548 			break;
1549 	}
1550 	mtx_unlock(&bpf_mtx);
1551 	if (bp != NULL) {
1552 		BPFD_LOCK(d);
1553 		opromisc = d->bd_promisc;
1554 		bpf_detachd(d);
1555 		bpf_attachd(d, bp);
1556 		reset_d(d);
1557 		BPFD_UNLOCK(d);
1558 		if (opromisc) {
1559 			error = ifpromisc(bp->bif_ifp, 1);
1560 			if (error)
1561 				if_printf(bp->bif_ifp,
1562 					"bpf_setdlt: ifpromisc failed (%d)\n",
1563 					error);
1564 			else
1565 				d->bd_promisc = 1;
1566 		}
1567 	}
1568 	return (bp == NULL ? EINVAL : 0);
1569 }
1570 
1571 static void bpf_drvinit(void *unused);
1572 
1573 static void bpf_clone(void *arg, char *name, int namelen, dev_t *dev);
1574 
1575 static void
1576 bpf_clone(arg, name, namelen, dev)
1577 	void *arg;
1578 	char *name;
1579 	int namelen;
1580 	dev_t *dev;
1581 {
1582 	int u;
1583 
1584 	if (*dev != NODEV)
1585 		return;
1586 	if (dev_stdclone(name, NULL, "bpf", &u) != 1)
1587 		return;
1588 	*dev = make_dev(&bpf_cdevsw, unit2minor(u), UID_ROOT, GID_WHEEL, 0600,
1589 	    "bpf%d", u);
1590 	(*dev)->si_flags |= SI_CHEAPCLONE;
1591 	return;
1592 }
1593 
/*
 * Driver initialization: set up the global bpf lock and register the
 * devfs clone handler that creates /dev/bpfN nodes on demand.
 */
static void
bpf_drvinit(unused)
	void *unused;
{

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	EVENTHANDLER_REGISTER(dev_clone, bpf_clone, 0, 1000);
}
1602 
1603 SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE,bpf_drvinit,NULL)
1604 
1605 #else /* !DEV_BPF && !NETGRAPH_BPF */
1606 /*
1607  * NOP stubs to allow bpf-using drivers to load and function.
1608  *
1609  * A 'better' implementation would allow the core bpf functionality
1610  * to be loaded at runtime.
1611  */
1612 
void
bpf_tap(bp, pkt, pktlen)
	struct bpf_if *bp;
	u_char *pkt;
	u_int pktlen;
{
	/* NOP: BPF support not compiled into this kernel. */
}
1620 
void
bpf_mtap(bp, m)
	struct bpf_if *bp;
	struct mbuf *m;
{
	/* NOP: BPF support not compiled into this kernel. */
}
1627 
void
bpf_mtap2(bp, d, l, m)
	struct bpf_if *bp;
	void *d;
	u_int l;
	struct mbuf *m;
{
	/* NOP: BPF support not compiled into this kernel. */
}
1636 
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	/* NOP: BPF support not compiled into this kernel. */
}
1643 
void
bpfattach2(ifp, dlt, hdrlen, driverp)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
	struct bpf_if **driverp;
{
	/* NOP: BPF support not compiled into this kernel. */
}
1651 
void
bpfdetach(ifp)
	struct ifnet *ifp;
{
	/* NOP: BPF support not compiled into this kernel. */
}
1657 
u_int
bpf_filter(pc, p, wirelen, buflen)
	const struct bpf_insn *pc;
	u_char *p;
	u_int wirelen;
	u_int buflen;
{
	/* (u_int)-1: accept the entire packet, as a missing filter would. */
	return -1;	/* "no filter" behaviour */
}
1667 
int
bpf_validate(f, len)
	const struct bpf_insn *f;
	int len;
{
	/* Reject every program: no BPF support is compiled in. */
	return 0;		/* false */
}
1675 
1676 #endif /* !DEV_BPF && !NETGRAPH_BPF */
1677