xref: /freebsd/sys/dev/usb/usb_pf.c (revision 18ec6525529b6c68d5ce5986c2eb3fa4fbc769c1)
/*-
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usb_busdma.h>
#include <dev/usb/usb_controller.h>
#include <dev/usb/usb_core.h>
#include <dev/usb/usb_process.h>
#include <dev/usb/usb_device.h>
#include <dev/usb/usb_bus.h>
#include <dev/usb/usb_pf.h>
#include <dev/usb/usb_transfer.h>

/*
 * The usbpf implementation is derived from the bpf(9) APIs and is
 * specialized for USB packet filtering between the driver and the host
 * controller.
 */
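
/*
 * A typical userland consumer opens the usbpf character device, adjusts the
 * capture buffer size (UIOCSBLEN) and read filter (UIOCSETF) as needed,
 * binds the descriptor to a USB bus (see usbpf_setif() below), and then
 * read()s whole capture buffers; the read size must match the descriptor's
 * buffer size.
 */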

MALLOC_DEFINE(M_USBPF, "USBPktFilter", "USB Packet Filter");

/*
 * Rotate the packet buffers in descriptor ud.  Move the store buffer into the
 * hold slot, and the free buffer into the store slot.  Zero the length of the
 * new store buffer.  Descriptor lock should be held.
 */
#define	USBPF_ROTATE_BUFFERS(ud)	do {				\
	(ud)->ud_hbuf = (ud)->ud_sbuf;					\
	(ud)->ud_hlen = (ud)->ud_slen;					\
	(ud)->ud_sbuf = (ud)->ud_fbuf;					\
	(ud)->ud_slen = 0;						\
	(ud)->ud_fbuf = NULL;						\
	usbpf_bufheld(ud);						\
} while (0)

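/*
 * On i386 unaligned word loads are safe, so the extraction macros below can
 * dereference 16- and 32-bit quantities directly.  On all other (potentially
 * strict-alignment) architectures USBPF_ALIGN is defined and the quantities
 * are assembled byte by byte instead.
 */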
#ifndef __i386__
#define	USBPF_ALIGN
#endif

#ifndef USBPF_ALIGN
#define	USBPF_EXTRACT_SHORT(p)	((u_int16_t)ntohs(*(u_int16_t *)p))
#define	USBPF_EXTRACT_LONG(p)	(ntohl(*(u_int32_t *)p))
#else
#define	USBPF_EXTRACT_SHORT(p)						\
	((u_int16_t)							\
	    ((u_int16_t)*((u_char *)p+0)<<8|				\
		(u_int16_t)*((u_char *)p+1)<<0))
#define	USBPF_EXTRACT_LONG(p)						\
	((u_int32_t)*((u_char *)p+0)<<24|				\
	    (u_int32_t)*((u_char *)p+1)<<16|				\
	    (u_int32_t)*((u_char *)p+2)<<8|				\
	    (u_int32_t)*((u_char *)p+3)<<0)
#endif

/*
 * Number of scratch memory words (for USBPF_LD|USBPF_MEM and USBPF_ST).
 */
#define	USBPF_MEMWORDS		 16

/* Values for ud_state */
#define	USBPF_IDLE		0	/* no select in progress */
#define	USBPF_WAITING		1	/* waiting for read timeout in select */
#define	USBPF_TIMED_OUT		2	/* read timeout has expired in select */

#define	PRIUSB			26	/* interruptible */

/* Frame directions */
enum usbpf_direction {
	USBPF_D_IN,	/* See incoming frames */
	USBPF_D_INOUT,	/* See incoming and outgoing frames */
	USBPF_D_OUT	/* See outgoing frames */
};

static void	usbpf_append_bytes(struct usbpf_d *, caddr_t, u_int, void *,
		    u_int);
static void	usbpf_attachd(struct usbpf_d *, struct usbpf_if *);
static void	usbpf_detachd(struct usbpf_d *);
static int	usbpf_canfreebuf(struct usbpf_d *);
static void	usbpf_buf_reclaimed(struct usbpf_d *);
static int	usbpf_canwritebuf(struct usbpf_d *);

static	d_open_t	usbpf_open;
static	d_read_t	usbpf_read;
static	d_write_t	usbpf_write;
static	d_ioctl_t	usbpf_ioctl;
static	d_poll_t	usbpf_poll;
static	d_kqfilter_t	usbpf_kqfilter;

static struct cdevsw usbpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	usbpf_open,
	.d_read =	usbpf_read,
	.d_write =	usbpf_write,
	.d_ioctl =	usbpf_ioctl,
	.d_poll =	usbpf_poll,
	.d_name =	"usbpf",
	.d_kqfilter =	usbpf_kqfilter,
};

static LIST_HEAD(, usbpf_if)	usbpf_iflist;
static struct mtx	usbpf_mtx;		/* global lock */
static int usbpf_uifd_cnt;

static int usbpf_bufsize = 4096;
#define	USBPF_MINBUFSIZE 32
#define	USBPF_MAXBUFSIZE 0x80000
static int usbpf_maxbufsize = USBPF_MAXBUFSIZE;
#define	USBPF_MAXINSNS 512
static int usbpf_maxinsns = USBPF_MAXINSNS;

static void
usbpf_buffer_init(struct usbpf_d *ud)
{

	ud->ud_bufsize = usbpf_bufsize;
}

/*
 * Free USBPF kernel buffers on device close.
 */
static void
usbpf_buffer_free(struct usbpf_d *ud)
{

	if (ud->ud_sbuf != NULL)
		free(ud->ud_sbuf, M_USBPF);
	if (ud->ud_hbuf != NULL)
		free(ud->ud_hbuf, M_USBPF);
	if (ud->ud_fbuf != NULL)
		free(ud->ud_fbuf, M_USBPF);

#ifdef INVARIANTS
	ud->ud_sbuf = ud->ud_hbuf = ud->ud_fbuf = (caddr_t)~0;
#endif
}

static void
usbpf_buffer_alloc(struct usbpf_d *ud)
{

	KASSERT(ud->ud_fbuf == NULL, ("%s: ud_fbuf != NULL", __func__));
	KASSERT(ud->ud_sbuf == NULL, ("%s: ud_sbuf != NULL", __func__));
	KASSERT(ud->ud_hbuf == NULL, ("%s: ud_hbuf != NULL", __func__));

	ud->ud_fbuf = (caddr_t)malloc(ud->ud_bufsize, M_USBPF, M_WAITOK);
	ud->ud_sbuf = (caddr_t)malloc(ud->ud_bufsize, M_USBPF, M_WAITOK);
	ud->ud_hbuf = NULL;
	ud->ud_slen = 0;
	ud->ud_hlen = 0;
}

/*
 * Copy buffer storage to user space in read().
 */
static int
usbpf_buffer_uiomove(struct usbpf_d *ud, caddr_t buf, u_int len,
    struct uio *uio)
{

	return (uiomove(buf, len, uio));
}

/*
 * Simple data copy to the current kernel buffer.
 */
static void
usbpf_buffer_append_bytes(struct usbpf_d *ud, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_char *src_bytes;

	src_bytes = (u_char *)src;
	bcopy(src_bytes, buf + offset, len);
}

/*
 * Allocate or resize buffers.
 */
static int
usbpf_buffer_ioctl_sblen(struct usbpf_d *ud, u_int *i)
{
	u_int size;

	USBPFD_LOCK(ud);
	if (ud->ud_bif != NULL) {
		USBPFD_UNLOCK(ud);
		return (EINVAL);
	}
	size = *i;
	if (size > usbpf_maxbufsize)
		*i = size = usbpf_maxbufsize;
	else if (size < USBPF_MINBUFSIZE)
		*i = size = USBPF_MINBUFSIZE;
	ud->ud_bufsize = size;
	USBPFD_UNLOCK(ud);
	return (0);
}

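/*
 * Bitmap of valid filter instruction opcodes.  Entry i covers opcodes
 * 16*i through 16*i+15; bit j of an entry is set when opcode 16*i+j is
 * accepted by USBPF_VALIDATE_CODE() below.
 */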
static const u_short	usbpf_code_map[] = {
	0x10ff,	/* 0x00-0x0f: 1111111100001000 */
	0x3070,	/* 0x10-0x1f: 0000111000001100 */
	0x3131,	/* 0x20-0x2f: 1000110010001100 */
	0x3031,	/* 0x30-0x3f: 1000110000001100 */
	0x3131,	/* 0x40-0x4f: 1000110010001100 */
	0x1011,	/* 0x50-0x5f: 1000100000001000 */
	0x1013,	/* 0x60-0x6f: 1100100000001000 */
	0x1010,	/* 0x70-0x7f: 0000100000001000 */
	0x0093,	/* 0x80-0x8f: 1100100100000000 */
	0x0000,	/* 0x90-0x9f: 0000000000000000 */
	0x0000,	/* 0xa0-0xaf: 0000000000000000 */
	0x0002,	/* 0xb0-0xbf: 0100000000000000 */
	0x0000,	/* 0xc0-0xcf: 0000000000000000 */
	0x0000,	/* 0xd0-0xdf: 0000000000000000 */
	0x0000,	/* 0xe0-0xef: 0000000000000000 */
	0x0000	/* 0xf0-0xff: 0000000000000000 */
};

#define	USBPF_VALIDATE_CODE(c)	\
    ((c) <= 0xff && (usbpf_code_map[(c) >> 4] & (1 << ((c) & 0xf))) != 0)

/*
 * Return true if the 'fcode' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * code.  The code must terminate with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
static int
usbpf_validate(const struct usbpf_insn *f, int len)
{
	register int i;
	register const struct usbpf_insn *p;

	/* Do not accept negative length filter. */
	if (len < 0)
		return (0);

	/* An empty filter means accept all. */
	if (len == 0)
		return (1);

	for (i = 0; i < len; ++i) {
		p = &f[i];
		/*
		 * Check that the code is valid.
		 */
		if (!USBPF_VALIDATE_CODE(p->code))
			return (0);
		/*
		 * Check that jumps are forward, and within
		 * the code block.
		 */
		if (USBPF_CLASS(p->code) == USBPF_JMP) {
			register u_int offset;

			if (p->code == (USBPF_JMP|USBPF_JA))
				offset = p->k;
			else
				offset = p->jt > p->jf ? p->jt : p->jf;
			if (offset >= (u_int)(len - i) - 1)
				return (0);
			continue;
		}
		/*
		 * Check that memory operations use valid addresses.
		 */
		if (p->code == USBPF_ST || p->code == USBPF_STX ||
		    p->code == (USBPF_LD|USBPF_MEM) ||
		    p->code == (USBPF_LDX|USBPF_MEM)) {
			if (p->k >= USBPF_MEMWORDS)
				return (0);
			continue;
		}
		/*
		 * Check for constant division by 0.
		 */
		if (p->code == (USBPF_ALU|USBPF_DIV|USBPF_K) && p->k == 0)
			return (0);
	}
	return (USBPF_CLASS(f[len - 1].code) == USBPF_RET);
}

#ifdef _KERNEL
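/*
 * Advance the mbuf chain so that m points at the mbuf containing byte
 * offset k and k becomes the offset within that mbuf.  If the chain is
 * too short, return 0 from the enclosing function.
 */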
#define	MINDEX(m, k) \
{ \
	register int len = m->m_len; \
 \
	while (k >= len) { \
		k -= len; \
		m = m->m_next; \
		if (m == 0) \
			return (0); \
		len = m->m_len; \
	} \
}

static u_int16_t	m_xhalf(struct mbuf *m, usbpf_u_int32 k, int *err);
static u_int32_t	m_xword(struct mbuf *m, usbpf_u_int32 k, int *err);

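/*
 * Fetch the 32-bit big-endian word at byte offset k of the mbuf chain m
 * and return it as a host-order value.  The word may span two mbufs; on
 * failure *err is set to 1 and 0 is returned, otherwise *err is 0.
 */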
static u_int32_t
m_xword(struct mbuf *m, usbpf_u_int32 k, int *err)
{
	size_t len;
	u_char *cp, *np;
	struct mbuf *m0;

	len = m->m_len;
	while (k >= len) {
		k -= len;
		m = m->m_next;
		if (m == 0)
			goto bad;
		len = m->m_len;
	}
	cp = mtod(m, u_char *) + k;
	if (len - k >= 4) {
		*err = 0;
		return (USBPF_EXTRACT_LONG(cp));
	}
	m0 = m->m_next;
	if (m0 == 0 || m0->m_len + len - k < 4)
		goto bad;
	*err = 0;
	np = mtod(m0, u_char *);
	switch (len - k) {
	case 1:
		return (((u_int32_t)cp[0] << 24) |
		    ((u_int32_t)np[0] << 16) |
		    ((u_int32_t)np[1] << 8)  |
		    (u_int32_t)np[2]);

	case 2:
		return (((u_int32_t)cp[0] << 24) |
		    ((u_int32_t)cp[1] << 16) |
		    ((u_int32_t)np[0] << 8) |
		    (u_int32_t)np[1]);

	default:
		return (((u_int32_t)cp[0] << 24) |
		    ((u_int32_t)cp[1] << 16) |
		    ((u_int32_t)cp[2] << 8) |
		    (u_int32_t)np[0]);
	}
    bad:
	*err = 1;
	return (0);
}

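/*
 * As above, but fetch the 16-bit big-endian halfword at byte offset k of
 * the mbuf chain m; on failure *err is set to 1 and 0 is returned.
 */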
static u_int16_t
m_xhalf(struct mbuf *m, usbpf_u_int32 k, int *err)
{
	size_t len;
	u_char *cp;
	struct mbuf *m0;

	len = m->m_len;
	while (k >= len) {
		k -= len;
		m = m->m_next;
		if (m == 0)
			goto bad;
		len = m->m_len;
	}
	cp = mtod(m, u_char *) + k;
	if (len - k >= 2) {
		*err = 0;
		return (USBPF_EXTRACT_SHORT(cp));
	}
	m0 = m->m_next;
	if (m0 == 0)
		goto bad;
	*err = 0;
	return ((cp[0] << 8) | mtod(m0, u_char *)[0]);
 bad:
	*err = 1;
	return (0);
}
#endif

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet and buflen is the amount
 * of data present.
 */
static u_int
usbpf_filter(const struct usbpf_insn *pc, u_char *p, u_int wirelen,
    u_int buflen)
{
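	/*
	 * A is the accumulator and X the index register of the BPF-style
	 * virtual machine; mem[] is the scratch memory addressed by the
	 * USBPF_ST/USBPF_STX and USBPF_LD|USBPF_MEM / USBPF_LDX|USBPF_MEM
	 * instructions.
	 */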
	u_int32_t A = 0, X = 0;
	usbpf_u_int32 k;
	u_int32_t mem[USBPF_MEMWORDS];

	/*
	 * XXX the filter machine is temporarily disabled because it cannot
	 * yet handle some instruction sequences correctly, which can lead
	 * to a kernel crash through invalid usage.
	 */
	return ((u_int)-1);

	if (pc == NULL)
		/*
		 * No filter means accept all.
		 */
		return ((u_int)-1);

	--pc;
	while (1) {
		++pc;
		switch (pc->code) {
		default:
#ifdef _KERNEL
			return (0);
#else
			abort();
#endif

		case USBPF_RET|USBPF_K:
			return ((u_int)pc->k);

		case USBPF_RET|USBPF_A:
			return ((u_int)A);

		case USBPF_LD|USBPF_W|USBPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int32_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return (0);
				A = m_xword((struct mbuf *)p, k, &merr);
				if (merr != 0)
					return (0);
				continue;
#else
				return (0);
#endif
			}
#ifdef USBPF_ALIGN
			if (((intptr_t)(p + k) & 3) != 0)
				A = USBPF_EXTRACT_LONG(&p[k]);
			else
#endif
				A = ntohl(*(int32_t *)(p + k));
			continue;

		case USBPF_LD|USBPF_H|USBPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int16_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return (0);
				A = m_xhalf((struct mbuf *)p, k, &merr);
				continue;
#else
				return (0);
#endif
			}
			A = USBPF_EXTRACT_SHORT(&p[k]);
			continue;

		case USBPF_LD|USBPF_B|USBPF_ABS:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				struct mbuf *m;

				if (buflen != 0)
					return (0);
				m = (struct mbuf *)p;
				MINDEX(m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return (0);
#endif
			}
			A = p[k];
			continue;

		case USBPF_LD|USBPF_W|USBPF_LEN:
			A = wirelen;
			continue;

		case USBPF_LDX|USBPF_W|USBPF_LEN:
			X = wirelen;
			continue;

		case USBPF_LD|USBPF_W|USBPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int32_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return (0);
				A = m_xword((struct mbuf *)p, k, &merr);
				if (merr != 0)
					return (0);
				continue;
#else
				return (0);
#endif
			}
#ifdef USBPF_ALIGN
			if (((intptr_t)(p + k) & 3) != 0)
				A = USBPF_EXTRACT_LONG(&p[k]);
			else
#endif
				A = ntohl(*(int32_t *)(p + k));
			continue;

		case USBPF_LD|USBPF_H|USBPF_IND:
			k = X + pc->k;
			if (X > buflen || pc->k > buflen - X ||
			    sizeof(int16_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return (0);
				A = m_xhalf((struct mbuf *)p, k, &merr);
				if (merr != 0)
					return (0);
				continue;
#else
				return (0);
#endif
			}
			A = USBPF_EXTRACT_SHORT(&p[k]);
			continue;

		case USBPF_LD|USBPF_B|USBPF_IND:
			k = X + pc->k;
			if (pc->k >= buflen || X >= buflen - pc->k) {
#ifdef _KERNEL
				struct mbuf *m;

				if (buflen != 0)
					return (0);
				m = (struct mbuf *)p;
				MINDEX(m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return (0);
#endif
			}
			A = p[k];
			continue;

		case USBPF_LDX|USBPF_MSH|USBPF_B:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				register struct mbuf *m;

				if (buflen != 0)
					return (0);
				m = (struct mbuf *)p;
				MINDEX(m, k);
				X = (mtod(m, u_char *)[k] & 0xf) << 2;
				continue;
#else
				return (0);
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case USBPF_LD|USBPF_IMM:
			A = pc->k;
			continue;

		case USBPF_LDX|USBPF_IMM:
			X = pc->k;
			continue;

		case USBPF_LD|USBPF_MEM:
			A = mem[pc->k];
			continue;

		case USBPF_LDX|USBPF_MEM:
			X = mem[pc->k];
			continue;

		case USBPF_ST:
			mem[pc->k] = A;
			continue;

		case USBPF_STX:
			mem[pc->k] = X;
			continue;

		case USBPF_JMP|USBPF_JA:
			pc += pc->k;
			continue;

		case USBPF_JMP|USBPF_JGT|USBPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case USBPF_JMP|USBPF_JGE|USBPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case USBPF_JMP|USBPF_JEQ|USBPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case USBPF_JMP|USBPF_JSET|USBPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case USBPF_JMP|USBPF_JGT|USBPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case USBPF_JMP|USBPF_JGE|USBPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case USBPF_JMP|USBPF_JEQ|USBPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case USBPF_JMP|USBPF_JSET|USBPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case USBPF_ALU|USBPF_ADD|USBPF_X:
			A += X;
			continue;

		case USBPF_ALU|USBPF_SUB|USBPF_X:
			A -= X;
			continue;

		case USBPF_ALU|USBPF_MUL|USBPF_X:
			A *= X;
			continue;

		case USBPF_ALU|USBPF_DIV|USBPF_X:
			if (X == 0)
				return (0);
			A /= X;
			continue;

		case USBPF_ALU|USBPF_AND|USBPF_X:
			A &= X;
			continue;

		case USBPF_ALU|USBPF_OR|USBPF_X:
			A |= X;
			continue;

		case USBPF_ALU|USBPF_LSH|USBPF_X:
			A <<= X;
			continue;

		case USBPF_ALU|USBPF_RSH|USBPF_X:
			A >>= X;
			continue;

		case USBPF_ALU|USBPF_ADD|USBPF_K:
			A += pc->k;
			continue;

		case USBPF_ALU|USBPF_SUB|USBPF_K:
			A -= pc->k;
			continue;

		case USBPF_ALU|USBPF_MUL|USBPF_K:
			A *= pc->k;
			continue;

		case USBPF_ALU|USBPF_DIV|USBPF_K:
			A /= pc->k;
			continue;

		case USBPF_ALU|USBPF_AND|USBPF_K:
			A &= pc->k;
			continue;

		case USBPF_ALU|USBPF_OR|USBPF_K:
			A |= pc->k;
			continue;

		case USBPF_ALU|USBPF_LSH|USBPF_K:
			A <<= pc->k;
			continue;

		case USBPF_ALU|USBPF_RSH|USBPF_K:
			A >>= pc->k;
			continue;

		case USBPF_ALU|USBPF_NEG:
			A = -A;
			continue;

		case USBPF_MISC|USBPF_TAX:
			X = A;
			continue;

		case USBPF_MISC|USBPF_TXA:
			A = X;
			continue;
		}
	}
}

static void
usbpf_free(struct usbpf_d *ud)
{

	switch (ud->ud_bufmode) {
	case USBPF_BUFMODE_BUFFER:
		return (usbpf_buffer_free(ud));
	default:
		panic("usbpf_buf_free");
	}
}

/*
 * Notify the buffer model that a buffer has moved into the hold position.
 */
static void
usbpf_bufheld(struct usbpf_d *ud)
{

	USBPFD_LOCK_ASSERT(ud);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
usbpf_freed(struct usbpf_d *ud)
{

	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and has not yet been marked
	 * free.
	 */
	usbpf_free(ud);
	if (ud->ud_rfilter != NULL)
		free((caddr_t)ud->ud_rfilter, M_USBPF);
	if (ud->ud_wfilter != NULL)
		free((caddr_t)ud->ud_wfilter, M_USBPF);
	mtx_destroy(&ud->ud_mtx);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
static void
usbpf_dtor(void *data)
{
	struct usbpf_d *ud = data;

	USBPFD_LOCK(ud);
	if (ud->ud_state == USBPF_WAITING)
		callout_stop(&ud->ud_callout);
	ud->ud_state = USBPF_IDLE;
	USBPFD_UNLOCK(ud);
	funsetown(&ud->ud_sigio);
	mtx_lock(&usbpf_mtx);
	if (ud->ud_bif)
		usbpf_detachd(ud);
	mtx_unlock(&usbpf_mtx);
	selwakeuppri(&ud->ud_sel, PRIUSB);
	knlist_destroy(&ud->ud_sel.si_note);
	callout_drain(&ud->ud_callout);
	usbpf_freed(ud);
	free(ud, M_USBPF);
}

/*
 * Open device.  Allocate a new descriptor for the opening process and
 * associate it with the file handle via devfs_set_cdevpriv().
 */
/* ARGSUSED */
static	int
usbpf_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct usbpf_d *ud;
	int error;

	ud = malloc(sizeof(*ud), M_USBPF, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(ud, usbpf_dtor);
	if (error != 0) {
		free(ud, M_USBPF);
		return (error);
	}

	usbpf_buffer_init(ud);
	ud->ud_bufmode = USBPF_BUFMODE_BUFFER;
	ud->ud_sig = SIGIO;
	ud->ud_direction = USBPF_D_INOUT;
	ud->ud_pid = td->td_proc->p_pid;
	mtx_init(&ud->ud_mtx, devtoname(dev), "usbpf cdev lock", MTX_DEF);
	callout_init_mtx(&ud->ud_callout, &ud->ud_mtx, 0);
	knlist_init_mtx(&ud->ud_sel.si_note, &ud->ud_mtx);

	return (0);
}

static int
usbpf_uiomove(struct usbpf_d *ud, caddr_t buf, u_int len, struct uio *uio)
{

	if (ud->ud_bufmode != USBPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (usbpf_buffer_uiomove(ud, buf, len, uio));
}

/*
 *  usbpf_read - read next chunk of packets from buffers
 */
static	int
usbpf_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct usbpf_d *ud;
	int error;
	int non_block;
	int timed_out;

	error = devfs_get_cdevpriv((void **)&ud);
	if (error != 0)
		return (error);

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != ud->ud_bufsize)
		return (EINVAL);

	non_block = ((ioflag & O_NONBLOCK) != 0);

	USBPFD_LOCK(ud);
	ud->ud_pid = curthread->td_proc->p_pid;
	if (ud->ud_bufmode != USBPF_BUFMODE_BUFFER) {
		USBPFD_UNLOCK(ud);
		return (EOPNOTSUPP);
	}
	if (ud->ud_state == USBPF_WAITING)
		callout_stop(&ud->ud_callout);
	timed_out = (ud->ud_state == USBPF_TIMED_OUT);
	ud->ud_state = USBPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (ud->ud_hbuf == NULL) {
		if (ud->ud_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 */
			if (ud->ud_immediate || non_block || timed_out) {
				/*
				 * Rotate the buffers and return what's here
				 * if we are in immediate mode, non-blocking
				 * flag is set, or this descriptor timed out.
				 */
				USBPF_ROTATE_BUFFERS(ud);
				break;
			}
		}

		/*
		 * No data is available, check to see if the usbpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (ud->ud_bif == NULL) {
			USBPFD_UNLOCK(ud);
			return (ENXIO);
		}

		if (non_block) {
			USBPFD_UNLOCK(ud);
			return (EWOULDBLOCK);
		}
		error = msleep(ud, &ud->ud_mtx, PRIUSB|PCATCH,
		    "uff", ud->ud_rtout);
		if (error == EINTR || error == ERESTART) {
			USBPFD_UNLOCK(ud);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (ud->ud_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (ud->ud_slen == 0) {
				USBPFD_UNLOCK(ud);
				return (0);
			}
			USBPF_ROTATE_BUFFERS(ud);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	USBPFD_UNLOCK(ud);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is usbpf_bufsize bytes.
	 *
	 * XXXRW: More synchronization needed here: what if a second thread
	 * issues a read on the same fd at the same time?  Don't want this
	 * getting invalidated.
	 */
	error = usbpf_uiomove(ud, ud->ud_hbuf, ud->ud_hlen, uio);

	USBPFD_LOCK(ud);
	ud->ud_fbuf = ud->ud_hbuf;
	ud->ud_hbuf = NULL;
	ud->ud_hlen = 0;
	usbpf_buf_reclaimed(ud);
	USBPFD_UNLOCK(ud);

	return (error);
}

static int
usbpf_write(struct cdev *dev, struct uio *uio, int ioflag)
{

	/* NOT IMPLEMENTED */
	return (ENOSYS);
}

static int
usbpf_ioctl_sblen(struct usbpf_d *ud, u_int *i)
{

	if (ud->ud_bufmode != USBPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (usbpf_buffer_ioctl_sblen(ud, i));
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the receive
 * and drop counts.  This is doable for kernel-only buffers, but with
 * zero-copy buffers, we can't write to (or rotate) buffers that are
 * currently owned by userspace.  It would be nice if we could encapsulate
 * this logic in the buffer code rather than here.
 */
static void
usbpf_reset_d(struct usbpf_d *ud)
{

	USBPFD_LOCK_ASSERT(ud);

	if ((ud->ud_hbuf != NULL) &&
	    (ud->ud_bufmode != USBPF_BUFMODE_ZBUF || usbpf_canfreebuf(ud))) {
		/* Free the hold buffer. */
		ud->ud_fbuf = ud->ud_hbuf;
		ud->ud_hbuf = NULL;
		ud->ud_hlen = 0;
		usbpf_buf_reclaimed(ud);
	}
	if (usbpf_canwritebuf(ud))
		ud->ud_slen = 0;
	ud->ud_rcount = 0;
	ud->ud_dcount = 0;
	ud->ud_fcount = 0;
	ud->ud_wcount = 0;
	ud->ud_wfcount = 0;
	ud->ud_wdcount = 0;
	ud->ud_zcopy = 0;
}

static int
usbpf_setif(struct usbpf_d *ud, struct usbpf_ifreq *ufr)
{
	struct usbpf_if *uif;
	struct usb_bus *theywant;

	theywant = usb_bus_find(ufr->ufr_name);
	if (theywant == NULL || theywant->uif == NULL)
		return (ENXIO);

	uif = theywant->uif;

	switch (ud->ud_bufmode) {
	case USBPF_BUFMODE_BUFFER:
		if (ud->ud_sbuf == NULL)
			usbpf_buffer_alloc(ud);
		KASSERT(ud->ud_sbuf != NULL, ("%s: ud_sbuf == NULL", __func__));
		break;

	default:
		panic("usbpf_setif: bufmode %d", ud->ud_bufmode);
	}
	if (uif != ud->ud_bif) {
		if (ud->ud_bif)
			/*
			 * Detach if attached to something else.
			 */
			usbpf_detachd(ud);

		usbpf_attachd(ud, uif);
	}
	USBPFD_LOCK(ud);
	usbpf_reset_d(ud);
	USBPFD_UNLOCK(ud);
	return (0);
}

/*
 * Set ud's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
usbpf_setf(struct usbpf_d *ud, struct usbpf_program *fp, u_long cmd)
{
	struct usbpf_insn *fcode, *old;
	u_int wfilter, flen, size;

	if (cmd == UIOCSETWF) {
		old = ud->ud_wfilter;
		wfilter = 1;
	} else {
		wfilter = 0;
		old = ud->ud_rfilter;
	}
	if (fp->uf_insns == NULL) {
		if (fp->uf_len != 0)
			return (EINVAL);
		USBPFD_LOCK(ud);
		if (wfilter)
			ud->ud_wfilter = NULL;
		else {
			ud->ud_rfilter = NULL;
			if (cmd == UIOCSETF)
				usbpf_reset_d(ud);
		}
		USBPFD_UNLOCK(ud);
		if (old != NULL)
			free((caddr_t)old, M_USBPF);
		return (0);
	}
	flen = fp->uf_len;
	if (flen > usbpf_maxinsns)
		return (EINVAL);

	size = flen * sizeof(*fp->uf_insns);
	fcode = (struct usbpf_insn *)malloc(size, M_USBPF, M_WAITOK);
	if (copyin((caddr_t)fp->uf_insns, (caddr_t)fcode, size) == 0 &&
	    usbpf_validate(fcode, (int)flen)) {
		USBPFD_LOCK(ud);
		if (wfilter)
			ud->ud_wfilter = fcode;
		else {
			ud->ud_rfilter = fcode;
			if (cmd == UIOCSETF)
				usbpf_reset_d(ud);
		}
		USBPFD_UNLOCK(ud);
		if (old != NULL)
			free((caddr_t)old, M_USBPF);

		return (0);
	}
	free((caddr_t)fcode, M_USBPF);
	return (EINVAL);
}

static int
usbpf_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct usbpf_d *ud;
	int error;

	error = devfs_get_cdevpriv((void **)&ud);
	if (error != 0)
		return (error);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	USBPFD_LOCK(ud);
	ud->ud_pid = td->td_proc->p_pid;
	if (ud->ud_state == USBPF_WAITING)
		callout_stop(&ud->ud_callout);
	ud->ud_state = USBPF_IDLE;
	USBPFD_UNLOCK(ud);

	if (ud->ud_locked == 1) {
		switch (cmd) {
		case UIOCGBLEN:
		case UIOCSBLEN:
		case UIOCVERSION:
			break;
		default:
			return (EPERM);
		}
	}

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Get buffer len [for read()].
	 */
	case UIOCGBLEN:
		*(u_int *)addr = ud->ud_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case UIOCSBLEN:
		error = usbpf_ioctl_sblen(ud, (u_int *)addr);
		break;

	/*
	 * Set read filter.
	 */
	case UIOCSETF:
		error = usbpf_setf(ud, (struct usbpf_program *)addr, cmd);
		break;

	/*
	 * Set read timeout.
	 */
	case UIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				ud->ud_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case UIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = ud->ud_rtout / hz;
			tv->tv_usec = (ud->ud_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case UIOCGSTATS:
		{
			struct usbpf_stat *us = (struct usbpf_stat *)addr;
1232*18ec6525SWeongyo Jeong 			/* XXXCSJP overflow */
1233*18ec6525SWeongyo Jeong 			us->us_recv = ud->ud_rcount;
1234*18ec6525SWeongyo Jeong 			us->us_drop = ud->ud_dcount;
1235*18ec6525SWeongyo Jeong 			break;
1236*18ec6525SWeongyo Jeong 		}
1237*18ec6525SWeongyo Jeong 
1238*18ec6525SWeongyo Jeong 	case UIOCVERSION:
1239*18ec6525SWeongyo Jeong 		{
1240*18ec6525SWeongyo Jeong 			struct usbpf_version *uv = (struct usbpf_version *)addr;
1241*18ec6525SWeongyo Jeong 
1242*18ec6525SWeongyo Jeong 			uv->uv_major = USBPF_MAJOR_VERSION;
1243*18ec6525SWeongyo Jeong 			uv->uv_minor = USBPF_MINOR_VERSION;
1244*18ec6525SWeongyo Jeong 			break;
1245*18ec6525SWeongyo Jeong 		}
1246*18ec6525SWeongyo Jeong 
1247*18ec6525SWeongyo Jeong 	/*
1248*18ec6525SWeongyo Jeong 	 * Set interface.
1249*18ec6525SWeongyo Jeong 	 */
1250*18ec6525SWeongyo Jeong 	case UIOCSETIF:
1251*18ec6525SWeongyo Jeong 		error = usbpf_setif(ud, (struct usbpf_ifreq *)addr);
1252*18ec6525SWeongyo Jeong 		break;
1253*18ec6525SWeongyo Jeong 
1254*18ec6525SWeongyo Jeong 	}
1255*18ec6525SWeongyo Jeong 	return (error);
1256*18ec6525SWeongyo Jeong }
1257*18ec6525SWeongyo Jeong 
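/*
 * Illustrative userland sketch (not compiled into the kernel): one way a
 * monitoring program might drive the ioctls handled above.  This is a hedged
 * example, not a reference client: the usbpf_ifreq member name "ufr_name",
 * the bus name "usbus0" and the userland header set are assumptions to be
 * checked against <dev/usb/usb_pf.h>; the remaining structures and ioctls are
 * the ones used in this file.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>

#include <dev/usb/usb_pf.h>

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
usbpf_example(void)
{
	struct usbpf_version uv;
	struct usbpf_ifreq ufr;
	struct usbpf_program prog;
	struct usbpf_stat us;
	u_int blen = 32768;
	int fd;

	fd = open("/dev/usbpf", O_RDONLY);
	if (fd < 0)
		return (-1);
	/* Refuse to run against an incompatible kernel interface. */
	if (ioctl(fd, UIOCVERSION, &uv) < 0 ||
	    uv.uv_major != USBPF_MAJOR_VERSION) {
		close(fd);
		return (-1);
	}
	/* Pick a capture buffer size before binding to a bus. */
	(void)ioctl(fd, UIOCSBLEN, &blen);
	memset(&ufr, 0, sizeof(ufr));
	/* "ufr_name" is an assumed member name; verify against usb_pf.h. */
	strlcpy(ufr.ufr_name, "usbus0", sizeof(ufr.ufr_name));
	if (ioctl(fd, UIOCSETIF, &ufr) < 0) {
		close(fd);
		return (-1);
	}
	/* A zeroed program (uf_insns == NULL, uf_len == 0) clears any read filter. */
	memset(&prog, 0, sizeof(prog));
	(void)ioctl(fd, UIOCSETF, &prog);
	/* ... a read(2) loop over captured packets would go here ... */
	(void)ioctl(fd, UIOCGSTATS, &us);
	close(fd);
	return (0);
}
#endif
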
1258*18ec6525SWeongyo Jeong /*
1259*18ec6525SWeongyo Jeong  * Support for select() and poll() system calls
1260*18ec6525SWeongyo Jeong  *
1261*18ec6525SWeongyo Jeong  * Return true iff the specific operation will not block indefinitely.
1262*18ec6525SWeongyo Jeong  * Otherwise, return false but make a note that a selwakeup() must be done.
1263*18ec6525SWeongyo Jeong  */
1264*18ec6525SWeongyo Jeong static int
1265*18ec6525SWeongyo Jeong usbpf_poll(struct cdev *dev, int events, struct thread *td)
1266*18ec6525SWeongyo Jeong {
1267*18ec6525SWeongyo Jeong 
1268*18ec6525SWeongyo Jeong 	/* NOT IMPLEMENTED */
1269*18ec6525SWeongyo Jeong 	return (ENOSYS);
1270*18ec6525SWeongyo Jeong }
1271*18ec6525SWeongyo Jeong 
1272*18ec6525SWeongyo Jeong /*
1273*18ec6525SWeongyo Jeong  * Support for kevent() system call.  Register EVFILT_READ filters and
1274*18ec6525SWeongyo Jeong  * reject all others.
1275*18ec6525SWeongyo Jeong  */
1276*18ec6525SWeongyo Jeong int
1277*18ec6525SWeongyo Jeong usbpf_kqfilter(struct cdev *dev, struct knote *kn)
1278*18ec6525SWeongyo Jeong {
1279*18ec6525SWeongyo Jeong 
1280*18ec6525SWeongyo Jeong 	/* NOT IMPLEMENTED */
1281*18ec6525SWeongyo Jeong 	return (ENOSYS);
1282*18ec6525SWeongyo Jeong }
1283*18ec6525SWeongyo Jeong 
1284*18ec6525SWeongyo Jeong /*
1285*18ec6525SWeongyo Jeong  * Attach descriptor ud to the usbpf interface uif, i.e. make ud listen on uif.
1286*18ec6525SWeongyo Jeong  */
1287*18ec6525SWeongyo Jeong static void
1288*18ec6525SWeongyo Jeong usbpf_attachd(struct usbpf_d *ud, struct usbpf_if *uif)
1289*18ec6525SWeongyo Jeong {
1290*18ec6525SWeongyo Jeong 
1291*18ec6525SWeongyo Jeong 	USBPFIF_LOCK(uif);
1292*18ec6525SWeongyo Jeong 	ud->ud_bif = uif;
1293*18ec6525SWeongyo Jeong 	LIST_INSERT_HEAD(&uif->uif_dlist, ud, ud_next);
1294*18ec6525SWeongyo Jeong 
1295*18ec6525SWeongyo Jeong 	usbpf_uifd_cnt++;
1296*18ec6525SWeongyo Jeong 	USBPFIF_UNLOCK(uif);
1297*18ec6525SWeongyo Jeong }
1298*18ec6525SWeongyo Jeong 
1299*18ec6525SWeongyo Jeong /*
1300*18ec6525SWeongyo Jeong  * Detach a file from its interface.
1301*18ec6525SWeongyo Jeong  */
1302*18ec6525SWeongyo Jeong static void
1303*18ec6525SWeongyo Jeong usbpf_detachd(struct usbpf_d *ud)
1304*18ec6525SWeongyo Jeong {
1305*18ec6525SWeongyo Jeong 	struct usbpf_if *uif;
1306*18ec6525SWeongyo Jeong 	struct usb_bus *ubus;
1307*18ec6525SWeongyo Jeong 
1308*18ec6525SWeongyo Jeong 	uif = ud->ud_bif;
1309*18ec6525SWeongyo Jeong 	USBPFIF_LOCK(uif);
1310*18ec6525SWeongyo Jeong 	USBPFD_LOCK(ud);
1311*18ec6525SWeongyo Jeong 	ubus = ud->ud_bif->uif_ubus;
1312*18ec6525SWeongyo Jeong 
1313*18ec6525SWeongyo Jeong 	/*
1314*18ec6525SWeongyo Jeong 	 * Remove ud from the interface's descriptor list.
1315*18ec6525SWeongyo Jeong 	 */
1316*18ec6525SWeongyo Jeong 	LIST_REMOVE(ud, ud_next);
1317*18ec6525SWeongyo Jeong 
1318*18ec6525SWeongyo Jeong 	usbpf_uifd_cnt--;
1319*18ec6525SWeongyo Jeong 	ud->ud_bif = NULL;
1320*18ec6525SWeongyo Jeong 	USBPFD_UNLOCK(ud);
1321*18ec6525SWeongyo Jeong 	USBPFIF_UNLOCK(uif);
1322*18ec6525SWeongyo Jeong }
1323*18ec6525SWeongyo Jeong 
1324*18ec6525SWeongyo Jeong void
1325*18ec6525SWeongyo Jeong usbpf_attach(struct usb_bus *ubus, struct usbpf_if **driverp)
1326*18ec6525SWeongyo Jeong {
1327*18ec6525SWeongyo Jeong 	struct usbpf_if *uif;
1328*18ec6525SWeongyo Jeong 
1329*18ec6525SWeongyo Jeong 	uif = malloc(sizeof(*uif), M_USBPF, M_WAITOK | M_ZERO);
1330*18ec6525SWeongyo Jeong 	LIST_INIT(&uif->uif_dlist);
1331*18ec6525SWeongyo Jeong 	uif->uif_ubus = ubus;
1332*18ec6525SWeongyo Jeong 	mtx_init(&uif->uif_mtx, "usbpf interface lock", NULL, MTX_DEF);
1333*18ec6525SWeongyo Jeong 	KASSERT(*driverp == NULL,
1334*18ec6525SWeongyo Jeong 	    ("usbpf_attach: driverp already initialized"));
1335*18ec6525SWeongyo Jeong 	*driverp = uif;
1336*18ec6525SWeongyo Jeong 
1337*18ec6525SWeongyo Jeong 	mtx_lock(&usbpf_mtx);
1338*18ec6525SWeongyo Jeong 	LIST_INSERT_HEAD(&usbpf_iflist, uif, uif_next);
1339*18ec6525SWeongyo Jeong 	mtx_unlock(&usbpf_mtx);
1340*18ec6525SWeongyo Jeong 
1341*18ec6525SWeongyo Jeong 	if (bootverbose)
1342*18ec6525SWeongyo Jeong 		device_printf(ubus->parent, "usbpf attached\n");
1343*18ec6525SWeongyo Jeong }
1344*18ec6525SWeongyo Jeong 
1345*18ec6525SWeongyo Jeong /*
1346*18ec6525SWeongyo Jeong  * If there are processes sleeping on this descriptor, wake them up.
1347*18ec6525SWeongyo Jeong  */
1348*18ec6525SWeongyo Jeong static __inline void
1349*18ec6525SWeongyo Jeong usbpf_wakeup(struct usbpf_d *ud)
1350*18ec6525SWeongyo Jeong {
1351*18ec6525SWeongyo Jeong 
1352*18ec6525SWeongyo Jeong 	USBPFD_LOCK_ASSERT(ud);
1353*18ec6525SWeongyo Jeong 	if (ud->ud_state == USBPF_WAITING) {
1354*18ec6525SWeongyo Jeong 		callout_stop(&ud->ud_callout);
1355*18ec6525SWeongyo Jeong 		ud->ud_state = USBPF_IDLE;
1356*18ec6525SWeongyo Jeong 	}
1357*18ec6525SWeongyo Jeong 	wakeup(ud);
1358*18ec6525SWeongyo Jeong 	if (ud->ud_async && ud->ud_sig && ud->ud_sigio)
1359*18ec6525SWeongyo Jeong 		pgsigio(&ud->ud_sigio, ud->ud_sig, 0);
1360*18ec6525SWeongyo Jeong 
1361*18ec6525SWeongyo Jeong 	selwakeuppri(&ud->ud_sel, PRIUSB);
1362*18ec6525SWeongyo Jeong 	KNOTE_LOCKED(&ud->ud_sel.si_note, 0);
1363*18ec6525SWeongyo Jeong }
1364*18ec6525SWeongyo Jeong 
1365*18ec6525SWeongyo Jeong void
1366*18ec6525SWeongyo Jeong usbpf_detach(struct usb_bus *ubus)
1367*18ec6525SWeongyo Jeong {
1368*18ec6525SWeongyo Jeong 	struct usbpf_if	*uif;
1369*18ec6525SWeongyo Jeong 	struct usbpf_d	*ud;
1370*18ec6525SWeongyo Jeong 
1371*18ec6525SWeongyo Jeong 	/* Locate USBPF interface information */
1372*18ec6525SWeongyo Jeong 	mtx_lock(&usbpf_mtx);
1373*18ec6525SWeongyo Jeong 	LIST_FOREACH(uif, &usbpf_iflist, uif_next) {
1374*18ec6525SWeongyo Jeong 		if (ubus == uif->uif_ubus)
1375*18ec6525SWeongyo Jeong 			break;
1376*18ec6525SWeongyo Jeong 	}
1377*18ec6525SWeongyo Jeong 
1378*18ec6525SWeongyo Jeong 	/* Interface wasn't attached */
1379*18ec6525SWeongyo Jeong 	if ((uif == NULL) || (uif->uif_ubus == NULL)) {
1380*18ec6525SWeongyo Jeong 		mtx_unlock(&usbpf_mtx);
1381*18ec6525SWeongyo Jeong 		printf("usbpf_detach: not attached\n");	/* XXX */
1382*18ec6525SWeongyo Jeong 		return;
1383*18ec6525SWeongyo Jeong 	}
1384*18ec6525SWeongyo Jeong 
1385*18ec6525SWeongyo Jeong 	LIST_REMOVE(uif, uif_next);
1386*18ec6525SWeongyo Jeong 	mtx_unlock(&usbpf_mtx);
1387*18ec6525SWeongyo Jeong 
1388*18ec6525SWeongyo Jeong 	while ((ud = LIST_FIRST(&uif->uif_dlist)) != NULL) {
1389*18ec6525SWeongyo Jeong 		usbpf_detachd(ud);
1390*18ec6525SWeongyo Jeong 		USBPFD_LOCK(ud);
1391*18ec6525SWeongyo Jeong 		usbpf_wakeup(ud);
1392*18ec6525SWeongyo Jeong 		USBPFD_UNLOCK(ud);
1393*18ec6525SWeongyo Jeong 	}
1394*18ec6525SWeongyo Jeong 
1395*18ec6525SWeongyo Jeong 	mtx_destroy(&uif->uif_mtx);
1396*18ec6525SWeongyo Jeong 	free(uif, M_USBPF);
1397*18ec6525SWeongyo Jeong }
1398*18ec6525SWeongyo Jeong 
1399*18ec6525SWeongyo Jeong /* Time stamping functions */
1400*18ec6525SWeongyo Jeong #define	USBPF_T_MICROTIME	0x0000
1401*18ec6525SWeongyo Jeong #define	USBPF_T_NANOTIME	0x0001
1402*18ec6525SWeongyo Jeong #define	USBPF_T_BINTIME		0x0002
1403*18ec6525SWeongyo Jeong #define	USBPF_T_NONE		0x0003
1404*18ec6525SWeongyo Jeong #define	USBPF_T_FORMAT_MASK	0x0003
1405*18ec6525SWeongyo Jeong #define	USBPF_T_NORMAL		0x0000
1406*18ec6525SWeongyo Jeong #define	USBPF_T_FAST		0x0100
1407*18ec6525SWeongyo Jeong #define	USBPF_T_MONOTONIC	0x0200
1408*18ec6525SWeongyo Jeong #define	USBPF_T_FORMAT(t)	((t) & USBPF_T_FORMAT_MASK)
1409*18ec6525SWeongyo Jeong 
1410*18ec6525SWeongyo Jeong #define	USBPF_TSTAMP_NONE	0
1411*18ec6525SWeongyo Jeong #define	USBPF_TSTAMP_FAST	1
1412*18ec6525SWeongyo Jeong #define	USBPF_TSTAMP_NORMAL	2
1413*18ec6525SWeongyo Jeong 
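/*
 * A descriptor's ud_tstamp value combines one USBPF_T_* format (MICROTIME,
 * NANOTIME, BINTIME or NONE) with the optional USBPF_T_FAST and
 * USBPF_T_MONOTONIC modifiers.  For example, (USBPF_T_NANOTIME |
 * USBPF_T_MONOTONIC) requests nanosecond stamps on the monotonic clock,
 * while USBPF_T_FAST selects the cheaper getbinuptime() path in
 * usbpf_gettime() below.
 */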
1414*18ec6525SWeongyo Jeong static int
1415*18ec6525SWeongyo Jeong usbpf_ts_quality(int tstype)
1416*18ec6525SWeongyo Jeong {
1417*18ec6525SWeongyo Jeong 
1418*18ec6525SWeongyo Jeong 	if (tstype == USBPF_T_NONE)
1419*18ec6525SWeongyo Jeong 		return (USBPF_TSTAMP_NONE);
1420*18ec6525SWeongyo Jeong 	if ((tstype & USBPF_T_FAST) != 0)
1421*18ec6525SWeongyo Jeong 		return (USBPF_TSTAMP_FAST);
1422*18ec6525SWeongyo Jeong 
1423*18ec6525SWeongyo Jeong 	return (USBPF_TSTAMP_NORMAL);
1424*18ec6525SWeongyo Jeong }
1425*18ec6525SWeongyo Jeong 
1426*18ec6525SWeongyo Jeong static int
1427*18ec6525SWeongyo Jeong usbpf_gettime(struct bintime *bt, int tstype)
1428*18ec6525SWeongyo Jeong {
1429*18ec6525SWeongyo Jeong 	int quality;
1430*18ec6525SWeongyo Jeong 
1431*18ec6525SWeongyo Jeong 	quality = usbpf_ts_quality(tstype);
1432*18ec6525SWeongyo Jeong 	if (quality == USBPF_TSTAMP_NONE)
1433*18ec6525SWeongyo Jeong 		return (quality);
1434*18ec6525SWeongyo Jeong 	if (quality == USBPF_TSTAMP_NORMAL)
1435*18ec6525SWeongyo Jeong 		binuptime(bt);
1436*18ec6525SWeongyo Jeong 	else
1437*18ec6525SWeongyo Jeong 		getbinuptime(bt);
1438*18ec6525SWeongyo Jeong 
1439*18ec6525SWeongyo Jeong 	return (quality);
1440*18ec6525SWeongyo Jeong }
1441*18ec6525SWeongyo Jeong 
1442*18ec6525SWeongyo Jeong /*
1443*18ec6525SWeongyo Jeong  * If the buffer mechanism has a way to decide that a held buffer can be made
1444*18ec6525SWeongyo Jeong  * free, then it is exposed via the usbpf_canfreebuf() interface.  (1) is
1445*18ec6525SWeongyo Jeong  * returned if the buffer can be discarded, (0) is returned if it cannot.
1446*18ec6525SWeongyo Jeong  */
1447*18ec6525SWeongyo Jeong static int
1448*18ec6525SWeongyo Jeong usbpf_canfreebuf(struct usbpf_d *ud)
1449*18ec6525SWeongyo Jeong {
1450*18ec6525SWeongyo Jeong 
1451*18ec6525SWeongyo Jeong 	USBPFD_LOCK_ASSERT(ud);
1452*18ec6525SWeongyo Jeong 
1453*18ec6525SWeongyo Jeong 	return (0);
1454*18ec6525SWeongyo Jeong }
1455*18ec6525SWeongyo Jeong 
1456*18ec6525SWeongyo Jeong /*
1457*18ec6525SWeongyo Jeong  * Allow the buffer model to indicate that the current store buffer is
1458*18ec6525SWeongyo Jeong  * immutable, regardless of the appearance of space.  Return (1) if the
1459*18ec6525SWeongyo Jeong  * buffer is writable, and (0) if not.
1460*18ec6525SWeongyo Jeong  */
1461*18ec6525SWeongyo Jeong static int
1462*18ec6525SWeongyo Jeong usbpf_canwritebuf(struct usbpf_d *ud)
1463*18ec6525SWeongyo Jeong {
1464*18ec6525SWeongyo Jeong 
1465*18ec6525SWeongyo Jeong 	USBPFD_LOCK_ASSERT(ud);
1466*18ec6525SWeongyo Jeong 	return (1);
1467*18ec6525SWeongyo Jeong }
1468*18ec6525SWeongyo Jeong 
1469*18ec6525SWeongyo Jeong /*
1470*18ec6525SWeongyo Jeong  * Notify buffer model that an attempt to write to the store buffer has
1471*18ec6525SWeongyo Jeong  * resulted in a dropped packet, in which case the buffer may be considered
1472*18ec6525SWeongyo Jeong  * full.
1473*18ec6525SWeongyo Jeong  */
1474*18ec6525SWeongyo Jeong static void
1475*18ec6525SWeongyo Jeong usbpf_buffull(struct usbpf_d *ud)
1476*18ec6525SWeongyo Jeong {
1477*18ec6525SWeongyo Jeong 
1478*18ec6525SWeongyo Jeong 	USBPFD_LOCK_ASSERT(ud);
1479*18ec6525SWeongyo Jeong }
1480*18ec6525SWeongyo Jeong 
1481*18ec6525SWeongyo Jeong /*
1482*18ec6525SWeongyo Jeong  * This function gets called when the free buffer is re-assigned.
1483*18ec6525SWeongyo Jeong  */
1484*18ec6525SWeongyo Jeong static void
1485*18ec6525SWeongyo Jeong usbpf_buf_reclaimed(struct usbpf_d *ud)
1486*18ec6525SWeongyo Jeong {
1487*18ec6525SWeongyo Jeong 
1488*18ec6525SWeongyo Jeong 	USBPFD_LOCK_ASSERT(ud);
1489*18ec6525SWeongyo Jeong 
1490*18ec6525SWeongyo Jeong 	switch (ud->ud_bufmode) {
1491*18ec6525SWeongyo Jeong 	case USBPF_BUFMODE_BUFFER:
1492*18ec6525SWeongyo Jeong 		return;
1493*18ec6525SWeongyo Jeong 
1494*18ec6525SWeongyo Jeong 	default:
1495*18ec6525SWeongyo Jeong 		panic("usbpf_buf_reclaimed");
1496*18ec6525SWeongyo Jeong 	}
1497*18ec6525SWeongyo Jeong }
1498*18ec6525SWeongyo Jeong 
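/*
 * Size of a usbpf header up to and including the uh_hdrlen member, i.e.
 * without any trailing structure padding.  usbpf_hdrlen() below adds the
 * interface header length, rounds the total up to a word boundary and
 * returns how many bytes catchpacket() must reserve in front of the
 * captured packet data.
 */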
1499*18ec6525SWeongyo Jeong #define	SIZEOF_USBPF_HDR(type)	\
1500*18ec6525SWeongyo Jeong     (offsetof(type, uh_hdrlen) + sizeof(((type *)0)->uh_hdrlen))
1501*18ec6525SWeongyo Jeong 
1502*18ec6525SWeongyo Jeong static int
1503*18ec6525SWeongyo Jeong usbpf_hdrlen(struct usbpf_d *ud)
1504*18ec6525SWeongyo Jeong {
1505*18ec6525SWeongyo Jeong 	int hdrlen;
1506*18ec6525SWeongyo Jeong 
1507*18ec6525SWeongyo Jeong 	hdrlen = ud->ud_bif->uif_hdrlen;
1508*18ec6525SWeongyo Jeong 	hdrlen += SIZEOF_USBPF_HDR(struct usbpf_xhdr);
1509*18ec6525SWeongyo Jeong 	hdrlen = USBPF_WORDALIGN(hdrlen);
1510*18ec6525SWeongyo Jeong 
1511*18ec6525SWeongyo Jeong 	return (hdrlen - ud->ud_bif->uif_hdrlen);
1512*18ec6525SWeongyo Jeong }
1513*18ec6525SWeongyo Jeong 
1514*18ec6525SWeongyo Jeong static void
1515*18ec6525SWeongyo Jeong usbpf_bintime2ts(struct bintime *bt, struct usbpf_ts *ts, int tstype)
1516*18ec6525SWeongyo Jeong {
1517*18ec6525SWeongyo Jeong 	struct bintime bt2;
1518*18ec6525SWeongyo Jeong 	struct timeval tsm;
1519*18ec6525SWeongyo Jeong 	struct timespec tsn;
1520*18ec6525SWeongyo Jeong 
1521*18ec6525SWeongyo Jeong 	if ((tstype & USBPF_T_MONOTONIC) == 0) {
1522*18ec6525SWeongyo Jeong 		bt2 = *bt;
1523*18ec6525SWeongyo Jeong 		bintime_add(&bt2, &boottimebin);
1524*18ec6525SWeongyo Jeong 		bt = &bt2;
1525*18ec6525SWeongyo Jeong 	}
1526*18ec6525SWeongyo Jeong 	switch (USBPF_T_FORMAT(tstype)) {
1527*18ec6525SWeongyo Jeong 	case USBPF_T_MICROTIME:
1528*18ec6525SWeongyo Jeong 		bintime2timeval(bt, &tsm);
1529*18ec6525SWeongyo Jeong 		ts->ut_sec = tsm.tv_sec;
1530*18ec6525SWeongyo Jeong 		ts->ut_frac = tsm.tv_usec;
1531*18ec6525SWeongyo Jeong 		break;
1532*18ec6525SWeongyo Jeong 	case USBPF_T_NANOTIME:
1533*18ec6525SWeongyo Jeong 		bintime2timespec(bt, &tsn);
1534*18ec6525SWeongyo Jeong 		ts->ut_sec = tsn.tv_sec;
1535*18ec6525SWeongyo Jeong 		ts->ut_frac = tsn.tv_nsec;
1536*18ec6525SWeongyo Jeong 		break;
1537*18ec6525SWeongyo Jeong 	case USBPF_T_BINTIME:
1538*18ec6525SWeongyo Jeong 		ts->ut_sec = bt->sec;
1539*18ec6525SWeongyo Jeong 		ts->ut_frac = bt->frac;
1540*18ec6525SWeongyo Jeong 		break;
1541*18ec6525SWeongyo Jeong 	}
1542*18ec6525SWeongyo Jeong }
1543*18ec6525SWeongyo Jeong 
1544*18ec6525SWeongyo Jeong /*
1545*18ec6525SWeongyo Jeong  * Move the packet data from interface memory (pkt) into the
1546*18ec6525SWeongyo Jeong  * store buffer.  "cpfn" is the routine called to do the actual data
1547*18ec6525SWeongyo Jeong  * transfer.  usbpf_append_bytes() is what gets passed in here, since
1548*18ec6525SWeongyo Jeong  * usbpf_tap() always hands this routine a contiguous, virtually linear
1549*18ec6525SWeongyo Jeong  * buffer built by usbpf_xfertap().
1550*18ec6525SWeongyo Jeong  */
1551*18ec6525SWeongyo Jeong static void
1552*18ec6525SWeongyo Jeong catchpacket(struct usbpf_d *ud, u_char *pkt, u_int pktlen, u_int snaplen,
1553*18ec6525SWeongyo Jeong     void (*cpfn)(struct usbpf_d *, caddr_t, u_int, void *, u_int),
1554*18ec6525SWeongyo Jeong     struct bintime *bt)
1555*18ec6525SWeongyo Jeong {
1556*18ec6525SWeongyo Jeong 	struct usbpf_xhdr hdr;
1557*18ec6525SWeongyo Jeong 	int caplen, curlen, hdrlen, totlen;
1558*18ec6525SWeongyo Jeong 	int do_wakeup = 0;
1559*18ec6525SWeongyo Jeong 	int do_timestamp;
1560*18ec6525SWeongyo Jeong 	int tstype;
1561*18ec6525SWeongyo Jeong 
1562*18ec6525SWeongyo Jeong 	USBPFD_LOCK_ASSERT(ud);
1563*18ec6525SWeongyo Jeong 
1564*18ec6525SWeongyo Jeong 	/*
1565*18ec6525SWeongyo Jeong 	 * Detect whether user space has released a buffer back to us, and if
1566*18ec6525SWeongyo Jeong 	 * so, move it from being a hold buffer to a free buffer.  This may
1567*18ec6525SWeongyo Jeong 	 * not be the best place to do it (for example, we might only want to
1568*18ec6525SWeongyo Jeong 	 * run this check if we need the space), but for now it's a reliable
1569*18ec6525SWeongyo Jeong 	 * spot to do it.
1570*18ec6525SWeongyo Jeong 	 */
1571*18ec6525SWeongyo Jeong 	if (ud->ud_fbuf == NULL && usbpf_canfreebuf(ud)) {
1572*18ec6525SWeongyo Jeong 		ud->ud_fbuf = ud->ud_hbuf;
1573*18ec6525SWeongyo Jeong 		ud->ud_hbuf = NULL;
1574*18ec6525SWeongyo Jeong 		ud->ud_hlen = 0;
1575*18ec6525SWeongyo Jeong 		usbpf_buf_reclaimed(ud);
1576*18ec6525SWeongyo Jeong 	}
1577*18ec6525SWeongyo Jeong 
1578*18ec6525SWeongyo Jeong 	/*
1579*18ec6525SWeongyo Jeong 	 * Figure out how many bytes to move.  If the packet is
1580*18ec6525SWeongyo Jeong 	 * greater or equal to the snapshot length, transfer that
1581*18ec6525SWeongyo Jeong 	 * much.  Otherwise, transfer the whole packet (unless
1582*18ec6525SWeongyo Jeong 	 * we hit the buffer size limit).
1583*18ec6525SWeongyo Jeong 	 */
1584*18ec6525SWeongyo Jeong 	hdrlen = usbpf_hdrlen(ud);
1585*18ec6525SWeongyo Jeong 	totlen = hdrlen + min(snaplen, pktlen);
1586*18ec6525SWeongyo Jeong 	if (totlen > ud->ud_bufsize)
1587*18ec6525SWeongyo Jeong 		totlen = ud->ud_bufsize;
1588*18ec6525SWeongyo Jeong 
1589*18ec6525SWeongyo Jeong 	/*
1590*18ec6525SWeongyo Jeong 	 * Round up the end of the previous packet to the next longword.
1591*18ec6525SWeongyo Jeong 	 *
1592*18ec6525SWeongyo Jeong 	 * Drop the packet if there's no room and no hope of room.
1593*18ec6525SWeongyo Jeong 	 * If the packet would overflow the storage buffer or the storage
1594*18ec6525SWeongyo Jeong 	 * buffer is considered immutable by the buffer model, try to rotate
1595*18ec6525SWeongyo Jeong 	 * the buffer and wakeup pending processes.
1596*18ec6525SWeongyo Jeong 	 */
1597*18ec6525SWeongyo Jeong 	curlen = USBPF_WORDALIGN(ud->ud_slen);
1598*18ec6525SWeongyo Jeong 	if (curlen + totlen > ud->ud_bufsize || !usbpf_canwritebuf(ud)) {
1599*18ec6525SWeongyo Jeong 		if (ud->ud_fbuf == NULL) {
1600*18ec6525SWeongyo Jeong 			/*
1601*18ec6525SWeongyo Jeong 			 * There's no room in the store buffer, and no
1602*18ec6525SWeongyo Jeong 			 * prospect of room, so drop the packet.  Notify the
1603*18ec6525SWeongyo Jeong 			 * buffer model.
1604*18ec6525SWeongyo Jeong 			 */
1605*18ec6525SWeongyo Jeong 			usbpf_buffull(ud);
1606*18ec6525SWeongyo Jeong 			++ud->ud_dcount;
1607*18ec6525SWeongyo Jeong 			return;
1608*18ec6525SWeongyo Jeong 		}
1609*18ec6525SWeongyo Jeong 		USBPF_ROTATE_BUFFERS(ud);
1610*18ec6525SWeongyo Jeong 		do_wakeup = 1;
1611*18ec6525SWeongyo Jeong 		curlen = 0;
1612*18ec6525SWeongyo Jeong 	} else if (ud->ud_immediate || ud->ud_state == USBPF_TIMED_OUT)
1613*18ec6525SWeongyo Jeong 		/*
1614*18ec6525SWeongyo Jeong 		 * Immediate mode is set, or the read timeout has already
1615*18ec6525SWeongyo Jeong 		 * expired during a select call.  A packet arrived, so the
1616*18ec6525SWeongyo Jeong 		 * reader should be woken up.
1617*18ec6525SWeongyo Jeong 		 */
1618*18ec6525SWeongyo Jeong 		do_wakeup = 1;
1619*18ec6525SWeongyo Jeong 	caplen = totlen - hdrlen;
1620*18ec6525SWeongyo Jeong 	tstype = ud->ud_tstamp;
1621*18ec6525SWeongyo Jeong 	do_timestamp = tstype != USBPF_T_NONE;
1622*18ec6525SWeongyo Jeong 
1623*18ec6525SWeongyo Jeong 	/*
1624*18ec6525SWeongyo Jeong 	 * Append the usbpf header.  Note we append the actual header size, but
1625*18ec6525SWeongyo Jeong 	 * move forward the length of the header plus padding.
1626*18ec6525SWeongyo Jeong 	 */
1627*18ec6525SWeongyo Jeong 	bzero(&hdr, sizeof(hdr));
1628*18ec6525SWeongyo Jeong 	if (do_timestamp)
1629*18ec6525SWeongyo Jeong 		usbpf_bintime2ts(bt, &hdr.uh_tstamp, tstype);
1630*18ec6525SWeongyo Jeong 	hdr.uh_datalen = pktlen;
1631*18ec6525SWeongyo Jeong 	hdr.uh_hdrlen = hdrlen;
1632*18ec6525SWeongyo Jeong 	hdr.uh_caplen = caplen;
1633*18ec6525SWeongyo Jeong 	usbpf_append_bytes(ud, ud->ud_sbuf, curlen, &hdr, sizeof(hdr));
1634*18ec6525SWeongyo Jeong 
1635*18ec6525SWeongyo Jeong 	/*
1636*18ec6525SWeongyo Jeong 	 * Copy the packet data into the store buffer and update its length.
1637*18ec6525SWeongyo Jeong 	 */
1638*18ec6525SWeongyo Jeong 	(*cpfn)(ud, ud->ud_sbuf, curlen + hdrlen, pkt, caplen);
1639*18ec6525SWeongyo Jeong 	ud->ud_slen = curlen + totlen;
1640*18ec6525SWeongyo Jeong 
1641*18ec6525SWeongyo Jeong 	if (do_wakeup)
1642*18ec6525SWeongyo Jeong 		usbpf_wakeup(ud);
1643*18ec6525SWeongyo Jeong }
1644*18ec6525SWeongyo Jeong 
1645*18ec6525SWeongyo Jeong /*
1646*18ec6525SWeongyo Jeong  * Incoming linkage from device drivers.  Process the packet pkt, of length
1647*18ec6525SWeongyo Jeong  * pktlen, which is stored in a contiguous buffer.  The packet is parsed
1648*18ec6525SWeongyo Jeong  * by each process' filter, and if accepted, stashed into the corresponding
1649*18ec6525SWeongyo Jeong  * buffer.
1650*18ec6525SWeongyo Jeong  */
1651*18ec6525SWeongyo Jeong static void
1652*18ec6525SWeongyo Jeong usbpf_tap(struct usbpf_if *uif, u_char *pkt, u_int pktlen)
1653*18ec6525SWeongyo Jeong {
1654*18ec6525SWeongyo Jeong 	struct bintime bt;
1655*18ec6525SWeongyo Jeong 	struct usbpf_d *ud;
1656*18ec6525SWeongyo Jeong 	u_int slen;
1657*18ec6525SWeongyo Jeong 	int gottime;
1658*18ec6525SWeongyo Jeong 
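	/*
	 * The timestamp is taken lazily: "gottime" tracks the best quality
	 * captured so far, and the clock is read (or re-read at a better
	 * quality) only when a matching descriptor requires it.
	 */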
1659*18ec6525SWeongyo Jeong 	gottime = USBPF_TSTAMP_NONE;
1660*18ec6525SWeongyo Jeong 	USBPFIF_LOCK(uif);
1661*18ec6525SWeongyo Jeong 	LIST_FOREACH(ud, &uif->uif_dlist, ud_next) {
1662*18ec6525SWeongyo Jeong 		USBPFD_LOCK(ud);
1663*18ec6525SWeongyo Jeong 		++ud->ud_rcount;
1664*18ec6525SWeongyo Jeong 		slen = usbpf_filter(ud->ud_rfilter, pkt, pktlen, pktlen);
1665*18ec6525SWeongyo Jeong 		if (slen != 0) {
1666*18ec6525SWeongyo Jeong 			ud->ud_fcount++;
1667*18ec6525SWeongyo Jeong 			if (gottime < usbpf_ts_quality(ud->ud_tstamp))
1668*18ec6525SWeongyo Jeong 				gottime = usbpf_gettime(&bt, ud->ud_tstamp);
1669*18ec6525SWeongyo Jeong 			catchpacket(ud, pkt, pktlen, slen,
1670*18ec6525SWeongyo Jeong 			    usbpf_append_bytes, &bt);
1671*18ec6525SWeongyo Jeong 		}
1672*18ec6525SWeongyo Jeong 		USBPFD_UNLOCK(ud);
1673*18ec6525SWeongyo Jeong 	}
1674*18ec6525SWeongyo Jeong 	USBPFIF_UNLOCK(uif);
1675*18ec6525SWeongyo Jeong }
1676*18ec6525SWeongyo Jeong 
1677*18ec6525SWeongyo Jeong static uint32_t
1678*18ec6525SWeongyo Jeong usbpf_aggregate_xferflags(struct usb_xfer_flags *flags)
1679*18ec6525SWeongyo Jeong {
1680*18ec6525SWeongyo Jeong 	uint32_t val = 0;
1681*18ec6525SWeongyo Jeong 
1682*18ec6525SWeongyo Jeong 	if (flags->force_short_xfer == 1)
1683*18ec6525SWeongyo Jeong 		val |= USBPF_FLAG_FORCE_SHORT_XFER;
1684*18ec6525SWeongyo Jeong 	if (flags->short_xfer_ok == 1)
1685*18ec6525SWeongyo Jeong 		val |= USBPF_FLAG_SHORT_XFER_OK;
1686*18ec6525SWeongyo Jeong 	if (flags->short_frames_ok == 1)
1687*18ec6525SWeongyo Jeong 		val |= USBPF_FLAG_SHORT_FRAMES_OK;
1688*18ec6525SWeongyo Jeong 	if (flags->pipe_bof == 1)
1689*18ec6525SWeongyo Jeong 		val |= USBPF_FLAG_PIPE_BOF;
1690*18ec6525SWeongyo Jeong 	if (flags->proxy_buffer == 1)
1691*18ec6525SWeongyo Jeong 		val |= USBPF_FLAG_PROXY_BUFFER;
1692*18ec6525SWeongyo Jeong 	if (flags->ext_buffer == 1)
1693*18ec6525SWeongyo Jeong 		val |= USBPF_FLAG_EXT_BUFFER;
1694*18ec6525SWeongyo Jeong 	if (flags->manual_status == 1)
1695*18ec6525SWeongyo Jeong 		val |= USBPF_FLAG_MANUAL_STATUS;
1696*18ec6525SWeongyo Jeong 	if (flags->no_pipe_ok == 1)
1697*18ec6525SWeongyo Jeong 		val |= USBPF_FLAG_NO_PIPE_OK;
1698*18ec6525SWeongyo Jeong 	if (flags->stall_pipe == 1)
1699*18ec6525SWeongyo Jeong 		val |= USBPF_FLAG_STALL_PIPE;
1700*18ec6525SWeongyo Jeong 	return (val);
1701*18ec6525SWeongyo Jeong }
1702*18ec6525SWeongyo Jeong 
1703*18ec6525SWeongyo Jeong static uint32_t
1704*18ec6525SWeongyo Jeong usbpf_aggregate_status(struct usb_xfer_flags_int *flags)
1705*18ec6525SWeongyo Jeong {
1706*18ec6525SWeongyo Jeong 	uint32_t val = 0;
1707*18ec6525SWeongyo Jeong 
1708*18ec6525SWeongyo Jeong 	if (flags->open == 1)
1709*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_OPEN;
1710*18ec6525SWeongyo Jeong 	if (flags->transferring == 1)
1711*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_TRANSFERRING;
1712*18ec6525SWeongyo Jeong 	if (flags->did_dma_delay == 1)
1713*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_DID_DMA_DELAY;
1714*18ec6525SWeongyo Jeong 	if (flags->did_close == 1)
1715*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_DID_CLOSE;
1716*18ec6525SWeongyo Jeong 	if (flags->draining == 1)
1717*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_DRAINING;
1718*18ec6525SWeongyo Jeong 	if (flags->started == 1)
1719*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_STARTED;
1720*18ec6525SWeongyo Jeong 	if (flags->bandwidth_reclaimed == 1)
1721*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_BW_RECLAIMED;
1722*18ec6525SWeongyo Jeong 	if (flags->control_xfr == 1)
1723*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_CONTROL_XFR;
1724*18ec6525SWeongyo Jeong 	if (flags->control_hdr == 1)
1725*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_CONTROL_HDR;
1726*18ec6525SWeongyo Jeong 	if (flags->control_act == 1)
1727*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_CONTROL_ACT;
1728*18ec6525SWeongyo Jeong 	if (flags->control_stall == 1)
1729*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_CONTROL_STALL;
1730*18ec6525SWeongyo Jeong 	if (flags->short_frames_ok == 1)
1731*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_SHORT_FRAMES_OK;
1732*18ec6525SWeongyo Jeong 	if (flags->short_xfer_ok == 1)
1733*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_SHORT_XFER_OK;
1734*18ec6525SWeongyo Jeong #if USB_HAVE_BUSDMA
1735*18ec6525SWeongyo Jeong 	if (flags->bdma_enable == 1)
1736*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_BDMA_ENABLE;
1737*18ec6525SWeongyo Jeong 	if (flags->bdma_no_post_sync == 1)
1738*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_BDMA_NO_POST_SYNC;
1739*18ec6525SWeongyo Jeong 	if (flags->bdma_setup == 1)
1740*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_BDMA_SETUP;
1741*18ec6525SWeongyo Jeong #endif
1742*18ec6525SWeongyo Jeong 	if (flags->isochronous_xfr == 1)
1743*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_ISOCHRONOUS_XFR;
1744*18ec6525SWeongyo Jeong 	if (flags->curr_dma_set == 1)
1745*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_CURR_DMA_SET;
1746*18ec6525SWeongyo Jeong 	if (flags->can_cancel_immed == 1)
1747*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_CAN_CANCEL_IMMED;
1748*18ec6525SWeongyo Jeong 	if (flags->doing_callback == 1)
1749*18ec6525SWeongyo Jeong 		val |= USBPF_STATUS_DOING_CALLBACK;
1750*18ec6525SWeongyo Jeong 
1751*18ec6525SWeongyo Jeong 	return (val);
1752*18ec6525SWeongyo Jeong }
1753*18ec6525SWeongyo Jeong 
1754*18ec6525SWeongyo Jeong void
1755*18ec6525SWeongyo Jeong usbpf_xfertap(struct usb_xfer *xfer, int type)
1756*18ec6525SWeongyo Jeong {
1757*18ec6525SWeongyo Jeong 	struct usb_endpoint *ep = xfer->endpoint;
1758*18ec6525SWeongyo Jeong 	struct usb_page_search res;
1759*18ec6525SWeongyo Jeong 	struct usb_xfer_root *info = xfer->xroot;
1760*18ec6525SWeongyo Jeong 	struct usb_bus *bus = info->bus;
1761*18ec6525SWeongyo Jeong 	struct usbpf_pkthdr *up;
1762*18ec6525SWeongyo Jeong 	usb_frlength_t isoc_offset = 0;
1763*18ec6525SWeongyo Jeong 	int i;
1764*18ec6525SWeongyo Jeong 	char *buf, *ptr, *end;
1765*18ec6525SWeongyo Jeong 
1766*18ec6525SWeongyo Jeong 	/*
1767*18ec6525SWeongyo Jeong 	 * NB: usbpf_uifd_cnt is read without USBPFIF_LOCK(); a stale value is
1768*18ec6525SWeongyo Jeong 	 * harmless here, at worst a transfer is tapped needlessly or skipped.
1769*18ec6525SWeongyo Jeong 	 */
1770*18ec6525SWeongyo Jeong 	if (usbpf_uifd_cnt == 0)
1771*18ec6525SWeongyo Jeong 		return;
1772*18ec6525SWeongyo Jeong 
1773*18ec6525SWeongyo Jeong 	/*
1774*18ec6525SWeongyo Jeong 	 * XXX TODO
1775*18ec6525SWeongyo Jeong 	 * Allocating a buffer here means the packet data is copied twice,
1776*18ec6525SWeongyo Jeong 	 * which is inefficient.  The usbpf_pkthdr and frame data are copied
1777*18ec6525SWeongyo Jeong 	 * so that the USB packet read filter sees a virtually linear buffer.
1778*18ec6525SWeongyo Jeong 	 */
1779*18ec6525SWeongyo Jeong 	buf = ptr = malloc(sizeof(struct usbpf_pkthdr) + (USB_PAGE_SIZE * 5),
1780*18ec6525SWeongyo Jeong 	    M_USBPF, M_NOWAIT);
1781*18ec6525SWeongyo Jeong 	if (buf == NULL) {
1782*18ec6525SWeongyo Jeong 		printf("usbpf_xfertap: out of memory\n");	/* XXX */
1783*18ec6525SWeongyo Jeong 		return;
1784*18ec6525SWeongyo Jeong 	}
1785*18ec6525SWeongyo Jeong 	end = buf + sizeof(struct usbpf_pkthdr) + (USB_PAGE_SIZE * 5);
1786*18ec6525SWeongyo Jeong 
1787*18ec6525SWeongyo Jeong 	bzero(ptr, sizeof(struct usbpf_pkthdr));
1788*18ec6525SWeongyo Jeong 	up = (struct usbpf_pkthdr *)ptr;
1789*18ec6525SWeongyo Jeong 	up->up_busunit = htole32(device_get_unit(bus->bdev));
1790*18ec6525SWeongyo Jeong 	up->up_type = type;
1791*18ec6525SWeongyo Jeong 	up->up_xfertype = ep->edesc->bmAttributes & UE_XFERTYPE;
1792*18ec6525SWeongyo Jeong 	up->up_address = xfer->address;
1793*18ec6525SWeongyo Jeong 	up->up_endpoint = xfer->endpointno;
1794*18ec6525SWeongyo Jeong 	up->up_flags = htole32(usbpf_aggregate_xferflags(&xfer->flags));
1795*18ec6525SWeongyo Jeong 	up->up_status = htole32(usbpf_aggregate_status(&xfer->flags_int));
1796*18ec6525SWeongyo Jeong 	switch (type) {
1797*18ec6525SWeongyo Jeong 	case USBPF_XFERTAP_SUBMIT:
1798*18ec6525SWeongyo Jeong 		up->up_length = htole32(xfer->sumlen);
1799*18ec6525SWeongyo Jeong 		up->up_frames = htole32(xfer->nframes);
1800*18ec6525SWeongyo Jeong 		break;
1801*18ec6525SWeongyo Jeong 	case USBPF_XFERTAP_DONE:
1802*18ec6525SWeongyo Jeong 		up->up_length = htole32(xfer->actlen);
1803*18ec6525SWeongyo Jeong 		up->up_frames = htole32(xfer->aframes);
1804*18ec6525SWeongyo Jeong 		break;
1805*18ec6525SWeongyo Jeong 	default:
1806*18ec6525SWeongyo Jeong 		panic("wrong usbpf type (%d)", type);
1807*18ec6525SWeongyo Jeong 	}
1808*18ec6525SWeongyo Jeong 
1809*18ec6525SWeongyo Jeong 	up->up_error = htole32(xfer->error);
1810*18ec6525SWeongyo Jeong 	up->up_interval = htole32(xfer->interval);
1811*18ec6525SWeongyo Jeong 	ptr += sizeof(struct usbpf_pkthdr);
1812*18ec6525SWeongyo Jeong 
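	/*
	 * After the packet header, each frame is emitted as a 32-bit
	 * little-endian frame length followed by the frame data.  For
	 * isochronous transfers all frames share frbuffers[0], so a running
	 * offset is kept; otherwise each frame has its own buffer.  If the
	 * preallocated buffer would overflow, the transfer is not tapped at
	 * all.
	 */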
1813*18ec6525SWeongyo Jeong 	for (i = 0; i < le32toh(up->up_frames); i++) {
1814*18ec6525SWeongyo Jeong 		if (ptr + sizeof(u_int32_t) >= end)
1815*18ec6525SWeongyo Jeong 			goto done;
1816*18ec6525SWeongyo Jeong 		*((u_int32_t *)ptr) = htole32(xfer->frlengths[i]);
1817*18ec6525SWeongyo Jeong 		ptr += sizeof(u_int32_t);
1818*18ec6525SWeongyo Jeong 
1819*18ec6525SWeongyo Jeong 		if (ptr + xfer->frlengths[i] >= end)
1820*18ec6525SWeongyo Jeong 			goto done;
1821*18ec6525SWeongyo Jeong 		if (xfer->flags_int.isochronous_xfr == 1) {
1822*18ec6525SWeongyo Jeong 			usbd_get_page(&xfer->frbuffers[0], isoc_offset, &res);
1823*18ec6525SWeongyo Jeong 			isoc_offset += xfer->frlengths[i];
1824*18ec6525SWeongyo Jeong 		} else
1825*18ec6525SWeongyo Jeong 			usbd_get_page(&xfer->frbuffers[i], 0, &res);
1826*18ec6525SWeongyo Jeong 		bcopy(res.buffer, ptr, xfer->frlengths[i]);
1827*18ec6525SWeongyo Jeong 		ptr += xfer->frlengths[i];
1828*18ec6525SWeongyo Jeong 	}
1829*18ec6525SWeongyo Jeong 
1830*18ec6525SWeongyo Jeong 	usbpf_tap(bus->uif, buf, ptr - buf);
1831*18ec6525SWeongyo Jeong done:
1832*18ec6525SWeongyo Jeong 	free(buf, M_USBPF);
1833*18ec6525SWeongyo Jeong }
1834*18ec6525SWeongyo Jeong 
1835*18ec6525SWeongyo Jeong static void
1836*18ec6525SWeongyo Jeong usbpf_append_bytes(struct usbpf_d *ud, caddr_t buf, u_int offset, void *src,
1837*18ec6525SWeongyo Jeong     u_int len)
1838*18ec6525SWeongyo Jeong {
1839*18ec6525SWeongyo Jeong 
1840*18ec6525SWeongyo Jeong 	USBPFD_LOCK_ASSERT(ud);
1841*18ec6525SWeongyo Jeong 
1842*18ec6525SWeongyo Jeong 	switch (ud->ud_bufmode) {
1843*18ec6525SWeongyo Jeong 	case USBPF_BUFMODE_BUFFER:
1844*18ec6525SWeongyo Jeong 		return (usbpf_buffer_append_bytes(ud, buf, offset, src, len));
1845*18ec6525SWeongyo Jeong 	default:
1846*18ec6525SWeongyo Jeong 		panic("usbpf_append_bytes");
1847*18ec6525SWeongyo Jeong 	}
1848*18ec6525SWeongyo Jeong }
1849*18ec6525SWeongyo Jeong 
1850*18ec6525SWeongyo Jeong static void
1851*18ec6525SWeongyo Jeong usbpf_drvinit(void *unused)
1852*18ec6525SWeongyo Jeong {
1853*18ec6525SWeongyo Jeong 	struct cdev *dev;
1854*18ec6525SWeongyo Jeong 
1855*18ec6525SWeongyo Jeong 	mtx_init(&usbpf_mtx, "USB packet filter global lock", NULL,
1856*18ec6525SWeongyo Jeong 	    MTX_DEF);
1857*18ec6525SWeongyo Jeong 	LIST_INIT(&usbpf_iflist);
1858*18ec6525SWeongyo Jeong 
1859*18ec6525SWeongyo Jeong 	dev = make_dev(&usbpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "usbpf");
1860*18ec6525SWeongyo Jeong }
1861*18ec6525SWeongyo Jeong 
1862*18ec6525SWeongyo Jeong SYSINIT(usbpf_dev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, usbpf_drvinit, NULL);
1863