xref: /freebsd/contrib/libpcap/gencode.c (revision ce834215a70ff69e7e222827437116eee2f9ac6f)
1 /*
2  * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that: (1) source code distributions
7  * retain the above copyright notice and this paragraph in its entirety, (2)
8  * distributions including binary code include the above copyright notice and
9  * this paragraph in its entirety in the documentation or other materials
10  * provided with the distribution, and (3) all advertising materials mentioning
11  * features or use of this software display the following acknowledgement:
12  * ``This product includes software developed by the University of California,
13  * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
14  * the University nor the names of its contributors may be used to endorse
15  * or promote products derived from this software without specific prior
16  * written permission.
17  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
18  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20  */
21 #ifndef lint
22 static const char rcsid[] =
23     "@(#) $Header: gencode.c,v 1.91 96/12/11 19:10:23 leres Exp $ (LBL)";
24 #endif
25 
26 #include <sys/types.h>
27 #include <sys/socket.h>
28 #include <sys/time.h>
29 
30 #if __STDC__
31 struct mbuf;
32 struct rtentry;
33 #endif
34 
35 #include <net/if.h>
36 #include <net/ethernet.h>
37 
38 #include <netinet/in.h>
39 
40 #include <stdlib.h>
41 #include <memory.h>
42 #include <setjmp.h>
43 #include <net/if_llc.h>
44 #if __STDC__
45 #include <stdarg.h>
46 #else
47 #include <varargs.h>
48 #endif
49 
50 #include "pcap-int.h"
51 
52 #include "ethertype.h"
53 #include "nlpid.h"
54 #include "gencode.h"
55 #include <pcap-namedb.h>
56 
57 #include "gnuc.h"
58 #ifdef HAVE_OS_PROTO_H
59 #include "os-proto.h"
60 #endif
61 
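/*
 * Build a BPF jump opcode: the comparison 'c' (BPF_JEQ, BPF_JGT, ...)
 * against a constant (BPF_K) operand.
 */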
62 #define JMP(c) ((c)|BPF_JMP|BPF_K)
63 
64 /* Locals */
65 static jmp_buf top_ctx;
66 static pcap_t *bpf_pcap;
67 
68 /* XXX */
69 #ifdef PCAP_FDDIPAD
70 int	pcap_fddipad = PCAP_FDDIPAD;
71 #else
72 int	pcap_fddipad;
73 #endif
74 
75 /* VARARGS */
76 __dead void
77 #if __STDC__
78 bpf_error(const char *fmt, ...)
79 #else
80 bpf_error(fmt, va_alist)
81 	const char *fmt;
82 	va_dcl
83 #endif
84 {
85 	va_list ap;
86 
87 #if __STDC__
88 	va_start(ap, fmt);
89 #else
90 	va_start(ap);
91 #endif
92 	if (bpf_pcap != NULL)
93 		(void)vsprintf(pcap_geterr(bpf_pcap), fmt, ap);
94 	va_end(ap);
95 	longjmp(top_ctx, 1);
96 	/* NOTREACHED */
97 }
98 
99 static void init_linktype(int);
100 
101 static int alloc_reg(void);
102 static void free_reg(int);
103 
104 static struct block *root;
105 
106 /*
107  * We divvy out chunks of memory rather than call malloc each time so
108  * we don't have to worry about leaking memory.  It's probably
109  * not a big deal if all this memory is wasted, but if this ever
110  * goes into a library that would probably not be a good idea.
111  */
112 #define NCHUNKS 16
113 #define CHUNK0SIZE 1024
114 struct chunk {
115 	u_int n_left;
116 	void *m;
117 };
118 
119 static struct chunk chunks[NCHUNKS];
120 static int cur_chunk;
121 
122 static void *newchunk(u_int);
123 static void freechunks(void);
124 static inline struct block *new_block(int);
125 static inline struct slist *new_stmt(int);
126 static struct block *gen_retblk(int);
127 static inline void syntax(void);
128 
129 static void backpatch(struct block *, struct block *);
130 static void merge(struct block *, struct block *);
131 static struct block *gen_cmp(u_int, u_int, bpf_int32);
132 static struct block *gen_mcmp(u_int, u_int, bpf_int32, bpf_u_int32);
133 static struct block *gen_bcmp(u_int, u_int, const u_char *);
134 static struct block *gen_uncond(int);
135 static inline struct block *gen_true(void);
136 static inline struct block *gen_false(void);
137 static struct block *gen_linktype(int);
138 static struct block *gen_hostop(bpf_u_int32, bpf_u_int32, int, int, u_int, u_int);
139 static struct block *gen_ehostop(const u_char *, int);
140 static struct block *gen_fhostop(const u_char *, int);
141 static struct block *gen_dnhostop(bpf_u_int32, int, u_int);
142 static struct block *gen_host(bpf_u_int32, bpf_u_int32, int, int);
143 static struct block *gen_gateway(const u_char *, bpf_u_int32 **, int, int);
144 static struct block *gen_ipfrag(void);
145 static struct block *gen_portatom(int, bpf_int32);
146 struct block *gen_portop(int, int, int);
147 static struct block *gen_port(int, int, int);
148 static int lookup_proto(const char *, int);
149 static struct block *gen_proto(int, int, int);
150 static struct slist *xfer_to_x(struct arth *);
151 static struct slist *xfer_to_a(struct arth *);
152 static struct block *gen_len(int, int);
153 
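/*
 * Hand out n bytes from the current chunk, advancing to the next
 * chunk when the current one cannot satisfy the request; chunk k is
 * CHUNK0SIZE << k bytes, so each chunk is twice the size of the one
 * before it.  Requests are rounded up to a multiple of sizeof(long)
 * and the returned memory is zero-filled.
 */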
154 static void *
155 newchunk(n)
156 	u_int n;
157 {
158 	struct chunk *cp;
159 	int k, size;
160 
161 	/* XXX Round up to nearest long. */
162 	n = (n + sizeof(long) - 1) & ~(sizeof(long) - 1);
163 
164 	cp = &chunks[cur_chunk];
165 	if (n > cp->n_left) {
166 		++cp, k = ++cur_chunk;
167 		if (k >= NCHUNKS)
168 			bpf_error("out of memory");
169 		size = CHUNK0SIZE << k;
170 		cp->m = (void *)malloc(size);
		if (cp->m == NULL)
			bpf_error("out of memory");
171 		memset((char *)cp->m, 0, size);
172 		cp->n_left = size;
173 		if (n > size)
174 			bpf_error("out of memory");
175 	}
176 	cp->n_left -= n;
177 	return (void *)((char *)cp->m + cp->n_left);
178 }
179 
180 static void
181 freechunks()
182 {
183 	int i;
184 
185 	cur_chunk = 0;
186 	for (i = 0; i < NCHUNKS; ++i)
187 		if (chunks[i].m != NULL) {
188 			free(chunks[i].m);
189 			chunks[i].m = NULL;
190 		}
191 }
192 
193 /*
194  * A strdup whose allocations are freed after code generation is over.
195  */
196 char *
197 sdup(s)
198 	register const char *s;
199 {
200 	int n = strlen(s) + 1;
201 	char *cp = newchunk(n);
202 
203 	strcpy(cp, s);
204 	return (cp);
205 }
206 
207 static inline struct block *
208 new_block(code)
209 	int code;
210 {
211 	struct block *p;
212 
213 	p = (struct block *)newchunk(sizeof(*p));
214 	p->s.code = code;
215 	p->head = p;
216 
217 	return p;
218 }
219 
220 static inline struct slist *
221 new_stmt(code)
222 	int code;
223 {
224 	struct slist *p;
225 
226 	p = (struct slist *)newchunk(sizeof(*p));
227 	p->s.code = code;
228 
229 	return p;
230 }
231 
232 static struct block *
233 gen_retblk(v)
234 	int v;
235 {
236 	struct block *b = new_block(BPF_RET|BPF_K);
237 
238 	b->s.k = v;
239 	return b;
240 }
241 
242 static inline void
243 syntax()
244 {
245 	bpf_error("syntax error in filter expression");
246 }
247 
248 static bpf_u_int32 netmask;
249 static int snaplen;
250 
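/*
 * Entry point: compile a filter expression into a BPF program.
 * Errors raised anywhere below via bpf_error() longjmp back here,
 * free the chunk allocator's memory and return -1, leaving the
 * message in the pcap error buffer.
 *
 * A typical call might look like the sketch below; the device name,
 * filter string and error handling are illustrative only, and the
 * netmask is whatever the caller obtained (e.g. from pcap_lookupnet()):
 *
 *	pcap_t *p = pcap_open_live(dev, 68, 1, 1000, errbuf);
 *	struct bpf_program fp;
 *
 *	if (pcap_compile(p, &fp, "ip and tcp port 80", 1, netmask) < 0)
 *		error("%s", pcap_geterr(p));
 *	if (pcap_setfilter(p, &fp) < 0)
 *		error("%s", pcap_geterr(p));
 */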
251 int
252 pcap_compile(pcap_t *p, struct bpf_program *program,
253 	     char *buf, int optimize, bpf_u_int32 mask)
254 {
255 	extern int n_errors;
256 	int len;
257 
258 	n_errors = 0;
259 	root = NULL;
260 	bpf_pcap = p;
261 	if (setjmp(top_ctx)) {
262 		freechunks();
263 		return (-1);
264 	}
265 
266 	netmask = mask;
267 	snaplen = pcap_snapshot(p);
268 
269 	lex_init(buf ? buf : "");
270 	init_linktype(pcap_datalink(p));
271 	(void)pcap_parse();
272 
273 	if (n_errors)
274 		syntax();
275 
276 	if (root == NULL)
277 		root = gen_retblk(snaplen);
278 
279 	if (optimize) {
280 		bpf_optimize(&root);
281 		if (root == NULL ||
282 		    (root->s.code == (BPF_RET|BPF_K) && root->s.k == 0))
283 			bpf_error("expression rejects all packets");
284 	}
285 	program->bf_insns = icode_to_fcode(root, &len);
286 	program->bf_len = len;
287 
288 	freechunks();
289 	return (0);
290 }
291 
292 /*
293  * Backpatch the blocks in 'list' to 'target'.  The 'sense' field indicates
294  * which of the jt and jf fields has been resolved and which is a pointer
295  * back to another unresolved block (or nil).  At least one of the fields
296  * in each block is already resolved.
297  */
298 static void
299 backpatch(list, target)
300 	struct block *list, *target;
301 {
302 	struct block *next;
303 
304 	while (list) {
305 		if (!list->sense) {
306 			next = JT(list);
307 			JT(list) = target;
308 		} else {
309 			next = JF(list);
310 			JF(list) = target;
311 		}
312 		list = next;
313 	}
314 }
315 
316 /*
317  * Merge the lists in b0 and b1, using the 'sense' field to indicate
318  * which of jt and jf is the link.
319  */
320 static void
321 merge(b0, b1)
322 	struct block *b0, *b1;
323 {
324 	register struct block **p = &b0;
325 
326 	/* Find end of list. */
327 	while (*p)
328 		p = !((*p)->sense) ? &JT(*p) : &JF(*p);
329 
330 	/* Concatenate the lists. */
331 	*p = b1;
332 }
333 
334 void
335 finish_parse(p)
336 	struct block *p;
337 {
338 	backpatch(p, gen_retblk(snaplen));
339 	p->sense = !p->sense;
340 	backpatch(p, gen_retblk(0));
341 	root = p->head;
342 }
343 
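/*
 * gen_and, gen_or and gen_not combine partially-built blocks by
 * rewriting their unresolved jump lists (see backpatch() above)
 * rather than by emitting extra instructions: for 'b0 and b1',
 * control reaches b1 only when b0 succeeds; for 'b0 or b1', b1 is
 * evaluated only when b0 fails; 'not' simply inverts the sense of
 * a block's branches.
 */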
344 void
345 gen_and(b0, b1)
346 	struct block *b0, *b1;
347 {
348 	backpatch(b0, b1->head);
349 	b0->sense = !b0->sense;
350 	b1->sense = !b1->sense;
351 	merge(b1, b0);
352 	b1->sense = !b1->sense;
353 	b1->head = b0->head;
354 }
355 
356 void
357 gen_or(b0, b1)
358 	struct block *b0, *b1;
359 {
360 	b0->sense = !b0->sense;
361 	backpatch(b0, b1->head);
362 	b0->sense = !b0->sense;
363 	merge(b1, b0);
364 	b1->head = b0->head;
365 }
366 
367 void
368 gen_not(b)
369 	struct block *b;
370 {
371 	b->sense = !b->sense;
372 }
373 
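/*
 * Generate a test that loads 'size' (BPF_B, BPF_H or BPF_W) bytes at
 * absolute packet offset 'offset' and compares them for equality
 * against 'v'.
 */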
374 static struct block *
375 gen_cmp(offset, size, v)
376 	u_int offset, size;
377 	bpf_int32 v;
378 {
379 	struct slist *s;
380 	struct block *b;
381 
382 	s = new_stmt(BPF_LD|BPF_ABS|size);
383 	s->s.k = offset;
384 
385 	b = new_block(JMP(BPF_JEQ));
386 	b->stmts = s;
387 	b->s.k = v;
388 
389 	return b;
390 }
391 
392 static struct block *
393 gen_mcmp(offset, size, v, mask)
394 	u_int offset, size;
395 	bpf_int32 v;
396 	bpf_u_int32 mask;
397 {
398 	struct block *b = gen_cmp(offset, size, v);
399 	struct slist *s;
400 
401 	if (mask != 0xffffffff) {
402 		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
403 		s->s.k = mask;
404 		b->stmts->next = s;
405 	}
406 	return b;
407 }
408 
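/*
 * Compare 'size' bytes at 'offset' against the byte string 'v',
 * working from the end of the string toward the front in word,
 * halfword and byte sized pieces and 'and'ing the pieces together.
 */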
409 static struct block *
410 gen_bcmp(offset, size, v)
411 	register u_int offset, size;
412 	register const u_char *v;
413 {
414 	register struct block *b, *tmp;
415 
416 	b = NULL;
417 	while (size >= 4) {
418 		register const u_char *p = &v[size - 4];
419 		bpf_int32 w = ((bpf_int32)p[0] << 24) |
420 		    ((bpf_int32)p[1] << 16) | ((bpf_int32)p[2] << 8) | p[3];
421 
422 		tmp = gen_cmp(offset + size - 4, BPF_W, w);
423 		if (b != NULL)
424 			gen_and(b, tmp);
425 		b = tmp;
426 		size -= 4;
427 	}
428 	while (size >= 2) {
429 		register const u_char *p = &v[size - 2];
430 		bpf_int32 w = ((bpf_int32)p[0] << 8) | p[1];
431 
432 		tmp = gen_cmp(offset + size - 2, BPF_H, w);
433 		if (b != NULL)
434 			gen_and(b, tmp);
435 		b = tmp;
436 		size -= 2;
437 	}
438 	if (size > 0) {
439 		tmp = gen_cmp(offset, BPF_B, (bpf_int32)v[0]);
440 		if (b != NULL)
441 			gen_and(b, tmp);
442 		b = tmp;
443 	}
444 	return b;
445 }
446 
447 /*
448  * Various code constructs need to know the layout of the data link
449  * layer.  These variables give the necessary offsets.  off_linktype
450  * is set to -1 for no encapsulation, in which case, IP is assumed.
451  */
452 static u_int off_linktype;
453 static u_int off_nl;
454 static int linktype;
455 
456 static void
457 init_linktype(type)
458 	int type;
459 {
460 	linktype = type;
461 
462 	switch (type) {
463 
464 	case DLT_EN10MB:
465 		off_linktype = 12;
466 		off_nl = 14;
467 		return;
468 
469 	case DLT_SLIP:
470 		/*
471 		 * SLIP doesn't have a link level type.  The 16 byte
472 		 * header is hacked into our SLIP driver.
473 		 */
474 		off_linktype = -1;
475 		off_nl = 16;
476 		return;
477 
478 	case DLT_NULL:
479 		off_linktype = 0;
480 		off_nl = 4;
481 		return;
482 
483 	case DLT_PPP:
484 		off_linktype = 2;
485 		off_nl = 4;
486 		return;
487 
488 	case DLT_FDDI:
489 		/*
490 		 * FDDI doesn't really have a link-level type field.
491 		 * We assume that SSAP = SNAP is being used and pick
492 		 * out the encapsulated Ethernet type.
493 		 */
494 		off_linktype = 19;
495 #ifdef PCAP_FDDIPAD
496 		off_linktype += pcap_fddipad;
497 #endif
498 		off_nl = 21;
499 #ifdef PCAP_FDDIPAD
500 		off_nl += pcap_fddipad;
501 #endif
502 		return;
503 
504 	case DLT_IEEE802:
505 		off_linktype = 20;
506 		off_nl = 22;
507 		return;
508 
509 	case DLT_ATM_RFC1483:
510 		/*
511 		 * assume routed, non-ISO PDUs
512 		 * (i.e., LLC = 0xAA-AA-03, OUI = 0x00-00-00)
513 		 */
514 		off_linktype = 6;
515 		off_nl = 8;
516 		return;
517 
518 	case DLT_RAW:
519 		off_linktype = -1;
520 		off_nl = 0;
521 		return;
522 	}
523 	bpf_error("unknown data link type 0x%x", linktype);
524 	/* NOTREACHED */
525 }
526 
527 static struct block *
528 gen_uncond(rsense)
529 	int rsense;
530 {
531 	struct block *b;
532 	struct slist *s;
533 
534 	s = new_stmt(BPF_LD|BPF_IMM);
535 	s->s.k = !rsense;
536 	b = new_block(JMP(BPF_JEQ));
537 	b->stmts = s;
538 
539 	return b;
540 }
541 
542 static inline struct block *
543 gen_true()
544 {
545 	return gen_uncond(1);
546 }
547 
548 static inline struct block *
549 gen_false()
550 {
551 	return gen_uncond(0);
552 }
553 
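/*
 * Generate a test for the link-level protocol type field, with
 * per-DLT special cases: SLIP carries only IP, DLT_NULL stores an
 * AF_* value, PPP uses its own protocol numbers, and 802.3-length
 * Ethernet frames are checked for the ISO LLC SAPs.
 */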
554 static struct block *
555 gen_linktype(proto)
556 	int proto;
557 {
558 	/* If we're not using encapsulation and checking for IP, we're done */
559 	if (off_linktype == -1 && proto == ETHERTYPE_IP)
560 		return gen_true();
561 
562 	switch (linktype) {
563 
564 	case DLT_SLIP:
565 		return gen_false();
566 
567 	case DLT_PPP:
568 		if (proto == ETHERTYPE_IP)
569 			proto = 0x0021;		/* XXX - need ppp.h defs */
570 		break;
571 
572 	case DLT_NULL:
573 		/* XXX */
574 		if (proto == ETHERTYPE_IP)
575 			return (gen_cmp(0, BPF_W, (bpf_int32)htonl(AF_INET)));
576 		else
577 			return gen_false();
578 	case DLT_EN10MB:
579 		/*
580 		 * Having to look at SAPs here is quite disgusting,
581 		 * but given an internal architecture that _knows_ that
582 		 * it's looking at IP on Ethernet...
583 		 */
584 		if (proto == LLC_ISO_LSAP) {
585 			struct block *b0, *b1;
586 
587 			b0 = gen_cmp(off_linktype, BPF_H, (long)ETHERMTU);
588 			b0->s.code = JMP(BPF_JGT);
589 			gen_not(b0);
590 			b1 = gen_cmp(off_linktype + 2, BPF_H, (long)
591 				     ((LLC_ISO_LSAP << 8) | LLC_ISO_LSAP));
592 			gen_and(b0, b1);
593 			return b1;
594 		}
595 		break;
596 	}
597 	return gen_cmp(off_linktype, BPF_H, (bpf_int32)proto);
598 }
599 
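/*
 * Match a 32-bit (e.g. IPv4) address field: check the link type,
 * then compare the word at absolute offset src_off or dst_off
 * against 'addr' under 'mask'.  The Q_AND/Q_OR directions recurse
 * and combine the source and destination tests.
 */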
600 static struct block *
601 gen_hostop(addr, mask, dir, proto, src_off, dst_off)
602 	bpf_u_int32 addr;
603 	bpf_u_int32 mask;
604 	int dir, proto;
605 	u_int src_off, dst_off;
606 {
607 	struct block *b0, *b1;
608 	u_int offset;
609 
610 	switch (dir) {
611 
612 	case Q_SRC:
613 		offset = src_off;
614 		break;
615 
616 	case Q_DST:
617 		offset = dst_off;
618 		break;
619 
620 	case Q_AND:
621 		b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
622 		b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
623 		gen_and(b0, b1);
624 		return b1;
625 
626 	case Q_OR:
627 	case Q_DEFAULT:
628 		b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
629 		b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
630 		gen_or(b0, b1);
631 		return b1;
632 
633 	default:
634 		abort();
635 	}
636 	b0 = gen_linktype(proto);
637 	b1 = gen_mcmp(offset, BPF_W, (bpf_int32)addr, mask);
638 	gen_and(b0, b1);
639 	return b1;
640 }
641 
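/*
 * Match an Ethernet address: the destination address is at offset 0
 * of the frame and the source address at offset 6.
 */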
642 static struct block *
643 gen_ehostop(eaddr, dir)
644 	register const u_char *eaddr;
645 	register int dir;
646 {
647 	register struct block *b0, *b1;
648 
649 	switch (dir) {
650 	case Q_SRC:
651 		return gen_bcmp(6, 6, eaddr);
652 
653 	case Q_DST:
654 		return gen_bcmp(0, 6, eaddr);
655 
656 	case Q_AND:
657 		b0 = gen_ehostop(eaddr, Q_SRC);
658 		b1 = gen_ehostop(eaddr, Q_DST);
659 		gen_and(b0, b1);
660 		return b1;
661 
662 	case Q_DEFAULT:
663 	case Q_OR:
664 		b0 = gen_ehostop(eaddr, Q_SRC);
665 		b1 = gen_ehostop(eaddr, Q_DST);
666 		gen_or(b0, b1);
667 		return b1;
668 	}
669 	abort();
670 	/* NOTREACHED */
671 }
672 
673 /*
674  * Like gen_ehostop, but for DLT_FDDI
675  */
676 static struct block *
677 gen_fhostop(eaddr, dir)
678 	register const u_char *eaddr;
679 	register int dir;
680 {
681 	struct block *b0, *b1;
682 
683 	switch (dir) {
684 	case Q_SRC:
685 #ifdef PCAP_FDDIPAD
686 		return gen_bcmp(6 + 1 + pcap_fddipad, 6, eaddr);
687 #else
688 		return gen_bcmp(6 + 1, 6, eaddr);
689 #endif
690 
691 	case Q_DST:
692 #ifdef PCAP_FDDIPAD
693 		return gen_bcmp(0 + 1 + pcap_fddipad, 6, eaddr);
694 #else
695 		return gen_bcmp(0 + 1, 6, eaddr);
696 #endif
697 
698 	case Q_AND:
699 		b0 = gen_fhostop(eaddr, Q_SRC);
700 		b1 = gen_fhostop(eaddr, Q_DST);
701 		gen_and(b0, b1);
702 		return b1;
703 
704 	case Q_DEFAULT:
705 	case Q_OR:
706 		b0 = gen_fhostop(eaddr, Q_SRC);
707 		b1 = gen_fhostop(eaddr, Q_DST);
708 		gen_or(b0, b1);
709 		return b1;
710 	}
711 	abort();
712 	/* NOTREACHED */
713 }
714 
715 /*
716  * This is quite tricky because there may be pad bytes in front of the
717  * DECNET header, and then there are two possible data packet formats that
718  * carry both src and dst addresses, plus 5 packet types in a format that
719  * carries only the src node, plus 2 types that use a different format and
720  * also carry just the src node.
721  *
722  * Yuck.
723  *
724  * Instead of doing those all right, we just look for data packets with
725  * 0 or 1 bytes of padding.  If you want to look at other packets, that
726  * will require a lot more hacking.
727  *
728  * To add support for filtering on DECNET "areas" (network numbers)
729  * one would want to add a "mask" argument to this routine.  That would
730  * make the filter even more inefficient, although one could be clever
731  * and not generate masking instructions if the mask is 0xFFFF.
732  */
733 static struct block *
734 gen_dnhostop(addr, dir, base_off)
735 	bpf_u_int32 addr;
736 	int dir;
737 	u_int base_off;
738 {
739 	struct block *b0, *b1, *b2, *tmp;
740 	u_int offset_lh;	/* offset if long header is received */
741 	u_int offset_sh;	/* offset if short header is received */
742 
743 	switch (dir) {
744 
745 	case Q_DST:
746 		offset_sh = 1;	/* follows flags */
747 		offset_lh = 7;	/* flgs,darea,dsubarea,HIORD */
748 		break;
749 
750 	case Q_SRC:
751 		offset_sh = 3;	/* follows flags, dstnode */
752 		offset_lh = 15;	/* flgs,darea,dsubarea,did,sarea,ssub,HIORD */
753 		break;
754 
755 	case Q_AND:
756 		/* Inefficient because we do our Calvinball dance twice */
757 		b0 = gen_dnhostop(addr, Q_SRC, base_off);
758 		b1 = gen_dnhostop(addr, Q_DST, base_off);
759 		gen_and(b0, b1);
760 		return b1;
761 
762 	case Q_OR:
763 	case Q_DEFAULT:
764 		/* Inefficient because we do our Calvinball dance twice */
765 		b0 = gen_dnhostop(addr, Q_SRC, base_off);
766 		b1 = gen_dnhostop(addr, Q_DST, base_off);
767 		gen_or(b0, b1);
768 		return b1;
769 
770 	default:
771 		abort();
772 	}
773 	b0 = gen_linktype(ETHERTYPE_DN);
774 	/* Check for pad = 1, long header case */
775 	tmp = gen_mcmp(base_off + 2, BPF_H,
776 	    (bpf_int32)ntohs(0x0681), (bpf_int32)ntohs(0x07FF));
777 	b1 = gen_cmp(base_off + 2 + 1 + offset_lh,
778 	    BPF_H, (bpf_int32)ntohs(addr));
779 	gen_and(tmp, b1);
780 	/* Check for pad = 0, long header case */
781 	tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x06, (bpf_int32)0x7);
782 	b2 = gen_cmp(base_off + 2 + offset_lh, BPF_H, (bpf_int32)ntohs(addr));
783 	gen_and(tmp, b2);
784 	gen_or(b2, b1);
785 	/* Check for pad = 1, short header case */
786 	tmp = gen_mcmp(base_off + 2, BPF_H,
787 	    (bpf_int32)ntohs(0x0281), (bpf_int32)ntohs(0x07FF));
788 	b2 = gen_cmp(base_off + 2 + 1 + offset_sh,
789 	    BPF_H, (bpf_int32)ntohs(addr));
790 	gen_and(tmp, b2);
791 	gen_or(b2, b1);
792 	/* Check for pad = 0, short header case */
793 	tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x02, (bpf_int32)0x7);
794 	b2 = gen_cmp(base_off + 2 + offset_sh, BPF_H, (bpf_int32)ntohs(addr));
795 	gen_and(tmp, b2);
796 	gen_or(b2, b1);
797 
798 	/* Combine with test for linktype */
799 	gen_and(b0, b1);
800 	return b1;
801 }
802 
803 static struct block *
804 gen_host(addr, mask, proto, dir)
805 	bpf_u_int32 addr;
806 	bpf_u_int32 mask;
807 	int proto;
808 	int dir;
809 {
810 	struct block *b0, *b1;
811 
812 	switch (proto) {
813 
814 	case Q_DEFAULT:
815 		b0 = gen_host(addr, mask, Q_IP, dir);
816 		b1 = gen_host(addr, mask, Q_ARP, dir);
817 		gen_or(b0, b1);
818 		b0 = gen_host(addr, mask, Q_RARP, dir);
819 		gen_or(b1, b0);
820 		return b0;
821 
822 	case Q_IP:
823 		return gen_hostop(addr, mask, dir, ETHERTYPE_IP,
824 				  off_nl + 12, off_nl + 16);
825 
826 	case Q_RARP:
827 		return gen_hostop(addr, mask, dir, ETHERTYPE_REVARP,
828 				  off_nl + 14, off_nl + 24);
829 
830 	case Q_ARP:
831 		return gen_hostop(addr, mask, dir, ETHERTYPE_ARP,
832 				  off_nl + 14, off_nl + 24);
833 
834 	case Q_TCP:
835 		bpf_error("'tcp' modifier applied to host");
836 
837 	case Q_UDP:
838 		bpf_error("'udp' modifier applied to host");
839 
840 	case Q_ICMP:
841 		bpf_error("'icmp' modifier applied to host");
842 
843 	case Q_IGMP:
844 		bpf_error("'igmp' modifier applied to host");
845 
846 	case Q_IGRP:
847 		bpf_error("'igrp' modifier applied to host");
848 
849 	case Q_ATALK:
850 		bpf_error("ATALK host filtering not implemented");
851 
852 	case Q_DECNET:
853 		return gen_dnhostop(addr, dir, off_nl);
854 
855 	case Q_SCA:
856 		bpf_error("SCA host filtering not implemented");
857 
858 	case Q_LAT:
859 		bpf_error("LAT host filtering not implemented");
860 
861 	case Q_MOPDL:
862 		bpf_error("MOPDL host filtering not implemented");
863 
864 	case Q_MOPRC:
865 		bpf_error("MOPRC host filtering not implemented");
866 
867 	case Q_ISO:
868 	        bpf_error("ISO host filtering not implemented");
869 
870 	default:
871 		abort();
872 	}
873 	/* NOTREACHED */
874 }
875 
876 static struct block *
877 gen_gateway(eaddr, alist, proto, dir)
878 	const u_char *eaddr;
879 	bpf_u_int32 **alist;
880 	int proto;
881 	int dir;
882 {
883 	struct block *b0, *b1, *tmp;
884 
885 	if (dir != 0)
886 		bpf_error("direction applied to 'gateway'");
887 
888 	switch (proto) {
889 	case Q_DEFAULT:
890 	case Q_IP:
891 	case Q_ARP:
892 	case Q_RARP:
893 		if (linktype == DLT_EN10MB)
894 			b0 = gen_ehostop(eaddr, Q_OR);
895 		else if (linktype == DLT_FDDI)
896 			b0 = gen_fhostop(eaddr, Q_OR);
897 		else
898 			bpf_error(
899 			    "'gateway' supported only on ethernet or FDDI");
900 
901 		b1 = gen_host(**alist++, 0xffffffff, proto, Q_OR);
902 		while (*alist) {
903 			tmp = gen_host(**alist++, 0xffffffff, proto, Q_OR);
904 			gen_or(b1, tmp);
905 			b1 = tmp;
906 		}
907 		gen_not(b1);
908 		gen_and(b0, b1);
909 		return b1;
910 	}
911 	bpf_error("illegal modifier of 'gateway'");
912 	/* NOTREACHED */
913 }
914 
915 struct block *
916 gen_proto_abbrev(proto)
917 	int proto;
918 {
919 	struct block *b0, *b1;
920 
921 	switch (proto) {
922 
923 	case Q_TCP:
924 		b0 = gen_linktype(ETHERTYPE_IP);
925 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_TCP);
926 		gen_and(b0, b1);
927 		break;
928 
929 	case Q_UDP:
930 		b0 =  gen_linktype(ETHERTYPE_IP);
931 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_UDP);
932 		gen_and(b0, b1);
933 		break;
934 
935 	case Q_ICMP:
936 		b0 =  gen_linktype(ETHERTYPE_IP);
937 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_ICMP);
938 		gen_and(b0, b1);
939 		break;
940 
941 	case Q_IGMP:
942 		b0 =  gen_linktype(ETHERTYPE_IP);
943 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)2);
944 		gen_and(b0, b1);
945 		break;
946 
947 #ifndef	IPPROTO_IGRP
948 #define	IPPROTO_IGRP	9
949 #endif
950 	case Q_IGRP:
951 		b0 = gen_linktype(ETHERTYPE_IP);
952 		b1 = gen_cmp(off_nl + 9, BPF_B, (long)IPPROTO_IGRP);
953 		gen_and(b0, b1);
954 		break;
955 
956 	case Q_IP:
957 		b1 =  gen_linktype(ETHERTYPE_IP);
958 		break;
959 
960 	case Q_ARP:
961 		b1 =  gen_linktype(ETHERTYPE_ARP);
962 		break;
963 
964 	case Q_RARP:
965 		b1 =  gen_linktype(ETHERTYPE_REVARP);
966 		break;
967 
968 	case Q_LINK:
969 		bpf_error("link layer applied in wrong context");
970 
971 	case Q_ATALK:
972 		b1 =  gen_linktype(ETHERTYPE_ATALK);
973 		break;
974 
975 	case Q_DECNET:
976 		b1 =  gen_linktype(ETHERTYPE_DN);
977 		break;
978 
979 	case Q_SCA:
980 		b1 =  gen_linktype(ETHERTYPE_SCA);
981 		break;
982 
983 	case Q_LAT:
984 		b1 =  gen_linktype(ETHERTYPE_LAT);
985 		break;
986 
987 	case Q_MOPDL:
988 		b1 =  gen_linktype(ETHERTYPE_MOPDL);
989 		break;
990 
991 	case Q_MOPRC:
992 		b1 =  gen_linktype(ETHERTYPE_MOPRC);
993 		break;
994 
995 	case Q_ISO:
996 	        b1 = gen_linktype(LLC_ISO_LSAP);
997 		break;
998 
999 	case Q_ESIS:
1000 	        b1 = gen_proto(ISO9542_ESIS, Q_ISO, Q_DEFAULT);
1001 		break;
1002 
1003 	case Q_ISIS:
1004 	        b1 = gen_proto(ISO10589_ISIS, Q_ISO, Q_DEFAULT);
1005 		break;
1006 
1007 	default:
1008 		abort();
1009 	}
1010 	return b1;
1011 }
1012 
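/*
 * Generate a test that the packet is not a later IP fragment, i.e.
 * that the fragment-offset bits (the low 13 bits of the halfword at
 * off_nl + 6) are zero, so that port tests only look at packets that
 * actually carry the transport header.
 */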
1013 static struct block *
1014 gen_ipfrag()
1015 {
1016 	struct slist *s;
1017 	struct block *b;
1018 
1019 	/* not ip frag */
1020 	s = new_stmt(BPF_LD|BPF_H|BPF_ABS);
1021 	s->s.k = off_nl + 6;
1022 	b = new_block(JMP(BPF_JSET));
1023 	b->s.k = 0x1fff;
1024 	b->stmts = s;
1025 	gen_not(b);
1026 
1027 	return b;
1028 }
1029 
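/*
 * Compare a TCP/UDP port: BPF_LDX|BPF_MSH|BPF_B loads 4 * (IP header
 * length) into the X register, so the halfword at off_nl + X + off is
 * the source (off = 0) or destination (off = 2) port.
 */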
1030 static struct block *
1031 gen_portatom(off, v)
1032 	int off;
1033 	bpf_int32 v;
1034 {
1035 	struct slist *s;
1036 	struct block *b;
1037 
1038 	s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
1039 	s->s.k = off_nl;
1040 
1041 	s->next = new_stmt(BPF_LD|BPF_IND|BPF_H);
1042 	s->next->s.k = off_nl + off;
1043 
1044 	b = new_block(JMP(BPF_JEQ));
1045 	b->stmts = s;
1046 	b->s.k = v;
1047 
1048 	return b;
1049 }
1050 
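/*
 * 'port' qualified by IP protocol and direction: require the right
 * 'ip proto', require a first-fragment (unfragmented) packet, then
 * test the source and/or destination port.
 */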
1051 struct block *
1052 gen_portop(port, proto, dir)
1053 	int port, proto, dir;
1054 {
1055 	struct block *b0, *b1, *tmp;
1056 
1057 	/* ip proto 'proto' */
1058 	tmp = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)proto);
1059 	b0 = gen_ipfrag();
1060 	gen_and(tmp, b0);
1061 
1062 	switch (dir) {
1063 	case Q_SRC:
1064 		b1 = gen_portatom(0, (bpf_int32)port);
1065 		break;
1066 
1067 	case Q_DST:
1068 		b1 = gen_portatom(2, (bpf_int32)port);
1069 		break;
1070 
1071 	case Q_OR:
1072 	case Q_DEFAULT:
1073 		tmp = gen_portatom(0, (bpf_int32)port);
1074 		b1 = gen_portatom(2, (bpf_int32)port);
1075 		gen_or(tmp, b1);
1076 		break;
1077 
1078 	case Q_AND:
1079 		tmp = gen_portatom(0, (bpf_int32)port);
1080 		b1 = gen_portatom(2, (bpf_int32)port);
1081 		gen_and(tmp, b1);
1082 		break;
1083 
1084 	default:
1085 		abort();
1086 	}
1087 	gen_and(b0, b1);
1088 
1089 	return b1;
1090 }
1091 
1092 static struct block *
1093 gen_port(port, ip_proto, dir)
1094 	int port;
1095 	int ip_proto;
1096 	int dir;
1097 {
1098 	struct block *b0, *b1, *tmp;
1099 
1100 	/* ether proto ip */
1101 	b0 =  gen_linktype(ETHERTYPE_IP);
1102 
1103 	switch (ip_proto) {
1104 	case IPPROTO_UDP:
1105 	case IPPROTO_TCP:
1106 		b1 = gen_portop(port, ip_proto, dir);
1107 		break;
1108 
1109 	case PROTO_UNDEF:
1110 		tmp = gen_portop(port, IPPROTO_TCP, dir);
1111 		b1 = gen_portop(port, IPPROTO_UDP, dir);
1112 		gen_or(tmp, b1);
1113 		break;
1114 
1115 	default:
1116 		abort();
1117 	}
1118 	gen_and(b0, b1);
1119 	return b1;
1120 }
1121 
1122 static int
1123 lookup_proto(name, proto)
1124 	register const char *name;
1125 	register int proto;
1126 {
1127 	register int v;
1128 
1129 	switch (proto) {
1130 
1131 	case Q_DEFAULT:
1132 	case Q_IP:
1133 		v = pcap_nametoproto(name);
1134 		if (v == PROTO_UNDEF)
1135 			bpf_error("unknown ip proto '%s'", name);
1136 		break;
1137 
1138 	case Q_LINK:
1139 		/* XXX should look up h/w protocol type based on linktype */
1140 		v = pcap_nametoeproto(name);
1141 		if (v == PROTO_UNDEF)
1142 			bpf_error("unknown ether proto '%s'", name);
1143 		break;
1144 
1145 	default:
1146 		v = PROTO_UNDEF;
1147 		break;
1148 	}
1149 	return v;
1150 }
1151 
1152 static struct block *
1153 gen_proto(v, proto, dir)
1154 	int v;
1155 	int proto;
1156 	int dir;
1157 {
1158 	struct block *b0, *b1;
1159 
1160 	if (dir != Q_DEFAULT)
1161 		bpf_error("direction applied to 'proto'");
1162 
1163 	switch (proto) {
1164 	case Q_DEFAULT:
1165 	case Q_IP:
1166 		b0 = gen_linktype(ETHERTYPE_IP);
1167 		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)v);
1168 		gen_and(b0, b1);
1169 		return b1;
1170 
1171 	case Q_ISO:
1172 		b0 = gen_linktype(LLC_ISO_LSAP);
1173 		b1 = gen_cmp(off_nl + 3, BPF_B, (long)v);
1174 		gen_and(b0, b1);
1175 		return b1;
1176 
1177 	case Q_ARP:
1178 		bpf_error("arp does not encapsulate another protocol");
1179 		/* NOTREACHED */
1180 
1181 	case Q_RARP:
1182 		bpf_error("rarp does not encapsulate another protocol");
1183 		/* NOTREACHED */
1184 
1185 	case Q_ATALK:
1186 		bpf_error("atalk encapsulation is not specifiable");
1187 		/* NOTREACHED */
1188 
1189 	case Q_DECNET:
1190 		bpf_error("decnet encapsulation is not specifiable");
1191 		/* NOTREACHED */
1192 
1193 	case Q_SCA:
1194 		bpf_error("sca does not encapsulate another protocol");
1195 		/* NOTREACHED */
1196 
1197 	case Q_LAT:
1198 		bpf_error("lat does not encapsulate another protocol");
1199 		/* NOTREACHED */
1200 
1201 	case Q_MOPRC:
1202 		bpf_error("moprc does not encapsulate another protocol");
1203 		/* NOTREACHED */
1204 
1205 	case Q_MOPDL:
1206 		bpf_error("mopdl does not encapsulate another protocol");
1207 		/* NOTREACHED */
1208 
1209 	case Q_LINK:
1210 		return gen_linktype(v);
1211 
1212 	case Q_UDP:
1213 		bpf_error("'udp proto' is bogus");
1214 		/* NOTREACHED */
1215 
1216 	case Q_TCP:
1217 		bpf_error("'tcp proto' is bogus");
1218 		/* NOTREACHED */
1219 
1220 	case Q_ICMP:
1221 		bpf_error("'icmp proto' is bogus");
1222 		/* NOTREACHED */
1223 
1224 	case Q_IGMP:
1225 		bpf_error("'igmp proto' is bogus");
1226 		/* NOTREACHED */
1227 
1228 	case Q_IGRP:
1229 		bpf_error("'igrp proto' is bogus");
1230 		/* NOTREACHED */
1231 
1232 	default:
1233 		abort();
1234 		/* NOTREACHED */
1235 	}
1236 	/* NOTREACHED */
1237 }
1238 
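/*
 * Generate a filter block from a symbolic name: depending on the
 * qualifiers, resolve host, network, port, protocol or gateway names
 * via the pcap name-to-address/number routines and dispatch to the
 * appropriate gen_* routine above.
 */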
1239 struct block *
1240 gen_scode(name, q)
1241 	register const char *name;
1242 	struct qual q;
1243 {
1244 	int proto = q.proto;
1245 	int dir = q.dir;
1246 	int tproto;
1247 	u_char *eaddr;
1248 	bpf_u_int32 mask, addr, **alist;
1249 	struct block *b, *tmp;
1250 	int port, real_proto;
1251 
1252 	switch (q.addr) {
1253 
1254 	case Q_NET:
1255 		addr = pcap_nametonetaddr(name);
1256 		if (addr == 0)
1257 			bpf_error("unknown network '%s'", name);
1258 		/* Left justify network addr and calculate its network mask */
1259 		mask = 0xffffffff;
1260 		while (addr && (addr & 0xff000000) == 0) {
1261 			addr <<= 8;
1262 			mask <<= 8;
1263 		}
1264 		return gen_host(addr, mask, proto, dir);
1265 
1266 	case Q_DEFAULT:
1267 	case Q_HOST:
1268 		if (proto == Q_LINK) {
1269 			switch (linktype) {
1270 
1271 			case DLT_EN10MB:
1272 				eaddr = pcap_ether_hostton(name);
1273 				if (eaddr == NULL)
1274 					bpf_error(
1275 					    "unknown ether host '%s'", name);
1276 				return gen_ehostop(eaddr, dir);
1277 
1278 			case DLT_FDDI:
1279 				eaddr = pcap_ether_hostton(name);
1280 				if (eaddr == NULL)
1281 					bpf_error(
1282 					    "unknown FDDI host '%s'", name);
1283 				return gen_fhostop(eaddr, dir);
1284 
1285 			default:
1286 				bpf_error(
1287 			"only ethernet/FDDI supports link-level host name");
1288 				break;
1289 			}
1290 		} else if (proto == Q_DECNET) {
1291 			unsigned short dn_addr = __pcap_nametodnaddr(name);
1292 			/*
1293 			 * I don't think DECNET hosts can be multihomed, so
1294 			 * there is no need to build up a list of addresses
1295 			 */
1296 			return (gen_host(dn_addr, 0, proto, dir));
1297 		} else {
1298 			alist = pcap_nametoaddr(name);
1299 			if (alist == NULL || *alist == NULL)
1300 				bpf_error("unknown host '%s'", name);
1301 			tproto = proto;
1302 			if (off_linktype == -1 && tproto == Q_DEFAULT)
1303 				tproto = Q_IP;
1304 			b = gen_host(**alist++, 0xffffffff, tproto, dir);
1305 			while (*alist) {
1306 				tmp = gen_host(**alist++, 0xffffffff,
1307 					       tproto, dir);
1308 				gen_or(b, tmp);
1309 				b = tmp;
1310 			}
1311 			return b;
1312 		}
1313 
1314 	case Q_PORT:
1315 		if (proto != Q_DEFAULT && proto != Q_UDP && proto != Q_TCP)
1316 			bpf_error("illegal qualifier of 'port'");
1317 		if (pcap_nametoport(name, &port, &real_proto) == 0)
1318 			bpf_error("unknown port '%s'", name);
1319 		if (proto == Q_UDP) {
1320 			if (real_proto == IPPROTO_TCP)
1321 				bpf_error("port '%s' is tcp", name);
1322 			else
1323 				/* override PROTO_UNDEF */
1324 				real_proto = IPPROTO_UDP;
1325 		}
1326 		if (proto == Q_TCP) {
1327 			if (real_proto == IPPROTO_UDP)
1328 				bpf_error("port '%s' is udp", name);
1329 			else
1330 				/* override PROTO_UNDEF */
1331 				real_proto = IPPROTO_TCP;
1332 		}
1333 		return gen_port(port, real_proto, dir);
1334 
1335 	case Q_GATEWAY:
1336 		eaddr = pcap_ether_hostton(name);
1337 		if (eaddr == NULL)
1338 			bpf_error("unknown ether host: %s", name);
1339 
1340 		alist = pcap_nametoaddr(name);
1341 		if (alist == NULL || *alist == NULL)
1342 			bpf_error("unknown host '%s'", name);
1343 		return gen_gateway(eaddr, alist, proto, dir);
1344 
1345 	case Q_PROTO:
1346 		real_proto = lookup_proto(name, proto);
1347 		if (real_proto >= 0)
1348 			return gen_proto(real_proto, proto, dir);
1349 		else
1350 			bpf_error("unknown protocol: %s", name);
1351 
1352 	case Q_UNDEF:
1353 		syntax();
1354 		/* NOTREACHED */
1355 	}
1356 	abort();
1357 	/* NOTREACHED */
1358 }
1359 
1360 struct block *
1361 gen_mcode(s1, s2, masklen, q)
1362 	register const char *s1, *s2;
1363 	register int masklen;
1364 	struct qual q;
1365 {
1366 	register int nlen, mlen;
1367 	bpf_u_int32 n, m;
1368 
1369 	nlen = __pcap_atoin(s1, &n);
1370 	/* Promote short ipaddr */
1371 	n <<= 32 - nlen;
1372 
1373 	if (s2 != NULL) {
1374 		mlen = __pcap_atoin(s2, &m);
1375 		/* Promote short ipaddr */
1376 		m <<= 32 - mlen;
1377 	} else {
1378 		/* Convert mask len to mask */
1379 		if (masklen > 32)
1380 			bpf_error("mask length must be <= 32");
1381 		m = 0xffffffff << (32 - masklen);
1382 	}
1383 
1384 	switch (q.addr) {
1385 
1386 	case Q_NET:
1387 		return gen_host(n, m, q.proto, q.dir);
1388 
1389 	default:
1390 		bpf_error("Mask syntax for networks only");
1391 		/* NOTREACHED */
1392 	}
1393 }
1394 
1395 struct block *
1396 gen_ncode(s, v, q)
1397 	register const char *s;
1398 	bpf_u_int32 v;
1399 	struct qual q;
1400 {
1401 	bpf_u_int32 mask;
1402 	int proto = q.proto;
1403 	int dir = q.dir;
1404 	register int vlen;
1405 
1406 	if (s == NULL)
1407 		vlen = 32;
1408 	else if (q.proto == Q_DECNET)
1409 		vlen = __pcap_atodn(s, &v);
1410 	else
1411 		vlen = __pcap_atoin(s, &v);
1412 
1413 	switch (q.addr) {
1414 
1415 	case Q_DEFAULT:
1416 	case Q_HOST:
1417 	case Q_NET:
1418 		if (proto == Q_DECNET)
1419 			return gen_host(v, 0, proto, dir);
1420 		else if (proto == Q_LINK) {
1421 			bpf_error("illegal link layer address");
1422 		} else {
1423 			mask = 0xffffffff;
1424 			if (s == NULL && q.addr == Q_NET) {
1425 				/* Promote short net number */
1426 				while (v && (v & 0xff000000) == 0) {
1427 					v <<= 8;
1428 					mask <<= 8;
1429 				}
1430 			} else {
1431 				/* Promote short ipaddr */
1432 				v <<= 32 - vlen;
1433 				mask <<= 32 - vlen;
1434 			}
1435 			return gen_host(v, mask, proto, dir);
1436 		}
1437 
1438 	case Q_PORT:
1439 		if (proto == Q_UDP)
1440 			proto = IPPROTO_UDP;
1441 		else if (proto == Q_TCP)
1442 			proto = IPPROTO_TCP;
1443 		else if (proto == Q_DEFAULT)
1444 			proto = PROTO_UNDEF;
1445 		else
1446 			bpf_error("illegal qualifier of 'port'");
1447 
1448 		return gen_port((int)v, proto, dir);
1449 
1450 	case Q_GATEWAY:
1451 		bpf_error("'gateway' requires a name");
1452 		/* NOTREACHED */
1453 
1454 	case Q_PROTO:
1455 		return gen_proto((int)v, proto, dir);
1456 
1457 	case Q_UNDEF:
1458 		syntax();
1459 		/* NOTREACHED */
1460 
1461 	default:
1462 		abort();
1463 		/* NOTREACHED */
1464 	}
1465 	/* NOTREACHED */
1466 }
1467 
1468 struct block *
1469 gen_ecode(eaddr, q)
1470 	register const u_char *eaddr;
1471 	struct qual q;
1472 {
1473 	if ((q.addr == Q_HOST || q.addr == Q_DEFAULT) && q.proto == Q_LINK) {
1474 		if (linktype == DLT_EN10MB)
1475 			return gen_ehostop(eaddr, (int)q.dir);
1476 		if (linktype == DLT_FDDI)
1477 			return gen_fhostop(eaddr, (int)q.dir);
1478 	}
1479 	bpf_error("ethernet address used in non-ether expression");
1480 	/* NOTREACHED */
1481 }
1482 
1483 void
1484 sappend(s0, s1)
1485 	struct slist *s0, *s1;
1486 {
1487 	/*
1488 	 * This is definitely not the best way to do this, but the
1489 	 * lists will rarely get long.
1490 	 */
1491 	while (s0->next)
1492 		s0 = s0->next;
1493 	s0->next = s1;
1494 }
1495 
1496 static struct slist *
1497 xfer_to_x(a)
1498 	struct arth *a;
1499 {
1500 	struct slist *s;
1501 
1502 	s = new_stmt(BPF_LDX|BPF_MEM);
1503 	s->s.k = a->regno;
1504 	return s;
1505 }
1506 
1507 static struct slist *
1508 xfer_to_a(a)
1509 	struct arth *a;
1510 {
1511 	struct slist *s;
1512 
1513 	s = new_stmt(BPF_LD|BPF_MEM);
1514 	s->s.k = a->regno;
1515 	return s;
1516 }
1517 
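/*
 * Generate code for a packet-data accessor such as 'ip[6:2]': load
 * 'size' bytes at the (possibly computed) index, relative to the
 * link header, the network-layer header or the transport header
 * depending on 'proto', and leave the result in a freshly allocated
 * scratch register.  Protocol checks implied by the accessor are
 * attached to index->b.
 */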
1518 struct arth *
1519 gen_load(proto, index, size)
1520 	int proto;
1521 	struct arth *index;
1522 	int size;
1523 {
1524 	struct slist *s, *tmp;
1525 	struct block *b;
1526 	int regno = alloc_reg();
1527 
1528 	free_reg(index->regno);
1529 	switch (size) {
1530 
1531 	default:
1532 		bpf_error("data size must be 1, 2, or 4");
1533 
1534 	case 1:
1535 		size = BPF_B;
1536 		break;
1537 
1538 	case 2:
1539 		size = BPF_H;
1540 		break;
1541 
1542 	case 4:
1543 		size = BPF_W;
1544 		break;
1545 	}
1546 	switch (proto) {
1547 	default:
1548 		bpf_error("unsupported index operation");
1549 
1550 	case Q_LINK:
1551 		s = xfer_to_x(index);
1552 		tmp = new_stmt(BPF_LD|BPF_IND|size);
1553 		sappend(s, tmp);
1554 		sappend(index->s, s);
1555 		break;
1556 
1557 	case Q_IP:
1558 	case Q_ARP:
1559 	case Q_RARP:
1560 	case Q_ATALK:
1561 	case Q_DECNET:
1562 	case Q_SCA:
1563 	case Q_LAT:
1564 	case Q_MOPRC:
1565 	case Q_MOPDL:
1566 		/* XXX Note that we assume a fixed link header here. */
1567 		s = xfer_to_x(index);
1568 		tmp = new_stmt(BPF_LD|BPF_IND|size);
1569 		tmp->s.k = off_nl;
1570 		sappend(s, tmp);
1571 		sappend(index->s, s);
1572 
1573 		b = gen_proto_abbrev(proto);
1574 		if (index->b)
1575 			gen_and(index->b, b);
1576 		index->b = b;
1577 		break;
1578 
1579 	case Q_TCP:
1580 	case Q_UDP:
1581 	case Q_ICMP:
1582 	case Q_IGMP:
1583 	case Q_IGRP:
1584 		s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
1585 		s->s.k = off_nl;
1586 		sappend(s, xfer_to_a(index));
1587 		sappend(s, new_stmt(BPF_ALU|BPF_ADD|BPF_X));
1588 		sappend(s, new_stmt(BPF_MISC|BPF_TAX));
1589 		sappend(s, tmp = new_stmt(BPF_LD|BPF_IND|size));
1590 		tmp->s.k = off_nl;
1591 		sappend(index->s, s);
1592 
1593 		gen_and(gen_proto_abbrev(proto), b = gen_ipfrag());
1594 		if (index->b)
1595 			gen_and(index->b, b);
1596 		index->b = b;
1597 		break;
1598 	}
1599 	index->regno = regno;
1600 	s = new_stmt(BPF_ST);
1601 	s->s.k = regno;
1602 	sappend(index->s, s);
1603 
1604 	return index;
1605 }
1606 
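/*
 * Generate the test for a relational expression: compute a0 - a1 in
 * the accumulator and branch on the result.  For '>' and '>=' the
 * generated jump compares the difference against the sign bit
 * (0x80000000) and the branch sense is reversed.
 */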
1607 struct block *
1608 gen_relation(code, a0, a1, reversed)
1609 	int code;
1610 	struct arth *a0, *a1;
1611 	int reversed;
1612 {
1613 	struct slist *s0, *s1, *s2;
1614 	struct block *b, *tmp;
1615 
1616 	s0 = xfer_to_x(a1);
1617 	s1 = xfer_to_a(a0);
1618 	s2 = new_stmt(BPF_ALU|BPF_SUB|BPF_X);
1619 	b = new_block(JMP(code));
1620 	if (code == BPF_JGT || code == BPF_JGE) {
1621 		reversed = !reversed;
1622 		b->s.k = 0x80000000;
1623 	}
1624 	if (reversed)
1625 		gen_not(b);
1626 
1627 	sappend(s1, s2);
1628 	sappend(s0, s1);
1629 	sappend(a1->s, s0);
1630 	sappend(a0->s, a1->s);
1631 
1632 	b->stmts = a0->s;
1633 
1634 	free_reg(a0->regno);
1635 	free_reg(a1->regno);
1636 
1637 	/* 'and' together protocol checks */
1638 	if (a0->b) {
1639 		if (a1->b) {
1640 			gen_and(a0->b, tmp = a1->b);
1641 		}
1642 		else
1643 			tmp = a0->b;
1644 	} else
1645 		tmp = a1->b;
1646 
1647 	if (tmp)
1648 		gen_and(tmp, b);
1649 
1650 	return b;
1651 }
1652 
1653 struct arth *
1654 gen_loadlen()
1655 {
1656 	int regno = alloc_reg();
1657 	struct arth *a = (struct arth *)newchunk(sizeof(*a));
1658 	struct slist *s;
1659 
1660 	s = new_stmt(BPF_LD|BPF_LEN);
1661 	s->next = new_stmt(BPF_ST);
1662 	s->next->s.k = regno;
1663 	a->s = s;
1664 	a->regno = regno;
1665 
1666 	return a;
1667 }
1668 
1669 struct arth *
1670 gen_loadi(val)
1671 	int val;
1672 {
1673 	struct arth *a;
1674 	struct slist *s;
1675 	int reg;
1676 
1677 	a = (struct arth *)newchunk(sizeof(*a));
1678 
1679 	reg = alloc_reg();
1680 
1681 	s = new_stmt(BPF_LD|BPF_IMM);
1682 	s->s.k = val;
1683 	s->next = new_stmt(BPF_ST);
1684 	s->next->s.k = reg;
1685 	a->s = s;
1686 	a->regno = reg;
1687 
1688 	return a;
1689 }
1690 
1691 struct arth *
1692 gen_neg(a)
1693 	struct arth *a;
1694 {
1695 	struct slist *s;
1696 
1697 	s = xfer_to_a(a);
1698 	sappend(a->s, s);
1699 	s = new_stmt(BPF_ALU|BPF_NEG);
1700 	s->s.k = 0;
1701 	sappend(a->s, s);
1702 	s = new_stmt(BPF_ST);
1703 	s->s.k = a->regno;
1704 	sappend(a->s, s);
1705 
1706 	return a;
1707 }
1708 
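/*
 * Combine two arithmetic expressions with an ALU operation: a1 goes
 * into the X register, a0 into the accumulator, and the result is
 * stored in a newly allocated scratch register, which becomes a0's
 * register; a1's register is freed.
 */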
1709 struct arth *
1710 gen_arth(code, a0, a1)
1711 	int code;
1712 	struct arth *a0, *a1;
1713 {
1714 	struct slist *s0, *s1, *s2;
1715 
1716 	s0 = xfer_to_x(a1);
1717 	s1 = xfer_to_a(a0);
1718 	s2 = new_stmt(BPF_ALU|BPF_X|code);
1719 
1720 	sappend(s1, s2);
1721 	sappend(s0, s1);
1722 	sappend(a1->s, s0);
1723 	sappend(a0->s, a1->s);
1724 
1725 	free_reg(a1->regno);
1726 
1727 	s0 = new_stmt(BPF_ST);
1728 	a0->regno = s0->s.k = alloc_reg();
1729 	sappend(a0->s, s0);
1730 
1731 	return a0;
1732 }
1733 
1734 /*
1735  * Here we handle simple allocation of the scratch registers.
1736  * If too many registers are alloc'd, the allocator punts.
1737  */
1738 static int regused[BPF_MEMWORDS];
1739 static int curreg;
1740 
1741 /*
1742  * Return the next free register.
1743  */
1744 static int
1745 alloc_reg()
1746 {
1747 	int n = BPF_MEMWORDS;
1748 
1749 	while (--n >= 0) {
1750 		if (regused[curreg])
1751 			curreg = (curreg + 1) % BPF_MEMWORDS;
1752 		else {
1753 			regused[curreg] = 1;
1754 			return curreg;
1755 		}
1756 	}
1757 	bpf_error("too many registers needed to evaluate expression");
1758 	/* NOTREACHED */
1759 }
1760 
1761 /*
1762  * Return a register to the table so it can
1763  * be used later.
1764  */
1765 static void
1766 free_reg(n)
1767 	int n;
1768 {
1769 	regused[n] = 0;
1770 }
1771 
1772 static struct block *
1773 gen_len(jmp, n)
1774 	int jmp, n;
1775 {
1776 	struct slist *s;
1777 	struct block *b;
1778 
1779 	s = new_stmt(BPF_LD|BPF_LEN);
1780 	b = new_block(JMP(jmp));
1781 	b->stmts = s;
1782 	b->s.k = n;
1783 
1784 	return b;
1785 }
1786 
1787 struct block *
1788 gen_greater(n)
1789 	int n;
1790 {
1791 	return gen_len(BPF_JGE, n);
1792 }
1793 
1794 /*
1795  * Actually, this is less than or equal.
1796  */
1797 
1798 struct block *
1799 gen_less(n)
1800 	int n;
1801 {
1802 	struct block *b;
1803 
1804 	b = gen_len(BPF_JGT, n);
1805 	gen_not(b);
1806 
1807 	return b;
1808 }
1809 
1810 struct block *
1811 gen_byteop(op, idx, val)
1812 	int op, idx, val;
1813 {
1814 	struct block *b;
1815 	struct slist *s;
1816 
1817 	switch (op) {
1818 	default:
1819 		abort();
1820 
1821 	case '=':
1822 		return gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1823 
1824 	case '<':
1825 		b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1826 		b->s.code = JMP(BPF_JGE);
1827 		gen_not(b);
1828 		return b;
1829 
1830 	case '>':
1831 		b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1832 		b->s.code = JMP(BPF_JGT);
1833 		return b;
1834 
1835 	case '|':
1836 		s = new_stmt(BPF_ALU|BPF_OR|BPF_K);
1837 		break;
1838 
1839 	case '&':
1840 		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
1841 		break;
1842 	}
1843 	s->s.k = val;
1844 	b = new_block(JMP(BPF_JEQ));
1845 	b->stmts = s;
1846 	gen_not(b);
1847 
1848 	return b;
1849 }
1850 
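/*
 * 'broadcast': at the link layer this matches the all-ones MAC
 * address; for IP it uses the netmask passed to pcap_compile() to
 * match destination addresses whose host part is all zeros or all
 * ones.
 */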
1851 struct block *
1852 gen_broadcast(proto)
1853 	int proto;
1854 {
1855 	bpf_u_int32 hostmask;
1856 	struct block *b0, *b1, *b2;
1857 	static u_char ebroadcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1858 
1859 	switch (proto) {
1860 
1861 	case Q_DEFAULT:
1862 	case Q_LINK:
1863 		if (linktype == DLT_EN10MB)
1864 			return gen_ehostop(ebroadcast, Q_DST);
1865 		if (linktype == DLT_FDDI)
1866 			return gen_fhostop(ebroadcast, Q_DST);
1867 		bpf_error("not a broadcast link");
1868 		break;
1869 
1870 	case Q_IP:
1871 		b0 = gen_linktype(ETHERTYPE_IP);
1872 		hostmask = ~netmask;
1873 		b1 = gen_mcmp(off_nl + 16, BPF_W, (bpf_int32)0, hostmask);
1874 		b2 = gen_mcmp(off_nl + 16, BPF_W,
1875 			      (bpf_int32)(~0 & hostmask), hostmask);
1876 		gen_or(b1, b2);
1877 		gen_and(b0, b2);
1878 		return b2;
1879 	}
1880 	bpf_error("only ether/ip broadcast filters supported");
1881 }
1882 
1883 struct block *
1884 gen_multicast(proto)
1885 	int proto;
1886 {
1887 	register struct block *b0, *b1;
1888 	register struct slist *s;
1889 
1890 	switch (proto) {
1891 
1892 	case Q_DEFAULT:
1893 	case Q_LINK:
1894 		if (linktype == DLT_EN10MB) {
1895 			/* ether[0] & 1 != 0 */
1896 			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
1897 			s->s.k = 0;
1898 			b0 = new_block(JMP(BPF_JSET));
1899 			b0->s.k = 1;
1900 			b0->stmts = s;
1901 			return b0;
1902 		}
1903 
1904 		if (linktype == DLT_FDDI) {
1905 			/* XXX TEST THIS: MIGHT NOT PORT PROPERLY XXX */
1906 			/* fddi[1] & 1 != 0 */
1907 			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
1908 			s->s.k = 1;
1909 			b0 = new_block(JMP(BPF_JSET));
1910 			b0->s.k = 1;
1911 			b0->stmts = s;
1912 			return b0;
1913 		}
1914 		/* Link not known to support multicasts */
1915 		break;
1916 
1917 	case Q_IP:
1918 		b0 = gen_linktype(ETHERTYPE_IP);
1919 		b1 = gen_cmp(off_nl + 16, BPF_B, (bpf_int32)224);
1920 		b1->s.code = JMP(BPF_JGE);
1921 		gen_and(b0, b1);
1922 		return b1;
1923 	}
1924 	bpf_error("only IP multicast filters supported on ethernet/FDDI");
1925 }
1926 
1927 /*
1928  * Generate code for inbound/outbound.  It's here so we can
1929  * make it link-type specific.  'dir' = 0 implies "inbound",
1930  * = 1 implies "outbound".
1931  */
1932 struct block *
1933 gen_inbound(dir)
1934 	int dir;
1935 {
1936 	register struct block *b0;
1937 
1938 	b0 = gen_relation(BPF_JEQ,
1939 			  gen_load(Q_LINK, gen_loadi(0), 1),
1940 			  gen_loadi(0),
1941 			  dir);
1942 	return (b0);
1943 }
1944