/*
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#ifndef lint
static char rcsid[] =
    "@(#) $Header: gencode.c,v 1.88 96/07/23 01:30:41 leres Exp $ (LBL)";
#endif

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>

#if __STDC__
struct mbuf;
struct rtentry;
#endif

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <stdlib.h>
#include <memory.h>
#include <setjmp.h>
#include <net/if_llc.h>
#if __STDC__
#include <stdarg.h>
#else
#include <varargs.h>
#endif

#include "pcap-int.h"

#include "ethertype.h"
#include "nlpid.h"
#include "gencode.h"
#include <pcap-namedb.h>

#include "gnuc.h"
#ifdef HAVE_OS_PROTO_H
#include "os-proto.h"
#endif

#define JMP(c) ((c)|BPF_JMP|BPF_K)

/* Locals */
static jmp_buf top_ctx;
static pcap_t *bpf_pcap;

/* XXX */
#ifdef PCAP_FDDIPAD
int	pcap_fddipad = PCAP_FDDIPAD;
#else
int	pcap_fddipad;
#endif

/* VARARGS */
__dead void
#if __STDC__
bpf_error(const char *fmt, ...)
#else
bpf_error(fmt, va_alist)
	const char *fmt;
	va_dcl
#endif
{
	va_list ap;

#if __STDC__
	va_start(ap, fmt);
#else
	va_start(ap);
#endif
	if (bpf_pcap != NULL)
		(void)vsprintf(pcap_geterr(bpf_pcap), fmt, ap);
	va_end(ap);
	longjmp(top_ctx, 1);
	/* NOTREACHED */
}

static void init_linktype(int);

static int alloc_reg(void);
static void free_reg(int);

static struct block *root;

/*
 * We divvy out chunks of memory rather than call malloc each time so
 * we don't have to worry about leaking memory.  It's probably
 * not a big deal if all this memory was wasted, but if this ever
 * goes into a library that would probably not be a good idea.
 */
#define NCHUNKS 16
#define CHUNK0SIZE 1024
struct chunk {
	u_int n_left;
	void *m;
};

static struct chunk chunks[NCHUNKS];
static int cur_chunk;

static void *newchunk(u_int);
static void freechunks(void);
static inline struct block *new_block(int);
static inline struct slist *new_stmt(int);
static struct block *gen_retblk(int);
static inline void syntax(void);

static void backpatch(struct block *, struct block *);
static void merge(struct block *, struct block *);
static struct block *gen_cmp(u_int, u_int, bpf_int32);
static struct block *gen_mcmp(u_int, u_int, bpf_int32, bpf_u_int32);
static struct block *gen_bcmp(u_int, u_int, const u_char *);
static struct block *gen_uncond(int);
static inline struct block *gen_true(void);
static inline struct block *gen_false(void);
static struct block *gen_linktype(int);
static struct block *gen_hostop(bpf_u_int32, bpf_u_int32, int, int, u_int, u_int);
static struct block *gen_ehostop(const u_char *, int);
static struct block *gen_fhostop(const u_char *, int);
static struct block *gen_dnhostop(bpf_u_int32, int, u_int);
static struct block *gen_host(bpf_u_int32, bpf_u_int32, int, int);
static struct block *gen_gateway(const u_char *, bpf_u_int32 **, int, int);
static struct block *gen_ipfrag(void);
static struct block *gen_portatom(int, bpf_int32);
struct block *gen_portop(int, int, int);
static struct block *gen_port(int, int, int);
static int lookup_proto(const char *, int);
static struct block *gen_proto(int, int, int);
static struct slist *xfer_to_x(struct arth *);
static struct slist *xfer_to_a(struct arth *);
static struct block *gen_len(int, int);

static void *
newchunk(n)
	u_int n;
{
	struct chunk *cp;
	int k, size;

	/* XXX Round up to nearest long. */
	n = (n + sizeof(long) - 1) & ~(sizeof(long) - 1);

	cp = &chunks[cur_chunk];
	if (n > cp->n_left) {
		++cp, k = ++cur_chunk;
		if (k >= NCHUNKS)
			bpf_error("out of memory");
		size = CHUNK0SIZE << k;
		cp->m = (void *)malloc(size);
		memset((char *)cp->m, 0, size);
		cp->n_left = size;
		if (n > size)
			bpf_error("out of memory");
	}
	cp->n_left -= n;
	return (void *)((char *)cp->m + cp->n_left);
}

static void
freechunks()
{
	int i;

	cur_chunk = 0;
	for (i = 0; i < NCHUNKS; ++i)
		if (chunks[i].m != NULL) {
			free(chunks[i].m);
			chunks[i].m = NULL;
		}
}
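/*
 * Note on sizing: chunk k is allocated with size CHUNK0SIZE << k, and
 * since the statically zeroed chunk 0 starts out empty, the first real
 * allocation lands in chunk 1 (2 KB), the next overflow in chunk 2 (4 KB),
 * and so on up to chunk 15 (32 MB) before newchunk() gives up with
 * "out of memory".  A single request larger than the fresh chunk it lands
 * in also fails, since requests are never split across chunks.
 */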
/*
 * A strdup whose allocations are freed after code generation is over.
 */
char *
sdup(s)
	register const char *s;
{
	int n = strlen(s) + 1;
	char *cp = newchunk(n);

	strcpy(cp, s);
	return (cp);
}

static inline struct block *
new_block(code)
	int code;
{
	struct block *p;

	p = (struct block *)newchunk(sizeof(*p));
	p->s.code = code;
	p->head = p;

	return p;
}

static inline struct slist *
new_stmt(code)
	int code;
{
	struct slist *p;

	p = (struct slist *)newchunk(sizeof(*p));
	p->s.code = code;

	return p;
}

static struct block *
gen_retblk(v)
	int v;
{
	struct block *b = new_block(BPF_RET|BPF_K);

	b->s.k = v;
	return b;
}

static inline void
syntax()
{
	bpf_error("syntax error in filter expression");
}

static bpf_u_int32 netmask;
static int snaplen;

int
pcap_compile(pcap_t *p, struct bpf_program *program,
	     char *buf, int optimize, bpf_u_int32 mask)
{
	extern int n_errors;
	int len;

	n_errors = 0;
	root = NULL;
	bpf_pcap = p;
	if (setjmp(top_ctx)) {
		freechunks();
		return (-1);
	}

	netmask = mask;
	snaplen = pcap_snapshot(p);

	lex_init(buf ? buf : "");
	init_linktype(pcap_datalink(p));
	(void)pcap_parse();

	if (n_errors)
		syntax();

	if (root == NULL)
		root = gen_retblk(snaplen);

	if (optimize) {
		bpf_optimize(&root);
		if (root == NULL ||
		    (root->s.code == (BPF_RET|BPF_K) && root->s.k == 0))
			bpf_error("expression rejects all packets");
	}
	program->bf_insns = icode_to_fcode(root, &len);
	program->bf_len = len;

	freechunks();
	return (0);
}
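/*
 * Sketch of how an application typically drives this (a minimal,
 * hypothetical example; the device name, netmask and the error() helper
 * are placeholders):
 *
 *	char errbuf[PCAP_ERRBUF_SIZE];
 *	struct bpf_program fcode;
 *	pcap_t *pd = pcap_open_live("le0", 68, 1, 1000, errbuf);
 *
 *	if (pcap_compile(pd, &fcode, "ip host 128.3.112.20", 1, netmask) < 0)
 *		error("%s", pcap_geterr(pd));
 *	if (pcap_setfilter(pd, &fcode) < 0)
 *		error("%s", pcap_geterr(pd));
 *
 * On a parse or generation error, bpf_error() longjmps back to the
 * setjmp() in pcap_compile(), which frees the chunks and returns -1 with
 * the message available via pcap_geterr().
 */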
/*
 * Backpatch the blocks in 'list' to 'target'.  The 'sense' field indicates
 * which of the jt and jf fields has been resolved and which is a pointer
 * back to another unresolved block (or nil).  At least one of the fields
 * in each block is already resolved.
 */
static void
backpatch(list, target)
	struct block *list, *target;
{
	struct block *next;

	while (list) {
		if (!list->sense) {
			next = JT(list);
			JT(list) = target;
		} else {
			next = JF(list);
			JF(list) = target;
		}
		list = next;
	}
}

/*
 * Merge the lists in b0 and b1, using the 'sense' field to indicate
 * which of jt and jf is the link.
 */
static void
merge(b0, b1)
	struct block *b0, *b1;
{
	register struct block **p = &b0;

	/* Find end of list. */
	while (*p)
		p = !((*p)->sense) ? &JT(*p) : &JF(*p);

	/* Concatenate the lists. */
	*p = b1;
}

void
finish_parse(p)
	struct block *p;
{
	backpatch(p, gen_retblk(snaplen));
	p->sense = !p->sense;
	backpatch(p, gen_retblk(0));
	root = p->head;
}

void
gen_and(b0, b1)
	struct block *b0, *b1;
{
	backpatch(b0, b1->head);
	b0->sense = !b0->sense;
	b1->sense = !b1->sense;
	merge(b1, b0);
	b1->sense = !b1->sense;
	b1->head = b0->head;
}

void
gen_or(b0, b1)
	struct block *b0, *b1;
{
	b0->sense = !b0->sense;
	backpatch(b0, b1->head);
	b0->sense = !b0->sense;
	merge(b1, b0);
	b1->head = b0->head;
}

void
gen_not(b)
	struct block *b;
{
	b->sense = !b->sense;
}

static struct block *
gen_cmp(offset, size, v)
	u_int offset, size;
	bpf_int32 v;
{
	struct slist *s;
	struct block *b;

	s = new_stmt(BPF_LD|BPF_ABS|size);
	s->s.k = offset;

	b = new_block(JMP(BPF_JEQ));
	b->stmts = s;
	b->s.k = v;

	return b;
}

static struct block *
gen_mcmp(offset, size, v, mask)
	u_int offset, size;
	bpf_int32 v;
	bpf_u_int32 mask;
{
	struct block *b = gen_cmp(offset, size, v);
	struct slist *s;

	if (mask != 0xffffffff) {
		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
		s->s.k = mask;
		b->stmts->next = s;
	}
	return b;
}

static struct block *
gen_bcmp(offset, size, v)
	register u_int offset, size;
	register const u_char *v;
{
	register struct block *b, *tmp;

	b = NULL;
	while (size >= 4) {
		register const u_char *p = &v[size - 4];
		bpf_int32 w = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];

		tmp = gen_cmp(offset + size - 4, BPF_W, w);
		if (b != NULL)
			gen_and(b, tmp);
		b = tmp;
		size -= 4;
	}
	while (size >= 2) {
		register const u_char *p = &v[size - 2];
		bpf_int32 w = (p[0] << 8) | p[1];

		tmp = gen_cmp(offset + size - 2, BPF_H, w);
		if (b != NULL)
			gen_and(b, tmp);
		b = tmp;
		size -= 2;
	}
	if (size > 0) {
		tmp = gen_cmp(offset, BPF_B, (bpf_int32)v[0]);
		if (b != NULL)
			gen_and(b, tmp);
		b = tmp;
	}
	return b;
}
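/*
 * For example, matching a 6-byte Ethernet address at offset 6 via
 * gen_bcmp(6, 6, eaddr) decomposes into a 32-bit compare of bytes 2-5 of
 * the address at packet offset 8, ANDed with a 16-bit compare of bytes
 * 0-1 at packet offset 6; any odd leftover byte would fall through to a
 * single byte compare.
 */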
/*
 * Various code constructs need to know the layout of the data link
 * layer.  These variables give the necessary offsets.  off_linktype
 * is set to -1 for no encapsulation, in which case, IP is assumed.
 */
static u_int off_linktype;
static u_int off_nl;
static int linktype;

static void
init_linktype(type)
	int type;
{
	linktype = type;

	switch (type) {

	case DLT_EN10MB:
		off_linktype = 12;
		off_nl = 14;
		return;

	case DLT_SLIP:
		/*
		 * SLIP doesn't have a link level type.  The 16 byte
		 * header is hacked into our SLIP driver.
		 */
		off_linktype = -1;
		off_nl = 16;
		return;

	case DLT_NULL:
		off_linktype = 0;
		off_nl = 4;
		return;

	case DLT_PPP:
		off_linktype = 2;
		off_nl = 4;
		return;

	case DLT_FDDI:
		/*
		 * FDDI doesn't really have a link-level type field.
		 * We assume that SSAP = SNAP is being used and pick
		 * out the encapsulated Ethernet type.
		 */
		off_linktype = 19;
#ifdef PCAP_FDDIPAD
		off_linktype += pcap_fddipad;
#endif
		off_nl = 21;
#ifdef PCAP_FDDIPAD
		off_nl += pcap_fddipad;
#endif
		return;

	case DLT_IEEE802:
		off_linktype = 20;
		off_nl = 22;
		return;

	case DLT_ATM_RFC1483:
		/*
		 * assume routed, non-ISO PDUs
		 * (i.e., LLC = 0xAA-AA-03, OUI = 0x00-00-00)
		 */
		off_linktype = 6;
		off_nl = 8;
		return;
	}
	bpf_error("unknown data link type 0x%x", linktype);
	/* NOTREACHED */
}

static struct block *
gen_uncond(rsense)
	int rsense;
{
	struct block *b;
	struct slist *s;

	s = new_stmt(BPF_LD|BPF_IMM);
	s->s.k = !rsense;
	b = new_block(JMP(BPF_JEQ));
	b->stmts = s;

	return b;
}

static inline struct block *
gen_true()
{
	return gen_uncond(1);
}

static inline struct block *
gen_false()
{
	return gen_uncond(0);
}

static struct block *
gen_linktype(proto)
	int proto;
{
	switch (linktype) {
	case DLT_SLIP:
		if (proto == ETHERTYPE_IP)
			return gen_true();
		else
			return gen_false();

	case DLT_PPP:
		if (proto == ETHERTYPE_IP)
			proto = 0x0021;		/* XXX - need ppp.h defs */
		break;

	case DLT_NULL:
		/* XXX */
		if (proto == ETHERTYPE_IP)
			return (gen_cmp(0, BPF_W, (bpf_int32)AF_INET));
		else
			return gen_false();

	case DLT_EN10MB:
		/*
		 * Having to look at SAP's here is quite disgusting,
		 * but given an internal architecture that _knows_ that
		 * it's looking at IP on Ethernet...
		 */
		if (proto == LLC_ISO_LSAP) {
			struct block *b0, *b1;

			b0 = gen_cmp(off_linktype, BPF_H, (long)ETHERMTU);
			b0->s.code = JMP(BPF_JGT);
			gen_not(b0);
			b1 = gen_cmp(off_linktype + 2, BPF_H, (long)
				     ((LLC_ISO_LSAP << 8) | LLC_ISO_LSAP));
			gen_and(b0, b1);
			return b1;
		}
		break;
	}
	return gen_cmp(off_linktype, BPF_H, (bpf_int32)proto);
}

static struct block *
gen_hostop(addr, mask, dir, proto, src_off, dst_off)
	bpf_u_int32 addr;
	bpf_u_int32 mask;
	int dir, proto;
	u_int src_off, dst_off;
{
	struct block *b0, *b1;
	u_int offset;

	switch (dir) {

	case Q_SRC:
		offset = src_off;
		break;

	case Q_DST:
		offset = dst_off;
		break;

	case Q_AND:
		b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
		b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
		gen_and(b0, b1);
		return b1;

	case Q_OR:
	case Q_DEFAULT:
		b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
		b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
		gen_or(b0, b1);
		return b1;

	default:
		abort();
	}
	b0 = gen_linktype(proto);
	b1 = gen_mcmp(offset, BPF_W, (bpf_int32)addr, mask);
	gen_and(b0, b1);
	return b1;
}

static struct block *
gen_ehostop(eaddr, dir)
	register const u_char *eaddr;
	register int dir;
{
	register struct block *b0, *b1;

	switch (dir) {
	case Q_SRC:
		return gen_bcmp(6, 6, eaddr);

	case Q_DST:
		return gen_bcmp(0, 6, eaddr);

	case Q_AND:
		b0 = gen_ehostop(eaddr, Q_SRC);
		b1 = gen_ehostop(eaddr, Q_DST);
		gen_and(b0, b1);
		return b1;

	case Q_DEFAULT:
	case Q_OR:
		b0 = gen_ehostop(eaddr, Q_SRC);
		b1 = gen_ehostop(eaddr, Q_DST);
		gen_or(b0, b1);
		return b1;
	}
	abort();
	/* NOTREACHED */
}
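/*
 * The magic numbers in gen_ehostop() are the Ethernet header layout:
 * the destination address occupies bytes 0-5 and the source address
 * bytes 6-11, hence the 6-byte compares at offsets 0 and 6.
 */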
/*
 * Like gen_ehostop, but for DLT_FDDI
 */
static struct block *
gen_fhostop(eaddr, dir)
	register const u_char *eaddr;
	register int dir;
{
	struct block *b0, *b1;

	switch (dir) {
	case Q_SRC:
#ifdef PCAP_FDDIPAD
		return gen_bcmp(6 + 1 + pcap_fddipad, 6, eaddr);
#else
		return gen_bcmp(6 + 1, 6, eaddr);
#endif

	case Q_DST:
#ifdef PCAP_FDDIPAD
		return gen_bcmp(0 + 1 + pcap_fddipad, 6, eaddr);
#else
		return gen_bcmp(0 + 1, 6, eaddr);
#endif

	case Q_AND:
		b0 = gen_fhostop(eaddr, Q_SRC);
		b1 = gen_fhostop(eaddr, Q_DST);
		gen_and(b0, b1);
		return b1;

	case Q_DEFAULT:
	case Q_OR:
		b0 = gen_fhostop(eaddr, Q_SRC);
		b1 = gen_fhostop(eaddr, Q_DST);
		gen_or(b0, b1);
		return b1;
	}
	abort();
	/* NOTREACHED */
}

/*
 * This is quite tricky because there may be pad bytes in front of the
 * DECNET header, and then there are two possible data packet formats that
 * carry both src and dst addresses, plus 5 packet types in a format that
 * carries only the src node, plus 2 types that use a different format and
 * also carry just the src node.
 *
 * Yuck.
 *
 * Instead of doing those all right, we just look for data packets with
 * 0 or 1 bytes of padding.  If you want to look at other packets, that
 * will require a lot more hacking.
 *
 * To add support for filtering on DECNET "areas" (network numbers)
 * one would want to add a "mask" argument to this routine.  That would
 * make the filter even more inefficient, although one could be clever
 * and not generate masking instructions if the mask is 0xFFFF.
 */
static struct block *
gen_dnhostop(addr, dir, base_off)
	bpf_u_int32 addr;
	int dir;
	u_int base_off;
{
	struct block *b0, *b1, *b2, *tmp;
	u_int offset_lh;	/* offset if long header is received */
	u_int offset_sh;	/* offset if short header is received */

	switch (dir) {

	case Q_DST:
		offset_sh = 1;	/* follows flags */
		offset_lh = 7;	/* flgs,darea,dsubarea,HIORD */
		break;

	case Q_SRC:
		offset_sh = 3;	/* follows flags, dstnode */
		offset_lh = 15;	/* flgs,darea,dsubarea,did,sarea,ssub,HIORD */
		break;

	case Q_AND:
		/* Inefficient because we do our Calvinball dance twice */
		b0 = gen_dnhostop(addr, Q_SRC, base_off);
		b1 = gen_dnhostop(addr, Q_DST, base_off);
		gen_and(b0, b1);
		return b1;

	case Q_OR:
	case Q_DEFAULT:
		/* Inefficient because we do our Calvinball dance twice */
		b0 = gen_dnhostop(addr, Q_SRC, base_off);
		b1 = gen_dnhostop(addr, Q_DST, base_off);
		gen_or(b0, b1);
		return b1;

	default:
		abort();
	}
	b0 = gen_linktype(ETHERTYPE_DN);
	/* Check for pad = 1, long header case */
	tmp = gen_mcmp(base_off + 2, BPF_H,
	    (bpf_int32)ntohs(0x0681), (bpf_int32)ntohs(0x07FF));
	b1 = gen_cmp(base_off + 2 + 1 + offset_lh,
	    BPF_H, (bpf_int32)ntohs(addr));
	gen_and(tmp, b1);
	/* Check for pad = 0, long header case */
	tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x06, (bpf_int32)0x7);
	b2 = gen_cmp(base_off + 2 + offset_lh, BPF_H, (bpf_int32)ntohs(addr));
	gen_and(tmp, b2);
	gen_or(b2, b1);
	/* Check for pad = 1, short header case */
	tmp = gen_mcmp(base_off + 2, BPF_H,
	    (bpf_int32)ntohs(0x0281), (bpf_int32)ntohs(0x07FF));
	b2 = gen_cmp(base_off + 2 + 1 + offset_sh,
	    BPF_H, (bpf_int32)ntohs(addr));
	gen_and(tmp, b2);
	gen_or(b2, b1);
	/* Check for pad = 0, short header case */
	tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x02, (bpf_int32)0x7);
	b2 = gen_cmp(base_off + 2 + offset_sh, BPF_H, (bpf_int32)ntohs(addr));
	gen_and(tmp, b2);
	gen_or(b2, b1);

	/* Combine with test for linktype */
	gen_and(b0, b1);
	return b1;
}

static struct block *
gen_host(addr, mask, proto, dir)
	bpf_u_int32 addr;
	bpf_u_int32 mask;
	int proto;
	int dir;
{
	struct block *b0, *b1;

	switch (proto) {

	case Q_DEFAULT:
		b0 = gen_host(addr, mask, Q_IP, dir);
		b1 = gen_host(addr, mask, Q_ARP, dir);
		gen_or(b0, b1);
		b0 = gen_host(addr, mask, Q_RARP, dir);
		gen_or(b1, b0);
		return b0;

	case Q_IP:
		return gen_hostop(addr, mask, dir, ETHERTYPE_IP,
				  off_nl + 12, off_nl + 16);

	case Q_RARP:
		return gen_hostop(addr, mask, dir, ETHERTYPE_REVARP,
				  off_nl + 14, off_nl + 24);

	case Q_ARP:
		return gen_hostop(addr, mask, dir, ETHERTYPE_ARP,
				  off_nl + 14, off_nl + 24);

	case Q_TCP:
		bpf_error("'tcp' modifier applied to host");

	case Q_UDP:
		bpf_error("'udp' modifier applied to host");

	case Q_ICMP:
		bpf_error("'icmp' modifier applied to host");

	case Q_IGMP:
		bpf_error("'igmp' modifier applied to host");

	case Q_IGRP:
		bpf_error("'igrp' modifier applied to host");

	case Q_ATALK:
		bpf_error("ATALK host filtering not implemented");

	case Q_DECNET:
		return gen_dnhostop(addr, dir, off_nl);

	case Q_SCA:
		bpf_error("SCA host filtering not implemented");

	case Q_LAT:
		bpf_error("LAT host filtering not implemented");

	case Q_MOPDL:
		bpf_error("MOPDL host filtering not implemented");

	case Q_MOPRC:
		bpf_error("MOPRC host filtering not implemented");

	case Q_ISO:
		bpf_error("ISO host filtering not implemented");

	default:
		abort();
	}
	/* NOTREACHED */
}

static struct block *
gen_gateway(eaddr, alist, proto, dir)
	const u_char *eaddr;
	bpf_u_int32 **alist;
	int proto;
	int dir;
{
	struct block *b0, *b1, *tmp;

	if (dir != 0)
		bpf_error("direction applied to 'gateway'");

	switch (proto) {
	case Q_DEFAULT:
	case Q_IP:
	case Q_ARP:
	case Q_RARP:
		if (linktype == DLT_EN10MB)
			b0 = gen_ehostop(eaddr, Q_OR);
		else if (linktype == DLT_FDDI)
			b0 = gen_fhostop(eaddr, Q_OR);
		else
			bpf_error(
			    "'gateway' supported only on ethernet or FDDI");

		b1 = gen_host(**alist++, 0xffffffff, proto, Q_OR);
		while (*alist) {
			tmp = gen_host(**alist++, 0xffffffff, proto, Q_OR);
			gen_or(b1, tmp);
			b1 = tmp;
		}
		gen_not(b1);
		gen_and(b0, b1);
		return b1;
	}
	bpf_error("illegal modifier of 'gateway'");
	/* NOTREACHED */
}
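/*
 * A note on the recurring "off_nl + 9" in gen_proto_abbrev() and friends
 * below: byte 9 of an IPv4 header is the protocol field, so a plain "tcp"
 * filter compiles to "link-level type is IP and ip[9] == IPPROTO_TCP (6)".
 */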
struct block *
gen_proto_abbrev(proto)
	int proto;
{
	struct block *b0, *b1;

	switch (proto) {

	case Q_TCP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_TCP);
		gen_and(b0, b1);
		break;

	case Q_UDP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_UDP);
		gen_and(b0, b1);
		break;

	case Q_ICMP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_ICMP);
		gen_and(b0, b1);
		break;

	case Q_IGMP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)2);
		gen_and(b0, b1);
		break;

#ifndef	IPPROTO_IGRP
#define	IPPROTO_IGRP	9
#endif
	case Q_IGRP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (long)IPPROTO_IGRP);
		gen_and(b0, b1);
		break;

	case Q_IP:
		b1 = gen_linktype(ETHERTYPE_IP);
		break;

	case Q_ARP:
		b1 = gen_linktype(ETHERTYPE_ARP);
		break;

	case Q_RARP:
		b1 = gen_linktype(ETHERTYPE_REVARP);
		break;

	case Q_LINK:
		bpf_error("link layer applied in wrong context");

	case Q_ATALK:
		b1 = gen_linktype(ETHERTYPE_ATALK);
		break;

	case Q_DECNET:
		b1 = gen_linktype(ETHERTYPE_DN);
		break;

	case Q_SCA:
		b1 = gen_linktype(ETHERTYPE_SCA);
		break;

	case Q_LAT:
		b1 = gen_linktype(ETHERTYPE_LAT);
		break;

	case Q_MOPDL:
		b1 = gen_linktype(ETHERTYPE_MOPDL);
		break;

	case Q_MOPRC:
		b1 = gen_linktype(ETHERTYPE_MOPRC);
		break;

	case Q_ISO:
		b1 = gen_linktype(LLC_ISO_LSAP);
		break;

	case Q_ESIS:
		b1 = gen_proto(ISO9542_ESIS, Q_ISO, Q_DEFAULT);
		break;

	case Q_ISIS:
		b1 = gen_proto(ISO10589_ISIS, Q_ISO, Q_DEFAULT);
		break;

	default:
		abort();
	}
	return b1;
}

static struct block *
gen_ipfrag()
{
	struct slist *s;
	struct block *b;

	/* not ip frag */
	s = new_stmt(BPF_LD|BPF_H|BPF_ABS);
	s->s.k = off_nl + 6;
	b = new_block(JMP(BPF_JSET));
	b->s.k = 0x1fff;
	b->stmts = s;
	gen_not(b);

	return b;
}

static struct block *
gen_portatom(off, v)
	int off;
	bpf_int32 v;
{
	struct slist *s;
	struct block *b;

	s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
	s->s.k = off_nl;

	s->next = new_stmt(BPF_LD|BPF_IND|BPF_H);
	s->next->s.k = off_nl + off;

	b = new_block(JMP(BPF_JEQ));
	b->stmts = s;
	b->s.k = v;

	return b;
}

struct block *
gen_portop(port, proto, dir)
	int port, proto, dir;
{
	struct block *b0, *b1, *tmp;

	/* ip proto 'proto' */
	tmp = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)proto);
	b0 = gen_ipfrag();
	gen_and(tmp, b0);

	switch (dir) {
	case Q_SRC:
		b1 = gen_portatom(0, (bpf_int32)port);
		break;

	case Q_DST:
		b1 = gen_portatom(2, (bpf_int32)port);
		break;

	case Q_OR:
	case Q_DEFAULT:
		tmp = gen_portatom(0, (bpf_int32)port);
		b1 = gen_portatom(2, (bpf_int32)port);
		gen_or(tmp, b1);
		break;

	case Q_AND:
		tmp = gen_portatom(0, (bpf_int32)port);
		b1 = gen_portatom(2, (bpf_int32)port);
		gen_and(tmp, b1);
		break;

	default:
		abort();
	}
	gen_and(b0, b1);

	return b1;
}

static struct block *
gen_port(port, ip_proto, dir)
	int port;
	int ip_proto;
	int dir;
{
	struct block *b0, *b1, *tmp;

	/* ether proto ip */
	b0 = gen_linktype(ETHERTYPE_IP);

	switch (ip_proto) {
	case IPPROTO_UDP:
	case IPPROTO_TCP:
		b1 = gen_portop(port, ip_proto, dir);
		break;

	case PROTO_UNDEF:
		tmp = gen_portop(port, IPPROTO_TCP, dir);
		b1 = gen_portop(port, IPPROTO_UDP, dir);
		gen_or(tmp, b1);
		break;

	default:
		abort();
	}
	gen_and(b0, b1);
	return b1;
}
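/*
 * The BPF_LDX|BPF_MSH|BPF_B statement emitted by gen_portatom() above
 * loads 4 * (pkt[off_nl] & 0xf) into the index register, i.e. the IPv4
 * header length in bytes, so the following BPF_LD|BPF_IND|BPF_H picks up
 * the port halfword just past any IP options.  A filter like
 * "tcp port 80" therefore checks that the link-level type is IP, that
 * ip[9] is IPPROTO_TCP, that the fragment offset is zero, and that either
 * the source or destination port halfword equals 80.
 */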
static int
lookup_proto(name, proto)
	register const char *name;
	register int proto;
{
	register int v;

	switch (proto) {

	case Q_DEFAULT:
	case Q_IP:
		v = pcap_nametoproto(name);
		if (v == PROTO_UNDEF)
			bpf_error("unknown ip proto '%s'", name);
		break;

	case Q_LINK:
		/* XXX should look up h/w protocol type based on linktype */
		v = pcap_nametoeproto(name);
		if (v == PROTO_UNDEF)
			bpf_error("unknown ether proto '%s'", name);
		break;

	default:
		v = PROTO_UNDEF;
		break;
	}
	return v;
}

static struct block *
gen_proto(v, proto, dir)
	int v;
	int proto;
	int dir;
{
	struct block *b0, *b1;

	if (dir != Q_DEFAULT)
		bpf_error("direction applied to 'proto'");

	switch (proto) {
	case Q_DEFAULT:
	case Q_IP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)v);
		gen_and(b0, b1);
		return b1;

	case Q_ISO:
		b0 = gen_linktype(LLC_ISO_LSAP);
		b1 = gen_cmp(off_nl + 3, BPF_B, (long)v);
		gen_and(b0, b1);
		return b1;

	case Q_ARP:
		bpf_error("arp does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_RARP:
		bpf_error("rarp does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_ATALK:
		bpf_error("atalk encapsulation is not specifiable");
		/* NOTREACHED */

	case Q_DECNET:
		bpf_error("decnet encapsulation is not specifiable");
		/* NOTREACHED */

	case Q_SCA:
		bpf_error("sca does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_LAT:
		bpf_error("lat does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_MOPRC:
		bpf_error("moprc does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_MOPDL:
		bpf_error("mopdl does not encapsulate another protocol");
		/* NOTREACHED */

	case Q_LINK:
		return gen_linktype(v);

	case Q_UDP:
		bpf_error("'udp proto' is bogus");
		/* NOTREACHED */

	case Q_TCP:
		bpf_error("'tcp proto' is bogus");
		/* NOTREACHED */

	case Q_ICMP:
		bpf_error("'icmp proto' is bogus");
		/* NOTREACHED */

	case Q_IGMP:
		bpf_error("'igmp proto' is bogus");
		/* NOTREACHED */

	case Q_IGRP:
		bpf_error("'igrp proto' is bogus");
		/* NOTREACHED */

	default:
		abort();
		/* NOTREACHED */
	}
	/* NOTREACHED */
}
struct block *
gen_scode(name, q)
	register const char *name;
	struct qual q;
{
	int proto = q.proto;
	int dir = q.dir;
	u_char *eaddr;
	bpf_u_int32 mask, addr, **alist;
	struct block *b, *tmp;
	int port, real_proto;

	switch (q.addr) {

	case Q_NET:
		addr = pcap_nametonetaddr(name);
		if (addr == 0)
			bpf_error("unknown network '%s'", name);
		/* Left justify network addr and calculate its network mask */
		mask = 0xffffffff;
		while (addr && (addr & 0xff000000) == 0) {
			addr <<= 8;
			mask <<= 8;
		}
		return gen_host(addr, mask, proto, dir);

	case Q_DEFAULT:
	case Q_HOST:
		if (proto == Q_LINK) {
			switch (linktype) {

			case DLT_EN10MB:
				eaddr = pcap_ether_hostton(name);
				if (eaddr == NULL)
					bpf_error(
					    "unknown ether host '%s'", name);
				return gen_ehostop(eaddr, dir);

			case DLT_FDDI:
				eaddr = pcap_ether_hostton(name);
				if (eaddr == NULL)
					bpf_error(
					    "unknown FDDI host '%s'", name);
				return gen_fhostop(eaddr, dir);

			default:
				bpf_error(
			"only ethernet/FDDI supports link-level host name");
				break;
			}
		} else if (proto == Q_DECNET) {
			unsigned short dn_addr = __pcap_nametodnaddr(name);
			/*
			 * I don't think DECNET hosts can be multihomed, so
			 * there is no need to build up a list of addresses
			 */
			return (gen_host(dn_addr, 0, proto, dir));
		} else {
			alist = pcap_nametoaddr(name);
			if (alist == NULL || *alist == NULL)
				bpf_error("unknown host '%s'", name);
			b = gen_host(**alist++, 0xffffffff, proto, dir);
			while (*alist) {
				tmp = gen_host(**alist++, 0xffffffff,
					       proto, dir);
				gen_or(b, tmp);
				b = tmp;
			}
			return b;
		}

	case Q_PORT:
		if (proto != Q_DEFAULT && proto != Q_UDP && proto != Q_TCP)
			bpf_error("illegal qualifier of 'port'");
		if (pcap_nametoport(name, &port, &real_proto) == 0)
			bpf_error("unknown port '%s'", name);
		if (proto == Q_UDP) {
			if (real_proto == IPPROTO_TCP)
				bpf_error("port '%s' is tcp", name);
			else
				/* override PROTO_UNDEF */
				real_proto = IPPROTO_UDP;
		}
		if (proto == Q_TCP) {
			if (real_proto == IPPROTO_UDP)
				bpf_error("port '%s' is udp", name);
			else
				/* override PROTO_UNDEF */
				real_proto = IPPROTO_TCP;
		}
		return gen_port(port, real_proto, dir);

	case Q_GATEWAY:
		eaddr = pcap_ether_hostton(name);
		if (eaddr == NULL)
			bpf_error("unknown ether host: %s", name);

		alist = pcap_nametoaddr(name);
		if (alist == NULL || *alist == NULL)
			bpf_error("unknown host '%s'", name);
		return gen_gateway(eaddr, alist, proto, dir);

	case Q_PROTO:
		real_proto = lookup_proto(name, proto);
		if (real_proto >= 0)
			return gen_proto(real_proto, proto, dir);
		else
			bpf_error("unknown protocol: %s", name);

	case Q_UNDEF:
		syntax();
		/* NOTREACHED */
	}
	abort();
	/* NOTREACHED */
}

struct block *
gen_mcode(s1, s2, masklen, q)
	register const char *s1, *s2;
	register int masklen;
	struct qual q;
{
	register int nlen, mlen;
	bpf_u_int32 n, m;

	nlen = __pcap_atoin(s1, &n);
	/* Promote short ipaddr */
	n <<= 32 - nlen;

	if (s2 != NULL) {
		mlen = __pcap_atoin(s2, &m);
		/* Promote short ipaddr */
		m <<= 32 - mlen;
	} else {
		/* Convert mask len to mask */
		if (masklen > 32)
			bpf_error("mask length must be <= 32");
		m = 0xffffffff << (32 - masklen);
	}

	switch (q.addr) {

	case Q_NET:
		return gen_host(n, m, q.proto, q.dir);

	default:
		bpf_error("Mask syntax for networks only");
		/* NOTREACHED */
	}
}
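/*
 * As a worked example of the mask handling above, "net 192.168.1.0/24"
 * gives n = 0xc0a80100 and masklen = 24, so m = 0xffffffff << 8 =
 * 0xffffff00; gen_host() then emits masked word compares of the source
 * and/or destination address against 0xc0a80100.
 */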
struct block *
gen_ncode(s, v, q)
	register const char *s;
	bpf_u_int32 v;
	struct qual q;
{
	bpf_u_int32 mask;
	int proto = q.proto;
	int dir = q.dir;
	register int vlen;

	if (s == NULL)
		vlen = 32;
	else if (q.proto == Q_DECNET)
		vlen = __pcap_atodn(s, &v);
	else
		vlen = __pcap_atoin(s, &v);

	switch (q.addr) {

	case Q_DEFAULT:
	case Q_HOST:
	case Q_NET:
		if (proto == Q_DECNET)
			return gen_host(v, 0, proto, dir);
		else if (proto == Q_LINK) {
			bpf_error("illegal link layer address");
		} else {
			mask = 0xffffffff;
			if (s == NULL && q.addr == Q_NET) {
				/* Promote short net number */
				while (v && (v & 0xff000000) == 0) {
					v <<= 8;
					mask <<= 8;
				}
			} else {
				/* Promote short ipaddr */
				v <<= 32 - vlen;
				mask <<= 32 - vlen;
			}
			return gen_host(v, mask, proto, dir);
		}

	case Q_PORT:
		if (proto == Q_UDP)
			proto = IPPROTO_UDP;
		else if (proto == Q_TCP)
			proto = IPPROTO_TCP;
		else if (proto == Q_DEFAULT)
			proto = PROTO_UNDEF;
		else
			bpf_error("illegal qualifier of 'port'");

		return gen_port((int)v, proto, dir);

	case Q_GATEWAY:
		bpf_error("'gateway' requires a name");
		/* NOTREACHED */

	case Q_PROTO:
		return gen_proto((int)v, proto, dir);

	case Q_UNDEF:
		syntax();
		/* NOTREACHED */

	default:
		abort();
		/* NOTREACHED */
	}
	/* NOTREACHED */
}

struct block *
gen_ecode(eaddr, q)
	register const u_char *eaddr;
	struct qual q;
{
	if ((q.addr == Q_HOST || q.addr == Q_DEFAULT) && q.proto == Q_LINK) {
		if (linktype == DLT_EN10MB)
			return gen_ehostop(eaddr, (int)q.dir);
		if (linktype == DLT_FDDI)
			return gen_fhostop(eaddr, (int)q.dir);
	}
	bpf_error("ethernet address used in non-ether expression");
	/* NOTREACHED */
}

void
sappend(s0, s1)
	struct slist *s0, *s1;
{
	/*
	 * This is definitely not the best way to do this, but the
	 * lists will rarely get long.
	 */
	while (s0->next)
		s0 = s0->next;
	s0->next = s1;
}

static struct slist *
xfer_to_x(a)
	struct arth *a;
{
	struct slist *s;

	s = new_stmt(BPF_LDX|BPF_MEM);
	s->s.k = a->regno;
	return s;
}

static struct slist *
xfer_to_a(a)
	struct arth *a;
{
	struct slist *s;

	s = new_stmt(BPF_LD|BPF_MEM);
	s->s.k = a->regno;
	return s;
}
struct arth *
gen_load(proto, index, size)
	int proto;
	struct arth *index;
	int size;
{
	struct slist *s, *tmp;
	struct block *b;
	int regno = alloc_reg();

	free_reg(index->regno);
	switch (size) {

	default:
		bpf_error("data size must be 1, 2, or 4");

	case 1:
		size = BPF_B;
		break;

	case 2:
		size = BPF_H;
		break;

	case 4:
		size = BPF_W;
		break;
	}
	switch (proto) {
	default:
		bpf_error("unsupported index operation");

	case Q_LINK:
		s = xfer_to_x(index);
		tmp = new_stmt(BPF_LD|BPF_IND|size);
		sappend(s, tmp);
		sappend(index->s, s);
		break;

	case Q_IP:
	case Q_ARP:
	case Q_RARP:
	case Q_ATALK:
	case Q_DECNET:
	case Q_SCA:
	case Q_LAT:
	case Q_MOPRC:
	case Q_MOPDL:
		/* XXX Note that we assume a fixed link header here. */
		s = xfer_to_x(index);
		tmp = new_stmt(BPF_LD|BPF_IND|size);
		tmp->s.k = off_nl;
		sappend(s, tmp);
		sappend(index->s, s);

		b = gen_proto_abbrev(proto);
		if (index->b)
			gen_and(index->b, b);
		index->b = b;
		break;

	case Q_TCP:
	case Q_UDP:
	case Q_ICMP:
	case Q_IGMP:
	case Q_IGRP:
		s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
		s->s.k = off_nl;
		sappend(s, xfer_to_a(index));
		sappend(s, new_stmt(BPF_ALU|BPF_ADD|BPF_X));
		sappend(s, new_stmt(BPF_MISC|BPF_TAX));
		sappend(s, tmp = new_stmt(BPF_LD|BPF_IND|size));
		tmp->s.k = off_nl;
		sappend(index->s, s);

		gen_and(gen_proto_abbrev(proto), b = gen_ipfrag());
		if (index->b)
			gen_and(index->b, b);
		index->b = b;
		break;
	}
	index->regno = regno;
	s = new_stmt(BPF_ST);
	s->s.k = regno;
	sappend(index->s, s);

	return index;
}

struct block *
gen_relation(code, a0, a1, reversed)
	int code;
	struct arth *a0, *a1;
	int reversed;
{
	struct slist *s0, *s1, *s2;
	struct block *b, *tmp;

	s0 = xfer_to_x(a1);
	s1 = xfer_to_a(a0);
	s2 = new_stmt(BPF_ALU|BPF_SUB|BPF_X);
	b = new_block(JMP(code));
	if (code == BPF_JGT || code == BPF_JGE) {
		reversed = !reversed;
		b->s.k = 0x80000000;
	}
	if (reversed)
		gen_not(b);

	sappend(s1, s2);
	sappend(s0, s1);
	sappend(a1->s, s0);
	sappend(a0->s, a1->s);

	b->stmts = a0->s;

	free_reg(a0->regno);
	free_reg(a1->regno);

	/* 'and' together protocol checks */
	if (a0->b) {
		if (a1->b) {
			gen_and(a0->b, tmp = a1->b);
		}
		else
			tmp = a0->b;
	} else
		tmp = a1->b;

	if (tmp)
		gen_and(tmp, b);

	return b;
}

struct arth *
gen_loadlen()
{
	int regno = alloc_reg();
	struct arth *a = (struct arth *)newchunk(sizeof(*a));
	struct slist *s;

	s = new_stmt(BPF_LD|BPF_LEN);
	s->next = new_stmt(BPF_ST);
	s->next->s.k = regno;
	a->s = s;
	a->regno = regno;

	return a;
}

struct arth *
gen_loadi(val)
	int val;
{
	struct arth *a;
	struct slist *s;
	int reg;

	a = (struct arth *)newchunk(sizeof(*a));

	reg = alloc_reg();

	s = new_stmt(BPF_LD|BPF_IMM);
	s->s.k = val;
	s->next = new_stmt(BPF_ST);
	s->next->s.k = reg;
	a->s = s;
	a->regno = reg;

	return a;
}

struct arth *
gen_neg(a)
	struct arth *a;
{
	struct slist *s;

	s = xfer_to_a(a);
	sappend(a->s, s);
	s = new_stmt(BPF_ALU|BPF_NEG);
	s->s.k = 0;
	sappend(a->s, s);
	s = new_stmt(BPF_ST);
	s->s.k = a->regno;
	sappend(a->s, s);

	return a;
}

struct arth *
gen_arth(code, a0, a1)
	int code;
	struct arth *a0, *a1;
{
	struct slist *s0, *s1, *s2;

	s0 = xfer_to_x(a1);
	s1 = xfer_to_a(a0);
	s2 = new_stmt(BPF_ALU|BPF_X|code);

	sappend(s1, s2);
	sappend(s0, s1);
	sappend(a1->s, s0);
	sappend(a0->s, a1->s);

	free_reg(a1->regno);

	s0 = new_stmt(BPF_ST);
	a0->regno = s0->s.k = alloc_reg();
	sappend(a0->s, s0);

	return a0;
}
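/*
 * The "registers" handed out below are really indices into the BPF
 * scratch memory store (BPF_MEMWORDS slots, 16 in the classic BPF
 * machine), which is why arithmetic results are spilled with BPF_ST and
 * reloaded via BPF_LD|BPF_MEM and BPF_LDX|BPF_MEM in xfer_to_a() and
 * xfer_to_x().
 */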
/*
 * Here we handle simple allocation of the scratch registers.
 * If too many registers are alloc'd, the allocator punts.
 */
static int regused[BPF_MEMWORDS];
static int curreg;

/*
 * Return the next free register.
 */
static int
alloc_reg()
{
	int n = BPF_MEMWORDS;

	while (--n >= 0) {
		if (regused[curreg])
			curreg = (curreg + 1) % BPF_MEMWORDS;
		else {
			regused[curreg] = 1;
			return curreg;
		}
	}
	bpf_error("too many registers needed to evaluate expression");
	/* NOTREACHED */
}

/*
 * Return a register to the table so it can
 * be used later.
 */
static void
free_reg(n)
	int n;
{
	regused[n] = 0;
}

static struct block *
gen_len(jmp, n)
	int jmp, n;
{
	struct slist *s;
	struct block *b;

	s = new_stmt(BPF_LD|BPF_LEN);
	b = new_block(JMP(jmp));
	b->stmts = s;
	b->s.k = n;

	return b;
}

struct block *
gen_greater(n)
	int n;
{
	return gen_len(BPF_JGE, n);
}

/*
 * Actually, this is less than or equal.
 */
struct block *
gen_less(n)
	int n;
{
	struct block *b;

	b = gen_len(BPF_JGT, n);
	gen_not(b);

	return b;
}

struct block *
gen_byteop(op, idx, val)
	int op, idx, val;
{
	struct block *b;
	struct slist *s;

	switch (op) {
	default:
		abort();

	case '=':
		return gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);

	case '<':
		b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
		b->s.code = JMP(BPF_JGE);
		gen_not(b);
		return b;

	case '>':
		b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
		b->s.code = JMP(BPF_JGT);
		return b;

	case '|':
		s = new_stmt(BPF_ALU|BPF_OR|BPF_K);
		break;

	case '&':
		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
		break;
	}
	s->s.k = val;
	b = new_block(JMP(BPF_JEQ));
	b->stmts = s;
	gen_not(b);

	return b;
}

struct block *
gen_broadcast(proto)
	int proto;
{
	bpf_u_int32 hostmask;
	struct block *b0, *b1, *b2;
	static u_char ebroadcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	switch (proto) {

	case Q_DEFAULT:
	case Q_LINK:
		if (linktype == DLT_EN10MB)
			return gen_ehostop(ebroadcast, Q_DST);
		if (linktype == DLT_FDDI)
			return gen_fhostop(ebroadcast, Q_DST);
		bpf_error("not a broadcast link");
		break;

	case Q_IP:
		b0 = gen_linktype(ETHERTYPE_IP);
		hostmask = ~netmask;
		b1 = gen_mcmp(off_nl + 16, BPF_W, (bpf_int32)0, hostmask);
		b2 = gen_mcmp(off_nl + 16, BPF_W,
			      (bpf_int32)(~0 & hostmask), hostmask);
		gen_or(b1, b2);
		gen_and(b0, b2);
		return b2;
	}
	bpf_error("only ether/ip broadcast filters supported");
}

struct block *
gen_multicast(proto)
	int proto;
{
	register struct block *b0, *b1;
	register struct slist *s;

	switch (proto) {

	case Q_DEFAULT:
	case Q_LINK:
		if (linktype == DLT_EN10MB) {
			/* ether[0] & 1 != 0 */
			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
			s->s.k = 0;
			b0 = new_block(JMP(BPF_JSET));
			b0->s.k = 1;
			b0->stmts = s;
			return b0;
		}

		if (linktype == DLT_FDDI) {
			/* XXX TEST THIS: MIGHT NOT PORT PROPERLY XXX */
			/* fddi[1] & 1 != 0 */
			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
			s->s.k = 1;
			b0 = new_block(JMP(BPF_JSET));
			b0->s.k = 1;
			b0->stmts = s;
			return b0;
		}
		/* Link not known to support multicasts */
		break;

	case Q_IP:
		b0 = gen_linktype(ETHERTYPE_IP);
		b1 = gen_cmp(off_nl + 16, BPF_B, (bpf_int32)224);
		b1->s.code = JMP(BPF_JGE);
		gen_and(b0, b1);
		return b1;
	}
	bpf_error("only IP multicast filters supported on ethernet/FDDI");
}

/*
 * generate command for inbound/outbound.  It's here so we can
 * make it link-type specific.  'dir' = 0 implies "inbound",
 * = 1 implies "outbound".
 */
struct block *
gen_inbound(dir)
	int dir;
{
	register struct block *b0;

	b0 = gen_relation(BPF_JEQ,
			  gen_load(Q_LINK, gen_loadi(0), 1),
			  gen_loadi(0),
			  dir);
	return (b0);
}