xref: /freebsd/contrib/libpcap/optimize.c (revision e627b39baccd1ec9129690167cf5e6d860509655)
1 /*
2  * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that: (1) source code distributions
7  * retain the above copyright notice and this paragraph in its entirety, (2)
8  * distributions including binary code include the above copyright notice and
9  * this paragraph in its entirety in the documentation or other materials
10  * provided with the distribution, and (3) all advertising materials mentioning
11  * features or use of this software display the following acknowledgement:
12  * ``This product includes software developed by the University of California,
13  * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
14  * the University nor the names of its contributors may be used to endorse
15  * or promote products derived from this software without specific prior
16  * written permission.
17  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
18  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20  *
21  *  Optimization module for tcpdump intermediate representation.
22  */
23 #ifndef lint
24 static char rcsid[] =
25     "@(#) $Header: optimize.c,v 1.59 96/07/15 00:48:49 leres Exp $ (LBL)";
26 #endif
27 
28 #include <sys/types.h>
29 #include <sys/time.h>
30 
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <memory.h>
34 
35 #include "pcap-int.h"
36 
37 #include "gencode.h"
38 
39 #include "gnuc.h"
40 #ifdef HAVE_OS_PROTO_H
41 #include "os-proto.h"
42 #endif
43 
44 #ifdef BDEBUG
45 extern int dflag;
46 #endif
47 
48 #define A_ATOM BPF_MEMWORDS
49 #define X_ATOM (BPF_MEMWORDS+1)
50 
51 #define NOP -1
52 
53 /*
54  * This define is used to represent *both* the accumulator and
55  * x register in use-def computations.
56  * Currently, the use-def code assumes only one definition per instruction.
57  */
58 #define AX_ATOM N_ATOMS
59 
60 /*
61  * A flag to indicate that further optimization is needed.
62  * Iterative passes are continued until a given pass yields no
63  * branch movement.
64  */
65 static int done;
66 
67 /*
68  * A block is marked if and only if its mark equals the current mark.
69  * Rather than traverse the code array, marking each item, 'cur_mark' is
70  * incremented.  This automatically makes each element unmarked.
71  */
72 static int cur_mark;
73 #define isMarked(p) ((p)->mark == cur_mark)
74 #define unMarkAll() cur_mark += 1
75 #define Mark(p) ((p)->mark = cur_mark)
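
/*
 * Example: after unMarkAll() bumps cur_mark to, say, 5, every block
 * still carries an older mark value, so isMarked() is false for all of
 * them; a later Mark(p) stores 5 in p->mark and only then does
 * isMarked(p) hold.  One increment thus "unmarks" every block in
 * constant time.
 */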
76 
77 static void opt_init(struct block *);
78 static void opt_cleanup(void);
79 
80 static void make_marks(struct block *);
81 static void mark_code(struct block *);
82 
83 static void intern_blocks(struct block *);
84 
85 static int eq_slist(struct slist *, struct slist *);
86 
87 static void find_levels_r(struct block *);
88 
89 static void find_levels(struct block *);
90 static void find_dom(struct block *);
91 static void propedom(struct edge *);
92 static void find_edom(struct block *);
93 static void find_closure(struct block *);
94 static int atomuse(struct stmt *);
95 static int atomdef(struct stmt *);
96 static void compute_local_ud(struct block *);
97 static void find_ud(struct block *);
98 static void init_val(void);
99 static int F(int, int, int);
100 static inline void vstore(struct stmt *, int *, int, int);
101 static void opt_blk(struct block *, int);
102 static int use_conflict(struct block *, struct block *);
103 static void opt_j(struct edge *);
104 static void or_pullup(struct block *);
105 static void and_pullup(struct block *);
106 static void opt_blks(struct block *, int);
107 static inline void link_inedge(struct edge *, struct block *);
108 static void find_inedges(struct block *);
109 static void opt_root(struct block **);
110 static void opt_loop(struct block *, int);
111 static void fold_op(struct stmt *, int, int);
112 static inline struct slist *this_op(struct slist *);
113 static void opt_not(struct block *);
114 static void opt_peep(struct block *);
115 static void opt_stmt(struct stmt *, int[], int);
116 static void deadstmt(struct stmt *, struct stmt *[]);
117 static void opt_deadstores(struct block *);
118 static void opt_blk(struct block *, int);
119 static int use_conflict(struct block *, struct block *);
120 static void opt_j(struct edge *);
121 static struct block *fold_edge(struct block *, struct edge *);
122 static inline int eq_blk(struct block *, struct block *);
123 static int slength(struct slist *);
124 static int count_blocks(struct block *);
125 static void number_blks_r(struct block *);
126 static int count_stmts(struct block *);
127 static int convert_code_r(struct block *);
128 #ifdef BDEBUG
129 static void opt_dump(struct block *);
130 #endif
131 
132 static int n_blocks;
133 struct block **blocks;
134 static int n_edges;
135 struct edge **edges;
136 
137 /*
138  * A bit vector set representation of the dominators.
139  * The set size is rounded up to a whole number of words.
140  */
141 static int nodewords;
142 static int edgewords;
143 struct block **levels;
144 bpf_u_int32 *space;
145 #define BITS_PER_WORD (8*sizeof(bpf_u_int32))
146 /*
147  * True if a is in uset {p}
148  * True if 'a' is in uset p.
149 #define SET_MEMBER(p, a) \
150 ((p)[(unsigned)(a) / BITS_PER_WORD] & (1 << ((unsigned)(a) % BITS_PER_WORD)))
151 
152 /*
153  * Add 'a' to uset p.
154  */
155 #define SET_INSERT(p, a) \
156 (p)[(unsigned)(a) / BITS_PER_WORD] |= (1 << ((unsigned)(a) % BITS_PER_WORD))
157 
158 /*
159  * Delete 'a' from uset p.
160  */
161 #define SET_DELETE(p, a) \
162 (p)[(unsigned)(a) / BITS_PER_WORD] &= ~(1 << ((unsigned)(a) % BITS_PER_WORD))
163 
164 /*
165  * a := a intersect b
166  */
167 #define SET_INTERSECT(a, b, n)\
168 {\
169 	register bpf_u_int32 *_x = a, *_y = b;\
170 	register int _n = n;\
171 	while (--_n >= 0) *_x++ &= *_y++;\
172 }
173 
174 /*
175  * a := a - b
176  */
177 #define SET_SUBTRACT(a, b, n)\
178 {\
179 	register bpf_u_int32 *_x = a, *_y = b;\
180 	register int _n = n;\
181 	while (--_n >= 0) *_x++ &=~ *_y++;\
182 }
183 
184 /*
185  * a := a union b
186  */
187 #define SET_UNION(a, b, n)\
188 {\
189 	register bpf_u_int32 *_x = a, *_y = b;\
190 	register int _n = n;\
191 	while (--_n >= 0) *_x++ |= *_y++;\
192 }
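
/*
 * Example: with 32-bit words (BITS_PER_WORD == 32), element 37 lives
 * in bit 5 of word 1, so SET_INSERT(p, 37) ORs (1 << 5) into p[1] and
 * SET_MEMBER(p, 37) tests that same bit.  The loop macros above work a
 * whole word at a time and assume both operands were allocated with
 * the same word count (nodewords or edgewords).
 */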
193 
194 static uset all_dom_sets;
195 static uset all_closure_sets;
196 static uset all_edge_sets;
197 
198 #ifndef MAX
199 #define MAX(a,b) ((a)>(b)?(a):(b))
200 #endif
201 
202 static void
203 find_levels_r(b)
204 	struct block *b;
205 {
206 	int level;
207 
208 	if (isMarked(b))
209 		return;
210 
211 	Mark(b);
212 	b->link = 0;
213 
214 	if (JT(b)) {
215 		find_levels_r(JT(b));
216 		find_levels_r(JF(b));
217 		level = MAX(JT(b)->level, JF(b)->level) + 1;
218 	} else
219 		level = 0;
220 	b->level = level;
221 	b->link = levels[level];
222 	levels[level] = b;
223 }
224 
225 /*
226  * Level graph.  The levels go from 0 at the leaves to
227  * N_LEVELS at the root.  The levels[] array points to the
228  * first node of the level list, whose elements are linked
229  * with the 'link' field of the struct block.
230  */
231 static void
232 find_levels(root)
233 	struct block *root;
234 {
235 	memset((char *)levels, 0, n_blocks * sizeof(*levels));
236 	unMarkAll();
237 	find_levels_r(root);
238 }
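
/*
 * Example: if block A branches to blocks B and C, and B and C are both
 * return leaves, then B and C get level 0 and A gets level 1;
 * levels[0] chains B and C through their 'link' fields and levels[1]
 * points at A.
 */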
239 
240 /*
241  * Find dominator relationships.
242  * Assumes graph has been leveled.
243  */
244 static void
245 find_dom(root)
246 	struct block *root;
247 {
248 	int i;
249 	struct block *b;
250 	bpf_u_int32 *x;
251 
252 	/*
253 	 * Initialize sets to contain all nodes.
254 	 */
255 	x = all_dom_sets;
256 	i = n_blocks * nodewords;
257 	while (--i >= 0)
258 		*x++ = ~0;
259 	/* Root starts off empty. */
260 	for (i = nodewords; --i >= 0;)
261 		root->dom[i] = 0;
262 
263 	/* root->level is the highest level number found. */
264 	for (i = root->level; i >= 0; --i) {
265 		for (b = levels[i]; b; b = b->link) {
266 			SET_INSERT(b->dom, b->id);
267 			if (JT(b) == 0)
268 				continue;
269 			SET_INTERSECT(JT(b)->dom, b->dom, nodewords);
270 			SET_INTERSECT(JF(b)->dom, b->dom, nodewords);
271 		}
272 	}
273 }
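
/*
 * The loop above relies on the level order: a block's predecessors all
 * have strictly higher levels, so by the time a block is visited its
 * dom set has already been intersected with every predecessor's and is
 * final.  A block d therefore ends up in b->dom exactly when every
 * path from the root to b passes through d.
 */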
274 
275 static void
276 propedom(ep)
277 	struct edge *ep;
278 {
279 	SET_INSERT(ep->edom, ep->id);
280 	if (ep->succ) {
281 		SET_INTERSECT(ep->succ->et.edom, ep->edom, edgewords);
282 		SET_INTERSECT(ep->succ->ef.edom, ep->edom, edgewords);
283 	}
284 }
285 
286 /*
287  * Compute edge dominators.
288  * Assumes graph has been leveled and predecessors established.
289  */
290 static void
291 find_edom(root)
292 	struct block *root;
293 {
294 	int i;
295 	uset x;
296 	struct block *b;
297 
298 	x = all_edge_sets;
299 	for (i = n_edges * edgewords; --i >= 0; )
300 		x[i] = ~0;
301 
302 	/* root->level is the highest level number found. */
303 	memset(root->et.edom, 0, edgewords * sizeof(*(uset)0));
304 	memset(root->ef.edom, 0, edgewords * sizeof(*(uset)0));
305 	for (i = root->level; i >= 0; --i) {
306 		for (b = levels[i]; b != 0; b = b->link) {
307 			propedom(&b->et);
308 			propedom(&b->ef);
309 		}
310 	}
311 }
312 
313 /*
314  * Find the backwards transitive closure of the flow graph.  These sets
315  * are backwards in the sense that we find the set of nodes that reach
316  * a given node, not the set of nodes that can be reached by a node.
317  *
318  * Assumes graph has been leveled.
319  */
320 static void
321 find_closure(root)
322 	struct block *root;
323 {
324 	int i;
325 	struct block *b;
326 
327 	/*
328 	 * Initialize sets to contain no nodes.
329 	 */
330 	memset((char *)all_closure_sets, 0,
331 	      n_blocks * nodewords * sizeof(*all_closure_sets));
332 
333 	/* root->level is the highest level number found. */
334 	for (i = root->level; i >= 0; --i) {
335 		for (b = levels[i]; b; b = b->link) {
336 			SET_INSERT(b->closure, b->id);
337 			if (JT(b) == 0)
338 				continue;
339 			SET_UNION(JT(b)->closure, b->closure, nodewords);
340 			SET_UNION(JF(b)->closure, b->closure, nodewords);
341 		}
342 	}
343 }
344 
345 /*
346  * Return the register number that is used by s.  If A and X are both
347  * used, return AX_ATOM.  If no register is used, return -1.
348  *
349  * The implementation should probably change to an array access.
350  */
351 static int
352 atomuse(s)
353 	struct stmt *s;
354 {
355 	register int c = s->code;
356 
357 	if (c == NOP)
358 		return -1;
359 
360 	switch (BPF_CLASS(c)) {
361 
362 	case BPF_RET:
363 		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
364 			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;
365 
366 	case BPF_LD:
367 	case BPF_LDX:
368 		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
369 			(BPF_MODE(c) == BPF_MEM) ? s->k : -1;
370 
371 	case BPF_ST:
372 		return A_ATOM;
373 
374 	case BPF_STX:
375 		return X_ATOM;
376 
377 	case BPF_JMP:
378 	case BPF_ALU:
379 		if (BPF_SRC(c) == BPF_X)
380 			return AX_ATOM;
381 		return A_ATOM;
382 
383 	case BPF_MISC:
384 		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
385 	}
386 	abort();
387 	/* NOTREACHED */
388 }
389 
390 /*
391  * Return the register number that is defined by 's'.  We assume that
392  * a single stmt cannot define more than one register.  If no register
393  * is defined, return -1.
394  *
395  * The implementation should probably change to an array access.
396  */
397 static int
398 atomdef(s)
399 	struct stmt *s;
400 {
401 	if (s->code == NOP)
402 		return -1;
403 
404 	switch (BPF_CLASS(s->code)) {
405 
406 	case BPF_LD:
407 	case BPF_ALU:
408 		return A_ATOM;
409 
410 	case BPF_LDX:
411 		return X_ATOM;
412 
413 	case BPF_ST:
414 	case BPF_STX:
415 		return s->k;
416 
417 	case BPF_MISC:
418 		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
419 	}
420 	return -1;
421 }
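
/*
 * Example: for "ld [x + 14]" (BPF_LD|BPF_IND|BPF_W), atomuse() reports
 * X_ATOM and atomdef() reports A_ATOM; for "st M[3]" (BPF_ST),
 * atomuse() reports A_ATOM and atomdef() reports 3, since atoms
 * 0..BPF_MEMWORDS-1 stand for the scratch memory cells.
 */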
422 
423 static void
424 compute_local_ud(b)
425 	struct block *b;
426 {
427 	struct slist *s;
428 	atomset def = 0, use = 0, kill = 0;
429 	int atom;
430 
431 	for (s = b->stmts; s; s = s->next) {
432 		if (s->s.code == NOP)
433 			continue;
434 		atom = atomuse(&s->s);
435 		if (atom >= 0) {
436 			if (atom == AX_ATOM) {
437 				if (!ATOMELEM(def, X_ATOM))
438 					use |= ATOMMASK(X_ATOM);
439 				if (!ATOMELEM(def, A_ATOM))
440 					use |= ATOMMASK(A_ATOM);
441 			}
442 			else if (atom < N_ATOMS) {
443 				if (!ATOMELEM(def, atom))
444 					use |= ATOMMASK(atom);
445 			}
446 			else
447 				abort();
448 		}
449 		atom = atomdef(&s->s);
450 		if (atom >= 0) {
451 			if (!ATOMELEM(use, atom))
452 				kill |= ATOMMASK(atom);
453 			def |= ATOMMASK(atom);
454 		}
455 	}
456 	if (!ATOMELEM(def, A_ATOM) && BPF_CLASS(b->s.code) == BPF_JMP)
457 		use |= ATOMMASK(A_ATOM);
458 
459 	b->def = def;
460 	b->kill = kill;
461 	b->in_use = use;
462 }
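
/*
 * Example: for the statement list "ld [12]; st M[0]; ldx M[1]", the
 * load defines (and kills) A_ATOM; the store uses A_ATOM, which is
 * already in 'def' and so stays out of 'use', and defines atom 0; the
 * ldx uses atom 1, which was never defined here and so enters 'use',
 * and defines X_ATOM.
 */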
463 
464 /*
465  * Assume graph is already leveled.
466  */
467 static void
468 find_ud(root)
469 	struct block *root;
470 {
471 	int i, maxlevel;
472 	struct block *p;
473 
474 	/*
475 	 * root->level is the highest level number found;
476 	 * count down from there.
477 	 */
478 	maxlevel = root->level;
479 	for (i = maxlevel; i >= 0; --i)
480 		for (p = levels[i]; p; p = p->link) {
481 			compute_local_ud(p);
482 			p->out_use = 0;
483 		}
484 
485 	for (i = 1; i <= maxlevel; ++i) {
486 		for (p = levels[i]; p; p = p->link) {
487 			p->out_use |= JT(p)->in_use | JF(p)->in_use;
488 			p->in_use |= p->out_use &~ p->kill;
489 		}
490 	}
491 }
492 
493 /*
494  * These data structures are used in a Cocke and Schwartz style
495  * value numbering scheme.  Since the flowgraph is acyclic,
496  * exit values can be propagated from a node's predecessors
497  * provided the value is uniquely defined.
498  */
499 struct valnode {
500 	int code;
501 	int v0, v1;
502 	int val;
503 	struct valnode *next;
504 };
505 
506 #define MODULUS 213
507 static struct valnode *hashtbl[MODULUS];
508 static int curval;
509 static int maxval;
510 
511 /* Integer constants mapped with the load immediate opcode. */
512 #define K(i) F(BPF_LD|BPF_IMM|BPF_W, i, 0L)
513 
514 struct vmapinfo {
515 	int is_const;
516 	bpf_int32 const_val;
517 };
518 
519 struct vmapinfo *vmap;
520 struct valnode *vnode_base;
521 struct valnode *next_vnode;
522 
523 static void
524 init_val()
525 {
526 	curval = 0;
527 	next_vnode = vnode_base;
528 	memset((char *)vmap, 0, maxval * sizeof(*vmap));
529 	memset((char *)hashtbl, 0, sizeof hashtbl);
530 }
531 
532 /* Because we really don't have an IR, this stuff is a little messy. */
533 static int
534 F(code, v0, v1)
535 	int code;
536 	int v0, v1;
537 {
538 	u_int hash;
539 	int val;
540 	struct valnode *p;
541 
542 	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
543 	hash %= MODULUS;
544 
545 	for (p = hashtbl[hash]; p; p = p->next)
546 		if (p->code == code && p->v0 == v0 && p->v1 == v1)
547 			return p->val;
548 
549 	val = ++curval;
550 	if (BPF_MODE(code) == BPF_IMM &&
551 	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
552 		vmap[val].const_val = v0;
553 		vmap[val].is_const = 1;
554 	}
555 	p = next_vnode++;
556 	p->val = val;
557 	p->code = code;
558 	p->v0 = v0;
559 	p->v1 = v1;
560 	p->next = hashtbl[hash];
561 	hashtbl[hash] = p;
562 
563 	return val;
564 }
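
/*
 * Example: two occurrences of "ld [12]" hash to the same
 * (code, v0, v1) triple, so the second call to F() finds the existing
 * valnode and returns the same value number, while "ld [14]" gets a
 * fresh one.  K(2) additionally records in vmap[] that the value is
 * the constant 2, which fold_op() and opt_stmt() exploit below.
 */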
565 
566 static inline void
567 vstore(s, valp, newval, alter)
568 	struct stmt *s;
569 	int *valp;
570 	int newval;
571 	int alter;
572 {
573 	if (alter && *valp == newval)
574 		s->code = NOP;
575 	else
576 		*valp = newval;
577 }
578 
579 static void
580 fold_op(s, v0, v1)
581 	struct stmt *s;
582 	int v0, v1;
583 {
584 	bpf_int32 a, b;
585 
586 	a = vmap[v0].const_val;
587 	b = vmap[v1].const_val;
588 
589 	switch (BPF_OP(s->code)) {
590 	case BPF_ADD:
591 		a += b;
592 		break;
593 
594 	case BPF_SUB:
595 		a -= b;
596 		break;
597 
598 	case BPF_MUL:
599 		a *= b;
600 		break;
601 
602 	case BPF_DIV:
603 		if (b == 0)
604 			bpf_error("division by zero");
605 		a /= b;
606 		break;
607 
608 	case BPF_AND:
609 		a &= b;
610 		break;
611 
612 	case BPF_OR:
613 		a |= b;
614 		break;
615 
616 	case BPF_LSH:
617 		a <<= b;
618 		break;
619 
620 	case BPF_RSH:
621 		a >>= b;
622 		break;
623 
624 	case BPF_NEG:
625 		a = -a;
626 		break;
627 
628 	default:
629 		abort();
630 	}
631 	s->k = a;
632 	s->code = BPF_LD|BPF_IMM;
633 	done = 0;
634 }
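
/*
 * Example: if the accumulator is known to hold the constant 2 and the
 * statement is "add #3", opt_stmt() hands both value numbers to
 * fold_op(), which rewrites the statement in place as "ld #5"
 * (BPF_LD|BPF_IMM with k == 5) and clears 'done' so another pass runs.
 */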
635 
636 static inline struct slist *
637 this_op(s)
638 	struct slist *s;
639 {
640 	while (s != 0 && s->s.code == NOP)
641 		s = s->next;
642 	return s;
643 }
644 
645 static void
646 opt_not(b)
647 	struct block *b;
648 {
649 	struct block *tmp = JT(b);
650 
651 	JT(b) = JF(b);
652 	JF(b) = tmp;
653 }
654 
655 static void
656 opt_peep(b)
657 	struct block *b;
658 {
659 	struct slist *s;
660 	struct slist *next, *last;
661 	int val;
662 
663 	s = b->stmts;
664 	if (s == 0)
665 		return;
666 
667 	last = s;
668 	while (1) {
669 		s = this_op(s);
670 		if (s == 0)
671 			break;
672 		next = this_op(s->next);
673 		if (next == 0)
674 			break;
675 		last = next;
676 
677 		/*
678 		 * st  M[k]	-->	st  M[k]
679 		 * ldx M[k]		tax
680 		 */
681 		if (s->s.code == BPF_ST &&
682 		    next->s.code == (BPF_LDX|BPF_MEM) &&
683 		    s->s.k == next->s.k) {
684 			done = 0;
685 			next->s.code = BPF_MISC|BPF_TAX;
686 		}
687 		/*
688 		 * ld  #k	-->	ldx  #k
689 		 * tax			txa
690 		 */
691 		if (s->s.code == (BPF_LD|BPF_IMM) &&
692 		    next->s.code == (BPF_MISC|BPF_TAX)) {
693 			s->s.code = BPF_LDX|BPF_IMM;
694 			next->s.code = BPF_MISC|BPF_TXA;
695 			done = 0;
696 		}
697 		/*
698 		 * This is an ugly special case, but it happens
699 		 * when you say tcp[k] or udp[k] where k is a constant.
700 		 */
701 		if (s->s.code == (BPF_LD|BPF_IMM)) {
702 			struct slist *add, *tax, *ild;
703 
704 			/*
705 			 * Check that X isn't used on exit from this
706 			 * block (which the optimizer might cause).
707 			 * We know the code generator won't generate
708 			 * any local dependencies.
709 			 */
710 			if (ATOMELEM(b->out_use, X_ATOM))
711 				break;
712 
713 			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
714 				add = next;
715 			else
716 				add = this_op(next->next);
717 			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
718 				break;
719 
720 			tax = this_op(add->next);
721 			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
722 				break;
723 
724 			ild = this_op(tax->next);
725 			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
726 			    BPF_MODE(ild->s.code) != BPF_IND)
727 				break;
728 			/*
729 			 * XXX We need to check that X is not
730 			 * subsequently used.  We know we can eliminate the
731 			 * accumulator modifications since it is defined
732 			 * by the last stmt of this sequence.
733 			 *
734 			 * We want to turn this sequence:
735 			 *
736 			 * (004) ldi     #0x2		{s}
737 			 * (005) ldxms   [14]		{next}  -- optional
738 			 * (006) addx			{add}
739 			 * (007) tax			{tax}
740 			 * (008) ild     [x+0]		{ild}
741 			 *
742 			 * into this sequence:
743 			 *
744 			 * (004) nop
745 			 * (005) ldxms   [14]
746 			 * (006) nop
747 			 * (007) nop
748 			 * (008) ild     [x+2]
749 			 *
750 			 */
751 			ild->s.k += s->s.k;
752 			s->s.code = NOP;
753 			add->s.code = NOP;
754 			tax->s.code = NOP;
755 			done = 0;
756 		}
757 		s = next;
758 	}
759 	/*
760 	 * If we have a subtract to do a comparison, and the X register
761 	 * is a known constant, we can merge this value into the
762 	 * comparison.
763 	 */
764 	if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X) &&
765 	    !ATOMELEM(b->out_use, A_ATOM)) {
766 		val = b->val[X_ATOM];
767 		if (vmap[val].is_const) {
768 			int op;
769 
770 			b->s.k += vmap[val].const_val;
771 			op = BPF_OP(b->s.code);
772 			if (op == BPF_JGT || op == BPF_JGE) {
773 				struct block *t = JT(b);
774 				JT(b) = JF(b);
775 				JF(b) = t;
776 				b->s.k += 0x80000000;
777 			}
778 			last->s.code = NOP;
779 			done = 0;
780 		} else if (b->s.k == 0) {
781 			/*
782 			 * sub x  ->	nop
783 			 * j  #0	j  x
784 			 */
785 			last->s.code = NOP;
786 			b->s.code = BPF_CLASS(b->s.code) | BPF_OP(b->s.code) |
787 				BPF_X;
788 			done = 0;
789 		}
790 	}
791 	/*
792 	 * Likewise, a constant subtract can be simplified.
793 	 */
794 	else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K) &&
795 		 !ATOMELEM(b->out_use, A_ATOM)) {
796 		int op;
797 
798 		b->s.k += last->s.k;
799 		last->s.code = NOP;
800 		op = BPF_OP(b->s.code);
801 		if (op == BPF_JGT || op == BPF_JGE) {
802 			struct block *t = JT(b);
803 			JT(b) = JF(b);
804 			JF(b) = t;
805 			b->s.k += 0x80000000;
806 		}
807 		done = 0;
808 	}
809 	/*
810 	 * and #k	nop
811 	 * jeq #0  ->	jset #k
812 	 */
813 	if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
814 	    !ATOMELEM(b->out_use, A_ATOM) && b->s.k == 0) {
815 		b->s.k = last->s.k;
816 		b->s.code = BPF_JMP|BPF_K|BPF_JSET;
817 		last->s.code = NOP;
818 		done = 0;
819 		opt_not(b);
820 	}
821 	/*
822 	 * If the accumulator is a known constant, we can compute the
823 	 * comparison result.
824 	 */
825 	val = b->val[A_ATOM];
826 	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
827 		bpf_int32 v = vmap[val].const_val;
828 		switch (BPF_OP(b->s.code)) {
829 
830 		case BPF_JEQ:
831 			v = v == b->s.k;
832 			break;
833 
834 		case BPF_JGT:
835 			v = (unsigned)v > b->s.k;
836 			break;
837 
838 		case BPF_JGE:
839 			v = (unsigned)v >= b->s.k;
840 			break;
841 
842 		case BPF_JSET:
843 			v &= b->s.k;
844 			break;
845 
846 		default:
847 			abort();
848 		}
849 		if (JF(b) != JT(b))
850 			done = 0;
851 		if (v)
852 			JF(b) = JT(b);
853 		else
854 			JT(b) = JF(b);
855 	}
856 }
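
/*
 * Example of the final case above: if the accumulator is known to hold
 * the constant 0x800 and the block tests "jeq #0x800", v is true and
 * JF(b) is redirected to JT(b); the test, now with both branches
 * equal, is bypassed on a later pass by opt_j() or opt_root().
 */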
857 
858 /*
859  * Compute the symbolic value of the expression in 's', and update
860  * anything it defines in the value table 'val'.  If 'alter' is true,
861  * do various optimizations.  This code would be cleaner if symbolic
862  * evaluation and code transformations weren't folded together.
863  */
864 static void
865 opt_stmt(s, val, alter)
866 	struct stmt *s;
867 	int val[];
868 	int alter;
869 {
870 	int op;
871 	int v;
872 
873 	switch (s->code) {
874 
875 	case BPF_LD|BPF_ABS|BPF_W:
876 	case BPF_LD|BPF_ABS|BPF_H:
877 	case BPF_LD|BPF_ABS|BPF_B:
878 		v = F(s->code, s->k, 0L);
879 		vstore(s, &val[A_ATOM], v, alter);
880 		break;
881 
882 	case BPF_LD|BPF_IND|BPF_W:
883 	case BPF_LD|BPF_IND|BPF_H:
884 	case BPF_LD|BPF_IND|BPF_B:
885 		v = val[X_ATOM];
886 		if (alter && vmap[v].is_const) {
887 			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
888 			s->k += vmap[v].const_val;
889 			v = F(s->code, s->k, 0L);
890 			done = 0;
891 		}
892 		else
893 			v = F(s->code, s->k, v);
894 		vstore(s, &val[A_ATOM], v, alter);
895 		break;
896 
897 	case BPF_LD|BPF_LEN:
898 		v = F(s->code, 0L, 0L);
899 		vstore(s, &val[A_ATOM], v, alter);
900 		break;
901 
902 	case BPF_LD|BPF_IMM:
903 		v = K(s->k);
904 		vstore(s, &val[A_ATOM], v, alter);
905 		break;
906 
907 	case BPF_LDX|BPF_IMM:
908 		v = K(s->k);
909 		vstore(s, &val[X_ATOM], v, alter);
910 		break;
911 
912 	case BPF_LDX|BPF_MSH|BPF_B:
913 		v = F(s->code, s->k, 0L);
914 		vstore(s, &val[X_ATOM], v, alter);
915 		break;
916 
917 	case BPF_ALU|BPF_NEG:
918 		if (alter && vmap[val[A_ATOM]].is_const) {
919 			s->code = BPF_LD|BPF_IMM;
920 			s->k = -vmap[val[A_ATOM]].const_val;
921 			val[A_ATOM] = K(s->k);
922 		}
923 		else
924 			val[A_ATOM] = F(s->code, val[A_ATOM], 0L);
925 		break;
926 
927 	case BPF_ALU|BPF_ADD|BPF_K:
928 	case BPF_ALU|BPF_SUB|BPF_K:
929 	case BPF_ALU|BPF_MUL|BPF_K:
930 	case BPF_ALU|BPF_DIV|BPF_K:
931 	case BPF_ALU|BPF_AND|BPF_K:
932 	case BPF_ALU|BPF_OR|BPF_K:
933 	case BPF_ALU|BPF_LSH|BPF_K:
934 	case BPF_ALU|BPF_RSH|BPF_K:
935 		op = BPF_OP(s->code);
936 		if (alter) {
937 			if (s->k == 0) {
938 				if (op == BPF_ADD || op == BPF_SUB ||
939 				    op == BPF_LSH || op == BPF_RSH ||
940 				    op == BPF_OR) {
941 					s->code = NOP;
942 					break;
943 				}
944 				if (op == BPF_MUL || op == BPF_AND) {
945 					s->code = BPF_LD|BPF_IMM;
946 					val[A_ATOM] = K(s->k);
947 					break;
948 				}
949 			}
950 			if (vmap[val[A_ATOM]].is_const) {
951 				fold_op(s, val[A_ATOM], K(s->k));
952 				val[A_ATOM] = K(s->k);
953 				break;
954 			}
955 		}
956 		val[A_ATOM] = F(s->code, val[A_ATOM], K(s->k));
957 		break;
958 
959 	case BPF_ALU|BPF_ADD|BPF_X:
960 	case BPF_ALU|BPF_SUB|BPF_X:
961 	case BPF_ALU|BPF_MUL|BPF_X:
962 	case BPF_ALU|BPF_DIV|BPF_X:
963 	case BPF_ALU|BPF_AND|BPF_X:
964 	case BPF_ALU|BPF_OR|BPF_X:
965 	case BPF_ALU|BPF_LSH|BPF_X:
966 	case BPF_ALU|BPF_RSH|BPF_X:
967 		op = BPF_OP(s->code);
968 		if (alter && vmap[val[X_ATOM]].is_const) {
969 			if (vmap[val[A_ATOM]].is_const) {
970 				fold_op(s, val[A_ATOM], val[X_ATOM]);
971 				val[A_ATOM] = K(s->k);
972 			}
973 			else {
974 				s->code = BPF_ALU|BPF_K|op;
975 				s->k = vmap[val[X_ATOM]].const_val;
976 				done = 0;
977 				val[A_ATOM] =
978 					F(s->code, val[A_ATOM], K(s->k));
979 			}
980 			break;
981 		}
982 		/*
983 		 * Check if we're doing something to an accumulator
984 		 * that is 0, and simplify.  This may not seem like
985 		 * much of a simplification but it could open up further
986 		 * optimizations.
987 		 * XXX We could also check for mul by 1, and -1, etc.
988 		 */
989 		if (alter && vmap[val[A_ATOM]].is_const
990 		    && vmap[val[A_ATOM]].const_val == 0) {
991 			if (op == BPF_ADD || op == BPF_OR ||
992 			    op == BPF_LSH || op == BPF_RSH || op == BPF_SUB) {
993 				s->code = BPF_MISC|BPF_TXA;
994 				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
995 				break;
996 			}
997 			else if (op == BPF_MUL || op == BPF_DIV ||
998 				 op == BPF_AND) {
999 				s->code = BPF_LD|BPF_IMM;
1000 				s->k = 0;
1001 				vstore(s, &val[A_ATOM], K(s->k), alter);
1002 				break;
1003 			}
1004 			else if (op == BPF_NEG) {
1005 				s->code = NOP;
1006 				break;
1007 			}
1008 		}
1009 		val[A_ATOM] = F(s->code, val[A_ATOM], val[X_ATOM]);
1010 		break;
1011 
1012 	case BPF_MISC|BPF_TXA:
1013 		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1014 		break;
1015 
1016 	case BPF_LD|BPF_MEM:
1017 		v = val[s->k];
1018 		if (alter && vmap[v].is_const) {
1019 			s->code = BPF_LD|BPF_IMM;
1020 			s->k = vmap[v].const_val;
1021 			done = 0;
1022 		}
1023 		vstore(s, &val[A_ATOM], v, alter);
1024 		break;
1025 
1026 	case BPF_MISC|BPF_TAX:
1027 		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
1028 		break;
1029 
1030 	case BPF_LDX|BPF_MEM:
1031 		v = val[s->k];
1032 		if (alter && vmap[v].is_const) {
1033 			s->code = BPF_LDX|BPF_IMM;
1034 			s->k = vmap[v].const_val;
1035 			done = 0;
1036 		}
1037 		vstore(s, &val[X_ATOM], v, alter);
1038 		break;
1039 
1040 	case BPF_ST:
1041 		vstore(s, &val[s->k], val[A_ATOM], alter);
1042 		break;
1043 
1044 	case BPF_STX:
1045 		vstore(s, &val[s->k], val[X_ATOM], alter);
1046 		break;
1047 	}
1048 }
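
/*
 * Example: if X is known to hold the constant 20, the indexed load
 * "ld [x + 4]" above is rewritten as the absolute load "ld [24]", and
 * a "ldx M[k]" whose scratch cell holds a known constant becomes
 * "ldx #const"; both rewrites clear 'done' to force another pass.
 */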
1049 
1050 static void
1051 deadstmt(s, last)
1052 	register struct stmt *s;
1053 	register struct stmt *last[];
1054 {
1055 	register int atom;
1056 
1057 	atom = atomuse(s);
1058 	if (atom >= 0) {
1059 		if (atom == AX_ATOM) {
1060 			last[X_ATOM] = 0;
1061 			last[A_ATOM] = 0;
1062 		}
1063 		else
1064 			last[atom] = 0;
1065 	}
1066 	atom = atomdef(s);
1067 	if (atom >= 0) {
1068 		if (last[atom]) {
1069 			done = 0;
1070 			last[atom]->code = NOP;
1071 		}
1072 		last[atom] = s;
1073 	}
1074 }
1075 
1076 static void
1077 opt_deadstores(b)
1078 	register struct block *b;
1079 {
1080 	register struct slist *s;
1081 	register int atom;
1082 	struct stmt *last[N_ATOMS];
1083 
1084 	memset((char *)last, 0, sizeof last);
1085 
1086 	for (s = b->stmts; s != 0; s = s->next)
1087 		deadstmt(&s->s, last);
1088 	deadstmt(&b->s, last);
1089 
1090 	for (atom = 0; atom < N_ATOMS; ++atom)
1091 		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
1092 			last[atom]->code = NOP;
1093 			done = 0;
1094 		}
1095 }
1096 
1097 static void
1098 opt_blk(b, do_stmts)
1099 	struct block *b;
1100 	int do_stmts;
1101 {
1102 	struct slist *s;
1103 	struct edge *p;
1104 	int i;
1105 	bpf_int32 aval;
1106 
1107 	/*
1108 	 * Initialize the atom values.
1109 	 * If we have no predecessors, everything is undefined.
1110 	 * Otherwise, we inherit our values from our predecessors.
1111 	 * If any register has an ambiguous value (i.e. control paths are
1112 	 * merging) give it the undefined value of 0.
1113 	 */
1114 	p = b->in_edges;
1115 	if (p == 0)
1116 		memset((char *)b->val, 0, sizeof(b->val));
1117 	else {
1118 		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
1119 		while ((p = p->next) != NULL) {
1120 			for (i = 0; i < N_ATOMS; ++i)
1121 				if (b->val[i] != p->pred->val[i])
1122 					b->val[i] = 0;
1123 		}
1124 	}
1125 	aval = b->val[A_ATOM];
1126 	for (s = b->stmts; s; s = s->next)
1127 		opt_stmt(&s->s, b->val, do_stmts);
1128 
1129 	/*
1130 	 * This is a special case: if we don't use anything from this
1131 	 * block, and we load the accumulator with a value that is
1132 	 * already there, or if this block is a return,
1133 	 * eliminate all the statements.
1134 	 */
1135 	if (do_stmts &&
1136 	    ((b->out_use == 0 && aval != 0 && b->val[A_ATOM] == aval) ||
1137 	     BPF_CLASS(b->s.code) == BPF_RET)) {
1138 		if (b->stmts != 0) {
1139 			b->stmts = 0;
1140 			done = 0;
1141 		}
1142 	} else {
1143 		opt_peep(b);
1144 		opt_deadstores(b);
1145 	}
1146 	/*
1147 	 * Set up values for branch optimizer.
1148 	 */
1149 	if (BPF_SRC(b->s.code) == BPF_K)
1150 		b->oval = K(b->s.k);
1151 	else
1152 		b->oval = b->val[X_ATOM];
1153 	b->et.code = b->s.code;
1154 	b->ef.code = -b->s.code;
1155 }
1156 
1157 /*
1158  * Return true if any register that is used on exit from 'succ', has
1159  * an exit value that is different from the corresponding exit value
1160  * from 'b'.
1161  */
1162 static int
1163 use_conflict(b, succ)
1164 	struct block *b, *succ;
1165 {
1166 	int atom;
1167 	atomset use = succ->out_use;
1168 
1169 	if (use == 0)
1170 		return 0;
1171 
1172 	for (atom = 0; atom < N_ATOMS; ++atom)
1173 		if (ATOMELEM(use, atom))
1174 			if (b->val[atom] != succ->val[atom])
1175 				return 1;
1176 	return 0;
1177 }
1178 
1179 static struct block *
1180 fold_edge(child, ep)
1181 	struct block *child;
1182 	struct edge *ep;
1183 {
1184 	int sense;
1185 	int aval0, aval1, oval0, oval1;
1186 	int code = ep->code;
1187 
1188 	if (code < 0) {
1189 		code = -code;
1190 		sense = 0;
1191 	} else
1192 		sense = 1;
1193 
1194 	if (child->s.code != code)
1195 		return 0;
1196 
1197 	aval0 = child->val[A_ATOM];
1198 	oval0 = child->oval;
1199 	aval1 = ep->pred->val[A_ATOM];
1200 	oval1 = ep->pred->oval;
1201 
1202 	if (aval0 != aval1)
1203 		return 0;
1204 
1205 	if (oval0 == oval1)
1206 		/*
1207 		 * The operands are identical, so the
1208 		 * result is true if a true branch was
1209 		 * taken to get here, otherwise false.
1210 		 */
1211 		return sense ? JT(child) : JF(child);
1212 
1213 	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
1214 		/*
1215 		 * At this point, we only know the comparison if we
1216 		 * came down the true branch, and it was an equality
1217 		 * comparison with a constant.  We rely on the fact that
1218 		 * distinct constants have distinct value numbers.
1219 		 */
1220 		return JF(child);
1221 
1222 	return 0;
1223 }
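
/*
 * Example: if an ancestor edge tested "jeq #0x800" on the same A value
 * and we reached 'child' along its true branch, then a child that
 * performs the identical test on identical operands must also be true,
 * so fold_edge() returns JT(child) and opt_j() can retarget the edge
 * straight to that grandchild.  Coming down the true branch of a
 * "jeq #k" with a *different* constant instead proves the child's test
 * false, hence the JF(child) case.
 */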
1224 
1225 static void
1226 opt_j(ep)
1227 	struct edge *ep;
1228 {
1229 	register int i, k;
1230 	register struct block *target;
1231 
1232 	if (JT(ep->succ) == 0)
1233 		return;
1234 
1235 	if (JT(ep->succ) == JF(ep->succ)) {
1236 		/*
1237 		 * Common branch targets can be eliminated, provided
1238 		 * there is no data dependency.
1239 		 */
1240 		if (!use_conflict(ep->pred, ep->succ->et.succ)) {
1241 			done = 0;
1242 			ep->succ = JT(ep->succ);
1243 		}
1244 	}
1245 	/*
1246 	 * For each edge dominator that matches the successor of this
1247 	 * edge, promote the edge successor to its grandchild.
1248 	 *
1249 	 * XXX We violate the set abstraction here in favor of a reasonably
1250 	 * efficient loop.
1251 	 */
1252  top:
1253 	for (i = 0; i < edgewords; ++i) {
1254 		register bpf_u_int32 x = ep->edom[i];
1255 
1256 		while (x != 0) {
1257 			k = ffs(x) - 1;
1258 			x &=~ (1 << k);
1259 			k += i * BITS_PER_WORD;
1260 
1261 			target = fold_edge(ep->succ, edges[k]);
1262 			/*
1263 			 * Check that moving the edge will not violate
1264 			 * any data dependency between the nodes.
1265 			 */
1266 			if (target != 0 && !use_conflict(ep->pred, target)) {
1267 				done = 0;
1268 				ep->succ = target;
1269 				if (JT(target) != 0)
1270 					/*
1271 					 * Start over unless we hit a leaf.
1272 					 */
1273 					goto top;
1274 				return;
1275 			}
1276 		}
1277 	}
1278 }
1279 
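/*
 * or_pullup() and and_pullup() operate on chains of blocks that share
 * a common true (resp. false) successor, i.e. the chains produced by
 * "or" and "and" expressions.  Roughly, when every predecessor of 'b'
 * computes the same A value, a block further down the chain that
 * computes that same value is pulled up next to them, so that later
 * passes (opt_j/fold_edge) can merge the now-adjacent equivalent tests.
 */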
1280 
1281 static void
1282 or_pullup(b)
1283 	struct block *b;
1284 {
1285 	int val, at_top;
1286 	struct block *pull;
1287 	struct block **diffp, **samep;
1288 	struct edge *ep;
1289 
1290 	ep = b->in_edges;
1291 	if (ep == 0)
1292 		return;
1293 
1294 	/*
1295 	 * Make sure each predecessor loads the same value.
1296 	 * XXX why?
1297 	 */
1298 	val = ep->pred->val[A_ATOM];
1299 	for (ep = ep->next; ep != 0; ep = ep->next)
1300 		if (val != ep->pred->val[A_ATOM])
1301 			return;
1302 
1303 	if (JT(b->in_edges->pred) == b)
1304 		diffp = &JT(b->in_edges->pred);
1305 	else
1306 		diffp = &JF(b->in_edges->pred);
1307 
1308 	at_top = 1;
1309 	while (1) {
1310 		if (*diffp == 0)
1311 			return;
1312 
1313 		if (JT(*diffp) != JT(b))
1314 			return;
1315 
1316 		if (!SET_MEMBER((*diffp)->dom, b->id))
1317 			return;
1318 
1319 		if ((*diffp)->val[A_ATOM] != val)
1320 			break;
1321 
1322 		diffp = &JF(*diffp);
1323 		at_top = 0;
1324 	}
1325 	samep = &JF(*diffp);
1326 	while (1) {
1327 		if (*samep == 0)
1328 			return;
1329 
1330 		if (JT(*samep) != JT(b))
1331 			return;
1332 
1333 		if (!SET_MEMBER((*samep)->dom, b->id))
1334 			return;
1335 
1336 		if ((*samep)->val[A_ATOM] == val)
1337 			break;
1338 
1339 		/* XXX Need to check that there are no data dependencies
1340 		   between diffp and samep.  Currently, the code generator
1341 		   will not produce such dependencies. */
1342 		samep = &JF(*samep);
1343 	}
1344 #ifdef notdef
1345 	/* XXX This doesn't cover everything. */
1346 	for (i = 0; i < N_ATOMS; ++i)
1347 		if ((*samep)->val[i] != pred->val[i])
1348 			return;
1349 #endif
1350 	/* Pull up the node. */
1351 	pull = *samep;
1352 	*samep = JF(pull);
1353 	JF(pull) = *diffp;
1354 
1355 	/*
1356 	 * At the top of the chain, each predecessor needs to point at the
1357 	 * pulled up node.  Inside the chain, there is only one predecessor
1358 	 * to worry about.
1359 	 */
1360 	if (at_top) {
1361 		for (ep = b->in_edges; ep != 0; ep = ep->next) {
1362 			if (JT(ep->pred) == b)
1363 				JT(ep->pred) = pull;
1364 			else
1365 				JF(ep->pred) = pull;
1366 		}
1367 	}
1368 	else
1369 		*diffp = pull;
1370 
1371 	done = 0;
1372 }
1373 
1374 static void
1375 and_pullup(b)
1376 	struct block *b;
1377 {
1378 	int val, at_top;
1379 	struct block *pull;
1380 	struct block **diffp, **samep;
1381 	struct edge *ep;
1382 
1383 	ep = b->in_edges;
1384 	if (ep == 0)
1385 		return;
1386 
1387 	/*
1388 	 * Make sure each predecessor loads the same value.
1389 	 */
1390 	val = ep->pred->val[A_ATOM];
1391 	for (ep = ep->next; ep != 0; ep = ep->next)
1392 		if (val != ep->pred->val[A_ATOM])
1393 			return;
1394 
1395 	if (JT(b->in_edges->pred) == b)
1396 		diffp = &JT(b->in_edges->pred);
1397 	else
1398 		diffp = &JF(b->in_edges->pred);
1399 
1400 	at_top = 1;
1401 	while (1) {
1402 		if (*diffp == 0)
1403 			return;
1404 
1405 		if (JF(*diffp) != JF(b))
1406 			return;
1407 
1408 		if (!SET_MEMBER((*diffp)->dom, b->id))
1409 			return;
1410 
1411 		if ((*diffp)->val[A_ATOM] != val)
1412 			break;
1413 
1414 		diffp = &JT(*diffp);
1415 		at_top = 0;
1416 	}
1417 	samep = &JT(*diffp);
1418 	while (1) {
1419 		if (*samep == 0)
1420 			return;
1421 
1422 		if (JF(*samep) != JF(b))
1423 			return;
1424 
1425 		if (!SET_MEMBER((*samep)->dom, b->id))
1426 			return;
1427 
1428 		if ((*samep)->val[A_ATOM] == val)
1429 			break;
1430 
1431 		/* XXX Need to check that there are no data dependencies
1432 		   between diffp and samep.  Currently, the code generator
1433 		   will not produce such dependencies. */
1434 		samep = &JT(*samep);
1435 	}
1436 #ifdef notdef
1437 	/* XXX This doesn't cover everything. */
1438 	for (i = 0; i < N_ATOMS; ++i)
1439 		if ((*samep)->val[i] != pred->val[i])
1440 			return;
1441 #endif
1442 	/* Pull up the node. */
1443 	pull = *samep;
1444 	*samep = JT(pull);
1445 	JT(pull) = *diffp;
1446 
1447 	/*
1448 	 * At the top of the chain, each predecessor needs to point at the
1449 	 * pulled up node.  Inside the chain, there is only one predecessor
1450 	 * to worry about.
1451 	 */
1452 	if (at_top) {
1453 		for (ep = b->in_edges; ep != 0; ep = ep->next) {
1454 			if (JT(ep->pred) == b)
1455 				JT(ep->pred) = pull;
1456 			else
1457 				JF(ep->pred) = pull;
1458 		}
1459 	}
1460 	else
1461 		*diffp = pull;
1462 
1463 	done = 0;
1464 }
1465 
1466 static void
1467 opt_blks(root, do_stmts)
1468 	struct block *root;
1469 	int do_stmts;
1470 {
1471 	int i, maxlevel;
1472 	struct block *p;
1473 
1474 	init_val();
1475 	maxlevel = root->level;
1476 	for (i = maxlevel; i >= 0; --i)
1477 		for (p = levels[i]; p; p = p->link)
1478 			opt_blk(p, do_stmts);
1479 
1480 	if (do_stmts)
1481 		/*
1482 		 * No point trying to move branches; it can't possibly
1483 		 * make a difference at this point.
1484 		 */
1485 		return;
1486 
1487 	for (i = 1; i <= maxlevel; ++i) {
1488 		for (p = levels[i]; p; p = p->link) {
1489 			opt_j(&p->et);
1490 			opt_j(&p->ef);
1491 		}
1492 	}
1493 	for (i = 1; i <= maxlevel; ++i) {
1494 		for (p = levels[i]; p; p = p->link) {
1495 			or_pullup(p);
1496 			and_pullup(p);
1497 		}
1498 	}
1499 }
1500 
1501 static inline void
1502 link_inedge(parent, child)
1503 	struct edge *parent;
1504 	struct block *child;
1505 {
1506 	parent->next = child->in_edges;
1507 	child->in_edges = parent;
1508 }
1509 
1510 static void
1511 find_inedges(root)
1512 	struct block *root;
1513 {
1514 	int i;
1515 	struct block *b;
1516 
1517 	for (i = 0; i < n_blocks; ++i)
1518 		blocks[i]->in_edges = 0;
1519 
1520 	/*
1521 	 * Traverse the graph, adding each edge to the predecessor
1522 	 * list of its successors.  Skip the leaves (i.e. level 0).
1523 	 */
1524 	for (i = root->level; i > 0; --i) {
1525 		for (b = levels[i]; b != 0; b = b->link) {
1526 			link_inedge(&b->et, JT(b));
1527 			link_inedge(&b->ef, JF(b));
1528 		}
1529 	}
1530 }
1531 
1532 static void
1533 opt_root(b)
1534 	struct block **b;
1535 {
1536 	struct slist *tmp, *s;
1537 
1538 	s = (*b)->stmts;
1539 	(*b)->stmts = 0;
1540 	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
1541 		*b = JT(*b);
1542 
1543 	tmp = (*b)->stmts;
1544 	if (tmp != 0)
1545 		sappend(s, tmp);
1546 	(*b)->stmts = s;
1547 
1548 	/*
1549 	 * If the root node is a return, then there is no
1550 	 * point executing any statements (since the bpf machine
1551 	 * has no side effects).
1552 	 */
1553 	if (BPF_CLASS((*b)->s.code) == BPF_RET)
1554 		(*b)->stmts = 0;
1555 }
1556 
1557 static void
1558 opt_loop(root, do_stmts)
1559 	struct block *root;
1560 	int do_stmts;
1561 {
1562 
1563 #ifdef BDEBUG
1564 	if (dflag > 1)
1565 		opt_dump(root);
1566 #endif
1567 	do {
1568 		done = 1;
1569 		find_levels(root);
1570 		find_dom(root);
1571 		find_closure(root);
1572 		find_inedges(root);
1573 		find_ud(root);
1574 		find_edom(root);
1575 		opt_blks(root, do_stmts);
1576 #ifdef BDEBUG
1577 		if (dflag > 1)
1578 			opt_dump(root);
1579 #endif
1580 	} while (!done);
1581 }
1582 
1583 /*
1584  * Optimize the filter code in its dag representation.
1585  */
1586 void
1587 bpf_optimize(rootp)
1588 	struct block **rootp;
1589 {
1590 	struct block *root;
1591 
1592 	root = *rootp;
1593 
1594 	opt_init(root);
1595 	opt_loop(root, 0);
1596 	opt_loop(root, 1);
1597 	intern_blocks(root);
1598 	opt_root(rootp);
1599 	opt_cleanup();
1600 }
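
/*
 * This is the optimizer's entry point: the 'optimize' argument to
 * pcap_compile() (in gencode.c) controls whether bpf_optimize() is run
 * on the flowgraph root before the graph is flattened by
 * icode_to_fcode() below.
 */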
1601 
1602 static void
1603 make_marks(p)
1604 	struct block *p;
1605 {
1606 	if (!isMarked(p)) {
1607 		Mark(p);
1608 		if (BPF_CLASS(p->s.code) != BPF_RET) {
1609 			make_marks(JT(p));
1610 			make_marks(JF(p));
1611 		}
1612 	}
1613 }
1614 
1615 /*
1616  * Mark code array such that isMarked(i) is true
1617  * only for nodes that are alive.
1618  */
1619 static void
1620 mark_code(p)
1621 	struct block *p;
1622 {
1623 	cur_mark += 1;
1624 	make_marks(p);
1625 }
1626 
1627 /*
1628  * True iff the two stmt lists load the same value from the packet into
1629  * the accumulator.
1630  */
1631 static int
1632 eq_slist(x, y)
1633 	struct slist *x, *y;
1634 {
1635 	while (1) {
1636 		while (x && x->s.code == NOP)
1637 			x = x->next;
1638 		while (y && y->s.code == NOP)
1639 			y = y->next;
1640 		if (x == 0)
1641 			return y == 0;
1642 		if (y == 0)
1643 			return x == 0;
1644 		if (x->s.code != y->s.code || x->s.k != y->s.k)
1645 			return 0;
1646 		x = x->next;
1647 		y = y->next;
1648 	}
1649 }
1650 
1651 static inline int
1652 eq_blk(b0, b1)
1653 	struct block *b0, *b1;
1654 {
1655 	if (b0->s.code == b1->s.code &&
1656 	    b0->s.k == b1->s.k &&
1657 	    b0->et.succ == b1->et.succ &&
1658 	    b0->ef.succ == b1->ef.succ)
1659 		return eq_slist(b0->stmts, b1->stmts);
1660 	return 0;
1661 }
1662 
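/*
 * Merge equivalent blocks: a live block that is identical to a later
 * live block (same branch instruction, same successors, same effective
 * statement list) gets its 'link' pointed at that survivor, and every
 * edge into the duplicate is then redirected there.  The whole process
 * repeats until no edges move, since redirecting edges can expose new
 * equivalences.
 */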
1663 static void
1664 intern_blocks(root)
1665 	struct block *root;
1666 {
1667 	struct block *p;
1668 	int i, j;
1669 	int done;
1670  top:
1671 	done = 1;
1672 	for (i = 0; i < n_blocks; ++i)
1673 		blocks[i]->link = 0;
1674 
1675 	mark_code(root);
1676 
1677 	for (i = n_blocks - 1; --i >= 0; ) {
1678 		if (!isMarked(blocks[i]))
1679 			continue;
1680 		for (j = i + 1; j < n_blocks; ++j) {
1681 			if (!isMarked(blocks[j]))
1682 				continue;
1683 			if (eq_blk(blocks[i], blocks[j])) {
1684 				blocks[i]->link = blocks[j]->link ?
1685 					blocks[j]->link : blocks[j];
1686 				break;
1687 			}
1688 		}
1689 	}
1690 	for (i = 0; i < n_blocks; ++i) {
1691 		p = blocks[i];
1692 		if (JT(p) == 0)
1693 			continue;
1694 		if (JT(p)->link) {
1695 			done = 0;
1696 			JT(p) = JT(p)->link;
1697 		}
1698 		if (JF(p)->link) {
1699 			done = 0;
1700 			JF(p) = JF(p)->link;
1701 		}
1702 	}
1703 	if (!done)
1704 		goto top;
1705 }
1706 
1707 static void
1708 opt_cleanup()
1709 {
1710 	free((void *)vnode_base);
1711 	free((void *)vmap);
1712 	free((void *)edges);
1713 	free((void *)space);
1714 	free((void *)levels);
1715 	free((void *)blocks);
1716 }
1717 
1718 /*
1719  * Return the number of stmts in 's'.
1720  */
1721 static int
1722 slength(s)
1723 	struct slist *s;
1724 {
1725 	int n = 0;
1726 
1727 	for (; s; s = s->next)
1728 		if (s->s.code != NOP)
1729 			++n;
1730 	return n;
1731 }
1732 
1733 /*
1734  * Return the number of nodes reachable by 'p'.
1735  * All nodes should be initially unmarked.
1736  */
1737 static int
1738 count_blocks(p)
1739 	struct block *p;
1740 {
1741 	if (p == 0 || isMarked(p))
1742 		return 0;
1743 	Mark(p);
1744 	return count_blocks(JT(p)) + count_blocks(JF(p)) + 1;
1745 }
1746 
1747 /*
1748  * Do a depth first search on the flow graph, numbering the
1749  * basic blocks, and entering them into the 'blocks' array.
1750  */
1751 static void
1752 number_blks_r(p)
1753 	struct block *p;
1754 {
1755 	int n;
1756 
1757 	if (p == 0 || isMarked(p))
1758 		return;
1759 
1760 	Mark(p);
1761 	n = n_blocks++;
1762 	p->id = n;
1763 	blocks[n] = p;
1764 
1765 	number_blks_r(JT(p));
1766 	number_blks_r(JF(p));
1767 }
1768 
1769 /*
1770  * Return the number of stmts in the flowgraph reachable by 'p'.
1771  * The nodes should be unmarked before calling.
1772  */
1773 static int
1774 count_stmts(p)
1775 	struct block *p;
1776 {
1777 	int n;
1778 
1779 	if (p == 0 || isMarked(p))
1780 		return 0;
1781 	Mark(p);
1782 	n = count_stmts(JT(p)) + count_stmts(JF(p));
1783 	return slength(p->stmts) + n + 1;
1784 }
1785 
1786 /*
1787  * Allocate memory.  All allocation is done before optimization
1788  * is begun.  A linear bound on the size of all data structures is computed
1789  * from the total number of blocks and/or statements.
1790  */
1791 static void
1792 opt_init(root)
1793 	struct block *root;
1794 {
1795 	bpf_u_int32 *p;
1796 	int i, n, max_stmts;
1797 
1798 	/*
1799 	 * First, count the blocks, so we can malloc an array to map
1800 	 * block number to block.  Then, put the blocks into the array.
1801 	 */
1802 	unMarkAll();
1803 	n = count_blocks(root);
1804 	blocks = (struct block **)malloc(n * sizeof(*blocks));
1805 	unMarkAll();
1806 	n_blocks = 0;
1807 	number_blks_r(root);
1808 
1809 	n_edges = 2 * n_blocks;
1810 	edges = (struct edge **)malloc(n_edges * sizeof(*edges));
1811 
1812 	/*
1813 	 * The number of levels is bounded by the number of nodes.
1814 	 */
1815 	levels = (struct block **)malloc(n_blocks * sizeof(*levels));
1816 
1817 	edgewords = n_edges / (8 * sizeof(bpf_u_int32)) + 1;
1818 	nodewords = n_blocks / (8 * sizeof(bpf_u_int32)) + 1;
1819 
1820 	/* XXX */
1821 	space = (bpf_u_int32 *)malloc(2 * n_blocks * nodewords * sizeof(*space)
1822 				 + n_edges * edgewords * sizeof(*space));
1823 	p = space;
1824 	all_dom_sets = p;
1825 	for (i = 0; i < n; ++i) {
1826 		blocks[i]->dom = p;
1827 		p += nodewords;
1828 	}
1829 	all_closure_sets = p;
1830 	for (i = 0; i < n; ++i) {
1831 		blocks[i]->closure = p;
1832 		p += nodewords;
1833 	}
1834 	all_edge_sets = p;
1835 	for (i = 0; i < n; ++i) {
1836 		register struct block *b = blocks[i];
1837 
1838 		b->et.edom = p;
1839 		p += edgewords;
1840 		b->ef.edom = p;
1841 		p += edgewords;
1842 		b->et.id = i;
1843 		edges[i] = &b->et;
1844 		b->ef.id = n_blocks + i;
1845 		edges[n_blocks + i] = &b->ef;
1846 		b->et.pred = b;
1847 		b->ef.pred = b;
1848 	}
1849 	max_stmts = 0;
1850 	for (i = 0; i < n; ++i)
1851 		max_stmts += slength(blocks[i]->stmts) + 1;
1852 	/*
1853 	 * We allocate at most 3 value numbers per statement,
1854 	 * so this is an upper bound on the number of valnodes
1855 	 * we'll need.
1856 	 */
1857 	maxval = 3 * max_stmts;
1858 	vmap = (struct vmapinfo *)malloc(maxval * sizeof(*vmap));
1859 	vnode_base = (struct valnode *)malloc(maxval * sizeof(*vnode_base));
1860 }
1861 
1862 /*
1863  * Some pointers used to convert the basic block form of the code,
1864  * into the array form that BPF requires.  'fstart' will point to
1865  * the malloc'd array while 'ftail' is used during the recursive traversal.
1866  */
1867 static struct bpf_insn *fstart;
1868 static struct bpf_insn *ftail;
1869 
1870 #ifdef BDEBUG
1871 int bids[1000];
1872 #endif
1873 
1874 /*
1875  * Returns true if successful.  Returns false if a branch has
1876  * an offset that is too large.  If so, we have marked that
1877  * branch so that on a subsequent iteration, it will be treated
1878  * properly.
1879  */
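/*
 * Example: when the true target lies 300 instructions ahead, too far
 * for the 8-bit 'jt' field, the branch is pointed at an unconditional
 * BPF_JA inserted immediately after it, and that jump carries the full
 * offset (adjusted for the instructions the insertion added).
 */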
1880 static int
1881 convert_code_r(p)
1882 	struct block *p;
1883 {
1884 	struct bpf_insn *dst;
1885 	struct slist *src;
1886 	int slen;
1887 	u_int off;
1888 	int extrajmps;		/* number of extra jumps inserted */
1889 
1890 	if (p == 0 || isMarked(p))
1891 		return (1);
1892 	Mark(p);
1893 
1894 	if (convert_code_r(JF(p)) == 0)
1895 		return (0);
1896 	if (convert_code_r(JT(p)) == 0)
1897 		return (0);
1898 
1899 	slen = slength(p->stmts);
1900 	dst = ftail -= (slen + 1 + p->longjt + p->longjf);
1901 		/* inflate length by any extra jumps */
1902 
1903 	p->offset = dst - fstart;
1904 
1905 	for (src = p->stmts; src; src = src->next) {
1906 		if (src->s.code == NOP)
1907 			continue;
1908 		dst->code = (u_short)src->s.code;
1909 		dst->k = src->s.k;
1910 		++dst;
1911 	}
1912 #ifdef BDEBUG
1913 	bids[dst - fstart] = p->id + 1;
1914 #endif
1915 	dst->code = (u_short)p->s.code;
1916 	dst->k = p->s.k;
1917 	if (JT(p)) {
1918 		extrajmps = 0;
1919 		off = JT(p)->offset - (p->offset + slen) - 1;
1920 		if (off >= 256) {
1921 		    /* offset too large for branch, must add a jump */
1922 		    if (p->longjt == 0) {
1923 		    	/* mark this instruction and retry */
1924 			p->longjt++;
1925 			return(0);
1926 		    }
1927 		    /* branch if T to following jump */
1928 		    dst->jt = extrajmps;
1929 		    extrajmps++;
1930 		    dst[extrajmps].code = BPF_JMP|BPF_JA;
1931 		    dst[extrajmps].k = off - extrajmps;
1932 		}
1933 		else
1934 		    dst->jt = off;
1935 		off = JF(p)->offset - (p->offset + slen) - 1;
1936 		if (off >= 256) {
1937 		    /* offset too large for branch, must add a jump */
1938 		    if (p->longjf == 0) {
1939 		    	/* mark this instruction and retry */
1940 			p->longjf++;
1941 			return(0);
1942 		    }
1943 		    /* branch if F to following jump */
1944 		    /* if two jumps are inserted, F goes to second one */
1945 		    dst->jf = extrajmps;
1946 		    extrajmps++;
1947 		    dst[extrajmps].code = BPF_JMP|BPF_JA;
1948 		    dst[extrajmps].k = off - extrajmps;
1949 		}
1950 		else
1951 		    dst->jf = off;
1952 	}
1953 	return (1);
1954 }
1955 
1956 
1957 /*
1958  * Convert flowgraph intermediate representation to the
1959  * BPF array representation.  Set *lenp to the number of instructions.
1960  */
1961 struct bpf_insn *
1962 icode_to_fcode(root, lenp)
1963 	struct block *root;
1964 	int *lenp;
1965 {
1966 	int n;
1967 	struct bpf_insn *fp;
1968 
1969 	/*
1970 	 * Loop doing convert_code_r() until no branches remain
1971 	 * with too-large offsets.
1972 	 */
1973 	while (1) {
1974 	    unMarkAll();
1975 	    n = *lenp = count_stmts(root);
1976 
1977 	    fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
1978 	    memset((char *)fp, 0, sizeof(*fp) * n);
1979 	    fstart = fp;
1980 	    ftail = fp + n;
1981 
1982 	    unMarkAll();
1983 	    if (convert_code_r(root))
1984 		break;
1985 	    free(fp);
1986 	}
1987 
1988 	return fp;
1989 }
1990 
1991 #ifdef BDEBUG
1992 static void
1993 opt_dump(root)
1994 	struct block *root;
1995 {
1996 	struct bpf_program f;
1997 
1998 	memset(bids, 0, sizeof bids);
1999 	f.bf_insns = icode_to_fcode(root, &f.bf_len);
2000 	bpf_dump(&f, 1);
2001 	putchar('\n');
2002 	free((char *)f.bf_insns);
2003 }
2004 #endif
2005