/*
 * Flow - walk the linearized flowgraph, simplifying it as we
 * go along.
 *
 * Copyright (C) 2004 Linus Torvalds
 */

#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <stddef.h>
#include <assert.h>

#include "parse.h"
#include "expression.h"
#include "linearize.h"
#include "flow.h"
#include "target.h"

unsigned long bb_generation;

/*
 * Dammit, if we have a phi-node followed by a conditional
 * branch on that phi-node, we should damn well be able to
 * do something about the source. Maybe.
 */
static int rewrite_branch(struct basic_block *bb,
	struct basic_block **ptr,
	struct basic_block *old,
	struct basic_block *new)
{
	if (*ptr != old || new == old || !bb->ep)
		return 0;

	/* We might find new if-conversions or non-dominating CSEs */
	/* we may also create new dead cycles */
	repeat_phase |= REPEAT_CSE | REPEAT_CFG_CLEANUP;
	*ptr = new;
	replace_bb_in_list(&bb->children, old, new, 1);
	remove_bb_from_list(&old->parents, bb, 1);
	add_bb(&new->parents, bb);
	return 1;
}

/*
 * Return the known truth value of a pseudo, or -1 if
 * it's not known.
 */
static int pseudo_truth_value(pseudo_t pseudo)
{
	switch (pseudo->type) {
	case PSEUDO_VAL:
		return !!pseudo->value;

	case PSEUDO_REG: {
		struct instruction *insn = pseudo->def;

		/* A symbol address is always considered true.. */
		if (insn->opcode == OP_SYMADDR && insn->target == pseudo)
			return 1;
	}
		/* Fall through */
	default:
		return -1;
	}
}

/*
 * Does a basic block depend on the pseudos that "src" defines?
 */
static int bb_depends_on(struct basic_block *target, struct basic_block *src)
{
	pseudo_t pseudo;

	FOR_EACH_PTR(src->defines, pseudo) {
		if (pseudo_in_list(target->needs, pseudo))
			return 1;
	} END_FOR_EACH_PTR(pseudo);
	return 0;
}

/*
 * This really should be handled by bb_depends_on()
 * which efficiently checks the dependence using the
 * defines - needs liveness info. Problem is that
 * there is no liveness done on OP_PHI & OP_PHISRC.
 *
 * This function adds the missing dependency checks.
 */
static int bb_depends_on_phi(struct basic_block *target, struct basic_block *src)
{
	struct instruction *insn;
	FOR_EACH_PTR(src->insns, insn) {
		if (!insn->bb)
			continue;
		if (insn->opcode != OP_PHI)
			continue;
		if (pseudo_in_list(target->needs, insn->target))
			return 1;
	} END_FOR_EACH_PTR(insn);
	return 0;
}

/*
 * When we reach here, we have:
 *  - a basic block that ends in a conditional branch and
 *    that has no side effects apart from the pseudos it
 *    may change.
 *  - the phi-node that the conditional branch depends on
 *  - full pseudo liveness information
 *
 * We need to check if any of the _sources_ of the phi-node
 * may be constant, and not actually need this block at all.
 */
static int try_to_simplify_bb(struct basic_block *bb, struct instruction *first, struct instruction *second)
{
	int changed = 0;
	pseudo_t phi;
	int bogus;

	/*
	 * This is due to improper dominance tracking during
	 * simplify_symbol_usage()/conversion to SSA form.
	 * No sane simplification can be done when we have this.
	 */
	bogus = bb_list_size(bb->parents) != pseudo_list_size(first->phi_list);

	FOR_EACH_PTR(first->phi_list, phi) {
		struct instruction *def = phi->def;
		struct basic_block *source, *target;
		pseudo_t pseudo;
		struct instruction *br;
		int cond;

		if (!def)
			continue;
		source = def->bb;
		pseudo = def->src1;
		if (!pseudo || !source)
			continue;
		br = last_instruction(source->insns);
		if (!br)
			continue;
		if (br->opcode != OP_CBR && br->opcode != OP_BR)
			continue;
		cond = pseudo_truth_value(pseudo);
		if (cond < 0)
			continue;
		target = cond ? second->bb_true : second->bb_false;
		if (bb_depends_on(target, bb))
			continue;
		if (bb_depends_on_phi(target, bb))
			continue;
		changed |= rewrite_branch(source, &br->bb_true, bb, target);
		changed |= rewrite_branch(source, &br->bb_false, bb, target);
		if (changed && !bogus)
			kill_use(THIS_ADDRESS(phi));
	} END_FOR_EACH_PTR(phi);
	return changed;
}

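/*
 * Check whether a basic block does anything observable besides
 * computing pseudos: calls, stores, volatile (or type-less) loads,
 * context changes and asm statements all count as side effects here.
 */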
static int bb_has_side_effects(struct basic_block *bb)
{
	struct instruction *insn;
	FOR_EACH_PTR(bb->insns, insn) {
		if (!insn->bb)
			continue;
		switch (insn->opcode) {
		case OP_CALL:
			/* FIXME! This should take "const" etc into account */
			return 1;

		case OP_LOAD:
			if (!insn->type)
				return 1;
			if (insn->is_volatile)
				return 1;
			continue;

		case OP_STORE:
		case OP_CONTEXT:
			return 1;

		case OP_ASM:
			/* FIXME! This should take "volatile" etc into account */
			return 1;

		default:
			continue;
		}
	} END_FOR_EACH_PTR(insn);
	return 0;
}

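/*
 * A conditional branch qualifies for the phi-node simplification
 * above only if it tests a register defined by an OP_PHI in this
 * same block, and the block itself is free of side effects.
 */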
static int simplify_phi_branch(struct basic_block *bb, struct instruction *br)
{
	pseudo_t cond = br->cond;
	struct instruction *def;

	if (cond->type != PSEUDO_REG)
		return 0;
	def = cond->def;
	if (def->bb != bb || def->opcode != OP_PHI)
		return 0;
	if (bb_has_side_effects(bb))
		return 0;
	return try_to_simplify_bb(bb, def, br);
}

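/*
 * Simplify a branch to a block that itself ends in a conditional
 * branch on the very same condition: the second test is already
 * decided on this path.  Roughly, at the source level (illustrative):
 *
 *	if (a) foo();
 *	if (a) bar();	// on each incoming edge, "a" is known here
 *
 * If the intermediate block has side effects, or the final target
 * depends on pseudos the intermediate block defines, we can at best
 * rewrite its now-known second branch - and only when we are its
 * sole parent.
 */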
static int simplify_branch_branch(struct basic_block *bb, struct instruction *br,
	struct basic_block **target_p, int bb_true)
{
	struct basic_block *target = *target_p, *final;
	struct instruction *insn;
	int retval;

	if (target == bb)
		return 0;
	insn = last_instruction(target->insns);
	if (!insn || insn->opcode != OP_CBR || insn->cond != br->cond)
		return 0;
	/*
	 * Ahhah! We've found a branch to a branch on the same conditional!
	 * Now we just need to see if we can rewrite the branch..
	 */
	retval = 0;
	final = bb_true ? insn->bb_true : insn->bb_false;
	if (bb_has_side_effects(target))
		goto try_to_rewrite_target;
	if (bb_depends_on(final, target))
		goto try_to_rewrite_target;
	if (bb_depends_on_phi(final, target))
		return 0;
	return rewrite_branch(bb, target_p, target, final);

try_to_rewrite_target:
	/*
	 * If we're the only parent, at least we can rewrite the
	 * now-known second branch.
	 */
	if (bb_list_size(target->parents) != 1)
		return retval;
	insert_branch(target, insn, final);
	return 1;
}

static int simplify_one_branch(struct basic_block *bb, struct instruction *br)
{
	if (simplify_phi_branch(bb, br))
		return 1;
	return simplify_branch_branch(bb, br, &br->bb_true, 1) |
	       simplify_branch_branch(bb, br, &br->bb_false, 0);
}

static int simplify_branch_nodes(struct entrypoint *ep)
{
	int changed = 0;
	struct basic_block *bb;

	FOR_EACH_PTR(ep->bbs, bb) {
		struct instruction *br = last_instruction(bb->insns);

		if (!br || br->opcode != OP_CBR)
			continue;
		changed |= simplify_one_branch(bb, br);
	} END_FOR_EACH_PTR(bb);
	return changed;
}

/*
 * This is called late - when we have intra-bb liveness information..
 */
int simplify_flow(struct entrypoint *ep)
{
	return simplify_branch_nodes(ep);
}

static inline void concat_user_list(struct pseudo_user_list *src, struct pseudo_user_list **dst)
{
	copy_ptr_list((struct ptr_list **)dst, (struct ptr_list *)src);
}

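/*
 * Replace every use of an instruction's target pseudo with "src",
 * and hand the old target's remaining user list over to "src".
 */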
void convert_instruction_target(struct instruction *insn, pseudo_t src)
{
	pseudo_t target;
	struct pseudo_user *pu;
	/*
	 * Go through the "insn->users" list and replace them all..
	 */
	target = insn->target;
	if (target == src)
		return;
	FOR_EACH_PTR(target->users, pu) {
		if (*pu->userp != VOID) {
			assert(*pu->userp == target);
			*pu->userp = src;
		}
	} END_FOR_EACH_PTR(pu);
	if (has_use_list(src))
		concat_user_list(target->users, &src->users);
	target->users = NULL;
}

void convert_load_instruction(struct instruction *insn, pseudo_t src)
{
	convert_instruction_target(insn, src);
	kill_instruction(insn);
	repeat_phase |= REPEAT_SYMBOL_CLEANUP;
}

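/*
 * Do two memory accesses overlap?  Each is given as a byte offset
 * plus a size in bits; two ranges are disjoint exactly when one ends
 * at or before the start of the other.  E.g. a 32-bit access at byte
 * offset 0 covers bits [0,32) and does not overlap a 32-bit access
 * at byte offset 4, which covers bits [32,64).
 */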
static int overlapping_memop(struct instruction *a, struct instruction *b)
{
	unsigned int a_start = bytes_to_bits(a->offset);
	unsigned int b_start = bytes_to_bits(b->offset);
	unsigned int a_size = a->size;
	unsigned int b_size = b->size;

	if (a_size + a_start <= b_start)
		return 0;
	if (b_size + b_start <= a_start)
		return 0;
	return 1;
}

static inline int same_memop(struct instruction *a, struct instruction *b)
{
	return	a->offset == b->offset && a->size == b->size;
}

static inline int distinct_symbols(pseudo_t a, pseudo_t b)
{
	if (a->type != PSEUDO_SYM)
		return 0;
	if (b->type != PSEUDO_SYM)
		return 0;
	return a->sym != b->sym;
}

/*
 * Return 1 if "dom" dominates the access to "pseudo"
 * in "insn".
 *
 * Return 0 if it doesn't, and -1 if you don't know.
 */
int dominates(pseudo_t pseudo, struct instruction *insn, struct instruction *dom, int local)
{
	int opcode = dom->opcode;

	if (opcode == OP_CALL || opcode == OP_ENTRY)
		return local ? 0 : -1;
	if (opcode != OP_LOAD && opcode != OP_STORE)
		return 0;
	if (dom->src != pseudo) {
		if (local)
			return 0;
		/* We don't think two explicitly different symbols ever alias */
		if (distinct_symbols(insn->src, dom->src))
			return 0;
		/* We could try to do some alias analysis here */
		return -1;
	}
	if (!same_memop(insn, dom)) {
		if (dom->opcode == OP_LOAD)
			return 0;
		if (!overlapping_memop(insn, dom))
			return 0;
		return -1;
	}
	return 1;
}

/*
 * We should probably sort the phi list just to make it easier to compare
 * later for equality.
 */
void rewrite_load_instruction(struct instruction *insn, struct pseudo_list *dominators)
{
	pseudo_t new, phi;

	/*
	 * Check for somewhat common case of duplicate
	 * phi nodes.
	 */
	new = first_pseudo(dominators)->def->phi_src;
	FOR_EACH_PTR(dominators, phi) {
		if (new != phi->def->phi_src)
			goto complex_phi;
		new->ident = new->ident ? : phi->ident;
	} END_FOR_EACH_PTR(phi);

	/*
	 * All the same pseudo - mark the phi-nodes unused
	 * and convert the load into a LNOP and replace the
	 * pseudo.
	 */
	convert_load_instruction(insn, new);
	FOR_EACH_PTR(dominators, phi) {
		kill_instruction(phi->def);
	} END_FOR_EACH_PTR(phi);
	goto end;

complex_phi:
	/* We leave symbol pseudos with a bogus usage list here */
	if (insn->src->type != PSEUDO_SYM)
		kill_use(&insn->src);
	insn->opcode = OP_PHI;
	insn->phi_list = dominators;

end:
	repeat_phase |= REPEAT_SYMBOL_CLEANUP;
}

/* Kill a pseudo that is dead on exit from the bb */
// The context is:
// * the variable is not global but may have its address used (local/non-local)
// * the stores are only needed by other functions, which would do some
//   loads via the escaped address
// We start at the terminating BBs (normal exit BBs + no-return/unreachable)
// We walk up the BBs' instructions backward:
// * we're only concerned with loads, stores & calls
// * if we reach a call			-> we have to stop if var is non-local
// * if we reach a load of our var	-> we have to stop
// * if we reach a store of our var	-> we can kill it, it's dead
// * we can ignore other stores & loads if the var is local
// * if we reach another store or load done via non-symbol access
//   (so done via some address calculation) -> we have to stop
// If we reach the top of the BB we can recurse into the parent BBs.
static void kill_dead_stores_bb(pseudo_t pseudo, unsigned long generation, struct basic_block *bb, int local)
{
	struct instruction *insn;
	struct basic_block *parent;

	if (bb->generation == generation)
		return;
	bb->generation = generation;
	FOR_EACH_PTR_REVERSE(bb->insns, insn) {
		if (!insn->bb)
			continue;
		switch (insn->opcode) {
		case OP_LOAD:
			if (insn->src == pseudo)
				return;
			break;
		case OP_STORE:
			if (insn->src == pseudo) {
				kill_instruction_force(insn);
				continue;
			}
			break;
		case OP_CALL:
			if (!local)
				return;
		default:
			continue;
		}
		if (!local && insn->src->type != PSEUDO_SYM)
			return;
	} END_FOR_EACH_PTR_REVERSE(insn);

	FOR_EACH_PTR(bb->parents, parent) {
		if (bb_list_size(parent->children) > 1)
			continue;
		kill_dead_stores_bb(pseudo, generation, parent, local);
	} END_FOR_EACH_PTR(parent);
}

void check_access(struct instruction *insn)
{
	pseudo_t pseudo = insn->src;

	if (insn->bb && pseudo->type == PSEUDO_SYM) {
		int offset = insn->offset, bit = bytes_to_bits(offset) + insn->size;
		struct symbol *sym = pseudo->sym;

		if (sym->bit_size > 0 && (offset < 0 || bit > sym->bit_size)) {
			if (insn->tainted)
				return;
			warning(insn->pos, "invalid access %s '%s' (%d %d)",
				offset < 0 ? "below" : "past the end of",
				show_ident(sym->ident), offset,
				bits_to_bytes(sym->bit_size));
			insn->tainted = 1;
		}
	}
}

static struct pseudo_user *first_user(pseudo_t p)
{
	struct pseudo_user *pu;
	FOR_EACH_PTR(p->users, pu) {
		if (!pu)
			continue;
		return pu;
	} END_FOR_EACH_PTR(pu);
	return NULL;
}

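/*
 * Kill stores to "addr" that can never be observed, following the
 * rules described above kill_dead_stores_bb().  As a fast path, a
 * local variable whose address has a single user that is a store
 * can have that store killed outright.  Otherwise we walk backwards
 * from the terminating basic blocks (those with no children).
 */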
void kill_dead_stores(struct entrypoint *ep, pseudo_t addr, int local)
{
	unsigned long generation;
	struct basic_block *bb;

	switch (pseudo_user_list_size(addr->users)) {
	case 0:
		return;
	case 1:
		if (local) {
			struct pseudo_user *pu = first_user(addr);
			struct instruction *insn = pu->insn;
			if (insn->opcode == OP_STORE) {
				kill_instruction_force(insn);
				return;
			}
		}
	default:
		break;
	}

	generation = ++bb_generation;
	FOR_EACH_PTR(ep->bbs, bb) {
		if (bb->children)
			continue;
		kill_dead_stores_bb(addr, generation, bb, local);
	} END_FOR_EACH_PTR(bb);
}

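/*
 * Depth-first walk over the children lists, tagging every basic
 * block reachable from "bb" with the current generation number.
 */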
static void mark_bb_reachable(struct basic_block *bb, unsigned long generation)
{
	struct basic_block *child;

	if (bb->generation == generation)
		return;
	bb->generation = generation;
	FOR_EACH_PTR(bb->children, child) {
		mark_bb_reachable(child, generation);
	} END_FOR_EACH_PTR(child);
}

static void kill_defs(struct instruction *insn)
{
	pseudo_t target = insn->target;

	if (!has_use_list(target))
		return;
	if (target->def != insn)
		return;

	convert_instruction_target(insn, VOID);
}

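/*
 * Delete a basic block outright: force-kill all its instructions,
 * redirect any remaining uses of their targets to VOID, and unlink
 * the block from its parents and children.
 */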
void kill_bb(struct basic_block *bb)
{
	struct instruction *insn;
	struct basic_block *child, *parent;

	FOR_EACH_PTR(bb->insns, insn) {
		if (!insn->bb)
			continue;
		kill_instruction_force(insn);
		kill_defs(insn);
		/*
		 * We kill unreachable instructions even if they
		 * otherwise aren't "killable" (e.g. volatile loads)
		 */
	} END_FOR_EACH_PTR(insn);
	bb->insns = NULL;

	FOR_EACH_PTR(bb->children, child) {
		remove_bb_from_list(&child->parents, bb, 0);
	} END_FOR_EACH_PTR(child);
	bb->children = NULL;

	FOR_EACH_PTR(bb->parents, parent) {
		remove_bb_from_list(&parent->children, bb, 0);
	} END_FOR_EACH_PTR(parent);
	bb->parents = NULL;
}

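/*
 * Mark everything reachable from the entrypoint, then kill and
 * unlink every basic block that was not reached.
 */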
void kill_unreachable_bbs(struct entrypoint *ep)
{
	struct basic_block *bb;
	unsigned long generation = ++bb_generation;

	mark_bb_reachable(ep->entry->bb, generation);
	FOR_EACH_PTR(ep->bbs, bb) {
		if (bb->generation == generation)
			continue;
		/* Mark it as being dead */
		kill_bb(bb);
		bb->ep = NULL;
		DELETE_CURRENT_PTR(bb);
	} END_FOR_EACH_PTR(bb);
	PACK_PTR_LIST(&ep->bbs);
}

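/*
 * Rewrite the terminating branch (or switch) of "bb" so that every
 * edge that went to "old" goes to "new" instead.
 */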
static int rewrite_parent_branch(struct basic_block *bb, struct basic_block *old, struct basic_block *new)
{
	int changed = 0;
	struct instruction *insn = last_instruction(bb->insns);

	if (!insn)
		return 0;

	/* Infinite loops: let's not "optimize" them.. */
	if (old == new)
		return 0;

	switch (insn->opcode) {
	case OP_CBR:
		changed |= rewrite_branch(bb, &insn->bb_false, old, new);
		/* fall through */
	case OP_BR:
		changed |= rewrite_branch(bb, &insn->bb_true, old, new);
		assert(changed);
		return changed;
	case OP_SWITCH: {
		struct multijmp *jmp;
		FOR_EACH_PTR(insn->multijmp_list, jmp) {
			changed |= rewrite_branch(bb, &jmp->target, old, new);
		} END_FOR_EACH_PTR(jmp);
		assert(changed);
		return changed;
	}
	default:
		return 0;
	}
}

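/*
 * A basic block that contains nothing but a branch can be bypassed:
 * every parent is redirected straight to the branch target.  For a
 * conditional branch this requires a constant condition, e.g.
 * (illustrative IR):
 *
 *	cbr  $1, .L1, .L2	-> parents can branch straight to .L1
 *
 * Returns the final target on success, NULL if the rewrite could
 * not be completed.
 */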
static struct basic_block * rewrite_branch_bb(struct basic_block *bb, struct instruction *br)
{
	struct basic_block *parent;
	struct basic_block *target = br->bb_true;

	if (br->opcode == OP_CBR) {
		pseudo_t cond = br->cond;
		if (cond->type != PSEUDO_VAL)
			return NULL;
		target = cond->value ? target : br->bb_false;
	}

	/*
	 * We can't do FOR_EACH_PTR() here, because the parent list
	 * may change when we rewrite the parent.
	 */
	while ((parent = first_basic_block(bb->parents)) != NULL) {
		if (!rewrite_parent_branch(parent, bb, target))
			return NULL;
	}
	return target;
}

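/*
 * The vrfy_*() helpers below are consistency checkers: they verify
 * that the parent and child lists of every basic block mirror each
 * other and agree with the actual branch instructions.
 */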
static void vrfy_bb_in_list(struct basic_block *bb, struct basic_block_list *list)
{
	if (bb) {
		struct basic_block *tmp;
		int no_bb_in_list = 0;

		FOR_EACH_PTR(list, tmp) {
			if (bb == tmp)
				return;
		} END_FOR_EACH_PTR(tmp);
		assert(no_bb_in_list);
	}
}

static void vrfy_parents(struct basic_block *bb)
{
	struct basic_block *tmp;
	FOR_EACH_PTR(bb->parents, tmp) {
		vrfy_bb_in_list(bb, tmp->children);
	} END_FOR_EACH_PTR(tmp);
}

static void vrfy_children(struct basic_block *bb)
{
	struct basic_block *tmp;
	struct instruction *br = last_instruction(bb->insns);

	if (!br) {
		assert(!bb->children);
		return;
	}
	switch (br->opcode) {
		struct multijmp *jmp;
	case OP_CBR:
		vrfy_bb_in_list(br->bb_false, bb->children);
		/* fall through */
	case OP_BR:
		vrfy_bb_in_list(br->bb_true, bb->children);
		break;
	case OP_SWITCH:
	case OP_COMPUTEDGOTO:
		FOR_EACH_PTR(br->multijmp_list, jmp) {
			vrfy_bb_in_list(jmp->target, bb->children);
		} END_FOR_EACH_PTR(jmp);
		break;
	default:
		break;
	}

	FOR_EACH_PTR(bb->children, tmp) {
		vrfy_bb_in_list(bb, tmp->parents);
	} END_FOR_EACH_PTR(tmp);
}

static void vrfy_bb_flow(struct basic_block *bb)
{
	vrfy_children(bb);
	vrfy_parents(bb);
}

void vrfy_flow(struct entrypoint *ep)
{
	struct basic_block *bb;
	struct basic_block *entry = ep->entry->bb;

	FOR_EACH_PTR(ep->bbs, bb) {
		if (bb == entry)
			entry = NULL;
		vrfy_bb_flow(bb);
	} END_FOR_EACH_PTR(bb);
	assert(!entry);
}

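/*
 * Pack the control flow graph: bypass basic blocks that consist of
 * nothing but a branch, and merge a block into its parent when that
 * parent is its only parent and it is the parent's only child.
 */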
void pack_basic_blocks(struct entrypoint *ep)
{
	struct basic_block *bb;

	/* See if we can merge a bb into another one.. */
	FOR_EACH_PTR(ep->bbs, bb) {
		struct instruction *first, *insn;
		struct basic_block *parent, *child, *last;

		if (!bb_reachable(bb))
			continue;

		/*
		 * Just a branch?
		 */
		FOR_EACH_PTR(bb->insns, first) {
			if (!first->bb)
				continue;
			switch (first->opcode) {
			case OP_NOP:
			case OP_INLINED_CALL:
				continue;
			case OP_CBR:
			case OP_BR: {
				struct basic_block *replace;
				replace = rewrite_branch_bb(bb, first);
				if (replace) {
					kill_bb(bb);
					goto no_merge;
				}
			}
			/* fallthrough */
			default:
				goto out;
			}
		} END_FOR_EACH_PTR(first);

out:
		/*
		 * See if we only have one parent..
		 */
		last = NULL;
		FOR_EACH_PTR(bb->parents, parent) {
			if (last) {
				if (last != parent)
					goto no_merge;
				continue;
			}
			last = parent;
		} END_FOR_EACH_PTR(parent);

		parent = last;
		if (!parent || parent == bb)
			continue;

		/*
		 * Goodie. See if the parent can merge..
		 */
		FOR_EACH_PTR(parent->children, child) {
			if (child != bb)
				goto no_merge;
		} END_FOR_EACH_PTR(child);

		/*
		 * Merge the two.
		 */
		repeat_phase |= REPEAT_CFG_CLEANUP;

		parent->children = bb->children;
		bb->children = NULL;
		bb->parents = NULL;

		FOR_EACH_PTR(parent->children, child) {
			replace_bb_in_list(&child->parents, bb, parent, 0);
		} END_FOR_EACH_PTR(child);

		kill_instruction(delete_last_instruction(&parent->insns));
		FOR_EACH_PTR(bb->insns, insn) {
			if (!insn->bb)
				continue;
			assert(insn->bb == bb);
			insn->bb = parent;
			add_instruction(&parent->insns, insn);
		} END_FOR_EACH_PTR(insn);
		bb->insns = NULL;

	no_merge:
		/* nothing to do */;
	} END_FOR_EACH_PTR(bb);
}