xref: /linux/tools/bpf/bpftool/cfg.c (revision 8e07e0e3964ca4e23ce7b68e2096fe660a888942)
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include <linux/list.h>
#include <stdlib.h>
#include <string.h>

#include "cfg.h"
#include "main.h"
#include "xlated_dumper.h"

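/* The control flow graph (CFG) is represented as a list of functions.
 * Each function owns an ordered list of basic blocks (BBs), and each BB
 * keeps separate lists of its predecessor and successor edges.
 */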
struct cfg {
	struct list_head funcs;
	int func_num;
};

struct func_node {
	struct list_head l;
	struct list_head bbs;
	struct bpf_insn *start;
	struct bpf_insn *end;
	int idx;
	int bb_num;
};

struct bb_node {
	struct list_head l;
	struct list_head e_prevs;
	struct list_head e_succs;
	struct bpf_insn *head;
	struct bpf_insn *tail;
	int idx;
};

#define EDGE_FLAG_EMPTY		0x0
#define EDGE_FLAG_FALLTHROUGH	0x1
#define EDGE_FLAG_JUMP		0x2
struct edge_node {
	struct list_head l;
	struct bb_node *src;
	struct bb_node *dst;
	int flags;
};

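/* Besides the BBs carved out of the instruction stream, every function
 * gets a synthetic ENTRY block at the head of its BB list and a
 * synthetic EXIT block at the tail; real BB indices start at
 * NUM_FIXED_BLOCKS.
 */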
#define ENTRY_BLOCK_INDEX	0
#define EXIT_BLOCK_INDEX	1
#define NUM_FIXED_BLOCKS	2
#define func_prev(func)		list_prev_entry(func, l)
#define func_next(func)		list_next_entry(func, l)
#define bb_prev(bb)		list_prev_entry(bb, l)
#define bb_next(bb)		list_next_entry(bb, l)
#define entry_bb(func)		func_first_bb(func)
#define exit_bb(func)		func_last_bb(func)
#define cfg_first_func(cfg)	\
	list_first_entry(&cfg->funcs, struct func_node, l)
#define cfg_last_func(cfg)	\
	list_last_entry(&cfg->funcs, struct func_node, l)
#define func_first_bb(func)	\
	list_first_entry(&func->bbs, struct bb_node, l)
#define func_last_bb(func)	\
	list_last_entry(&func->bbs, struct bb_node, l)

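/* Insert a function starting at @insn into @cfg, keeping the function
 * list sorted by start address. If a function already starts at @insn,
 * return the existing node instead.
 */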
static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn)
{
	struct func_node *new_func, *func;

	list_for_each_entry(func, &cfg->funcs, l) {
		if (func->start == insn)
			return func;
		else if (func->start > insn)
			break;
	}

	func = func_prev(func);
	new_func = calloc(1, sizeof(*new_func));
	if (!new_func) {
		p_err("OOM when allocating FUNC node");
		return NULL;
	}
	new_func->start = insn;
	new_func->idx = cfg->func_num;
	list_add(&new_func->l, &func->l);
	cfg->func_num++;

	return new_func;
}

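/* Insert a BB starting at @insn into @func, keeping the BB list sorted
 * by head address. If a BB already starts at @insn, return the existing
 * node instead.
 */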
static struct bb_node *func_append_bb(struct func_node *func,
				      struct bpf_insn *insn)
{
	struct bb_node *new_bb, *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		if (bb->head == insn)
			return bb;
		else if (bb->head > insn)
			break;
	}

	bb = bb_prev(bb);
	new_bb = calloc(1, sizeof(*new_bb));
	if (!new_bb) {
		p_err("OOM when allocating BB node");
		return NULL;
	}
	new_bb->head = insn;
	INIT_LIST_HEAD(&new_bb->e_prevs);
	INIT_LIST_HEAD(&new_bb->e_succs);
	list_add(&new_bb->l, &bb->l);

	return new_bb;
}

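/* Allocate a BB that is not backed by any instruction; used for the
 * synthetic ENTRY and EXIT blocks.
 */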
static struct bb_node *func_insert_dummy_bb(struct list_head *after)
{
	struct bb_node *bb;

	bb = calloc(1, sizeof(*bb));
	if (!bb) {
		p_err("OOM when allocating BB node");
		return NULL;
	}

	INIT_LIST_HEAD(&bb->e_prevs);
	INIT_LIST_HEAD(&bb->e_succs);
	list_add(&bb->l, after);

	return bb;
}

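/* Partition the instruction sequence [@cur, @end) into functions: one
 * function starts at @cur, and one at the target of each pseudo call.
 * Each function then ends right before the start of its successor.
 */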
static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
				struct bpf_insn *end)
{
	struct func_node *func, *last_func;

	func = cfg_append_func(cfg, cur);
	if (!func)
		return true;

	for (; cur < end; cur++) {
		if (cur->code != (BPF_JMP | BPF_CALL))
			continue;
		if (cur->src_reg != BPF_PSEUDO_CALL)
			continue;
		func = cfg_append_func(cfg, cur + cur->off + 1);
		if (!func)
			return true;
	}

	last_func = cfg_last_func(cfg);
	last_func->end = end - 1;
	func = cfg_first_func(cfg);
	list_for_each_entry_from(func, &last_func->l, l) {
		func->end = func_next(func)->start - 1;
	}

	return false;
}

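/* Both the 64-bit (BPF_JMP) and the 32-bit (BPF_JMP32) jump classes can
 * alter control flow.
 */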
static bool is_jmp_insn(__u8 code)
{
	return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
}

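/* Record the head of every BB in @func. A BB starts at the function
 * entry and at every jump target; for conditional jumps, the
 * fall-through instruction (cur + 1) starts a BB as well. Calls and
 * exits do not begin a new BB here.
 */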
static bool func_partition_bb_head(struct func_node *func)
{
	struct bpf_insn *cur, *end;
	struct bb_node *bb;

	cur = func->start;
	end = func->end;
	INIT_LIST_HEAD(&func->bbs);
	bb = func_append_bb(func, cur);
	if (!bb)
		return true;

	for (; cur <= end; cur++) {
		if (is_jmp_insn(cur->code)) {
			__u8 opcode = BPF_OP(cur->code);

			if (opcode == BPF_EXIT || opcode == BPF_CALL)
				continue;

			bb = func_append_bb(func, cur + cur->off + 1);
			if (!bb)
				return true;

			if (opcode != BPF_JA) {
				bb = func_append_bb(func, cur + 1);
				if (!bb)
					return true;
			}
		}
	}

	return false;
}

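/* With all BB heads recorded, close each BB right before the head of
 * the next one and assign indices, starting at NUM_FIXED_BLOCKS so the
 * ENTRY and EXIT indices stay reserved.
 */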
static void func_partition_bb_tail(struct func_node *func)
{
	unsigned int bb_idx = NUM_FIXED_BLOCKS;
	struct bb_node *bb, *last;

	last = func_last_bb(func);
	last->tail = func->end;
	bb = func_first_bb(func);
	list_for_each_entry_from(bb, &last->l, l) {
		bb->tail = bb_next(bb)->head - 1;
		bb->idx = bb_idx++;
	}

	last->idx = bb_idx++;
	func->bb_num = bb_idx;
}

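/* Add the synthetic ENTRY and EXIT blocks to @func. */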
static bool func_add_special_bb(struct func_node *func)
{
	struct bb_node *bb;

	bb = func_insert_dummy_bb(&func->bbs);
	if (!bb)
		return true;
	bb->idx = ENTRY_BLOCK_INDEX;

	bb = func_insert_dummy_bb(&func_last_bb(func)->l);
	if (!bb)
		return true;
	bb->idx = EXIT_BLOCK_INDEX;

	return false;
}

static bool func_partition_bb(struct func_node *func)
{
	if (func_partition_bb_head(func))
		return true;

	func_partition_bb_tail(func);

	return false;
}

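/* Linear search for the BB whose first instruction is @insn. */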
static struct bb_node *func_search_bb_with_head(struct func_node *func,
						struct bpf_insn *insn)
{
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		if (bb->head == insn)
			return bb;
	}

	return NULL;
}

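/* Allocate an edge from @src to @dst; either end may be NULL, to be
 * filled in by the caller.
 */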
static struct edge_node *new_edge(struct bb_node *src, struct bb_node *dst,
				  int flags)
{
	struct edge_node *e;

	e = calloc(1, sizeof(*e));
	if (!e) {
		p_err("OOM when allocating edge node");
		return NULL;
	}

	if (src)
		e->src = src;
	if (dst)
		e->dst = dst;

	e->flags |= flags;

	return e;
}

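/* Connect the BBs of @func. Every real BB ends with a fall-through
 * edge, a jump edge (BPF_JA), or both (conditional jumps); a BB ending
 * in BPF_EXIT or a non-jump instruction simply falls through to the
 * next BB.
 */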
static bool func_add_bb_edges(struct func_node *func)
{
	struct bpf_insn *insn;
	struct edge_node *e;
	struct bb_node *bb;

	/* ENTRY falls through to the first real BB. */
	bb = entry_bb(func);
	e = new_edge(bb, bb_next(bb), EDGE_FLAG_FALLTHROUGH);
	if (!e)
		return true;
	list_add_tail(&e->l, &bb->e_succs);

	/* The last real BB falls through to EXIT. */
	bb = exit_bb(func);
	e = new_edge(bb_prev(bb), bb, EDGE_FLAG_FALLTHROUGH);
	if (!e)
		return true;
	list_add_tail(&e->l, &bb->e_prevs);

	bb = entry_bb(func);
	bb = bb_next(bb);
	list_for_each_entry_from(bb, &exit_bb(func)->l, l) {
		e = new_edge(bb, NULL, EDGE_FLAG_EMPTY);
		if (!e)
			return true;
		e->src = bb;

		insn = bb->tail;
		if (!is_jmp_insn(insn->code) ||
		    BPF_OP(insn->code) == BPF_EXIT) {
			e->dst = bb_next(bb);
			e->flags |= EDGE_FLAG_FALLTHROUGH;
			list_add_tail(&e->l, &bb->e_succs);
			continue;
		} else if (BPF_OP(insn->code) == BPF_JA) {
			e->dst = func_search_bb_with_head(func,
							  insn + insn->off + 1);
			e->flags |= EDGE_FLAG_JUMP;
			list_add_tail(&e->l, &bb->e_succs);
			continue;
		}

		/* Conditional jump: both a fall-through and a jump edge. */
		e->dst = bb_next(bb);
		e->flags |= EDGE_FLAG_FALLTHROUGH;
		list_add_tail(&e->l, &bb->e_succs);

		e = new_edge(bb, NULL, EDGE_FLAG_JUMP);
		if (!e)
			return true;
		e->src = bb;
		e->dst = func_search_bb_with_head(func, insn + insn->off + 1);
		list_add_tail(&e->l, &bb->e_succs);
	}

	return false;
}

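/* Build the full CFG for @insn: partition the sequence into functions,
 * then partition each function into BBs and wire up the edges. Returns
 * true on failure.
 */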
static bool cfg_build(struct cfg *cfg, struct bpf_insn *insn, unsigned int len)
{
	int cnt = len / sizeof(*insn);
	struct func_node *func;

	INIT_LIST_HEAD(&cfg->funcs);

	if (cfg_partition_funcs(cfg, insn, insn + cnt))
		return true;

	list_for_each_entry(func, &cfg->funcs, l) {
		if (func_partition_bb(func) || func_add_special_bb(func))
			return true;

		if (func_add_bb_edges(func))
			return true;
	}

	return false;
}

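/* Free every function, BB and edge in @cfg. */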
static void cfg_destroy(struct cfg *cfg)
{
	struct func_node *func, *func2;

	list_for_each_entry_safe(func, func2, &cfg->funcs, l) {
		struct bb_node *bb, *bb2;

		list_for_each_entry_safe(bb, bb2, &func->bbs, l) {
			struct edge_node *e, *e2;

			list_for_each_entry_safe(e, e2, &bb->e_prevs, l) {
				list_del(&e->l);
				free(e);
			}

			list_for_each_entry_safe(e, e2, &bb->e_succs, l) {
				list_del(&e->l);
				free(e);
			}

			list_del(&bb->l);
			free(bb);
		}

		list_del(&func->l);
		free(func);
	}
}

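/* Emit one DOT node per BB. ENTRY and EXIT are drawn as diamonds;
 * regular BBs are records labelled with the disassembly of the block.
 * Illustrative output shape (the exact disassembly depends on the
 * program):
 *
 *	fn_0_bb_2 [shape=record,style=filled,label="{\
 *	... block disassembly ...
 *	}"];
 */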
static void
draw_bb_node(struct func_node *func, struct bb_node *bb, struct dump_data *dd,
	     bool opcodes, bool linum)
{
	const char *shape;

	if (bb->idx == ENTRY_BLOCK_INDEX || bb->idx == EXIT_BLOCK_INDEX)
		shape = "Mdiamond";
	else
		shape = "record";

	printf("\tfn_%d_bb_%d [shape=%s,style=filled,label=\"",
	       func->idx, bb->idx, shape);

	if (bb->idx == ENTRY_BLOCK_INDEX) {
		printf("ENTRY");
	} else if (bb->idx == EXIT_BLOCK_INDEX) {
		printf("EXIT");
	} else {
		unsigned int start_idx;
		printf("{\\\n");
		start_idx = bb->head - func->start;
		dump_xlated_for_graph(dd, bb->head, bb->tail, start_idx,
				      opcodes, linum);
		printf("}");
	}

	printf("\"];\n\n");
}

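/* Emit one DOT edge per successor of @bb. All edges currently share the
 * same style, color and weight.
 */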
static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
{
	const char *style = "\"solid,bold\"";
	const char *color = "black";
	int func_idx = func->idx;
	struct edge_node *e;
	int weight = 10;

	if (list_empty(&bb->e_succs))
		return;

	list_for_each_entry(e, &bb->e_succs, l) {
		printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=%s, color=%s, weight=%d, constraint=true",
		       func_idx, e->src->idx, func_idx, e->dst->idx,
		       style, color, weight);
		printf("];\n");
	}
}

static void
func_output_bb_def(struct func_node *func, struct dump_data *dd,
		   bool opcodes, bool linum)
{
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		draw_bb_node(func, bb, dd, opcodes, linum);
	}
}

static void func_output_edges(struct func_node *func)
{
	int func_idx = func->idx;
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		draw_bb_succ_edges(func, bb);
	}

	/* Add an invisible edge from ENTRY to EXIT; this improves the
	 * graph layout.
	 */
	printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=\"invis\", constraint=true];\n",
	       func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
}

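/* Emit the whole CFG as one DOT digraph, with a dashed cluster subgraph
 * per function. The output can be rendered with, for example,
 * "dot -Tpng".
 */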
static void
cfg_dump(struct cfg *cfg, struct dump_data *dd, bool opcodes, bool linum)
{
	struct func_node *func;

	printf("digraph \"DOT graph for eBPF program\" {\n");
	list_for_each_entry(func, &cfg->funcs, l) {
		printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
		       func->idx, func->idx);
		func_output_bb_def(func, dd, opcodes, linum);
		func_output_edges(func);
		printf("}\n");
	}
	printf("}\n");
}

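/* Entry point: build the CFG for the @len bytes of (xlated) instructions
 * in @buf, print it as DOT on stdout, then free it.
 */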
void dump_xlated_cfg(struct dump_data *dd, void *buf, unsigned int len,
		     bool opcodes, bool linum)
{
	struct bpf_insn *insn = buf;
	struct cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	if (cfg_build(&cfg, insn, len))
		return;

	cfg_dump(&cfg, dd, opcodes, linum);

	cfg_destroy(&cfg);
}
489