// SPDX-License-Identifier: GPL-2.0
#include "block-range.h"
#include "annotate.h"
#include <assert.h>	/* assert() */
#include <stdlib.h>	/* malloc() */

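/*
 * All basic-block ranges live in a single global rbtree, sorted by address
 * and kept non-overlapping (see block_range__debug()); @blocks counts how
 * many blocks block_range__create() has carved out so far.
 */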
struct {
	struct rb_root root;
	u64 blocks;
} block_ranges;

static void block_range__debug(void)
{
	/*
	 * XXX still paranoid for now; see if we can make this depend on
	 * DEBUG=1 builds.
	 */
#if 1
	struct rb_node *rb;
	u64 old = 0; /* NULL isn't executable */

	for (rb = rb_first(&block_ranges.root); rb; rb = rb_next(rb)) {
		struct block_range *entry = rb_entry(rb, struct block_range, node);

		assert(old < entry->start);
		assert(entry->start <= entry->end); /* single instruction block; jump to a jump */

		old = entry->end;
	}
#endif
}

struct block_range *block_range__find(u64 addr)
{
	struct rb_node **p = &block_ranges.root.rb_node;
	struct rb_node *parent = NULL;
	struct block_range *entry;

	while (*p != NULL) {
		parent = *p;
		entry = rb_entry(parent, struct block_range, node);

		if (addr < entry->start)
			p = &parent->rb_left;
		else if (addr > entry->end)
			p = &parent->rb_right;
		else
			return entry;
	}

	return NULL;
}
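/*
 * Illustrative use only (the variable names below are made up, not taken
 * from the perf sources): a caller wanting the hit count for one address
 * could do:
 *
 *	struct block_range *br = block_range__find(addr);
 *	u64 hits = br ? br->coverage : 0;
 */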
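/*
 * Link @left immediately before @node in rbtree order: walk down to the
 * rightmost free slot of @node's left subtree and hook @left in there.
 * The caller must still rebalance with rb_insert_color().
 */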
static inline void rb_link_left_of_node(struct rb_node *left, struct rb_node *node)
{
	struct rb_node **p = &node->rb_left;
	while (*p) {
		node = *p;
		p = &node->rb_right;
	}
	rb_link_node(left, node, p);
}

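/*
 * Mirror image of the above: link @right immediately after @node in rbtree
 * order by descending to the leftmost free slot of @node's right subtree.
 * Again the caller must follow up with rb_insert_color().
 */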
static inline void rb_link_right_of_node(struct rb_node *right, struct rb_node *node)
{
	struct rb_node **p = &node->rb_right;
	while (*p) {
		node = *p;
		p = &node->rb_left;
	}
	rb_link_node(right, node, p);
}

/**
 * block_range__create
 * @start: branch target starting this basic block
 * @end:   branch ending this basic block
 *
 * Create all the required block ranges to precisely span the given range,
 * splitting any pre-existing ranges that partially overlap it.
 *
 * Return: iterator whose ->start and ->end are the first and last range
 *         covering [@start, @end]; on allocation failure one or both
 *         pointers may be NULL.
 */
struct block_range_iter block_range__create(u64 start, u64 end)
{
	struct rb_node **p = &block_ranges.root.rb_node;
	struct rb_node *n, *parent = NULL;
	struct block_range *next, *entry = NULL;
	struct block_range_iter iter = { NULL, NULL };

	while (*p != NULL) {
		parent = *p;
		entry = rb_entry(parent, struct block_range, node);

		if (start < entry->start)
			p = &parent->rb_left;
		else if (start > entry->end)
			p = &parent->rb_right;
		else
			break;
	}

	/*
	 * Didn't find anything; there's a hole at @start, however @end might
	 * still be inside or beyond the next range.
	 */
	if (!*p) {
		if (!entry) /* tree empty */
			goto do_whole;

		/*
		 * If the last node is before @start, advance one to find the next.
		 */
		n = parent;
		if (entry->end < start) {
			n = rb_next(n);
			if (!n)
				goto do_whole;
		}
		next = rb_entry(n, struct block_range, node);

		if (next->start <= end) { /* add head: [start...][n->start...] */
			struct block_range *head = malloc(sizeof(struct block_range));
			if (!head)
				return iter;

			*head = (struct block_range){
				.start		= start,
				.end		= next->start - 1,
				.is_target	= 1,
				.is_branch	= 0,
			};

			rb_link_left_of_node(&head->node, &next->node);
			rb_insert_color(&head->node, &block_ranges.root);
			block_range__debug();

			iter.start = head;
			goto do_tail;
		}

do_whole:
		/*
		 * The whole [start..end] range is non-overlapping.
		 */
		entry = malloc(sizeof(struct block_range));
		if (!entry)
			return iter;

		*entry = (struct block_range){
			.start		= start,
			.end		= end,
			.is_target	= 1,
			.is_branch	= 1,
		};

		rb_link_node(&entry->node, parent, p);
		rb_insert_color(&entry->node, &block_ranges.root);
		block_range__debug();

		iter.start = entry;
		iter.end   = entry;
		goto done;
	}

	/*
	 * We found a range that overlapped with ours, split if needed.
	 */
	if (entry->start < start) { /* split: [e->start...][start...] */
		struct block_range *head = malloc(sizeof(struct block_range));
		if (!head)
			return iter;

		*head = (struct block_range){
			.start		= entry->start,
			.end		= start - 1,
			.is_target	= entry->is_target,
			.is_branch	= 0,

			.coverage	= entry->coverage,
			.entry		= entry->entry,
		};

		entry->start		= start;
		entry->is_target	= 1;
		entry->entry		= 0;

		rb_link_left_of_node(&head->node, &entry->node);
		rb_insert_color(&head->node, &block_ranges.root);
		block_range__debug();

	} else if (entry->start == start)
		entry->is_target = 1;

	iter.start = entry;

do_tail:
	/*
	 * At this point we've got: @iter.start = [@start...] but @end can still be
	 * inside or beyond it.
	 */
	entry = iter.start;
	for (;;) {
		/*
		 * If @end is inside @entry, split.
		 */
		if (end < entry->end) { /* split: [...end][...e->end] */
			struct block_range *tail = malloc(sizeof(struct block_range));
			if (!tail)
				return iter;

			*tail = (struct block_range){
				.start		= end + 1,
				.end		= entry->end,
				.is_target	= 0,
				.is_branch	= entry->is_branch,

				.coverage	= entry->coverage,
				.taken		= entry->taken,
				.pred		= entry->pred,
			};

			entry->end		= end;
			entry->is_branch	= 1;
			entry->taken		= 0;
			entry->pred		= 0;

			rb_link_right_of_node(&tail->node, &entry->node);
			rb_insert_color(&tail->node, &block_ranges.root);
			block_range__debug();

			iter.end = entry;
			goto done;
		}

		/*
		 * If @end matches @entry, done
		 */
		if (end == entry->end) {
			entry->is_branch = 1;
			iter.end = entry;
			goto done;
		}

		next = block_range__next(entry);
		if (!next)
			goto add_tail;

		/*
		 * If @end is beyond @entry but not inside @next, add tail.
		 */
		if (end < next->start) { /* add tail: [...e->end][...end] */
			struct block_range *tail;
add_tail:
			tail = malloc(sizeof(struct block_range));
			if (!tail)
				return iter;

			*tail = (struct block_range){
				.start		= entry->end + 1,
				.end		= end,
				.is_target	= 0,
				.is_branch	= 1,
			};

			rb_link_right_of_node(&tail->node, &entry->node);
			rb_insert_color(&tail->node, &block_ranges.root);
			block_range__debug();

			iter.end = tail;
			goto done;
		}

		/*
		 * If there is a hole between @entry and @next, fill it.
		 */
		if (entry->end + 1 != next->start) {
			struct block_range *hole = malloc(sizeof(struct block_range));
			if (!hole)
				return iter;

			*hole = (struct block_range){
				.start		= entry->end + 1,
				.end		= next->start - 1,
				.is_target	= 0,
				.is_branch	= 0,
			};

			rb_link_left_of_node(&hole->node, &next->node);
			rb_insert_color(&hole->node, &block_ranges.root);
			block_range__debug();
		}

		entry = next;
	}

done:
	assert(iter.start->start == start && iter.start->is_target);
	assert(iter.end->end == end && iter.end->is_branch);

	block_ranges.blocks++;

	return iter;
}
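
/*
 * Sketch of how a caller can consume the iterator returned above, using only
 * what is visible in this file (block-range.h may provide nicer iterator
 * helpers; @from/@to and the coverage bump are illustrative).  Walking from
 * iter.start to iter.end visits every range that together spans the block:
 *
 *	struct block_range_iter iter = block_range__create(from, to);
 *	struct block_range *br;
 *
 *	if (!iter.start || !iter.end)
 *		return;
 *	for (br = iter.start; br; br = block_range__next(br)) {
 *		br->coverage++;
 *		if (br == iter.end)
 *			break;
 *	}
 */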


/*
 * Compute coverage as:
 *
 *    br->coverage / br->sym->max_coverage
 *
 * This ensures each symbol has a 100% spot, reflecting that each symbol has
 * a most covered section.
 *
 * Returns [0-1] for coverage and -1 if we had no data whatsoever or the
 * symbol does not exist.
 */
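/*
 * Worked example with made-up numbers: if the symbol's hottest range was hit
 * 200 times (its annotation's max_coverage) and @br->coverage is 50, the
 * result is 50.0 / 200 = 0.25, i.e. 25% of the symbol's peak coverage.
 */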
double block_range__coverage(struct block_range *br)
{
	struct symbol *sym;

	if (!br) {
		if (block_ranges.blocks)
			return 0;

		return -1;
	}

	sym = br->sym;
	if (!sym)
		return -1;

	return (double)br->coverage / symbol__annotation(sym)->max_coverage;
}