xref: /linux/kernel/bpf/log.c (revision f17c69649c698e4df3cfe0010b7bbf142dec3e40)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5  */
6 #include <uapi/linux/btf.h>
7 #include <linux/kernel.h>
8 #include <linux/types.h>
9 #include <linux/bpf.h>
10 #include <linux/bpf_verifier.h>
11 #include <linux/math64.h>
12 
13 #define verbose(env, fmt, args...) bpf_verifier_log_write(env, fmt, ##args)
14 
15 static bool bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
16 {
17 	/* ubuf and len_total should both be specified (or not) together */
18 	if (!!log->ubuf != !!log->len_total)
19 		return false;
20 	/* log buf without log_level is meaningless */
21 	if (log->ubuf && log->level == 0)
22 		return false;
23 	if (log->level & ~BPF_LOG_MASK)
24 		return false;
25 	if (log->len_total > UINT_MAX >> 2)
26 		return false;
27 	return true;
28 }
29 
30 int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
31 		  char __user *log_buf, u32 log_size)
32 {
33 	log->level = log_level;
34 	log->ubuf = log_buf;
35 	log->len_total = log_size;
36 
37 	/* log attributes have to be sane */
38 	if (!bpf_verifier_log_attr_valid(log))
39 		return -EINVAL;
40 
41 	return 0;
42 }
43 
44 static void bpf_vlog_update_len_max(struct bpf_verifier_log *log, u32 add_len)
45 {
46 	/* add_len includes terminal \0, so no need for +1. */
47 	u64 len = log->end_pos + add_len;
48 
49 	/* log->len_max could be larger than our current len due to
50 	 * bpf_vlog_reset() calls, so we maintain the max of any length at any
51 	 * previous point
52 	 */
53 	if (len > UINT_MAX)
54 		log->len_max = UINT_MAX;
55 	else if (len > log->len_max)
56 		log->len_max = len;
57 }
58 
/*
 * Core log output routine: format a message into log->kbuf and flush it
 * to its destination according to log->level:
 *  - BPF_LOG_KERNEL: print to the kernel log via pr_err() and return;
 *  - BPF_LOG_FIXED: append linearly into ubuf, silently dropping bytes
 *    once len_total is exhausted;
 *  - rotating (default): keep only the newest len_total bytes; start_pos
 *    and end_pos are monotonically growing virtual offsets mapped onto
 *    ubuf with modular arithmetic, so the buffer wraps around.
 *
 * In both user-buffer modes len_max records the would-be total length so
 * truncation can be reported at finalization. On copy_to_user() failure
 * log->ubuf is NULLed; the fault is reported later by bpf_vlog_finalize().
 */
void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	u64 cur_pos;
	u32 new_n, n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	if (log->level == BPF_LOG_KERNEL) {
		bool newline = n > 0 && log->kbuf[n - 1] == '\n';

		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
		return;
	}

	n += 1; /* include terminating zero */
	bpf_vlog_update_len_max(log, n);

	if (log->level & BPF_LOG_FIXED) {
		/* check if we have at least something to put into user buf */
		new_n = 0;
		if (log->end_pos < log->len_total) {
			new_n = min_t(u32, log->len_total - log->end_pos, n);
			log->kbuf[new_n - 1] = '\0';
		}

		/* virtual position advances even when output is dropped */
		cur_pos = log->end_pos;
		log->end_pos += n - 1; /* don't count terminating '\0' */

		if (log->ubuf && new_n &&
		    copy_to_user(log->ubuf + cur_pos, log->kbuf, new_n))
			goto fail;
	} else {
		u64 new_end, new_start;
		u32 buf_start, buf_end, new_n;

		/* advance virtual end; drag start along once the window
		 * would exceed len_total (oldest content gets overwritten)
		 */
		new_end = log->end_pos + n;
		if (new_end - log->start_pos >= log->len_total)
			new_start = new_end - log->len_total;
		else
			new_start = log->start_pos;

		log->start_pos = new_start;
		log->end_pos = new_end - 1; /* don't count terminating '\0' */

		if (!log->ubuf)
			return;

		/* copy at most the last len_total bytes of the message */
		new_n = min(n, log->len_total);
		cur_pos = new_end - new_n;
		div_u64_rem(cur_pos, log->len_total, &buf_start);
		div_u64_rem(new_end, log->len_total, &buf_end);
		/* new_end and buf_end are exclusive indices, so if buf_end is
		 * exactly zero, then it actually points right to the end of
		 * ubuf and there is no wrap around
		 */
		if (buf_end == 0)
			buf_end = log->len_total;

		/* if buf_start > buf_end, we wrapped around;
		 * if buf_start == buf_end, then we fill ubuf completely; we
		 * can't have buf_start == buf_end to mean that there is
		 * nothing to write, because we always write at least
		 * something, even if terminal '\0'
		 */
		if (buf_start < buf_end) {
			/* message fits within contiguous chunk of ubuf */
			if (copy_to_user(log->ubuf + buf_start,
					 log->kbuf + n - new_n,
					 buf_end - buf_start))
				goto fail;
		} else {
			/* message wraps around the end of ubuf, copy in two chunks */
			if (copy_to_user(log->ubuf + buf_start,
					 log->kbuf + n - new_n,
					 log->len_total - buf_start))
				goto fail;
			if (copy_to_user(log->ubuf,
					 log->kbuf + n - buf_end,
					 buf_end))
				goto fail;
		}
	}

	return;
fail:
	log->ubuf = NULL;
}
147 
/*
 * Roll the log back to virtual position new_pos, discarding output emitted
 * after it (the verifier uses this to drop speculative log output). Works
 * for both fixed and rotating modes; a '\0' is written at the new logical
 * end so the user buffer stays a terminated string. new_pos must not be
 * beyond the current end position.
 */
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos)
{
	char zero = 0;
	u32 pos;

	if (WARN_ON_ONCE(new_pos > log->end_pos))
		return;

	if (!bpf_verifier_log_needed(log) || log->level == BPF_LOG_KERNEL)
		return;

	/* if position to which we reset is beyond current log window,
	 * then we didn't preserve any useful content and should adjust
	 * start_pos to end up with an empty log (start_pos == end_pos)
	 */
	log->end_pos = new_pos;
	if (log->end_pos < log->start_pos)
		log->start_pos = log->end_pos;

	if (!log->ubuf)
		return;

	/* map the new end position onto a ubuf offset: linear for the
	 * fixed mode, modulo len_total for the rotating mode
	 */
	if (log->level & BPF_LOG_FIXED)
		pos = log->end_pos + 1;
	else
		div_u64_rem(new_pos, log->len_total, &pos);

	if (pos < log->len_total && put_user(zero, log->ubuf + pos))
		log->ubuf = NULL;
}
178 
/* Reverse the first len bytes of buf in place. */
static void bpf_vlog_reverse_kbuf(char *buf, int len)
{
	int lo = 0, hi = len - 1;

	while (lo < hi) {
		char tmp = buf[lo];

		buf[lo++] = buf[hi];
		buf[hi--] = tmp;
	}
}
186 
187 static int bpf_vlog_reverse_ubuf(struct bpf_verifier_log *log, int start, int end)
188 {
189 	/* we split log->kbuf into two equal parts for both ends of array */
190 	int n = sizeof(log->kbuf) / 2, nn;
191 	char *lbuf = log->kbuf, *rbuf = log->kbuf + n;
192 
193 	/* Read ubuf's section [start, end) two chunks at a time, from left
194 	 * and right side; within each chunk, swap all the bytes; after that
195 	 * reverse the order of lbuf and rbuf and write result back to ubuf.
196 	 * This way we'll end up with swapped contents of specified
197 	 * [start, end) ubuf segment.
198 	 */
199 	while (end - start > 1) {
200 		nn = min(n, (end - start ) / 2);
201 
202 		if (copy_from_user(lbuf, log->ubuf + start, nn))
203 			return -EFAULT;
204 		if (copy_from_user(rbuf, log->ubuf + end - nn, nn))
205 			return -EFAULT;
206 
207 		bpf_vlog_reverse_kbuf(lbuf, nn);
208 		bpf_vlog_reverse_kbuf(rbuf, nn);
209 
210 		/* we write lbuf to the right end of ubuf, while rbuf to the
211 		 * left one to end up with properly reversed overall ubuf
212 		 */
213 		if (copy_to_user(log->ubuf + start, rbuf, nn))
214 			return -EFAULT;
215 		if (copy_to_user(log->ubuf + end - nn, lbuf, nn))
216 			return -EFAULT;
217 
218 		start += nn;
219 		end -= nn;
220 	}
221 
222 	return 0;
223 }
224 
/*
 * Finish logging: if the rotating-mode log wrapped around, rotate the user
 * buffer in place so its contents start at ubuf[0] as one contiguous
 * zero-terminated string. Reports the total length that would have been
 * needed via *log_size_actual. Returns 0 on success, -EFAULT if a user
 * copy failed at any point, or -ENOSPC if the log was truncated.
 */
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual)
{
	u32 sublen;
	int err;

	*log_size_actual = 0;
	if (!log || log->level == 0 || log->level == BPF_LOG_KERNEL)
		return 0;

	if (!log->ubuf)
		goto skip_log_rotate;
	/* If we never truncated log, there is nothing to move around. */
	if (log->start_pos == 0)
		goto skip_log_rotate;

	/* Otherwise we need to rotate log contents to make it start from the
	 * buffer beginning and be a continuous zero-terminated string. Note
	 * that if log->start_pos != 0 then we definitely filled up entire log
	 * buffer with no gaps, and we just need to shift buffer contents to
	 * the left by (log->start_pos % log->len_total) bytes.
	 *
	 * Unfortunately, user buffer could be huge and we don't want to
	 * allocate temporary kernel memory of the same size just to shift
	 * contents in a straightforward fashion. Instead, we'll be clever and
	 * do in-place array rotation. This is a leetcode-style problem, which
	 * could be solved by three rotations.
	 *
	 * Let's say we have log buffer that has to be shifted left by 7 bytes
	 * (spaces and vertical bar is just for demonstrative purposes):
	 *   E F G H I J K | A B C D
	 *
	 * First, we reverse entire array:
	 *   D C B A | K J I H G F E
	 *
	 * Then we rotate first 4 bytes (DCBA) and separately last 7 bytes
	 * (KJIHGFE), resulting in a properly rotated array:
	 *   A B C D | E F G H I J K
	 *
	 * We'll utilize log->kbuf to read user memory chunk by chunk, swap
	 * bytes, and write them back. Doing it byte-by-byte would be
	 * unnecessarily inefficient. Altogether we are going to read and
	 * write each byte twice, for total 4 memory copies between kernel and
	 * user space.
	 */

	/* length of the chopped off part that will be the beginning;
	 * len(ABCD) in the example above
	 */
	div_u64_rem(log->start_pos, log->len_total, &sublen);
	sublen = log->len_total - sublen;

	err = bpf_vlog_reverse_ubuf(log, 0, log->len_total);
	err = err ?: bpf_vlog_reverse_ubuf(log, 0, sublen);
	err = err ?: bpf_vlog_reverse_ubuf(log, sublen, log->len_total);
	if (err)
		log->ubuf = NULL;

skip_log_rotate:
	*log_size_actual = log->len_max;

	/* properly initialized log has either both ubuf!=NULL and len_total>0
	 * or ubuf==NULL and len_total==0, so if this condition doesn't hold,
	 * we got a fault somewhere along the way, so report it back
	 */
	if (!!log->ubuf != !!log->len_total)
		return -EFAULT;

	/* did truncation actually happen? */
	if (log->ubuf && log->len_max > log->len_total)
		return -ENOSPC;

	return 0;
}
298 
299 /* log_level controls verbosity level of eBPF verifier.
300  * bpf_verifier_log_write() is used to dump the verification trace to the log,
301  * so the user can figure out what's wrong with the program
302  */
303 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
304 					   const char *fmt, ...)
305 {
306 	va_list args;
307 
308 	if (!bpf_verifier_log_needed(&env->log))
309 		return;
310 
311 	va_start(args, fmt);
312 	bpf_verifier_vlog(&env->log, fmt, args);
313 	va_end(args);
314 }
315 EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
316 
317 __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
318 			    const char *fmt, ...)
319 {
320 	va_list args;
321 
322 	if (!bpf_verifier_log_needed(log))
323 		return;
324 
325 	va_start(args, fmt);
326 	bpf_verifier_vlog(log, fmt, args);
327 	va_end(args);
328 }
329 EXPORT_SYMBOL_GPL(bpf_log);
330 
331 static const struct bpf_line_info *
332 find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
333 {
334 	const struct bpf_line_info *linfo;
335 	const struct bpf_prog *prog;
336 	u32 i, nr_linfo;
337 
338 	prog = env->prog;
339 	nr_linfo = prog->aux->nr_linfo;
340 
341 	if (!nr_linfo || insn_off >= prog->len)
342 		return NULL;
343 
344 	linfo = prog->aux->linfo;
345 	for (i = 1; i < nr_linfo; i++)
346 		if (insn_off < linfo[i].insn_off)
347 			break;
348 
349 	return &linfo[i - 1];
350 }
351 
/* Return a pointer past any leading whitespace in s. */
static const char *ltrim(const char *s)
{
	for (; isspace(*s); s++)
		;
	return s;
}
359 
360 __printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
361 				  u32 insn_off,
362 				  const char *prefix_fmt, ...)
363 {
364 	const struct bpf_line_info *linfo;
365 
366 	if (!bpf_verifier_log_needed(&env->log))
367 		return;
368 
369 	linfo = find_linfo(env, insn_off);
370 	if (!linfo || linfo == env->prev_linfo)
371 		return;
372 
373 	if (prefix_fmt) {
374 		va_list args;
375 
376 		va_start(args, prefix_fmt);
377 		bpf_verifier_vlog(&env->log, prefix_fmt, args);
378 		va_end(args);
379 	}
380 
381 	verbose(env, "%s\n",
382 		ltrim(btf_name_by_offset(env->prog->aux->btf,
383 					 linfo->line_off)));
384 
385 	env->prev_linfo = linfo;
386 }
387 
388 static const char *btf_type_name(const struct btf *btf, u32 id)
389 {
390 	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
391 }
392 
393 /* string representation of 'enum bpf_reg_type'
394  *
395  * Note that reg_type_str() can not appear more than once in a single verbose()
396  * statement.
397  */
398 const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type)
399 {
400 	char postfix[16] = {0}, prefix[64] = {0};
401 	static const char * const str[] = {
402 		[NOT_INIT]		= "?",
403 		[SCALAR_VALUE]		= "scalar",
404 		[PTR_TO_CTX]		= "ctx",
405 		[CONST_PTR_TO_MAP]	= "map_ptr",
406 		[PTR_TO_MAP_VALUE]	= "map_value",
407 		[PTR_TO_STACK]		= "fp",
408 		[PTR_TO_PACKET]		= "pkt",
409 		[PTR_TO_PACKET_META]	= "pkt_meta",
410 		[PTR_TO_PACKET_END]	= "pkt_end",
411 		[PTR_TO_FLOW_KEYS]	= "flow_keys",
412 		[PTR_TO_SOCKET]		= "sock",
413 		[PTR_TO_SOCK_COMMON]	= "sock_common",
414 		[PTR_TO_TCP_SOCK]	= "tcp_sock",
415 		[PTR_TO_TP_BUFFER]	= "tp_buffer",
416 		[PTR_TO_XDP_SOCK]	= "xdp_sock",
417 		[PTR_TO_BTF_ID]		= "ptr_",
418 		[PTR_TO_MEM]		= "mem",
419 		[PTR_TO_BUF]		= "buf",
420 		[PTR_TO_FUNC]		= "func",
421 		[PTR_TO_MAP_KEY]	= "map_key",
422 		[CONST_PTR_TO_DYNPTR]	= "dynptr_ptr",
423 	};
424 
425 	if (type & PTR_MAYBE_NULL) {
426 		if (base_type(type) == PTR_TO_BTF_ID)
427 			strncpy(postfix, "or_null_", 16);
428 		else
429 			strncpy(postfix, "_or_null", 16);
430 	}
431 
432 	snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s",
433 		 type & MEM_RDONLY ? "rdonly_" : "",
434 		 type & MEM_RINGBUF ? "ringbuf_" : "",
435 		 type & MEM_USER ? "user_" : "",
436 		 type & MEM_PERCPU ? "percpu_" : "",
437 		 type & MEM_RCU ? "rcu_" : "",
438 		 type & PTR_UNTRUSTED ? "untrusted_" : "",
439 		 type & PTR_TRUSTED ? "trusted_" : ""
440 	);
441 
442 	snprintf(env->tmp_str_buf, TMP_STR_BUF_LEN, "%s%s%s",
443 		 prefix, str[base_type(type)], postfix);
444 	return env->tmp_str_buf;
445 }
446 
447 const char *dynptr_type_str(enum bpf_dynptr_type type)
448 {
449 	switch (type) {
450 	case BPF_DYNPTR_TYPE_LOCAL:
451 		return "local";
452 	case BPF_DYNPTR_TYPE_RINGBUF:
453 		return "ringbuf";
454 	case BPF_DYNPTR_TYPE_SKB:
455 		return "skb";
456 	case BPF_DYNPTR_TYPE_XDP:
457 		return "xdp";
458 	case BPF_DYNPTR_TYPE_INVALID:
459 		return "<invalid>";
460 	default:
461 		WARN_ONCE(1, "unknown dynptr type %d\n", type);
462 		return "<unknown>";
463 	}
464 }
465 
466 const char *iter_type_str(const struct btf *btf, u32 btf_id)
467 {
468 	if (!btf || btf_id == 0)
469 		return "<invalid>";
470 
471 	/* we already validated that type is valid and has conforming name */
472 	return btf_type_name(btf, btf_id) + sizeof(ITER_PREFIX) - 1;
473 }
474 
475 const char *iter_state_str(enum bpf_iter_state state)
476 {
477 	switch (state) {
478 	case BPF_ITER_STATE_ACTIVE:
479 		return "active";
480 	case BPF_ITER_STATE_DRAINED:
481 		return "drained";
482 	case BPF_ITER_STATE_INVALID:
483 		return "<invalid>";
484 	default:
485 		WARN_ONCE(1, "unknown iter state %d\n", state);
486 		return "<unknown>";
487 	}
488 }
489 
/* single-character encoding of stack slot types used in state dumps */
static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
	[STACK_DYNPTR]	= 'd',
	[STACK_ITER]	= 'i',
};
498 
499 static void print_liveness(struct bpf_verifier_env *env,
500 			   enum bpf_reg_liveness live)
501 {
502 	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
503 	    verbose(env, "_");
504 	if (live & REG_LIVE_READ)
505 		verbose(env, "r");
506 	if (live & REG_LIVE_WRITTEN)
507 		verbose(env, "w");
508 	if (live & REG_LIVE_DONE)
509 		verbose(env, "D");
510 }
511 
512 #define UNUM_MAX_DECIMAL U16_MAX
513 #define SNUM_MAX_DECIMAL S16_MAX
514 #define SNUM_MIN_DECIMAL S16_MIN
515 
516 static bool is_unum_decimal(u64 num)
517 {
518 	return num <= UNUM_MAX_DECIMAL;
519 }
520 
521 static bool is_snum_decimal(s64 num)
522 {
523 	return num >= SNUM_MIN_DECIMAL && num <= SNUM_MAX_DECIMAL;
524 }
525 
526 static void verbose_unum(struct bpf_verifier_env *env, u64 num)
527 {
528 	if (is_unum_decimal(num))
529 		verbose(env, "%llu", num);
530 	else
531 		verbose(env, "%#llx", num);
532 }
533 
534 static void verbose_snum(struct bpf_verifier_env *env, s64 num)
535 {
536 	if (is_snum_decimal(num))
537 		verbose(env, "%lld", num);
538 	else
539 		verbose(env, "%#llx", num);
540 }
541 
/*
 * Print the non-trivial min/max bounds of a scalar register as a
 * comma-separated list, merging bounds that hold the same value into a
 * single "name1=name2=value" entry.
 */
static void print_scalar_ranges(struct bpf_verifier_env *env,
				const struct bpf_reg_state *reg,
				const char **sep)
{
	/* For signed ranges, we want to unify 64-bit and 32-bit values in the
	 * output as much as possible, but there is a bit of a complication.
	 * If we choose to print values as decimals, this is natural to do,
	 * because negative 64-bit and 32-bit values >= -S32_MIN have the same
	 * representation due to sign extension. But if we choose to print
	 * them in hex format (see is_snum_decimal()), then sign extension is
	 * misleading.
	 * E.g., smin=-2 and smin32=-2 are exactly the same in decimal, but in
	 * hex they will be smin=0xfffffffffffffffe and smin32=0xfffffffe, two
	 * very different numbers.
	 * So we avoid sign extension if we choose to print values in hex.
	 */
	struct {
		const char *name;
		u64 val;
		bool omit;	/* true if bound is trivial (full range) */
	} minmaxs[] = {
		{"smin",   reg->smin_value,         reg->smin_value == S64_MIN},
		{"smax",   reg->smax_value,         reg->smax_value == S64_MAX},
		{"umin",   reg->umin_value,         reg->umin_value == 0},
		{"umax",   reg->umax_value,         reg->umax_value == U64_MAX},
		{"smin32",
		 is_snum_decimal((s64)reg->s32_min_value)
			 ? (s64)reg->s32_min_value
			 : (u32)reg->s32_min_value, reg->s32_min_value == S32_MIN},
		{"smax32",
		 is_snum_decimal((s64)reg->s32_max_value)
			 ? (s64)reg->s32_max_value
			 : (u32)reg->s32_max_value, reg->s32_max_value == S32_MAX},
		{"umin32", reg->u32_min_value,      reg->u32_min_value == 0},
		{"umax32", reg->u32_max_value,      reg->u32_max_value == U32_MAX},
	}, *m1, *m2, *mend = &minmaxs[ARRAY_SIZE(minmaxs)];
	bool neg1, neg2;

	for (m1 = &minmaxs[0]; m1 < mend; m1++) {
		if (m1->omit)
			continue;

		neg1 = m1->name[0] == 's' && (s64)m1->val < 0;

		verbose(env, "%s%s=", *sep, m1->name);
		*sep = ",";

		/* step by 2 so that min bounds are only merged with later
		 * min bounds and max bounds with later max bounds (entries
		 * alternate min/max in minmaxs[])
		 */
		for (m2 = m1 + 2; m2 < mend; m2 += 2) {
			if (m2->omit || m2->val != m1->val)
				continue;
			/* don't mix negatives with positives */
			neg2 = m2->name[0] == 's' && (s64)m2->val < 0;
			if (neg2 != neg1)
				continue;
			m2->omit = true;
			verbose(env, "%s=", m2->name);
		}

		if (m1->name[0] == 's')
			verbose_snum(env, m1->val);
		else
			verbose_unum(env, m1->val);
	}
}
606 
607 static bool type_is_map_ptr(enum bpf_reg_type t) {
608 	switch (base_type(t)) {
609 	case CONST_PTR_TO_MAP:
610 	case PTR_TO_MAP_KEY:
611 	case PTR_TO_MAP_VALUE:
612 		return true;
613 	default:
614 		return false;
615 	}
616 }
617 
/*
 * Print a single register's state. Known scalars print as a bare value
 * (with a leading "P" precision mark); everything else prints as the
 * type string followed by a parenthesized, comma-separated attribute
 * list (id, ref_obj_id, map info, offset, packet range, value bounds,
 * var_off tnum).
 */
static void print_reg_state(struct bpf_verifier_env *env,
			    const struct bpf_func_state *state,
			    const struct bpf_reg_state *reg)
{
	enum bpf_reg_type t;
	const char *sep = "";

	t = reg->type;
	if (t == SCALAR_VALUE && reg->precise)
		verbose(env, "P");
	if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) {
		/* reg->off should be 0 for SCALAR_VALUE */
		verbose_snum(env, reg->var_off.value + reg->off);
		return;
	}
/*
 * _a stands for append, was shortened to avoid multiline statements below.
 * This macro is used to output a comma separated list of attributes.
 */
#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, ##__VA_ARGS__); sep = ","; })

	verbose(env, "%s", reg_type_str(env, t));
	if (t == PTR_TO_STACK) {
		/* annotate pointers into another frame's stack */
		if (state->frameno != reg->frameno)
			verbose(env, "[%d]", reg->frameno);
		if (tnum_is_const(reg->var_off)) {
			verbose_snum(env, reg->var_off.value + reg->off);
			return;
		}
	}
	if (base_type(t) == PTR_TO_BTF_ID)
		verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id));
	verbose(env, "(");
	if (reg->id)
		verbose_a("id=%d", reg->id);
	if (reg->ref_obj_id)
		verbose_a("ref_obj_id=%d", reg->ref_obj_id);
	if (type_is_non_owning_ref(reg->type))
		verbose_a("%s", "non_own_ref");
	if (type_is_map_ptr(t)) {
		if (reg->map_ptr->name[0])
			verbose_a("map=%s", reg->map_ptr->name);
		verbose_a("ks=%d,vs=%d",
			  reg->map_ptr->key_size,
			  reg->map_ptr->value_size);
	}
	if (t != SCALAR_VALUE && reg->off) {
		verbose_a("off=");
		verbose_snum(env, reg->off);
	}
	if (type_is_pkt_pointer(t)) {
		verbose_a("r=");
		verbose_unum(env, reg->range);
	}
	if (tnum_is_const(reg->var_off)) {
		/* a pointer register with fixed offset */
		if (reg->var_off.value) {
			verbose_a("imm=");
			verbose_snum(env, reg->var_off.value);
		}
	} else {
		print_scalar_ranges(env, reg, &sep);
		if (!tnum_is_unknown(reg->var_off)) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose_a("var_off=%s", tn_buf);
		}
	}
	verbose(env, ")");

#undef verbose_a
}
691 
/*
 * Dump a function frame's verifier state: all (or only scratched)
 * registers, then stack slots (spills, dynptrs, iterators, misc/zero),
 * then acquired references and callback flags. With print_all == false
 * only state touched since the previous dump is printed, and the scratch
 * marks are cleared afterwards.
 */
void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state,
			  bool print_all)
{
	const struct bpf_reg_state *reg;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		if (reg->type == NOT_INIT)
			continue;
		if (!print_all && !reg_scratched(env, i))
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=");
		print_reg_state(env, state, reg);
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		u8 slot_type;
		int j;

		if (!print_all && !stack_slot_scratched(env, i))
			continue;

		/* build per-byte slot type string; skip fully invalid slots */
		for (j = 0; j < BPF_REG_SIZE; j++) {
			slot_type = state->stack[i].slot_type[j];
			if (slot_type != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[slot_type];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;

		/* the last byte of a slot determines how to render it */
		reg = &state->stack[i].spilled_ptr;
		switch (state->stack[i].slot_type[BPF_REG_SIZE - 1]) {
		case STACK_SPILL:
			/* print MISC/ZERO/INVALID slots above subreg spill */
			for (j = 0; j < BPF_REG_SIZE; j++)
				if (state->stack[i].slot_type[j] == STACK_SPILL)
					break;
			types_buf[j] = '\0';

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=%s", types_buf);
			print_reg_state(env, state, reg);
			break;
		case STACK_DYNPTR:
			/* skip to main dynptr slot */
			i += BPF_DYNPTR_NR_SLOTS - 1;
			reg = &state->stack[i].spilled_ptr;

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=dynptr_%s", dynptr_type_str(reg->dynptr.type));
			if (reg->ref_obj_id)
				verbose(env, "(ref_id=%d)", reg->ref_obj_id);
			break;
		case STACK_ITER:
			/* only main slot has ref_obj_id set; skip others */
			if (!reg->ref_obj_id)
				continue;

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=iter_%s(ref_id=%d,state=%s,depth=%u)",
				iter_type_str(reg->iter.btf, reg->iter.btf_id),
				reg->ref_obj_id, iter_state_str(reg->iter.state),
				reg->iter.depth);
			break;
		case STACK_MISC:
		case STACK_ZERO:
		default:
			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=%s", types_buf);
			break;
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	if (state->in_callback_fn)
		verbose(env, " cb");
	if (state->in_async_callback_fn)
		verbose(env, " async_cb");
	verbose(env, "\n");
	if (!print_all)
		mark_verifier_state_clean(env);
}
790 
/* Number of padding characters needed after position pos so that the
 * following character lands on an alignment boundary (a multiple of
 * BPF_LOG_MIN_ALIGNMENT, but no earlier than BPF_LOG_ALIGNMENT);
 * presumably the -1 accounts for the ';' printed right after the
 * padding by print_insn_state() — confirm against callers.
 */
static inline u32 vlog_alignment(u32 pos)
{
	return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
			BPF_LOG_MIN_ALIGNMENT) - pos - 1;
}
796 
797 void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state)
798 {
799 	if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) {
800 		/* remove new line character */
801 		bpf_vlog_reset(&env->log, env->prev_log_pos - 1);
802 		verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' ');
803 	} else {
804 		verbose(env, "%d:", env->insn_idx);
805 	}
806 	print_verifier_state(env, state, false);
807 }
808