// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */

#include <test_progs.h>
#include <bpf/btf.h>

#include "test_log_buf.skel.h"

static size_t libbpf_log_pos;
static char libbpf_log_buf[1024 * 1024];
static bool libbpf_log_error;

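/* libbpf print callback: append every libbpf message to libbpf_log_buf so
 * the test can assert on libbpf's own output, and flag an error if the
 * buffer overflows or vsnprintf() fails.
 */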
static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt, va_list args)
{
	int emitted_cnt;
	size_t left_cnt;

	left_cnt = sizeof(libbpf_log_buf) - libbpf_log_pos;
	emitted_cnt = vsnprintf(libbpf_log_buf + libbpf_log_pos, left_cnt, fmt, args);

	if (emitted_cnt < 0 || emitted_cnt + 1 > left_cnt) {
		libbpf_log_error = true;
		return 0;
	}

	libbpf_log_pos += emitted_cnt;
	return 0;
}

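/* Load the test_log_buf skeleton twice: in the first round the object and
 * each program get their own kernel log buffer; in the second round the
 * custom buffers for the object and bad_prog are dropped, so the failing
 * program's load log is routed through the libbpf print callback instead.
 */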
static void obj_load_log_buf(void)
{
	libbpf_print_fn_t old_print_cb = libbpf_set_print(libbpf_print_cb);
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	const size_t log_buf_sz = 1024 * 1024;
	struct test_log_buf *skel;
	char *obj_log_buf, *good_log_buf, *bad_log_buf;
	int err;

	obj_log_buf = malloc(3 * log_buf_sz);
	if (!ASSERT_OK_PTR(obj_log_buf, "obj_log_buf"))
		return;

	good_log_buf = obj_log_buf + log_buf_sz;
	bad_log_buf = obj_log_buf + 2 * log_buf_sz;
	obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0';

	opts.kernel_log_buf = obj_log_buf;
	opts.kernel_log_size = log_buf_sz;
	opts.kernel_log_level = 4; /* for BTF this will turn into 1 */
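	/* kernel_log_level is the verifier log bitmask: 1 = basic verifier
	 * log, 2 = full verbose log, 4 = verifier stats only.
	 */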

	/* In the first round every prog has its own log_buf, so libbpf logs
	 * don't have program failure logs
	 */
	skel = test_log_buf__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		goto cleanup;

	/* set very verbose level for good_prog so we always get detailed logs */
	bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz);
	bpf_program__set_log_level(skel->progs.good_prog, 2);

	bpf_program__set_log_buf(skel->progs.bad_prog, bad_log_buf, log_buf_sz);
	/* log_level 0 with custom log_buf means that verbose logs are not
	 * requested if program load is successful, but libbpf should retry
	 * with log_level 1 on error and put program's verbose load log into
	 * custom log_buf
	 */
	bpf_program__set_log_level(skel->progs.bad_prog, 0);

	err = test_log_buf__load(skel);
	if (!ASSERT_ERR(err, "unexpected_load_success"))
		goto cleanup;

	ASSERT_FALSE(libbpf_log_error, "libbpf_log_error");

	/* there should be no prog loading log because we specified per-prog log buf */
	ASSERT_NULL(strstr(libbpf_log_buf, "-- BEGIN PROG LOAD LOG --"), "unexp_libbpf_log");
	ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"),
		      "libbpf_log_not_empty");
	ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty");
	ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx() R10=fp0"),
		      "good_log_verbose");
	ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"),
		      "bad_log_not_empty");

	if (env.verbosity > VERBOSE_NONE) {
		printf("LIBBPF LOG:   \n=================\n%s=================\n", libbpf_log_buf);
		printf("OBJ LOG:      \n=================\n%s=================\n", obj_log_buf);
		printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf);
		printf("BAD_PROG  LOG:\n=================\n%s=================\n", bad_log_buf);
	}

	/* reset everything */
	test_log_buf__destroy(skel);
	obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0';
	libbpf_log_buf[0] = '\0';
	libbpf_log_pos = 0;
	libbpf_log_error = false;

	/* In the second round we let bad_prog's failure be logged through print callback */
	opts.kernel_log_buf = NULL; /* let everything through into print callback */
	opts.kernel_log_size = 0;
	opts.kernel_log_level = 1;

	skel = test_log_buf__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		goto cleanup;

	/* set normal verbose level for good_prog to check log_level is taken into account */
	bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz);
	bpf_program__set_log_level(skel->progs.good_prog, 1);

	err = test_log_buf__load(skel);
	if (!ASSERT_ERR(err, "unexpected_load_success"))
		goto cleanup;

	ASSERT_FALSE(libbpf_log_error, "libbpf_log_error");

	/* this time prog loading error should be logged through print callback */
	ASSERT_OK_PTR(strstr(libbpf_log_buf, "libbpf: prog 'bad_prog': -- BEGIN PROG LOAD LOG --"),
		      "libbpf_log_correct");
	ASSERT_STREQ(obj_log_buf, "", "obj_log__empty");
	ASSERT_STREQ(good_log_buf, "processed 4 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n",
		     "good_log_ok");
	ASSERT_STREQ(bad_log_buf, "", "bad_log_empty");

	if (env.verbosity > VERBOSE_NONE) {
		printf("LIBBPF LOG:   \n=================\n%s=================\n", libbpf_log_buf);
		printf("OBJ LOG:      \n=================\n%s=================\n", obj_log_buf);
		printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf);
		printf("BAD_PROG  LOG:\n=================\n%s=================\n", bad_log_buf);
	}

cleanup:
	free(obj_log_buf);
	test_log_buf__destroy(skel);
	libbpf_set_print(old_print_cb);
}

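/* Exercise the low-level bpf_prog_load() API with a caller-provided log
 * buffer: log_level 0 leaves it empty on success, log_level 2 always fills
 * it, and a failed load fills it even at log_level 0.
 */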
static void bpf_prog_load_log_buf(void)
{
	const struct bpf_insn good_prog_insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	const size_t good_prog_insn_cnt = sizeof(good_prog_insns) / sizeof(struct bpf_insn);
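	/* bad_prog exits without ever initializing R0, so the verifier
	 * rejects it with "R0 !read_ok"
	 */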
	const struct bpf_insn bad_prog_insns[] = {
		BPF_EXIT_INSN(),
	};
	size_t bad_prog_insn_cnt = sizeof(bad_prog_insns) / sizeof(struct bpf_insn);
	LIBBPF_OPTS(bpf_prog_load_opts, opts);
	const size_t log_buf_sz = 1024 * 1024;
	char *log_buf;
	int fd = -1;

	log_buf = malloc(log_buf_sz);
	if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
		return;
	opts.log_buf = log_buf;
	opts.log_size = log_buf_sz;

	/* with log_level == 0 log_buf should stay empty for good prog */
	log_buf[0] = '\0';
	opts.log_level = 0;
	fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
			   good_prog_insns, good_prog_insn_cnt, &opts);
	ASSERT_STREQ(log_buf, "", "good_log_0");
	ASSERT_GE(fd, 0, "good_fd1");
	if (fd >= 0)
		close(fd);
	fd = -1;

	/* log_level == 2 should always fill log_buf, even for good prog */
	log_buf[0] = '\0';
	opts.log_level = 2;
	fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
			   good_prog_insns, good_prog_insn_cnt, &opts);
	ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx() R10=fp0"), "good_log_2");
	ASSERT_GE(fd, 0, "good_fd2");
	if (fd >= 0)
		close(fd);
	fd = -1;

	/* log_level == 0 should fill log_buf for bad prog */
	log_buf[0] = '\0';
	opts.log_level = 0;
	fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "bad_prog", "GPL",
			   bad_prog_insns, bad_prog_insn_cnt, &opts);
	ASSERT_OK_PTR(strstr(log_buf, "R0 !read_ok"), "bad_log_0");
	ASSERT_LT(fd, 0, "bad_fd");
	if (fd >= 0)
		close(fd);
	fd = -1;

	free(log_buf);
}

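/* Same exercise for the low-level bpf_btf_load() API: build a minimal BTF
 * with btf__new_empty()/btf__add_int(), then check that the caller-provided
 * log buffer stays empty at log_level 0, is filled at log_level 2, and is
 * filled on failure once an invalid PTR type is appended.
 */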
static void bpf_btf_load_log_buf(void)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts);
	const size_t log_buf_sz = 1024 * 1024;
	const void *raw_btf_data;
	__u32 raw_btf_size;
	struct btf *btf;
	char *log_buf = NULL;
	int fd = -1;

	btf = btf__new_empty();
	if (!ASSERT_OK_PTR(btf, "empty_btf"))
		return;

	ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type");

	raw_btf_data = btf__raw_data(btf, &raw_btf_size);
	if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good"))
		goto cleanup;

	log_buf = malloc(log_buf_sz);
	if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
		goto cleanup;
	opts.log_buf = log_buf;
	opts.log_size = log_buf_sz;

	/* with log_level == 0 log_buf should stay empty for good BTF */
	log_buf[0] = '\0';
	opts.log_level = 0;
	fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
	ASSERT_STREQ(log_buf, "", "good_log_0");
	ASSERT_GE(fd, 0, "good_fd1");
	if (fd >= 0)
		close(fd);
	fd = -1;

	/* log_level == 2 should always fill log_buf, even for good BTF */
	log_buf[0] = '\0';
	opts.log_level = 2;
	fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
	printf("LOG_BUF: %s\n", log_buf);
	ASSERT_OK_PTR(strstr(log_buf, "magic: 0xeb9f"), "good_log_2");
	ASSERT_GE(fd, 0, "good_fd2");
	if (fd >= 0)
		close(fd);
	fd = -1;

	/* make BTF bad, add pointer pointing to non-existing type */
	ASSERT_GT(btf__add_ptr(btf, 100), 0, "bad_ptr_type");

	raw_btf_data = btf__raw_data(btf, &raw_btf_size);
	if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_bad"))
		goto cleanup;

	/* log_level == 0 should fill log_buf for bad BTF */
	log_buf[0] = '\0';
	opts.log_level = 0;
	fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
	printf("LOG_BUF: %s\n", log_buf);
	ASSERT_OK_PTR(strstr(log_buf, "[2] PTR (anon) type_id=100 Invalid type_id"), "bad_log_0");
	ASSERT_LT(fd, 0, "bad_fd");
	if (fd >= 0)
		close(fd);
	fd = -1;

cleanup:
	free(log_buf);
	btf__free(btf);
}

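/* Entry point: each scenario above runs as its own subtest, so individual
 * cases can be selected with test_progs' -t filter (e.g. -t log_buf).
 */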
void test_log_buf(void)
{
	if (test__start_subtest("obj_load_log_buf"))
		obj_load_log_buf();
	if (test__start_subtest("bpf_prog_load_log_buf"))
		bpf_prog_load_log_buf();
	if (test__start_subtest("bpf_btf_load_log_buf"))
		bpf_btf_load_log_buf();
}