// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */

#include <test_progs.h>
#include <bpf/btf.h>

#include "test_log_buf.skel.h"
#include "bpf_util.h"

#if !defined(__clang__)
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif

static size_t libbpf_log_pos;
static char libbpf_log_buf[1024 * 1024];
static bool libbpf_log_error;

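/* libbpf print callback that accumulates all libbpf log output into
 * libbpf_log_buf so the tests below can assert on its contents.
 */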
static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt, va_list args)
{
	int emitted_cnt;
	size_t left_cnt;

	left_cnt = sizeof(libbpf_log_buf) - libbpf_log_pos;
	emitted_cnt = vsnprintf(libbpf_log_buf + libbpf_log_pos, left_cnt, fmt, args);

	if (emitted_cnt < 0 || emitted_cnt + 1 > left_cnt) {
		libbpf_log_error = true;
		return 0;
	}

	libbpf_log_pos += emitted_cnt;
	return 0;
}

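/* Check per-object and per-program kernel log buffers set up through
 * bpf_object open opts and bpf_program__set_log_buf()/set_log_level().
 */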
static void obj_load_log_buf(void)
{
	libbpf_print_fn_t old_print_cb = libbpf_set_print(libbpf_print_cb);
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	const size_t log_buf_sz = 1024 * 1024;
	struct test_log_buf *skel;
	char *obj_log_buf, *good_log_buf, *bad_log_buf;
	int err;

	obj_log_buf = malloc(3 * log_buf_sz);
	if (!ASSERT_OK_PTR(obj_log_buf, "obj_log_buf"))
		return;

	good_log_buf = obj_log_buf + log_buf_sz;
	bad_log_buf = obj_log_buf + 2 * log_buf_sz;
	obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0';

	opts.kernel_log_buf = obj_log_buf;
	opts.kernel_log_size = log_buf_sz;
	opts.kernel_log_level = 4; /* for BTF this will turn into 1 */

	/* In the first round every prog has its own log_buf, so libbpf logs
	 * don't have program failure logs
	 */
	skel = test_log_buf__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		goto cleanup;

	/* set very verbose level for good_prog so we always get detailed logs */
	bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz);
	bpf_program__set_log_level(skel->progs.good_prog, 2);

	bpf_program__set_log_buf(skel->progs.bad_prog, bad_log_buf, log_buf_sz);
	/* log_level 0 with custom log_buf means that verbose logs are not
	 * requested if program load is successful, but libbpf should retry
	 * with log_level 1 on error and put program's verbose load log into
	 * custom log_buf
	 */
	bpf_program__set_log_level(skel->progs.bad_prog, 0);

	err = test_log_buf__load(skel);
	if (!ASSERT_ERR(err, "unexpected_load_success"))
		goto cleanup;

	ASSERT_FALSE(libbpf_log_error, "libbpf_log_error");

	/* there should be no prog loading log because we specified per-prog log buf */
	ASSERT_NULL(strstr(libbpf_log_buf, "-- BEGIN PROG LOAD LOG --"), "unexp_libbpf_log");
	ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"),
		      "libbpf_log_not_empty");
	ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty");
	ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx() R10=fp0"),
		      "good_log_verbose");
	ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"),
		      "bad_log_not_empty");

	if (env.verbosity > VERBOSE_NONE) {
		printf("LIBBPF LOG:   \n=================\n%s=================\n", libbpf_log_buf);
		printf("OBJ LOG:      \n=================\n%s=================\n", obj_log_buf);
		printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf);
		printf("BAD_PROG  LOG:\n=================\n%s=================\n", bad_log_buf);
	}

	/* reset everything */
	test_log_buf__destroy(skel);
	obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0';
	libbpf_log_buf[0] = '\0';
	libbpf_log_pos = 0;
	libbpf_log_error = false;

	/* In the second round we let bad_prog's failure be logged through print callback */
	opts.kernel_log_buf = NULL; /* let everything through into print callback */
	opts.kernel_log_size = 0;
	opts.kernel_log_level = 1;

	skel = test_log_buf__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		goto cleanup;

	/* set normal verbose level for good_prog to check log_level is taken into account */
	bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz);
	bpf_program__set_log_level(skel->progs.good_prog, 1);

	err = test_log_buf__load(skel);
	if (!ASSERT_ERR(err, "unexpected_load_success"))
		goto cleanup;

	ASSERT_FALSE(libbpf_log_error, "libbpf_log_error");

	/* this time prog loading error should be logged through print callback */
	ASSERT_OK_PTR(strstr(libbpf_log_buf, "libbpf: prog 'bad_prog': -- BEGIN PROG LOAD LOG --"),
		      "libbpf_log_correct");
	ASSERT_STREQ(obj_log_buf, "", "obj_log__empty");
	ASSERT_STREQ(good_log_buf, "processed 4 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n",
		     "good_log_ok");
	ASSERT_STREQ(bad_log_buf, "", "bad_log_empty");

	if (env.verbosity > VERBOSE_NONE) {
		printf("LIBBPF LOG:   \n=================\n%s=================\n", libbpf_log_buf);
		printf("OBJ LOG:      \n=================\n%s=================\n", obj_log_buf);
		printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf);
		printf("BAD_PROG  LOG:\n=================\n%s=================\n", bad_log_buf);
	}

cleanup:
	free(obj_log_buf);
	test_log_buf__destroy(skel);
	libbpf_set_print(old_print_cb);
}

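/* Exercise log_buf/log_level handling of the low-level bpf_prog_load() API
 * directly, for both a loadable and a known-bad program.
 */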
static void bpf_prog_load_log_buf(void)
{
	const struct bpf_insn good_prog_insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	const size_t good_prog_insn_cnt = ARRAY_SIZE(good_prog_insns);
	const struct bpf_insn bad_prog_insns[] = {
		BPF_EXIT_INSN(),
	};
	size_t bad_prog_insn_cnt = ARRAY_SIZE(bad_prog_insns);
	LIBBPF_OPTS(bpf_prog_load_opts, opts);
	const size_t log_buf_sz = 1024 * 1024;
	char *log_buf;
	int fd = -1;

	log_buf = malloc(log_buf_sz);
	if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
		return;
	opts.log_buf = log_buf;
	opts.log_size = log_buf_sz;

	/* with log_level == 0 log_buf should stay empty for good prog */
	log_buf[0] = '\0';
	opts.log_level = 0;
	fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
			   good_prog_insns, good_prog_insn_cnt, &opts);
	ASSERT_STREQ(log_buf, "", "good_log_0");
	ASSERT_GE(fd, 0, "good_fd1");
	if (fd >= 0)
		close(fd);

	/* log_level == 2 should always fill log_buf, even for good prog */
	log_buf[0] = '\0';
	opts.log_level = 2;
	fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
			   good_prog_insns, good_prog_insn_cnt, &opts);
	ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx() R10=fp0"), "good_log_2");
	ASSERT_GE(fd, 0, "good_fd2");
	if (fd >= 0)
		close(fd);

	/* log_level == 0 should fill log_buf for bad prog */
	log_buf[0] = '\0';
	opts.log_level = 0;
	fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "bad_prog", "GPL",
			   bad_prog_insns, bad_prog_insn_cnt, &opts);
	ASSERT_OK_PTR(strstr(log_buf, "R0 !read_ok"), "bad_log_0");
	ASSERT_LT(fd, 0, "bad_fd");
	if (fd >= 0)
		close(fd);

	free(log_buf);
}

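/* Exercise log_buf/log_level handling of bpf_btf_load(), first with valid
 * BTF and then with BTF made intentionally invalid.
 */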
static void bpf_btf_load_log_buf(void)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts);
	const size_t log_buf_sz = 1024 * 1024;
	const void *raw_btf_data;
	__u32 raw_btf_size;
	struct btf *btf;
	char *log_buf = NULL;
	int fd = -1;

	btf = btf__new_empty();
	if (!ASSERT_OK_PTR(btf, "empty_btf"))
		return;

	ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type");

	raw_btf_data = btf__raw_data(btf, &raw_btf_size);
	if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good"))
		goto cleanup;

	log_buf = malloc(log_buf_sz);
	if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
		goto cleanup;
	opts.log_buf = log_buf;
	opts.log_size = log_buf_sz;

	/* with log_level == 0 log_buf should stay empty for good BTF */
	log_buf[0] = '\0';
	opts.log_level = 0;
	fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
	ASSERT_STREQ(log_buf, "", "good_log_0");
	ASSERT_GE(fd, 0, "good_fd1");
	if (fd >= 0)
		close(fd);
	fd = -1;

	/* log_level == 2 should always fill log_buf, even for good BTF */
	log_buf[0] = '\0';
	opts.log_level = 2;
	fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
	printf("LOG_BUF: %s\n", log_buf);
	ASSERT_OK_PTR(strstr(log_buf, "magic: 0xeb9f"), "good_log_2");
	ASSERT_GE(fd, 0, "good_fd2");
	if (fd >= 0)
		close(fd);
	fd = -1;

	/* make BTF bad, add pointer pointing to non-existing type */
	ASSERT_GT(btf__add_ptr(btf, 100), 0, "bad_ptr_type");

	raw_btf_data = btf__raw_data(btf, &raw_btf_size);
	if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_bad"))
		goto cleanup;

	/* log_level == 0 should fill log_buf for bad BTF */
	log_buf[0] = '\0';
	opts.log_level = 0;
	fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
	printf("LOG_BUF: %s\n", log_buf);
	ASSERT_OK_PTR(strstr(log_buf, "[2] PTR (anon) type_id=100 Invalid type_id"), "bad_log_0");
	ASSERT_LT(fd, 0, "bad_fd");
	if (fd >= 0)
		close(fd);
	fd = -1;

cleanup:
	free(log_buf);
	btf__free(btf);
}

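/* Entry point: runs each of the scenarios above as a separate subtest */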
void test_log_buf(void)
{
	if (test__start_subtest("obj_load_log_buf"))
		obj_load_log_buf();
	if (test__start_subtest("bpf_prog_load_log_buf"))
		bpf_prog_load_log_buf();
	if (test__start_subtest("bpf_btf_load_log_buf"))
		bpf_btf_load_log_buf();
}