// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */

#include <test_progs.h>
#include <bpf/btf.h>

#include "test_log_buf.skel.h"
#include "bpf_util.h"

static size_t libbpf_log_pos;
static char libbpf_log_buf[1024 * 1024];
static bool libbpf_log_error;

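/* libbpf print callback: append all libbpf output into libbpf_log_buf so
 * tests can assert on the presence or absence of specific messages
 */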
static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt, va_list args)
{
	int emitted_cnt;
	size_t left_cnt;

	left_cnt = sizeof(libbpf_log_buf) - libbpf_log_pos;
	emitted_cnt = vsnprintf(libbpf_log_buf + libbpf_log_pos, left_cnt, fmt, args);

	if (emitted_cnt < 0 || emitted_cnt + 1 > left_cnt) {
		libbpf_log_error = true;
		return 0;
	}

	libbpf_log_pos += emitted_cnt;
	return 0;
}

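/* Check how object-level kernel_log_buf, per-program log bufs, and libbpf's
 * print callback interact: with custom buffers set, verifier output must
 * land in those buffers; without them, it must flow through the callback
 */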
static void obj_load_log_buf(void)
{
	libbpf_print_fn_t old_print_cb = libbpf_set_print(libbpf_print_cb);
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	const size_t log_buf_sz = 1024 * 1024;
	struct test_log_buf *skel;
	char *obj_log_buf, *good_log_buf, *bad_log_buf;
	int err;

	obj_log_buf = malloc(3 * log_buf_sz);
	if (!ASSERT_OK_PTR(obj_log_buf, "obj_log_buf"))
		return;

	good_log_buf = obj_log_buf + log_buf_sz;
	bad_log_buf = obj_log_buf + 2 * log_buf_sz;
	obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0';

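	/* object-level kernel log buffer; BTF verification output is expected
	 * to land here (checked below via the "DATASEC license" substring)
	 */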
	opts.kernel_log_buf = obj_log_buf;
	opts.kernel_log_size = log_buf_sz;
	opts.kernel_log_level = 4; /* for BTF this will turn into 1 */

	/* In the first round every prog has its own log_buf, so libbpf logs
	 * don't have program failure logs
	 */
	skel = test_log_buf__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		goto cleanup;

	/* set very verbose level for good_prog so we always get detailed logs */
	bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz);
	bpf_program__set_log_level(skel->progs.good_prog, 2);

	bpf_program__set_log_buf(skel->progs.bad_prog, bad_log_buf, log_buf_sz);
	/* log_level 0 with custom log_buf means that verbose logs are not
	 * requested if program load is successful, but libbpf should retry
	 * with log_level 1 on error and put program's verbose load log into
	 * custom log_buf
	 */
	bpf_program__set_log_level(skel->progs.bad_prog, 0);

	err = test_log_buf__load(skel);
	if (!ASSERT_ERR(err, "unexpected_load_success"))
		goto cleanup;

	ASSERT_FALSE(libbpf_log_error, "libbpf_log_error");

	/* there should be no prog loading log because we specified per-prog log buf */
	ASSERT_NULL(strstr(libbpf_log_buf, "-- BEGIN PROG LOAD LOG --"), "unexp_libbpf_log");
	ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"),
		      "libbpf_log_not_empty");
	ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty");
	ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx() R10=fp0"),
		      "good_log_verbose");
	ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"),
		      "bad_log_not_empty");

	if (env.verbosity > VERBOSE_NONE) {
		printf("LIBBPF LOG: \n=================\n%s=================\n", libbpf_log_buf);
		printf("OBJ LOG: \n=================\n%s=================\n", obj_log_buf);
		printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf);
		printf("BAD_PROG LOG:\n=================\n%s=================\n", bad_log_buf);
	}

	/* reset everything */
	test_log_buf__destroy(skel);
	obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0';
	libbpf_log_buf[0] = '\0';
	libbpf_log_pos = 0;
	libbpf_log_error = false;

	/* In the second round we let bad_prog's failure be logged through print callback */
	opts.kernel_log_buf = NULL; /* let everything through into print callback */
	opts.kernel_log_size = 0;
	opts.kernel_log_level = 1;

	skel = test_log_buf__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		goto cleanup;

	/* set normal verbose level for good_prog to check log_level is taken into account */
	bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz);
	bpf_program__set_log_level(skel->progs.good_prog, 1);

	err = test_log_buf__load(skel);
	if (!ASSERT_ERR(err, "unexpected_load_success"))
		goto cleanup;

	ASSERT_FALSE(libbpf_log_error, "libbpf_log_error");

	/* this time prog loading error should be logged through print callback */
	ASSERT_OK_PTR(strstr(libbpf_log_buf, "libbpf: prog 'bad_prog': -- BEGIN PROG LOAD LOG --"),
		      "libbpf_log_correct");
	ASSERT_STREQ(obj_log_buf, "", "obj_log__empty");
	ASSERT_STREQ(good_log_buf, "processed 4 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n",
		     "good_log_ok");
	ASSERT_STREQ(bad_log_buf, "", "bad_log_empty");

	if (env.verbosity > VERBOSE_NONE) {
		printf("LIBBPF LOG: \n=================\n%s=================\n", libbpf_log_buf);
		printf("OBJ LOG: \n=================\n%s=================\n", obj_log_buf);
		printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf);
		printf("BAD_PROG LOG:\n=================\n%s=================\n", bad_log_buf);
	}

cleanup:
	free(obj_log_buf);
	test_log_buf__destroy(skel);
	libbpf_set_print(old_print_cb);
}

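/* Same log_buf/log_level semantics, exercised through the low-level
 * bpf_prog_load() API: log_level 0 keeps the buffer empty on success but
 * still captures errors, while log_level 2 always produces verbose output
 */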
static void bpf_prog_load_log_buf(void)
{
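	/* trivial valid program: just return 0 */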
	const struct bpf_insn good_prog_insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	const size_t good_prog_insn_cnt = ARRAY_SIZE(good_prog_insns);
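	/* invalid program: exits without initializing R0, so the verifier
	 * rejects it with "R0 !read_ok"
	 */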
	const struct bpf_insn bad_prog_insns[] = {
		BPF_EXIT_INSN(),
	};
	size_t bad_prog_insn_cnt = ARRAY_SIZE(bad_prog_insns);
	LIBBPF_OPTS(bpf_prog_load_opts, opts);
	const size_t log_buf_sz = 1024 * 1024;
	char *log_buf;
	int fd = -1;

	log_buf = malloc(log_buf_sz);
	if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
		return;
	opts.log_buf = log_buf;
	opts.log_size = log_buf_sz;

	/* with log_level == 0 log_buf should stay empty for good prog */
	log_buf[0] = '\0';
	opts.log_level = 0;
	fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
			   good_prog_insns, good_prog_insn_cnt, &opts);
	ASSERT_STREQ(log_buf, "", "good_log_0");
	ASSERT_GE(fd, 0, "good_fd1");
	if (fd >= 0)
		close(fd);
	fd = -1;

	/* log_level == 2 should always fill log_buf, even for good prog */
	log_buf[0] = '\0';
	opts.log_level = 2;
	fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
			   good_prog_insns, good_prog_insn_cnt, &opts);
	ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx() R10=fp0"), "good_log_2");
	ASSERT_GE(fd, 0, "good_fd2");
	if (fd >= 0)
		close(fd);
	fd = -1;

	/* log_level == 0 should fill log_buf for bad prog */
	log_buf[0] = '\0';
	opts.log_level = 0;
	fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "bad_prog", "GPL",
			   bad_prog_insns, bad_prog_insn_cnt, &opts);
	ASSERT_OK_PTR(strstr(log_buf, "R0 !read_ok"), "bad_log_0");
	ASSERT_LT(fd, 0, "bad_fd");
	if (fd >= 0)
		close(fd);
	fd = -1;

	free(log_buf);
}

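/* Same checks for bpf_btf_load(): log_level 0 keeps the buffer empty for
 * valid BTF but still captures errors for invalid BTF, while log_level 2
 * always fills the buffer
 */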
static void bpf_btf_load_log_buf(void)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts);
	const size_t log_buf_sz = 1024 * 1024;
	const void *raw_btf_data;
	__u32 raw_btf_size;
	struct btf *btf;
	char *log_buf = NULL;
	int fd = -1;

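	/* construct minimal valid BTF: just a single "int" type */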
	btf = btf__new_empty();
	if (!ASSERT_OK_PTR(btf, "empty_btf"))
		return;

	ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type");

	raw_btf_data = btf__raw_data(btf, &raw_btf_size);
	if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good"))
		goto cleanup;

	log_buf = malloc(log_buf_sz);
	if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
		goto cleanup;
	opts.log_buf = log_buf;
	opts.log_size = log_buf_sz;

	/* with log_level == 0 log_buf should stay empty for good BTF */
	log_buf[0] = '\0';
	opts.log_level = 0;
	fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
	ASSERT_STREQ(log_buf, "", "good_log_0");
	ASSERT_GE(fd, 0, "good_fd1");
	if (fd >= 0)
		close(fd);
	fd = -1;

	/* log_level == 2 should always fill log_buf, even for good BTF */
	log_buf[0] = '\0';
	opts.log_level = 2;
	fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
	printf("LOG_BUF: %s\n", log_buf);
	ASSERT_OK_PTR(strstr(log_buf, "magic: 0xeb9f"), "good_log_2");
	ASSERT_GE(fd, 0, "good_fd2");
	if (fd >= 0)
		close(fd);
	fd = -1;

	/* make BTF bad, add pointer pointing to non-existing type */
	ASSERT_GT(btf__add_ptr(btf, 100), 0, "bad_ptr_type");

	raw_btf_data = btf__raw_data(btf, &raw_btf_size);
	if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_bad"))
		goto cleanup;

	/* log_level == 0 should fill log_buf for bad BTF */
	log_buf[0] = '\0';
	opts.log_level = 0;
	fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
	printf("LOG_BUF: %s\n", log_buf);
	ASSERT_OK_PTR(strstr(log_buf, "[2] PTR (anon) type_id=100 Invalid type_id"), "bad_log_0");
	ASSERT_LT(fd, 0, "bad_fd");
	if (fd >= 0)
		close(fd);
	fd = -1;

cleanup:
	free(log_buf);
	btf__free(btf);
}

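/* entry point: run each scenario as a separate subtest */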
void test_log_buf(void)
{
	if (test__start_subtest("obj_load_log_buf"))
		obj_load_log_buf();
	if (test__start_subtest("bpf_prog_load_log_buf"))
		bpf_prog_load_log_buf();
	if (test__start_subtest("bpf_btf_load_log_buf"))
		bpf_btf_load_log_buf();
}