xref: /linux/kernel/bpf/log.c (revision 1b0975ee3bdd3eb19a47371c26fd7ef8f7f6b599)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/math64.h>

static bool bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
{
	/* ubuf and len_total should both be specified (or not) together */
	if (!!log->ubuf != !!log->len_total)
		return false;
	/* log buf without log_level is meaningless */
	if (log->ubuf && log->level == 0)
		return false;
	if (log->level & ~BPF_LOG_MASK)
		return false;
	if (log->len_total > UINT_MAX >> 2)
		return false;
	return true;
}
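
/* Illustrative combinations for the checks above (a sketch, not an
 * exhaustive list; BPF_LOG_MASK covers all valid level bits):
 *
 *	ubuf == NULL, len_total == 0,    level == 0  -> valid (logging disabled)
 *	ubuf != NULL, len_total == 1024, level == 1  -> valid
 *	ubuf != NULL, len_total == 0,    level == 1  -> invalid (buf without size)
 *	ubuf != NULL, len_total == 1024, level == 0  -> invalid (buf without level)
 *	ubuf != NULL, len_total > UINT_MAX >> 2      -> invalid (~1 GiB cap)
 */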

int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
		  char __user *log_buf, u32 log_size)
{
	log->level = log_level;
	log->ubuf = log_buf;
	log->len_total = log_size;

	/* log attributes have to be sane */
	if (!bpf_verifier_log_attr_valid(log))
		return -EINVAL;

	return 0;
}
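
/* A minimal sketch of how a caller would typically wire this up from the
 * bpf(2) syscall attributes (illustrative, not a copy of the actual call
 * site):
 *
 *	err = bpf_vlog_init(&env->log, attr->log_level,
 *			    u64_to_user_ptr(attr->log_buf),
 *			    attr->log_size);
 *	if (err)
 *		return err;
 */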

static void bpf_vlog_update_len_max(struct bpf_verifier_log *log, u32 add_len)
{
	/* add_len includes terminal \0, so no need for +1. */
	u64 len = log->end_pos + add_len;

	/* log->len_max could be larger than our current len due to
	 * bpf_vlog_reset() calls, so we maintain the max of any length at any
	 * previous point
	 */
	if (len > UINT_MAX)
		log->len_max = UINT_MAX;
	else if (len > log->len_max)
		log->len_max = len;
}
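
/* Worked example: with end_pos == 100 and a 20-byte message (add_len == 21,
 * counting the '\0'), len_max becomes at least 121. If a later
 * bpf_vlog_reset() rewinds end_pos to 50, len_max stays 121, so
 * bpf_vlog_finalize() can still report the buffer size a user would need to
 * capture the log at its high-water mark.
 */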

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	u64 cur_pos;
	u32 new_n, n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	if (log->level == BPF_LOG_KERNEL) {
		bool newline = n > 0 && log->kbuf[n - 1] == '\n';

		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
		return;
	}

	n += 1; /* include terminating zero */
	bpf_vlog_update_len_max(log, n);

	if (log->level & BPF_LOG_FIXED) {
		/* check if we have at least something to put into user buf */
		new_n = 0;
		if (log->end_pos < log->len_total) {
			new_n = min_t(u32, log->len_total - log->end_pos, n);
			log->kbuf[new_n - 1] = '\0';
		}

		cur_pos = log->end_pos;
		log->end_pos += n - 1; /* don't count terminating '\0' */

		if (log->ubuf && new_n &&
		    copy_to_user(log->ubuf + cur_pos, log->kbuf, new_n))
			goto fail;
	} else {
		u64 new_end, new_start;
		u32 buf_start, buf_end, new_n;

		new_end = log->end_pos + n;
		if (new_end - log->start_pos >= log->len_total)
			new_start = new_end - log->len_total;
		else
			new_start = log->start_pos;

		log->start_pos = new_start;
		log->end_pos = new_end - 1; /* don't count terminating '\0' */

		if (!log->ubuf)
			return;

		new_n = min(n, log->len_total);
		cur_pos = new_end - new_n;
		div_u64_rem(cur_pos, log->len_total, &buf_start);
		div_u64_rem(new_end, log->len_total, &buf_end);
		/* new_end and buf_end are exclusive indices, so if buf_end is
		 * exactly zero, then it actually points right to the end of
		 * ubuf and there is no wrap around
		 */
		if (buf_end == 0)
			buf_end = log->len_total;

		/* if buf_start > buf_end, we wrapped around;
		 * if buf_start == buf_end, then we filled ubuf completely; it
		 * can't mean that there is nothing to write, because we
		 * always write at least something, even if it's just the
		 * terminating '\0'
		 */
		if (buf_start < buf_end) {
			/* message fits within contiguous chunk of ubuf */
			if (copy_to_user(log->ubuf + buf_start,
					 log->kbuf + n - new_n,
					 buf_end - buf_start))
				goto fail;
		} else {
			/* message wraps around the end of ubuf, copy in two chunks */
			if (copy_to_user(log->ubuf + buf_start,
					 log->kbuf + n - new_n,
					 log->len_total - buf_start))
				goto fail;
			if (copy_to_user(log->ubuf,
					 log->kbuf + n - buf_end,
					 buf_end))
				goto fail;
		}
	}

	return;
fail:
	log->ubuf = NULL;
}
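
/* Worked example for the rotating (default) mode above: say len_total == 8
 * and the log already holds positions [0, 6) when the 5-byte message "abcd"
 * arrives (n == 5 with the '\0'). Then new_end == 11, so start_pos moves to
 * 3, buf_start == 6 and buf_end == 3, and the message is written in two
 * chunks: "ab" at ubuf[6..8) and "cd\0" at ubuf[0..3). In BPF_LOG_FIXED
 * mode the same message would instead be truncated once end_pos reached
 * len_total. (Numbers here are illustrative, not from a real trace.)
 */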

void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos)
{
	char zero = 0;
	u32 pos;

	if (WARN_ON_ONCE(new_pos > log->end_pos))
		return;

	if (!bpf_verifier_log_needed(log) || log->level == BPF_LOG_KERNEL)
		return;

	/* if the position we reset to falls before the current log window,
	 * then we didn't preserve any useful content and should adjust
	 * start_pos to end up with an empty log (start_pos == end_pos)
	 */
	log->end_pos = new_pos;
	if (log->end_pos < log->start_pos)
		log->start_pos = log->end_pos;

	if (!log->ubuf)
		return;

	if (log->level & BPF_LOG_FIXED)
		pos = log->end_pos + 1;
	else
		div_u64_rem(new_pos, log->len_total, &pos);

	if (pos < log->len_total && put_user(zero, log->ubuf + pos))
		log->ubuf = NULL;
}
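
/* The verifier uses resets like this to throw away speculative output: it
 * remembers end_pos before exploring a path and rewinds if that output turns
 * out to be unneeded, e.g. (a sketch, not the actual verifier code):
 *
 *	u64 mark = env->log.end_pos;
 *	... emit tentative log output ...
 *	bpf_vlog_reset(&env->log, mark);
 */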

static void bpf_vlog_reverse_kbuf(char *buf, int len)
{
	int i, j;

	for (i = 0, j = len - 1; i < j; i++, j--)
		swap(buf[i], buf[j]);
}
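
/* E.g. bpf_vlog_reverse_kbuf(buf, 4) turns "abcd" into "dcba"; for odd
 * lengths the middle byte stays in place ("abc" -> "cba").
 */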

static int bpf_vlog_reverse_ubuf(struct bpf_verifier_log *log, int start, int end)
{
	/* we split log->kbuf into two equal parts for both ends of array */
	int n = sizeof(log->kbuf) / 2, nn;
	char *lbuf = log->kbuf, *rbuf = log->kbuf + n;

	/* Read ubuf's section [start, end) two chunks at a time, from left
	 * and right side; within each chunk, swap all the bytes; after that
	 * reverse the order of lbuf and rbuf and write result back to ubuf.
	 * This way we'll end up with swapped contents of specified
	 * [start, end) ubuf segment.
	 */
	while (end - start > 1) {
		nn = min(n, (end - start) / 2);

		if (copy_from_user(lbuf, log->ubuf + start, nn))
			return -EFAULT;
		if (copy_from_user(rbuf, log->ubuf + end - nn, nn))
			return -EFAULT;

		bpf_vlog_reverse_kbuf(lbuf, nn);
		bpf_vlog_reverse_kbuf(rbuf, nn);

		/* we write lbuf to the right end of ubuf, while rbuf to the
		 * left one to end up with properly reversed overall ubuf
		 */
		if (copy_to_user(log->ubuf + start, rbuf, nn))
			return -EFAULT;
		if (copy_to_user(log->ubuf + end - nn, lbuf, nn))
			return -EFAULT;

		start += nn;
		end -= nn;
	}

	return 0;
}
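
/* To see why this reverses [start, end): each iteration takes nn bytes off
 * both ends, reverses each chunk in the kernel, and writes them back to the
 * opposite ends; the loop stops when at most one byte remains in the middle,
 * which a reversal leaves in place anyway. Assuming kbuf is
 * BPF_VERIFIER_TMP_LOG_SIZE (1024) bytes, so n == 512, reversing a 4 KiB
 * region takes four iterations of 512 bytes from each side.
 */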

int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual)
{
	u32 sublen;
	int err;

	*log_size_actual = 0;
	if (!log || log->level == 0 || log->level == BPF_LOG_KERNEL)
		return 0;

	if (!log->ubuf)
		goto skip_log_rotate;
	/* If we never truncated log, there is nothing to move around. */
	if (log->start_pos == 0)
		goto skip_log_rotate;

	/* Otherwise we need to rotate log contents to make it start from the
	 * buffer beginning and be a continuous zero-terminated string. Note
	 * that if log->start_pos != 0 then we definitely filled up entire log
	 * buffer with no gaps, and we just need to shift buffer contents to
	 * the left by (log->start_pos % log->len_total) bytes.
	 *
	 * Unfortunately, user buffer could be huge and we don't want to
	 * allocate temporary kernel memory of the same size just to shift
	 * contents in a straightforward fashion. Instead, we'll be clever and
	 * do in-place array rotation. This is a leetcode-style problem, which
	 * can be solved with three reversals.
	 *
	 * Let's say we have a log buffer that has to be shifted left by 7
	 * bytes (spaces and the vertical bar are just for demonstration
	 * purposes):
	 *   E F G H I J K | A B C D
	 *
	 * First, we reverse the entire array:
	 *   D C B A | K J I H G F E
	 *
	 * Then we reverse the first 4 bytes (DCBA) and, separately, the last
	 * 7 bytes (KJIHGFE), resulting in a properly rotated array:
	 *   A B C D | E F G H I J K
	 *
	 * We'll utilize log->kbuf to read user memory chunk by chunk, swap
	 * bytes, and write them back. Doing it byte-by-byte would be
	 * unnecessarily inefficient. Altogether we are going to read and
	 * write each byte twice, for total 4 memory copies between kernel and
	 * user space.
	 */

	/* length of the chopped off part that will be the beginning;
	 * len(ABCD) in the example above
	 */
	div_u64_rem(log->start_pos, log->len_total, &sublen);
	sublen = log->len_total - sublen;

	err = bpf_vlog_reverse_ubuf(log, 0, log->len_total);
	err = err ?: bpf_vlog_reverse_ubuf(log, 0, sublen);
	err = err ?: bpf_vlog_reverse_ubuf(log, sublen, log->len_total);
	if (err)
		log->ubuf = NULL;

skip_log_rotate:
	*log_size_actual = log->len_max;

	/* properly initialized log has either both ubuf!=NULL and len_total>0
	 * or ubuf==NULL and len_total==0, so if this condition doesn't hold,
	 * we got a fault somewhere along the way, so report it back
	 */
	if (!!log->ubuf != !!log->len_total)
		return -EFAULT;

	/* did truncation actually happen? */
	if (log->ubuf && log->len_max > log->len_total)
		return -ENOSPC;

	return 0;
}
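
/* Sketch of the caller's contract (mirroring how the verifier consumes the
 * result; the log_true_size name is illustrative):
 *
 *	u32 log_true_size;
 *	err = bpf_vlog_finalize(&env->log, &log_true_size);
 *
 * On return, log_true_size holds the buffer size (including the '\0') that
 * would have fit the entire log; -ENOSPC signals that truncation happened,
 * and -EFAULT signals a failed copy to the user buffer along the way.
 */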

/* log_level controls the verbosity level of the eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the
 * log, so the user can figure out what's wrong with their program.
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
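
/* Typical use from verifier code (illustrative; the verifier mostly goes
 * through its verbose() wrapper, which boils down to this call):
 *
 *	bpf_verifier_log_write(env, "invalid access to map value, off=%d size=%d\n",
 *			       off, size);
 */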

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_log);
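
/* bpf_log() takes a bare struct bpf_verifier_log, so it also serves callers
 * that have no full verifier env, such as BTF validation (sketch):
 *
 *	bpf_log(log, "arg#%d type is not a struct\n", arg);
 */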