/* xref: /linux/net/bpf/test_run.c (revision e950e843367d7990b9d7ea964e3c33876d477c4b) */
/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include <net/tcp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);
		*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}
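
/* Worked example of the duration reported above (illustrative numbers):
 * the loop accumulates wall-clock nanoseconds across all iterations,
 * excluding the windows where we drop out to reschedule, and then
 * averages:
 *
 *	repeat     = 1000 runs
 *	time_spent = 1500000 ns accumulated
 *	*time      = 1500000 / 1000 = 1500 ns per run (clamped to U32_MAX)
 *
 * So userspace sees a per-iteration average, not a total.
 */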

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
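
/* Minimal userspace sketch (not part of this file; buffer names and
 * sizes are assumptions) of how the fields written above are consumed
 * through the BPF_PROG_TEST_RUN command:
 *
 *	union bpf_attr attr = {};
 *	char out[1500];
 *
 *	attr.test.prog_fd       = prog_fd;
 *	attr.test.data_in       = (unsigned long)pkt;
 *	attr.test.data_size_in  = pkt_len;
 *	attr.test.data_out      = (unsigned long)out;
 *	attr.test.data_size_out = sizeof(out);	// size hint; -ENOSPC if clamped
 *
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *	// on success: attr.test.retval, attr.test.duration and the real
 *	// output length in attr.test.data_size_out are all valid
 */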

static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}
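
/* Layout of the buffer returned by bpf_test_init(), for reference:
 *
 *	+----------+---------------------+----------+
 *	| headroom | packet (size bytes) | tailroom |
 *	+----------+---------------------+----------+
 *	^ data     ^ data + headroom
 *
 * The size check above guarantees the whole allocation fits in a single
 * page, and copy_from_user() places the packet bytes after the headroom.
 */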

static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}
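
/* The bpf_check_uarg_tail_zero() call above is what lets a newer
 * userspace pass a larger context struct to an older kernel: the bytes
 * beyond what this kernel knows about (max_size) must all be zero, or
 * the check fails (with -E2BIG in current kernels). Roughly:
 *
 *	if (size > max_size && ctx_in[max_size..size) has a non-zero byte)
 *		return error;
 *
 * A shorter struct is always fine: kzalloc() keeps the tail zeroed.
 */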

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}
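
/* Example: the converters below use this to enforce that only
 * whitelisted fields of a user-supplied struct __sk_buff are set, e.g.
 *
 *	range_is_zero(__skb, 0, offsetof(struct __sk_buff, priority))
 *
 * is true only if every byte before the priority field is still zero.
 */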

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) +
			   FIELD_SIZEOF(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
			   FIELD_SIZEOF(struct __sk_buff, cb),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->priority = __skb->priority;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	return 0;
}
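
/* Illustrative userspace counterpart (field values are made up): only
 * priority and cb[] may be non-zero in the ctx blob, anything else makes
 * the checks above return -EINVAL:
 *
 *	struct __sk_buff ctx = {};
 *
 *	ctx.priority = 7;
 *	ctx.cb[0]    = 0xcafe;	// lands in qdisc_skb_cb's private data
 *	attr.test.ctx_in      = (unsigned long)&ctx;
 *	attr.test.ctx_size_in = sizeof(ctx);
 */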

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->priority = skb->priority;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
}

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	kfree_skb(skb);
	kfree(sk);
	kfree(ctx);
	return ret;
}
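
/* Shape of the non-L2 fixup above, spelled out: LWT programs operate on
 * a packet without an Ethernet header, but userspace expects one back,
 * so after the run we grow the headroom if needed and push hh_len zeroed
 * bytes. With hypothetical numbers, ETH_HLEN = 14 and 10 bytes of
 * headroom left:
 *
 *	nhead = HH_DATA_ALIGN(14 - 10);
 *	pskb_expand_head(skb, nhead, 0, GFP_USER);
 *	memset(__skb_push(skb, 14), 0, 14);	// zeroed dummy MAC header
 */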

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	kfree(data);
	return ret;
}
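
/* The xdp_buff above mimics what a driver RX path would hand to the
 * program:
 *
 *	data_hard_start          data == data_meta           data_end
 *	v                        v                           v
 *	+------------------------+---------------------------+
 *	| XDP_PACKET_HEADROOM    |    packet (size bytes)    |
 *	|   + NET_IP_ALIGN       |                           |
 *	+------------------------+---------------------------+
 *
 * If the program moved data or data_end (e.g. via bpf_xdp_adjust_head()
 * or bpf_xdp_adjust_tail()), the comparison above notices and the
 * adjusted length is what bpf_test_finish() copies back to userspace.
 */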

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	const struct ethhdr *eth;
	u32 retval, duration;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size);

		if (signal_pending(current)) {
			preempt_enable();
			rcu_read_unlock();

			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);

out:
	kfree(data);
	return ret;
}
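
/* Illustrative consumer (assumed names): the "output" of a flow
 * dissector test run is not a packet but the dissected keys, so
 * userspace points data_out at a struct bpf_flow_keys:
 *
 *	struct bpf_flow_keys keys = {};
 *
 *	attr.test.prog_fd       = prog_fd;
 *	attr.test.data_in       = (unsigned long)pkt;
 *	attr.test.data_size_in  = pkt_len;	// must be >= ETH_HLEN
 *	attr.test.data_out      = (unsigned long)&keys;
 *	attr.test.data_size_out = sizeof(keys);
 *
 * retval carries the program's verdict and duration the averaged per-run
 * nanoseconds, exactly as in the other runners above.
 */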
459