1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Facebook
3  */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/btf_ids.h>
7 #include <linux/slab.h>
8 #include <linux/init.h>
9 #include <linux/vmalloc.h>
10 #include <linux/etherdevice.h>
11 #include <linux/filter.h>
12 #include <linux/rcupdate_trace.h>
13 #include <linux/sched/signal.h>
14 #include <net/bpf_sk_storage.h>
15 #include <net/hotdata.h>
16 #include <net/sock.h>
17 #include <net/tcp.h>
18 #include <net/net_namespace.h>
19 #include <net/page_pool/helpers.h>
20 #include <linux/error-injection.h>
21 #include <linux/smp.h>
22 #include <linux/sock_diag.h>
23 #include <linux/netfilter.h>
24 #include <net/netdev_rx_queue.h>
25 #include <net/xdp.h>
26 #include <net/netfilter/nf_bpf_link.h>
27 
28 #define CREATE_TRACE_POINTS
29 #include <trace/events/bpf_test_run.h>
30 
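/* This file backs the BPF_PROG_TEST_RUN command of the bpf() syscall.
 * A minimal user-space sketch of how it is typically driven, assuming
 * libbpf's bpf_prog_test_run_opts() ('pkt' and 'prog_fd' are placeholders):
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		.data_in = pkt,
 *		.data_size_in = sizeof(pkt),
 *		.repeat = 1,
 *	);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *
 * opts.retval and opts.duration are then filled in by the code below.
 */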
31 struct bpf_test_timer {
32 	enum { NO_PREEMPT, NO_MIGRATE } mode;
33 	u32 i;
34 	u64 time_start, time_spent;
35 };
36 
37 static void bpf_test_timer_enter(struct bpf_test_timer *t)
38 	__acquires(rcu)
39 {
40 	rcu_read_lock();
41 	if (t->mode == NO_PREEMPT)
42 		preempt_disable();
43 	else
44 		migrate_disable();
45 
46 	t->time_start = ktime_get_ns();
47 }
48 
49 static void bpf_test_timer_leave(struct bpf_test_timer *t)
50 	__releases(rcu)
51 {
52 	t->time_start = 0;
53 
54 	if (t->mode == NO_PREEMPT)
55 		preempt_enable();
56 	else
57 		migrate_enable();
58 	rcu_read_unlock();
59 }
60 
61 static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
62 				    u32 repeat, int *err, u32 *duration)
63 	__must_hold(rcu)
64 {
65 	t->i += iterations;
66 	if (t->i >= repeat) {
67 		/* We're done. */
68 		t->time_spent += ktime_get_ns() - t->time_start;
69 		do_div(t->time_spent, t->i);
70 		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
71 		*err = 0;
72 		goto reset;
73 	}
74 
75 	if (signal_pending(current)) {
76 		/* During iteration: we've been cancelled, abort. */
77 		*err = -EINTR;
78 		goto reset;
79 	}
80 
81 	if (need_resched()) {
82 		/* During iteration: we need to reschedule between runs. */
83 		t->time_spent += ktime_get_ns() - t->time_start;
84 		bpf_test_timer_leave(t);
85 		cond_resched();
86 		bpf_test_timer_enter(t);
87 	}
88 
89 	/* Do another round. */
90 	return true;
91 
92 reset:
93 	t->i = 0;
94 	return false;
95 }
96 
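/* Typical use of the timer helpers above, as done in bpf_test_run() and
 * bpf_prog_test_run_flow_dissector() below (illustrative sketch only):
 *
 *	struct bpf_test_timer t = { .mode = NO_MIGRATE };
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		ret = <run the program once>;
 *	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
 *	bpf_test_timer_leave(&t);
 */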
97 /* We put this struct at the head of each page with a context and frame
98  * initialised when the page is allocated, so we don't have to do this on each
99  * repetition of the test run.
100  */
101 struct xdp_page_head {
102 	struct xdp_buff orig_ctx;
103 	struct xdp_buff ctx;
104 	union {
105 		/* ::data_hard_start starts here */
106 		DECLARE_FLEX_ARRAY(struct xdp_frame, frame);
107 		DECLARE_FLEX_ARRAY(u8, data);
108 	};
109 };
110 
111 struct xdp_test_data {
112 	struct xdp_buff *orig_ctx;
113 	struct xdp_rxq_info rxq;
114 	struct net_device *dev;
115 	struct page_pool *pp;
116 	struct xdp_frame **frames;
117 	struct sk_buff **skbs;
118 	struct xdp_mem_info mem;
119 	u32 batch_size;
120 	u32 frame_cnt;
121 };
122 
123 /* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE
124  * must be updated accordingly if this gets changed, otherwise BPF selftests
125  * will fail.
126  */
127 #define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
128 #define TEST_XDP_MAX_BATCH 256
129 
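/* page_pool init_callback: pre-build the xdp_buff and xdp_frame stored in
 * each page's struct xdp_page_head so that xdp_test_run_batch() only needs
 * a cheap reset_ctx() per iteration instead of a full re-initialisation.
 */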
130 static void xdp_test_run_init_page(netmem_ref netmem, void *arg)
131 {
132 	struct xdp_page_head *head =
133 		phys_to_virt(page_to_phys(netmem_to_page(netmem)));
134 	struct xdp_buff *new_ctx, *orig_ctx;
135 	u32 headroom = XDP_PACKET_HEADROOM;
136 	struct xdp_test_data *xdp = arg;
137 	size_t frm_len, meta_len;
138 	struct xdp_frame *frm;
139 	void *data;
140 
141 	orig_ctx = xdp->orig_ctx;
142 	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
143 	meta_len = orig_ctx->data - orig_ctx->data_meta;
144 	headroom -= meta_len;
145 
146 	new_ctx = &head->ctx;
147 	frm = head->frame;
148 	data = head->data;
149 	memcpy(data + headroom, orig_ctx->data_meta, frm_len);
150 
151 	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
152 	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
153 	new_ctx->data = new_ctx->data_meta + meta_len;
154 
155 	xdp_update_frame_from_buff(new_ctx, frm);
156 	frm->mem = new_ctx->rxq->mem;
157 
158 	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
159 }
160 
161 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
162 {
163 	struct page_pool *pp;
164 	int err = -ENOMEM;
165 	struct page_pool_params pp_params = {
166 		.order = 0,
167 		.flags = 0,
168 		.pool_size = xdp->batch_size,
169 		.nid = NUMA_NO_NODE,
170 		.init_callback = xdp_test_run_init_page,
171 		.init_arg = xdp,
172 	};
173 
174 	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
175 	if (!xdp->frames)
176 		return -ENOMEM;
177 
178 	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
179 	if (!xdp->skbs)
180 		goto err_skbs;
181 
182 	pp = page_pool_create(&pp_params);
183 	if (IS_ERR(pp)) {
184 		err = PTR_ERR(pp);
185 		goto err_pp;
186 	}
187 
188 	/* will copy 'mem.id' into pp->xdp_mem_id */
189 	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
190 	if (err)
191 		goto err_mmodel;
192 
193 	xdp->pp = pp;
194 
195 	/* We create a 'fake' RXQ referencing the original dev, but with an
196 	 * xdp_mem_info pointing to our page_pool
197 	 */
198 	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
199 	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
200 	xdp->rxq.mem.id = pp->xdp_mem_id;
201 	xdp->dev = orig_ctx->rxq->dev;
202 	xdp->orig_ctx = orig_ctx;
203 
204 	return 0;
205 
206 err_mmodel:
207 	page_pool_destroy(pp);
208 err_pp:
209 	kvfree(xdp->skbs);
210 err_skbs:
211 	kvfree(xdp->frames);
212 	return err;
213 }
214 
215 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
216 {
217 	xdp_unreg_mem_model(&xdp->mem);
218 	page_pool_destroy(xdp->pp);
219 	kvfree(xdp->frames);
220 	kvfree(xdp->skbs);
221 }
222 
223 static bool frame_was_changed(const struct xdp_page_head *head)
224 {
225 	/* xdp_scrub_frame() zeroes the data pointer, flags is the last field,
226 	 * i.e. it has the highest chance of being overwritten. If those two are
227 	 * untouched, it's most likely safe to skip the context reset.
228 	 */
229 	return head->frame->data != head->orig_ctx.data ||
230 	       head->frame->flags != head->orig_ctx.flags;
231 }
232 
233 static bool ctx_was_changed(struct xdp_page_head *head)
234 {
235 	return head->orig_ctx.data != head->ctx.data ||
236 		head->orig_ctx.data_meta != head->ctx.data_meta ||
237 		head->orig_ctx.data_end != head->ctx.data_end;
238 }
239 
240 static void reset_ctx(struct xdp_page_head *head)
241 {
242 	if (likely(!frame_was_changed(head) && !ctx_was_changed(head)))
243 		return;
244 
245 	head->ctx.data = head->orig_ctx.data;
246 	head->ctx.data_meta = head->orig_ctx.data_meta;
247 	head->ctx.data_end = head->orig_ctx.data_end;
248 	xdp_update_frame_from_buff(&head->ctx, head->frame);
249 	head->frame->mem = head->orig_ctx.rxq->mem;
250 }
251 
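/* Hand XDP_PASS'ed frames to the stack: bulk-allocate skbs, build one skb
 * per frame and feed them to netif_receive_skb_list(). Frames that cannot
 * be converted are returned to the page_pool via xdp_return_frame().
 */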
252 static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
253 			   struct sk_buff **skbs,
254 			   struct net_device *dev)
255 {
256 	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
257 	int i, n;
258 	LIST_HEAD(list);
259 
260 	n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,
261 				  (void **)skbs);
262 	if (unlikely(n == 0)) {
263 		for (i = 0; i < nframes; i++)
264 			xdp_return_frame(frames[i]);
265 		return -ENOMEM;
266 	}
267 
268 	for (i = 0; i < nframes; i++) {
269 		struct xdp_frame *xdpf = frames[i];
270 		struct sk_buff *skb = skbs[i];
271 
272 		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
273 		if (!skb) {
274 			xdp_return_frame(xdpf);
275 			continue;
276 		}
277 
278 		list_add_tail(&skb->list, &list);
279 	}
280 	netif_receive_skb_list(&list);
281 
282 	return 0;
283 }
284 
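/* Run one batch of live frames: allocate up to batch_size pages from the
 * page_pool, run the program on each buffer and act on the verdict.
 * XDP_TX is emulated as a redirect back to the same ifindex, XDP_PASS
 * frames are queued for xdp_recv_frames(), and XDP_DROP (or an unknown
 * verdict) returns the buffer to the pool.
 */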
285 static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
286 			      u32 repeat)
287 {
288 	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
289 	int err = 0, act, ret, i, nframes = 0, batch_sz;
290 	struct xdp_frame **frames = xdp->frames;
291 	struct bpf_redirect_info *ri;
292 	struct xdp_page_head *head;
293 	struct xdp_frame *frm;
294 	bool redirect = false;
295 	struct xdp_buff *ctx;
296 	struct page *page;
297 
298 	batch_sz = min_t(u32, repeat, xdp->batch_size);
299 
300 	local_bh_disable();
301 	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
302 	ri = bpf_net_ctx_get_ri();
303 	xdp_set_return_frame_no_direct();
304 
305 	for (i = 0; i < batch_sz; i++) {
306 		page = page_pool_dev_alloc_pages(xdp->pp);
307 		if (!page) {
308 			err = -ENOMEM;
309 			goto out;
310 		}
311 
312 		head = phys_to_virt(page_to_phys(page));
313 		reset_ctx(head);
314 		ctx = &head->ctx;
315 		frm = head->frame;
316 		xdp->frame_cnt++;
317 
318 		act = bpf_prog_run_xdp(prog, ctx);
319 
320 		/* if program changed pkt bounds we need to update the xdp_frame */
321 		if (unlikely(ctx_was_changed(head))) {
322 			ret = xdp_update_frame_from_buff(ctx, frm);
323 			if (ret) {
324 				xdp_return_buff(ctx);
325 				continue;
326 			}
327 		}
328 
329 		switch (act) {
330 		case XDP_TX:
331 			/* we can't do a real XDP_TX since we're not in the
332 			 * driver, so turn it into a REDIRECT back to the same
333 			 * index
334 			 */
335 			ri->tgt_index = xdp->dev->ifindex;
336 			ri->map_id = INT_MAX;
337 			ri->map_type = BPF_MAP_TYPE_UNSPEC;
338 			fallthrough;
339 		case XDP_REDIRECT:
340 			redirect = true;
341 			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
342 			if (ret)
343 				xdp_return_buff(ctx);
344 			break;
345 		case XDP_PASS:
346 			frames[nframes++] = frm;
347 			break;
348 		default:
349 			bpf_warn_invalid_xdp_action(NULL, prog, act);
350 			fallthrough;
351 		case XDP_DROP:
352 			xdp_return_buff(ctx);
353 			break;
354 		}
355 	}
356 
357 out:
358 	if (redirect)
359 		xdp_do_flush();
360 	if (nframes) {
361 		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
362 		if (ret)
363 			err = ret;
364 	}
365 
366 	xdp_clear_return_frame_no_direct();
367 	bpf_net_ctx_clear(bpf_net_ctx);
368 	local_bh_enable();
369 	return err;
370 }
371 
372 static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
373 				 u32 repeat, u32 batch_size, u32 *time)
374 
375 {
376 	struct xdp_test_data xdp = { .batch_size = batch_size };
377 	struct bpf_test_timer t = { .mode = NO_MIGRATE };
378 	int ret;
379 
380 	if (!repeat)
381 		repeat = 1;
382 
383 	ret = xdp_test_run_setup(&xdp, ctx);
384 	if (ret)
385 		return ret;
386 
387 	bpf_test_timer_enter(&t);
388 	do {
389 		xdp.frame_cnt = 0;
390 		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
391 		if (unlikely(ret < 0))
392 			break;
393 	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
394 	bpf_test_timer_leave(&t);
395 
396 	xdp_test_run_teardown(&xdp);
397 	return ret;
398 }
399 
400 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
401 			u32 *retval, u32 *time, bool xdp)
402 {
403 	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
404 	struct bpf_prog_array_item item = {.prog = prog};
405 	struct bpf_run_ctx *old_ctx;
406 	struct bpf_cg_run_ctx run_ctx;
407 	struct bpf_test_timer t = { NO_MIGRATE };
408 	enum bpf_cgroup_storage_type stype;
409 	int ret;
410 
411 	for_each_cgroup_storage_type(stype) {
412 		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
413 		if (IS_ERR(item.cgroup_storage[stype])) {
414 			item.cgroup_storage[stype] = NULL;
415 			for_each_cgroup_storage_type(stype)
416 				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
417 			return -ENOMEM;
418 		}
419 	}
420 
421 	if (!repeat)
422 		repeat = 1;
423 
424 	bpf_test_timer_enter(&t);
425 	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
426 	do {
427 		run_ctx.prog_item = &item;
428 		local_bh_disable();
429 		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
430 
431 		if (xdp)
432 			*retval = bpf_prog_run_xdp(prog, ctx);
433 		else
434 			*retval = bpf_prog_run(prog, ctx);
435 
436 		bpf_net_ctx_clear(bpf_net_ctx);
437 		local_bh_enable();
438 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
439 	bpf_reset_run_ctx(old_ctx);
440 	bpf_test_timer_leave(&t);
441 
442 	for_each_cgroup_storage_type(stype)
443 		bpf_cgroup_storage_free(item.cgroup_storage[stype]);
444 
445 	return ret;
446 }
447 
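/* Copy the resulting packet data (including any frags), retval and duration
 * back to user space. The data copy is clamped to test.data_size_out when
 * the user supplied a size hint; -ENOSPC signals a truncated copy.
 */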
448 static int bpf_test_finish(const union bpf_attr *kattr,
449 			   union bpf_attr __user *uattr, const void *data,
450 			   struct skb_shared_info *sinfo, u32 size,
451 			   u32 retval, u32 duration)
452 {
453 	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
454 	int err = -EFAULT;
455 	u32 copy_size = size;
456 
457 	/* Clamp copy if the user has provided a size hint, but copy the full
458 	 * buffer if not to retain old behaviour.
459 	 */
460 	if (kattr->test.data_size_out &&
461 	    copy_size > kattr->test.data_size_out) {
462 		copy_size = kattr->test.data_size_out;
463 		err = -ENOSPC;
464 	}
465 
466 	if (data_out) {
467 		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;
468 
469 		if (len < 0) {
470 			err = -ENOSPC;
471 			goto out;
472 		}
473 
474 		if (copy_to_user(data_out, data, len))
475 			goto out;
476 
477 		if (sinfo) {
478 			int i, offset = len;
479 			u32 data_len;
480 
481 			for (i = 0; i < sinfo->nr_frags; i++) {
482 				skb_frag_t *frag = &sinfo->frags[i];
483 
484 				if (offset >= copy_size) {
485 					err = -ENOSPC;
486 					break;
487 				}
488 
489 				data_len = min_t(u32, copy_size - offset,
490 						 skb_frag_size(frag));
491 
492 				if (copy_to_user(data_out + offset,
493 						 skb_frag_address(frag),
494 						 data_len))
495 					goto out;
496 
497 				offset += data_len;
498 			}
499 		}
500 	}
501 
502 	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
503 		goto out;
504 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
505 		goto out;
506 	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
507 		goto out;
508 	if (err != -ENOSPC)
509 		err = 0;
510 out:
511 	trace_bpf_test_finish(&err);
512 	return err;
513 }
514 
515 /* Integer types of various sizes and pointer combinations cover a variety of
516  * architecture-dependent calling conventions. 7+ can be supported in the
517  * future.
518  */
519 __bpf_kfunc_start_defs();
520 
521 __bpf_kfunc int bpf_fentry_test1(int a)
522 {
523 	return a + 1;
524 }
525 EXPORT_SYMBOL_GPL(bpf_fentry_test1);
526 
527 int noinline bpf_fentry_test2(int a, u64 b)
528 {
529 	return a + b;
530 }
531 
532 int noinline bpf_fentry_test3(char a, int b, u64 c)
533 {
534 	return a + b + c;
535 }
536 
537 int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
538 {
539 	return (long)a + b + c + d;
540 }
541 
542 int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
543 {
544 	return a + (long)b + c + d + e;
545 }
546 
547 int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
548 {
549 	return a + (long)b + c + d + (long)e + f;
550 }
551 
552 struct bpf_fentry_test_t {
553 	struct bpf_fentry_test_t *a;
554 };
555 
556 int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
557 {
558 	asm volatile ("": "+r"(arg));
559 	return (long)arg;
560 }
561 
562 int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
563 {
564 	return (long)arg->a;
565 }
566 
567 __bpf_kfunc u32 bpf_fentry_test9(u32 *a)
568 {
569 	return *a;
570 }
571 
572 void noinline bpf_fentry_test_sinfo(struct skb_shared_info *sinfo)
573 {
574 }
575 
576 __bpf_kfunc int bpf_modify_return_test(int a, int *b)
577 {
578 	*b += 1;
579 	return a + *b;
580 }
581 
582 __bpf_kfunc int bpf_modify_return_test2(int a, int *b, short c, int d,
583 					void *e, char f, int g)
584 {
585 	*b += 1;
586 	return a + *b + c + d + (long)e + f + g;
587 }
588 
589 __bpf_kfunc int bpf_modify_return_test_tp(int nonce)
590 {
591 	trace_bpf_trigger_tp(nonce);
592 
593 	return nonce;
594 }
595 
596 int noinline bpf_fentry_shadow_test(int a)
597 {
598 	return a + 1;
599 }
600 
601 struct prog_test_member1 {
602 	int a;
603 };
604 
605 struct prog_test_member {
606 	struct prog_test_member1 m;
607 	int c;
608 };
609 
610 struct prog_test_ref_kfunc {
611 	int a;
612 	int b;
613 	struct prog_test_member memb;
614 	struct prog_test_ref_kfunc *next;
615 	refcount_t cnt;
616 };
617 
618 __bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
619 {
620 	refcount_dec(&p->cnt);
621 }
622 
623 __bpf_kfunc void bpf_kfunc_call_test_release_dtor(void *p)
624 {
625 	bpf_kfunc_call_test_release(p);
626 }
627 CFI_NOSEAL(bpf_kfunc_call_test_release_dtor);
628 
629 __bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
630 {
631 }
632 
633 __bpf_kfunc void bpf_kfunc_call_memb_release_dtor(void *p)
634 {
635 }
636 CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor);
637 
638 __bpf_kfunc_end_defs();
639 
640 BTF_KFUNCS_START(bpf_test_modify_return_ids)
641 BTF_ID_FLAGS(func, bpf_modify_return_test)
642 BTF_ID_FLAGS(func, bpf_modify_return_test2)
643 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
644 BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
645 BTF_KFUNCS_END(bpf_test_modify_return_ids)
646 
647 static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
648 	.owner = THIS_MODULE,
649 	.set   = &bpf_test_modify_return_ids,
650 };
651 
652 BTF_KFUNCS_START(test_sk_check_kfunc_ids)
653 BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
654 BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
655 BTF_KFUNCS_END(test_sk_check_kfunc_ids)
656 
657 static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
658 			   u32 size, u32 headroom, u32 tailroom)
659 {
660 	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
661 	void *data;
662 
663 	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
664 		return ERR_PTR(-EINVAL);
665 
666 	if (user_size > size)
667 		return ERR_PTR(-EMSGSIZE);
668 
669 	size = SKB_DATA_ALIGN(size);
670 	data = kzalloc(size + headroom + tailroom, GFP_USER);
671 	if (!data)
672 		return ERR_PTR(-ENOMEM);
673 
674 	if (copy_from_user(data + headroom, data_in, user_size)) {
675 		kfree(data);
676 		return ERR_PTR(-EFAULT);
677 	}
678 
679 	return data;
680 }
681 
682 int bpf_prog_test_run_tracing(struct bpf_prog *prog,
683 			      const union bpf_attr *kattr,
684 			      union bpf_attr __user *uattr)
685 {
686 	struct bpf_fentry_test_t arg = {};
687 	u16 side_effect = 0, ret = 0;
688 	int b = 2, err = -EFAULT;
689 	u32 retval = 0;
690 
691 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
692 		return -EINVAL;
693 
694 	switch (prog->expected_attach_type) {
695 	case BPF_TRACE_FENTRY:
696 	case BPF_TRACE_FEXIT:
697 		if (bpf_fentry_test1(1) != 2 ||
698 		    bpf_fentry_test2(2, 3) != 5 ||
699 		    bpf_fentry_test3(4, 5, 6) != 15 ||
700 		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
701 		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
702 		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
703 		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
704 		    bpf_fentry_test8(&arg) != 0 ||
705 		    bpf_fentry_test9(&retval) != 0)
706 			goto out;
707 		break;
708 	case BPF_MODIFY_RETURN:
709 		ret = bpf_modify_return_test(1, &b);
710 		if (b != 2)
711 			side_effect++;
712 		b = 2;
713 		ret += bpf_modify_return_test2(1, &b, 3, 4, (void *)5, 6, 7);
714 		if (b != 2)
715 			side_effect++;
716 		break;
717 	default:
718 		goto out;
719 	}
720 
721 	retval = ((u32)side_effect << 16) | ret;
722 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
723 		goto out;
724 
725 	err = 0;
726 out:
727 	trace_bpf_test_finish(&err);
728 	return err;
729 }
730 
731 struct bpf_raw_tp_test_run_info {
732 	struct bpf_prog *prog;
733 	void *ctx;
734 	u32 retval;
735 };
736 
737 static void
738 __bpf_prog_test_run_raw_tp(void *data)
739 {
740 	struct bpf_raw_tp_test_run_info *info = data;
741 	struct bpf_trace_run_ctx run_ctx = {};
742 	struct bpf_run_ctx *old_run_ctx;
743 
744 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
745 
746 	rcu_read_lock();
747 	info->retval = bpf_prog_run(info->prog, info->ctx);
748 	rcu_read_unlock();
749 
750 	bpf_reset_run_ctx(old_run_ctx);
751 }
752 
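/* Run a raw_tracepoint program once with a user-supplied context, optionally
 * pinned to the CPU selected via BPF_F_TEST_RUN_ON_CPU and kattr->test.cpu,
 * and copy its return value back to user space.
 */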
753 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
754 			     const union bpf_attr *kattr,
755 			     union bpf_attr __user *uattr)
756 {
757 	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
758 	__u32 ctx_size_in = kattr->test.ctx_size_in;
759 	struct bpf_raw_tp_test_run_info info;
760 	int cpu = kattr->test.cpu, err = 0;
761 	int current_cpu;
762 
763 	/* doesn't support data_in/out, ctx_out, duration, or repeat */
764 	if (kattr->test.data_in || kattr->test.data_out ||
765 	    kattr->test.ctx_out || kattr->test.duration ||
766 	    kattr->test.repeat || kattr->test.batch_size)
767 		return -EINVAL;
768 
769 	if (ctx_size_in < prog->aux->max_ctx_offset ||
770 	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
771 		return -EINVAL;
772 
773 	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
774 		return -EINVAL;
775 
776 	if (ctx_size_in) {
777 		info.ctx = memdup_user(ctx_in, ctx_size_in);
778 		if (IS_ERR(info.ctx))
779 			return PTR_ERR(info.ctx);
780 	} else {
781 		info.ctx = NULL;
782 	}
783 
784 	info.prog = prog;
785 
786 	current_cpu = get_cpu();
787 	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
788 	    cpu == current_cpu) {
789 		__bpf_prog_test_run_raw_tp(&info);
790 	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
791 		/* smp_call_function_single() also checks cpu_online()
792 		 * after csd_lock(). However, since cpu is from user
793 		 * space, let's do an extra quick check to filter out
794 		 * invalid value before smp_call_function_single().
795 		 */
796 		err = -ENXIO;
797 	} else {
798 		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
799 					       &info, 1);
800 	}
801 	put_cpu();
802 
803 	if (!err &&
804 	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
805 		err = -EFAULT;
806 
807 	kfree(info.ctx);
808 	return err;
809 }
810 
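/* Copy the user-supplied context (ctx_in) into a zeroed kernel buffer of
 * max_size bytes. Any trailing bytes beyond max_size must be zero
 * (bpf_check_uarg_tail_zero()); returns NULL when neither ctx_in nor
 * ctx_out was given.
 */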
811 static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
812 {
813 	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
814 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
815 	u32 size = kattr->test.ctx_size_in;
816 	void *data;
817 	int err;
818 
819 	if (!data_in && !data_out)
820 		return NULL;
821 
822 	data = kzalloc(max_size, GFP_USER);
823 	if (!data)
824 		return ERR_PTR(-ENOMEM);
825 
826 	if (data_in) {
827 		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
828 		if (err) {
829 			kfree(data);
830 			return ERR_PTR(err);
831 		}
832 
833 		size = min_t(u32, max_size, size);
834 		if (copy_from_user(data, data_in, size)) {
835 			kfree(data);
836 			return ERR_PTR(-EFAULT);
837 		}
838 	}
839 	return data;
840 }
841 
842 static int bpf_ctx_finish(const union bpf_attr *kattr,
843 			  union bpf_attr __user *uattr, const void *data,
844 			  u32 size)
845 {
846 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
847 	int err = -EFAULT;
848 	u32 copy_size = size;
849 
850 	if (!data || !data_out)
851 		return 0;
852 
853 	if (copy_size > kattr->test.ctx_size_out) {
854 		copy_size = kattr->test.ctx_size_out;
855 		err = -ENOSPC;
856 	}
857 
858 	if (copy_to_user(data_out, data, copy_size))
859 		goto out;
860 	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
861 		goto out;
862 	if (err != -ENOSPC)
863 		err = 0;
864 out:
865 	return err;
866 }
867 
868 /**
869  * range_is_zero - test whether buffer is initialized
870  * @buf: buffer to check
871  * @from: check from this position
872  * @to: check up until (excluding) this position
873  *
874  * This function returns true if there is no non-zero byte
875  * in the buf in the range [from, to).
876  */
877 static inline bool range_is_zero(void *buf, size_t from, size_t to)
878 {
879 	return !memchr_inv((u8 *)buf + from, 0, to - from);
880 }
881 
882 static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
883 {
884 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
885 
886 	if (!__skb)
887 		return 0;
888 
889 	/* make sure the fields we don't use are zeroed */
890 	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
891 		return -EINVAL;
892 
893 	/* mark is allowed */
894 
895 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
896 			   offsetof(struct __sk_buff, priority)))
897 		return -EINVAL;
898 
899 	/* priority is allowed */
900 	/* ingress_ifindex is allowed */
901 	/* ifindex is allowed */
902 
903 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
904 			   offsetof(struct __sk_buff, cb)))
905 		return -EINVAL;
906 
907 	/* cb is allowed */
908 
909 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
910 			   offsetof(struct __sk_buff, tstamp)))
911 		return -EINVAL;
912 
913 	/* tstamp is allowed */
914 	/* wire_len is allowed */
915 	/* gso_segs is allowed */
916 
917 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
918 			   offsetof(struct __sk_buff, gso_size)))
919 		return -EINVAL;
920 
921 	/* gso_size is allowed */
922 
923 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
924 			   offsetof(struct __sk_buff, hwtstamp)))
925 		return -EINVAL;
926 
927 	/* hwtstamp is allowed */
928 
929 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
930 			   sizeof(struct __sk_buff)))
931 		return -EINVAL;
932 
933 	skb->mark = __skb->mark;
934 	skb->priority = __skb->priority;
935 	skb->skb_iif = __skb->ingress_ifindex;
936 	skb->tstamp = __skb->tstamp;
937 	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
938 
939 	if (__skb->wire_len == 0) {
940 		cb->pkt_len = skb->len;
941 	} else {
942 		if (__skb->wire_len < skb->len ||
943 		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
944 			return -EINVAL;
945 		cb->pkt_len = __skb->wire_len;
946 	}
947 
948 	if (__skb->gso_segs > GSO_MAX_SEGS)
949 		return -EINVAL;
950 	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
951 	skb_shinfo(skb)->gso_size = __skb->gso_size;
952 	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;
953 
954 	return 0;
955 }
956 
957 static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
958 {
959 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
960 
961 	if (!__skb)
962 		return;
963 
964 	__skb->mark = skb->mark;
965 	__skb->priority = skb->priority;
966 	__skb->ingress_ifindex = skb->skb_iif;
967 	__skb->ifindex = skb->dev->ifindex;
968 	__skb->tstamp = skb->tstamp;
969 	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
970 	__skb->wire_len = cb->pkt_len;
971 	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
972 	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
973 }
974 
975 static struct proto bpf_dummy_proto = {
976 	.name   = "bpf_dummy",
977 	.owner  = THIS_MODULE,
978 	.obj_size = sizeof(struct sock),
979 };
980 
981 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
982 			  union bpf_attr __user *uattr)
983 {
984 	bool is_l2 = false, is_direct_pkt_access = false;
985 	struct net *net = current->nsproxy->net_ns;
986 	struct net_device *dev = net->loopback_dev;
987 	u32 size = kattr->test.data_size_in;
988 	u32 repeat = kattr->test.repeat;
989 	struct __sk_buff *ctx = NULL;
990 	u32 retval, duration;
991 	int hh_len = ETH_HLEN;
992 	struct sk_buff *skb;
993 	struct sock *sk;
994 	void *data;
995 	int ret;
996 
997 	if ((kattr->test.flags & ~BPF_F_TEST_SKB_CHECKSUM_COMPLETE) ||
998 	    kattr->test.cpu || kattr->test.batch_size)
999 		return -EINVAL;
1000 
1001 	data = bpf_test_init(kattr, kattr->test.data_size_in,
1002 			     size, NET_SKB_PAD + NET_IP_ALIGN,
1003 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1004 	if (IS_ERR(data))
1005 		return PTR_ERR(data);
1006 
1007 	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
1008 	if (IS_ERR(ctx)) {
1009 		kfree(data);
1010 		return PTR_ERR(ctx);
1011 	}
1012 
1013 	switch (prog->type) {
1014 	case BPF_PROG_TYPE_SCHED_CLS:
1015 	case BPF_PROG_TYPE_SCHED_ACT:
1016 		is_l2 = true;
1017 		fallthrough;
1018 	case BPF_PROG_TYPE_LWT_IN:
1019 	case BPF_PROG_TYPE_LWT_OUT:
1020 	case BPF_PROG_TYPE_LWT_XMIT:
1021 		is_direct_pkt_access = true;
1022 		break;
1023 	default:
1024 		break;
1025 	}
1026 
1027 	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
1028 	if (!sk) {
1029 		kfree(data);
1030 		kfree(ctx);
1031 		return -ENOMEM;
1032 	}
1033 	sock_init_data(NULL, sk);
1034 
1035 	skb = slab_build_skb(data);
1036 	if (!skb) {
1037 		kfree(data);
1038 		kfree(ctx);
1039 		sk_free(sk);
1040 		return -ENOMEM;
1041 	}
1042 	skb->sk = sk;
1043 
1044 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1045 	__skb_put(skb, size);
1046 
1047 	if (ctx && ctx->ifindex > 1) {
1048 		dev = dev_get_by_index(net, ctx->ifindex);
1049 		if (!dev) {
1050 			ret = -ENODEV;
1051 			goto out;
1052 		}
1053 	}
1054 	skb->protocol = eth_type_trans(skb, dev);
1055 	skb_reset_network_header(skb);
1056 
1057 	switch (skb->protocol) {
1058 	case htons(ETH_P_IP):
1059 		sk->sk_family = AF_INET;
1060 		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
1061 			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
1062 			sk->sk_daddr = ip_hdr(skb)->daddr;
1063 		}
1064 		break;
1065 #if IS_ENABLED(CONFIG_IPV6)
1066 	case htons(ETH_P_IPV6):
1067 		sk->sk_family = AF_INET6;
1068 		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
1069 			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
1070 			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
1071 		}
1072 		break;
1073 #endif
1074 	default:
1075 		break;
1076 	}
1077 
1078 	if (is_l2)
1079 		__skb_push(skb, hh_len);
1080 	if (is_direct_pkt_access)
1081 		bpf_compute_data_pointers(skb);
1082 
1083 	ret = convert___skb_to_skb(skb, ctx);
1084 	if (ret)
1085 		goto out;
1086 
1087 	if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
1088 		const int off = skb_network_offset(skb);
1089 		int len = skb->len - off;
1090 
1091 		skb->csum = skb_checksum(skb, off, len, 0);
1092 		skb->ip_summed = CHECKSUM_COMPLETE;
1093 	}
1094 
1095 	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
1096 	if (ret)
1097 		goto out;
1098 	if (!is_l2) {
1099 		if (skb_headroom(skb) < hh_len) {
1100 			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
1101 
1102 			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
1103 				ret = -ENOMEM;
1104 				goto out;
1105 			}
1106 		}
1107 		memset(__skb_push(skb, hh_len), 0, hh_len);
1108 	}
1109 
1110 	if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
1111 		const int off = skb_network_offset(skb);
1112 		int len = skb->len - off;
1113 		__wsum csum;
1114 
1115 		csum = skb_checksum(skb, off, len, 0);
1116 
1117 		if (csum_fold(skb->csum) != csum_fold(csum)) {
1118 			ret = -EBADMSG;
1119 			goto out;
1120 		}
1121 	}
1122 
1123 	convert_skb_to___skb(skb, ctx);
1124 
1125 	size = skb->len;
1126 	/* bpf program can never convert linear skb to non-linear */
1127 	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
1128 		size = skb_headlen(skb);
1129 	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
1130 			      duration);
1131 	if (!ret)
1132 		ret = bpf_ctx_finish(kattr, uattr, ctx,
1133 				     sizeof(struct __sk_buff));
1134 out:
1135 	if (dev && dev != net->loopback_dev)
1136 		dev_put(dev);
1137 	kfree_skb(skb);
1138 	sk_free(sk);
1139 	kfree(ctx);
1140 	return ret;
1141 }
1142 
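/* Apply a user-supplied xdp_md to the kernel xdp_buff: validate the
 * ingress_ifindex/rx_queue_index pair, point the buff at the matching
 * registered xdp_rxq (taking a device reference that is dropped again in
 * xdp_convert_buff_to_md()) and set the metadata length from ->data.
 */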
1143 static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
1144 {
1145 	unsigned int ingress_ifindex, rx_queue_index;
1146 	struct netdev_rx_queue *rxqueue;
1147 	struct net_device *device;
1148 
1149 	if (!xdp_md)
1150 		return 0;
1151 
1152 	if (xdp_md->egress_ifindex != 0)
1153 		return -EINVAL;
1154 
1155 	ingress_ifindex = xdp_md->ingress_ifindex;
1156 	rx_queue_index = xdp_md->rx_queue_index;
1157 
1158 	if (!ingress_ifindex && rx_queue_index)
1159 		return -EINVAL;
1160 
1161 	if (ingress_ifindex) {
1162 		device = dev_get_by_index(current->nsproxy->net_ns,
1163 					  ingress_ifindex);
1164 		if (!device)
1165 			return -ENODEV;
1166 
1167 		if (rx_queue_index >= device->real_num_rx_queues)
1168 			goto free_dev;
1169 
1170 		rxqueue = __netif_get_rx_queue(device, rx_queue_index);
1171 
1172 		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
1173 			goto free_dev;
1174 
1175 		xdp->rxq = &rxqueue->xdp_rxq;
1176 		/* The device is now tracked in the xdp->rxq for later
1177 		 * dev_put()
1178 		 */
1179 	}
1180 
1181 	xdp->data = xdp->data_meta + xdp_md->data;
1182 	return 0;
1183 
1184 free_dev:
1185 	dev_put(device);
1186 	return -EINVAL;
1187 }
1188 
1189 static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
1190 {
1191 	if (!xdp_md)
1192 		return;
1193 
1194 	xdp_md->data = xdp->data - xdp->data_meta;
1195 	xdp_md->data_end = xdp->data_end - xdp->data_meta;
1196 
1197 	if (xdp_md->ingress_ifindex)
1198 		dev_put(xdp->rxq->dev);
1199 }
1200 
1201 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1202 			  union bpf_attr __user *uattr)
1203 {
1204 	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
1205 	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1206 	u32 batch_size = kattr->test.batch_size;
1207 	u32 retval = 0, duration, max_data_sz;
1208 	u32 size = kattr->test.data_size_in;
1209 	u32 headroom = XDP_PACKET_HEADROOM;
1210 	u32 repeat = kattr->test.repeat;
1211 	struct netdev_rx_queue *rxqueue;
1212 	struct skb_shared_info *sinfo;
1213 	struct xdp_buff xdp = {};
1214 	int i, ret = -EINVAL;
1215 	struct xdp_md *ctx;
1216 	void *data;
1217 
1218 	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
1219 	    prog->expected_attach_type == BPF_XDP_CPUMAP)
1220 		return -EINVAL;
1221 
1222 	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
1223 		return -EINVAL;
1224 
1225 	if (bpf_prog_is_dev_bound(prog->aux))
1226 		return -EINVAL;
1227 
1228 	if (do_live) {
1229 		if (!batch_size)
1230 			batch_size = NAPI_POLL_WEIGHT;
1231 		else if (batch_size > TEST_XDP_MAX_BATCH)
1232 			return -E2BIG;
1233 
1234 		headroom += sizeof(struct xdp_page_head);
1235 	} else if (batch_size) {
1236 		return -EINVAL;
1237 	}
1238 
1239 	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
1240 	if (IS_ERR(ctx))
1241 		return PTR_ERR(ctx);
1242 
1243 	if (ctx) {
1244 		/* There can't be user provided data before the meta data */
1245 		if (ctx->data_meta || ctx->data_end != size ||
1246 		    ctx->data > ctx->data_end ||
1247 		    unlikely(xdp_metalen_invalid(ctx->data)) ||
1248 		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
1249 			goto free_ctx;
1250 		/* Meta data is allocated from the headroom */
1251 		headroom -= ctx->data;
1252 	}
1253 
1254 	max_data_sz = 4096 - headroom - tailroom;
1255 	if (size > max_data_sz) {
1256 		/* disallow live data mode for jumbo frames */
1257 		if (do_live)
1258 			goto free_ctx;
1259 		size = max_data_sz;
1260 	}
1261 
1262 	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
1263 	if (IS_ERR(data)) {
1264 		ret = PTR_ERR(data);
1265 		goto free_ctx;
1266 	}
1267 
1268 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
1269 	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
1270 	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
1271 	xdp_prepare_buff(&xdp, data, headroom, size, true);
1272 	sinfo = xdp_get_shared_info_from_buff(&xdp);
1273 
1274 	ret = xdp_convert_md_to_buff(ctx, &xdp);
1275 	if (ret)
1276 		goto free_data;
1277 
1278 	if (unlikely(kattr->test.data_size_in > size)) {
1279 		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
1280 
1281 		while (size < kattr->test.data_size_in) {
1282 			struct page *page;
1283 			skb_frag_t *frag;
1284 			u32 data_len;
1285 
1286 			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
1287 				ret = -ENOMEM;
1288 				goto out;
1289 			}
1290 
1291 			page = alloc_page(GFP_KERNEL);
1292 			if (!page) {
1293 				ret = -ENOMEM;
1294 				goto out;
1295 			}
1296 
1297 			frag = &sinfo->frags[sinfo->nr_frags++];
1298 
1299 			data_len = min_t(u32, kattr->test.data_size_in - size,
1300 					 PAGE_SIZE);
1301 			skb_frag_fill_page_desc(frag, page, 0, data_len);
1302 
1303 			if (copy_from_user(page_address(page), data_in + size,
1304 					   data_len)) {
1305 				ret = -EFAULT;
1306 				goto out;
1307 			}
1308 			sinfo->xdp_frags_size += data_len;
1309 			size += data_len;
1310 		}
1311 		xdp_buff_set_frags_flag(&xdp);
1312 	}
1313 
1314 	if (repeat > 1)
1315 		bpf_prog_change_xdp(NULL, prog);
1316 
1317 	if (do_live)
1318 		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
1319 	else
1320 		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
1321 	/* We convert the xdp_buff back to an xdp_md before checking the return
1322 	 * code so the reference count of any held netdevice will be decremented
1323 	 * even if the test run failed.
1324 	 */
1325 	xdp_convert_buff_to_md(&xdp, ctx);
1326 	if (ret)
1327 		goto out;
1328 
1329 	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
1330 	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
1331 			      retval, duration);
1332 	if (!ret)
1333 		ret = bpf_ctx_finish(kattr, uattr, ctx,
1334 				     sizeof(struct xdp_md));
1335 
1336 out:
1337 	if (repeat > 1)
1338 		bpf_prog_change_xdp(prog, NULL);
1339 free_data:
1340 	for (i = 0; i < sinfo->nr_frags; i++)
1341 		__free_page(skb_frag_page(&sinfo->frags[i]));
1342 	kfree(data);
1343 free_ctx:
1344 	kfree(ctx);
1345 	return ret;
1346 }
1347 
1348 static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
1349 {
1350 	/* make sure the fields we don't use are zeroed */
1351 	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
1352 		return -EINVAL;
1353 
1354 	/* flags is allowed */
1355 
1356 	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
1357 			   sizeof(struct bpf_flow_keys)))
1358 		return -EINVAL;
1359 
1360 	return 0;
1361 }
1362 
1363 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1364 				     const union bpf_attr *kattr,
1365 				     union bpf_attr __user *uattr)
1366 {
1367 	struct bpf_test_timer t = { NO_PREEMPT };
1368 	u32 size = kattr->test.data_size_in;
1369 	struct bpf_flow_dissector ctx = {};
1370 	u32 repeat = kattr->test.repeat;
1371 	struct bpf_flow_keys *user_ctx;
1372 	struct bpf_flow_keys flow_keys;
1373 	const struct ethhdr *eth;
1374 	unsigned int flags = 0;
1375 	u32 retval, duration;
1376 	void *data;
1377 	int ret;
1378 
1379 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1380 		return -EINVAL;
1381 
1382 	if (size < ETH_HLEN)
1383 		return -EINVAL;
1384 
1385 	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
1386 	if (IS_ERR(data))
1387 		return PTR_ERR(data);
1388 
1389 	eth = (struct ethhdr *)data;
1390 
1391 	if (!repeat)
1392 		repeat = 1;
1393 
1394 	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
1395 	if (IS_ERR(user_ctx)) {
1396 		kfree(data);
1397 		return PTR_ERR(user_ctx);
1398 	}
1399 	if (user_ctx) {
1400 		ret = verify_user_bpf_flow_keys(user_ctx);
1401 		if (ret)
1402 			goto out;
1403 		flags = user_ctx->flags;
1404 	}
1405 
1406 	ctx.flow_keys = &flow_keys;
1407 	ctx.data = data;
1408 	ctx.data_end = (__u8 *)data + size;
1409 
1410 	bpf_test_timer_enter(&t);
1411 	do {
1412 		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
1413 					  size, flags);
1414 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1415 	bpf_test_timer_leave(&t);
1416 
1417 	if (ret < 0)
1418 		goto out;
1419 
1420 	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
1421 			      sizeof(flow_keys), retval, duration);
1422 	if (!ret)
1423 		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
1424 				     sizeof(struct bpf_flow_keys));
1425 
1426 out:
1427 	kfree(user_ctx);
1428 	kfree(data);
1429 	return ret;
1430 }
1431 
1432 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
1433 				union bpf_attr __user *uattr)
1434 {
1435 	struct bpf_test_timer t = { NO_PREEMPT };
1436 	struct bpf_prog_array *progs = NULL;
1437 	struct bpf_sk_lookup_kern ctx = {};
1438 	u32 repeat = kattr->test.repeat;
1439 	struct bpf_sk_lookup *user_ctx;
1440 	u32 retval, duration;
1441 	int ret = -EINVAL;
1442 
1443 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1444 		return -EINVAL;
1445 
1446 	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
1447 	    kattr->test.data_size_out)
1448 		return -EINVAL;
1449 
1450 	if (!repeat)
1451 		repeat = 1;
1452 
1453 	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
1454 	if (IS_ERR(user_ctx))
1455 		return PTR_ERR(user_ctx);
1456 
1457 	if (!user_ctx)
1458 		return -EINVAL;
1459 
1460 	if (user_ctx->sk)
1461 		goto out;
1462 
1463 	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
1464 		goto out;
1465 
1466 	if (user_ctx->local_port > U16_MAX) {
1467 		ret = -ERANGE;
1468 		goto out;
1469 	}
1470 
1471 	ctx.family = (u16)user_ctx->family;
1472 	ctx.protocol = (u16)user_ctx->protocol;
1473 	ctx.dport = (u16)user_ctx->local_port;
1474 	ctx.sport = user_ctx->remote_port;
1475 
1476 	switch (ctx.family) {
1477 	case AF_INET:
1478 		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
1479 		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
1480 		break;
1481 
1482 #if IS_ENABLED(CONFIG_IPV6)
1483 	case AF_INET6:
1484 		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
1485 		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
1486 		break;
1487 #endif
1488 
1489 	default:
1490 		ret = -EAFNOSUPPORT;
1491 		goto out;
1492 	}
1493 
1494 	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
1495 	if (!progs) {
1496 		ret = -ENOMEM;
1497 		goto out;
1498 	}
1499 
1500 	progs->items[0].prog = prog;
1501 
1502 	bpf_test_timer_enter(&t);
1503 	do {
1504 		ctx.selected_sk = NULL;
1505 		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
1506 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1507 	bpf_test_timer_leave(&t);
1508 
1509 	if (ret < 0)
1510 		goto out;
1511 
1512 	user_ctx->cookie = 0;
1513 	if (ctx.selected_sk) {
1514 		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
1515 			ret = -EOPNOTSUPP;
1516 			goto out;
1517 		}
1518 
1519 		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
1520 	}
1521 
1522 	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1523 	if (!ret)
1524 		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
1525 
1526 out:
1527 	bpf_prog_array_free(progs);
1528 	kfree(user_ctx);
1529 	return ret;
1530 }
1531 
1532 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
1533 			      const union bpf_attr *kattr,
1534 			      union bpf_attr __user *uattr)
1535 {
1536 	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
1537 	__u32 ctx_size_in = kattr->test.ctx_size_in;
1538 	void *ctx = NULL;
1539 	u32 retval;
1540 	int err = 0;
1541 
1542 	/* doesn't support data_in/out, ctx_out, duration, repeat, or flags */
1543 	if (kattr->test.data_in || kattr->test.data_out ||
1544 	    kattr->test.ctx_out || kattr->test.duration ||
1545 	    kattr->test.repeat || kattr->test.flags ||
1546 	    kattr->test.batch_size)
1547 		return -EINVAL;
1548 
1549 	if (ctx_size_in < prog->aux->max_ctx_offset ||
1550 	    ctx_size_in > U16_MAX)
1551 		return -EINVAL;
1552 
1553 	if (ctx_size_in) {
1554 		ctx = memdup_user(ctx_in, ctx_size_in);
1555 		if (IS_ERR(ctx))
1556 			return PTR_ERR(ctx);
1557 	}
1558 
1559 	rcu_read_lock_trace();
1560 	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
1561 	rcu_read_unlock_trace();
1562 
1563 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
1564 		err = -EFAULT;
1565 		goto out;
1566 	}
1567 	if (ctx_size_in)
1568 		if (copy_to_user(ctx_in, ctx, ctx_size_in))
1569 			err = -EFAULT;
1570 out:
1571 	kfree(ctx);
1572 	return err;
1573 }
1574 
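/* Build the kernel nf_hook_state from the user-supplied template: only pf
 * and hook may be set by the user; the in/out devices are filled in with
 * the device passed by the caller (the loopback device) according to the
 * selected hook point.
 */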
1575 static int verify_and_copy_hook_state(struct nf_hook_state *state,
1576 				      const struct nf_hook_state *user,
1577 				      struct net_device *dev)
1578 {
1579 	if (user->in || user->out)
1580 		return -EINVAL;
1581 
1582 	if (user->net || user->sk || user->okfn)
1583 		return -EINVAL;
1584 
1585 	switch (user->pf) {
1586 	case NFPROTO_IPV4:
1587 	case NFPROTO_IPV6:
1588 		switch (state->hook) {
1589 		case NF_INET_PRE_ROUTING:
1590 			state->in = dev;
1591 			break;
1592 		case NF_INET_LOCAL_IN:
1593 			state->in = dev;
1594 			break;
1595 		case NF_INET_FORWARD:
1596 			state->in = dev;
1597 			state->out = dev;
1598 			break;
1599 		case NF_INET_LOCAL_OUT:
1600 			state->out = dev;
1601 			break;
1602 		case NF_INET_POST_ROUTING:
1603 			state->out = dev;
1604 			break;
1605 		}
1606 
1607 		break;
1608 	default:
1609 		return -EINVAL;
1610 	}
1611 
1612 	state->pf = user->pf;
1613 	state->hook = user->hook;
1614 
1615 	return 0;
1616 }
1617 
1618 static __be16 nfproto_eth(int nfproto)
1619 {
1620 	switch (nfproto) {
1621 	case NFPROTO_IPV4:
1622 		return htons(ETH_P_IP);
1623 	case NFPROTO_IPV6:
1624 		break;
1625 	}
1626 
1627 	return htons(ETH_P_IPV6);
1628 }
1629 
1630 int bpf_prog_test_run_nf(struct bpf_prog *prog,
1631 			 const union bpf_attr *kattr,
1632 			 union bpf_attr __user *uattr)
1633 {
1634 	struct net *net = current->nsproxy->net_ns;
1635 	struct net_device *dev = net->loopback_dev;
1636 	struct nf_hook_state *user_ctx, hook_state = {
1637 		.pf = NFPROTO_IPV4,
1638 		.hook = NF_INET_LOCAL_OUT,
1639 	};
1640 	u32 size = kattr->test.data_size_in;
1641 	u32 repeat = kattr->test.repeat;
1642 	struct bpf_nf_ctx ctx = {
1643 		.state = &hook_state,
1644 	};
1645 	struct sk_buff *skb = NULL;
1646 	u32 retval, duration;
1647 	void *data;
1648 	int ret;
1649 
1650 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1651 		return -EINVAL;
1652 
1653 	if (size < sizeof(struct iphdr))
1654 		return -EINVAL;
1655 
1656 	data = bpf_test_init(kattr, kattr->test.data_size_in, size,
1657 			     NET_SKB_PAD + NET_IP_ALIGN,
1658 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1659 	if (IS_ERR(data))
1660 		return PTR_ERR(data);
1661 
1662 	if (!repeat)
1663 		repeat = 1;
1664 
1665 	user_ctx = bpf_ctx_init(kattr, sizeof(struct nf_hook_state));
1666 	if (IS_ERR(user_ctx)) {
1667 		kfree(data);
1668 		return PTR_ERR(user_ctx);
1669 	}
1670 
1671 	if (user_ctx) {
1672 		ret = verify_and_copy_hook_state(&hook_state, user_ctx, dev);
1673 		if (ret)
1674 			goto out;
1675 	}
1676 
1677 	skb = slab_build_skb(data);
1678 	if (!skb) {
1679 		ret = -ENOMEM;
1680 		goto out;
1681 	}
1682 
1683 	data = NULL; /* data released via kfree_skb */
1684 
1685 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1686 	__skb_put(skb, size);
1687 
1688 	ret = -EINVAL;
1689 
1690 	if (hook_state.hook != NF_INET_LOCAL_OUT) {
1691 		if (size < ETH_HLEN + sizeof(struct iphdr))
1692 			goto out;
1693 
1694 		skb->protocol = eth_type_trans(skb, dev);
1695 		switch (skb->protocol) {
1696 		case htons(ETH_P_IP):
1697 			if (hook_state.pf == NFPROTO_IPV4)
1698 				break;
1699 			goto out;
1700 		case htons(ETH_P_IPV6):
1701 			if (size < ETH_HLEN + sizeof(struct ipv6hdr))
1702 				goto out;
1703 			if (hook_state.pf == NFPROTO_IPV6)
1704 				break;
1705 			goto out;
1706 		default:
1707 			ret = -EPROTO;
1708 			goto out;
1709 		}
1710 
1711 		skb_reset_network_header(skb);
1712 	} else {
1713 		skb->protocol = nfproto_eth(hook_state.pf);
1714 	}
1715 
1716 	ctx.skb = skb;
1717 
1718 	ret = bpf_test_run(prog, &ctx, repeat, &retval, &duration, false);
1719 	if (ret)
1720 		goto out;
1721 
1722 	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1723 
1724 out:
1725 	kfree(user_ctx);
1726 	kfree_skb(skb);
1727 	kfree(data);
1728 	return ret;
1729 }
1730 
1731 static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
1732 	.owner = THIS_MODULE,
1733 	.set   = &test_sk_check_kfunc_ids,
1734 };
1735 
1736 BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
1737 BTF_ID(struct, prog_test_ref_kfunc)
1738 BTF_ID(func, bpf_kfunc_call_test_release_dtor)
1739 BTF_ID(struct, prog_test_member)
1740 BTF_ID(func, bpf_kfunc_call_memb_release_dtor)
1741 
1742 static int __init bpf_prog_test_run_init(void)
1743 {
1744 	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
1745 		{
1746 		  .btf_id       = bpf_prog_test_dtor_kfunc_ids[0],
1747 		  .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
1748 		},
1749 		{
1750 		  .btf_id	= bpf_prog_test_dtor_kfunc_ids[2],
1751 		  .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
1752 		},
1753 	};
1754 	int ret;
1755 
1756 	ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
1757 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
1758 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
1759 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
1760 	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
1761 						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
1762 						  THIS_MODULE);
1763 }
1764 late_initcall(bpf_prog_test_run_init);
1765