xref: /linux/net/bpf/test_run.c (revision 5e3fee34f626a8cb8715f5b5409416c481714ebf)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Facebook
3  */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/btf_ids.h>
7 #include <linux/slab.h>
8 #include <linux/init.h>
9 #include <linux/vmalloc.h>
10 #include <linux/etherdevice.h>
11 #include <linux/filter.h>
12 #include <linux/rcupdate_trace.h>
13 #include <linux/sched/signal.h>
14 #include <net/bpf_sk_storage.h>
15 #include <net/hotdata.h>
16 #include <net/sock.h>
17 #include <net/tcp.h>
18 #include <net/net_namespace.h>
19 #include <net/page_pool/helpers.h>
20 #include <linux/error-injection.h>
21 #include <linux/smp.h>
22 #include <linux/sock_diag.h>
23 #include <linux/netfilter.h>
24 #include <net/netdev_rx_queue.h>
25 #include <net/xdp.h>
26 #include <net/netfilter/nf_bpf_link.h>
27 
28 #define CREATE_TRACE_POINTS
29 #include <trace/events/bpf_test_run.h>
30 
31 struct bpf_test_timer {
32 	enum { NO_PREEMPT, NO_MIGRATE } mode;
33 	u32 i;
34 	u64 time_start, time_spent;
35 };
36 
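/* Timing harness shared by all test-run loops below. The intended pattern is:
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		<run one batch of the program under test>
 *	} while (bpf_test_timer_continue(&t, iterations, repeat, &err, &duration));
 *	bpf_test_timer_leave(&t);
 *
 * enter() pins the runner (no preemption or no migration, depending on mode)
 * and starts the clock; continue() accumulates elapsed time, returns false
 * with the average per-iteration duration once 'repeat' iterations are done,
 * stops early on a pending signal, and drops out of the critical section to
 * reschedule when needed.
 */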
37 static void bpf_test_timer_enter(struct bpf_test_timer *t)
38 	__acquires(rcu)
39 {
40 	rcu_read_lock();
41 	if (t->mode == NO_PREEMPT)
42 		preempt_disable();
43 	else
44 		migrate_disable();
45 
46 	t->time_start = ktime_get_ns();
47 }
48 
49 static void bpf_test_timer_leave(struct bpf_test_timer *t)
50 	__releases(rcu)
51 {
52 	t->time_start = 0;
53 
54 	if (t->mode == NO_PREEMPT)
55 		preempt_enable();
56 	else
57 		migrate_enable();
58 	rcu_read_unlock();
59 }
60 
61 static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
62 				    u32 repeat, int *err, u32 *duration)
63 	__must_hold(rcu)
64 {
65 	t->i += iterations;
66 	if (t->i >= repeat) {
67 		/* We're done. */
68 		t->time_spent += ktime_get_ns() - t->time_start;
69 		do_div(t->time_spent, t->i);
70 		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
71 		*err = 0;
72 		goto reset;
73 	}
74 
75 	if (signal_pending(current)) {
76 		/* During iteration: we've been cancelled, abort. */
77 		*err = -EINTR;
78 		goto reset;
79 	}
80 
81 	if (need_resched()) {
82 		/* During iteration: we need to reschedule between runs. */
83 		t->time_spent += ktime_get_ns() - t->time_start;
84 		bpf_test_timer_leave(t);
85 		cond_resched();
86 		bpf_test_timer_enter(t);
87 	}
88 
89 	/* Do another round. */
90 	return true;
91 
92 reset:
93 	t->i = 0;
94 	return false;
95 }
96 
97 /* We put this struct at the head of each page with a context and frame
98  * initialised when the page is allocated, so we don't have to do this on each
99  * repetition of the test run.
100  */
101 struct xdp_page_head {
102 	struct xdp_buff orig_ctx;
103 	struct xdp_buff ctx;
104 	union {
105 		/* ::data_hard_start starts here */
106 		DECLARE_FLEX_ARRAY(struct xdp_frame, frame);
107 		DECLARE_FLEX_ARRAY(u8, data);
108 	};
109 };
110 
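/* Per-run state for live-frame XDP test runs (BPF_F_TEST_XDP_LIVE_FRAMES).
 * A private page_pool backs the packet pages, and the frames/skbs arrays
 * (batch_size entries each) collect XDP_PASS frames so they can be handed to
 * the stack in bulk at the end of each batch.
 */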
111 struct xdp_test_data {
112 	struct xdp_buff *orig_ctx;
113 	struct xdp_rxq_info rxq;
114 	struct net_device *dev;
115 	struct page_pool *pp;
116 	struct xdp_frame **frames;
117 	struct sk_buff **skbs;
118 	struct xdp_mem_info mem;
119 	u32 batch_size;
120 	u32 frame_cnt;
121 };
122 
123 /* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE
124  * must be updated accordingly when this gets changed, otherwise BPF selftests
125  * will fail.
126  */
127 #define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
128 #define TEST_XDP_MAX_BATCH 256
129 
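/* page_pool init_callback: stamp each freshly allocated page with a copy of
 * the caller-supplied xdp_buff (metadata and packet data) plus a matching
 * xdp_frame, and save a pristine copy in head->orig_ctx so each test
 * iteration can start from unmodified contents without re-copying.
 */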
130 static void xdp_test_run_init_page(netmem_ref netmem, void *arg)
131 {
132 	struct xdp_page_head *head =
133 		phys_to_virt(page_to_phys(netmem_to_page(netmem)));
134 	struct xdp_buff *new_ctx, *orig_ctx;
135 	u32 headroom = XDP_PACKET_HEADROOM;
136 	struct xdp_test_data *xdp = arg;
137 	size_t frm_len, meta_len;
138 	struct xdp_frame *frm;
139 	void *data;
140 
141 	orig_ctx = xdp->orig_ctx;
142 	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
143 	meta_len = orig_ctx->data - orig_ctx->data_meta;
144 	headroom -= meta_len;
145 
146 	new_ctx = &head->ctx;
147 	frm = head->frame;
148 	data = head->data;
149 	memcpy(data + headroom, orig_ctx->data_meta, frm_len);
150 
151 	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
152 	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
153 	new_ctx->data = new_ctx->data_meta + meta_len;
154 
155 	xdp_update_frame_from_buff(new_ctx, frm);
156 	frm->mem_type = new_ctx->rxq->mem.type;
157 
158 	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
159 }
160 
161 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
162 {
163 	struct page_pool *pp;
164 	int err = -ENOMEM;
165 	struct page_pool_params pp_params = {
166 		.order = 0,
167 		.flags = 0,
168 		.pool_size = xdp->batch_size,
169 		.nid = NUMA_NO_NODE,
170 		.init_callback = xdp_test_run_init_page,
171 		.init_arg = xdp,
172 	};
173 
174 	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
175 	if (!xdp->frames)
176 		return -ENOMEM;
177 
178 	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
179 	if (!xdp->skbs)
180 		goto err_skbs;
181 
182 	pp = page_pool_create(&pp_params);
183 	if (IS_ERR(pp)) {
184 		err = PTR_ERR(pp);
185 		goto err_pp;
186 	}
187 
188 	/* will copy 'mem.id' into pp->xdp_mem_id */
189 	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
190 	if (err)
191 		goto err_mmodel;
192 
193 	xdp->pp = pp;
194 
195 	/* We create a 'fake' RXQ referencing the original dev, but with an
196 	 * xdp_mem_info pointing to our page_pool
197 	 */
198 	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
199 	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
200 	xdp->rxq.mem.id = pp->xdp_mem_id;
201 	xdp->dev = orig_ctx->rxq->dev;
202 	xdp->orig_ctx = orig_ctx;
203 
204 	return 0;
205 
206 err_mmodel:
207 	page_pool_destroy(pp);
208 err_pp:
209 	kvfree(xdp->skbs);
210 err_skbs:
211 	kvfree(xdp->frames);
212 	return err;
213 }
214 
215 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
216 {
217 	xdp_unreg_mem_model(&xdp->mem);
218 	page_pool_destroy(xdp->pp);
219 	kvfree(xdp->frames);
220 	kvfree(xdp->skbs);
221 }
222 
223 static bool frame_was_changed(const struct xdp_page_head *head)
224 {
225 	/* xdp_scrub_frame() zeroes the data pointer, flags is the last field,
226 	/* xdp_scrub_frame() zeroes the data pointer, and flags is the last field,
227 	 * i.e. the one most likely to be overwritten. If those two are
228 	 */
229 	return head->frame->data != head->orig_ctx.data ||
230 	       head->frame->flags != head->orig_ctx.flags;
231 }
232 
233 static bool ctx_was_changed(struct xdp_page_head *head)
234 {
235 	return head->orig_ctx.data != head->ctx.data ||
236 		head->orig_ctx.data_meta != head->ctx.data_meta ||
237 		head->orig_ctx.data_end != head->ctx.data_end;
238 }
239 
240 static void reset_ctx(struct xdp_page_head *head)
241 {
242 	if (likely(!frame_was_changed(head) && !ctx_was_changed(head)))
243 		return;
244 
245 	head->ctx.data = head->orig_ctx.data;
246 	head->ctx.data_meta = head->orig_ctx.data_meta;
247 	head->ctx.data_end = head->orig_ctx.data_end;
248 	xdp_update_frame_from_buff(&head->ctx, head->frame);
249 	head->frame->mem_type = head->orig_ctx.rxq->mem.type;
250 }
251 
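/* Pass a batch of XDP_PASS frames up the local stack: bulk-allocate skbs,
 * convert each xdp_frame with __xdp_build_skb_from_frame() and deliver them
 * via netif_receive_skb_list(). Frames that cannot be converted are returned
 * to the page_pool.
 */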
252 static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
253 			   struct sk_buff **skbs,
254 			   struct net_device *dev)
255 {
256 	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
257 	int i, n;
258 	LIST_HEAD(list);
259 
260 	n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,
261 				  (void **)skbs);
262 	if (unlikely(n == 0)) {
263 		for (i = 0; i < nframes; i++)
264 			xdp_return_frame(frames[i]);
265 		return -ENOMEM;
266 	}
267 
268 	for (i = 0; i < nframes; i++) {
269 		struct xdp_frame *xdpf = frames[i];
270 		struct sk_buff *skb = skbs[i];
271 
272 		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
273 		if (!skb) {
274 			xdp_return_frame(xdpf);
275 			continue;
276 		}
277 
278 		list_add_tail(&skb->list, &list);
279 	}
280 	netif_receive_skb_list(&list);
281 
282 	return 0;
283 }
284 
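/* Run one batch of live-frame iterations: allocate up to batch_sz pages from
 * the pool, run the program on each and act on the verdicts. XDP_TX is turned
 * into a redirect back to the same interface, XDP_PASS frames are collected
 * for xdp_recv_frames(), and everything else is returned to the pool.
 */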
285 static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
286 			      u32 repeat)
287 {
288 	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
289 	int err = 0, act, ret, i, nframes = 0, batch_sz;
290 	struct xdp_frame **frames = xdp->frames;
291 	struct bpf_redirect_info *ri;
292 	struct xdp_page_head *head;
293 	struct xdp_frame *frm;
294 	bool redirect = false;
295 	struct xdp_buff *ctx;
296 	struct page *page;
297 
298 	batch_sz = min_t(u32, repeat, xdp->batch_size);
299 
300 	local_bh_disable();
301 	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
302 	ri = bpf_net_ctx_get_ri();
303 	xdp_set_return_frame_no_direct();
304 
305 	for (i = 0; i < batch_sz; i++) {
306 		page = page_pool_dev_alloc_pages(xdp->pp);
307 		if (!page) {
308 			err = -ENOMEM;
309 			goto out;
310 		}
311 
312 		head = phys_to_virt(page_to_phys(page));
313 		reset_ctx(head);
314 		ctx = &head->ctx;
315 		frm = head->frame;
316 		xdp->frame_cnt++;
317 
318 		act = bpf_prog_run_xdp(prog, ctx);
319 
320 		/* if program changed pkt bounds we need to update the xdp_frame */
321 		if (unlikely(ctx_was_changed(head))) {
322 			ret = xdp_update_frame_from_buff(ctx, frm);
323 			if (ret) {
324 				xdp_return_buff(ctx);
325 				continue;
326 			}
327 		}
328 
329 		switch (act) {
330 		case XDP_TX:
331 			/* we can't do a real XDP_TX since we're not in the
332 			 * driver, so turn it into a REDIRECT back to the same
333 			 * index
334 			 */
335 			ri->tgt_index = xdp->dev->ifindex;
336 			ri->map_id = INT_MAX;
337 			ri->map_type = BPF_MAP_TYPE_UNSPEC;
338 			fallthrough;
339 		case XDP_REDIRECT:
340 			redirect = true;
341 			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
342 			if (ret)
343 				xdp_return_buff(ctx);
344 			break;
345 		case XDP_PASS:
346 			frames[nframes++] = frm;
347 			break;
348 		default:
349 			bpf_warn_invalid_xdp_action(NULL, prog, act);
350 			fallthrough;
351 		case XDP_DROP:
352 			xdp_return_buff(ctx);
353 			break;
354 		}
355 	}
356 
357 out:
358 	if (redirect)
359 		xdp_do_flush();
360 	if (nframes) {
361 		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
362 		if (ret)
363 			err = ret;
364 	}
365 
366 	xdp_clear_return_frame_no_direct();
367 	bpf_net_ctx_clear(bpf_net_ctx);
368 	local_bh_enable();
369 	return err;
370 }
371 
372 static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
373 				 u32 repeat, u32 batch_size, u32 *time)
374 
375 {
376 	struct xdp_test_data xdp = { .batch_size = batch_size };
377 	struct bpf_test_timer t = { .mode = NO_MIGRATE };
378 	int ret;
379 
380 	if (!repeat)
381 		repeat = 1;
382 
383 	ret = xdp_test_run_setup(&xdp, ctx);
384 	if (ret)
385 		return ret;
386 
387 	bpf_test_timer_enter(&t);
388 	do {
389 		xdp.frame_cnt = 0;
390 		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
391 		if (unlikely(ret < 0))
392 			break;
393 	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
394 	bpf_test_timer_leave(&t);
395 
396 	xdp_test_run_teardown(&xdp);
397 	return ret;
398 }
399 
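/* Generic test-run loop used by the skb, copy-mode XDP and netfilter entry
 * points: allocate per-program cgroup storage, run the program 'repeat' times
 * under the test timer with BHs disabled, and report the last return value
 * plus the average runtime per iteration in *time.
 */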
400 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
401 			u32 *retval, u32 *time, bool xdp)
402 {
403 	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
404 	struct bpf_prog_array_item item = {.prog = prog};
405 	struct bpf_run_ctx *old_ctx;
406 	struct bpf_cg_run_ctx run_ctx;
407 	struct bpf_test_timer t = { NO_MIGRATE };
408 	enum bpf_cgroup_storage_type stype;
409 	int ret;
410 
411 	for_each_cgroup_storage_type(stype) {
412 		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
413 		if (IS_ERR(item.cgroup_storage[stype])) {
414 			item.cgroup_storage[stype] = NULL;
415 			for_each_cgroup_storage_type(stype)
416 				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
417 			return -ENOMEM;
418 		}
419 	}
420 
421 	if (!repeat)
422 		repeat = 1;
423 
424 	bpf_test_timer_enter(&t);
425 	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
426 	do {
427 		run_ctx.prog_item = &item;
428 		local_bh_disable();
429 		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
430 
431 		if (xdp)
432 			*retval = bpf_prog_run_xdp(prog, ctx);
433 		else
434 			*retval = bpf_prog_run(prog, ctx);
435 
436 		bpf_net_ctx_clear(bpf_net_ctx);
437 		local_bh_enable();
438 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
439 	bpf_reset_run_ctx(old_ctx);
440 	bpf_test_timer_leave(&t);
441 
442 	for_each_cgroup_storage_type(stype)
443 		bpf_cgroup_storage_free(item.cgroup_storage[stype]);
444 
445 	return ret;
446 }
447 
448 static int bpf_test_finish(const union bpf_attr *kattr,
449 			   union bpf_attr __user *uattr, const void *data,
450 			   struct skb_shared_info *sinfo, u32 size,
451 			   u32 retval, u32 duration)
452 {
453 	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
454 	int err = -EFAULT;
455 	u32 copy_size = size;
456 
457 	/* Clamp copy if the user has provided a size hint, but copy the full
458 	 * buffer if not to retain old behaviour.
459 	 */
460 	if (kattr->test.data_size_out &&
461 	    copy_size > kattr->test.data_size_out) {
462 		copy_size = kattr->test.data_size_out;
463 		err = -ENOSPC;
464 	}
465 
466 	if (data_out) {
467 		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;
468 
469 		if (len < 0) {
470 			err = -ENOSPC;
471 			goto out;
472 		}
473 
474 		if (copy_to_user(data_out, data, len))
475 			goto out;
476 
477 		if (sinfo) {
478 			int i, offset = len;
479 			u32 data_len;
480 
481 			for (i = 0; i < sinfo->nr_frags; i++) {
482 				skb_frag_t *frag = &sinfo->frags[i];
483 
484 				if (offset >= copy_size) {
485 					err = -ENOSPC;
486 					break;
487 				}
488 
489 				data_len = min_t(u32, copy_size - offset,
490 						 skb_frag_size(frag));
491 
492 				if (copy_to_user(data_out + offset,
493 						 skb_frag_address(frag),
494 						 data_len))
495 					goto out;
496 
497 				offset += data_len;
498 			}
499 		}
500 	}
501 
502 	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
503 		goto out;
504 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
505 		goto out;
506 	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
507 		goto out;
508 	if (err != -ENOSPC)
509 		err = 0;
510 out:
511 	trace_bpf_test_finish(&err);
512 	return err;
513 }
514 
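/* For reference, a minimal user-space sketch of exercising this path,
 * assuming libbpf's bpf_prog_test_run_opts() wrapper (buffer names are
 * illustrative, not part of this file):
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .data_in = pkt,			// raw packet bytes
 *		    .data_size_in = pkt_len,
 *		    .data_out = out_buf,		// clamped by bpf_test_finish()
 *		    .data_size_out = sizeof(out_buf),
 *		    .repeat = 1);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	// on success, opts.retval and opts.duration mirror uattr->test.*
 *
 * If the output buffer is smaller than the resulting packet, bpf_test_finish()
 * copies the truncated data and returns -ENOSPC.
 */
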
515 /* Integer types of various sizes and pointer combinations cover a variety of
516  * architecture-dependent calling conventions. 7+ arguments can be supported in the
517  * future.
518  */
519 __bpf_kfunc_start_defs();
520 
521 __bpf_kfunc int bpf_fentry_test1(int a)
522 {
523 	return a + 1;
524 }
525 EXPORT_SYMBOL_GPL(bpf_fentry_test1);
526 
527 int noinline bpf_fentry_test2(int a, u64 b)
528 {
529 	return a + b;
530 }
531 
532 int noinline bpf_fentry_test3(char a, int b, u64 c)
533 {
534 	return a + b + c;
535 }
536 
537 int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
538 {
539 	return (long)a + b + c + d;
540 }
541 
542 int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
543 {
544 	return a + (long)b + c + d + e;
545 }
546 
547 int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
548 {
549 	return a + (long)b + c + d + (long)e + f;
550 }
551 
552 struct bpf_fentry_test_t {
553 	struct bpf_fentry_test_t *a;
554 };
555 
556 int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
557 {
558 	asm volatile ("": "+r"(arg));
559 	return (long)arg;
560 }
561 
562 int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
563 {
564 	return (long)arg->a;
565 }
566 
567 __bpf_kfunc u32 bpf_fentry_test9(u32 *a)
568 {
569 	return *a;
570 }
571 
572 int noinline bpf_fentry_test10(const void *a)
573 {
574 	return (long)a;
575 }
576 
577 void noinline bpf_fentry_test_sinfo(struct skb_shared_info *sinfo)
578 {
579 }
580 
581 __bpf_kfunc int bpf_modify_return_test(int a, int *b)
582 {
583 	*b += 1;
584 	return a + *b;
585 }
586 
587 __bpf_kfunc int bpf_modify_return_test2(int a, int *b, short c, int d,
588 					void *e, char f, int g)
589 {
590 	*b += 1;
591 	return a + *b + c + d + (long)e + f + g;
592 }
593 
594 __bpf_kfunc int bpf_modify_return_test_tp(int nonce)
595 {
596 	trace_bpf_trigger_tp(nonce);
597 
598 	return nonce;
599 }
600 
601 int noinline bpf_fentry_shadow_test(int a)
602 {
603 	return a + 1;
604 }
605 
606 struct prog_test_member1 {
607 	int a;
608 };
609 
610 struct prog_test_member {
611 	struct prog_test_member1 m;
612 	int c;
613 };
614 
615 struct prog_test_ref_kfunc {
616 	int a;
617 	int b;
618 	struct prog_test_member memb;
619 	struct prog_test_ref_kfunc *next;
620 	refcount_t cnt;
621 };
622 
623 __bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
624 {
625 	refcount_dec(&p->cnt);
626 }
627 
628 __bpf_kfunc void bpf_kfunc_call_test_release_dtor(void *p)
629 {
630 	bpf_kfunc_call_test_release(p);
631 }
632 CFI_NOSEAL(bpf_kfunc_call_test_release_dtor);
633 
634 __bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
635 {
636 }
637 
638 __bpf_kfunc void bpf_kfunc_call_memb_release_dtor(void *p)
639 {
640 }
641 CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor);
642 
643 __bpf_kfunc_end_defs();
644 
645 BTF_KFUNCS_START(bpf_test_modify_return_ids)
646 BTF_ID_FLAGS(func, bpf_modify_return_test)
647 BTF_ID_FLAGS(func, bpf_modify_return_test2)
648 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
649 BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
650 BTF_KFUNCS_END(bpf_test_modify_return_ids)
651 
652 static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
653 	.owner = THIS_MODULE,
654 	.set   = &bpf_test_modify_return_ids,
655 };
656 
657 BTF_KFUNCS_START(test_sk_check_kfunc_ids)
658 BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
659 BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
660 BTF_KFUNCS_END(test_sk_check_kfunc_ids)
661 
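/* Copy the user-supplied packet into a freshly allocated, zeroed buffer:
 * 'user_size' bytes of test.data_in are placed behind 'headroom' bytes of
 * slack, while the linear area itself is SKB_DATA_ALIGN(size) bytes plus
 * tailroom, so XDP callers can allocate more linear room than they copy.
 */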
662 static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
663 			   u32 size, u32 headroom, u32 tailroom)
664 {
665 	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
666 	void *data;
667 
668 	if (user_size > PAGE_SIZE - headroom - tailroom)
669 		return ERR_PTR(-EINVAL);
670 
671 	size = SKB_DATA_ALIGN(size);
672 	data = kzalloc(size + headroom + tailroom, GFP_USER);
673 	if (!data)
674 		return ERR_PTR(-ENOMEM);
675 
676 	if (copy_from_user(data + headroom, data_in, user_size)) {
677 		kfree(data);
678 		return ERR_PTR(-EFAULT);
679 	}
680 
681 	return data;
682 }
683 
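/* BPF_PROG_RUN for fentry/fexit/fmod_ret programs: rather than feeding packet
 * data, call the bpf_fentry_test*() / bpf_modify_return_test*() targets above
 * so that any programs attached to them fire, verify the expected results and
 * report observed side effects in the upper 16 bits of retval.
 */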
684 int bpf_prog_test_run_tracing(struct bpf_prog *prog,
685 			      const union bpf_attr *kattr,
686 			      union bpf_attr __user *uattr)
687 {
688 	struct bpf_fentry_test_t arg = {};
689 	u16 side_effect = 0, ret = 0;
690 	int b = 2, err = -EFAULT;
691 	u32 retval = 0;
692 
693 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
694 		return -EINVAL;
695 
696 	switch (prog->expected_attach_type) {
697 	case BPF_TRACE_FENTRY:
698 	case BPF_TRACE_FEXIT:
699 		if (bpf_fentry_test1(1) != 2 ||
700 		    bpf_fentry_test2(2, 3) != 5 ||
701 		    bpf_fentry_test3(4, 5, 6) != 15 ||
702 		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
703 		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
704 		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
705 		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
706 		    bpf_fentry_test8(&arg) != 0 ||
707 		    bpf_fentry_test9(&retval) != 0 ||
708 		    bpf_fentry_test10((void *)0) != 0)
709 			goto out;
710 		break;
711 	case BPF_MODIFY_RETURN:
712 		ret = bpf_modify_return_test(1, &b);
713 		if (b != 2)
714 			side_effect++;
715 		b = 2;
716 		ret += bpf_modify_return_test2(1, &b, 3, 4, (void *)5, 6, 7);
717 		if (b != 2)
718 			side_effect++;
719 		break;
720 	default:
721 		goto out;
722 	}
723 
724 	retval = ((u32)side_effect << 16) | ret;
725 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
726 		goto out;
727 
728 	err = 0;
729 out:
730 	trace_bpf_test_finish(&err);
731 	return err;
732 }
733 
734 struct bpf_raw_tp_test_run_info {
735 	struct bpf_prog *prog;
736 	void *ctx;
737 	u32 retval;
738 };
739 
740 static void
741 __bpf_prog_test_run_raw_tp(void *data)
742 {
743 	struct bpf_raw_tp_test_run_info *info = data;
744 	struct bpf_trace_run_ctx run_ctx = {};
745 	struct bpf_run_ctx *old_run_ctx;
746 
747 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
748 
749 	rcu_read_lock();
750 	info->retval = bpf_prog_run(info->prog, info->ctx);
751 	rcu_read_unlock();
752 
753 	bpf_reset_run_ctx(old_run_ctx);
754 }
755 
756 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
757 			     const union bpf_attr *kattr,
758 			     union bpf_attr __user *uattr)
759 {
760 	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
761 	__u32 ctx_size_in = kattr->test.ctx_size_in;
762 	struct bpf_raw_tp_test_run_info info;
763 	int cpu = kattr->test.cpu, err = 0;
764 	int current_cpu;
765 
766 	/* doesn't support data_in/out, ctx_out, duration, or repeat */
767 	if (kattr->test.data_in || kattr->test.data_out ||
768 	    kattr->test.ctx_out || kattr->test.duration ||
769 	    kattr->test.repeat || kattr->test.batch_size)
770 		return -EINVAL;
771 
772 	if (ctx_size_in < prog->aux->max_ctx_offset ||
773 	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
774 		return -EINVAL;
775 
776 	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
777 		return -EINVAL;
778 
779 	if (ctx_size_in) {
780 		info.ctx = memdup_user(ctx_in, ctx_size_in);
781 		if (IS_ERR(info.ctx))
782 			return PTR_ERR(info.ctx);
783 	} else {
784 		info.ctx = NULL;
785 	}
786 
787 	info.prog = prog;
788 
789 	current_cpu = get_cpu();
790 	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
791 	    cpu == current_cpu) {
792 		__bpf_prog_test_run_raw_tp(&info);
793 	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
794 		/* smp_call_function_single() also checks cpu_online()
795 		 * after csd_lock(). However, since cpu is from user
796 		 * space, let's do an extra quick check to filter out
797 		 * an invalid value before smp_call_function_single().
798 		 */
799 		err = -ENXIO;
800 	} else {
801 		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
802 					       &info, 1);
803 	}
804 	put_cpu();
805 
806 	if (!err &&
807 	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
808 		err = -EFAULT;
809 
810 	kfree(info.ctx);
811 	return err;
812 }
813 
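/* Allocate and fill the optional program context from test.ctx_in. The user
 * may pass fewer bytes than 'max_size' (the remainder stays zeroed), and any
 * bytes beyond max_size must be zero, as enforced by bpf_check_uarg_tail_zero().
 */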
814 static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
815 {
816 	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
817 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
818 	u32 size = kattr->test.ctx_size_in;
819 	void *data;
820 	int err;
821 
822 	if (!data_in && !data_out)
823 		return NULL;
824 
825 	data = kzalloc(max_size, GFP_USER);
826 	if (!data)
827 		return ERR_PTR(-ENOMEM);
828 
829 	if (data_in) {
830 		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
831 		if (err) {
832 			kfree(data);
833 			return ERR_PTR(err);
834 		}
835 
836 		size = min_t(u32, max_size, size);
837 		if (copy_from_user(data, data_in, size)) {
838 			kfree(data);
839 			return ERR_PTR(-EFAULT);
840 		}
841 	}
842 	return data;
843 }
844 
845 static int bpf_ctx_finish(const union bpf_attr *kattr,
846 			  union bpf_attr __user *uattr, const void *data,
847 			  u32 size)
848 {
849 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
850 	int err = -EFAULT;
851 	u32 copy_size = size;
852 
853 	if (!data || !data_out)
854 		return 0;
855 
856 	if (copy_size > kattr->test.ctx_size_out) {
857 		copy_size = kattr->test.ctx_size_out;
858 		err = -ENOSPC;
859 	}
860 
861 	if (copy_to_user(data_out, data, copy_size))
862 		goto out;
863 	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
864 		goto out;
865 	if (err != -ENOSPC)
866 		err = 0;
867 out:
868 	return err;
869 }
870 
871 /**
872  * range_is_zero - test whether buffer is initialized
873  * @buf: buffer to check
874  * @from: check from this position
875  * @to: check up until (excluding) this position
876  *
877  * This function returns true if there is no non-zero byte
878  * in buf in the range [from, to), i.e. the whole range is zero.
879  */
880 static inline bool range_is_zero(void *buf, size_t from, size_t to)
881 {
882 	return !memchr_inv((u8 *)buf + from, 0, to - from);
883 }
884 
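/* Populate the real skb from the user-provided __sk_buff context. Only a
 * handful of fields (mark, priority, ifindexes, cb, tstamp, wire_len, gso_*,
 * hwtstamp) may be set; the gaps in between must be zero, which is what the
 * range_is_zero() checks below enforce.
 */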
885 static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
886 {
887 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
888 
889 	if (!__skb)
890 		return 0;
891 
892 	/* make sure the fields we don't use are zeroed */
893 	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
894 		return -EINVAL;
895 
896 	/* mark is allowed */
897 
898 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
899 			   offsetof(struct __sk_buff, priority)))
900 		return -EINVAL;
901 
902 	/* priority is allowed */
903 	/* ingress_ifindex is allowed */
904 	/* ifindex is allowed */
905 
906 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
907 			   offsetof(struct __sk_buff, cb)))
908 		return -EINVAL;
909 
910 	/* cb is allowed */
911 
912 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
913 			   offsetof(struct __sk_buff, tstamp)))
914 		return -EINVAL;
915 
916 	/* tstamp is allowed */
917 	/* wire_len is allowed */
918 	/* gso_segs is allowed */
919 
920 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
921 			   offsetof(struct __sk_buff, gso_size)))
922 		return -EINVAL;
923 
924 	/* gso_size is allowed */
925 
926 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
927 			   offsetof(struct __sk_buff, hwtstamp)))
928 		return -EINVAL;
929 
930 	/* hwtstamp is allowed */
931 
932 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
933 			   sizeof(struct __sk_buff)))
934 		return -EINVAL;
935 
936 	skb->mark = __skb->mark;
937 	skb->priority = __skb->priority;
938 	skb->skb_iif = __skb->ingress_ifindex;
939 	skb->tstamp = __skb->tstamp;
940 	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
941 
942 	if (__skb->wire_len == 0) {
943 		cb->pkt_len = skb->len;
944 	} else {
945 		if (__skb->wire_len < skb->len ||
946 		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
947 			return -EINVAL;
948 		cb->pkt_len = __skb->wire_len;
949 	}
950 
951 	if (__skb->gso_segs > GSO_MAX_SEGS)
952 		return -EINVAL;
953 	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
954 	skb_shinfo(skb)->gso_size = __skb->gso_size;
955 	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;
956 
957 	return 0;
958 }
959 
960 static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
961 {
962 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
963 
964 	if (!__skb)
965 		return;
966 
967 	__skb->mark = skb->mark;
968 	__skb->priority = skb->priority;
969 	__skb->ingress_ifindex = skb->skb_iif;
970 	__skb->ifindex = skb->dev->ifindex;
971 	__skb->tstamp = skb->tstamp;
972 	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
973 	__skb->wire_len = cb->pkt_len;
974 	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
975 	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
976 }
977 
978 static struct proto bpf_dummy_proto = {
979 	.name   = "bpf_dummy",
980 	.owner  = THIS_MODULE,
981 	.obj_size = sizeof(struct sock),
982 };
983 
984 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
985 			  union bpf_attr __user *uattr)
986 {
987 	bool is_l2 = false, is_direct_pkt_access = false;
988 	struct net *net = current->nsproxy->net_ns;
989 	struct net_device *dev = net->loopback_dev;
990 	u32 size = kattr->test.data_size_in;
991 	u32 repeat = kattr->test.repeat;
992 	struct __sk_buff *ctx = NULL;
993 	u32 retval, duration;
994 	int hh_len = ETH_HLEN;
995 	struct sk_buff *skb;
996 	struct sock *sk;
997 	void *data;
998 	int ret;
999 
1000 	if ((kattr->test.flags & ~BPF_F_TEST_SKB_CHECKSUM_COMPLETE) ||
1001 	    kattr->test.cpu || kattr->test.batch_size)
1002 		return -EINVAL;
1003 
1004 	if (size < ETH_HLEN)
1005 		return -EINVAL;
1006 
1007 	data = bpf_test_init(kattr, kattr->test.data_size_in,
1008 			     size, NET_SKB_PAD + NET_IP_ALIGN,
1009 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1010 	if (IS_ERR(data))
1011 		return PTR_ERR(data);
1012 
1013 	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
1014 	if (IS_ERR(ctx)) {
1015 		kfree(data);
1016 		return PTR_ERR(ctx);
1017 	}
1018 
1019 	switch (prog->type) {
1020 	case BPF_PROG_TYPE_SCHED_CLS:
1021 	case BPF_PROG_TYPE_SCHED_ACT:
1022 		is_l2 = true;
1023 		fallthrough;
1024 	case BPF_PROG_TYPE_LWT_IN:
1025 	case BPF_PROG_TYPE_LWT_OUT:
1026 	case BPF_PROG_TYPE_LWT_XMIT:
1027 	case BPF_PROG_TYPE_CGROUP_SKB:
1028 		is_direct_pkt_access = true;
1029 		break;
1030 	default:
1031 		break;
1032 	}
1033 
1034 	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
1035 	if (!sk) {
1036 		kfree(data);
1037 		kfree(ctx);
1038 		return -ENOMEM;
1039 	}
1040 	sock_init_data(NULL, sk);
1041 
1042 	skb = slab_build_skb(data);
1043 	if (!skb) {
1044 		kfree(data);
1045 		kfree(ctx);
1046 		sk_free(sk);
1047 		return -ENOMEM;
1048 	}
1049 	skb->sk = sk;
1050 
1051 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1052 	__skb_put(skb, size);
1053 
1054 	if (ctx && ctx->ifindex > 1) {
1055 		dev = dev_get_by_index(net, ctx->ifindex);
1056 		if (!dev) {
1057 			ret = -ENODEV;
1058 			goto out;
1059 		}
1060 	}
1061 	skb->protocol = eth_type_trans(skb, dev);
1062 	skb_reset_network_header(skb);
1063 
1064 	switch (skb->protocol) {
1065 	case htons(ETH_P_IP):
1066 		sk->sk_family = AF_INET;
1067 		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
1068 			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
1069 			sk->sk_daddr = ip_hdr(skb)->daddr;
1070 		}
1071 		break;
1072 #if IS_ENABLED(CONFIG_IPV6)
1073 	case htons(ETH_P_IPV6):
1074 		sk->sk_family = AF_INET6;
1075 		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
1076 			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
1077 			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
1078 		}
1079 		break;
1080 #endif
1081 	default:
1082 		break;
1083 	}
1084 
1085 	if (is_l2)
1086 		__skb_push(skb, hh_len);
1087 	if (is_direct_pkt_access)
1088 		bpf_compute_data_pointers(skb);
1089 
1090 	ret = convert___skb_to_skb(skb, ctx);
1091 	if (ret)
1092 		goto out;
1093 
1094 	if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
1095 		const int off = skb_network_offset(skb);
1096 		int len = skb->len - off;
1097 
1098 		skb->csum = skb_checksum(skb, off, len, 0);
1099 		skb->ip_summed = CHECKSUM_COMPLETE;
1100 	}
1101 
1102 	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
1103 	if (ret)
1104 		goto out;
1105 	if (!is_l2) {
1106 		if (skb_headroom(skb) < hh_len) {
1107 			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
1108 
1109 			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
1110 				ret = -ENOMEM;
1111 				goto out;
1112 			}
1113 		}
1114 		memset(__skb_push(skb, hh_len), 0, hh_len);
1115 	}
1116 
1117 	if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
1118 		const int off = skb_network_offset(skb);
1119 		int len = skb->len - off;
1120 		__wsum csum;
1121 
1122 		csum = skb_checksum(skb, off, len, 0);
1123 
1124 		if (csum_fold(skb->csum) != csum_fold(csum)) {
1125 			ret = -EBADMSG;
1126 			goto out;
1127 		}
1128 	}
1129 
1130 	convert_skb_to___skb(skb, ctx);
1131 
1132 	size = skb->len;
1133 	/* bpf program can never convert linear skb to non-linear */
1134 	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
1135 		size = skb_headlen(skb);
1136 	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
1137 			      duration);
1138 	if (!ret)
1139 		ret = bpf_ctx_finish(kattr, uattr, ctx,
1140 				     sizeof(struct __sk_buff));
1141 out:
1142 	if (dev && dev != net->loopback_dev)
1143 		dev_put(dev);
1144 	kfree_skb(skb);
1145 	sk_free(sk);
1146 	kfree(ctx);
1147 	return ret;
1148 }
1149 
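/* Apply a user-provided xdp_md context to the kernel xdp_buff: an optional
 * ingress_ifindex/rx_queue_index pair selects a real RX queue (taking a
 * device reference that is dropped again in xdp_convert_buff_to_md()), and
 * xdp_md->data sets the length of the metadata area in front of the packet.
 */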
1150 static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
1151 {
1152 	unsigned int ingress_ifindex, rx_queue_index;
1153 	struct netdev_rx_queue *rxqueue;
1154 	struct net_device *device;
1155 
1156 	if (!xdp_md)
1157 		return 0;
1158 
1159 	if (xdp_md->egress_ifindex != 0)
1160 		return -EINVAL;
1161 
1162 	ingress_ifindex = xdp_md->ingress_ifindex;
1163 	rx_queue_index = xdp_md->rx_queue_index;
1164 
1165 	if (!ingress_ifindex && rx_queue_index)
1166 		return -EINVAL;
1167 
1168 	if (ingress_ifindex) {
1169 		device = dev_get_by_index(current->nsproxy->net_ns,
1170 					  ingress_ifindex);
1171 		if (!device)
1172 			return -ENODEV;
1173 
1174 		if (rx_queue_index >= device->real_num_rx_queues)
1175 			goto free_dev;
1176 
1177 		rxqueue = __netif_get_rx_queue(device, rx_queue_index);
1178 
1179 		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
1180 			goto free_dev;
1181 
1182 		xdp->rxq = &rxqueue->xdp_rxq;
1183 		/* The device is now tracked in the xdp->rxq for later
1184 		 * dev_put()
1185 		 */
1186 	}
1187 
1188 	xdp->data = xdp->data_meta + xdp_md->data;
1189 	return 0;
1190 
1191 free_dev:
1192 	dev_put(device);
1193 	return -EINVAL;
1194 }
1195 
1196 static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
1197 {
1198 	if (!xdp_md)
1199 		return;
1200 
1201 	xdp_md->data = xdp->data - xdp->data_meta;
1202 	xdp_md->data_end = xdp->data_end - xdp->data_meta;
1203 
1204 	if (xdp_md->ingress_ifindex)
1205 		dev_put(xdp->rxq->dev);
1206 }
1207 
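/* Test-run entry point for XDP programs. In the default mode the verdict and
 * the (possibly modified) packet are copied back to user space; with
 * BPF_F_TEST_XDP_LIVE_FRAMES the kernel acts on each verdict instead
 * (redirecting, transmitting or passing frames to the stack). A minimal
 * user-space sketch of the live mode, assuming libbpf's
 * bpf_prog_test_run_opts() (illustrative only):
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .data_in = pkt,
 *		    .data_size_in = pkt_len,
 *		    .repeat = 1 << 20,
 *		    .batch_size = 64,
 *		    .flags = BPF_F_TEST_XDP_LIVE_FRAMES);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 */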
1208 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1209 			  union bpf_attr __user *uattr)
1210 {
1211 	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
1212 	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1213 	u32 retval = 0, meta_sz = 0, duration, max_linear_sz, size;
1214 	u32 linear_sz = kattr->test.data_size_in;
1215 	u32 batch_size = kattr->test.batch_size;
1216 	u32 headroom = XDP_PACKET_HEADROOM;
1217 	u32 repeat = kattr->test.repeat;
1218 	struct netdev_rx_queue *rxqueue;
1219 	struct skb_shared_info *sinfo;
1220 	struct xdp_buff xdp = {};
1221 	int i, ret = -EINVAL;
1222 	struct xdp_md *ctx;
1223 	void *data;
1224 
1225 	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
1226 	    prog->expected_attach_type == BPF_XDP_CPUMAP)
1227 		return -EINVAL;
1228 
1229 	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
1230 		return -EINVAL;
1231 
1232 	if (bpf_prog_is_dev_bound(prog->aux))
1233 		return -EINVAL;
1234 
1235 	if (do_live) {
1236 		if (!batch_size)
1237 			batch_size = NAPI_POLL_WEIGHT;
1238 		else if (batch_size > TEST_XDP_MAX_BATCH)
1239 			return -E2BIG;
1240 
1241 		headroom += sizeof(struct xdp_page_head);
1242 	} else if (batch_size) {
1243 		return -EINVAL;
1244 	}
1245 
1246 	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
1247 	if (IS_ERR(ctx))
1248 		return PTR_ERR(ctx);
1249 
1250 	if (ctx) {
1251 		/* There can't be user provided data before the meta data */
1252 		if (ctx->data_meta || ctx->data_end > kattr->test.data_size_in ||
1253 		    ctx->data > ctx->data_end ||
1254 		    unlikely(xdp_metalen_invalid(ctx->data)) ||
1255 		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
1256 			goto free_ctx;
1257 		/* Meta data is allocated from the headroom */
1258 		headroom -= ctx->data;
1259 
1260 		meta_sz = ctx->data;
1261 		linear_sz = ctx->data_end;
1262 	}
1263 
1264 	max_linear_sz = PAGE_SIZE - headroom - tailroom;
1265 	linear_sz = min_t(u32, linear_sz, max_linear_sz);
1266 
1267 	/* disallow live data mode for jumbo frames */
1268 	if (do_live && kattr->test.data_size_in > linear_sz)
1269 		goto free_ctx;
1270 
1271 	if (kattr->test.data_size_in - meta_sz < ETH_HLEN)
1272 		goto free_ctx;
1273 
1274 	data = bpf_test_init(kattr, linear_sz, max_linear_sz, headroom, tailroom);
1275 	if (IS_ERR(data)) {
1276 		ret = PTR_ERR(data);
1277 		goto free_ctx;
1278 	}
1279 
1280 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
1281 	rxqueue->xdp_rxq.frag_size = PAGE_SIZE;
1282 	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
1283 	xdp_prepare_buff(&xdp, data, headroom, linear_sz, true);
1284 	sinfo = xdp_get_shared_info_from_buff(&xdp);
1285 
1286 	ret = xdp_convert_md_to_buff(ctx, &xdp);
1287 	if (ret)
1288 		goto free_data;
1289 
1290 	size = linear_sz;
1291 	if (unlikely(kattr->test.data_size_in > size)) {
1292 		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
1293 
1294 		while (size < kattr->test.data_size_in) {
1295 			struct page *page;
1296 			skb_frag_t *frag;
1297 			u32 data_len;
1298 
1299 			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
1300 				ret = -ENOMEM;
1301 				goto out;
1302 			}
1303 
1304 			page = alloc_page(GFP_KERNEL);
1305 			if (!page) {
1306 				ret = -ENOMEM;
1307 				goto out;
1308 			}
1309 
1310 			frag = &sinfo->frags[sinfo->nr_frags++];
1311 
1312 			data_len = min_t(u32, kattr->test.data_size_in - size,
1313 					 PAGE_SIZE);
1314 			skb_frag_fill_page_desc(frag, page, 0, data_len);
1315 
1316 			if (copy_from_user(page_address(page), data_in + size,
1317 					   data_len)) {
1318 				ret = -EFAULT;
1319 				goto out;
1320 			}
1321 			sinfo->xdp_frags_size += data_len;
1322 			size += data_len;
1323 		}
1324 		xdp_buff_set_frags_flag(&xdp);
1325 	}
1326 
1327 	if (repeat > 1)
1328 		bpf_prog_change_xdp(NULL, prog);
1329 
1330 	if (do_live)
1331 		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
1332 	else
1333 		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
1334 	/* We convert the xdp_buff back to an xdp_md before checking the return
1335 	 * code so the reference count of any held netdevice will be decremented
1336 	 * even if the test run failed.
1337 	 */
1338 	xdp_convert_buff_to_md(&xdp, ctx);
1339 	if (ret)
1340 		goto out;
1341 
1342 	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
1343 	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
1344 			      retval, duration);
1345 	if (!ret)
1346 		ret = bpf_ctx_finish(kattr, uattr, ctx,
1347 				     sizeof(struct xdp_md));
1348 
1349 out:
1350 	if (repeat > 1)
1351 		bpf_prog_change_xdp(prog, NULL);
1352 free_data:
1353 	for (i = 0; i < sinfo->nr_frags; i++)
1354 		__free_page(skb_frag_page(&sinfo->frags[i]));
1355 	kfree(data);
1356 free_ctx:
1357 	kfree(ctx);
1358 	return ret;
1359 }
1360 
1361 static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
1362 {
1363 	/* make sure the fields we don't use are zeroed */
1364 	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
1365 		return -EINVAL;
1366 
1367 	/* flags is allowed */
1368 
1369 	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
1370 			   sizeof(struct bpf_flow_keys)))
1371 		return -EINVAL;
1372 
1373 	return 0;
1374 }
1375 
1376 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1377 				     const union bpf_attr *kattr,
1378 				     union bpf_attr __user *uattr)
1379 {
1380 	struct bpf_test_timer t = { NO_PREEMPT };
1381 	u32 size = kattr->test.data_size_in;
1382 	struct bpf_flow_dissector ctx = {};
1383 	u32 repeat = kattr->test.repeat;
1384 	struct bpf_flow_keys *user_ctx;
1385 	struct bpf_flow_keys flow_keys;
1386 	const struct ethhdr *eth;
1387 	unsigned int flags = 0;
1388 	u32 retval, duration;
1389 	void *data;
1390 	int ret;
1391 
1392 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1393 		return -EINVAL;
1394 
1395 	if (size < ETH_HLEN)
1396 		return -EINVAL;
1397 
1398 	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
1399 	if (IS_ERR(data))
1400 		return PTR_ERR(data);
1401 
1402 	eth = (struct ethhdr *)data;
1403 
1404 	if (!repeat)
1405 		repeat = 1;
1406 
1407 	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
1408 	if (IS_ERR(user_ctx)) {
1409 		kfree(data);
1410 		return PTR_ERR(user_ctx);
1411 	}
1412 	if (user_ctx) {
1413 		ret = verify_user_bpf_flow_keys(user_ctx);
1414 		if (ret)
1415 			goto out;
1416 		flags = user_ctx->flags;
1417 	}
1418 
1419 	ctx.flow_keys = &flow_keys;
1420 	ctx.data = data;
1421 	ctx.data_end = (__u8 *)data + size;
1422 
1423 	bpf_test_timer_enter(&t);
1424 	do {
1425 		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
1426 					  size, flags);
1427 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1428 	bpf_test_timer_leave(&t);
1429 
1430 	if (ret < 0)
1431 		goto out;
1432 
1433 	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
1434 			      sizeof(flow_keys), retval, duration);
1435 	if (!ret)
1436 		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
1437 				     sizeof(struct bpf_flow_keys));
1438 
1439 out:
1440 	kfree(user_ctx);
1441 	kfree(data);
1442 	return ret;
1443 }
1444 
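/* Test-run entry point for BPF_PROG_TYPE_SK_LOOKUP: no packet data is used,
 * only a struct bpf_sk_lookup supplied via ctx_in. The program runs through
 * the same BPF_PROG_SK_LOOKUP_RUN_ARRAY() wrapper as the real hook, and the
 * cookie of any socket it selects is reported back through ctx_out.
 */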
1445 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
1446 				union bpf_attr __user *uattr)
1447 {
1448 	struct bpf_test_timer t = { NO_PREEMPT };
1449 	struct bpf_prog_array *progs = NULL;
1450 	struct bpf_sk_lookup_kern ctx = {};
1451 	u32 repeat = kattr->test.repeat;
1452 	struct bpf_sk_lookup *user_ctx;
1453 	u32 retval, duration;
1454 	int ret = -EINVAL;
1455 
1456 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1457 		return -EINVAL;
1458 
1459 	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
1460 	    kattr->test.data_size_out)
1461 		return -EINVAL;
1462 
1463 	if (!repeat)
1464 		repeat = 1;
1465 
1466 	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
1467 	if (IS_ERR(user_ctx))
1468 		return PTR_ERR(user_ctx);
1469 
1470 	if (!user_ctx)
1471 		return -EINVAL;
1472 
1473 	if (user_ctx->sk)
1474 		goto out;
1475 
1476 	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
1477 		goto out;
1478 
1479 	if (user_ctx->local_port > U16_MAX) {
1480 		ret = -ERANGE;
1481 		goto out;
1482 	}
1483 
1484 	ctx.family = (u16)user_ctx->family;
1485 	ctx.protocol = (u16)user_ctx->protocol;
1486 	ctx.dport = (u16)user_ctx->local_port;
1487 	ctx.sport = user_ctx->remote_port;
1488 
1489 	switch (ctx.family) {
1490 	case AF_INET:
1491 		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
1492 		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
1493 		break;
1494 
1495 #if IS_ENABLED(CONFIG_IPV6)
1496 	case AF_INET6:
1497 		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
1498 		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
1499 		break;
1500 #endif
1501 
1502 	default:
1503 		ret = -EAFNOSUPPORT;
1504 		goto out;
1505 	}
1506 
1507 	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
1508 	if (!progs) {
1509 		ret = -ENOMEM;
1510 		goto out;
1511 	}
1512 
1513 	progs->items[0].prog = prog;
1514 
1515 	bpf_test_timer_enter(&t);
1516 	do {
1517 		ctx.selected_sk = NULL;
1518 		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
1519 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1520 	bpf_test_timer_leave(&t);
1521 
1522 	if (ret < 0)
1523 		goto out;
1524 
1525 	user_ctx->cookie = 0;
1526 	if (ctx.selected_sk) {
1527 		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
1528 			ret = -EOPNOTSUPP;
1529 			goto out;
1530 		}
1531 
1532 		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
1533 	}
1534 
1535 	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1536 	if (!ret)
1537 		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
1538 
1539 out:
1540 	bpf_prog_array_free(progs);
1541 	kfree(user_ctx);
1542 	return ret;
1543 }
1544 
1545 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
1546 			      const union bpf_attr *kattr,
1547 			      union bpf_attr __user *uattr)
1548 {
1549 	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
1550 	__u32 ctx_size_in = kattr->test.ctx_size_in;
1551 	void *ctx = NULL;
1552 	u32 retval;
1553 	int err = 0;
1554 
1555 	/* doesn't support data_in/out, ctx_out, duration, repeat, or flags */
1556 	if (kattr->test.data_in || kattr->test.data_out ||
1557 	    kattr->test.ctx_out || kattr->test.duration ||
1558 	    kattr->test.repeat || kattr->test.flags ||
1559 	    kattr->test.batch_size)
1560 		return -EINVAL;
1561 
1562 	if (ctx_size_in < prog->aux->max_ctx_offset ||
1563 	    ctx_size_in > U16_MAX)
1564 		return -EINVAL;
1565 
1566 	if (ctx_size_in) {
1567 		ctx = memdup_user(ctx_in, ctx_size_in);
1568 		if (IS_ERR(ctx))
1569 			return PTR_ERR(ctx);
1570 	}
1571 
1572 	rcu_read_lock_trace();
1573 	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
1574 	rcu_read_unlock_trace();
1575 
1576 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
1577 		err = -EFAULT;
1578 		goto out;
1579 	}
1580 	if (ctx_size_in)
1581 		if (copy_to_user(ctx_in, ctx, ctx_size_in))
1582 			err = -EFAULT;
1583 out:
1584 	kfree(ctx);
1585 	return err;
1586 }
1587 
1588 static int verify_and_copy_hook_state(struct nf_hook_state *state,
1589 				      const struct nf_hook_state *user,
1590 				      struct net_device *dev)
1591 {
1592 	if (user->in || user->out)
1593 		return -EINVAL;
1594 
1595 	if (user->net || user->sk || user->okfn)
1596 		return -EINVAL;
1597 
1598 	switch (user->pf) {
1599 	case NFPROTO_IPV4:
1600 	case NFPROTO_IPV6:
1601 		switch (state->hook) {
1602 		case NF_INET_PRE_ROUTING:
1603 			state->in = dev;
1604 			break;
1605 		case NF_INET_LOCAL_IN:
1606 			state->in = dev;
1607 			break;
1608 		case NF_INET_FORWARD:
1609 			state->in = dev;
1610 			state->out = dev;
1611 			break;
1612 		case NF_INET_LOCAL_OUT:
1613 			state->out = dev;
1614 			break;
1615 		case NF_INET_POST_ROUTING:
1616 			state->out = dev;
1617 			break;
1618 		}
1619 
1620 		break;
1621 	default:
1622 		return -EINVAL;
1623 	}
1624 
1625 	state->pf = user->pf;
1626 	state->hook = user->hook;
1627 
1628 	return 0;
1629 }
1630 
1631 static __be16 nfproto_eth(int nfproto)
1632 {
1633 	switch (nfproto) {
1634 	case NFPROTO_IPV4:
1635 		return htons(ETH_P_IP);
1636 	case NFPROTO_IPV6:
1637 		break;
1638 	}
1639 
1640 	return htons(ETH_P_IPV6);
1641 }
1642 
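/* Test-run entry point for netfilter BPF programs: wrap the input packet in
 * an skb plus a synthetic nf_hook_state (NFPROTO_IPV4 / NF_INET_LOCAL_OUT by
 * default, overridable through ctx_in) and run the program against it.
 */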
1643 int bpf_prog_test_run_nf(struct bpf_prog *prog,
1644 			 const union bpf_attr *kattr,
1645 			 union bpf_attr __user *uattr)
1646 {
1647 	struct net *net = current->nsproxy->net_ns;
1648 	struct net_device *dev = net->loopback_dev;
1649 	struct nf_hook_state *user_ctx, hook_state = {
1650 		.pf = NFPROTO_IPV4,
1651 		.hook = NF_INET_LOCAL_OUT,
1652 	};
1653 	u32 size = kattr->test.data_size_in;
1654 	u32 repeat = kattr->test.repeat;
1655 	struct bpf_nf_ctx ctx = {
1656 		.state = &hook_state,
1657 	};
1658 	struct sk_buff *skb = NULL;
1659 	u32 retval, duration;
1660 	void *data;
1661 	int ret;
1662 
1663 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1664 		return -EINVAL;
1665 
1666 	if (size < sizeof(struct iphdr))
1667 		return -EINVAL;
1668 
1669 	data = bpf_test_init(kattr, kattr->test.data_size_in, size,
1670 			     NET_SKB_PAD + NET_IP_ALIGN,
1671 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1672 	if (IS_ERR(data))
1673 		return PTR_ERR(data);
1674 
1675 	if (!repeat)
1676 		repeat = 1;
1677 
1678 	user_ctx = bpf_ctx_init(kattr, sizeof(struct nf_hook_state));
1679 	if (IS_ERR(user_ctx)) {
1680 		kfree(data);
1681 		return PTR_ERR(user_ctx);
1682 	}
1683 
1684 	if (user_ctx) {
1685 		ret = verify_and_copy_hook_state(&hook_state, user_ctx, dev);
1686 		if (ret)
1687 			goto out;
1688 	}
1689 
1690 	skb = slab_build_skb(data);
1691 	if (!skb) {
1692 		ret = -ENOMEM;
1693 		goto out;
1694 	}
1695 
1696 	data = NULL; /* data released via kfree_skb */
1697 
1698 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1699 	__skb_put(skb, size);
1700 
1701 	ret = -EINVAL;
1702 
1703 	if (hook_state.hook != NF_INET_LOCAL_OUT) {
1704 		if (size < ETH_HLEN + sizeof(struct iphdr))
1705 			goto out;
1706 
1707 		skb->protocol = eth_type_trans(skb, dev);
1708 		switch (skb->protocol) {
1709 		case htons(ETH_P_IP):
1710 			if (hook_state.pf == NFPROTO_IPV4)
1711 				break;
1712 			goto out;
1713 		case htons(ETH_P_IPV6):
1714 			if (size < ETH_HLEN + sizeof(struct ipv6hdr))
1715 				goto out;
1716 			if (hook_state.pf == NFPROTO_IPV6)
1717 				break;
1718 			goto out;
1719 		default:
1720 			ret = -EPROTO;
1721 			goto out;
1722 		}
1723 
1724 		skb_reset_network_header(skb);
1725 	} else {
1726 		skb->protocol = nfproto_eth(hook_state.pf);
1727 	}
1728 
1729 	ctx.skb = skb;
1730 
1731 	ret = bpf_test_run(prog, &ctx, repeat, &retval, &duration, false);
1732 	if (ret)
1733 		goto out;
1734 
1735 	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1736 
1737 out:
1738 	kfree(user_ctx);
1739 	kfree_skb(skb);
1740 	kfree(data);
1741 	return ret;
1742 }
1743 
1744 static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
1745 	.owner = THIS_MODULE,
1746 	.set   = &test_sk_check_kfunc_ids,
1747 };
1748 
1749 BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
1750 BTF_ID(struct, prog_test_ref_kfunc)
1751 BTF_ID(func, bpf_kfunc_call_test_release_dtor)
1752 BTF_ID(struct, prog_test_member)
1753 BTF_ID(func, bpf_kfunc_call_memb_release_dtor)
1754 
1755 static int __init bpf_prog_test_run_init(void)
1756 {
1757 	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
1758 		{
1759 		  .btf_id       = bpf_prog_test_dtor_kfunc_ids[0],
1760 		  .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
1761 		},
1762 		{
1763 		  .btf_id	= bpf_prog_test_dtor_kfunc_ids[2],
1764 		  .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
1765 		},
1766 	};
1767 	int ret;
1768 
1769 	ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
1770 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
1771 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
1772 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
1773 	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
1774 						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
1775 						  THIS_MODULE);
1776 }
1777 late_initcall(bpf_prog_test_run_init);
1778