/* xref: /linux/net/bpf/test_run.c (revision d0fde6aae2bacdc024fff43461ba0f325375fa97) */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Facebook
3  */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/btf_ids.h>
7 #include <linux/slab.h>
8 #include <linux/init.h>
9 #include <linux/vmalloc.h>
10 #include <linux/etherdevice.h>
11 #include <linux/filter.h>
12 #include <linux/rcupdate_trace.h>
13 #include <linux/sched/signal.h>
14 #include <net/bpf_sk_storage.h>
15 #include <net/sock.h>
16 #include <net/tcp.h>
17 #include <net/net_namespace.h>
18 #include <net/page_pool/helpers.h>
19 #include <linux/error-injection.h>
20 #include <linux/smp.h>
21 #include <linux/sock_diag.h>
22 #include <linux/netfilter.h>
23 #include <net/netdev_rx_queue.h>
24 #include <net/xdp.h>
25 #include <net/netfilter/nf_bpf_link.h>
26 
27 #define CREATE_TRACE_POINTS
28 #include <trace/events/bpf_test_run.h>
29 
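/* Helpers for timing repeated test runs. bpf_test_timer_enter() pins the
 * task (disabling preemption or migration, depending on ->mode) and takes
 * the RCU read lock, bpf_test_timer_continue() accumulates the elapsed time,
 * bails out on pending signals, reschedules between runs when needed and
 * computes the average duration in ns once 'repeat' iterations are done.
 * The callers below follow the pattern:
 *
 *	struct bpf_test_timer t = { NO_MIGRATE };
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		ret = run_one_iteration();
 *	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
 *	bpf_test_timer_leave(&t);
 */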
30 struct bpf_test_timer {
31 	enum { NO_PREEMPT, NO_MIGRATE } mode;
32 	u32 i;
33 	u64 time_start, time_spent;
34 };
35 
36 static void bpf_test_timer_enter(struct bpf_test_timer *t)
37 	__acquires(rcu)
38 {
39 	rcu_read_lock();
40 	if (t->mode == NO_PREEMPT)
41 		preempt_disable();
42 	else
43 		migrate_disable();
44 
45 	t->time_start = ktime_get_ns();
46 }
47 
48 static void bpf_test_timer_leave(struct bpf_test_timer *t)
49 	__releases(rcu)
50 {
51 	t->time_start = 0;
52 
53 	if (t->mode == NO_PREEMPT)
54 		preempt_enable();
55 	else
56 		migrate_enable();
57 	rcu_read_unlock();
58 }
59 
60 static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
61 				    u32 repeat, int *err, u32 *duration)
62 	__must_hold(rcu)
63 {
64 	t->i += iterations;
65 	if (t->i >= repeat) {
66 		/* We're done. */
67 		t->time_spent += ktime_get_ns() - t->time_start;
68 		do_div(t->time_spent, t->i);
69 		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
70 		*err = 0;
71 		goto reset;
72 	}
73 
74 	if (signal_pending(current)) {
75 		/* During iteration: we've been cancelled, abort. */
76 		*err = -EINTR;
77 		goto reset;
78 	}
79 
80 	if (need_resched()) {
81 		/* During iteration: we need to reschedule between runs. */
82 		t->time_spent += ktime_get_ns() - t->time_start;
83 		bpf_test_timer_leave(t);
84 		cond_resched();
85 		bpf_test_timer_enter(t);
86 	}
87 
88 	/* Do another round. */
89 	return true;
90 
91 reset:
92 	t->i = 0;
93 	return false;
94 }
95 
96 /* We put this struct at the head of each page with a context and frame
97  * initialised when the page is allocated, so we don't have to do this on each
98  * repetition of the test run.
99  */
100 struct xdp_page_head {
101 	struct xdp_buff orig_ctx;
102 	struct xdp_buff ctx;
103 	union {
104 		/* ::data_hard_start starts here */
105 		DECLARE_FLEX_ARRAY(struct xdp_frame, frame);
106 		DECLARE_FLEX_ARRAY(u8, data);
107 	};
108 };
109 
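/* Per-invocation state for the live XDP frames mode (BPF_F_TEST_XDP_LIVE_FRAMES):
 * a private page_pool whose pages are pre-initialised with copies of the
 * original context, a "fake" RXQ pointing at that pool, and scratch arrays
 * used to hand XDP_PASS frames to the stack as skbs.
 */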
110 struct xdp_test_data {
111 	struct xdp_buff *orig_ctx;
112 	struct xdp_rxq_info rxq;
113 	struct net_device *dev;
114 	struct page_pool *pp;
115 	struct xdp_frame **frames;
116 	struct sk_buff **skbs;
117 	struct xdp_mem_info mem;
118 	u32 batch_size;
119 	u32 frame_cnt;
120 };
121 
122 /* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE
123  * must be updated accordingly if this gets changed, otherwise BPF selftests
124  * will fail.
125  */
126 #define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
127 #define TEST_XDP_MAX_BATCH 256
128 
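/* page_pool init_callback: run once for each page in the pool. Copies the
 * original packet (including metadata) into the page behind the xdp_page_head
 * and snapshots the resulting context into ->orig_ctx so reset_ctx() can
 * restore it cheaply between test iterations.
 */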
129 static void xdp_test_run_init_page(struct page *page, void *arg)
130 {
131 	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
132 	struct xdp_buff *new_ctx, *orig_ctx;
133 	u32 headroom = XDP_PACKET_HEADROOM;
134 	struct xdp_test_data *xdp = arg;
135 	size_t frm_len, meta_len;
136 	struct xdp_frame *frm;
137 	void *data;
138 
139 	orig_ctx = xdp->orig_ctx;
140 	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
141 	meta_len = orig_ctx->data - orig_ctx->data_meta;
142 	headroom -= meta_len;
143 
144 	new_ctx = &head->ctx;
145 	frm = head->frame;
146 	data = head->data;
147 	memcpy(data + headroom, orig_ctx->data_meta, frm_len);
148 
149 	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
150 	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
151 	new_ctx->data = new_ctx->data_meta + meta_len;
152 
153 	xdp_update_frame_from_buff(new_ctx, frm);
154 	frm->mem = new_ctx->rxq->mem;
155 
156 	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
157 }
158 
159 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
160 {
161 	struct page_pool *pp;
162 	int err = -ENOMEM;
163 	struct page_pool_params pp_params = {
164 		.order = 0,
165 		.flags = 0,
166 		.pool_size = xdp->batch_size,
167 		.nid = NUMA_NO_NODE,
168 		.init_callback = xdp_test_run_init_page,
169 		.init_arg = xdp,
170 	};
171 
172 	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
173 	if (!xdp->frames)
174 		return -ENOMEM;
175 
176 	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
177 	if (!xdp->skbs)
178 		goto err_skbs;
179 
180 	pp = page_pool_create(&pp_params);
181 	if (IS_ERR(pp)) {
182 		err = PTR_ERR(pp);
183 		goto err_pp;
184 	}
185 
186 	/* will copy 'mem.id' into pp->xdp_mem_id */
187 	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
188 	if (err)
189 		goto err_mmodel;
190 
191 	xdp->pp = pp;
192 
193 	/* We create a 'fake' RXQ referencing the original dev, but with an
194 	 * xdp_mem_info pointing to our page_pool
195 	 */
196 	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
197 	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
198 	xdp->rxq.mem.id = pp->xdp_mem_id;
199 	xdp->dev = orig_ctx->rxq->dev;
200 	xdp->orig_ctx = orig_ctx;
201 
202 	return 0;
203 
204 err_mmodel:
205 	page_pool_destroy(pp);
206 err_pp:
207 	kvfree(xdp->skbs);
208 err_skbs:
209 	kvfree(xdp->frames);
210 	return err;
211 }
212 
213 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
214 {
215 	xdp_unreg_mem_model(&xdp->mem);
216 	page_pool_destroy(xdp->pp);
217 	kvfree(xdp->frames);
218 	kvfree(xdp->skbs);
219 }
220 
221 static bool frame_was_changed(const struct xdp_page_head *head)
222 {
223 	/* xdp_scrub_frame() zeroes the data pointer; flags is the last field,
224 	 * i.e. it has the highest chance of being overwritten. If those two are
225 	 * untouched, it's most likely safe to skip the context reset.
226 	 */
227 	return head->frame->data != head->orig_ctx.data ||
228 	       head->frame->flags != head->orig_ctx.flags;
229 }
230 
231 static bool ctx_was_changed(struct xdp_page_head *head)
232 {
233 	return head->orig_ctx.data != head->ctx.data ||
234 		head->orig_ctx.data_meta != head->ctx.data_meta ||
235 		head->orig_ctx.data_end != head->ctx.data_end;
236 }
237 
238 static void reset_ctx(struct xdp_page_head *head)
239 {
240 	if (likely(!frame_was_changed(head) && !ctx_was_changed(head)))
241 		return;
242 
243 	head->ctx.data = head->orig_ctx.data;
244 	head->ctx.data_meta = head->orig_ctx.data_meta;
245 	head->ctx.data_end = head->orig_ctx.data_end;
246 	xdp_update_frame_from_buff(&head->ctx, head->frame);
247 }
248 
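/* Build skbs for the frames that returned XDP_PASS (bulk-allocating them from
 * skbuff_cache) and feed them to the stack via netif_receive_skb_list().
 * Frames that cannot be converted are returned to the page_pool.
 */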
249 static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
250 			   struct sk_buff **skbs,
251 			   struct net_device *dev)
252 {
253 	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
254 	int i, n;
255 	LIST_HEAD(list);
256 
257 	n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
258 	if (unlikely(n == 0)) {
259 		for (i = 0; i < nframes; i++)
260 			xdp_return_frame(frames[i]);
261 		return -ENOMEM;
262 	}
263 
264 	for (i = 0; i < nframes; i++) {
265 		struct xdp_frame *xdpf = frames[i];
266 		struct sk_buff *skb = skbs[i];
267 
268 		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
269 		if (!skb) {
270 			xdp_return_frame(xdpf);
271 			continue;
272 		}
273 
274 		list_add_tail(&skb->list, &list);
275 	}
276 	netif_receive_skb_list(&list);
277 
278 	return 0;
279 }
280 
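/* Run one batch of live frames: allocate up to batch_sz pages from the pool,
 * reset each context, run the program on it and act on the verdict. XDP_TX is
 * emulated as a redirect back to the receiving ifindex, XDP_PASS frames are
 * queued up for xdp_recv_frames(), and any pending redirects are flushed
 * before returning.
 */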
281 static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
282 			      u32 repeat)
283 {
284 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
285 	int err = 0, act, ret, i, nframes = 0, batch_sz;
286 	struct xdp_frame **frames = xdp->frames;
287 	struct xdp_page_head *head;
288 	struct xdp_frame *frm;
289 	bool redirect = false;
290 	struct xdp_buff *ctx;
291 	struct page *page;
292 
293 	batch_sz = min_t(u32, repeat, xdp->batch_size);
294 
295 	local_bh_disable();
296 	xdp_set_return_frame_no_direct();
297 
298 	for (i = 0; i < batch_sz; i++) {
299 		page = page_pool_dev_alloc_pages(xdp->pp);
300 		if (!page) {
301 			err = -ENOMEM;
302 			goto out;
303 		}
304 
305 		head = phys_to_virt(page_to_phys(page));
306 		reset_ctx(head);
307 		ctx = &head->ctx;
308 		frm = head->frame;
309 		xdp->frame_cnt++;
310 
311 		act = bpf_prog_run_xdp(prog, ctx);
312 
313 		/* if program changed pkt bounds we need to update the xdp_frame */
314 		if (unlikely(ctx_was_changed(head))) {
315 			ret = xdp_update_frame_from_buff(ctx, frm);
316 			if (ret) {
317 				xdp_return_buff(ctx);
318 				continue;
319 			}
320 		}
321 
322 		switch (act) {
323 		case XDP_TX:
324 			/* we can't do a real XDP_TX since we're not in the
325 			 * driver, so turn it into a REDIRECT back to the same
326 			 * index
327 			 */
328 			ri->tgt_index = xdp->dev->ifindex;
329 			ri->map_id = INT_MAX;
330 			ri->map_type = BPF_MAP_TYPE_UNSPEC;
331 			fallthrough;
332 		case XDP_REDIRECT:
333 			redirect = true;
334 			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
335 			if (ret)
336 				xdp_return_buff(ctx);
337 			break;
338 		case XDP_PASS:
339 			frames[nframes++] = frm;
340 			break;
341 		default:
342 			bpf_warn_invalid_xdp_action(NULL, prog, act);
343 			fallthrough;
344 		case XDP_DROP:
345 			xdp_return_buff(ctx);
346 			break;
347 		}
348 	}
349 
350 out:
351 	if (redirect)
352 		xdp_do_flush();
353 	if (nframes) {
354 		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
355 		if (ret)
356 			err = ret;
357 	}
358 
359 	xdp_clear_return_frame_no_direct();
360 	local_bh_enable();
361 	return err;
362 }
363 
364 static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
365 				 u32 repeat, u32 batch_size, u32 *time)
366 
367 {
368 	struct xdp_test_data xdp = { .batch_size = batch_size };
369 	struct bpf_test_timer t = { .mode = NO_MIGRATE };
370 	int ret;
371 
372 	if (!repeat)
373 		repeat = 1;
374 
375 	ret = xdp_test_run_setup(&xdp, ctx);
376 	if (ret)
377 		return ret;
378 
379 	bpf_test_timer_enter(&t);
380 	do {
381 		xdp.frame_cnt = 0;
382 		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
383 		if (unlikely(ret < 0))
384 			break;
385 	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
386 	bpf_test_timer_leave(&t);
387 
388 	xdp_test_run_teardown(&xdp);
389 	return ret;
390 }
391 
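/* Generic run loop shared by the skb, xdp (non-live) and netfilter test runs:
 * allocate dummy cgroup storage for the program, install a bpf_cg_run_ctx and
 * run the program 'repeat' times under the test timer, reporting the last
 * return value and the average duration.
 */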
392 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
393 			u32 *retval, u32 *time, bool xdp)
394 {
395 	struct bpf_prog_array_item item = {.prog = prog};
396 	struct bpf_run_ctx *old_ctx;
397 	struct bpf_cg_run_ctx run_ctx;
398 	struct bpf_test_timer t = { NO_MIGRATE };
399 	enum bpf_cgroup_storage_type stype;
400 	int ret;
401 
402 	for_each_cgroup_storage_type(stype) {
403 		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
404 		if (IS_ERR(item.cgroup_storage[stype])) {
405 			item.cgroup_storage[stype] = NULL;
406 			for_each_cgroup_storage_type(stype)
407 				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
408 			return -ENOMEM;
409 		}
410 	}
411 
412 	if (!repeat)
413 		repeat = 1;
414 
415 	bpf_test_timer_enter(&t);
416 	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
417 	do {
418 		run_ctx.prog_item = &item;
419 		local_bh_disable();
420 		if (xdp)
421 			*retval = bpf_prog_run_xdp(prog, ctx);
422 		else
423 			*retval = bpf_prog_run(prog, ctx);
424 		local_bh_enable();
425 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
426 	bpf_reset_run_ctx(old_ctx);
427 	bpf_test_timer_leave(&t);
428 
429 	for_each_cgroup_storage_type(stype)
430 		bpf_cgroup_storage_free(item.cgroup_storage[stype]);
431 
432 	return ret;
433 }
434 
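/* Copy the resulting packet (and any frags), retval and duration back to the
 * user's bpf_attr. If the user-supplied output buffer is too small the data
 * is truncated and -ENOSPC is returned, but the sizes, retval and duration
 * are still reported.
 */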
435 static int bpf_test_finish(const union bpf_attr *kattr,
436 			   union bpf_attr __user *uattr, const void *data,
437 			   struct skb_shared_info *sinfo, u32 size,
438 			   u32 retval, u32 duration)
439 {
440 	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
441 	int err = -EFAULT;
442 	u32 copy_size = size;
443 
444 	/* Clamp copy if the user has provided a size hint, but copy the full
445 	 * buffer if not, to retain the old behaviour.
446 	 */
447 	if (kattr->test.data_size_out &&
448 	    copy_size > kattr->test.data_size_out) {
449 		copy_size = kattr->test.data_size_out;
450 		err = -ENOSPC;
451 	}
452 
453 	if (data_out) {
454 		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;
455 
456 		if (len < 0) {
457 			err = -ENOSPC;
458 			goto out;
459 		}
460 
461 		if (copy_to_user(data_out, data, len))
462 			goto out;
463 
464 		if (sinfo) {
465 			int i, offset = len;
466 			u32 data_len;
467 
468 			for (i = 0; i < sinfo->nr_frags; i++) {
469 				skb_frag_t *frag = &sinfo->frags[i];
470 
471 				if (offset >= copy_size) {
472 					err = -ENOSPC;
473 					break;
474 				}
475 
476 				data_len = min_t(u32, copy_size - offset,
477 						 skb_frag_size(frag));
478 
479 				if (copy_to_user(data_out + offset,
480 						 skb_frag_address(frag),
481 						 data_len))
482 					goto out;
483 
484 				offset += data_len;
485 			}
486 		}
487 	}
488 
489 	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
490 		goto out;
491 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
492 		goto out;
493 	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
494 		goto out;
495 	if (err != -ENOSPC)
496 		err = 0;
497 out:
498 	trace_bpf_test_finish(&err);
499 	return err;
500 }
501 
502 /* Integer types of various sizes and pointer combinations cover a variety of
503  * architecture-dependent calling conventions. 7+ arguments can be supported in the
504  * future.
505  */
506 __bpf_kfunc_start_defs();
507 
508 __bpf_kfunc int bpf_fentry_test1(int a)
509 {
510 	return a + 1;
511 }
512 EXPORT_SYMBOL_GPL(bpf_fentry_test1);
513 
514 int noinline bpf_fentry_test2(int a, u64 b)
515 {
516 	return a + b;
517 }
518 
519 int noinline bpf_fentry_test3(char a, int b, u64 c)
520 {
521 	return a + b + c;
522 }
523 
524 int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
525 {
526 	return (long)a + b + c + d;
527 }
528 
529 int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
530 {
531 	return a + (long)b + c + d + e;
532 }
533 
534 int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
535 {
536 	return a + (long)b + c + d + (long)e + f;
537 }
538 
539 struct bpf_fentry_test_t {
540 	struct bpf_fentry_test_t *a;
541 };
542 
543 int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
544 {
545 	asm volatile ("");
546 	return (long)arg;
547 }
548 
549 int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
550 {
551 	return (long)arg->a;
552 }
553 
554 __bpf_kfunc u32 bpf_fentry_test9(u32 *a)
555 {
556 	return *a;
557 }
558 
559 void noinline bpf_fentry_test_sinfo(struct skb_shared_info *sinfo)
560 {
561 }
562 
563 __bpf_kfunc int bpf_modify_return_test(int a, int *b)
564 {
565 	*b += 1;
566 	return a + *b;
567 }
568 
569 __bpf_kfunc int bpf_modify_return_test2(int a, int *b, short c, int d,
570 					void *e, char f, int g)
571 {
572 	*b += 1;
573 	return a + *b + c + d + (long)e + f + g;
574 }
575 
576 int noinline bpf_fentry_shadow_test(int a)
577 {
578 	return a + 1;
579 }
580 
581 struct prog_test_member1 {
582 	int a;
583 };
584 
585 struct prog_test_member {
586 	struct prog_test_member1 m;
587 	int c;
588 };
589 
590 struct prog_test_ref_kfunc {
591 	int a;
592 	int b;
593 	struct prog_test_member memb;
594 	struct prog_test_ref_kfunc *next;
595 	refcount_t cnt;
596 };
597 
598 __bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
599 {
600 	refcount_dec(&p->cnt);
601 }
602 
603 __bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
604 {
605 }
606 
607 __bpf_kfunc_end_defs();
608 
609 BTF_SET8_START(bpf_test_modify_return_ids)
610 BTF_ID_FLAGS(func, bpf_modify_return_test)
611 BTF_ID_FLAGS(func, bpf_modify_return_test2)
612 BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
613 BTF_SET8_END(bpf_test_modify_return_ids)
614 
615 static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
616 	.owner = THIS_MODULE,
617 	.set   = &bpf_test_modify_return_ids,
618 };
619 
620 BTF_SET8_START(test_sk_check_kfunc_ids)
621 BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
622 BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
623 BTF_SET8_END(test_sk_check_kfunc_ids)
624 
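/* Allocate the linear test packet: check 'size' against the ETH_HLEN /
 * PAGE_SIZE bounds, allocate a zeroed buffer with the requested headroom and
 * tailroom and copy the first 'user_size' bytes of test.data_in in behind the
 * headroom.
 */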
625 static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
626 			   u32 size, u32 headroom, u32 tailroom)
627 {
628 	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
629 	void *data;
630 
631 	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
632 		return ERR_PTR(-EINVAL);
633 
634 	if (user_size > size)
635 		return ERR_PTR(-EMSGSIZE);
636 
637 	size = SKB_DATA_ALIGN(size);
638 	data = kzalloc(size + headroom + tailroom, GFP_USER);
639 	if (!data)
640 		return ERR_PTR(-ENOMEM);
641 
642 	if (copy_from_user(data + headroom, data_in, user_size)) {
643 		kfree(data);
644 		return ERR_PTR(-EFAULT);
645 	}
646 
647 	return data;
648 }
649 
650 int bpf_prog_test_run_tracing(struct bpf_prog *prog,
651 			      const union bpf_attr *kattr,
652 			      union bpf_attr __user *uattr)
653 {
654 	struct bpf_fentry_test_t arg = {};
655 	u16 side_effect = 0, ret = 0;
656 	int b = 2, err = -EFAULT;
657 	u32 retval = 0;
658 
659 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
660 		return -EINVAL;
661 
662 	switch (prog->expected_attach_type) {
663 	case BPF_TRACE_FENTRY:
664 	case BPF_TRACE_FEXIT:
665 		if (bpf_fentry_test1(1) != 2 ||
666 		    bpf_fentry_test2(2, 3) != 5 ||
667 		    bpf_fentry_test3(4, 5, 6) != 15 ||
668 		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
669 		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
670 		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
671 		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
672 		    bpf_fentry_test8(&arg) != 0 ||
673 		    bpf_fentry_test9(&retval) != 0)
674 			goto out;
675 		break;
676 	case BPF_MODIFY_RETURN:
677 		ret = bpf_modify_return_test(1, &b);
678 		if (b != 2)
679 			side_effect++;
680 		b = 2;
681 		ret += bpf_modify_return_test2(1, &b, 3, 4, (void *)5, 6, 7);
682 		if (b != 2)
683 			side_effect++;
684 		break;
685 	default:
686 		goto out;
687 	}
688 
689 	retval = ((u32)side_effect << 16) | ret;
690 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
691 		goto out;
692 
693 	err = 0;
694 out:
695 	trace_bpf_test_finish(&err);
696 	return err;
697 }
698 
699 struct bpf_raw_tp_test_run_info {
700 	struct bpf_prog *prog;
701 	void *ctx;
702 	u32 retval;
703 };
704 
705 static void
706 __bpf_prog_test_run_raw_tp(void *data)
707 {
708 	struct bpf_raw_tp_test_run_info *info = data;
709 
710 	rcu_read_lock();
711 	info->retval = bpf_prog_run(info->prog, info->ctx);
712 	rcu_read_unlock();
713 }
714 
715 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
716 			     const union bpf_attr *kattr,
717 			     union bpf_attr __user *uattr)
718 {
719 	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
720 	__u32 ctx_size_in = kattr->test.ctx_size_in;
721 	struct bpf_raw_tp_test_run_info info;
722 	int cpu = kattr->test.cpu, err = 0;
723 	int current_cpu;
724 
725 	/* doesn't support data_in/out, ctx_out, duration, repeat or batch_size */
726 	if (kattr->test.data_in || kattr->test.data_out ||
727 	    kattr->test.ctx_out || kattr->test.duration ||
728 	    kattr->test.repeat || kattr->test.batch_size)
729 		return -EINVAL;
730 
731 	if (ctx_size_in < prog->aux->max_ctx_offset ||
732 	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
733 		return -EINVAL;
734 
735 	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
736 		return -EINVAL;
737 
738 	if (ctx_size_in) {
739 		info.ctx = memdup_user(ctx_in, ctx_size_in);
740 		if (IS_ERR(info.ctx))
741 			return PTR_ERR(info.ctx);
742 	} else {
743 		info.ctx = NULL;
744 	}
745 
746 	info.prog = prog;
747 
748 	current_cpu = get_cpu();
749 	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
750 	    cpu == current_cpu) {
751 		__bpf_prog_test_run_raw_tp(&info);
752 	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
753 		/* smp_call_function_single() also checks cpu_online()
754 		 * after csd_lock(). However, since cpu is from user
755 		 * space, let's do an extra quick check to filter out
756 		 * invalid value before smp_call_function_single().
757 		 */
758 		err = -ENXIO;
759 	} else {
760 		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
761 					       &info, 1);
762 	}
763 	put_cpu();
764 
765 	if (!err &&
766 	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
767 		err = -EFAULT;
768 
769 	kfree(info.ctx);
770 	return err;
771 }
772 
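/* Duplicate test.ctx_in into a zeroed kernel buffer of at most 'max_size'
 * bytes. Trailing bytes beyond 'max_size' must be zero
 * (bpf_check_uarg_tail_zero()), and NULL is returned when neither ctx_in nor
 * ctx_out was supplied.
 */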
773 static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
774 {
775 	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
776 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
777 	u32 size = kattr->test.ctx_size_in;
778 	void *data;
779 	int err;
780 
781 	if (!data_in && !data_out)
782 		return NULL;
783 
784 	data = kzalloc(max_size, GFP_USER);
785 	if (!data)
786 		return ERR_PTR(-ENOMEM);
787 
788 	if (data_in) {
789 		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
790 		if (err) {
791 			kfree(data);
792 			return ERR_PTR(err);
793 		}
794 
795 		size = min_t(u32, max_size, size);
796 		if (copy_from_user(data, data_in, size)) {
797 			kfree(data);
798 			return ERR_PTR(-EFAULT);
799 		}
800 	}
801 	return data;
802 }
803 
804 static int bpf_ctx_finish(const union bpf_attr *kattr,
805 			  union bpf_attr __user *uattr, const void *data,
806 			  u32 size)
807 {
808 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
809 	int err = -EFAULT;
810 	u32 copy_size = size;
811 
812 	if (!data || !data_out)
813 		return 0;
814 
815 	if (copy_size > kattr->test.ctx_size_out) {
816 		copy_size = kattr->test.ctx_size_out;
817 		err = -ENOSPC;
818 	}
819 
820 	if (copy_to_user(data_out, data, copy_size))
821 		goto out;
822 	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
823 		goto out;
824 	if (err != -ENOSPC)
825 		err = 0;
826 out:
827 	return err;
828 }
829 
830 /**
831  * range_is_zero - test whether buffer is initialized
832  * @buf: buffer to check
833  * @from: check from this position
834  * @to: check up until (excluding) this position
835  *
836  * This function returns true if there is no non-zero byte
837  * in the buf in the range [from,to), i.e. the whole range is zero.
838  */
839 static inline bool range_is_zero(void *buf, size_t from, size_t to)
840 {
841 	return !memchr_inv((u8 *)buf + from, 0, to - from);
842 }
843 
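/* User space may only initialise a whitelisted subset of __sk_buff (mark,
 * priority, ingress_ifindex, ifindex, cb, tstamp, wire_len, gso_segs,
 * gso_size, hwtstamp); every other byte must be zero. The allowed fields are
 * then transferred onto the real skb and its shared info.
 */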
844 static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
845 {
846 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
847 
848 	if (!__skb)
849 		return 0;
850 
851 	/* make sure the fields we don't use are zeroed */
852 	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
853 		return -EINVAL;
854 
855 	/* mark is allowed */
856 
857 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
858 			   offsetof(struct __sk_buff, priority)))
859 		return -EINVAL;
860 
861 	/* priority is allowed */
862 	/* ingress_ifindex is allowed */
863 	/* ifindex is allowed */
864 
865 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
866 			   offsetof(struct __sk_buff, cb)))
867 		return -EINVAL;
868 
869 	/* cb is allowed */
870 
871 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
872 			   offsetof(struct __sk_buff, tstamp)))
873 		return -EINVAL;
874 
875 	/* tstamp is allowed */
876 	/* wire_len is allowed */
877 	/* gso_segs is allowed */
878 
879 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
880 			   offsetof(struct __sk_buff, gso_size)))
881 		return -EINVAL;
882 
883 	/* gso_size is allowed */
884 
885 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
886 			   offsetof(struct __sk_buff, hwtstamp)))
887 		return -EINVAL;
888 
889 	/* hwtstamp is allowed */
890 
891 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
892 			   sizeof(struct __sk_buff)))
893 		return -EINVAL;
894 
895 	skb->mark = __skb->mark;
896 	skb->priority = __skb->priority;
897 	skb->skb_iif = __skb->ingress_ifindex;
898 	skb->tstamp = __skb->tstamp;
899 	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
900 
901 	if (__skb->wire_len == 0) {
902 		cb->pkt_len = skb->len;
903 	} else {
904 		if (__skb->wire_len < skb->len ||
905 		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
906 			return -EINVAL;
907 		cb->pkt_len = __skb->wire_len;
908 	}
909 
910 	if (__skb->gso_segs > GSO_MAX_SEGS)
911 		return -EINVAL;
912 	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
913 	skb_shinfo(skb)->gso_size = __skb->gso_size;
914 	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;
915 
916 	return 0;
917 }
918 
919 static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
920 {
921 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
922 
923 	if (!__skb)
924 		return;
925 
926 	__skb->mark = skb->mark;
927 	__skb->priority = skb->priority;
928 	__skb->ingress_ifindex = skb->skb_iif;
929 	__skb->ifindex = skb->dev->ifindex;
930 	__skb->tstamp = skb->tstamp;
931 	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
932 	__skb->wire_len = cb->pkt_len;
933 	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
934 	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
935 }
936 
937 static struct proto bpf_dummy_proto = {
938 	.name   = "bpf_dummy",
939 	.owner  = THIS_MODULE,
940 	.obj_size = sizeof(struct sock),
941 };
942 
943 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
944 			  union bpf_attr __user *uattr)
945 {
946 	bool is_l2 = false, is_direct_pkt_access = false;
947 	struct net *net = current->nsproxy->net_ns;
948 	struct net_device *dev = net->loopback_dev;
949 	u32 size = kattr->test.data_size_in;
950 	u32 repeat = kattr->test.repeat;
951 	struct __sk_buff *ctx = NULL;
952 	u32 retval, duration;
953 	int hh_len = ETH_HLEN;
954 	struct sk_buff *skb;
955 	struct sock *sk;
956 	void *data;
957 	int ret;
958 
959 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
960 		return -EINVAL;
961 
962 	data = bpf_test_init(kattr, kattr->test.data_size_in,
963 			     size, NET_SKB_PAD + NET_IP_ALIGN,
964 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
965 	if (IS_ERR(data))
966 		return PTR_ERR(data);
967 
968 	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
969 	if (IS_ERR(ctx)) {
970 		kfree(data);
971 		return PTR_ERR(ctx);
972 	}
973 
974 	switch (prog->type) {
975 	case BPF_PROG_TYPE_SCHED_CLS:
976 	case BPF_PROG_TYPE_SCHED_ACT:
977 		is_l2 = true;
978 		fallthrough;
979 	case BPF_PROG_TYPE_LWT_IN:
980 	case BPF_PROG_TYPE_LWT_OUT:
981 	case BPF_PROG_TYPE_LWT_XMIT:
982 		is_direct_pkt_access = true;
983 		break;
984 	default:
985 		break;
986 	}
987 
988 	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
989 	if (!sk) {
990 		kfree(data);
991 		kfree(ctx);
992 		return -ENOMEM;
993 	}
994 	sock_init_data(NULL, sk);
995 
996 	skb = slab_build_skb(data);
997 	if (!skb) {
998 		kfree(data);
999 		kfree(ctx);
1000 		sk_free(sk);
1001 		return -ENOMEM;
1002 	}
1003 	skb->sk = sk;
1004 
1005 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1006 	__skb_put(skb, size);
1007 	if (ctx && ctx->ifindex > 1) {
1008 		dev = dev_get_by_index(net, ctx->ifindex);
1009 		if (!dev) {
1010 			ret = -ENODEV;
1011 			goto out;
1012 		}
1013 	}
1014 	skb->protocol = eth_type_trans(skb, dev);
1015 	skb_reset_network_header(skb);
1016 
1017 	switch (skb->protocol) {
1018 	case htons(ETH_P_IP):
1019 		sk->sk_family = AF_INET;
1020 		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
1021 			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
1022 			sk->sk_daddr = ip_hdr(skb)->daddr;
1023 		}
1024 		break;
1025 #if IS_ENABLED(CONFIG_IPV6)
1026 	case htons(ETH_P_IPV6):
1027 		sk->sk_family = AF_INET6;
1028 		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
1029 			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
1030 			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
1031 		}
1032 		break;
1033 #endif
1034 	default:
1035 		break;
1036 	}
1037 
1038 	if (is_l2)
1039 		__skb_push(skb, hh_len);
1040 	if (is_direct_pkt_access)
1041 		bpf_compute_data_pointers(skb);
1042 	ret = convert___skb_to_skb(skb, ctx);
1043 	if (ret)
1044 		goto out;
1045 	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
1046 	if (ret)
1047 		goto out;
1048 	if (!is_l2) {
1049 		if (skb_headroom(skb) < hh_len) {
1050 			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
1051 
1052 			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
1053 				ret = -ENOMEM;
1054 				goto out;
1055 			}
1056 		}
1057 		memset(__skb_push(skb, hh_len), 0, hh_len);
1058 	}
1059 	convert_skb_to___skb(skb, ctx);
1060 
1061 	size = skb->len;
1062 	/* bpf program can never convert linear skb to non-linear */
1063 	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
1064 		size = skb_headlen(skb);
1065 	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
1066 			      duration);
1067 	if (!ret)
1068 		ret = bpf_ctx_finish(kattr, uattr, ctx,
1069 				     sizeof(struct __sk_buff));
1070 out:
1071 	if (dev && dev != net->loopback_dev)
1072 		dev_put(dev);
1073 	kfree_skb(skb);
1074 	sk_free(sk);
1075 	kfree(ctx);
1076 	return ret;
1077 }
1078 
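/* Apply a user-supplied xdp_md to the xdp_buff: optionally resolve
 * ingress_ifindex/rx_queue_index to a registered xdp_rxq_info (taking a
 * device reference that xdp_convert_buff_to_md() drops again) and position
 * ->data 'data' bytes past the metadata start.
 */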
1079 static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
1080 {
1081 	unsigned int ingress_ifindex, rx_queue_index;
1082 	struct netdev_rx_queue *rxqueue;
1083 	struct net_device *device;
1084 
1085 	if (!xdp_md)
1086 		return 0;
1087 
1088 	if (xdp_md->egress_ifindex != 0)
1089 		return -EINVAL;
1090 
1091 	ingress_ifindex = xdp_md->ingress_ifindex;
1092 	rx_queue_index = xdp_md->rx_queue_index;
1093 
1094 	if (!ingress_ifindex && rx_queue_index)
1095 		return -EINVAL;
1096 
1097 	if (ingress_ifindex) {
1098 		device = dev_get_by_index(current->nsproxy->net_ns,
1099 					  ingress_ifindex);
1100 		if (!device)
1101 			return -ENODEV;
1102 
1103 		if (rx_queue_index >= device->real_num_rx_queues)
1104 			goto free_dev;
1105 
1106 		rxqueue = __netif_get_rx_queue(device, rx_queue_index);
1107 
1108 		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
1109 			goto free_dev;
1110 
1111 		xdp->rxq = &rxqueue->xdp_rxq;
1112 		/* The device is now tracked in the xdp->rxq for later
1113 		 * dev_put()
1114 		 */
1115 	}
1116 
1117 	xdp->data = xdp->data_meta + xdp_md->data;
1118 	return 0;
1119 
1120 free_dev:
1121 	dev_put(device);
1122 	return -EINVAL;
1123 }
1124 
1125 static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
1126 {
1127 	if (!xdp_md)
1128 		return;
1129 
1130 	xdp_md->data = xdp->data - xdp->data_meta;
1131 	xdp_md->data_end = xdp->data_end - xdp->data_meta;
1132 
1133 	if (xdp_md->ingress_ifindex)
1134 		dev_put(xdp->rxq->dev);
1135 }
1136 
1137 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1138 			  union bpf_attr __user *uattr)
1139 {
1140 	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
1141 	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1142 	u32 batch_size = kattr->test.batch_size;
1143 	u32 retval = 0, duration, max_data_sz;
1144 	u32 size = kattr->test.data_size_in;
1145 	u32 headroom = XDP_PACKET_HEADROOM;
1146 	u32 repeat = kattr->test.repeat;
1147 	struct netdev_rx_queue *rxqueue;
1148 	struct skb_shared_info *sinfo;
1149 	struct xdp_buff xdp = {};
1150 	int i, ret = -EINVAL;
1151 	struct xdp_md *ctx;
1152 	void *data;
1153 
1154 	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
1155 	    prog->expected_attach_type == BPF_XDP_CPUMAP)
1156 		return -EINVAL;
1157 
1158 	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
1159 		return -EINVAL;
1160 
1161 	if (bpf_prog_is_dev_bound(prog->aux))
1162 		return -EINVAL;
1163 
1164 	if (do_live) {
1165 		if (!batch_size)
1166 			batch_size = NAPI_POLL_WEIGHT;
1167 		else if (batch_size > TEST_XDP_MAX_BATCH)
1168 			return -E2BIG;
1169 
1170 		headroom += sizeof(struct xdp_page_head);
1171 	} else if (batch_size) {
1172 		return -EINVAL;
1173 	}
1174 
1175 	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
1176 	if (IS_ERR(ctx))
1177 		return PTR_ERR(ctx);
1178 
1179 	if (ctx) {
1180 		/* There can't be user-provided data before the metadata */
1181 		if (ctx->data_meta || ctx->data_end != size ||
1182 		    ctx->data > ctx->data_end ||
1183 		    unlikely(xdp_metalen_invalid(ctx->data)) ||
1184 		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
1185 			goto free_ctx;
1186 		/* Meta data is allocated from the headroom */
1187 		headroom -= ctx->data;
1188 	}
1189 
1190 	max_data_sz = 4096 - headroom - tailroom;
1191 	if (size > max_data_sz) {
1192 		/* disallow live data mode for jumbo frames */
1193 		if (do_live)
1194 			goto free_ctx;
1195 		size = max_data_sz;
1196 	}
1197 
1198 	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
1199 	if (IS_ERR(data)) {
1200 		ret = PTR_ERR(data);
1201 		goto free_ctx;
1202 	}
1203 
1204 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
1205 	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
1206 	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
1207 	xdp_prepare_buff(&xdp, data, headroom, size, true);
1208 	sinfo = xdp_get_shared_info_from_buff(&xdp);
1209 
1210 	ret = xdp_convert_md_to_buff(ctx, &xdp);
1211 	if (ret)
1212 		goto free_data;
1213 
1214 	if (unlikely(kattr->test.data_size_in > size)) {
1215 		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
1216 
1217 		while (size < kattr->test.data_size_in) {
1218 			struct page *page;
1219 			skb_frag_t *frag;
1220 			u32 data_len;
1221 
1222 			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
1223 				ret = -ENOMEM;
1224 				goto out;
1225 			}
1226 
1227 			page = alloc_page(GFP_KERNEL);
1228 			if (!page) {
1229 				ret = -ENOMEM;
1230 				goto out;
1231 			}
1232 
1233 			frag = &sinfo->frags[sinfo->nr_frags++];
1234 
1235 			data_len = min_t(u32, kattr->test.data_size_in - size,
1236 					 PAGE_SIZE);
1237 			skb_frag_fill_page_desc(frag, page, 0, data_len);
1238 
1239 			if (copy_from_user(page_address(page), data_in + size,
1240 					   data_len)) {
1241 				ret = -EFAULT;
1242 				goto out;
1243 			}
1244 			sinfo->xdp_frags_size += data_len;
1245 			size += data_len;
1246 		}
1247 		xdp_buff_set_frags_flag(&xdp);
1248 	}
1249 
1250 	if (repeat > 1)
1251 		bpf_prog_change_xdp(NULL, prog);
1252 
1253 	if (do_live)
1254 		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
1255 	else
1256 		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
1257 	/* We convert the xdp_buff back to an xdp_md before checking the return
1258 	 * code so the reference count of any held netdevice will be decremented
1259 	 * even if the test run failed.
1260 	 */
1261 	xdp_convert_buff_to_md(&xdp, ctx);
1262 	if (ret)
1263 		goto out;
1264 
1265 	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
1266 	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
1267 			      retval, duration);
1268 	if (!ret)
1269 		ret = bpf_ctx_finish(kattr, uattr, ctx,
1270 				     sizeof(struct xdp_md));
1271 
1272 out:
1273 	if (repeat > 1)
1274 		bpf_prog_change_xdp(prog, NULL);
1275 free_data:
1276 	for (i = 0; i < sinfo->nr_frags; i++)
1277 		__free_page(skb_frag_page(&sinfo->frags[i]));
1278 	kfree(data);
1279 free_ctx:
1280 	kfree(ctx);
1281 	return ret;
1282 }
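/* A minimal user-space sketch (not part of this file) of exercising the
 * paths above through libbpf's bpf_prog_test_run_opts(); the function and
 * field names below are libbpf's, everything else is illustrative:
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .data_in = pkt,
 *		    .data_size_in = sizeof(pkt),
 *		    .data_out = out,
 *		    .data_size_out = sizeof(out),
 *		    .repeat = 100);
 *
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *
 * On success, opts.retval holds the program's return code and opts.duration
 * the average runtime in nanoseconds. Setting opts.flags to
 * BPF_F_TEST_XDP_LIVE_FRAMES (XDP only) selects the live-frames mode
 * implemented by bpf_test_run_xdp_live() above.
 */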
1283 
1284 static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
1285 {
1286 	/* make sure the fields we don't use are zeroed */
1287 	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
1288 		return -EINVAL;
1289 
1290 	/* flags is allowed */
1291 
1292 	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
1293 			   sizeof(struct bpf_flow_keys)))
1294 		return -EINVAL;
1295 
1296 	return 0;
1297 }
1298 
1299 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1300 				     const union bpf_attr *kattr,
1301 				     union bpf_attr __user *uattr)
1302 {
1303 	struct bpf_test_timer t = { NO_PREEMPT };
1304 	u32 size = kattr->test.data_size_in;
1305 	struct bpf_flow_dissector ctx = {};
1306 	u32 repeat = kattr->test.repeat;
1307 	struct bpf_flow_keys *user_ctx;
1308 	struct bpf_flow_keys flow_keys;
1309 	const struct ethhdr *eth;
1310 	unsigned int flags = 0;
1311 	u32 retval, duration;
1312 	void *data;
1313 	int ret;
1314 
1315 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1316 		return -EINVAL;
1317 
1318 	if (size < ETH_HLEN)
1319 		return -EINVAL;
1320 
1321 	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
1322 	if (IS_ERR(data))
1323 		return PTR_ERR(data);
1324 
1325 	eth = (struct ethhdr *)data;
1326 
1327 	if (!repeat)
1328 		repeat = 1;
1329 
1330 	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
1331 	if (IS_ERR(user_ctx)) {
1332 		kfree(data);
1333 		return PTR_ERR(user_ctx);
1334 	}
1335 	if (user_ctx) {
1336 		ret = verify_user_bpf_flow_keys(user_ctx);
1337 		if (ret)
1338 			goto out;
1339 		flags = user_ctx->flags;
1340 	}
1341 
1342 	ctx.flow_keys = &flow_keys;
1343 	ctx.data = data;
1344 	ctx.data_end = (__u8 *)data + size;
1345 
1346 	bpf_test_timer_enter(&t);
1347 	do {
1348 		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
1349 					  size, flags);
1350 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1351 	bpf_test_timer_leave(&t);
1352 
1353 	if (ret < 0)
1354 		goto out;
1355 
1356 	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
1357 			      sizeof(flow_keys), retval, duration);
1358 	if (!ret)
1359 		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
1360 				     sizeof(struct bpf_flow_keys));
1361 
1362 out:
1363 	kfree(user_ctx);
1364 	kfree(data);
1365 	return ret;
1366 }
1367 
1368 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
1369 				union bpf_attr __user *uattr)
1370 {
1371 	struct bpf_test_timer t = { NO_PREEMPT };
1372 	struct bpf_prog_array *progs = NULL;
1373 	struct bpf_sk_lookup_kern ctx = {};
1374 	u32 repeat = kattr->test.repeat;
1375 	struct bpf_sk_lookup *user_ctx;
1376 	u32 retval, duration;
1377 	int ret = -EINVAL;
1378 
1379 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1380 		return -EINVAL;
1381 
1382 	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
1383 	    kattr->test.data_size_out)
1384 		return -EINVAL;
1385 
1386 	if (!repeat)
1387 		repeat = 1;
1388 
1389 	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
1390 	if (IS_ERR(user_ctx))
1391 		return PTR_ERR(user_ctx);
1392 
1393 	if (!user_ctx)
1394 		return -EINVAL;
1395 
1396 	if (user_ctx->sk)
1397 		goto out;
1398 
1399 	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
1400 		goto out;
1401 
1402 	if (user_ctx->local_port > U16_MAX) {
1403 		ret = -ERANGE;
1404 		goto out;
1405 	}
1406 
1407 	ctx.family = (u16)user_ctx->family;
1408 	ctx.protocol = (u16)user_ctx->protocol;
1409 	ctx.dport = (u16)user_ctx->local_port;
1410 	ctx.sport = user_ctx->remote_port;
1411 
1412 	switch (ctx.family) {
1413 	case AF_INET:
1414 		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
1415 		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
1416 		break;
1417 
1418 #if IS_ENABLED(CONFIG_IPV6)
1419 	case AF_INET6:
1420 		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
1421 		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
1422 		break;
1423 #endif
1424 
1425 	default:
1426 		ret = -EAFNOSUPPORT;
1427 		goto out;
1428 	}
1429 
1430 	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
1431 	if (!progs) {
1432 		ret = -ENOMEM;
1433 		goto out;
1434 	}
1435 
1436 	progs->items[0].prog = prog;
1437 
1438 	bpf_test_timer_enter(&t);
1439 	do {
1440 		ctx.selected_sk = NULL;
1441 		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
1442 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1443 	bpf_test_timer_leave(&t);
1444 
1445 	if (ret < 0)
1446 		goto out;
1447 
1448 	user_ctx->cookie = 0;
1449 	if (ctx.selected_sk) {
1450 		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
1451 			ret = -EOPNOTSUPP;
1452 			goto out;
1453 		}
1454 
1455 		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
1456 	}
1457 
1458 	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1459 	if (!ret)
1460 		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
1461 
1462 out:
1463 	bpf_prog_array_free(progs);
1464 	kfree(user_ctx);
1465 	return ret;
1466 }
1467 
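/* Test run for BPF_PROG_TYPE_SYSCALL programs: run the program once on a
 * copy of the user-supplied context under rcu_read_lock_trace() and copy the
 * (possibly modified) context and return value back to user space.
 */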
1468 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
1469 			      const union bpf_attr *kattr,
1470 			      union bpf_attr __user *uattr)
1471 {
1472 	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
1473 	__u32 ctx_size_in = kattr->test.ctx_size_in;
1474 	void *ctx = NULL;
1475 	u32 retval;
1476 	int err = 0;
1477 
1478 	/* doesn't support data_in/out, ctx_out, duration, repeat, flags or batch_size */
1479 	if (kattr->test.data_in || kattr->test.data_out ||
1480 	    kattr->test.ctx_out || kattr->test.duration ||
1481 	    kattr->test.repeat || kattr->test.flags ||
1482 	    kattr->test.batch_size)
1483 		return -EINVAL;
1484 
1485 	if (ctx_size_in < prog->aux->max_ctx_offset ||
1486 	    ctx_size_in > U16_MAX)
1487 		return -EINVAL;
1488 
1489 	if (ctx_size_in) {
1490 		ctx = memdup_user(ctx_in, ctx_size_in);
1491 		if (IS_ERR(ctx))
1492 			return PTR_ERR(ctx);
1493 	}
1494 
1495 	rcu_read_lock_trace();
1496 	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
1497 	rcu_read_unlock_trace();
1498 
1499 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
1500 		err = -EFAULT;
1501 		goto out;
1502 	}
1503 	if (ctx_size_in)
1504 		if (copy_to_user(ctx_in, ctx, ctx_size_in))
1505 			err = -EFAULT;
1506 out:
1507 	kfree(ctx);
1508 	return err;
1509 }
1510 
1511 static int verify_and_copy_hook_state(struct nf_hook_state *state,
1512 				      const struct nf_hook_state *user,
1513 				      struct net_device *dev)
1514 {
1515 	if (user->in || user->out)
1516 		return -EINVAL;
1517 
1518 	if (user->net || user->sk || user->okfn)
1519 		return -EINVAL;
1520 
1521 	switch (user->pf) {
1522 	case NFPROTO_IPV4:
1523 	case NFPROTO_IPV6:
1524 		switch (state->hook) {
1525 		case NF_INET_PRE_ROUTING:
1526 			state->in = dev;
1527 			break;
1528 		case NF_INET_LOCAL_IN:
1529 			state->in = dev;
1530 			break;
1531 		case NF_INET_FORWARD:
1532 			state->in = dev;
1533 			state->out = dev;
1534 			break;
1535 		case NF_INET_LOCAL_OUT:
1536 			state->out = dev;
1537 			break;
1538 		case NF_INET_POST_ROUTING:
1539 			state->out = dev;
1540 			break;
1541 		}
1542 
1543 		break;
1544 	default:
1545 		return -EINVAL;
1546 	}
1547 
1548 	state->pf = user->pf;
1549 	state->hook = user->hook;
1550 
1551 	return 0;
1552 }
1553 
1554 static __be16 nfproto_eth(int nfproto)
1555 {
1556 	switch (nfproto) {
1557 	case NFPROTO_IPV4:
1558 		return htons(ETH_P_IP);
1559 	case NFPROTO_IPV6:
1560 		break;
1561 	}
1562 
1563 	return htons(ETH_P_IPV6);
1564 }
1565 
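/* Netfilter test run: build an skb from the test packet, apply an optional
 * user-supplied nf_hook_state (only ->pf and ->hook may be set; the in/out
 * devices are filled in from the loopback device according to the hook) and
 * run the program with a bpf_nf_ctx wrapping the state and skb.
 */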
1566 int bpf_prog_test_run_nf(struct bpf_prog *prog,
1567 			 const union bpf_attr *kattr,
1568 			 union bpf_attr __user *uattr)
1569 {
1570 	struct net *net = current->nsproxy->net_ns;
1571 	struct net_device *dev = net->loopback_dev;
1572 	struct nf_hook_state *user_ctx, hook_state = {
1573 		.pf = NFPROTO_IPV4,
1574 		.hook = NF_INET_LOCAL_OUT,
1575 	};
1576 	u32 size = kattr->test.data_size_in;
1577 	u32 repeat = kattr->test.repeat;
1578 	struct bpf_nf_ctx ctx = {
1579 		.state = &hook_state,
1580 	};
1581 	struct sk_buff *skb = NULL;
1582 	u32 retval, duration;
1583 	void *data;
1584 	int ret;
1585 
1586 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1587 		return -EINVAL;
1588 
1589 	if (size < sizeof(struct iphdr))
1590 		return -EINVAL;
1591 
1592 	data = bpf_test_init(kattr, kattr->test.data_size_in, size,
1593 			     NET_SKB_PAD + NET_IP_ALIGN,
1594 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1595 	if (IS_ERR(data))
1596 		return PTR_ERR(data);
1597 
1598 	if (!repeat)
1599 		repeat = 1;
1600 
1601 	user_ctx = bpf_ctx_init(kattr, sizeof(struct nf_hook_state));
1602 	if (IS_ERR(user_ctx)) {
1603 		kfree(data);
1604 		return PTR_ERR(user_ctx);
1605 	}
1606 
1607 	if (user_ctx) {
1608 		ret = verify_and_copy_hook_state(&hook_state, user_ctx, dev);
1609 		if (ret)
1610 			goto out;
1611 	}
1612 
1613 	skb = slab_build_skb(data);
1614 	if (!skb) {
1615 		ret = -ENOMEM;
1616 		goto out;
1617 	}
1618 
1619 	data = NULL; /* data released via kfree_skb */
1620 
1621 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1622 	__skb_put(skb, size);
1623 
1624 	ret = -EINVAL;
1625 
1626 	if (hook_state.hook != NF_INET_LOCAL_OUT) {
1627 		if (size < ETH_HLEN + sizeof(struct iphdr))
1628 			goto out;
1629 
1630 		skb->protocol = eth_type_trans(skb, dev);
1631 		switch (skb->protocol) {
1632 		case htons(ETH_P_IP):
1633 			if (hook_state.pf == NFPROTO_IPV4)
1634 				break;
1635 			goto out;
1636 		case htons(ETH_P_IPV6):
1637 			if (size < ETH_HLEN + sizeof(struct ipv6hdr))
1638 				goto out;
1639 			if (hook_state.pf == NFPROTO_IPV6)
1640 				break;
1641 			goto out;
1642 		default:
1643 			ret = -EPROTO;
1644 			goto out;
1645 		}
1646 
1647 		skb_reset_network_header(skb);
1648 	} else {
1649 		skb->protocol = nfproto_eth(hook_state.pf);
1650 	}
1651 
1652 	ctx.skb = skb;
1653 
1654 	ret = bpf_test_run(prog, &ctx, repeat, &retval, &duration, false);
1655 	if (ret)
1656 		goto out;
1657 
1658 	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1659 
1660 out:
1661 	kfree(user_ctx);
1662 	kfree_skb(skb);
1663 	kfree(data);
1664 	return ret;
1665 }
1666 
1667 static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
1668 	.owner = THIS_MODULE,
1669 	.set   = &test_sk_check_kfunc_ids,
1670 };
1671 
1672 BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
1673 BTF_ID(struct, prog_test_ref_kfunc)
1674 BTF_ID(func, bpf_kfunc_call_test_release)
1675 BTF_ID(struct, prog_test_member)
1676 BTF_ID(func, bpf_kfunc_call_memb_release)
1677 
1678 static int __init bpf_prog_test_run_init(void)
1679 {
1680 	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
1681 		{
1682 		  .btf_id       = bpf_prog_test_dtor_kfunc_ids[0],
1683 		  .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
1684 		},
1685 		{
1686 		  .btf_id	= bpf_prog_test_dtor_kfunc_ids[2],
1687 		  .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
1688 		},
1689 	};
1690 	int ret;
1691 
1692 	ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
1693 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
1694 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
1695 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
1696 	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
1697 						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
1698 						  THIS_MODULE);
1699 }
1700 late_initcall(bpf_prog_test_run_init);
1701