xref: /linux/net/bpf/test_run.c (revision 675f176b4dcc2b75adbcea7ba0e9a649527f53bd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Facebook
3  */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/btf_ids.h>
7 #include <linux/slab.h>
8 #include <linux/init.h>
9 #include <linux/vmalloc.h>
10 #include <linux/etherdevice.h>
11 #include <linux/filter.h>
12 #include <linux/rcupdate_trace.h>
13 #include <linux/sched/signal.h>
14 #include <net/bpf_sk_storage.h>
15 #include <net/sock.h>
16 #include <net/tcp.h>
17 #include <net/net_namespace.h>
18 #include <net/page_pool.h>
19 #include <linux/error-injection.h>
20 #include <linux/smp.h>
21 #include <linux/sock_diag.h>
22 #include <net/xdp.h>
23 
24 #define CREATE_TRACE_POINTS
25 #include <trace/events/bpf_test_run.h>
26 
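/* Timing state for repeated test runs: time_spent accumulates across
 * iterations and the reported duration is the average per run. The mode
 * selects whether preemption (NO_PREEMPT) or only migration (NO_MIGRATE)
 * is disabled while the program executes.
 */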
27 struct bpf_test_timer {
28 	enum { NO_PREEMPT, NO_MIGRATE } mode;
29 	u32 i;
30 	u64 time_start, time_spent;
31 };
32 
33 static void bpf_test_timer_enter(struct bpf_test_timer *t)
34 	__acquires(rcu)
35 {
36 	rcu_read_lock();
37 	if (t->mode == NO_PREEMPT)
38 		preempt_disable();
39 	else
40 		migrate_disable();
41 
42 	t->time_start = ktime_get_ns();
43 }
44 
45 static void bpf_test_timer_leave(struct bpf_test_timer *t)
46 	__releases(rcu)
47 {
48 	t->time_start = 0;
49 
50 	if (t->mode == NO_PREEMPT)
51 		preempt_enable();
52 	else
53 		migrate_enable();
54 	rcu_read_unlock();
55 }
56 
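/* Advance the iteration count by @iterations and decide whether to keep
 * looping: returns false (with *err set) once @repeat iterations are done
 * or a signal is pending, and drops/re-takes the timed section around
 * cond_resched() when a reschedule is needed.
 */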
57 static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
58 				    u32 repeat, int *err, u32 *duration)
59 	__must_hold(rcu)
60 {
61 	t->i += iterations;
62 	if (t->i >= repeat) {
63 		/* We're done. */
64 		t->time_spent += ktime_get_ns() - t->time_start;
65 		do_div(t->time_spent, t->i);
66 		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
67 		*err = 0;
68 		goto reset;
69 	}
70 
71 	if (signal_pending(current)) {
72 		/* During iteration: we've been cancelled, abort. */
73 		*err = -EINTR;
74 		goto reset;
75 	}
76 
77 	if (need_resched()) {
78 		/* During iteration: we need to reschedule between runs. */
79 		t->time_spent += ktime_get_ns() - t->time_start;
80 		bpf_test_timer_leave(t);
81 		cond_resched();
82 		bpf_test_timer_enter(t);
83 	}
84 
85 	/* Do another round. */
86 	return true;
87 
88 reset:
89 	t->i = 0;
90 	return false;
91 }
92 
93 /* We put this struct at the head of each page with a context and frame
94  * initialised when the page is allocated, so we don't have to do this on each
95  * repetition of the test run.
96  */
97 struct xdp_page_head {
98 	struct xdp_buff orig_ctx;
99 	struct xdp_buff ctx;
100 	struct xdp_frame frm;
101 	u8 data[];
102 };
103 
104 struct xdp_test_data {
105 	struct xdp_buff *orig_ctx;
106 	struct xdp_rxq_info rxq;
107 	struct net_device *dev;
108 	struct page_pool *pp;
109 	struct xdp_frame **frames;
110 	struct sk_buff **skbs;
111 	struct xdp_mem_info mem;
112 	u32 batch_size;
113 	u32 frame_cnt;
114 };
115 
116 #define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
117 #define TEST_XDP_MAX_BATCH 256
118 
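/* page_pool init callback: seed each freshly allocated page with a copy of
 * the original packet and xdp_buff so every test iteration starts from
 * pristine frame state; the untouched copy in orig_ctx is what reset_ctx()
 * later restores from.
 */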
119 static void xdp_test_run_init_page(struct page *page, void *arg)
120 {
121 	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
122 	struct xdp_buff *new_ctx, *orig_ctx;
123 	u32 headroom = XDP_PACKET_HEADROOM;
124 	struct xdp_test_data *xdp = arg;
125 	size_t frm_len, meta_len;
126 	struct xdp_frame *frm;
127 	void *data;
128 
129 	orig_ctx = xdp->orig_ctx;
130 	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
131 	meta_len = orig_ctx->data - orig_ctx->data_meta;
132 	headroom -= meta_len;
133 
134 	new_ctx = &head->ctx;
135 	frm = &head->frm;
136 	data = &head->data;
137 	memcpy(data + headroom, orig_ctx->data_meta, frm_len);
138 
139 	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
140 	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
141 	new_ctx->data = new_ctx->data_meta + meta_len;
142 
143 	xdp_update_frame_from_buff(new_ctx, frm);
144 	frm->mem = new_ctx->rxq->mem;
145 
146 	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
147 }
148 
149 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
150 {
151 	struct page_pool *pp;
152 	int err = -ENOMEM;
153 	struct page_pool_params pp_params = {
154 		.order = 0,
155 		.flags = 0,
156 		.pool_size = xdp->batch_size,
157 		.nid = NUMA_NO_NODE,
158 		.init_callback = xdp_test_run_init_page,
159 		.init_arg = xdp,
160 	};
161 
162 	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
163 	if (!xdp->frames)
164 		return -ENOMEM;
165 
166 	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
167 	if (!xdp->skbs)
168 		goto err_skbs;
169 
170 	pp = page_pool_create(&pp_params);
171 	if (IS_ERR(pp)) {
172 		err = PTR_ERR(pp);
173 		goto err_pp;
174 	}
175 
176 	/* will copy 'mem.id' into pp->xdp_mem_id */
177 	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
178 	if (err)
179 		goto err_mmodel;
180 
181 	xdp->pp = pp;
182 
183 	/* We create a 'fake' RXQ referencing the original dev, but with an
184 	 * xdp_mem_info pointing to our page_pool
185 	 */
186 	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
187 	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
188 	xdp->rxq.mem.id = pp->xdp_mem_id;
189 	xdp->dev = orig_ctx->rxq->dev;
190 	xdp->orig_ctx = orig_ctx;
191 
192 	return 0;
193 
194 err_mmodel:
195 	page_pool_destroy(pp);
196 err_pp:
197 	kvfree(xdp->skbs);
198 err_skbs:
199 	kvfree(xdp->frames);
200 	return err;
201 }
202 
203 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
204 {
205 	xdp_unreg_mem_model(&xdp->mem);
206 	page_pool_destroy(xdp->pp);
207 	kvfree(xdp->frames);
208 	kvfree(xdp->skbs);
209 }
210 
211 static bool ctx_was_changed(struct xdp_page_head *head)
212 {
213 	return head->orig_ctx.data != head->ctx.data ||
214 		head->orig_ctx.data_meta != head->ctx.data_meta ||
215 		head->orig_ctx.data_end != head->ctx.data_end;
216 }
217 
218 static void reset_ctx(struct xdp_page_head *head)
219 {
220 	if (likely(!ctx_was_changed(head)))
221 		return;
222 
223 	head->ctx.data = head->orig_ctx.data;
224 	head->ctx.data_meta = head->orig_ctx.data_meta;
225 	head->ctx.data_end = head->orig_ctx.data_end;
226 	xdp_update_frame_from_buff(&head->ctx, &head->frm);
227 }
228 
229 static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
230 			   struct sk_buff **skbs,
231 			   struct net_device *dev)
232 {
233 	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
234 	int i, n;
235 	LIST_HEAD(list);
236 
237 	n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
238 	if (unlikely(n == 0)) {
239 		for (i = 0; i < nframes; i++)
240 			xdp_return_frame(frames[i]);
241 		return -ENOMEM;
242 	}
243 
244 	for (i = 0; i < nframes; i++) {
245 		struct xdp_frame *xdpf = frames[i];
246 		struct sk_buff *skb = skbs[i];
247 
248 		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
249 		if (!skb) {
250 			xdp_return_frame(xdpf);
251 			continue;
252 		}
253 
254 		list_add_tail(&skb->list, &list);
255 	}
256 	netif_receive_skb_list(&list);
257 
258 	return 0;
259 }
260 
261 static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
262 			      u32 repeat)
263 {
264 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
265 	int err = 0, act, ret, i, nframes = 0, batch_sz;
266 	struct xdp_frame **frames = xdp->frames;
267 	struct xdp_page_head *head;
268 	struct xdp_frame *frm;
269 	bool redirect = false;
270 	struct xdp_buff *ctx;
271 	struct page *page;
272 
273 	batch_sz = min_t(u32, repeat, xdp->batch_size);
274 
275 	local_bh_disable();
276 	xdp_set_return_frame_no_direct();
277 
278 	for (i = 0; i < batch_sz; i++) {
279 		page = page_pool_dev_alloc_pages(xdp->pp);
280 		if (!page) {
281 			err = -ENOMEM;
282 			goto out;
283 		}
284 
285 		head = phys_to_virt(page_to_phys(page));
286 		reset_ctx(head);
287 		ctx = &head->ctx;
288 		frm = &head->frm;
289 		xdp->frame_cnt++;
290 
291 		act = bpf_prog_run_xdp(prog, ctx);
292 
293 		/* if program changed pkt bounds we need to update the xdp_frame */
294 		if (unlikely(ctx_was_changed(head))) {
295 			ret = xdp_update_frame_from_buff(ctx, frm);
296 			if (ret) {
297 				xdp_return_buff(ctx);
298 				continue;
299 			}
300 		}
301 
302 		switch (act) {
303 		case XDP_TX:
304 			/* we can't do a real XDP_TX since we're not in the
305 			 * driver, so turn it into a REDIRECT back to the same
306 			 * index
307 			 */
308 			ri->tgt_index = xdp->dev->ifindex;
309 			ri->map_id = INT_MAX;
310 			ri->map_type = BPF_MAP_TYPE_UNSPEC;
311 			fallthrough;
312 		case XDP_REDIRECT:
313 			redirect = true;
314 			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
315 			if (ret)
316 				xdp_return_buff(ctx);
317 			break;
318 		case XDP_PASS:
319 			frames[nframes++] = frm;
320 			break;
321 		default:
322 			bpf_warn_invalid_xdp_action(NULL, prog, act);
323 			fallthrough;
324 		case XDP_DROP:
325 			xdp_return_buff(ctx);
326 			break;
327 		}
328 	}
329 
330 out:
331 	if (redirect)
332 		xdp_do_flush();
333 	if (nframes) {
334 		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
335 		if (ret)
336 			err = ret;
337 	}
338 
339 	xdp_clear_return_frame_no_direct();
340 	local_bh_enable();
341 	return err;
342 }
343 
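/* "Live frames" mode (BPF_F_TEST_XDP_LIVE_FRAMES): redirected and passed
 * frames are actually transmitted or injected into the stack, so the run
 * has real side effects. The duration reported is the average runtime per
 * frame across all batches.
 */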
344 static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
345 				 u32 repeat, u32 batch_size, u32 *time)
346 
347 {
348 	struct xdp_test_data xdp = { .batch_size = batch_size };
349 	struct bpf_test_timer t = { .mode = NO_MIGRATE };
350 	int ret;
351 
352 	if (!repeat)
353 		repeat = 1;
354 
355 	ret = xdp_test_run_setup(&xdp, ctx);
356 	if (ret)
357 		return ret;
358 
359 	bpf_test_timer_enter(&t);
360 	do {
361 		xdp.frame_cnt = 0;
362 		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
363 		if (unlikely(ret < 0))
364 			break;
365 	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
366 	bpf_test_timer_leave(&t);
367 
368 	xdp_test_run_teardown(&xdp);
369 	return ret;
370 }
371 
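/* Generic repeat loop for skb and (non-live) XDP test runs: allocates
 * cgroup storage for the program, runs it @repeat times under a
 * bpf_cg_run_ctx and returns the last retval plus the average runtime.
 */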
372 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
373 			u32 *retval, u32 *time, bool xdp)
374 {
375 	struct bpf_prog_array_item item = {.prog = prog};
376 	struct bpf_run_ctx *old_ctx;
377 	struct bpf_cg_run_ctx run_ctx;
378 	struct bpf_test_timer t = { NO_MIGRATE };
379 	enum bpf_cgroup_storage_type stype;
380 	int ret;
381 
382 	for_each_cgroup_storage_type(stype) {
383 		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
384 		if (IS_ERR(item.cgroup_storage[stype])) {
385 			item.cgroup_storage[stype] = NULL;
386 			for_each_cgroup_storage_type(stype)
387 				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
388 			return -ENOMEM;
389 		}
390 	}
391 
392 	if (!repeat)
393 		repeat = 1;
394 
395 	bpf_test_timer_enter(&t);
396 	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
397 	do {
398 		run_ctx.prog_item = &item;
399 		if (xdp)
400 			*retval = bpf_prog_run_xdp(prog, ctx);
401 		else
402 			*retval = bpf_prog_run(prog, ctx);
403 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
404 	bpf_reset_run_ctx(old_ctx);
405 	bpf_test_timer_leave(&t);
406 
407 	for_each_cgroup_storage_type(stype)
408 		bpf_cgroup_storage_free(item.cgroup_storage[stype]);
409 
410 	return ret;
411 }
412 
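/* Copy the resulting packet data (including any frags), the program's
 * return value and the measured duration back to userspace; returns
 * -ENOSPC if the user buffer was too small for the full packet.
 */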
413 static int bpf_test_finish(const union bpf_attr *kattr,
414 			   union bpf_attr __user *uattr, const void *data,
415 			   struct skb_shared_info *sinfo, u32 size,
416 			   u32 retval, u32 duration)
417 {
418 	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
419 	int err = -EFAULT;
420 	u32 copy_size = size;
421 
422 	/* Clamp copy if the user has provided a size hint, but copy the full
423 	 * buffer if not to retain old behaviour.
424 	 */
425 	if (kattr->test.data_size_out &&
426 	    copy_size > kattr->test.data_size_out) {
427 		copy_size = kattr->test.data_size_out;
428 		err = -ENOSPC;
429 	}
430 
431 	if (data_out) {
432 		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;
433 
434 		if (len < 0) {
435 			err = -ENOSPC;
436 			goto out;
437 		}
438 
439 		if (copy_to_user(data_out, data, len))
440 			goto out;
441 
442 		if (sinfo) {
443 			int i, offset = len;
444 			u32 data_len;
445 
446 			for (i = 0; i < sinfo->nr_frags; i++) {
447 				skb_frag_t *frag = &sinfo->frags[i];
448 
449 				if (offset >= copy_size) {
450 					err = -ENOSPC;
451 					break;
452 				}
453 
454 				data_len = min_t(u32, copy_size - offset,
455 						 skb_frag_size(frag));
456 
457 				if (copy_to_user(data_out + offset,
458 						 skb_frag_address(frag),
459 						 data_len))
460 					goto out;
461 
462 				offset += data_len;
463 			}
464 		}
465 	}
466 
467 	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
468 		goto out;
469 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
470 		goto out;
471 	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
472 		goto out;
473 	if (err != -ENOSPC)
474 		err = 0;
475 out:
476 	trace_bpf_test_finish(&err);
477 	return err;
478 }
479 
480 /* Integer types of various sizes and pointer combinations cover a variety
481  * of architecture-dependent calling conventions. 7+ can be supported in the
482  * future.
483  */
484 __diag_push();
485 __diag_ignore_all("-Wmissing-prototypes",
486 		  "Global functions as their definitions will be in vmlinux BTF");
487 __bpf_kfunc int bpf_fentry_test1(int a)
488 {
489 	return a + 1;
490 }
491 EXPORT_SYMBOL_GPL(bpf_fentry_test1);
492 
493 int noinline bpf_fentry_test2(int a, u64 b)
494 {
495 	return a + b;
496 }
497 
498 int noinline bpf_fentry_test3(char a, int b, u64 c)
499 {
500 	return a + b + c;
501 }
502 
503 int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
504 {
505 	return (long)a + b + c + d;
506 }
507 
508 int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
509 {
510 	return a + (long)b + c + d + e;
511 }
512 
513 int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
514 {
515 	return a + (long)b + c + d + (long)e + f;
516 }
517 
518 struct bpf_fentry_test_t {
519 	struct bpf_fentry_test_t *a;
520 };
521 
522 int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
523 {
524 	return (long)arg;
525 }
526 
527 int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
528 {
529 	return (long)arg->a;
530 }
531 
532 __bpf_kfunc int bpf_modify_return_test(int a, int *b)
533 {
534 	*b += 1;
535 	return a + *b;
536 }
537 
538 __bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
539 {
540 	return a + b + c + d;
541 }
542 
543 __bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
544 {
545 	return a + b;
546 }
547 
548 __bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
549 {
550 	return sk;
551 }
552 
553 long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
554 {
555 	/* Provoke the compiler to assume that the caller has sign-extended a,
556 	 * b and c on platforms where this is required (e.g. s390x).
557 	 */
558 	return (long)a + (long)b + (long)c + d;
559 }
560 
561 struct prog_test_member1 {
562 	int a;
563 };
564 
565 struct prog_test_member {
566 	struct prog_test_member1 m;
567 	int c;
568 };
569 
570 struct prog_test_ref_kfunc {
571 	int a;
572 	int b;
573 	struct prog_test_member memb;
574 	struct prog_test_ref_kfunc *next;
575 	refcount_t cnt;
576 };
577 
578 static struct prog_test_ref_kfunc prog_test_struct = {
579 	.a = 42,
580 	.b = 108,
581 	.next = &prog_test_struct,
582 	.cnt = REFCOUNT_INIT(1),
583 };
584 
585 __bpf_kfunc struct prog_test_ref_kfunc *
586 bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
587 {
588 	refcount_inc(&prog_test_struct.cnt);
589 	return &prog_test_struct;
590 }
591 
592 __bpf_kfunc struct prog_test_member *
593 bpf_kfunc_call_memb_acquire(void)
594 {
595 	WARN_ON_ONCE(1);
596 	return NULL;
597 }
598 
599 __bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
600 {
601 	if (!p)
602 		return;
603 
604 	refcount_dec(&p->cnt);
605 }
606 
607 __bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
608 {
609 }
610 
611 __bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
612 {
613 	WARN_ON_ONCE(1);
614 }
615 
616 static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
617 {
618 	if (size > 2 * sizeof(int))
619 		return NULL;
620 
621 	return (int *)p;
622 }
623 
624 __bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
625 						  const int rdwr_buf_size)
626 {
627 	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
628 }
629 
630 __bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
631 						    const int rdonly_buf_size)
632 {
633 	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
634 }
635 
636 /* The next two can't really be used for testing except to ensure
637  * that the verifier rejects the call.
638  * Acquire functions must return struct pointers, so these are
639  * expected to fail.
640  */
641 __bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
642 						    const int rdonly_buf_size)
643 {
644 	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
645 }
646 
647 __bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
648 {
649 }
650 
651 __bpf_kfunc struct prog_test_ref_kfunc *
652 bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
653 {
654 	struct prog_test_ref_kfunc *p = READ_ONCE(*pp);
655 
656 	if (!p)
657 		return NULL;
658 	refcount_inc(&p->cnt);
659 	return p;
660 }
661 
662 struct prog_test_pass1 {
663 	int x0;
664 	struct {
665 		int x1;
666 		struct {
667 			int x2;
668 			struct {
669 				int x3;
670 			};
671 		};
672 	};
673 };
674 
675 struct prog_test_pass2 {
676 	int len;
677 	short arr1[4];
678 	struct {
679 		char arr2[4];
680 		unsigned long arr3[8];
681 	} x;
682 };
683 
684 struct prog_test_fail1 {
685 	void *p;
686 	int x;
687 };
688 
689 struct prog_test_fail2 {
690 	int x8;
691 	struct prog_test_pass1 x;
692 };
693 
694 struct prog_test_fail3 {
695 	int len;
696 	char arr1[2];
697 	char arr2[];
698 };
699 
700 __bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
701 {
702 }
703 
704 __bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
705 {
706 }
707 
708 __bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
709 {
710 }
711 
712 __bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
713 {
714 }
715 
716 __bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
717 {
718 }
719 
720 __bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
721 {
722 }
723 
724 __bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
725 {
726 }
727 
728 __bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
729 {
730 }
731 
732 __bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
733 {
734 }
735 
736 __bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
737 {
738 }
739 
740 __bpf_kfunc void bpf_kfunc_call_test_destructive(void)
741 {
742 }
743 
744 __bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
745 {
746 	return arg;
747 }
748 
749 __diag_pop();
750 
751 BTF_SET8_START(bpf_test_modify_return_ids)
752 BTF_ID_FLAGS(func, bpf_modify_return_test)
753 BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
754 BTF_SET8_END(bpf_test_modify_return_ids)
755 
756 static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
757 	.owner = THIS_MODULE,
758 	.set   = &bpf_test_modify_return_ids,
759 };
760 
761 BTF_SET8_START(test_sk_check_kfunc_ids)
762 BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
763 BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
764 BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
765 BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
766 BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
767 BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
768 BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
769 BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
770 BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
771 BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
772 BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
773 BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
774 BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
775 BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET)
776 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
777 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
778 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
779 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
780 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
781 BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
782 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
783 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
784 BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
785 BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS)
786 BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
787 BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
788 BTF_SET8_END(test_sk_check_kfunc_ids)
789 
790 static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
791 			   u32 size, u32 headroom, u32 tailroom)
792 {
793 	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
794 	void *data;
795 
796 	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
797 		return ERR_PTR(-EINVAL);
798 
799 	if (user_size > size)
800 		return ERR_PTR(-EMSGSIZE);
801 
802 	size = SKB_DATA_ALIGN(size);
803 	data = kzalloc(size + headroom + tailroom, GFP_USER);
804 	if (!data)
805 		return ERR_PTR(-ENOMEM);
806 
807 	if (copy_from_user(data + headroom, data_in, user_size)) {
808 		kfree(data);
809 		return ERR_PTR(-EFAULT);
810 	}
811 
812 	return data;
813 }
814 
815 int bpf_prog_test_run_tracing(struct bpf_prog *prog,
816 			      const union bpf_attr *kattr,
817 			      union bpf_attr __user *uattr)
818 {
819 	struct bpf_fentry_test_t arg = {};
820 	u16 side_effect = 0, ret = 0;
821 	int b = 2, err = -EFAULT;
822 	u32 retval = 0;
823 
824 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
825 		return -EINVAL;
826 
827 	switch (prog->expected_attach_type) {
828 	case BPF_TRACE_FENTRY:
829 	case BPF_TRACE_FEXIT:
830 		if (bpf_fentry_test1(1) != 2 ||
831 		    bpf_fentry_test2(2, 3) != 5 ||
832 		    bpf_fentry_test3(4, 5, 6) != 15 ||
833 		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
834 		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
835 		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
836 		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
837 		    bpf_fentry_test8(&arg) != 0)
838 			goto out;
839 		break;
840 	case BPF_MODIFY_RETURN:
841 		ret = bpf_modify_return_test(1, &b);
842 		if (b != 2)
843 			side_effect = 1;
844 		break;
845 	default:
846 		goto out;
847 	}
848 
849 	retval = ((u32)side_effect << 16) | ret;
850 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
851 		goto out;
852 
853 	err = 0;
854 out:
855 	trace_bpf_test_finish(&err);
856 	return err;
857 }
858 
859 struct bpf_raw_tp_test_run_info {
860 	struct bpf_prog *prog;
861 	void *ctx;
862 	u32 retval;
863 };
864 
865 static void
866 __bpf_prog_test_run_raw_tp(void *data)
867 {
868 	struct bpf_raw_tp_test_run_info *info = data;
869 
870 	rcu_read_lock();
871 	info->retval = bpf_prog_run(info->prog, info->ctx);
872 	rcu_read_unlock();
873 }
874 
875 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
876 			     const union bpf_attr *kattr,
877 			     union bpf_attr __user *uattr)
878 {
879 	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
880 	__u32 ctx_size_in = kattr->test.ctx_size_in;
881 	struct bpf_raw_tp_test_run_info info;
882 	int cpu = kattr->test.cpu, err = 0;
883 	int current_cpu;
884 
885 	/* doesn't support data_in/out, ctx_out, duration, or repeat */
886 	if (kattr->test.data_in || kattr->test.data_out ||
887 	    kattr->test.ctx_out || kattr->test.duration ||
888 	    kattr->test.repeat || kattr->test.batch_size)
889 		return -EINVAL;
890 
891 	if (ctx_size_in < prog->aux->max_ctx_offset ||
892 	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
893 		return -EINVAL;
894 
895 	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
896 		return -EINVAL;
897 
898 	if (ctx_size_in) {
899 		info.ctx = memdup_user(ctx_in, ctx_size_in);
900 		if (IS_ERR(info.ctx))
901 			return PTR_ERR(info.ctx);
902 	} else {
903 		info.ctx = NULL;
904 	}
905 
906 	info.prog = prog;
907 
908 	current_cpu = get_cpu();
909 	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
910 	    cpu == current_cpu) {
911 		__bpf_prog_test_run_raw_tp(&info);
912 	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
913 		/* smp_call_function_single() also checks cpu_online()
914 		 * after csd_lock(). However, since cpu is from user
915 		 * space, let's do an extra quick check to filter out
916 		 * invalid value before smp_call_function_single().
917 		 */
918 		err = -ENXIO;
919 	} else {
920 		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
921 					       &info, 1);
922 	}
923 	put_cpu();
924 
925 	if (!err &&
926 	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
927 		err = -EFAULT;
928 
929 	kfree(info.ctx);
930 	return err;
931 }
932 
933 static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
934 {
935 	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
936 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
937 	u32 size = kattr->test.ctx_size_in;
938 	void *data;
939 	int err;
940 
941 	if (!data_in && !data_out)
942 		return NULL;
943 
944 	data = kzalloc(max_size, GFP_USER);
945 	if (!data)
946 		return ERR_PTR(-ENOMEM);
947 
948 	if (data_in) {
949 		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
950 		if (err) {
951 			kfree(data);
952 			return ERR_PTR(err);
953 		}
954 
955 		size = min_t(u32, max_size, size);
956 		if (copy_from_user(data, data_in, size)) {
957 			kfree(data);
958 			return ERR_PTR(-EFAULT);
959 		}
960 	}
961 	return data;
962 }
963 
964 static int bpf_ctx_finish(const union bpf_attr *kattr,
965 			  union bpf_attr __user *uattr, const void *data,
966 			  u32 size)
967 {
968 	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
969 	int err = -EFAULT;
970 	u32 copy_size = size;
971 
972 	if (!data || !data_out)
973 		return 0;
974 
975 	if (copy_size > kattr->test.ctx_size_out) {
976 		copy_size = kattr->test.ctx_size_out;
977 		err = -ENOSPC;
978 	}
979 
980 	if (copy_to_user(data_out, data, copy_size))
981 		goto out;
982 	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
983 		goto out;
984 	if (err != -ENOSPC)
985 		err = 0;
986 out:
987 	return err;
988 }
989 
990 /**
991  * range_is_zero - test whether buffer is initialized
992  * @buf: buffer to check
993  * @from: check from this position
994  * @to: check up until (excluding) this position
995  *
996  * This function returns true if there are no non-zero bytes
997  * in the buf in the range [from, to).
998  */
999 static inline bool range_is_zero(void *buf, size_t from, size_t to)
1000 {
1001 	return !memchr_inv((u8 *)buf + from, 0, to - from);
1002 }
1003 
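/* Apply a user-supplied __sk_buff context to the real skb. Only a
 * whitelisted set of fields (mark, priority, ifindex, ingress_ifindex, cb,
 * tstamp, wire_len, gso_segs, gso_size, hwtstamp) may be non-zero; anything
 * else makes the run fail with -EINVAL.
 */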
1004 static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
1005 {
1006 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
1007 
1008 	if (!__skb)
1009 		return 0;
1010 
1011 	/* make sure the fields we don't use are zeroed */
1012 	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
1013 		return -EINVAL;
1014 
1015 	/* mark is allowed */
1016 
1017 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
1018 			   offsetof(struct __sk_buff, priority)))
1019 		return -EINVAL;
1020 
1021 	/* priority is allowed */
1022 	/* ingress_ifindex is allowed */
1023 	/* ifindex is allowed */
1024 
1025 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
1026 			   offsetof(struct __sk_buff, cb)))
1027 		return -EINVAL;
1028 
1029 	/* cb is allowed */
1030 
1031 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
1032 			   offsetof(struct __sk_buff, tstamp)))
1033 		return -EINVAL;
1034 
1035 	/* tstamp is allowed */
1036 	/* wire_len is allowed */
1037 	/* gso_segs is allowed */
1038 
1039 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
1040 			   offsetof(struct __sk_buff, gso_size)))
1041 		return -EINVAL;
1042 
1043 	/* gso_size is allowed */
1044 
1045 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
1046 			   offsetof(struct __sk_buff, hwtstamp)))
1047 		return -EINVAL;
1048 
1049 	/* hwtstamp is allowed */
1050 
1051 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
1052 			   sizeof(struct __sk_buff)))
1053 		return -EINVAL;
1054 
1055 	skb->mark = __skb->mark;
1056 	skb->priority = __skb->priority;
1057 	skb->skb_iif = __skb->ingress_ifindex;
1058 	skb->tstamp = __skb->tstamp;
1059 	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
1060 
1061 	if (__skb->wire_len == 0) {
1062 		cb->pkt_len = skb->len;
1063 	} else {
1064 		if (__skb->wire_len < skb->len ||
1065 		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
1066 			return -EINVAL;
1067 		cb->pkt_len = __skb->wire_len;
1068 	}
1069 
1070 	if (__skb->gso_segs > GSO_MAX_SEGS)
1071 		return -EINVAL;
1072 	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
1073 	skb_shinfo(skb)->gso_size = __skb->gso_size;
1074 	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;
1075 
1076 	return 0;
1077 }
1078 
1079 static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
1080 {
1081 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
1082 
1083 	if (!__skb)
1084 		return;
1085 
1086 	__skb->mark = skb->mark;
1087 	__skb->priority = skb->priority;
1088 	__skb->ingress_ifindex = skb->skb_iif;
1089 	__skb->ifindex = skb->dev->ifindex;
1090 	__skb->tstamp = skb->tstamp;
1091 	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
1092 	__skb->wire_len = cb->pkt_len;
1093 	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
1094 	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
1095 }
1096 
1097 static struct proto bpf_dummy_proto = {
1098 	.name   = "bpf_dummy",
1099 	.owner  = THIS_MODULE,
1100 	.obj_size = sizeof(struct sock),
1101 };
1102 
1103 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
1104 			  union bpf_attr __user *uattr)
1105 {
1106 	bool is_l2 = false, is_direct_pkt_access = false;
1107 	struct net *net = current->nsproxy->net_ns;
1108 	struct net_device *dev = net->loopback_dev;
1109 	u32 size = kattr->test.data_size_in;
1110 	u32 repeat = kattr->test.repeat;
1111 	struct __sk_buff *ctx = NULL;
1112 	u32 retval, duration;
1113 	int hh_len = ETH_HLEN;
1114 	struct sk_buff *skb;
1115 	struct sock *sk;
1116 	void *data;
1117 	int ret;
1118 
1119 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1120 		return -EINVAL;
1121 
1122 	data = bpf_test_init(kattr, kattr->test.data_size_in,
1123 			     size, NET_SKB_PAD + NET_IP_ALIGN,
1124 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1125 	if (IS_ERR(data))
1126 		return PTR_ERR(data);
1127 
1128 	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
1129 	if (IS_ERR(ctx)) {
1130 		kfree(data);
1131 		return PTR_ERR(ctx);
1132 	}
1133 
1134 	switch (prog->type) {
1135 	case BPF_PROG_TYPE_SCHED_CLS:
1136 	case BPF_PROG_TYPE_SCHED_ACT:
1137 		is_l2 = true;
1138 		fallthrough;
1139 	case BPF_PROG_TYPE_LWT_IN:
1140 	case BPF_PROG_TYPE_LWT_OUT:
1141 	case BPF_PROG_TYPE_LWT_XMIT:
1142 		is_direct_pkt_access = true;
1143 		break;
1144 	default:
1145 		break;
1146 	}
1147 
1148 	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
1149 	if (!sk) {
1150 		kfree(data);
1151 		kfree(ctx);
1152 		return -ENOMEM;
1153 	}
1154 	sock_init_data(NULL, sk);
1155 
1156 	skb = slab_build_skb(data);
1157 	if (!skb) {
1158 		kfree(data);
1159 		kfree(ctx);
1160 		sk_free(sk);
1161 		return -ENOMEM;
1162 	}
1163 	skb->sk = sk;
1164 
1165 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1166 	__skb_put(skb, size);
1167 	if (ctx && ctx->ifindex > 1) {
1168 		dev = dev_get_by_index(net, ctx->ifindex);
1169 		if (!dev) {
1170 			ret = -ENODEV;
1171 			goto out;
1172 		}
1173 	}
1174 	skb->protocol = eth_type_trans(skb, dev);
1175 	skb_reset_network_header(skb);
1176 
1177 	switch (skb->protocol) {
1178 	case htons(ETH_P_IP):
1179 		sk->sk_family = AF_INET;
1180 		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
1181 			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
1182 			sk->sk_daddr = ip_hdr(skb)->daddr;
1183 		}
1184 		break;
1185 #if IS_ENABLED(CONFIG_IPV6)
1186 	case htons(ETH_P_IPV6):
1187 		sk->sk_family = AF_INET6;
1188 		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
1189 			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
1190 			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
1191 		}
1192 		break;
1193 #endif
1194 	default:
1195 		break;
1196 	}
1197 
1198 	if (is_l2)
1199 		__skb_push(skb, hh_len);
1200 	if (is_direct_pkt_access)
1201 		bpf_compute_data_pointers(skb);
1202 	ret = convert___skb_to_skb(skb, ctx);
1203 	if (ret)
1204 		goto out;
1205 	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
1206 	if (ret)
1207 		goto out;
1208 	if (!is_l2) {
1209 		if (skb_headroom(skb) < hh_len) {
1210 			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
1211 
1212 			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
1213 				ret = -ENOMEM;
1214 				goto out;
1215 			}
1216 		}
1217 		memset(__skb_push(skb, hh_len), 0, hh_len);
1218 	}
1219 	convert_skb_to___skb(skb, ctx);
1220 
1221 	size = skb->len;
1222 	/* bpf program can never convert linear skb to non-linear */
1223 	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
1224 		size = skb_headlen(skb);
1225 	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
1226 			      duration);
1227 	if (!ret)
1228 		ret = bpf_ctx_finish(kattr, uattr, ctx,
1229 				     sizeof(struct __sk_buff));
1230 out:
1231 	if (dev && dev != net->loopback_dev)
1232 		dev_put(dev);
1233 	kfree_skb(skb);
1234 	sk_free(sk);
1235 	kfree(ctx);
1236 	return ret;
1237 }
1238 
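/* Apply a user-supplied xdp_md context to the xdp_buff: resolve the ingress
 * ifindex and RX queue (taking a reference on the net_device that
 * xdp_convert_buff_to_md() drops again) and apply the data offset relative
 * to data_meta.
 */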
1239 static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
1240 {
1241 	unsigned int ingress_ifindex, rx_queue_index;
1242 	struct netdev_rx_queue *rxqueue;
1243 	struct net_device *device;
1244 
1245 	if (!xdp_md)
1246 		return 0;
1247 
1248 	if (xdp_md->egress_ifindex != 0)
1249 		return -EINVAL;
1250 
1251 	ingress_ifindex = xdp_md->ingress_ifindex;
1252 	rx_queue_index = xdp_md->rx_queue_index;
1253 
1254 	if (!ingress_ifindex && rx_queue_index)
1255 		return -EINVAL;
1256 
1257 	if (ingress_ifindex) {
1258 		device = dev_get_by_index(current->nsproxy->net_ns,
1259 					  ingress_ifindex);
1260 		if (!device)
1261 			return -ENODEV;
1262 
1263 		if (rx_queue_index >= device->real_num_rx_queues)
1264 			goto free_dev;
1265 
1266 		rxqueue = __netif_get_rx_queue(device, rx_queue_index);
1267 
1268 		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
1269 			goto free_dev;
1270 
1271 		xdp->rxq = &rxqueue->xdp_rxq;
1272 		/* The device is now tracked in the xdp->rxq for later
1273 		 * dev_put()
1274 		 */
1275 	}
1276 
1277 	xdp->data = xdp->data_meta + xdp_md->data;
1278 	return 0;
1279 
1280 free_dev:
1281 	dev_put(device);
1282 	return -EINVAL;
1283 }
1284 
1285 static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
1286 {
1287 	if (!xdp_md)
1288 		return;
1289 
1290 	xdp_md->data = xdp->data - xdp->data_meta;
1291 	xdp_md->data_end = xdp->data_end - xdp->data_meta;
1292 
1293 	if (xdp_md->ingress_ifindex)
1294 		dev_put(xdp->rxq->dev);
1295 }
1296 
1297 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1298 			  union bpf_attr __user *uattr)
1299 {
1300 	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
1301 	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1302 	u32 batch_size = kattr->test.batch_size;
1303 	u32 retval = 0, duration, max_data_sz;
1304 	u32 size = kattr->test.data_size_in;
1305 	u32 headroom = XDP_PACKET_HEADROOM;
1306 	u32 repeat = kattr->test.repeat;
1307 	struct netdev_rx_queue *rxqueue;
1308 	struct skb_shared_info *sinfo;
1309 	struct xdp_buff xdp = {};
1310 	int i, ret = -EINVAL;
1311 	struct xdp_md *ctx;
1312 	void *data;
1313 
1314 	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
1315 	    prog->expected_attach_type == BPF_XDP_CPUMAP)
1316 		return -EINVAL;
1317 
1318 	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
1319 		return -EINVAL;
1320 
1321 	if (bpf_prog_is_dev_bound(prog->aux))
1322 		return -EINVAL;
1323 
1324 	if (do_live) {
1325 		if (!batch_size)
1326 			batch_size = NAPI_POLL_WEIGHT;
1327 		else if (batch_size > TEST_XDP_MAX_BATCH)
1328 			return -E2BIG;
1329 
1330 		headroom += sizeof(struct xdp_page_head);
1331 	} else if (batch_size) {
1332 		return -EINVAL;
1333 	}
1334 
1335 	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
1336 	if (IS_ERR(ctx))
1337 		return PTR_ERR(ctx);
1338 
1339 	if (ctx) {
1340 		/* There can't be user-provided data before the metadata */
1341 		if (ctx->data_meta || ctx->data_end != size ||
1342 		    ctx->data > ctx->data_end ||
1343 		    unlikely(xdp_metalen_invalid(ctx->data)) ||
1344 		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
1345 			goto free_ctx;
1346 		/* Metadata is allocated from the headroom */
1347 		headroom -= ctx->data;
1348 	}
1349 
1350 	max_data_sz = 4096 - headroom - tailroom;
1351 	if (size > max_data_sz) {
1352 		/* disallow live data mode for jumbo frames */
1353 		if (do_live)
1354 			goto free_ctx;
1355 		size = max_data_sz;
1356 	}
1357 
1358 	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
1359 	if (IS_ERR(data)) {
1360 		ret = PTR_ERR(data);
1361 		goto free_ctx;
1362 	}
1363 
1364 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
1365 	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
1366 	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
1367 	xdp_prepare_buff(&xdp, data, headroom, size, true);
1368 	sinfo = xdp_get_shared_info_from_buff(&xdp);
1369 
1370 	ret = xdp_convert_md_to_buff(ctx, &xdp);
1371 	if (ret)
1372 		goto free_data;
1373 
1374 	if (unlikely(kattr->test.data_size_in > size)) {
1375 		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
1376 
1377 		while (size < kattr->test.data_size_in) {
1378 			struct page *page;
1379 			skb_frag_t *frag;
1380 			u32 data_len;
1381 
1382 			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
1383 				ret = -ENOMEM;
1384 				goto out;
1385 			}
1386 
1387 			page = alloc_page(GFP_KERNEL);
1388 			if (!page) {
1389 				ret = -ENOMEM;
1390 				goto out;
1391 			}
1392 
1393 			frag = &sinfo->frags[sinfo->nr_frags++];
1394 			__skb_frag_set_page(frag, page);
1395 
1396 			data_len = min_t(u32, kattr->test.data_size_in - size,
1397 					 PAGE_SIZE);
1398 			skb_frag_size_set(frag, data_len);
1399 
1400 			if (copy_from_user(page_address(page), data_in + size,
1401 					   data_len)) {
1402 				ret = -EFAULT;
1403 				goto out;
1404 			}
1405 			sinfo->xdp_frags_size += data_len;
1406 			size += data_len;
1407 		}
1408 		xdp_buff_set_frags_flag(&xdp);
1409 	}
1410 
1411 	if (repeat > 1)
1412 		bpf_prog_change_xdp(NULL, prog);
1413 
1414 	if (do_live)
1415 		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
1416 	else
1417 		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
1418 	/* We convert the xdp_buff back to an xdp_md before checking the return
1419 	 * code so the reference count of any held netdevice will be decremented
1420 	 * even if the test run failed.
1421 	 */
1422 	xdp_convert_buff_to_md(&xdp, ctx);
1423 	if (ret)
1424 		goto out;
1425 
1426 	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
1427 	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
1428 			      retval, duration);
1429 	if (!ret)
1430 		ret = bpf_ctx_finish(kattr, uattr, ctx,
1431 				     sizeof(struct xdp_md));
1432 
1433 out:
1434 	if (repeat > 1)
1435 		bpf_prog_change_xdp(prog, NULL);
1436 free_data:
1437 	for (i = 0; i < sinfo->nr_frags; i++)
1438 		__free_page(skb_frag_page(&sinfo->frags[i]));
1439 	kfree(data);
1440 free_ctx:
1441 	kfree(ctx);
1442 	return ret;
1443 }
1444 
1445 static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
1446 {
1447 	/* make sure the fields we don't use are zeroed */
1448 	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
1449 		return -EINVAL;
1450 
1451 	/* flags is allowed */
1452 
1453 	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
1454 			   sizeof(struct bpf_flow_keys)))
1455 		return -EINVAL;
1456 
1457 	return 0;
1458 }
1459 
1460 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1461 				     const union bpf_attr *kattr,
1462 				     union bpf_attr __user *uattr)
1463 {
1464 	struct bpf_test_timer t = { NO_PREEMPT };
1465 	u32 size = kattr->test.data_size_in;
1466 	struct bpf_flow_dissector ctx = {};
1467 	u32 repeat = kattr->test.repeat;
1468 	struct bpf_flow_keys *user_ctx;
1469 	struct bpf_flow_keys flow_keys;
1470 	const struct ethhdr *eth;
1471 	unsigned int flags = 0;
1472 	u32 retval, duration;
1473 	void *data;
1474 	int ret;
1475 
1476 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1477 		return -EINVAL;
1478 
1479 	if (size < ETH_HLEN)
1480 		return -EINVAL;
1481 
1482 	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
1483 	if (IS_ERR(data))
1484 		return PTR_ERR(data);
1485 
1486 	eth = (struct ethhdr *)data;
1487 
1488 	if (!repeat)
1489 		repeat = 1;
1490 
1491 	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
1492 	if (IS_ERR(user_ctx)) {
1493 		kfree(data);
1494 		return PTR_ERR(user_ctx);
1495 	}
1496 	if (user_ctx) {
1497 		ret = verify_user_bpf_flow_keys(user_ctx);
1498 		if (ret)
1499 			goto out;
1500 		flags = user_ctx->flags;
1501 	}
1502 
1503 	ctx.flow_keys = &flow_keys;
1504 	ctx.data = data;
1505 	ctx.data_end = (__u8 *)data + size;
1506 
1507 	bpf_test_timer_enter(&t);
1508 	do {
1509 		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
1510 					  size, flags);
1511 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1512 	bpf_test_timer_leave(&t);
1513 
1514 	if (ret < 0)
1515 		goto out;
1516 
1517 	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
1518 			      sizeof(flow_keys), retval, duration);
1519 	if (!ret)
1520 		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
1521 				     sizeof(struct bpf_flow_keys));
1522 
1523 out:
1524 	kfree(user_ctx);
1525 	kfree(data);
1526 	return ret;
1527 }
1528 
1529 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
1530 				union bpf_attr __user *uattr)
1531 {
1532 	struct bpf_test_timer t = { NO_PREEMPT };
1533 	struct bpf_prog_array *progs = NULL;
1534 	struct bpf_sk_lookup_kern ctx = {};
1535 	u32 repeat = kattr->test.repeat;
1536 	struct bpf_sk_lookup *user_ctx;
1537 	u32 retval, duration;
1538 	int ret = -EINVAL;
1539 
1540 	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1541 		return -EINVAL;
1542 
1543 	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
1544 	    kattr->test.data_size_out)
1545 		return -EINVAL;
1546 
1547 	if (!repeat)
1548 		repeat = 1;
1549 
1550 	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
1551 	if (IS_ERR(user_ctx))
1552 		return PTR_ERR(user_ctx);
1553 
1554 	if (!user_ctx)
1555 		return -EINVAL;
1556 
1557 	if (user_ctx->sk)
1558 		goto out;
1559 
1560 	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
1561 		goto out;
1562 
1563 	if (user_ctx->local_port > U16_MAX) {
1564 		ret = -ERANGE;
1565 		goto out;
1566 	}
1567 
1568 	ctx.family = (u16)user_ctx->family;
1569 	ctx.protocol = (u16)user_ctx->protocol;
1570 	ctx.dport = (u16)user_ctx->local_port;
1571 	ctx.sport = user_ctx->remote_port;
1572 
1573 	switch (ctx.family) {
1574 	case AF_INET:
1575 		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
1576 		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
1577 		break;
1578 
1579 #if IS_ENABLED(CONFIG_IPV6)
1580 	case AF_INET6:
1581 		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
1582 		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
1583 		break;
1584 #endif
1585 
1586 	default:
1587 		ret = -EAFNOSUPPORT;
1588 		goto out;
1589 	}
1590 
1591 	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
1592 	if (!progs) {
1593 		ret = -ENOMEM;
1594 		goto out;
1595 	}
1596 
1597 	progs->items[0].prog = prog;
1598 
1599 	bpf_test_timer_enter(&t);
1600 	do {
1601 		ctx.selected_sk = NULL;
1602 		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
1603 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1604 	bpf_test_timer_leave(&t);
1605 
1606 	if (ret < 0)
1607 		goto out;
1608 
1609 	user_ctx->cookie = 0;
1610 	if (ctx.selected_sk) {
1611 		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
1612 			ret = -EOPNOTSUPP;
1613 			goto out;
1614 		}
1615 
1616 		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
1617 	}
1618 
1619 	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1620 	if (!ret)
1621 		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
1622 
1623 out:
1624 	bpf_prog_array_free(progs);
1625 	kfree(user_ctx);
1626 	return ret;
1627 }
1628 
1629 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
1630 			      const union bpf_attr *kattr,
1631 			      union bpf_attr __user *uattr)
1632 {
1633 	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
1634 	__u32 ctx_size_in = kattr->test.ctx_size_in;
1635 	void *ctx = NULL;
1636 	u32 retval;
1637 	int err = 0;
1638 
1639 	/* doesn't support data_in/out, ctx_out, duration, repeat, or flags */
1640 	if (kattr->test.data_in || kattr->test.data_out ||
1641 	    kattr->test.ctx_out || kattr->test.duration ||
1642 	    kattr->test.repeat || kattr->test.flags ||
1643 	    kattr->test.batch_size)
1644 		return -EINVAL;
1645 
1646 	if (ctx_size_in < prog->aux->max_ctx_offset ||
1647 	    ctx_size_in > U16_MAX)
1648 		return -EINVAL;
1649 
1650 	if (ctx_size_in) {
1651 		ctx = memdup_user(ctx_in, ctx_size_in);
1652 		if (IS_ERR(ctx))
1653 			return PTR_ERR(ctx);
1654 	}
1655 
1656 	rcu_read_lock_trace();
1657 	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
1658 	rcu_read_unlock_trace();
1659 
1660 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
1661 		err = -EFAULT;
1662 		goto out;
1663 	}
1664 	if (ctx_size_in)
1665 		if (copy_to_user(ctx_in, ctx, ctx_size_in))
1666 			err = -EFAULT;
1667 out:
1668 	kfree(ctx);
1669 	return err;
1670 }
1671 
1672 static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
1673 	.owner = THIS_MODULE,
1674 	.set   = &test_sk_check_kfunc_ids,
1675 };
1676 
1677 BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
1678 BTF_ID(struct, prog_test_ref_kfunc)
1679 BTF_ID(func, bpf_kfunc_call_test_release)
1680 BTF_ID(struct, prog_test_member)
1681 BTF_ID(func, bpf_kfunc_call_memb_release)
1682 
1683 static int __init bpf_prog_test_run_init(void)
1684 {
1685 	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
1686 		{
1687 		  .btf_id       = bpf_prog_test_dtor_kfunc_ids[0],
1688 		  .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
1689 		},
1690 		{
1691 		  .btf_id	= bpf_prog_test_dtor_kfunc_ids[2],
1692 		  .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
1693 		},
1694 	};
1695 	int ret;
1696 
1697 	ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
1698 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
1699 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
1700 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
1701 	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
1702 						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
1703 						  THIS_MODULE);
1704 }
1705 late_initcall(bpf_prog_test_run_init);
1706