1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Facebook
3 */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/btf_ids.h>
7 #include <linux/slab.h>
8 #include <linux/init.h>
9 #include <linux/vmalloc.h>
10 #include <linux/etherdevice.h>
11 #include <linux/filter.h>
12 #include <linux/rcupdate_trace.h>
13 #include <linux/sched/signal.h>
14 #include <net/bpf_sk_storage.h>
15 #include <net/hotdata.h>
16 #include <net/sock.h>
17 #include <net/tcp.h>
18 #include <net/net_namespace.h>
19 #include <net/page_pool/helpers.h>
20 #include <linux/error-injection.h>
21 #include <linux/smp.h>
22 #include <linux/sock_diag.h>
23 #include <linux/netfilter.h>
24 #include <net/netdev_rx_queue.h>
25 #include <net/xdp.h>
26 #include <net/netfilter/nf_bpf_link.h>
27
28 #define CREATE_TRACE_POINTS
29 #include <trace/events/bpf_test_run.h>
30
31 struct bpf_test_timer {
32 enum { NO_PREEMPT, NO_MIGRATE } mode;
33 u32 i;
34 u64 time_start, time_spent;
35 };
36
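/* The bpf_test_timer helpers below bracket the measurement loop for all test
 * runners: _enter() takes the RCU read lock and disables preemption or
 * migration depending on ->mode, _continue() accumulates the elapsed time,
 * breaks out on pending signals, reschedules between runs if needed, and
 * reports the average per-iteration duration once 'repeat' runs are done.
 */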
37 static void bpf_test_timer_enter(struct bpf_test_timer *t)
38 __acquires(rcu)
39 {
40 rcu_read_lock();
41 if (t->mode == NO_PREEMPT)
42 preempt_disable();
43 else
44 migrate_disable();
45
46 t->time_start = ktime_get_ns();
47 }
48
49 static void bpf_test_timer_leave(struct bpf_test_timer *t)
50 __releases(rcu)
51 {
52 t->time_start = 0;
53
54 if (t->mode == NO_PREEMPT)
55 preempt_enable();
56 else
57 migrate_enable();
58 rcu_read_unlock();
59 }
60
61 static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
62 u32 repeat, int *err, u32 *duration)
63 __must_hold(rcu)
64 {
65 t->i += iterations;
66 if (t->i >= repeat) {
67 /* We're done. */
68 t->time_spent += ktime_get_ns() - t->time_start;
69 do_div(t->time_spent, t->i);
70 *duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
71 *err = 0;
72 goto reset;
73 }
74
75 if (signal_pending(current)) {
76 /* During iteration: we've been cancelled, abort. */
77 *err = -EINTR;
78 goto reset;
79 }
80
81 if (need_resched()) {
82 /* During iteration: we need to reschedule between runs. */
83 t->time_spent += ktime_get_ns() - t->time_start;
84 bpf_test_timer_leave(t);
85 cond_resched();
86 bpf_test_timer_enter(t);
87 }
88
89 /* Do another round. */
90 return true;
91
92 reset:
93 t->i = 0;
94 return false;
95 }
96
97 /* We put this struct at the head of each page with a context and frame
98 * initialised when the page is allocated, so we don't have to do this on each
99 * repetition of the test run.
100 */
101 struct xdp_page_head {
102 struct xdp_buff orig_ctx;
103 struct xdp_buff ctx;
104 union {
105 /* ::data_hard_start starts here */
106 DECLARE_FLEX_ARRAY(struct xdp_frame, frame);
107 DECLARE_FLEX_ARRAY(u8, data);
108 };
109 };
110
111 struct xdp_test_data {
112 struct xdp_buff *orig_ctx;
113 struct xdp_rxq_info rxq;
114 struct net_device *dev;
115 struct page_pool *pp;
116 struct xdp_frame **frames;
117 struct sk_buff **skbs;
118 struct xdp_mem_info mem;
119 u32 batch_size;
120 u32 frame_cnt;
121 };
122
123 /* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE
124 * must be updated accordingly if this gets changed, otherwise BPF selftests
125 * will fail.
126 */
127 #define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
128 #define TEST_XDP_MAX_BATCH 256
129
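/* Page pool init callback: seed each page with a copy of the original
 * xdp_buff, frame and packet data, and stash a pristine copy of the context
 * in ->orig_ctx so reset_ctx() can cheaply restore it between runs.
 */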
130 static void xdp_test_run_init_page(netmem_ref netmem, void *arg)
131 {
132 struct xdp_page_head *head =
133 phys_to_virt(page_to_phys(netmem_to_page(netmem)));
134 struct xdp_buff *new_ctx, *orig_ctx;
135 u32 headroom = XDP_PACKET_HEADROOM;
136 struct xdp_test_data *xdp = arg;
137 size_t frm_len, meta_len;
138 struct xdp_frame *frm;
139 void *data;
140
141 orig_ctx = xdp->orig_ctx;
142 frm_len = orig_ctx->data_end - orig_ctx->data_meta;
143 meta_len = orig_ctx->data - orig_ctx->data_meta;
144 headroom -= meta_len;
145
146 new_ctx = &head->ctx;
147 frm = head->frame;
148 data = head->data;
149 memcpy(data + headroom, orig_ctx->data_meta, frm_len);
150
151 xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
152 xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
153 new_ctx->data = new_ctx->data_meta + meta_len;
154
155 xdp_update_frame_from_buff(new_ctx, frm);
156 frm->mem_type = new_ctx->rxq->mem.type;
157
158 memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
159 }
160
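/* Allocate the per-batch frame/skb arrays, create a page_pool seeded by
 * xdp_test_run_init_page(), register it as an XDP memory model and set up a
 * fake RXQ on the original device for the live-frame test run.
 */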
161 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
162 {
163 struct page_pool *pp;
164 int err = -ENOMEM;
165 struct page_pool_params pp_params = {
166 .order = 0,
167 .flags = 0,
168 .pool_size = xdp->batch_size,
169 .nid = NUMA_NO_NODE,
170 .init_callback = xdp_test_run_init_page,
171 .init_arg = xdp,
172 };
173
174 xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
175 if (!xdp->frames)
176 return -ENOMEM;
177
178 xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
179 if (!xdp->skbs)
180 goto err_skbs;
181
182 pp = page_pool_create(&pp_params);
183 if (IS_ERR(pp)) {
184 err = PTR_ERR(pp);
185 goto err_pp;
186 }
187
188 /* will copy 'mem.id' into pp->xdp_mem_id */
189 err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
190 if (err)
191 goto err_mmodel;
192
193 xdp->pp = pp;
194
195 /* We create a 'fake' RXQ referencing the original dev, but with an
196 * xdp_mem_info pointing to our page_pool
197 */
198 xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
199 xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
200 xdp->rxq.mem.id = pp->xdp_mem_id;
201 xdp->dev = orig_ctx->rxq->dev;
202 xdp->orig_ctx = orig_ctx;
203
204 return 0;
205
206 err_mmodel:
207 page_pool_destroy(pp);
208 err_pp:
209 kvfree(xdp->skbs);
210 err_skbs:
211 kvfree(xdp->frames);
212 return err;
213 }
214
215 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
216 {
217 xdp_unreg_mem_model(&xdp->mem);
218 page_pool_destroy(xdp->pp);
219 kvfree(xdp->frames);
220 kvfree(xdp->skbs);
221 }
222
223 static bool frame_was_changed(const struct xdp_page_head *head)
224 {
225 /* xdp_scrub_frame() zeroes the data pointer, flags is the last field,
226 * i.e. has the highest chances to be overwritten. If those two are
227 * untouched, it's most likely safe to skip the context reset.
228 */
229 return head->frame->data != head->orig_ctx.data ||
230 head->frame->flags != head->orig_ctx.flags;
231 }
232
233 static bool ctx_was_changed(struct xdp_page_head *head)
234 {
235 return head->orig_ctx.data != head->ctx.data ||
236 head->orig_ctx.data_meta != head->ctx.data_meta ||
237 head->orig_ctx.data_end != head->ctx.data_end;
238 }
239
240 static void reset_ctx(struct xdp_page_head *head)
241 {
242 if (likely(!frame_was_changed(head) && !ctx_was_changed(head)))
243 return;
244
245 head->ctx.data = head->orig_ctx.data;
246 head->ctx.data_meta = head->orig_ctx.data_meta;
247 head->ctx.data_end = head->orig_ctx.data_end;
248 xdp_update_frame_from_buff(&head->ctx, head->frame);
249 head->frame->mem_type = head->orig_ctx.rxq->mem.type;
250 }
251
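/* Deliver frames that returned XDP_PASS to the local stack: bulk-allocate
 * skbs, build them from the xdp_frames and hand them to
 * netif_receive_skb_list().
 */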
252 static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
253 struct sk_buff **skbs,
254 struct net_device *dev)
255 {
256 gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
257 int i, n;
258 LIST_HEAD(list);
259
260 n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,
261 (void **)skbs);
262 if (unlikely(n == 0)) {
263 for (i = 0; i < nframes; i++)
264 xdp_return_frame(frames[i]);
265 return -ENOMEM;
266 }
267
268 for (i = 0; i < nframes; i++) {
269 struct xdp_frame *xdpf = frames[i];
270 struct sk_buff *skb = skbs[i];
271
272 skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
273 if (!skb) {
274 xdp_return_frame(xdpf);
275 continue;
276 }
277
278 list_add_tail(&skb->list, &list);
279 }
280 netif_receive_skb_list(&list);
281
282 return 0;
283 }
284
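/* Run the program on up to ->batch_size freshly allocated pages with BHs
 * disabled. XDP_TX is emulated as a redirect back to the same interface,
 * XDP_PASS frames are collected and delivered via xdp_recv_frames(), and
 * everything else is returned to the page_pool. Redirects are flushed once
 * per batch.
 */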
285 static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
286 u32 repeat)
287 {
288 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
289 int err = 0, act, ret, i, nframes = 0, batch_sz;
290 struct xdp_frame **frames = xdp->frames;
291 struct bpf_redirect_info *ri;
292 struct xdp_page_head *head;
293 struct xdp_frame *frm;
294 bool redirect = false;
295 struct xdp_buff *ctx;
296 struct page *page;
297
298 batch_sz = min_t(u32, repeat, xdp->batch_size);
299
300 local_bh_disable();
301 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
302 ri = bpf_net_ctx_get_ri();
303 xdp_set_return_frame_no_direct();
304
305 for (i = 0; i < batch_sz; i++) {
306 page = page_pool_dev_alloc_pages(xdp->pp);
307 if (!page) {
308 err = -ENOMEM;
309 goto out;
310 }
311
312 head = phys_to_virt(page_to_phys(page));
313 reset_ctx(head);
314 ctx = &head->ctx;
315 frm = head->frame;
316 xdp->frame_cnt++;
317
318 act = bpf_prog_run_xdp(prog, ctx);
319
320 /* if program changed pkt bounds we need to update the xdp_frame */
321 if (unlikely(ctx_was_changed(head))) {
322 ret = xdp_update_frame_from_buff(ctx, frm);
323 if (ret) {
324 xdp_return_buff(ctx);
325 continue;
326 }
327 }
328
329 switch (act) {
330 case XDP_TX:
331 /* we can't do a real XDP_TX since we're not in the
332 * driver, so turn it into a REDIRECT back to the same
333 * index
334 */
335 ri->tgt_index = xdp->dev->ifindex;
336 ri->map_id = INT_MAX;
337 ri->map_type = BPF_MAP_TYPE_UNSPEC;
338 fallthrough;
339 case XDP_REDIRECT:
340 redirect = true;
341 ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
342 if (ret)
343 xdp_return_buff(ctx);
344 break;
345 case XDP_PASS:
346 frames[nframes++] = frm;
347 break;
348 default:
349 bpf_warn_invalid_xdp_action(NULL, prog, act);
350 fallthrough;
351 case XDP_DROP:
352 xdp_return_buff(ctx);
353 break;
354 }
355 }
356
357 out:
358 if (redirect)
359 xdp_do_flush();
360 if (nframes) {
361 ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
362 if (ret)
363 err = ret;
364 }
365
366 xdp_clear_return_frame_no_direct();
367 bpf_net_ctx_clear(bpf_net_ctx);
368 local_bh_enable();
369 return err;
370 }
371
372 static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
373 u32 repeat, u32 batch_size, u32 *time)
374
375 {
376 struct xdp_test_data xdp = { .batch_size = batch_size };
377 struct bpf_test_timer t = { .mode = NO_MIGRATE };
378 int ret;
379
380 if (!repeat)
381 repeat = 1;
382
383 ret = xdp_test_run_setup(&xdp, ctx);
384 if (ret)
385 return ret;
386
387 bpf_test_timer_enter(&t);
388 do {
389 xdp.frame_cnt = 0;
390 ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
391 if (unlikely(ret < 0))
392 break;
393 } while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
394 bpf_test_timer_leave(&t);
395
396 xdp_test_run_teardown(&xdp);
397 return ret;
398 }
399
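/* Generic repeat loop shared by the skb and non-live XDP test runs: allocate
 * cgroup storage for the program, install the run context, then execute the
 * program 'repeat' times under the test timer with BHs disabled. The last
 * return value and the average duration per run are reported back.
 */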
400 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
401 u32 *retval, u32 *time, bool xdp)
402 {
403 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
404 struct bpf_prog_array_item item = {.prog = prog};
405 struct bpf_run_ctx *old_ctx;
406 struct bpf_cg_run_ctx run_ctx;
407 struct bpf_test_timer t = { NO_MIGRATE };
408 enum bpf_cgroup_storage_type stype;
409 int ret;
410
411 for_each_cgroup_storage_type(stype) {
412 item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
413 if (IS_ERR(item.cgroup_storage[stype])) {
414 item.cgroup_storage[stype] = NULL;
415 for_each_cgroup_storage_type(stype)
416 bpf_cgroup_storage_free(item.cgroup_storage[stype]);
417 return -ENOMEM;
418 }
419 }
420
421 if (!repeat)
422 repeat = 1;
423
424 bpf_test_timer_enter(&t);
425 old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
426 do {
427 run_ctx.prog_item = &item;
428 local_bh_disable();
429 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
430
431 if (xdp)
432 *retval = bpf_prog_run_xdp(prog, ctx);
433 else
434 *retval = bpf_prog_run(prog, ctx);
435
436 bpf_net_ctx_clear(bpf_net_ctx);
437 local_bh_enable();
438 } while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
439 bpf_reset_run_ctx(old_ctx);
440 bpf_test_timer_leave(&t);
441
442 for_each_cgroup_storage_type(stype)
443 bpf_cgroup_storage_free(item.cgroup_storage[stype]);
444
445 return ret;
446 }
447
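/* Copy the resulting packet (linear part plus any frags described by @sinfo),
 * its size, the program's return value and the measured duration back to the
 * user's bpf_attr. Returns -ENOSPC if the output had to be truncated to the
 * user-provided data_size_out.
 */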
448 static int bpf_test_finish(const union bpf_attr *kattr,
449 union bpf_attr __user *uattr, const void *data,
450 struct skb_shared_info *sinfo, u32 size,
451 u32 retval, u32 duration)
452 {
453 void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
454 int err = -EFAULT;
455 u32 copy_size = size;
456
457 /* Clamp copy if the user has provided a size hint, but copy the full
458 * buffer if not to retain old behaviour.
459 */
460 if (kattr->test.data_size_out &&
461 copy_size > kattr->test.data_size_out) {
462 copy_size = kattr->test.data_size_out;
463 err = -ENOSPC;
464 }
465
466 if (data_out) {
467 int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;
468
469 if (len < 0) {
470 err = -ENOSPC;
471 goto out;
472 }
473
474 if (copy_to_user(data_out, data, len))
475 goto out;
476
477 if (sinfo) {
478 int i, offset = len;
479 u32 data_len;
480
481 for (i = 0; i < sinfo->nr_frags; i++) {
482 skb_frag_t *frag = &sinfo->frags[i];
483
484 if (offset >= copy_size) {
485 err = -ENOSPC;
486 break;
487 }
488
489 data_len = min_t(u32, copy_size - offset,
490 skb_frag_size(frag));
491
492 if (copy_to_user(data_out + offset,
493 skb_frag_address(frag),
494 data_len))
495 goto out;
496
497 offset += data_len;
498 }
499 }
500 }
501
502 if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
503 goto out;
504 if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
505 goto out;
506 if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
507 goto out;
508 if (err != -ENOSPC)
509 err = 0;
510 out:
511 trace_bpf_test_finish(&err);
512 return err;
513 }
514
515 /* Integer types of various sizes and pointer combinations cover a variety of
516 * architecture-dependent calling conventions. 7+ can be supported in the
517 * future.
518 */
519 __bpf_kfunc_start_defs();
520
521 __bpf_kfunc int bpf_fentry_test1(int a)
522 {
523 return a + 1;
524 }
525 EXPORT_SYMBOL_GPL(bpf_fentry_test1);
526
527 int noinline bpf_fentry_test2(int a, u64 b)
528 {
529 return a + b;
530 }
531
532 int noinline bpf_fentry_test3(char a, int b, u64 c)
533 {
534 return a + b + c;
535 }
536
537 int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
538 {
539 return (long)a + b + c + d;
540 }
541
542 int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
543 {
544 return a + (long)b + c + d + e;
545 }
546
547 int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
548 {
549 return a + (long)b + c + d + (long)e + f;
550 }
551
552 struct bpf_fentry_test_t {
553 struct bpf_fentry_test_t *a;
554 };
555
556 int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
557 {
558 asm volatile ("": "+r"(arg));
559 return (long)arg;
560 }
561
562 int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
563 {
564 return (long)arg->a;
565 }
566
567 __bpf_kfunc u32 bpf_fentry_test9(u32 *a)
568 {
569 return *a;
570 }
571
572 void noinline bpf_fentry_test_sinfo(struct skb_shared_info *sinfo)
573 {
574 }
575
576 __bpf_kfunc int bpf_modify_return_test(int a, int *b)
577 {
578 *b += 1;
579 return a + *b;
580 }
581
582 __bpf_kfunc int bpf_modify_return_test2(int a, int *b, short c, int d,
583 void *e, char f, int g)
584 {
585 *b += 1;
586 return a + *b + c + d + (long)e + f + g;
587 }
588
589 __bpf_kfunc int bpf_modify_return_test_tp(int nonce)
590 {
591 trace_bpf_trigger_tp(nonce);
592
593 return nonce;
594 }
595
596 int noinline bpf_fentry_shadow_test(int a)
597 {
598 return a + 1;
599 }
600
601 struct prog_test_member1 {
602 int a;
603 };
604
605 struct prog_test_member {
606 struct prog_test_member1 m;
607 int c;
608 };
609
610 struct prog_test_ref_kfunc {
611 int a;
612 int b;
613 struct prog_test_member memb;
614 struct prog_test_ref_kfunc *next;
615 refcount_t cnt;
616 };
617
618 __bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
619 {
620 refcount_dec(&p->cnt);
621 }
622
623 __bpf_kfunc void bpf_kfunc_call_test_release_dtor(void *p)
624 {
625 bpf_kfunc_call_test_release(p);
626 }
627 CFI_NOSEAL(bpf_kfunc_call_test_release_dtor);
628
629 __bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
630 {
631 }
632
633 __bpf_kfunc void bpf_kfunc_call_memb_release_dtor(void *p)
634 {
635 }
636 CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor);
637
638 __bpf_kfunc_end_defs();
639
640 BTF_KFUNCS_START(bpf_test_modify_return_ids)
641 BTF_ID_FLAGS(func, bpf_modify_return_test)
642 BTF_ID_FLAGS(func, bpf_modify_return_test2)
643 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
644 BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
645 BTF_KFUNCS_END(bpf_test_modify_return_ids)
646
647 static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
648 .owner = THIS_MODULE,
649 .set = &bpf_test_modify_return_ids,
650 };
651
652 BTF_KFUNCS_START(test_sk_check_kfunc_ids)
653 BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
654 BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
655 BTF_KFUNCS_END(test_sk_check_kfunc_ids)
656
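/* Copy the input packet from userspace into a zeroed kernel buffer with the
 * requested headroom and tailroom. The user buffer must be at least ETH_HLEN
 * bytes and must fit in a single page together with head- and tailroom.
 */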
657 static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
658 u32 size, u32 headroom, u32 tailroom)
659 {
660 void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
661 void *data;
662
663 if (user_size < ETH_HLEN || user_size > PAGE_SIZE - headroom - tailroom)
664 return ERR_PTR(-EINVAL);
665
666 size = SKB_DATA_ALIGN(size);
667 data = kzalloc(size + headroom + tailroom, GFP_USER);
668 if (!data)
669 return ERR_PTR(-ENOMEM);
670
671 if (copy_from_user(data + headroom, data_in, user_size)) {
672 kfree(data);
673 return ERR_PTR(-EFAULT);
674 }
675
676 return data;
677 }
678
679 int bpf_prog_test_run_tracing(struct bpf_prog *prog,
680 const union bpf_attr *kattr,
681 union bpf_attr __user *uattr)
682 {
683 struct bpf_fentry_test_t arg = {};
684 u16 side_effect = 0, ret = 0;
685 int b = 2, err = -EFAULT;
686 u32 retval = 0;
687
688 if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
689 return -EINVAL;
690
691 switch (prog->expected_attach_type) {
692 case BPF_TRACE_FENTRY:
693 case BPF_TRACE_FEXIT:
694 if (bpf_fentry_test1(1) != 2 ||
695 bpf_fentry_test2(2, 3) != 5 ||
696 bpf_fentry_test3(4, 5, 6) != 15 ||
697 bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
698 bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
699 bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
700 bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
701 bpf_fentry_test8(&arg) != 0 ||
702 bpf_fentry_test9(&retval) != 0)
703 goto out;
704 break;
705 case BPF_MODIFY_RETURN:
706 ret = bpf_modify_return_test(1, &b);
707 if (b != 2)
708 side_effect++;
709 b = 2;
710 ret += bpf_modify_return_test2(1, &b, 3, 4, (void *)5, 6, 7);
711 if (b != 2)
712 side_effect++;
713 break;
714 default:
715 goto out;
716 }
717
718 retval = ((u32)side_effect << 16) | ret;
719 if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
720 goto out;
721
722 err = 0;
723 out:
724 trace_bpf_test_finish(&err);
725 return err;
726 }
727
728 struct bpf_raw_tp_test_run_info {
729 struct bpf_prog *prog;
730 void *ctx;
731 u32 retval;
732 };
733
734 static void
735 __bpf_prog_test_run_raw_tp(void *data)
736 {
737 struct bpf_raw_tp_test_run_info *info = data;
738 struct bpf_trace_run_ctx run_ctx = {};
739 struct bpf_run_ctx *old_run_ctx;
740
741 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
742
743 rcu_read_lock();
744 info->retval = bpf_prog_run(info->prog, info->ctx);
745 rcu_read_unlock();
746
747 bpf_reset_run_ctx(old_run_ctx);
748 }
749
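/* Run a raw tracepoint program once with the user-supplied context, either on
 * the current CPU or, with BPF_F_TEST_RUN_ON_CPU, on the requested CPU via
 * smp_call_function_single().
 */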
750 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
751 const union bpf_attr *kattr,
752 union bpf_attr __user *uattr)
753 {
754 void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
755 __u32 ctx_size_in = kattr->test.ctx_size_in;
756 struct bpf_raw_tp_test_run_info info;
757 int cpu = kattr->test.cpu, err = 0;
758 int current_cpu;
759
760 /* doesn't support data_in/out, ctx_out, duration, or repeat */
761 if (kattr->test.data_in || kattr->test.data_out ||
762 kattr->test.ctx_out || kattr->test.duration ||
763 kattr->test.repeat || kattr->test.batch_size)
764 return -EINVAL;
765
766 if (ctx_size_in < prog->aux->max_ctx_offset ||
767 ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
768 return -EINVAL;
769
770 if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
771 return -EINVAL;
772
773 if (ctx_size_in) {
774 info.ctx = memdup_user(ctx_in, ctx_size_in);
775 if (IS_ERR(info.ctx))
776 return PTR_ERR(info.ctx);
777 } else {
778 info.ctx = NULL;
779 }
780
781 info.prog = prog;
782
783 current_cpu = get_cpu();
784 if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
785 cpu == current_cpu) {
786 __bpf_prog_test_run_raw_tp(&info);
787 } else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
788 /* smp_call_function_single() also checks cpu_online()
789 * after csd_lock(). However, since cpu is from user
790 * space, let's do an extra quick check to filter out
791 * invalid value before smp_call_function_single().
792 */
793 err = -ENXIO;
794 } else {
795 err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
796 &info, 1);
797 }
798 put_cpu();
799
800 if (!err &&
801 copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
802 err = -EFAULT;
803
804 kfree(info.ctx);
805 return err;
806 }
807
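/* Copy the optional context object (ctx_in) into a zeroed buffer of the
 * program type's context size. Returns NULL when the test neither supplies
 * nor requests a context, and rejects input with non-zero bytes beyond
 * max_size.
 */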
808 static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
809 {
810 void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
811 void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
812 u32 size = kattr->test.ctx_size_in;
813 void *data;
814 int err;
815
816 if (!data_in && !data_out)
817 return NULL;
818
819 data = kzalloc(max_size, GFP_USER);
820 if (!data)
821 return ERR_PTR(-ENOMEM);
822
823 if (data_in) {
824 err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
825 if (err) {
826 kfree(data);
827 return ERR_PTR(err);
828 }
829
830 size = min_t(u32, max_size, size);
831 if (copy_from_user(data, data_in, size)) {
832 kfree(data);
833 return ERR_PTR(-EFAULT);
834 }
835 }
836 return data;
837 }
838
839 static int bpf_ctx_finish(const union bpf_attr *kattr,
840 union bpf_attr __user *uattr, const void *data,
841 u32 size)
842 {
843 void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
844 int err = -EFAULT;
845 u32 copy_size = size;
846
847 if (!data || !data_out)
848 return 0;
849
850 if (copy_size > kattr->test.ctx_size_out) {
851 copy_size = kattr->test.ctx_size_out;
852 err = -ENOSPC;
853 }
854
855 if (copy_to_user(data_out, data, copy_size))
856 goto out;
857 if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
858 goto out;
859 if (err != -ENOSPC)
860 err = 0;
861 out:
862 return err;
863 }
864
865 /**
866 * range_is_zero - test whether buffer is initialized
867 * @buf: buffer to check
868 * @from: check from this position
869 * @to: check up until (excluding) this position
870 *
871 * This function returns true if there is no non-zero byte
872 * in buf in the range [from,to).
873 */
874 static inline bool range_is_zero(void *buf, size_t from, size_t to)
875 {
876 return !memchr_inv((u8 *)buf + from, 0, to - from);
877 }
878
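/* Apply the fields a test may set in the user-visible __sk_buff (mark,
 * priority, ingress_ifindex, cb, tstamp, wire_len, gso_segs, gso_size,
 * hwtstamp) to the real skb, after verifying that all other fields are zero.
 */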
879 static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
880 {
881 struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
882
883 if (!__skb)
884 return 0;
885
886 /* make sure the fields we don't use are zeroed */
887 if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
888 return -EINVAL;
889
890 /* mark is allowed */
891
892 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
893 offsetof(struct __sk_buff, priority)))
894 return -EINVAL;
895
896 /* priority is allowed */
897 /* ingress_ifindex is allowed */
898 /* ifindex is allowed */
899
900 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
901 offsetof(struct __sk_buff, cb)))
902 return -EINVAL;
903
904 /* cb is allowed */
905
906 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
907 offsetof(struct __sk_buff, tstamp)))
908 return -EINVAL;
909
910 /* tstamp is allowed */
911 /* wire_len is allowed */
912 /* gso_segs is allowed */
913
914 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
915 offsetof(struct __sk_buff, gso_size)))
916 return -EINVAL;
917
918 /* gso_size is allowed */
919
920 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
921 offsetof(struct __sk_buff, hwtstamp)))
922 return -EINVAL;
923
924 /* hwtstamp is allowed */
925
926 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
927 sizeof(struct __sk_buff)))
928 return -EINVAL;
929
930 skb->mark = __skb->mark;
931 skb->priority = __skb->priority;
932 skb->skb_iif = __skb->ingress_ifindex;
933 skb->tstamp = __skb->tstamp;
934 memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
935
936 if (__skb->wire_len == 0) {
937 cb->pkt_len = skb->len;
938 } else {
939 if (__skb->wire_len < skb->len ||
940 __skb->wire_len > GSO_LEGACY_MAX_SIZE)
941 return -EINVAL;
942 cb->pkt_len = __skb->wire_len;
943 }
944
945 if (__skb->gso_segs > GSO_MAX_SEGS)
946 return -EINVAL;
947 skb_shinfo(skb)->gso_segs = __skb->gso_segs;
948 skb_shinfo(skb)->gso_size = __skb->gso_size;
949 skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;
950
951 return 0;
952 }
953
954 static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
955 {
956 struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
957
958 if (!__skb)
959 return;
960
961 __skb->mark = skb->mark;
962 __skb->priority = skb->priority;
963 __skb->ingress_ifindex = skb->skb_iif;
964 __skb->ifindex = skb->dev->ifindex;
965 __skb->tstamp = skb->tstamp;
966 memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
967 __skb->wire_len = cb->pkt_len;
968 __skb->gso_segs = skb_shinfo(skb)->gso_segs;
969 __skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
970 }
971
972 static struct proto bpf_dummy_proto = {
973 .name = "bpf_dummy",
974 .owner = THIS_MODULE,
975 .obj_size = sizeof(struct sock),
976 };
977
978 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
979 union bpf_attr __user *uattr)
980 {
981 bool is_l2 = false, is_direct_pkt_access = false;
982 struct net *net = current->nsproxy->net_ns;
983 struct net_device *dev = net->loopback_dev;
984 u32 size = kattr->test.data_size_in;
985 u32 repeat = kattr->test.repeat;
986 struct __sk_buff *ctx = NULL;
987 u32 retval, duration;
988 int hh_len = ETH_HLEN;
989 struct sk_buff *skb;
990 struct sock *sk;
991 void *data;
992 int ret;
993
994 if ((kattr->test.flags & ~BPF_F_TEST_SKB_CHECKSUM_COMPLETE) ||
995 kattr->test.cpu || kattr->test.batch_size)
996 return -EINVAL;
997
998 data = bpf_test_init(kattr, kattr->test.data_size_in,
999 size, NET_SKB_PAD + NET_IP_ALIGN,
1000 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1001 if (IS_ERR(data))
1002 return PTR_ERR(data);
1003
1004 ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
1005 if (IS_ERR(ctx)) {
1006 kfree(data);
1007 return PTR_ERR(ctx);
1008 }
1009
1010 switch (prog->type) {
1011 case BPF_PROG_TYPE_SCHED_CLS:
1012 case BPF_PROG_TYPE_SCHED_ACT:
1013 is_l2 = true;
1014 fallthrough;
1015 case BPF_PROG_TYPE_LWT_IN:
1016 case BPF_PROG_TYPE_LWT_OUT:
1017 case BPF_PROG_TYPE_LWT_XMIT:
1018 case BPF_PROG_TYPE_CGROUP_SKB:
1019 is_direct_pkt_access = true;
1020 break;
1021 default:
1022 break;
1023 }
1024
1025 sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
1026 if (!sk) {
1027 kfree(data);
1028 kfree(ctx);
1029 return -ENOMEM;
1030 }
1031 sock_init_data(NULL, sk);
1032
1033 skb = slab_build_skb(data);
1034 if (!skb) {
1035 kfree(data);
1036 kfree(ctx);
1037 sk_free(sk);
1038 return -ENOMEM;
1039 }
1040 skb->sk = sk;
1041
1042 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1043 __skb_put(skb, size);
1044
1045 if (ctx && ctx->ifindex > 1) {
1046 dev = dev_get_by_index(net, ctx->ifindex);
1047 if (!dev) {
1048 ret = -ENODEV;
1049 goto out;
1050 }
1051 }
1052 skb->protocol = eth_type_trans(skb, dev);
1053 skb_reset_network_header(skb);
1054
1055 switch (skb->protocol) {
1056 case htons(ETH_P_IP):
1057 sk->sk_family = AF_INET;
1058 if (sizeof(struct iphdr) <= skb_headlen(skb)) {
1059 sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
1060 sk->sk_daddr = ip_hdr(skb)->daddr;
1061 }
1062 break;
1063 #if IS_ENABLED(CONFIG_IPV6)
1064 case htons(ETH_P_IPV6):
1065 sk->sk_family = AF_INET6;
1066 if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
1067 sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
1068 sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
1069 }
1070 break;
1071 #endif
1072 default:
1073 break;
1074 }
1075
1076 if (is_l2)
1077 __skb_push(skb, hh_len);
1078 if (is_direct_pkt_access)
1079 bpf_compute_data_pointers(skb);
1080
1081 ret = convert___skb_to_skb(skb, ctx);
1082 if (ret)
1083 goto out;
1084
1085 if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
1086 const int off = skb_network_offset(skb);
1087 int len = skb->len - off;
1088
1089 skb->csum = skb_checksum(skb, off, len, 0);
1090 skb->ip_summed = CHECKSUM_COMPLETE;
1091 }
1092
1093 ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
1094 if (ret)
1095 goto out;
1096 if (!is_l2) {
1097 if (skb_headroom(skb) < hh_len) {
1098 int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
1099
1100 if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
1101 ret = -ENOMEM;
1102 goto out;
1103 }
1104 }
1105 memset(__skb_push(skb, hh_len), 0, hh_len);
1106 }
1107
1108 if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
1109 const int off = skb_network_offset(skb);
1110 int len = skb->len - off;
1111 __wsum csum;
1112
1113 csum = skb_checksum(skb, off, len, 0);
1114
1115 if (csum_fold(skb->csum) != csum_fold(csum)) {
1116 ret = -EBADMSG;
1117 goto out;
1118 }
1119 }
1120
1121 convert_skb_to___skb(skb, ctx);
1122
1123 size = skb->len;
1124 /* bpf program can never convert linear skb to non-linear */
1125 if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
1126 size = skb_headlen(skb);
1127 ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
1128 duration);
1129 if (!ret)
1130 ret = bpf_ctx_finish(kattr, uattr, ctx,
1131 sizeof(struct __sk_buff));
1132 out:
1133 if (dev && dev != net->loopback_dev)
1134 dev_put(dev);
1135 kfree_skb(skb);
1136 sk_free(sk);
1137 kfree(ctx);
1138 return ret;
1139 }
1140
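/* Translate the user-supplied xdp_md into the kernel xdp_buff. A non-zero
 * ingress_ifindex selects a real, registered RX queue on that device (the
 * device reference is dropped again in xdp_convert_buff_to_md()), and the
 * ctx's data offset is applied relative to data_meta.
 */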
1141 static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
1142 {
1143 unsigned int ingress_ifindex, rx_queue_index;
1144 struct netdev_rx_queue *rxqueue;
1145 struct net_device *device;
1146
1147 if (!xdp_md)
1148 return 0;
1149
1150 if (xdp_md->egress_ifindex != 0)
1151 return -EINVAL;
1152
1153 ingress_ifindex = xdp_md->ingress_ifindex;
1154 rx_queue_index = xdp_md->rx_queue_index;
1155
1156 if (!ingress_ifindex && rx_queue_index)
1157 return -EINVAL;
1158
1159 if (ingress_ifindex) {
1160 device = dev_get_by_index(current->nsproxy->net_ns,
1161 ingress_ifindex);
1162 if (!device)
1163 return -ENODEV;
1164
1165 if (rx_queue_index >= device->real_num_rx_queues)
1166 goto free_dev;
1167
1168 rxqueue = __netif_get_rx_queue(device, rx_queue_index);
1169
1170 if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
1171 goto free_dev;
1172
1173 xdp->rxq = &rxqueue->xdp_rxq;
1174 /* The device is now tracked in the xdp->rxq for later
1175 * dev_put()
1176 */
1177 }
1178
1179 xdp->data = xdp->data_meta + xdp_md->data;
1180 return 0;
1181
1182 free_dev:
1183 dev_put(device);
1184 return -EINVAL;
1185 }
1186
1187 static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
1188 {
1189 if (!xdp_md)
1190 return;
1191
1192 xdp_md->data = xdp->data - xdp->data_meta;
1193 xdp_md->data_end = xdp->data_end - xdp->data_meta;
1194
1195 if (xdp_md->ingress_ifindex)
1196 dev_put(xdp->rxq->dev);
1197 }
1198
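/* Test-run entry point for XDP programs. In the default mode the program's
 * return code and the (possibly modified) packet are reported back to
 * userspace; with BPF_F_TEST_XDP_LIVE_FRAMES the verdicts are instead acted
 * upon for real (redirect, transmit, receive) using page_pool-backed batches,
 * which is why extra headroom is reserved for struct xdp_page_head below.
 */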
1199 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1200 union bpf_attr __user *uattr)
1201 {
1202 bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
1203 u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1204 u32 batch_size = kattr->test.batch_size;
1205 u32 retval = 0, duration, max_data_sz;
1206 u32 size = kattr->test.data_size_in;
1207 u32 headroom = XDP_PACKET_HEADROOM;
1208 u32 repeat = kattr->test.repeat;
1209 struct netdev_rx_queue *rxqueue;
1210 struct skb_shared_info *sinfo;
1211 struct xdp_buff xdp = {};
1212 int i, ret = -EINVAL;
1213 struct xdp_md *ctx;
1214 void *data;
1215
1216 if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
1217 prog->expected_attach_type == BPF_XDP_CPUMAP)
1218 return -EINVAL;
1219
1220 if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
1221 return -EINVAL;
1222
1223 if (bpf_prog_is_dev_bound(prog->aux))
1224 return -EINVAL;
1225
1226 if (do_live) {
1227 if (!batch_size)
1228 batch_size = NAPI_POLL_WEIGHT;
1229 else if (batch_size > TEST_XDP_MAX_BATCH)
1230 return -E2BIG;
1231
1232 headroom += sizeof(struct xdp_page_head);
1233 } else if (batch_size) {
1234 return -EINVAL;
1235 }
1236
1237 ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
1238 if (IS_ERR(ctx))
1239 return PTR_ERR(ctx);
1240
1241 if (ctx) {
1242 /* There can't be user provided data before the meta data */
1243 if (ctx->data_meta || ctx->data_end != size ||
1244 ctx->data > ctx->data_end ||
1245 unlikely(xdp_metalen_invalid(ctx->data)) ||
1246 (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
1247 goto free_ctx;
1248 /* Meta data is allocated from the headroom */
1249 headroom -= ctx->data;
1250 }
1251
1252 max_data_sz = 4096 - headroom - tailroom;
1253 if (size > max_data_sz) {
1254 /* disallow live data mode for jumbo frames */
1255 if (do_live)
1256 goto free_ctx;
1257 size = max_data_sz;
1258 }
1259
1260 data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
1261 if (IS_ERR(data)) {
1262 ret = PTR_ERR(data);
1263 goto free_ctx;
1264 }
1265
1266 rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
1267 rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
1268 xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
1269 xdp_prepare_buff(&xdp, data, headroom, size, true);
1270 sinfo = xdp_get_shared_info_from_buff(&xdp);
1271
1272 ret = xdp_convert_md_to_buff(ctx, &xdp);
1273 if (ret)
1274 goto free_data;
1275
1276 if (unlikely(kattr->test.data_size_in > size)) {
1277 void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
1278
1279 while (size < kattr->test.data_size_in) {
1280 struct page *page;
1281 skb_frag_t *frag;
1282 u32 data_len;
1283
1284 if (sinfo->nr_frags == MAX_SKB_FRAGS) {
1285 ret = -ENOMEM;
1286 goto out;
1287 }
1288
1289 page = alloc_page(GFP_KERNEL);
1290 if (!page) {
1291 ret = -ENOMEM;
1292 goto out;
1293 }
1294
1295 frag = &sinfo->frags[sinfo->nr_frags++];
1296
1297 data_len = min_t(u32, kattr->test.data_size_in - size,
1298 PAGE_SIZE);
1299 skb_frag_fill_page_desc(frag, page, 0, data_len);
1300
1301 if (copy_from_user(page_address(page), data_in + size,
1302 data_len)) {
1303 ret = -EFAULT;
1304 goto out;
1305 }
1306 sinfo->xdp_frags_size += data_len;
1307 size += data_len;
1308 }
1309 xdp_buff_set_frags_flag(&xdp);
1310 }
1311
1312 if (repeat > 1)
1313 bpf_prog_change_xdp(NULL, prog);
1314
1315 if (do_live)
1316 ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
1317 else
1318 ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
1319 /* We convert the xdp_buff back to an xdp_md before checking the return
1320 * code so the reference count of any held netdevice will be decremented
1321 * even if the test run failed.
1322 */
1323 xdp_convert_buff_to_md(&xdp, ctx);
1324 if (ret)
1325 goto out;
1326
1327 size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
1328 ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
1329 retval, duration);
1330 if (!ret)
1331 ret = bpf_ctx_finish(kattr, uattr, ctx,
1332 sizeof(struct xdp_md));
1333
1334 out:
1335 if (repeat > 1)
1336 bpf_prog_change_xdp(prog, NULL);
1337 free_data:
1338 for (i = 0; i < sinfo->nr_frags; i++)
1339 __free_page(skb_frag_page(&sinfo->frags[i]));
1340 kfree(data);
1341 free_ctx:
1342 kfree(ctx);
1343 return ret;
1344 }
1345
1346 static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
1347 {
1348 /* make sure the fields we don't use are zeroed */
1349 if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
1350 return -EINVAL;
1351
1352 /* flags is allowed */
1353
1354 if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
1355 sizeof(struct bpf_flow_keys)))
1356 return -EINVAL;
1357
1358 return 0;
1359 }
1360
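/* Run a flow dissector program on the supplied packet. The resulting
 * bpf_flow_keys structure, rather than the packet itself, is what gets copied
 * back through data_out.
 */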
1361 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1362 const union bpf_attr *kattr,
1363 union bpf_attr __user *uattr)
1364 {
1365 struct bpf_test_timer t = { NO_PREEMPT };
1366 u32 size = kattr->test.data_size_in;
1367 struct bpf_flow_dissector ctx = {};
1368 u32 repeat = kattr->test.repeat;
1369 struct bpf_flow_keys *user_ctx;
1370 struct bpf_flow_keys flow_keys;
1371 const struct ethhdr *eth;
1372 unsigned int flags = 0;
1373 u32 retval, duration;
1374 void *data;
1375 int ret;
1376
1377 if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1378 return -EINVAL;
1379
1380 if (size < ETH_HLEN)
1381 return -EINVAL;
1382
1383 data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
1384 if (IS_ERR(data))
1385 return PTR_ERR(data);
1386
1387 eth = (struct ethhdr *)data;
1388
1389 if (!repeat)
1390 repeat = 1;
1391
1392 user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
1393 if (IS_ERR(user_ctx)) {
1394 kfree(data);
1395 return PTR_ERR(user_ctx);
1396 }
1397 if (user_ctx) {
1398 ret = verify_user_bpf_flow_keys(user_ctx);
1399 if (ret)
1400 goto out;
1401 flags = user_ctx->flags;
1402 }
1403
1404 ctx.flow_keys = &flow_keys;
1405 ctx.data = data;
1406 ctx.data_end = (__u8 *)data + size;
1407
1408 bpf_test_timer_enter(&t);
1409 do {
1410 retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
1411 size, flags);
1412 } while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1413 bpf_test_timer_leave(&t);
1414
1415 if (ret < 0)
1416 goto out;
1417
1418 ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
1419 sizeof(flow_keys), retval, duration);
1420 if (!ret)
1421 ret = bpf_ctx_finish(kattr, uattr, user_ctx,
1422 sizeof(struct bpf_flow_keys));
1423
1424 out:
1425 kfree(user_ctx);
1426 kfree(data);
1427 return ret;
1428 }
1429
1430 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
1431 union bpf_attr __user *uattr)
1432 {
1433 struct bpf_test_timer t = { NO_PREEMPT };
1434 struct bpf_prog_array *progs = NULL;
1435 struct bpf_sk_lookup_kern ctx = {};
1436 u32 repeat = kattr->test.repeat;
1437 struct bpf_sk_lookup *user_ctx;
1438 u32 retval, duration;
1439 int ret = -EINVAL;
1440
1441 if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1442 return -EINVAL;
1443
1444 if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
1445 kattr->test.data_size_out)
1446 return -EINVAL;
1447
1448 if (!repeat)
1449 repeat = 1;
1450
1451 user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
1452 if (IS_ERR(user_ctx))
1453 return PTR_ERR(user_ctx);
1454
1455 if (!user_ctx)
1456 return -EINVAL;
1457
1458 if (user_ctx->sk)
1459 goto out;
1460
1461 if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
1462 goto out;
1463
1464 if (user_ctx->local_port > U16_MAX) {
1465 ret = -ERANGE;
1466 goto out;
1467 }
1468
1469 ctx.family = (u16)user_ctx->family;
1470 ctx.protocol = (u16)user_ctx->protocol;
1471 ctx.dport = (u16)user_ctx->local_port;
1472 ctx.sport = user_ctx->remote_port;
1473
1474 switch (ctx.family) {
1475 case AF_INET:
1476 ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
1477 ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
1478 break;
1479
1480 #if IS_ENABLED(CONFIG_IPV6)
1481 case AF_INET6:
1482 ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
1483 ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
1484 break;
1485 #endif
1486
1487 default:
1488 ret = -EAFNOSUPPORT;
1489 goto out;
1490 }
1491
1492 progs = bpf_prog_array_alloc(1, GFP_KERNEL);
1493 if (!progs) {
1494 ret = -ENOMEM;
1495 goto out;
1496 }
1497
1498 progs->items[0].prog = prog;
1499
1500 bpf_test_timer_enter(&t);
1501 do {
1502 ctx.selected_sk = NULL;
1503 retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
1504 } while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1505 bpf_test_timer_leave(&t);
1506
1507 if (ret < 0)
1508 goto out;
1509
1510 user_ctx->cookie = 0;
1511 if (ctx.selected_sk) {
1512 if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
1513 ret = -EOPNOTSUPP;
1514 goto out;
1515 }
1516
1517 user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
1518 }
1519
1520 ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1521 if (!ret)
1522 ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
1523
1524 out:
1525 bpf_prog_array_free(progs);
1526 kfree(user_ctx);
1527 return ret;
1528 }
1529
1530 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
1531 const union bpf_attr *kattr,
1532 union bpf_attr __user *uattr)
1533 {
1534 void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
1535 __u32 ctx_size_in = kattr->test.ctx_size_in;
1536 void *ctx = NULL;
1537 u32 retval;
1538 int err = 0;
1539
1540 /* doesn't support data_in/out, ctx_out, duration, repeat or flags */
1541 if (kattr->test.data_in || kattr->test.data_out ||
1542 kattr->test.ctx_out || kattr->test.duration ||
1543 kattr->test.repeat || kattr->test.flags ||
1544 kattr->test.batch_size)
1545 return -EINVAL;
1546
1547 if (ctx_size_in < prog->aux->max_ctx_offset ||
1548 ctx_size_in > U16_MAX)
1549 return -EINVAL;
1550
1551 if (ctx_size_in) {
1552 ctx = memdup_user(ctx_in, ctx_size_in);
1553 if (IS_ERR(ctx))
1554 return PTR_ERR(ctx);
1555 }
1556
1557 rcu_read_lock_trace();
1558 retval = bpf_prog_run_pin_on_cpu(prog, ctx);
1559 rcu_read_unlock_trace();
1560
1561 if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
1562 err = -EFAULT;
1563 goto out;
1564 }
1565 if (ctx_size_in)
1566 if (copy_to_user(ctx_in, ctx, ctx_size_in))
1567 err = -EFAULT;
1568 out:
1569 kfree(ctx);
1570 return err;
1571 }
1572
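/* Validate the user-supplied nf_hook_state: only pf and hook may be set, and
 * only IPv4/IPv6 hook points are accepted. The device passed in by the caller
 * (the loopback device) is used as the in/out device expected at the chosen
 * hook.
 */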
1573 static int verify_and_copy_hook_state(struct nf_hook_state *state,
1574 const struct nf_hook_state *user,
1575 struct net_device *dev)
1576 {
1577 if (user->in || user->out)
1578 return -EINVAL;
1579
1580 if (user->net || user->sk || user->okfn)
1581 return -EINVAL;
1582
1583 switch (user->pf) {
1584 case NFPROTO_IPV4:
1585 case NFPROTO_IPV6:
1586 switch (state->hook) {
1587 case NF_INET_PRE_ROUTING:
1588 state->in = dev;
1589 break;
1590 case NF_INET_LOCAL_IN:
1591 state->in = dev;
1592 break;
1593 case NF_INET_FORWARD:
1594 state->in = dev;
1595 state->out = dev;
1596 break;
1597 case NF_INET_LOCAL_OUT:
1598 state->out = dev;
1599 break;
1600 case NF_INET_POST_ROUTING:
1601 state->out = dev;
1602 break;
1603 }
1604
1605 break;
1606 default:
1607 return -EINVAL;
1608 }
1609
1610 state->pf = user->pf;
1611 state->hook = user->hook;
1612
1613 return 0;
1614 }
1615
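/* Map the netfilter protocol family to the matching ethertype; used for skbs
 * injected at NF_INET_LOCAL_OUT, where no link-layer header is parsed.
 */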
1616 static __be16 nfproto_eth(int nfproto)
1617 {
1618 switch (nfproto) {
1619 case NFPROTO_IPV4:
1620 return htons(ETH_P_IP);
1621 case NFPROTO_IPV6:
1622 break;
1623 }
1624
1625 return htons(ETH_P_IPV6);
1626 }
1627
1628 int bpf_prog_test_run_nf(struct bpf_prog *prog,
1629 const union bpf_attr *kattr,
1630 union bpf_attr __user *uattr)
1631 {
1632 struct net *net = current->nsproxy->net_ns;
1633 struct net_device *dev = net->loopback_dev;
1634 struct nf_hook_state *user_ctx, hook_state = {
1635 .pf = NFPROTO_IPV4,
1636 .hook = NF_INET_LOCAL_OUT,
1637 };
1638 u32 size = kattr->test.data_size_in;
1639 u32 repeat = kattr->test.repeat;
1640 struct bpf_nf_ctx ctx = {
1641 .state = &hook_state,
1642 };
1643 struct sk_buff *skb = NULL;
1644 u32 retval, duration;
1645 void *data;
1646 int ret;
1647
1648 if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1649 return -EINVAL;
1650
1651 if (size < sizeof(struct iphdr))
1652 return -EINVAL;
1653
1654 data = bpf_test_init(kattr, kattr->test.data_size_in, size,
1655 NET_SKB_PAD + NET_IP_ALIGN,
1656 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1657 if (IS_ERR(data))
1658 return PTR_ERR(data);
1659
1660 if (!repeat)
1661 repeat = 1;
1662
1663 user_ctx = bpf_ctx_init(kattr, sizeof(struct nf_hook_state));
1664 if (IS_ERR(user_ctx)) {
1665 kfree(data);
1666 return PTR_ERR(user_ctx);
1667 }
1668
1669 if (user_ctx) {
1670 ret = verify_and_copy_hook_state(&hook_state, user_ctx, dev);
1671 if (ret)
1672 goto out;
1673 }
1674
1675 skb = slab_build_skb(data);
1676 if (!skb) {
1677 ret = -ENOMEM;
1678 goto out;
1679 }
1680
1681 data = NULL; /* data released via kfree_skb */
1682
1683 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1684 __skb_put(skb, size);
1685
1686 ret = -EINVAL;
1687
1688 if (hook_state.hook != NF_INET_LOCAL_OUT) {
1689 if (size < ETH_HLEN + sizeof(struct iphdr))
1690 goto out;
1691
1692 skb->protocol = eth_type_trans(skb, dev);
1693 switch (skb->protocol) {
1694 case htons(ETH_P_IP):
1695 if (hook_state.pf == NFPROTO_IPV4)
1696 break;
1697 goto out;
1698 case htons(ETH_P_IPV6):
1699 if (size < ETH_HLEN + sizeof(struct ipv6hdr))
1700 goto out;
1701 if (hook_state.pf == NFPROTO_IPV6)
1702 break;
1703 goto out;
1704 default:
1705 ret = -EPROTO;
1706 goto out;
1707 }
1708
1709 skb_reset_network_header(skb);
1710 } else {
1711 skb->protocol = nfproto_eth(hook_state.pf);
1712 }
1713
1714 ctx.skb = skb;
1715
1716 ret = bpf_test_run(prog, &ctx, repeat, &retval, &duration, false);
1717 if (ret)
1718 goto out;
1719
1720 ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1721
1722 out:
1723 kfree(user_ctx);
1724 kfree_skb(skb);
1725 kfree(data);
1726 return ret;
1727 }
1728
1729 static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
1730 .owner = THIS_MODULE,
1731 .set = &test_sk_check_kfunc_ids,
1732 };
1733
1734 BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
1735 BTF_ID(struct, prog_test_ref_kfunc)
1736 BTF_ID(func, bpf_kfunc_call_test_release_dtor)
1737 BTF_ID(struct, prog_test_member)
1738 BTF_ID(func, bpf_kfunc_call_memb_release_dtor)
1739
1740 static int __init bpf_prog_test_run_init(void)
1741 {
1742 const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
1743 {
1744 .btf_id = bpf_prog_test_dtor_kfunc_ids[0],
1745 .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
1746 },
1747 {
1748 .btf_id = bpf_prog_test_dtor_kfunc_ids[2],
1749 .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
1750 },
1751 };
1752 int ret;
1753
1754 ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
1755 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
1756 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
1757 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
1758 return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
1759 ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
1760 THIS_MODULE);
1761 }
1762 late_initcall(bpf_prog_test_run_init);
1763