1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Facebook
3 */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/btf_ids.h>
7 #include <linux/slab.h>
8 #include <linux/init.h>
9 #include <linux/vmalloc.h>
10 #include <linux/etherdevice.h>
11 #include <linux/filter.h>
12 #include <linux/rcupdate_trace.h>
13 #include <linux/sched/signal.h>
14 #include <net/bpf_sk_storage.h>
15 #include <net/hotdata.h>
16 #include <net/sock.h>
17 #include <net/tcp.h>
18 #include <net/net_namespace.h>
19 #include <net/page_pool/helpers.h>
20 #include <linux/error-injection.h>
21 #include <linux/smp.h>
22 #include <linux/sock_diag.h>
23 #include <linux/netfilter.h>
24 #include <net/netdev_rx_queue.h>
25 #include <net/xdp.h>
26 #include <net/netfilter/nf_bpf_link.h>
27
28 #define CREATE_TRACE_POINTS
29 #include <trace/events/bpf_test_run.h>
30
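/* Helper used by the test-run loops below to measure average run time.
 * bpf_test_timer_enter() takes the RCU read lock and disables either
 * preemption or migration depending on ->mode; ->i counts completed
 * iterations and ->time_spent accumulates elapsed nanoseconds.
 */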
31 struct bpf_test_timer {
32 enum { NO_PREEMPT, NO_MIGRATE } mode;
33 u32 i;
34 u64 time_start, time_spent;
35 };
36
37 static void bpf_test_timer_enter(struct bpf_test_timer *t)
38 __acquires(rcu)
39 {
40 rcu_read_lock();
41 if (t->mode == NO_PREEMPT)
42 preempt_disable();
43 else
44 migrate_disable();
45
46 t->time_start = ktime_get_ns();
47 }
48
49 static void bpf_test_timer_leave(struct bpf_test_timer *t)
50 __releases(rcu)
51 {
52 t->time_start = 0;
53
54 if (t->mode == NO_PREEMPT)
55 preempt_enable();
56 else
57 migrate_enable();
58 rcu_read_unlock();
59 }
60
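/* Returns true if the caller should run another batch of 'iterations'.
 * Once ->i reaches 'repeat' the loop stops: *err is set and *duration
 * receives the average time per iteration in ns, capped at U32_MAX.
 * A pending signal aborts with -EINTR; if rescheduling is needed the
 * timer is dropped around cond_resched() and re-entered.
 */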
61 static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
62 u32 repeat, int *err, u32 *duration)
63 __must_hold(rcu)
64 {
65 t->i += iterations;
66 if (t->i >= repeat) {
67 /* We're done. */
68 t->time_spent += ktime_get_ns() - t->time_start;
69 do_div(t->time_spent, t->i);
70 *duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
71 *err = 0;
72 goto reset;
73 }
74
75 if (signal_pending(current)) {
76 /* During iteration: we've been cancelled, abort. */
77 *err = -EINTR;
78 goto reset;
79 }
80
81 if (need_resched()) {
82 /* During iteration: we need to reschedule between runs. */
83 t->time_spent += ktime_get_ns() - t->time_start;
84 bpf_test_timer_leave(t);
85 cond_resched();
86 bpf_test_timer_enter(t);
87 }
88
89 /* Do another round. */
90 return true;
91
92 reset:
93 t->i = 0;
94 return false;
95 }
96
97 /* We put this struct at the head of each page with a context and frame
98 * initialised when the page is allocated, so we don't have to do this on each
99 * repetition of the test run.
100 */
101 struct xdp_page_head {
102 struct xdp_buff orig_ctx;
103 struct xdp_buff ctx;
104 union {
105 /* ::data_hard_start starts here */
106 DECLARE_FLEX_ARRAY(struct xdp_frame, frame);
107 DECLARE_FLEX_ARRAY(u8, data);
108 };
109 };
110
111 struct xdp_test_data {
112 struct xdp_buff *orig_ctx;
113 struct xdp_rxq_info rxq;
114 struct net_device *dev;
115 struct page_pool *pp;
116 struct xdp_frame **frames;
117 struct sk_buff **skbs;
118 struct xdp_mem_info mem;
119 u32 batch_size;
120 u32 frame_cnt;
121 };
122
123 /* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE
124 * must be updated accordingly if this gets changed, otherwise BPF selftests
125 * will fail.
126 */
127 #define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
128 #define TEST_XDP_MAX_BATCH 256
129
130 static void xdp_test_run_init_page(netmem_ref netmem, void *arg)
131 {
132 struct xdp_page_head *head =
133 phys_to_virt(page_to_phys(netmem_to_page(netmem)));
134 struct xdp_buff *new_ctx, *orig_ctx;
135 u32 headroom = XDP_PACKET_HEADROOM;
136 struct xdp_test_data *xdp = arg;
137 size_t frm_len, meta_len;
138 struct xdp_frame *frm;
139 void *data;
140
141 orig_ctx = xdp->orig_ctx;
142 frm_len = orig_ctx->data_end - orig_ctx->data_meta;
143 meta_len = orig_ctx->data - orig_ctx->data_meta;
144 headroom -= meta_len;
145
146 new_ctx = &head->ctx;
147 frm = head->frame;
148 data = head->data;
149 memcpy(data + headroom, orig_ctx->data_meta, frm_len);
150
151 xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
152 xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
153 new_ctx->data = new_ctx->data_meta + meta_len;
154
155 xdp_update_frame_from_buff(new_ctx, frm);
156 frm->mem = new_ctx->rxq->mem;
157
158 memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
159 }
160
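/* Allocate the frame/skb pointer arrays and a page_pool for the live-frame
 * XDP test run. Each page from the pool is pre-initialised by
 * xdp_test_run_init_page() with a copy of the original context, so the
 * per-iteration cost in xdp_test_run_batch() stays low.
 */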
161 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
162 {
163 struct page_pool *pp;
164 int err = -ENOMEM;
165 struct page_pool_params pp_params = {
166 .order = 0,
167 .flags = 0,
168 .pool_size = xdp->batch_size,
169 .nid = NUMA_NO_NODE,
170 .init_callback = xdp_test_run_init_page,
171 .init_arg = xdp,
172 };
173
174 xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
175 if (!xdp->frames)
176 return -ENOMEM;
177
178 xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
179 if (!xdp->skbs)
180 goto err_skbs;
181
182 pp = page_pool_create(&pp_params);
183 if (IS_ERR(pp)) {
184 err = PTR_ERR(pp);
185 goto err_pp;
186 }
187
188 /* will copy 'mem.id' into pp->xdp_mem_id */
189 err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
190 if (err)
191 goto err_mmodel;
192
193 xdp->pp = pp;
194
195 /* We create a 'fake' RXQ referencing the original dev, but with an
196 * xdp_mem_info pointing to our page_pool
197 */
198 xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
199 xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
200 xdp->rxq.mem.id = pp->xdp_mem_id;
201 xdp->dev = orig_ctx->rxq->dev;
202 xdp->orig_ctx = orig_ctx;
203
204 return 0;
205
206 err_mmodel:
207 page_pool_destroy(pp);
208 err_pp:
209 kvfree(xdp->skbs);
210 err_skbs:
211 kvfree(xdp->frames);
212 return err;
213 }
214
215 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
216 {
217 xdp_unreg_mem_model(&xdp->mem);
218 page_pool_destroy(xdp->pp);
219 kvfree(xdp->frames);
220 kvfree(xdp->skbs);
221 }
222
223 static bool frame_was_changed(const struct xdp_page_head *head)
224 {
225 /* xdp_scrub_frame() zeroes the data pointer, flags is the last field,
226 * i.e. has the highest chances to be overwritten. If those two are
227 * untouched, it's most likely safe to skip the context reset.
228 */
229 return head->frame->data != head->orig_ctx.data ||
230 head->frame->flags != head->orig_ctx.flags;
231 }
232
233 static bool ctx_was_changed(struct xdp_page_head *head)
234 {
235 return head->orig_ctx.data != head->ctx.data ||
236 head->orig_ctx.data_meta != head->ctx.data_meta ||
237 head->orig_ctx.data_end != head->ctx.data_end;
238 }
239
240 static void reset_ctx(struct xdp_page_head *head)
241 {
242 if (likely(!frame_was_changed(head) && !ctx_was_changed(head)))
243 return;
244
245 head->ctx.data = head->orig_ctx.data;
246 head->ctx.data_meta = head->orig_ctx.data_meta;
247 head->ctx.data_end = head->orig_ctx.data_end;
248 xdp_update_frame_from_buff(&head->ctx, head->frame);
249 }
250
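/* Deliver frames that returned XDP_PASS to the local network stack:
 * bulk-allocate skbs, build one from each xdp_frame and hand them to
 * netif_receive_skb_list(). Frames that cannot be converted are returned
 * to the page_pool.
 */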
251 static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
252 struct sk_buff **skbs,
253 struct net_device *dev)
254 {
255 gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
256 int i, n;
257 LIST_HEAD(list);
258
259 n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,
260 (void **)skbs);
261 if (unlikely(n == 0)) {
262 for (i = 0; i < nframes; i++)
263 xdp_return_frame(frames[i]);
264 return -ENOMEM;
265 }
266
267 for (i = 0; i < nframes; i++) {
268 struct xdp_frame *xdpf = frames[i];
269 struct sk_buff *skb = skbs[i];
270
271 skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
272 if (!skb) {
273 xdp_return_frame(xdpf);
274 continue;
275 }
276
277 list_add_tail(&skb->list, &list);
278 }
279 netif_receive_skb_list(&list);
280
281 return 0;
282 }
283
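/* Run one batch of live-frame iterations: pull pre-initialised pages from
 * the pool, run the program on each, then act on the verdict. XDP_TX is
 * emulated as a redirect back to the same ifindex, XDP_PASS frames are
 * queued for xdp_recv_frames(), everything else is dropped.
 */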
284 static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
285 u32 repeat)
286 {
287 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
288 int err = 0, act, ret, i, nframes = 0, batch_sz;
289 struct xdp_frame **frames = xdp->frames;
290 struct bpf_redirect_info *ri;
291 struct xdp_page_head *head;
292 struct xdp_frame *frm;
293 bool redirect = false;
294 struct xdp_buff *ctx;
295 struct page *page;
296
297 batch_sz = min_t(u32, repeat, xdp->batch_size);
298
299 local_bh_disable();
300 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
301 ri = bpf_net_ctx_get_ri();
302 xdp_set_return_frame_no_direct();
303
304 for (i = 0; i < batch_sz; i++) {
305 page = page_pool_dev_alloc_pages(xdp->pp);
306 if (!page) {
307 err = -ENOMEM;
308 goto out;
309 }
310
311 head = phys_to_virt(page_to_phys(page));
312 reset_ctx(head);
313 ctx = &head->ctx;
314 frm = head->frame;
315 xdp->frame_cnt++;
316
317 act = bpf_prog_run_xdp(prog, ctx);
318
319 /* if program changed pkt bounds we need to update the xdp_frame */
320 if (unlikely(ctx_was_changed(head))) {
321 ret = xdp_update_frame_from_buff(ctx, frm);
322 if (ret) {
323 xdp_return_buff(ctx);
324 continue;
325 }
326 }
327
328 switch (act) {
329 case XDP_TX:
330 /* we can't do a real XDP_TX since we're not in the
331 * driver, so turn it into a REDIRECT back to the same
332 * index
333 */
334 ri->tgt_index = xdp->dev->ifindex;
335 ri->map_id = INT_MAX;
336 ri->map_type = BPF_MAP_TYPE_UNSPEC;
337 fallthrough;
338 case XDP_REDIRECT:
339 redirect = true;
340 ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
341 if (ret)
342 xdp_return_buff(ctx);
343 break;
344 case XDP_PASS:
345 frames[nframes++] = frm;
346 break;
347 default:
348 bpf_warn_invalid_xdp_action(NULL, prog, act);
349 fallthrough;
350 case XDP_DROP:
351 xdp_return_buff(ctx);
352 break;
353 }
354 }
355
356 out:
357 if (redirect)
358 xdp_do_flush();
359 if (nframes) {
360 ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
361 if (ret)
362 err = ret;
363 }
364
365 xdp_clear_return_frame_no_direct();
366 bpf_net_ctx_clear(bpf_net_ctx);
367 local_bh_enable();
368 return err;
369 }
370
371 static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
372 u32 repeat, u32 batch_size, u32 *time)
373
374 {
375 struct xdp_test_data xdp = { .batch_size = batch_size };
376 struct bpf_test_timer t = { .mode = NO_MIGRATE };
377 int ret;
378
379 if (!repeat)
380 repeat = 1;
381
382 ret = xdp_test_run_setup(&xdp, ctx);
383 if (ret)
384 return ret;
385
386 bpf_test_timer_enter(&t);
387 do {
388 xdp.frame_cnt = 0;
389 ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
390 if (unlikely(ret < 0))
391 break;
392 } while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
393 bpf_test_timer_leave(&t);
394
395 xdp_test_run_teardown(&xdp);
396 return ret;
397 }
398
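/* Generic repeat loop used by the skb, non-live xdp and netfilter test
 * runners: allocate cgroup storage for the program, run it 'repeat' times
 * under the test timer with BHs disabled, and report the last return value
 * plus the average duration.
 */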
399 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
400 u32 *retval, u32 *time, bool xdp)
401 {
402 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
403 struct bpf_prog_array_item item = {.prog = prog};
404 struct bpf_run_ctx *old_ctx;
405 struct bpf_cg_run_ctx run_ctx;
406 struct bpf_test_timer t = { NO_MIGRATE };
407 enum bpf_cgroup_storage_type stype;
408 int ret;
409
410 for_each_cgroup_storage_type(stype) {
411 item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
412 if (IS_ERR(item.cgroup_storage[stype])) {
413 item.cgroup_storage[stype] = NULL;
414 for_each_cgroup_storage_type(stype)
415 bpf_cgroup_storage_free(item.cgroup_storage[stype]);
416 return -ENOMEM;
417 }
418 }
419
420 if (!repeat)
421 repeat = 1;
422
423 bpf_test_timer_enter(&t);
424 old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
425 do {
426 run_ctx.prog_item = &item;
427 local_bh_disable();
428 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
429
430 if (xdp)
431 *retval = bpf_prog_run_xdp(prog, ctx);
432 else
433 *retval = bpf_prog_run(prog, ctx);
434
435 bpf_net_ctx_clear(bpf_net_ctx);
436 local_bh_enable();
437 } while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
438 bpf_reset_run_ctx(old_ctx);
439 bpf_test_timer_leave(&t);
440
441 for_each_cgroup_storage_type(stype)
442 bpf_cgroup_storage_free(item.cgroup_storage[stype]);
443
444 return ret;
445 }
446
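/* Copy the resulting packet data (linear part plus any frags described by
 * 'sinfo'), the program return value and the measured duration back to
 * userspace. Returns -ENOSPC if the output was truncated to fit
 * data_size_out, -EFAULT on copy failures.
 */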
447 static int bpf_test_finish(const union bpf_attr *kattr,
448 union bpf_attr __user *uattr, const void *data,
449 struct skb_shared_info *sinfo, u32 size,
450 u32 retval, u32 duration)
451 {
452 void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
453 int err = -EFAULT;
454 u32 copy_size = size;
455
456 /* Clamp copy if the user has provided a size hint, but copy the full
457 * buffer if not to retain old behaviour.
458 */
459 if (kattr->test.data_size_out &&
460 copy_size > kattr->test.data_size_out) {
461 copy_size = kattr->test.data_size_out;
462 err = -ENOSPC;
463 }
464
465 if (data_out) {
466 int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;
467
468 if (len < 0) {
469 err = -ENOSPC;
470 goto out;
471 }
472
473 if (copy_to_user(data_out, data, len))
474 goto out;
475
476 if (sinfo) {
477 int i, offset = len;
478 u32 data_len;
479
480 for (i = 0; i < sinfo->nr_frags; i++) {
481 skb_frag_t *frag = &sinfo->frags[i];
482
483 if (offset >= copy_size) {
484 err = -ENOSPC;
485 break;
486 }
487
488 data_len = min_t(u32, copy_size - offset,
489 skb_frag_size(frag));
490
491 if (copy_to_user(data_out + offset,
492 skb_frag_address(frag),
493 data_len))
494 goto out;
495
496 offset += data_len;
497 }
498 }
499 }
500
501 if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
502 goto out;
503 if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
504 goto out;
505 if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
506 goto out;
507 if (err != -ENOSPC)
508 err = 0;
509 out:
510 trace_bpf_test_finish(&err);
511 return err;
512 }
513
514 /* Integer types of various sizes and pointer combinations cover a variety of
515 * architecture-dependent calling conventions. 7+ can be supported in the
516 * future.
517 */
518 __bpf_kfunc_start_defs();
519
520 __bpf_kfunc int bpf_fentry_test1(int a)
521 {
522 return a + 1;
523 }
524 EXPORT_SYMBOL_GPL(bpf_fentry_test1);
525
526 int noinline bpf_fentry_test2(int a, u64 b)
527 {
528 return a + b;
529 }
530
531 int noinline bpf_fentry_test3(char a, int b, u64 c)
532 {
533 return a + b + c;
534 }
535
536 int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
537 {
538 return (long)a + b + c + d;
539 }
540
541 int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
542 {
543 return a + (long)b + c + d + e;
544 }
545
546 int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
547 {
548 return a + (long)b + c + d + (long)e + f;
549 }
550
551 struct bpf_fentry_test_t {
552 struct bpf_fentry_test_t *a;
553 };
554
555 int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
556 {
557 asm volatile ("": "+r"(arg));
558 return (long)arg;
559 }
560
561 int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
562 {
563 return (long)arg->a;
564 }
565
566 __bpf_kfunc u32 bpf_fentry_test9(u32 *a)
567 {
568 return *a;
569 }
570
571 void noinline bpf_fentry_test_sinfo(struct skb_shared_info *sinfo)
572 {
573 }
574
575 __bpf_kfunc int bpf_modify_return_test(int a, int *b)
576 {
577 *b += 1;
578 return a + *b;
579 }
580
581 __bpf_kfunc int bpf_modify_return_test2(int a, int *b, short c, int d,
582 void *e, char f, int g)
583 {
584 *b += 1;
585 return a + *b + c + d + (long)e + f + g;
586 }
587
588 __bpf_kfunc int bpf_modify_return_test_tp(int nonce)
589 {
590 trace_bpf_trigger_tp(nonce);
591
592 return nonce;
593 }
594
595 int noinline bpf_fentry_shadow_test(int a)
596 {
597 return a + 1;
598 }
599
600 struct prog_test_member1 {
601 int a;
602 };
603
604 struct prog_test_member {
605 struct prog_test_member1 m;
606 int c;
607 };
608
609 struct prog_test_ref_kfunc {
610 int a;
611 int b;
612 struct prog_test_member memb;
613 struct prog_test_ref_kfunc *next;
614 refcount_t cnt;
615 };
616
617 __bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
618 {
619 refcount_dec(&p->cnt);
620 }
621
622 __bpf_kfunc void bpf_kfunc_call_test_release_dtor(void *p)
623 {
624 bpf_kfunc_call_test_release(p);
625 }
626 CFI_NOSEAL(bpf_kfunc_call_test_release_dtor);
627
628 __bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
629 {
630 }
631
632 __bpf_kfunc void bpf_kfunc_call_memb_release_dtor(void *p)
633 {
634 }
635 CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor);
636
637 __bpf_kfunc_end_defs();
638
639 BTF_KFUNCS_START(bpf_test_modify_return_ids)
640 BTF_ID_FLAGS(func, bpf_modify_return_test)
641 BTF_ID_FLAGS(func, bpf_modify_return_test2)
642 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
643 BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
644 BTF_KFUNCS_END(bpf_test_modify_return_ids)
645
646 static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
647 .owner = THIS_MODULE,
648 .set = &bpf_test_modify_return_ids,
649 };
650
651 BTF_KFUNCS_START(test_sk_check_kfunc_ids)
652 BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
653 BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
654 BTF_KFUNCS_END(test_sk_check_kfunc_ids)
655
656 static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
657 u32 size, u32 headroom, u32 tailroom)
658 {
659 void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
660 void *data;
661
662 if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
663 return ERR_PTR(-EINVAL);
664
665 if (user_size > size)
666 return ERR_PTR(-EMSGSIZE);
667
668 size = SKB_DATA_ALIGN(size);
669 data = kzalloc(size + headroom + tailroom, GFP_USER);
670 if (!data)
671 return ERR_PTR(-ENOMEM);
672
673 if (copy_from_user(data + headroom, data_in, user_size)) {
674 kfree(data);
675 return ERR_PTR(-EFAULT);
676 }
677
678 return data;
679 }
680
681 int bpf_prog_test_run_tracing(struct bpf_prog *prog,
682 const union bpf_attr *kattr,
683 union bpf_attr __user *uattr)
684 {
685 struct bpf_fentry_test_t arg = {};
686 u16 side_effect = 0, ret = 0;
687 int b = 2, err = -EFAULT;
688 u32 retval = 0;
689
690 if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
691 return -EINVAL;
692
693 switch (prog->expected_attach_type) {
694 case BPF_TRACE_FENTRY:
695 case BPF_TRACE_FEXIT:
696 if (bpf_fentry_test1(1) != 2 ||
697 bpf_fentry_test2(2, 3) != 5 ||
698 bpf_fentry_test3(4, 5, 6) != 15 ||
699 bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
700 bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
701 bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
702 bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
703 bpf_fentry_test8(&arg) != 0 ||
704 bpf_fentry_test9(&retval) != 0)
705 goto out;
706 break;
707 case BPF_MODIFY_RETURN:
708 ret = bpf_modify_return_test(1, &b);
709 if (b != 2)
710 side_effect++;
711 b = 2;
712 ret += bpf_modify_return_test2(1, &b, 3, 4, (void *)5, 6, 7);
713 if (b != 2)
714 side_effect++;
715 break;
716 default:
717 goto out;
718 }
719
720 retval = ((u32)side_effect << 16) | ret;
721 if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
722 goto out;
723
724 err = 0;
725 out:
726 trace_bpf_test_finish(&err);
727 return err;
728 }
729
730 struct bpf_raw_tp_test_run_info {
731 struct bpf_prog *prog;
732 void *ctx;
733 u32 retval;
734 };
735
736 static void
737 __bpf_prog_test_run_raw_tp(void *data)
738 {
739 struct bpf_raw_tp_test_run_info *info = data;
740 struct bpf_trace_run_ctx run_ctx = {};
741 struct bpf_run_ctx *old_run_ctx;
742
743 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
744
745 rcu_read_lock();
746 info->retval = bpf_prog_run(info->prog, info->ctx);
747 rcu_read_unlock();
748
749 bpf_reset_run_ctx(old_run_ctx);
750 }
751
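/* Run a raw tracepoint program exactly once, either on the current CPU or,
 * with BPF_F_TEST_RUN_ON_CPU, on the CPU selected in kattr->test.cpu via
 * smp_call_function_single(). data_in/out, ctx_out, duration, repeat and
 * batch_size are rejected for this program type.
 */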
752 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
753 const union bpf_attr *kattr,
754 union bpf_attr __user *uattr)
755 {
756 void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
757 __u32 ctx_size_in = kattr->test.ctx_size_in;
758 struct bpf_raw_tp_test_run_info info;
759 int cpu = kattr->test.cpu, err = 0;
760 int current_cpu;
761
762 /* doesn't support data_in/out, ctx_out, duration, or repeat */
763 if (kattr->test.data_in || kattr->test.data_out ||
764 kattr->test.ctx_out || kattr->test.duration ||
765 kattr->test.repeat || kattr->test.batch_size)
766 return -EINVAL;
767
768 if (ctx_size_in < prog->aux->max_ctx_offset ||
769 ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
770 return -EINVAL;
771
772 if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
773 return -EINVAL;
774
775 if (ctx_size_in) {
776 info.ctx = memdup_user(ctx_in, ctx_size_in);
777 if (IS_ERR(info.ctx))
778 return PTR_ERR(info.ctx);
779 } else {
780 info.ctx = NULL;
781 }
782
783 info.prog = prog;
784
785 current_cpu = get_cpu();
786 if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
787 cpu == current_cpu) {
788 __bpf_prog_test_run_raw_tp(&info);
789 } else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
790 /* smp_call_function_single() also checks cpu_online()
791 * after csd_lock(). However, since cpu is from user
792 * space, let's do an extra quick check to filter out
793 * invalid value before smp_call_function_single().
794 */
795 err = -ENXIO;
796 } else {
797 err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
798 &info, 1);
799 }
800 put_cpu();
801
802 if (!err &&
803 copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
804 err = -EFAULT;
805
806 kfree(info.ctx);
807 return err;
808 }
809
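/* Copy an optional user-supplied context (ctx_in) into a zeroed buffer of
 * 'max_size' bytes. Any trailing bytes beyond 'max_size' must be zero
 * (bpf_check_uarg_tail_zero()), which keeps newer, larger user structs
 * compatible with older kernels.
 */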
810 static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
811 {
812 void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
813 void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
814 u32 size = kattr->test.ctx_size_in;
815 void *data;
816 int err;
817
818 if (!data_in && !data_out)
819 return NULL;
820
821 data = kzalloc(max_size, GFP_USER);
822 if (!data)
823 return ERR_PTR(-ENOMEM);
824
825 if (data_in) {
826 err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
827 if (err) {
828 kfree(data);
829 return ERR_PTR(err);
830 }
831
832 size = min_t(u32, max_size, size);
833 if (copy_from_user(data, data_in, size)) {
834 kfree(data);
835 return ERR_PTR(-EFAULT);
836 }
837 }
838 return data;
839 }
840
841 static int bpf_ctx_finish(const union bpf_attr *kattr,
842 union bpf_attr __user *uattr, const void *data,
843 u32 size)
844 {
845 void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
846 int err = -EFAULT;
847 u32 copy_size = size;
848
849 if (!data || !data_out)
850 return 0;
851
852 if (copy_size > kattr->test.ctx_size_out) {
853 copy_size = kattr->test.ctx_size_out;
854 err = -ENOSPC;
855 }
856
857 if (copy_to_user(data_out, data, copy_size))
858 goto out;
859 if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
860 goto out;
861 if (err != -ENOSPC)
862 err = 0;
863 out:
864 return err;
865 }
866
867 /**
868 * range_is_zero - test whether buffer is initialized
869 * @buf: buffer to check
870 * @from: check from this position
871 * @to: check up until (excluding) this position
872 *
873 * This function returns true if all the bytes in the buf in the
874 * range [from,to) are zero.
875 */
876 static inline bool range_is_zero(void *buf, size_t from, size_t to)
877 {
878 return !memchr_inv((u8 *)buf + from, 0, to - from);
879 }
880
881 static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
882 {
883 struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
884
885 if (!__skb)
886 return 0;
887
888 /* make sure the fields we don't use are zeroed */
889 if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
890 return -EINVAL;
891
892 /* mark is allowed */
893
894 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
895 offsetof(struct __sk_buff, priority)))
896 return -EINVAL;
897
898 /* priority is allowed */
899 /* ingress_ifindex is allowed */
900 /* ifindex is allowed */
901
902 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
903 offsetof(struct __sk_buff, cb)))
904 return -EINVAL;
905
906 /* cb is allowed */
907
908 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
909 offsetof(struct __sk_buff, tstamp)))
910 return -EINVAL;
911
912 /* tstamp is allowed */
913 /* wire_len is allowed */
914 /* gso_segs is allowed */
915
916 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
917 offsetof(struct __sk_buff, gso_size)))
918 return -EINVAL;
919
920 /* gso_size is allowed */
921
922 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
923 offsetof(struct __sk_buff, hwtstamp)))
924 return -EINVAL;
925
926 /* hwtstamp is allowed */
927
928 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
929 sizeof(struct __sk_buff)))
930 return -EINVAL;
931
932 skb->mark = __skb->mark;
933 skb->priority = __skb->priority;
934 skb->skb_iif = __skb->ingress_ifindex;
935 skb->tstamp = __skb->tstamp;
936 memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
937
938 if (__skb->wire_len == 0) {
939 cb->pkt_len = skb->len;
940 } else {
941 if (__skb->wire_len < skb->len ||
942 __skb->wire_len > GSO_LEGACY_MAX_SIZE)
943 return -EINVAL;
944 cb->pkt_len = __skb->wire_len;
945 }
946
947 if (__skb->gso_segs > GSO_MAX_SEGS)
948 return -EINVAL;
949 skb_shinfo(skb)->gso_segs = __skb->gso_segs;
950 skb_shinfo(skb)->gso_size = __skb->gso_size;
951 skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;
952
953 return 0;
954 }
955
956 static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
957 {
958 struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
959
960 if (!__skb)
961 return;
962
963 __skb->mark = skb->mark;
964 __skb->priority = skb->priority;
965 __skb->ingress_ifindex = skb->skb_iif;
966 __skb->ifindex = skb->dev->ifindex;
967 __skb->tstamp = skb->tstamp;
968 memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
969 __skb->wire_len = cb->pkt_len;
970 __skb->gso_segs = skb_shinfo(skb)->gso_segs;
971 __skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
972 }
973
974 static struct proto bpf_dummy_proto = {
975 .name = "bpf_dummy",
976 .owner = THIS_MODULE,
977 .obj_size = sizeof(struct sock),
978 };
979
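/* BPF_PROG_RUN entry point for skb-based program types. Wraps the user data
 * in a freshly built skb owned by a dummy socket, optionally applies a
 * user-supplied __sk_buff context, runs the program, and copies the
 * (possibly modified) packet and context back to userspace.
 */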
980 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
981 union bpf_attr __user *uattr)
982 {
983 bool is_l2 = false, is_direct_pkt_access = false;
984 struct net *net = current->nsproxy->net_ns;
985 struct net_device *dev = net->loopback_dev;
986 u32 size = kattr->test.data_size_in;
987 u32 repeat = kattr->test.repeat;
988 struct __sk_buff *ctx = NULL;
989 u32 retval, duration;
990 int hh_len = ETH_HLEN;
991 struct sk_buff *skb;
992 struct sock *sk;
993 void *data;
994 int ret;
995
996 if ((kattr->test.flags & ~BPF_F_TEST_SKB_CHECKSUM_COMPLETE) ||
997 kattr->test.cpu || kattr->test.batch_size)
998 return -EINVAL;
999
1000 data = bpf_test_init(kattr, kattr->test.data_size_in,
1001 size, NET_SKB_PAD + NET_IP_ALIGN,
1002 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1003 if (IS_ERR(data))
1004 return PTR_ERR(data);
1005
1006 ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
1007 if (IS_ERR(ctx)) {
1008 kfree(data);
1009 return PTR_ERR(ctx);
1010 }
1011
1012 switch (prog->type) {
1013 case BPF_PROG_TYPE_SCHED_CLS:
1014 case BPF_PROG_TYPE_SCHED_ACT:
1015 is_l2 = true;
1016 fallthrough;
1017 case BPF_PROG_TYPE_LWT_IN:
1018 case BPF_PROG_TYPE_LWT_OUT:
1019 case BPF_PROG_TYPE_LWT_XMIT:
1020 is_direct_pkt_access = true;
1021 break;
1022 default:
1023 break;
1024 }
1025
1026 sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
1027 if (!sk) {
1028 kfree(data);
1029 kfree(ctx);
1030 return -ENOMEM;
1031 }
1032 sock_init_data(NULL, sk);
1033
1034 skb = slab_build_skb(data);
1035 if (!skb) {
1036 kfree(data);
1037 kfree(ctx);
1038 sk_free(sk);
1039 return -ENOMEM;
1040 }
1041 skb->sk = sk;
1042
1043 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1044 __skb_put(skb, size);
1045
1046 if (ctx && ctx->ifindex > 1) {
1047 dev = dev_get_by_index(net, ctx->ifindex);
1048 if (!dev) {
1049 ret = -ENODEV;
1050 goto out;
1051 }
1052 }
1053 skb->protocol = eth_type_trans(skb, dev);
1054 skb_reset_network_header(skb);
1055
1056 switch (skb->protocol) {
1057 case htons(ETH_P_IP):
1058 sk->sk_family = AF_INET;
1059 if (sizeof(struct iphdr) <= skb_headlen(skb)) {
1060 sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
1061 sk->sk_daddr = ip_hdr(skb)->daddr;
1062 }
1063 break;
1064 #if IS_ENABLED(CONFIG_IPV6)
1065 case htons(ETH_P_IPV6):
1066 sk->sk_family = AF_INET6;
1067 if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
1068 sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
1069 sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
1070 }
1071 break;
1072 #endif
1073 default:
1074 break;
1075 }
1076
1077 if (is_l2)
1078 __skb_push(skb, hh_len);
1079 if (is_direct_pkt_access)
1080 bpf_compute_data_pointers(skb);
1081
1082 ret = convert___skb_to_skb(skb, ctx);
1083 if (ret)
1084 goto out;
1085
1086 if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
1087 const int off = skb_network_offset(skb);
1088 int len = skb->len - off;
1089
1090 skb->csum = skb_checksum(skb, off, len, 0);
1091 skb->ip_summed = CHECKSUM_COMPLETE;
1092 }
1093
1094 ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
1095 if (ret)
1096 goto out;
1097 if (!is_l2) {
1098 if (skb_headroom(skb) < hh_len) {
1099 int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
1100
1101 if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
1102 ret = -ENOMEM;
1103 goto out;
1104 }
1105 }
1106 memset(__skb_push(skb, hh_len), 0, hh_len);
1107 }
1108
1109 if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
1110 const int off = skb_network_offset(skb);
1111 int len = skb->len - off;
1112 __wsum csum;
1113
1114 csum = skb_checksum(skb, off, len, 0);
1115
1116 if (csum_fold(skb->csum) != csum_fold(csum)) {
1117 ret = -EBADMSG;
1118 goto out;
1119 }
1120 }
1121
1122 convert_skb_to___skb(skb, ctx);
1123
1124 size = skb->len;
1125 /* bpf program can never convert linear skb to non-linear */
1126 if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
1127 size = skb_headlen(skb);
1128 ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
1129 duration);
1130 if (!ret)
1131 ret = bpf_ctx_finish(kattr, uattr, ctx,
1132 sizeof(struct __sk_buff));
1133 out:
1134 if (dev && dev != net->loopback_dev)
1135 dev_put(dev);
1136 kfree_skb(skb);
1137 sk_free(sk);
1138 kfree(ctx);
1139 return ret;
1140 }
1141
1142 static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
1143 {
1144 unsigned int ingress_ifindex, rx_queue_index;
1145 struct netdev_rx_queue *rxqueue;
1146 struct net_device *device;
1147
1148 if (!xdp_md)
1149 return 0;
1150
1151 if (xdp_md->egress_ifindex != 0)
1152 return -EINVAL;
1153
1154 ingress_ifindex = xdp_md->ingress_ifindex;
1155 rx_queue_index = xdp_md->rx_queue_index;
1156
1157 if (!ingress_ifindex && rx_queue_index)
1158 return -EINVAL;
1159
1160 if (ingress_ifindex) {
1161 device = dev_get_by_index(current->nsproxy->net_ns,
1162 ingress_ifindex);
1163 if (!device)
1164 return -ENODEV;
1165
1166 if (rx_queue_index >= device->real_num_rx_queues)
1167 goto free_dev;
1168
1169 rxqueue = __netif_get_rx_queue(device, rx_queue_index);
1170
1171 if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
1172 goto free_dev;
1173
1174 xdp->rxq = &rxqueue->xdp_rxq;
1175 /* The device is now tracked in the xdp->rxq for later
1176 * dev_put()
1177 */
1178 }
1179
1180 xdp->data = xdp->data_meta + xdp_md->data;
1181 return 0;
1182
1183 free_dev:
1184 dev_put(device);
1185 return -EINVAL;
1186 }
1187
1188 static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
1189 {
1190 if (!xdp_md)
1191 return;
1192
1193 xdp_md->data = xdp->data - xdp->data_meta;
1194 xdp_md->data_end = xdp->data_end - xdp->data_meta;
1195
1196 if (xdp_md->ingress_ifindex)
1197 dev_put(xdp->rxq->dev);
1198 }
1199
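/* BPF_PROG_RUN entry point for XDP programs. Builds an xdp_buff around the
 * user-supplied data (adding frags when data_size_in exceeds the linear
 * area) and either runs the program in place or, when
 * BPF_F_TEST_XDP_LIVE_FRAMES is set, injects the resulting frames into the
 * stack via bpf_test_run_xdp_live().
 *
 * Illustrative sketch only (normally done through libbpf's
 * bpf_prog_test_run_opts(); the values below are assumptions): a userspace
 * caller would roughly do
 *
 *	union bpf_attr attr = {};
 *
 *	attr.test.prog_fd      = prog_fd;
 *	attr.test.data_in      = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in = pkt_len;
 *	attr.test.repeat       = 100;
 *	attr.test.flags        = BPF_F_TEST_XDP_LIVE_FRAMES;
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 */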
1200 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1201 union bpf_attr __user *uattr)
1202 {
1203 bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
1204 u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1205 u32 batch_size = kattr->test.batch_size;
1206 u32 retval = 0, duration, max_data_sz;
1207 u32 size = kattr->test.data_size_in;
1208 u32 headroom = XDP_PACKET_HEADROOM;
1209 u32 repeat = kattr->test.repeat;
1210 struct netdev_rx_queue *rxqueue;
1211 struct skb_shared_info *sinfo;
1212 struct xdp_buff xdp = {};
1213 int i, ret = -EINVAL;
1214 struct xdp_md *ctx;
1215 void *data;
1216
1217 if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
1218 prog->expected_attach_type == BPF_XDP_CPUMAP)
1219 return -EINVAL;
1220
1221 if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
1222 return -EINVAL;
1223
1224 if (bpf_prog_is_dev_bound(prog->aux))
1225 return -EINVAL;
1226
1227 if (do_live) {
1228 if (!batch_size)
1229 batch_size = NAPI_POLL_WEIGHT;
1230 else if (batch_size > TEST_XDP_MAX_BATCH)
1231 return -E2BIG;
1232
1233 headroom += sizeof(struct xdp_page_head);
1234 } else if (batch_size) {
1235 return -EINVAL;
1236 }
1237
1238 ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
1239 if (IS_ERR(ctx))
1240 return PTR_ERR(ctx);
1241
1242 if (ctx) {
1243 /* There can't be user provided data before the meta data */
1244 if (ctx->data_meta || ctx->data_end != size ||
1245 ctx->data > ctx->data_end ||
1246 unlikely(xdp_metalen_invalid(ctx->data)) ||
1247 (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
1248 goto free_ctx;
1249 /* Meta data is allocated from the headroom */
1250 headroom -= ctx->data;
1251 }
1252
1253 max_data_sz = 4096 - headroom - tailroom;
1254 if (size > max_data_sz) {
1255 /* disallow live data mode for jumbo frames */
1256 if (do_live)
1257 goto free_ctx;
1258 size = max_data_sz;
1259 }
1260
1261 data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
1262 if (IS_ERR(data)) {
1263 ret = PTR_ERR(data);
1264 goto free_ctx;
1265 }
1266
1267 rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
1268 rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
1269 xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
1270 xdp_prepare_buff(&xdp, data, headroom, size, true);
1271 sinfo = xdp_get_shared_info_from_buff(&xdp);
1272
1273 ret = xdp_convert_md_to_buff(ctx, &xdp);
1274 if (ret)
1275 goto free_data;
1276
1277 if (unlikely(kattr->test.data_size_in > size)) {
1278 void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
1279
1280 while (size < kattr->test.data_size_in) {
1281 struct page *page;
1282 skb_frag_t *frag;
1283 u32 data_len;
1284
1285 if (sinfo->nr_frags == MAX_SKB_FRAGS) {
1286 ret = -ENOMEM;
1287 goto out;
1288 }
1289
1290 page = alloc_page(GFP_KERNEL);
1291 if (!page) {
1292 ret = -ENOMEM;
1293 goto out;
1294 }
1295
1296 frag = &sinfo->frags[sinfo->nr_frags++];
1297
1298 data_len = min_t(u32, kattr->test.data_size_in - size,
1299 PAGE_SIZE);
1300 skb_frag_fill_page_desc(frag, page, 0, data_len);
1301
1302 if (copy_from_user(page_address(page), data_in + size,
1303 data_len)) {
1304 ret = -EFAULT;
1305 goto out;
1306 }
1307 sinfo->xdp_frags_size += data_len;
1308 size += data_len;
1309 }
1310 xdp_buff_set_frags_flag(&xdp);
1311 }
1312
1313 if (repeat > 1)
1314 bpf_prog_change_xdp(NULL, prog);
1315
1316 if (do_live)
1317 ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
1318 else
1319 ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
1320 /* We convert the xdp_buff back to an xdp_md before checking the return
1321 * code so the reference count of any held netdevice will be decremented
1322 * even if the test run failed.
1323 */
1324 xdp_convert_buff_to_md(&xdp, ctx);
1325 if (ret)
1326 goto out;
1327
1328 size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
1329 ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
1330 retval, duration);
1331 if (!ret)
1332 ret = bpf_ctx_finish(kattr, uattr, ctx,
1333 sizeof(struct xdp_md));
1334
1335 out:
1336 if (repeat > 1)
1337 bpf_prog_change_xdp(prog, NULL);
1338 free_data:
1339 for (i = 0; i < sinfo->nr_frags; i++)
1340 __free_page(skb_frag_page(&sinfo->frags[i]));
1341 kfree(data);
1342 free_ctx:
1343 kfree(ctx);
1344 return ret;
1345 }
1346
1347 static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
1348 {
1349 /* make sure the fields we don't use are zeroed */
1350 if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
1351 return -EINVAL;
1352
1353 /* flags is allowed */
1354
1355 if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
1356 sizeof(struct bpf_flow_keys)))
1357 return -EINVAL;
1358
1359 return 0;
1360 }
1361
1362 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1363 const union bpf_attr *kattr,
1364 union bpf_attr __user *uattr)
1365 {
1366 struct bpf_test_timer t = { NO_PREEMPT };
1367 u32 size = kattr->test.data_size_in;
1368 struct bpf_flow_dissector ctx = {};
1369 u32 repeat = kattr->test.repeat;
1370 struct bpf_flow_keys *user_ctx;
1371 struct bpf_flow_keys flow_keys;
1372 const struct ethhdr *eth;
1373 unsigned int flags = 0;
1374 u32 retval, duration;
1375 void *data;
1376 int ret;
1377
1378 if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1379 return -EINVAL;
1380
1381 if (size < ETH_HLEN)
1382 return -EINVAL;
1383
1384 data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
1385 if (IS_ERR(data))
1386 return PTR_ERR(data);
1387
1388 eth = (struct ethhdr *)data;
1389
1390 if (!repeat)
1391 repeat = 1;
1392
1393 user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
1394 if (IS_ERR(user_ctx)) {
1395 kfree(data);
1396 return PTR_ERR(user_ctx);
1397 }
1398 if (user_ctx) {
1399 ret = verify_user_bpf_flow_keys(user_ctx);
1400 if (ret)
1401 goto out;
1402 flags = user_ctx->flags;
1403 }
1404
1405 ctx.flow_keys = &flow_keys;
1406 ctx.data = data;
1407 ctx.data_end = (__u8 *)data + size;
1408
1409 bpf_test_timer_enter(&t);
1410 do {
1411 retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
1412 size, flags);
1413 } while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1414 bpf_test_timer_leave(&t);
1415
1416 if (ret < 0)
1417 goto out;
1418
1419 ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
1420 sizeof(flow_keys), retval, duration);
1421 if (!ret)
1422 ret = bpf_ctx_finish(kattr, uattr, user_ctx,
1423 sizeof(struct bpf_flow_keys));
1424
1425 out:
1426 kfree(user_ctx);
1427 kfree(data);
1428 return ret;
1429 }
1430
1431 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
1432 union bpf_attr __user *uattr)
1433 {
1434 struct bpf_test_timer t = { NO_PREEMPT };
1435 struct bpf_prog_array *progs = NULL;
1436 struct bpf_sk_lookup_kern ctx = {};
1437 u32 repeat = kattr->test.repeat;
1438 struct bpf_sk_lookup *user_ctx;
1439 u32 retval, duration;
1440 int ret = -EINVAL;
1441
1442 if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1443 return -EINVAL;
1444
1445 if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
1446 kattr->test.data_size_out)
1447 return -EINVAL;
1448
1449 if (!repeat)
1450 repeat = 1;
1451
1452 user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
1453 if (IS_ERR(user_ctx))
1454 return PTR_ERR(user_ctx);
1455
1456 if (!user_ctx)
1457 return -EINVAL;
1458
1459 if (user_ctx->sk)
1460 goto out;
1461
1462 if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
1463 goto out;
1464
1465 if (user_ctx->local_port > U16_MAX) {
1466 ret = -ERANGE;
1467 goto out;
1468 }
1469
1470 ctx.family = (u16)user_ctx->family;
1471 ctx.protocol = (u16)user_ctx->protocol;
1472 ctx.dport = (u16)user_ctx->local_port;
1473 ctx.sport = user_ctx->remote_port;
1474
1475 switch (ctx.family) {
1476 case AF_INET:
1477 ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
1478 ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
1479 break;
1480
1481 #if IS_ENABLED(CONFIG_IPV6)
1482 case AF_INET6:
1483 ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
1484 ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
1485 break;
1486 #endif
1487
1488 default:
1489 ret = -EAFNOSUPPORT;
1490 goto out;
1491 }
1492
1493 progs = bpf_prog_array_alloc(1, GFP_KERNEL);
1494 if (!progs) {
1495 ret = -ENOMEM;
1496 goto out;
1497 }
1498
1499 progs->items[0].prog = prog;
1500
1501 bpf_test_timer_enter(&t);
1502 do {
1503 ctx.selected_sk = NULL;
1504 retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
1505 } while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1506 bpf_test_timer_leave(&t);
1507
1508 if (ret < 0)
1509 goto out;
1510
1511 user_ctx->cookie = 0;
1512 if (ctx.selected_sk) {
1513 if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
1514 ret = -EOPNOTSUPP;
1515 goto out;
1516 }
1517
1518 user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
1519 }
1520
1521 ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1522 if (!ret)
1523 ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
1524
1525 out:
1526 bpf_prog_array_free(progs);
1527 kfree(user_ctx);
1528 return ret;
1529 }
1530
1531 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
1532 const union bpf_attr *kattr,
1533 union bpf_attr __user *uattr)
1534 {
1535 void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
1536 __u32 ctx_size_in = kattr->test.ctx_size_in;
1537 void *ctx = NULL;
1538 u32 retval;
1539 int err = 0;
1540
1541 /* doesn't support data_in/out, ctx_out, duration, repeat, or flags */
1542 if (kattr->test.data_in || kattr->test.data_out ||
1543 kattr->test.ctx_out || kattr->test.duration ||
1544 kattr->test.repeat || kattr->test.flags ||
1545 kattr->test.batch_size)
1546 return -EINVAL;
1547
1548 if (ctx_size_in < prog->aux->max_ctx_offset ||
1549 ctx_size_in > U16_MAX)
1550 return -EINVAL;
1551
1552 if (ctx_size_in) {
1553 ctx = memdup_user(ctx_in, ctx_size_in);
1554 if (IS_ERR(ctx))
1555 return PTR_ERR(ctx);
1556 }
1557
1558 rcu_read_lock_trace();
1559 retval = bpf_prog_run_pin_on_cpu(prog, ctx);
1560 rcu_read_unlock_trace();
1561
1562 if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
1563 err = -EFAULT;
1564 goto out;
1565 }
1566 if (ctx_size_in)
1567 if (copy_to_user(ctx_in, ctx, ctx_size_in))
1568 err = -EFAULT;
1569 out:
1570 kfree(ctx);
1571 return err;
1572 }
1573
1574 static int verify_and_copy_hook_state(struct nf_hook_state *state,
1575 const struct nf_hook_state *user,
1576 struct net_device *dev)
1577 {
1578 if (user->in || user->out)
1579 return -EINVAL;
1580
1581 if (user->net || user->sk || user->okfn)
1582 return -EINVAL;
1583
1584 switch (user->pf) {
1585 case NFPROTO_IPV4:
1586 case NFPROTO_IPV6:
1587 switch (state->hook) {
1588 case NF_INET_PRE_ROUTING:
1589 state->in = dev;
1590 break;
1591 case NF_INET_LOCAL_IN:
1592 state->in = dev;
1593 break;
1594 case NF_INET_FORWARD:
1595 state->in = dev;
1596 state->out = dev;
1597 break;
1598 case NF_INET_LOCAL_OUT:
1599 state->out = dev;
1600 break;
1601 case NF_INET_POST_ROUTING:
1602 state->out = dev;
1603 break;
1604 }
1605
1606 break;
1607 default:
1608 return -EINVAL;
1609 }
1610
1611 state->pf = user->pf;
1612 state->hook = user->hook;
1613
1614 return 0;
1615 }
1616
1617 static __be16 nfproto_eth(int nfproto)
1618 {
1619 switch (nfproto) {
1620 case NFPROTO_IPV4:
1621 return htons(ETH_P_IP);
1622 case NFPROTO_IPV6:
1623 break;
1624 }
1625
1626 return htons(ETH_P_IPV6);
1627 }
1628
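/* Netfilter test runner: builds an skb from the user data and a minimal
 * nf_hook_state (defaulting to NFPROTO_IPV4 / NF_INET_LOCAL_OUT), then runs
 * the program through the generic bpf_test_run() loop.
 */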
1629 int bpf_prog_test_run_nf(struct bpf_prog *prog,
1630 const union bpf_attr *kattr,
1631 union bpf_attr __user *uattr)
1632 {
1633 struct net *net = current->nsproxy->net_ns;
1634 struct net_device *dev = net->loopback_dev;
1635 struct nf_hook_state *user_ctx, hook_state = {
1636 .pf = NFPROTO_IPV4,
1637 .hook = NF_INET_LOCAL_OUT,
1638 };
1639 u32 size = kattr->test.data_size_in;
1640 u32 repeat = kattr->test.repeat;
1641 struct bpf_nf_ctx ctx = {
1642 .state = &hook_state,
1643 };
1644 struct sk_buff *skb = NULL;
1645 u32 retval, duration;
1646 void *data;
1647 int ret;
1648
1649 if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1650 return -EINVAL;
1651
1652 if (size < sizeof(struct iphdr))
1653 return -EINVAL;
1654
1655 data = bpf_test_init(kattr, kattr->test.data_size_in, size,
1656 NET_SKB_PAD + NET_IP_ALIGN,
1657 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1658 if (IS_ERR(data))
1659 return PTR_ERR(data);
1660
1661 if (!repeat)
1662 repeat = 1;
1663
1664 user_ctx = bpf_ctx_init(kattr, sizeof(struct nf_hook_state));
1665 if (IS_ERR(user_ctx)) {
1666 kfree(data);
1667 return PTR_ERR(user_ctx);
1668 }
1669
1670 if (user_ctx) {
1671 ret = verify_and_copy_hook_state(&hook_state, user_ctx, dev);
1672 if (ret)
1673 goto out;
1674 }
1675
1676 skb = slab_build_skb(data);
1677 if (!skb) {
1678 ret = -ENOMEM;
1679 goto out;
1680 }
1681
1682 data = NULL; /* data released via kfree_skb */
1683
1684 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1685 __skb_put(skb, size);
1686
1687 ret = -EINVAL;
1688
1689 if (hook_state.hook != NF_INET_LOCAL_OUT) {
1690 if (size < ETH_HLEN + sizeof(struct iphdr))
1691 goto out;
1692
1693 skb->protocol = eth_type_trans(skb, dev);
1694 switch (skb->protocol) {
1695 case htons(ETH_P_IP):
1696 if (hook_state.pf == NFPROTO_IPV4)
1697 break;
1698 goto out;
1699 case htons(ETH_P_IPV6):
1700 if (size < ETH_HLEN + sizeof(struct ipv6hdr))
1701 goto out;
1702 if (hook_state.pf == NFPROTO_IPV6)
1703 break;
1704 goto out;
1705 default:
1706 ret = -EPROTO;
1707 goto out;
1708 }
1709
1710 skb_reset_network_header(skb);
1711 } else {
1712 skb->protocol = nfproto_eth(hook_state.pf);
1713 }
1714
1715 ctx.skb = skb;
1716
1717 ret = bpf_test_run(prog, &ctx, repeat, &retval, &duration, false);
1718 if (ret)
1719 goto out;
1720
1721 ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1722
1723 out:
1724 kfree(user_ctx);
1725 kfree_skb(skb);
1726 kfree(data);
1727 return ret;
1728 }
1729
1730 static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
1731 .owner = THIS_MODULE,
1732 .set = &test_sk_check_kfunc_ids,
1733 };
1734
1735 BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
1736 BTF_ID(struct, prog_test_ref_kfunc)
1737 BTF_ID(func, bpf_kfunc_call_test_release_dtor)
1738 BTF_ID(struct, prog_test_member)
1739 BTF_ID(func, bpf_kfunc_call_memb_release_dtor)
1740
1741 static int __init bpf_prog_test_run_init(void)
1742 {
1743 const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
1744 {
1745 .btf_id = bpf_prog_test_dtor_kfunc_ids[0],
1746 .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
1747 },
1748 {
1749 .btf_id = bpf_prog_test_dtor_kfunc_ids[2],
1750 .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
1751 },
1752 };
1753 int ret;
1754
1755 ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
1756 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
1757 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
1758 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
1759 return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
1760 ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
1761 THIS_MODULE);
1762 }
1763 late_initcall(bpf_prog_test_run_init);
1764