xref: /linux/net/bpf/test_run.c (revision 4f139972b489f8bc2c821aa25ac65018d92af3f7)
/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
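/*
 * Kernel side of the BPF_PROG_TEST_RUN bpf(2) command: run a loaded
 * program against a caller-supplied packet buffer and report the
 * program's return value, the average run time and the (possibly
 * modified) packet back to user space.
 *
 * Illustrative user-space sketch (not part of this file; assumes a
 * program fd obtained via BPF_PROG_LOAD and a raw syscall wrapper):
 *
 *	union bpf_attr attr = {};
 *	char pkt[64];		// Ethernet frame, at least ETH_HLEN bytes
 *	char out[128];		// must be large enough for the result
 *
 *	attr.test.prog_fd      = prog_fd;
 *	attr.test.data_in      = (unsigned long)pkt;
 *	attr.test.data_size_in = sizeof(pkt);
 *	attr.test.data_out     = (unsigned long)out;
 *	attr.test.repeat       = 1000;
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *	// on success attr.test.retval, attr.test.duration and
 *	// attr.test.data_size_out describe the run
 */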
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>

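/* Run the program once, under the conditions networking BPF programs
 * normally execute in: inside an RCU read-side critical section with
 * preemption disabled.  Returns the program's return value.
 */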
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx)
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}

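/* Run the program @repeat times (at least once) and report the average
 * wall-clock time per run, in nanoseconds, through @time.  Reschedules
 * between runs when needed, excluding that gap from the measurement,
 * and stops early if a signal is pending.  Returns the last run's
 * return value.
 */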
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
	u64 time_start, time_spent = 0;
	u32 ret = 0, i;

	if (!repeat)
		repeat = 1;
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		ret = bpf_test_run_one(prog, ctx);
		if (need_resched()) {
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	return ret;
}

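/* Copy the test results (packet data, its size, the program's return
 * value and the average duration) back into the user's bpf_attr.
 * Returns -EFAULT if any copy to user space fails.
 */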
static int bpf_test_finish(union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(uattr->test.data_out);
	int err = -EFAULT;

	if (data_out && copy_to_user(data_out, data, size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	err = 0;
out:
	return err;
}

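/* Validate the requested packet size (at least an Ethernet header, and
 * small enough to fit in one page together with the requested headroom
 * and tailroom), allocate a kernel buffer and copy the user-supplied
 * packet into it at @headroom offset.  Returns the buffer or an
 * ERR_PTR().
 */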
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}

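/* BPF_PROG_TEST_RUN handler for skb-based program types: wrap the test
 * data in an skb primed as if it were received on the loopback device,
 * run the program and copy the resulting packet back.  L2 programs
 * (SCHED_CLS/SCHED_ACT) see the packet starting at the MAC header;
 * the other types see it starting at the network header, with the MAC
 * header pushed back before the data is copied out.
 */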
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	struct sk_buff *skb;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, ETH_HLEN);
	if (is_direct_pkt_access)
		bpf_compute_data_end(skb);
	retval = bpf_test_run(prog, skb, repeat, &duration);
	if (!is_l2)
		__skb_push(skb, ETH_HLEN);
	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	return ret;
}

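/* BPF_PROG_TEST_RUN handler for XDP programs: build an xdp_buff around
 * the test data with XDP_PACKET_HEADROOM in front, run the program and
 * copy the resulting packet back to user space, recomputing its size
 * if the program moved xdp.data.
 */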
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM;
	xdp.data_end = xdp.data + size;

	retval = bpf_test_run(prog, &xdp, repeat, &duration);
	if (xdp.data != data + XDP_PACKET_HEADROOM)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(uattr, xdp.data, size, retval, duration);
	kfree(data);
	return ret;
}