xref: /linux/net/bpf/test_run.c (revision 9f7d35d9f7a184ffb591b090b2cbf63d2d599c02)
/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include <net/tcp.h>

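/* Run @prog once over @ctx with preemption disabled and under the RCU read
 * lock, after making the cgroup storage pointers visible to the program's
 * storage helpers.
 */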
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	bpf_cgroup_storage_set(storage);
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}

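/* Run @prog @repeat times over @ctx. The last run's verdict is reported in
 * @ret and the average duration of one run, in nanoseconds, in @time.
 * Per-type cgroup storage is allocated up front and freed after the runs.
 */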
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
			u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

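	/* Userspace passing repeat == 0 means "run once". */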
	if (!repeat)
		repeat = 1;
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		*ret = bpf_test_run_one(prog, ctx, storage);
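		/* Yield the CPU on long runs, stopping the clock across the
		 * reschedule so off-CPU time is not counted; a pending signal
		 * aborts the remaining iterations.
		 */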
		if (need_resched()) {
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return 0;
}

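/* Copy the output packet, its size, the program's return value and the
 * measured duration back to userspace. Fails with -ENOSPC if the output
 * data had to be truncated to fit the user-supplied buffer.
 */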
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

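/* Allocate a zeroed buffer with the requested headroom and tailroom (the
 * whole allocation must fit in one page) and copy the user-supplied packet
 * data into it. Returns an ERR_PTR() on failure.
 */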
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}

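/* BPF_PROG_TEST_RUN for skb-based program types: wrap the user-supplied
 * data in a real sk_buff, run the program over it, and copy the resulting
 * packet back to userspace.
 */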
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

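	/* SCHED_CLS/SCHED_ACT programs operate on an L2 frame; they and the
	 * LWT types also get direct packet access.
	 */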
	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

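	/* Back the skb with a dummy socket so programs that dereference
	 * skb->sk see a valid, zero-initialized sock.
	 */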
	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
	if (ret) {
		kfree_skb(skb);
		kfree(sk);
		return ret;
	}
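	/* eth_type_trans() above pulled the MAC header. For non-L2 programs,
	 * push a zeroed one back on (growing the headroom first if the
	 * program consumed it) so userspace gets a full Ethernet frame.
	 */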
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				kfree_skb(skb);
				kfree(sk);
				return -ENOMEM;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	kfree(sk);
	return ret;
}

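/* BPF_PROG_TEST_RUN for XDP programs: build an xdp_buff directly around
 * the user-supplied data, run the program, and copy the (possibly
 * adjusted) packet back to userspace.
 */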
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

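	/* Lay the xdp_buff out over the buffer, keeping the standard XDP
	 * headroom free so head-adjusting helpers have room to grow into.
	 */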
	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

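	/* Programs may inspect ctx->rxq; use queue 0 of the current netns
	 * loopback device as a stand-in receive queue.
	 */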
	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
	if (ret)
		goto out;
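	/* The program may have moved data or data_end; if so, report the
	 * adjusted packet length back to userspace.
	 */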
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	kfree(data);
	return ret;
}