// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

14 /* "extern" is to avoid sparse warning.  It is only used in bpf_struct_ops.c. */
15 static struct bpf_struct_ops bpf_tcp_congestion_ops;
16 
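/*
 * Ops that a bpf-tcp-cc is not allowed to implement.  get_info() fills
 * a diag/netlink buffer with CC-private state, which has no sensible
 * BPF counterpart, so check_member() rejects it at load time.
 */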
static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
static const struct btf_type *tcp_congestion_ops_type;

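/*
 * Resolve and cache the BTF type ids the callbacks below rely on.
 * This runs once, when the struct_ops is initialized against the
 * vmlinux BTF (see the .init member at the bottom of this file).
 */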
static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	type_id = btf_find_by_name_kind(btf, "tcp_congestion_ops", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_congestion_ops_type = btf_type_by_id(btf, type_id);

	return 0;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info))
		return false;

	if (base_type(info->reg_type) == PTR_TO_BTF_ID &&
	    !bpf_type_has_unsafe_modifiers(info->reg_type) &&
	    info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

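/*
 * Illustrative effect of the promotion above, as seen from a bpf-tcp-cc
 * prog (a sketch based on the BPF selftests, not part of this file):
 * the "struct sock *sk" ctx argument is tracked as a tcp_sock, so a
 * plain cast is verifier-safe:
 *
 *	void BPF_PROG(bpf_ca_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
 *	{
 *		struct tcp_sock *tp = (struct tcp_sock *)sk;
 *
 *		... read tp fields; writes go through the whitelist below ...
 *	}
 */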
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct bpf_reg_state *reg,
					int off, int size)
{
	const struct btf_type *t;
	size_t end;

	t = btf_type_by_id(reg->btf, reg->btf_id);
	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case offsetof(struct sock, sk_pacing_rate):
		end = offsetofend(struct sock, sk_pacing_rate);
		break;
	case offsetof(struct sock, sk_pacing_status):
		end = offsetofend(struct sock, sk_pacing_status);
		break;
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	case offsetof(struct tcp_sock, app_limited):
		end = offsetofend(struct tcp_sock, app_limited);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return 0;
}

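/*
 * Illustrative write that passes the whitelist above (a sketch from a
 * bpf-tcp-cc prog's point of view, not part of this file):
 *
 *	tp->snd_cwnd = min(tp->snd_cwnd + acked, tp->snd_cwnd_clamp);
 *
 * A write to any non-whitelisted tcp_sock field is rejected at verification
 * time with "no write support to tcp_sock at off ...".
 */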
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &tcp_sock_id,
	.arg2_type	= ARG_ANYTHING,
};

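/*
 * Illustrative call from a bpf-tcp-cc prog (a sketch; the bpf_dctcp
 * selftest uses this helper in a similar way):
 *
 *	bpf_tcp_send_ack(tp, prior_rcv_nxt);
 */

/*
 * For struct_ops progs, prog->expected_attach_type holds the index of
 * the tcp_congestion_ops member the prog implements; translate it into
 * that member's byte offset.
 */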
static u32 prog_ops_moff(const struct bpf_prog *prog)
{
	const struct btf_member *m;
	const struct btf_type *t;
	u32 midx;

	midx = prog->expected_attach_type;
	t = tcp_congestion_ops_type;
	m = &btf_type_member(t)[midx];

	return __btf_member_bit_offset(t, m) / 8;
}

static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		/* Does not allow release() to call setsockopt.
		 * release() is called when the current bpf-tcp-cc
		 * is retiring.  It is not allowed to call
		 * setsockopt() to make further changes which
		 * may potentially allocate new resources.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		/* Since get/setsockopt is usually expected to
		 * be available together, disable getsockopt for
		 * release also to avoid usage surprise.
		 * The bpf-tcp-cc already has a more powerful way
		 * to read tcp_sock from the PTR_TO_BTF_ID.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_getsockopt_proto;
		return NULL;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return bpf_base_func_proto(func_id, prog);
	}
}

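/*
 * Illustrative setsockopt use from any op other than release() (a
 * sketch, not part of this file), e.g. capping the pacing rate from
 * a prog's .init:
 *
 *	int rate = 1000000;
 *
 *	bpf_setsockopt(sk, SOL_SOCKET, SO_MAX_PACING_RATE,
 *		       &rate, sizeof(rate));
 */
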
BTF_KFUNCS_START(bpf_tcp_ca_check_kfunc_ids)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_ID_FLAGS(func, tcp_reno_cong_avoid)
BTF_ID_FLAGS(func, tcp_reno_undo_cwnd)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_cong_avoid_ai)
BTF_KFUNCS_END(bpf_tcp_ca_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_tcp_ca_check_kfunc_ids,
};

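/*
 * Illustrative kfunc use from a bpf-tcp-cc prog (a sketch based on the
 * bpf_cubic selftest, not part of this file):
 *
 *	extern __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked) __ksym;
 *
 *	acked = tcp_slow_start(tp, acked);
 */
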
static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
};

static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = __btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		return 1;
	}

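	/*
	 * Returning 0 leaves the member to the struct_ops infrastructure:
	 * func ptr members are wired up to the struct_ops progs, and
	 * other unhandled members must stay zeroed.
	 */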
	return 0;
}

static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member,
				   const struct bpf_prog *prog)
{
	if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

static int bpf_tcp_ca_update(void *kdata, void *old_kdata)
{
	return tcp_update_congestion_control(kdata, old_kdata);
}

static int bpf_tcp_ca_validate(void *kdata)
{
	return tcp_validate_congestion_control(kdata);
}

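/*
 * The functions below are never executed; they are stubs referenced by
 * .cfi_stubs further down so that kCFI-checked indirect calls through
 * the struct_ops trampolines see type hashes matching the ops'
 * signatures.
 */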
static u32 bpf_tcp_ca_ssthresh(struct sock *sk)
{
	return 0;
}

static void bpf_tcp_ca_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
}

static void bpf_tcp_ca_set_state(struct sock *sk, u8 new_state)
{
}

static void bpf_tcp_ca_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
}

static void bpf_tcp_ca_in_ack_event(struct sock *sk, u32 flags)
{
}

static void bpf_tcp_ca_pkts_acked(struct sock *sk, const struct ack_sample *sample)
{
}

static u32 bpf_tcp_ca_min_tso_segs(struct sock *sk)
{
	return 0;
}

static void bpf_tcp_ca_cong_control(struct sock *sk, const struct rate_sample *rs)
{
}

static u32 bpf_tcp_ca_undo_cwnd(struct sock *sk)
{
	return 0;
}

static u32 bpf_tcp_ca_sndbuf_expand(struct sock *sk)
{
	return 0;
}

static void __bpf_tcp_ca_init(struct sock *sk)
{
}

static void __bpf_tcp_ca_release(struct sock *sk)
{
}

static struct tcp_congestion_ops __bpf_ops_tcp_congestion_ops = {
	.ssthresh = bpf_tcp_ca_ssthresh,
	.cong_avoid = bpf_tcp_ca_cong_avoid,
	.set_state = bpf_tcp_ca_set_state,
	.cwnd_event = bpf_tcp_ca_cwnd_event,
	.in_ack_event = bpf_tcp_ca_in_ack_event,
	.pkts_acked = bpf_tcp_ca_pkts_acked,
	.min_tso_segs = bpf_tcp_ca_min_tso_segs,
	.cong_control = bpf_tcp_ca_cong_control,
	.undo_cwnd = bpf_tcp_ca_undo_cwnd,
	.sndbuf_expand = bpf_tcp_ca_sndbuf_expand,

	.init = __bpf_tcp_ca_init,
	.release = __bpf_tcp_ca_release,
};

static struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.update = bpf_tcp_ca_update,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.validate = bpf_tcp_ca_validate,
	.name = "tcp_congestion_ops",
	.cfi_stubs = &__bpf_ops_tcp_congestion_ops,
	.owner = THIS_MODULE,
};

static int __init bpf_tcp_ca_kfunc_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_tcp_congestion_ops, tcp_congestion_ops);

	return ret;
}
late_initcall(bpf_tcp_ca_kfunc_init);
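
/*
 * Illustrative BPF-side declaration that attaches through these ops (a
 * sketch based on the BPF selftests, not part of this file; the
 * bpf_ca_* progs are hypothetical names):
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops bpf_ca = {
 *		.ssthresh	= (void *)bpf_ca_ssthresh,
 *		.cong_avoid	= (void *)bpf_ca_cong_avoid,
 *		.undo_cwnd	= (void *)bpf_ca_undo_cwnd,
 *		.name		= "bpf_ca",
 *	};
 */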