// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

14 /* "extern" is to avoid sparse warning.  It is only used in bpf_struct_ops.c. */
15 static struct bpf_struct_ops bpf_tcp_congestion_ops;
16 
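/* Offsets of tcp_congestion_ops members that a bpf-tcp-cc may not
 * implement; enforced by bpf_tcp_ca_check_member().
 */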
static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
static const struct btf_type *tcp_congestion_ops_type;

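/* Called once when the "tcp_congestion_ops" struct_ops is registered:
 * look up and cache the BTF type IDs that the verifier hooks below
 * rely on.
 */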
static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	type_id = btf_find_by_name_kind(btf, "tcp_congestion_ops", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_congestion_ops_type = btf_type_by_id(btf, type_id);

	return 0;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

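/* Verifier hook for accesses to the struct_ops context.  When an
 * access yields an unmodified PTR_TO_BTF_ID of "struct sock", promote
 * it to "struct tcp_sock" so that a bpf-tcp-cc can read tcp_sock
 * fields directly.
 */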
static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info))
		return false;

	if (base_type(info->reg_type) == PTR_TO_BTF_ID &&
	    !bpf_type_has_unsafe_modifiers(info->reg_type) &&
	    info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

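/* Verifier hook for writes through a PTR_TO_BTF_ID pointer.  Only the
 * members whitelisted below (on tcp_sock and the sock/icsk it embeds)
 * may be written, and a write must not run past the end of the
 * member.
 */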
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct bpf_reg_state *reg,
					int off, int size)
{
	const struct btf_type *t;
	size_t end;

	t = btf_type_by_id(reg->btf, reg->btf_id);
	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case offsetof(struct sock, sk_pacing_rate):
		end = offsetofend(struct sock, sk_pacing_rate);
		break;
	case offsetof(struct sock, sk_pacing_status):
		end = offsetofend(struct sock, sk_pacing_status);
		break;
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_stamp):
		end = offsetofend(struct tcp_sock, snd_cwnd_stamp);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	case offsetof(struct tcp_sock, app_limited):
		end = offsetofend(struct tcp_sock, app_limited);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return 0;
}

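/* Helper backing BPF_FUNC_tcp_send_ack: a thin wrapper around
 * __tcp_send_ack().  The ARG_PTR_TO_BTF_ID arg type guarantees a
 * non-NULL tp, and tcp_sock embeds sock as its first member, so the
 * cast below is safe.
 */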
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &tcp_sock_id,
	.arg2_type	= ARG_ANYTHING,
};

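/* Return the byte offset of the tcp_congestion_ops member this prog
 * implements; expected_attach_type carries the member index.
 */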
static u32 prog_ops_moff(const struct bpf_prog *prog)
{
	const struct btf_member *m;
	const struct btf_type *t;
	u32 midx;

	midx = prog->expected_attach_type;
	t = tcp_congestion_ops_type;
	m = &btf_type_member(t)[midx];

	return __btf_member_bit_offset(t, m) / 8;
}

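/* Extra helpers available to a bpf-tcp-cc on top of the base set. */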
static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		/* Do not allow release() to call setsockopt().
		 * release() is called when the current bpf-tcp-cc
		 * is retiring; it must not call setsockopt() to
		 * make further changes, which may potentially
		 * allocate new resources.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		/* Since getsockopt() and setsockopt() are usually
		 * expected to be available together, also disable
		 * getsockopt() for release() to avoid surprises.
		 * A bpf-tcp-cc already has a more powerful way to
		 * read tcp_sock via the PTR_TO_BTF_ID.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_getsockopt_proto;
		return NULL;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return bpf_base_func_proto(func_id, prog);
	}
}

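/* Kernel functions (kfuncs) a bpf-tcp-cc may call directly, e.g. to
 * reuse the stock Reno/slow-start building blocks.
 */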
BTF_KFUNCS_START(bpf_tcp_ca_check_kfunc_ids)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_ID_FLAGS(func, tcp_reno_cong_avoid)
BTF_ID_FLAGS(func, tcp_reno_undo_cwnd)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_cong_avoid_ai)
BTF_KFUNCS_END(bpf_tcp_ca_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_tcp_ca_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
};

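/* Initialize the non-func members copied from userspace.  Returning 1
 * means the member has been handled here; returning 0 defers to the
 * generic struct_ops code.
 */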
static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = __btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		return 1;
	}

	return 0;
}

static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member,
				   const struct bpf_prog *prog)
{
	if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

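/* struct_ops lifecycle callbacks: (un)registration, live update and
 * validation are all delegated to the regular tcp congestion-control
 * machinery.
 */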
static int bpf_tcp_ca_reg(void *kdata, struct bpf_link *link)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata, struct bpf_link *link)
{
	tcp_unregister_congestion_control(kdata);
}

static int bpf_tcp_ca_update(void *kdata, void *old_kdata, struct bpf_link *link)
{
	return tcp_update_congestion_control(kdata, old_kdata);
}

static int bpf_tcp_ca_validate(void *kdata)
{
	return tcp_validate_congestion_control(kdata);
}

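/* Typed no-op stubs for every op, wired up via .cfi_stubs below so
 * that kCFI-enabled kernels have a correctly typed target for each
 * member a bpf-tcp-cc may implement.
 */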
static u32 bpf_tcp_ca_ssthresh(struct sock *sk)
{
	return 0;
}

static void bpf_tcp_ca_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
}

static void bpf_tcp_ca_set_state(struct sock *sk, u8 new_state)
{
}

static void bpf_tcp_ca_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
}

static void bpf_tcp_ca_in_ack_event(struct sock *sk, u32 flags)
{
}

static void bpf_tcp_ca_pkts_acked(struct sock *sk, const struct ack_sample *sample)
{
}

static u32 bpf_tcp_ca_min_tso_segs(struct sock *sk)
{
	return 0;
}

static void bpf_tcp_ca_cong_control(struct sock *sk, u32 ack, int flag,
				    const struct rate_sample *rs)
{
}

static u32 bpf_tcp_ca_undo_cwnd(struct sock *sk)
{
	return 0;
}

static u32 bpf_tcp_ca_sndbuf_expand(struct sock *sk)
{
	return 0;
}

static void __bpf_tcp_ca_init(struct sock *sk)
{
}

static void __bpf_tcp_ca_release(struct sock *sk)
{
}

static struct tcp_congestion_ops __bpf_ops_tcp_congestion_ops = {
	.ssthresh = bpf_tcp_ca_ssthresh,
	.cong_avoid = bpf_tcp_ca_cong_avoid,
	.set_state = bpf_tcp_ca_set_state,
	.cwnd_event = bpf_tcp_ca_cwnd_event,
	.in_ack_event = bpf_tcp_ca_in_ack_event,
	.pkts_acked = bpf_tcp_ca_pkts_acked,
	.min_tso_segs = bpf_tcp_ca_min_tso_segs,
	.cong_control = bpf_tcp_ca_cong_control,
	.undo_cwnd = bpf_tcp_ca_undo_cwnd,
	.sndbuf_expand = bpf_tcp_ca_sndbuf_expand,

	.init = __bpf_tcp_ca_init,
	.release = __bpf_tcp_ca_release,
};

static struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.update = bpf_tcp_ca_update,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.validate = bpf_tcp_ca_validate,
	.name = "tcp_congestion_ops",
	.cfi_stubs = &__bpf_ops_tcp_congestion_ops,
	.owner = THIS_MODULE,
};

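/* Module init: register the kfunc set first, then the
 * "tcp_congestion_ops" struct_ops itself.
 */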
static int __init bpf_tcp_ca_kfunc_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_tcp_congestion_ops, tcp_congestion_ops);

	return ret;
}
late_initcall(bpf_tcp_ca_kfunc_init);
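
/*
 * Illustrative sketch (not part of this file): what a minimal
 * bpf-tcp-cc built on the hooks above might look like on the
 * BPF-program side, loosely following the kernel selftests
 * (e.g. bpf_dctcp.c).  All "sketch_*" names are hypothetical.
 *
 *	SEC("struct_ops/sketch_ssthresh")
 *	__u32 BPF_PROG(sketch_ssthresh, struct sock *sk)
 *	{
 *		const struct tcp_sock *tp = (const struct tcp_sock *)sk;
 *
 *		// Halve cwnd on loss, with a floor of 2 segments.
 *		return tp->snd_cwnd > 4 ? tp->snd_cwnd >> 1 : 2;
 *	}
 *
 *	SEC("struct_ops/sketch_undo_cwnd")
 *	__u32 BPF_PROG(sketch_undo_cwnd, struct sock *sk)
 *	{
 *		const struct tcp_sock *tp = (const struct tcp_sock *)sk;
 *
 *		return tp->snd_cwnd > tp->prior_cwnd ?
 *		       tp->snd_cwnd : tp->prior_cwnd;
 *	}
 *
 *	SEC("struct_ops/sketch_cong_avoid")
 *	void BPF_PROG(sketch_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
 *	{
 *		struct tcp_sock *tp = (struct tcp_sock *)sk;
 *
 *		// Writing snd_cwnd is permitted by
 *		// bpf_tcp_ca_btf_struct_access() above.
 *		tp->snd_cwnd += acked;
 *	}
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops sketch = {
 *		.ssthresh	= (void *)sketch_ssthresh,
 *		.cong_avoid	= (void *)sketch_cong_avoid,
 *		.undo_cwnd	= (void *)sketch_undo_cwnd,
 *		.name		= "bpf_sketch",
 *	};
 *
 * tcp_validate_congestion_control() requires at least ssthresh,
 * undo_cwnd and one of cong_avoid/cong_control, which is why all
 * three are provided here.
 */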