xref: /linux/include/net/mptcp.h (revision 4f9786035f9e519db41375818e1d0b5f20da2f10)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#ifndef __NET_MPTCP_H
#define __NET_MPTCP_H

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/types.h>

struct mptcp_info;
struct mptcp_sock;
struct mptcp_pm_addr_entry;
struct seq_file;

/* MPTCP sk_buff extension data */
struct mptcp_ext {
	union {
		u64	data_ack;
		u32	data_ack32;
	};
	u64		data_seq;
	u32		subflow_seq;
	u16		data_len;
	__sum16		csum;
	u8		use_map:1,
			dsn64:1,
			data_fin:1,
			use_ack:1,
			ack64:1,
			mpc_map:1,
			frozen:1,
			reset_transient:1;
	u8		reset_reason:4,
			csum_reqd:1,
			infinite_map:1;
};
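
/*
 * Illustrative sketch (not part of the upstream header): how a DSS data
 * mapping could be attached to an skb.  skb_ext_add() and SKB_EXT_MPTCP
 * come from <linux/skbuff.h>; the helper name and the field values are
 * made up for the example only.
 */
#if IS_ENABLED(CONFIG_MPTCP)
static inline bool mptcp_example_attach_dss(struct sk_buff *skb, u64 data_seq,
					    u32 subflow_seq, u16 data_len)
{
	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);

	if (!mpext)
		return false;

	/* the compound literal zero-fills every field not listed below */
	*mpext = (struct mptcp_ext) {
		.data_seq	= data_seq,
		.subflow_seq	= subflow_seq,
		.data_len	= data_len,
		.use_map	= 1,
		.dsn64		= 1,
	};
	return true;
}
#endif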

#define MPTCPOPT_HMAC_LEN	20
#define MPTCP_RM_IDS_MAX	8

struct mptcp_rm_list {
	u8 ids[MPTCP_RM_IDS_MAX];
	u8 nr;
};

struct mptcp_addr_info {
	u8			id;
	sa_family_t		family;
	__be16			port;
	union {
		struct in_addr	addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		struct in6_addr	addr6;
#endif
	};
};
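
/*
 * Illustrative sketch: filling the two address-related structures above.
 * The id, address and port values are placeholders (AF_INET comes from
 * <linux/socket.h>); real callers get them from the in-kernel or
 * userspace path managers.
 */
static inline void mptcp_example_fill_v4(struct mptcp_addr_info *info, u8 id,
					 __be32 saddr, __be16 port)
{
	*info = (struct mptcp_addr_info) {
		.id	= id,
		.family	= AF_INET,
		.port	= port,
	};
	info->addr.s_addr = saddr;
}

static inline void mptcp_example_fill_rm(struct mptcp_rm_list *rm_list, u8 id)
{
	/* announce a single address id for removal; nr counts the valid
	 * entries in ids[], bounded by MPTCP_RM_IDS_MAX
	 */
	rm_list->ids[0] = id;
	rm_list->nr = 1;
}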

struct mptcp_out_options {
#if IS_ENABLED(CONFIG_MPTCP)
	u16 suboptions;
	struct mptcp_rm_list rm_list;
	u8 join_id;
	u8 backup;
	u8 reset_reason:4,
	   reset_transient:1,
	   csum_reqd:1,
	   allow_join_id0:1;
	union {
		struct {
			u64 sndr_key;
			u64 rcvr_key;
			u64 data_seq;
			u32 subflow_seq;
			u16 data_len;
			__sum16 csum;
		};
		struct {
			struct mptcp_addr_info addr;
			u64 ahmac;
		};
		struct {
			struct mptcp_ext ext_copy;
			u64 fail_seq;
		};
		struct {
			u32 nonce;
			u32 token;
			u64 thmac;
			u8 hmac[MPTCPOPT_HMAC_LEN];
		};
	};
#endif
};
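
/*
 * Illustrative sketch: mptcp_out_options is a scratch area filled by the
 * option-writing code, and only the union member selected by the
 * OPTION_MPTCP_* flags in ->suboptions is meaningful (those flags are
 * defined in net/mptcp/protocol.h, not here).  The helper below only
 * fills the ADD_ADDR payload; its name is made up for the example.
 */
#if IS_ENABLED(CONFIG_MPTCP)
static inline void mptcp_example_fill_add_addr(struct mptcp_out_options *opts,
					       const struct mptcp_addr_info *info,
					       u64 ahmac)
{
	/* the caller is assumed to also set the matching ->suboptions flag */
	opts->addr = *info;
	opts->ahmac = ahmac;
}
#endif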

#define MPTCP_SCHED_NAME_MAX	16
#define MPTCP_SCHED_MAX		128
#define MPTCP_SCHED_BUF_MAX	(MPTCP_SCHED_NAME_MAX * MPTCP_SCHED_MAX)

#define MPTCP_SUBFLOWS_MAX	8

struct mptcp_sched_data {
	u8	subflows;
	struct mptcp_subflow_context *contexts[MPTCP_SUBFLOWS_MAX];
};

struct mptcp_sched_ops {
	int (*get_send)(struct mptcp_sock *msk,
			struct mptcp_sched_data *data);
	int (*get_retrans)(struct mptcp_sock *msk,
			   struct mptcp_sched_data *data);

	char			name[MPTCP_SCHED_NAME_MAX];
	struct module		*owner;
	struct list_head	list;

	void (*init)(struct mptcp_sock *msk);
	void (*release)(struct mptcp_sock *msk);
} ____cacheline_aligned_in_smp;
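
/*
 * Illustrative sketch of a minimal scheduler built on mptcp_sched_ops.
 * Registration helpers (e.g. mptcp_register_scheduler()) and
 * mptcp_subflow_set_scheduled() are declared in net/mptcp/protocol.h
 * rather than in this header, so they are only mentioned in comments;
 * the "example" names below are made up for the sketch.
 */
static int mptcp_example_get_send(struct mptcp_sock *msk,
				  struct mptcp_sched_data *data)
{
	/* a real scheduler selects among the msk's subflows (exposed via
	 * @data) and marks its choice, e.g. with
	 * mptcp_subflow_set_scheduled(); returning 0 reports success
	 */
	return 0;
}

static struct mptcp_sched_ops mptcp_example_sched __maybe_unused = {
	.get_send	= mptcp_example_get_send,
	.get_retrans	= mptcp_example_get_send,
	.name		= "example",
	/* .owner = THIS_MODULE and the register/unregister calls belong
	 * in the module init/exit of a real implementation
	 */
};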

#define MPTCP_PM_NAME_MAX	16
#define MPTCP_PM_MAX		128
#define MPTCP_PM_BUF_MAX	(MPTCP_PM_NAME_MAX * MPTCP_PM_MAX)

struct mptcp_pm_ops {
	char			name[MPTCP_PM_NAME_MAX];
	struct module		*owner;
	struct list_head	list;

	void (*init)(struct mptcp_sock *msk);
	void (*release)(struct mptcp_sock *msk);
} ____cacheline_aligned_in_smp;

#ifdef CONFIG_MPTCP
void mptcp_init(void);

static inline bool sk_is_mptcp(const struct sock *sk)
{
	return tcp_sk(sk)->is_mptcp;
}

static inline bool rsk_is_mptcp(const struct request_sock *req)
{
	return tcp_rsk(req)->is_mptcp;
}

static inline bool rsk_drop_req(const struct request_sock *req)
{
	return tcp_rsk(req)->is_mptcp && tcp_rsk(req)->drop_req;
}
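
/*
 * Illustrative sketch: the three helpers above let generic TCP code stay
 * MPTCP-agnostic.  A typical caller simply guards MPTCP-only work, as in
 * this made-up helper:
 */
static inline void mptcp_example_update(struct sock *sk)
{
	if (!sk_is_mptcp(sk))
		return;		/* plain TCP socket, nothing to do */

	/* MPTCP-specific handling of the subflow would go here */
}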

void mptcp_space(const struct sock *ssk, int *space, int *full_space);
bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
		       unsigned int *size, struct mptcp_out_options *opts);
bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
			  struct mptcp_out_options *opts);
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
			       unsigned int *size, unsigned int remaining,
			       struct mptcp_out_options *opts);
bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);

void mptcp_write_options(struct tcphdr *th, __be32 *ptr, struct tcp_sock *tp,
			 struct mptcp_out_options *opts);

void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info);

/* move the skb extension ownership, with the assumption that 'to' is
 * newly allocated
 */
static inline void mptcp_skb_ext_move(struct sk_buff *to,
				      struct sk_buff *from)
{
	if (!skb_ext_exist(from, SKB_EXT_MPTCP))
		return;

	if (WARN_ON_ONCE(to->active_extensions))
		skb_ext_put(to);

	to->active_extensions = from->active_extensions;
	to->extensions = from->extensions;
	from->active_extensions = 0;
}

static inline void mptcp_skb_ext_copy(struct sk_buff *to,
				      struct sk_buff *from)
{
	struct mptcp_ext *from_ext;

	from_ext = skb_ext_find(from, SKB_EXT_MPTCP);
	if (!from_ext)
		return;

	from_ext->frozen = 1;
	skb_ext_copy(to, from);
}

static inline bool mptcp_ext_matches(const struct mptcp_ext *to_ext,
				     const struct mptcp_ext *from_ext)
{
	/* MPTCP always clears the ext when adding it to the skb, so
	 * holes do not bother us here
	 */
	return !from_ext ||
	       (to_ext && from_ext &&
	        !memcmp(from_ext, to_ext, sizeof(struct mptcp_ext)));
}

/* check if skbs can be collapsed.
 * MPTCP collapse is allowed if neither @to nor @from carries an MPTCP data
 * mapping, or if the extension of @to is the same as @from.
 * Collapsing is not possible if @to lacks an extension, but @from carries one.
 */
static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
					  const struct sk_buff *from)
{
	return mptcp_ext_matches(skb_ext_find(to, SKB_EXT_MPTCP),
				 skb_ext_find(from, SKB_EXT_MPTCP));
}
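
/*
 * Illustrative sketch: a coalescing path can use the helper above as an
 * early bail-out, loosely modeled on tcp_try_coalesce() in
 * net/ipv4/tcp_input.c; the function name is made up.
 */
static inline bool mptcp_example_try_coalesce(const struct sk_buff *to,
					      const struct sk_buff *from)
{
	/* never merge skbs carrying different MPTCP data mappings */
	if (!mptcp_skb_can_collapse(to, from))
		return false;

	/* the size/flags checks of the real coalescing code would follow */
	return true;
}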

void mptcp_seq_show(struct seq_file *seq);
int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb);
struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
					       struct sock *sk_listener,
					       bool attach_listener);

__be32 mptcp_get_reset_option(const struct sk_buff *skb);

static inline __be32 mptcp_reset_option(const struct sk_buff *skb)
{
	if (skb_ext_exist(skb, SKB_EXT_MPTCP))
		return mptcp_get_reset_option(skb);

	return htonl(0u);
}
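
/*
 * Illustrative sketch: mptcp_reset_option() returns either the complete
 * 32-bit MP_TCPRST option word or 0 when the skb carries no MPTCP
 * extension, so a reset path only needs to copy it when non-zero, much
 * like tcp_v4_send_reset() does.  The helper name is made up for the
 * example.
 */
static inline unsigned int mptcp_example_put_reset_opt(__be32 *optptr,
						       const struct sk_buff *skb)
{
	__be32 mrst = mptcp_reset_option(skb);

	if (!mrst)
		return 0;	/* plain TCP: no extra option bytes */

	*optptr = mrst;
	return sizeof(mrst);	/* option bytes written (4) */
}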

void mptcp_active_detect_blackhole(struct sock *sk, bool expired);
#else

static inline void mptcp_init(void)
{
}

static inline bool sk_is_mptcp(const struct sock *sk)
{
	return false;
}

static inline bool rsk_is_mptcp(const struct request_sock *req)
{
	return false;
}

static inline bool rsk_drop_req(const struct request_sock *req)
{
	return false;
}

static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
				     unsigned int *size,
				     struct mptcp_out_options *opts)
{
	return false;
}

static inline bool mptcp_synack_options(const struct request_sock *req,
					unsigned int *size,
					struct mptcp_out_options *opts)
{
	return false;
}

static inline bool mptcp_established_options(struct sock *sk,
					     struct sk_buff *skb,
					     unsigned int *size,
					     unsigned int remaining,
					     struct mptcp_out_options *opts)
{
	return false;
}

static inline bool mptcp_incoming_options(struct sock *sk,
					  struct sk_buff *skb)
{
	return true;
}

static inline void mptcp_skb_ext_move(struct sk_buff *to,
				      const struct sk_buff *from)
{
}

static inline void mptcp_skb_ext_copy(struct sk_buff *to,
				      struct sk_buff *from)
{
}

static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
					  const struct sk_buff *from)
{
	return true;
}

static inline void mptcp_space(const struct sock *ssk, int *s, int *fs) { }
static inline void mptcp_seq_show(struct seq_file *seq) { }

static inline int mptcp_subflow_init_cookie_req(struct request_sock *req,
						const struct sock *sk_listener,
						struct sk_buff *skb)
{
	return 0; /* TCP fallback */
}

static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
							     struct sock *sk_listener,
							     bool attach_listener)
{
	return NULL;
}

static inline __be32 mptcp_reset_option(const struct sk_buff *skb) { return htonl(0u); }

static inline void mptcp_active_detect_blackhole(struct sock *sk, bool expired) { }
#endif /* CONFIG_MPTCP */

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
int mptcpv6_init(void);
void mptcpv6_handle_mapped(struct sock *sk, bool mapped);
#elif IS_ENABLED(CONFIG_IPV6)
static inline int mptcpv6_init(void) { return 0; }
static inline void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { }
#endif

#if defined(CONFIG_MPTCP) && defined(CONFIG_BPF_SYSCALL)
struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk);
#else
static inline struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk) { return NULL; }
#endif

#if !IS_ENABLED(CONFIG_MPTCP)
struct mptcp_sock { };
#endif

#endif /* __NET_MPTCP_H */