/* SPDX-License-Identifier: GPL-2.0 */
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#ifndef __MPTCP_PROTOCOL_H
#define __MPTCP_PROTOCOL_H

#include <linux/random.h>
#include <net/tcp.h>
#include <net/inet_connection_sock.h>
#include <uapi/linux/mptcp.h>

#define MPTCP_SUPPORTED_VERSION	1

/* MPTCP option bits */
#define OPTION_MPTCP_MPC_SYN	BIT(0)
#define OPTION_MPTCP_MPC_SYNACK	BIT(1)
#define OPTION_MPTCP_MPC_ACK	BIT(2)
#define OPTION_MPTCP_MPJ_SYN	BIT(3)
#define OPTION_MPTCP_MPJ_SYNACK	BIT(4)
#define OPTION_MPTCP_MPJ_ACK	BIT(5)
#define OPTION_MPTCP_ADD_ADDR	BIT(6)
#define OPTION_MPTCP_RM_ADDR	BIT(7)
#define OPTION_MPTCP_FASTCLOSE	BIT(8)
#define OPTION_MPTCP_PRIO	BIT(9)
#define OPTION_MPTCP_RST	BIT(10)
#define OPTION_MPTCP_DSS	BIT(11)
#define OPTION_MPTCP_FAIL	BIT(12)

/* MPTCP option subtypes */
#define MPTCPOPT_MP_CAPABLE	0
#define MPTCPOPT_MP_JOIN	1
#define MPTCPOPT_DSS		2
#define MPTCPOPT_ADD_ADDR	3
#define MPTCPOPT_RM_ADDR	4
#define MPTCPOPT_MP_PRIO	5
#define MPTCPOPT_MP_FAIL	6
#define MPTCPOPT_MP_FASTCLOSE	7
#define MPTCPOPT_RST		8

/* MPTCP suboption lengths */
#define TCPOLEN_MPTCP_MPC_SYN		4
#define TCPOLEN_MPTCP_MPC_SYNACK	12
#define TCPOLEN_MPTCP_MPC_ACK		20
#define TCPOLEN_MPTCP_MPC_ACK_DATA	22
#define TCPOLEN_MPTCP_MPJ_SYN		12
#define TCPOLEN_MPTCP_MPJ_SYNACK	16
#define TCPOLEN_MPTCP_MPJ_ACK		24
#define TCPOLEN_MPTCP_DSS_BASE		4
#define TCPOLEN_MPTCP_DSS_ACK32		4
#define TCPOLEN_MPTCP_DSS_ACK64		8
#define TCPOLEN_MPTCP_DSS_MAP32		10
#define TCPOLEN_MPTCP_DSS_MAP64		14
#define TCPOLEN_MPTCP_DSS_CHECKSUM	2
#define TCPOLEN_MPTCP_ADD_ADDR		16
#define TCPOLEN_MPTCP_ADD_ADDR_PORT	18
#define TCPOLEN_MPTCP_ADD_ADDR_BASE	8
#define TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT	10
#define TCPOLEN_MPTCP_ADD_ADDR6		28
#define TCPOLEN_MPTCP_ADD_ADDR6_PORT	30
#define TCPOLEN_MPTCP_ADD_ADDR6_BASE	20
#define TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT	22
#define TCPOLEN_MPTCP_PORT_LEN		2
#define TCPOLEN_MPTCP_PORT_ALIGN	2
#define TCPOLEN_MPTCP_RM_ADDR_BASE	3
#define TCPOLEN_MPTCP_PRIO		3
#define TCPOLEN_MPTCP_PRIO_ALIGN	4
#define TCPOLEN_MPTCP_FASTCLOSE		12
#define TCPOLEN_MPTCP_RST		4
#define TCPOLEN_MPTCP_FAIL		12

#define TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM	(TCPOLEN_MPTCP_DSS_CHECKSUM + TCPOLEN_MPTCP_MPC_ACK_DATA)

/* MPTCP MP_JOIN flags */
#define MPTCPOPT_BACKUP		BIT(0)
#define MPTCPOPT_HMAC_LEN	20
#define MPTCPOPT_THMAC_LEN	8

/* MPTCP MP_CAPABLE flags */
#define MPTCP_VERSION_MASK	(0x0F)
#define MPTCP_CAP_CHECKSUM_REQD	BIT(7)
#define MPTCP_CAP_EXTENSIBILITY	BIT(6)
#define MPTCP_CAP_DENY_JOIN_ID0	BIT(5)
#define MPTCP_CAP_HMAC_SHA256	BIT(0)
#define MPTCP_CAP_FLAG_MASK	(0x1F)

/* MPTCP DSS flags */
#define MPTCP_DSS_DATA_FIN	BIT(4)
#define MPTCP_DSS_DSN64		BIT(3)
#define MPTCP_DSS_HAS_MAP	BIT(2)
#define MPTCP_DSS_ACK64		BIT(1)
#define MPTCP_DSS_HAS_ACK	BIT(0)
#define MPTCP_DSS_FLAG_MASK	(0x1F)

/* MPTCP ADD_ADDR flags */
#define MPTCP_ADDR_ECHO		BIT(0)

/* MPTCP MP_PRIO flags */
#define MPTCP_PRIO_BKUP		BIT(0)

/* MPTCP TCPRST flags */
#define MPTCP_RST_TRANSIENT	BIT(0)

/* MPTCP socket flags */
#define MPTCP_DATA_READY	0
#define MPTCP_NOSPACE		1
#define MPTCP_WORK_RTX		2
#define MPTCP_WORK_EOF		3
#define MPTCP_FALLBACK_DONE	4
#define MPTCP_WORK_CLOSE_SUBFLOW 5
#define MPTCP_PUSH_PENDING	6
#define MPTCP_CLEAN_UNA		7
#define MPTCP_ERROR_REPORT	8
#define MPTCP_RETRANSMIT	9
#define MPTCP_WORK_SYNC_SETSOCKOPT 10
#define MPTCP_CONNECTED		11

static inline bool before64(__u64 seq1, __u64 seq2)
{
	return (__s64)(seq1 - seq2) < 0;
}

#define after64(seq2, seq1)	before64(seq1, seq2)
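
/* 64-bit serial-number arithmetic: the unsigned subtraction wraps, so
 * ordering is preserved across sequence-space overflow. Illustrative
 * check (not part of the original header):
 *
 *	before64(0xfffffffffffffffeULL, 1ULL) is true, since
 *	(__s64)(0xfffffffffffffffeULL - 1ULL) < 0;
 *	equivalently, after64(1ULL, 0xfffffffffffffffeULL) is true.
 */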

struct mptcp_options_received {
	u64	sndr_key;
	u64	rcvr_key;
	u64	data_ack;
	u64	data_seq;
	u32	subflow_seq;
	u16	data_len;
	__sum16	csum;
	u16	mp_capable : 1,
		mp_join : 1,
		fastclose : 1,
		reset : 1,
		dss : 1,
		add_addr : 1,
		rm_addr : 1,
		mp_prio : 1,
		mp_fail : 1,
		echo : 1,
		csum_reqd : 1,
		backup : 1,
		deny_join_id0 : 1;
	u32	token;
	u32	nonce;
	u64	thmac;
	u8	hmac[MPTCPOPT_HMAC_LEN];
	u8	join_id;
	u8	use_map:1,
		dsn64:1,
		data_fin:1,
		use_ack:1,
		ack64:1,
		mpc_map:1,
		__unused:2;
	struct mptcp_addr_info addr;
	struct mptcp_rm_list rm_list;
	u64	ahmac;
	u8	reset_reason:4;
	u8	reset_transient:1;
	u64	fail_seq;
};

static inline __be32 mptcp_option(u8 subopt, u8 len, u8 nib, u8 field)
{
	return htonl((TCPOPT_MPTCP << 24) | (len << 16) | (subopt << 12) |
		     ((nib & 0xF) << 8) | field);
}
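
/* Packs the TCP option kind, length, MPTCP subtype, a 4-bit nibble and an
 * 8-bit field into one network-order word. Worked example (illustrative,
 * relying on TCPOPT_MPTCP being 30/0x1e in <net/tcp.h>):
 *
 *	mptcp_option(MPTCPOPT_MP_PRIO, TCPOLEN_MPTCP_PRIO, 0, MPTCP_PRIO_BKUP)
 *		== htonl(0x1e035001)
 *	kind = 30, len = 3, subtype = 5, nibble = 0, flags = MPTCP_PRIO_BKUP
 */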
173 
174 enum mptcp_pm_status {
175 	MPTCP_PM_ADD_ADDR_RECEIVED,
176 	MPTCP_PM_ADD_ADDR_SEND_ACK,
177 	MPTCP_PM_RM_ADDR_RECEIVED,
178 	MPTCP_PM_ESTABLISHED,
179 	MPTCP_PM_ALREADY_ESTABLISHED,	/* persistent status, set after ESTABLISHED event */
180 	MPTCP_PM_SUBFLOW_ESTABLISHED,
181 };
182 
183 enum mptcp_addr_signal_status {
184 	MPTCP_ADD_ADDR_SIGNAL,
185 	MPTCP_ADD_ADDR_ECHO,
186 	MPTCP_RM_ADDR_SIGNAL,
187 };
188 
189 struct mptcp_pm_data {
190 	struct mptcp_addr_info local;
191 	struct mptcp_addr_info remote;
192 	struct list_head anno_list;
193 
194 	spinlock_t	lock;		/*protects the whole PM data */
195 
196 	u8		addr_signal;
197 	bool		server_side;
198 	bool		work_pending;
199 	bool		accept_addr;
200 	bool		accept_subflow;
201 	bool		remote_deny_join_id0;
202 	u8		add_addr_signaled;
203 	u8		add_addr_accepted;
204 	u8		local_addr_used;
205 	u8		subflows;
206 	u8		status;
207 	struct mptcp_rm_list rm_list_tx;
208 	struct mptcp_rm_list rm_list_rx;
209 };
210 
211 struct mptcp_data_frag {
212 	struct list_head list;
213 	u64 data_seq;
214 	u16 data_len;
215 	u16 offset;
216 	u16 overhead;
217 	u16 already_sent;
218 	struct page *page;
219 };
220 
221 /* MPTCP connection sock */
222 struct mptcp_sock {
223 	/* inet_connection_sock must be the first member */
224 	struct inet_connection_sock sk;
225 	u64		local_key;
226 	u64		remote_key;
227 	u64		write_seq;
228 	u64		snd_nxt;
229 	u64		ack_seq;
230 	u64		rcv_wnd_sent;
231 	u64		rcv_data_fin_seq;
232 	int		wmem_reserved;
233 	struct sock	*last_snd;
234 	int		snd_burst;
235 	int		old_wspace;
236 	u64		recovery_snd_nxt;	/* in recovery mode accept up to this seq;
237 						 * recovery related fields are under data_lock
238 						 * protection
239 						 */
240 	u64		snd_una;
241 	u64		wnd_end;
242 	unsigned long	timer_ival;
243 	u32		token;
244 	int		rmem_released;
245 	unsigned long	flags;
246 	bool		recovery;		/* closing subflow write queue reinjected */
247 	bool		can_ack;
248 	bool		fully_established;
249 	bool		rcv_data_fin;
250 	bool		snd_data_fin_enable;
251 	bool		rcv_fastclose;
252 	bool		use_64bit_ack; /* Set when we received a 64-bit DSN */
253 	bool		csum_enabled;
254 	spinlock_t	join_list_lock;
255 	struct work_struct work;
256 	struct sk_buff  *ooo_last_skb;
257 	struct rb_root  out_of_order_queue;
258 	struct sk_buff_head receive_queue;
259 	int		tx_pending_data;
260 	struct list_head conn_list;
261 	struct list_head rtx_queue;
262 	struct mptcp_data_frag *first_pending;
263 	struct list_head join_list;
264 	struct socket	*subflow; /* outgoing connect/listener/!mp_capable */
265 	struct sock	*first;
266 	struct mptcp_pm_data	pm;
267 	struct {
268 		u32	space;	/* bytes copied in last measurement window */
269 		u32	copied; /* bytes copied in this measurement window */
270 		u64	time;	/* start time of measurement window */
271 		u64	rtt_us; /* last maximum rtt of subflows */
272 	} rcvq_space;
273 
274 	u32 setsockopt_seq;
275 	char		ca_name[TCP_CA_NAME_MAX];
276 };
277 
278 #define mptcp_lock_sock(___sk, cb) do {					\
279 	struct sock *__sk = (___sk); /* silence macro reuse warning */	\
280 	might_sleep();							\
281 	spin_lock_bh(&__sk->sk_lock.slock);				\
282 	if (__sk->sk_lock.owned)					\
283 		__lock_sock(__sk);					\
284 	cb;								\
285 	__sk->sk_lock.owned = 1;					\
286 	spin_unlock(&__sk->sk_lock.slock);				\
287 	mutex_acquire(&__sk->sk_lock.dep_map, 0, 0, _RET_IP_);		\
288 	local_bh_enable();						\
289 } while (0)
290 
291 #define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
292 #define mptcp_data_unlock(sk) spin_unlock_bh(&(sk)->sk_lock.slock)
293 
294 #define mptcp_for_each_subflow(__msk, __subflow)			\
295 	list_for_each_entry(__subflow, &((__msk)->conn_list), node)
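
/* Typical traversal (illustrative sketch, msk socket lock held;
 * do_something() is a hypothetical helper):
 *
 *	struct mptcp_subflow_context *subflow;
 *
 *	mptcp_for_each_subflow(msk, subflow) {
 *		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 *
 *		if (mptcp_subflow_active(subflow))
 *			do_something(ssk);
 *	}
 */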

static inline void msk_owned_by_me(const struct mptcp_sock *msk)
{
	sock_owned_by_me((const struct sock *)msk);
}

static inline struct mptcp_sock *mptcp_sk(const struct sock *sk)
{
	return (struct mptcp_sock *)sk;
}

/* The msk socket doesn't use the backlog; also account for the memory
 * pending bulk release (rmem_released)
 */
static inline int __mptcp_rmem(const struct sock *sk)
{
	return atomic_read(&sk->sk_rmem_alloc) - READ_ONCE(mptcp_sk(sk)->rmem_released);
}

static inline int __mptcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - __mptcp_rmem(sk));
}

static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk)
{
	const struct mptcp_sock *msk = mptcp_sk(sk);

	return READ_ONCE(msk->first_pending);
}

static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *cur;

	cur = msk->first_pending;
	return list_is_last(&cur->list, &msk->rtx_queue) ? NULL :
						     list_next_entry(cur, list);
}

static inline struct mptcp_data_frag *mptcp_pending_tail(const struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (!msk->first_pending)
		return NULL;

	if (WARN_ON_ONCE(list_empty(&msk->rtx_queue)))
		return NULL;

	return list_last_entry(&msk->rtx_queue, struct mptcp_data_frag, list);
}

static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (msk->snd_una == READ_ONCE(msk->snd_nxt))
		return NULL;

	return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
}

struct csum_pseudo_header {
	__be64 data_seq;
	__be32 subflow_seq;
	__be16 data_len;
	__sum16 csum;
};
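
/* The DSS checksum (RFC 8684) covers this pseudo-header followed by the
 * mapped payload. Illustrative computation sketch, not the verbatim
 * implementation:
 *
 *	struct csum_pseudo_header header;
 *	__wsum csum;
 *
 *	header.data_seq = cpu_to_be64(data_seq);
 *	header.subflow_seq = htonl(subflow_seq);
 *	header.data_len = htons(data_len);
 *	header.csum = 0;
 *	csum = csum_partial(&header, sizeof(header), data_csum);
 *	... then fold with csum_fold(csum) to get the 16-bit option value.
 */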

struct mptcp_subflow_request_sock {
	struct	tcp_request_sock sk;
	u16	mp_capable : 1,
		mp_join : 1,
		backup : 1,
		csum_reqd : 1,
		allow_join_id0 : 1;
	u8	local_id;
	u8	remote_id;
	u64	local_key;
	u64	idsn;
	u32	token;
	u32	ssn_offset;
	u64	thmac;
	u32	local_nonce;
	u32	remote_nonce;
	struct mptcp_sock	*msk;
	struct hlist_nulls_node token_node;
};

static inline struct mptcp_subflow_request_sock *
mptcp_subflow_rsk(const struct request_sock *rsk)
{
	return (struct mptcp_subflow_request_sock *)rsk;
}

enum mptcp_data_avail {
	MPTCP_SUBFLOW_NODATA,
	MPTCP_SUBFLOW_DATA_AVAIL,
};

struct mptcp_delegated_action {
	struct napi_struct napi;
	struct list_head head;
};

DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);

#define MPTCP_DELEGATE_SEND		0

/* MPTCP subflow context */
struct mptcp_subflow_context {
	struct	list_head node;	/* conn_list of subflows */
	u64	local_key;
	u64	remote_key;
	u64	idsn;
	u64	map_seq;
	u32	snd_isn;
	u32	token;
	u32	rel_write_seq;
	u32	map_subflow_seq;
	u32	ssn_offset;
	u32	map_data_len;
	__wsum	map_data_csum;
	u32	map_csum_len;
	u32	request_mptcp : 1,  /* send MP_CAPABLE */
		request_join : 1,   /* send MP_JOIN */
		request_bkup : 1,
		mp_capable : 1,	    /* remote is MPTCP capable */
		mp_join : 1,	    /* remote is JOINing */
		fully_established : 1,	    /* path validated */
		pm_notified : 1,    /* PM hook called for established status */
		conn_finished : 1,
		map_valid : 1,
		map_csum_reqd : 1,
		map_data_fin : 1,
		mpc_map : 1,
		backup : 1,
		send_mp_prio : 1,
		send_mp_fail : 1,
		rx_eof : 1,
		can_ack : 1,        /* only after processing the remote key */
		disposable : 1,	    /* ctx can be freed at ulp release time */
		stale : 1;	    /* unable to snd/rcv data, do not use for xmit */
	enum mptcp_data_avail data_avail;
	u32	remote_nonce;
	u64	thmac;
	u32	local_nonce;
	u32	remote_token;
	u8	hmac[MPTCPOPT_HMAC_LEN];
	u8	local_id;
	u8	remote_id;
	u8	reset_seen:1;
	u8	reset_transient:1;
	u8	reset_reason:4;
	u8	stale_count;

	long	delegated_status;
	struct	list_head delegated_node;   /* link into delegated_action, protected by local BH */

	u32	setsockopt_seq;
	u32	stale_rcv_tstamp;

	struct	sock *tcp_sock;	    /* tcp sk backpointer */
	struct	sock *conn;	    /* parent mptcp_sock */
	const	struct inet_connection_sock_af_ops *icsk_af_ops;
	void	(*tcp_data_ready)(struct sock *sk);
	void	(*tcp_state_change)(struct sock *sk);
	void	(*tcp_write_space)(struct sock *sk);
	void	(*tcp_error_report)(struct sock *sk);

	struct	rcu_head rcu;
};

static inline struct mptcp_subflow_context *
mptcp_subflow_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code */
	return (__force struct mptcp_subflow_context *)icsk->icsk_ulp_data;
}

static inline struct sock *
mptcp_subflow_tcp_sock(const struct mptcp_subflow_context *subflow)
{
	return subflow->tcp_sock;
}

static inline u64
mptcp_subflow_get_map_offset(const struct mptcp_subflow_context *subflow)
{
	return tcp_sk(mptcp_subflow_tcp_sock(subflow))->copied_seq -
		      subflow->ssn_offset -
		      subflow->map_subflow_seq;
}

static inline u64
mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
{
	return subflow->map_seq + mptcp_subflow_get_map_offset(subflow);
}

static inline void mptcp_add_pending_subflow(struct mptcp_sock *msk,
					     struct mptcp_subflow_context *subflow)
{
	sock_hold(mptcp_subflow_tcp_sock(subflow));
	spin_lock_bh(&msk->join_list_lock);
	list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);
}

void mptcp_subflow_process_delegated(struct sock *ssk);

static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow)
{
	struct mptcp_delegated_action *delegated;
	bool schedule;

	/* The implied barrier pairs with mptcp_subflow_delegated_done(), and
	 * ensures the below list check sees list updates done prior to status
	 * bit changes
	 */
	if (!test_and_set_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
		/* still on delegated list from previous scheduling */
		if (!list_empty(&subflow->delegated_node))
			return;

		/* the caller held the subflow bh socket lock */
		lockdep_assert_in_softirq();

		delegated = this_cpu_ptr(&mptcp_delegated_actions);
		schedule = list_empty(&delegated->head);
		list_add_tail(&subflow->delegated_node, &delegated->head);
		sock_hold(mptcp_subflow_tcp_sock(subflow));
		if (schedule)
			napi_schedule(&delegated->napi);
	}
}

static inline struct mptcp_subflow_context *
mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
{
	struct mptcp_subflow_context *ret;

	if (list_empty(&delegated->head))
		return NULL;

	ret = list_first_entry(&delegated->head, struct mptcp_subflow_context, delegated_node);
	list_del_init(&ret->delegated_node);
	return ret;
}

static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow)
{
	return test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
}

static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow)
{
	/* pairs with mptcp_subflow_delegate(); ensures delegated_node is updated before
	 * touching the status bit
	 */
	smp_wmb();
	clear_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
}
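
/* The consumer side runs from the per-CPU NAPI poll. Illustrative sketch
 * of the drain loop (not the verbatim implementation; budget handling
 * elided). mptcp_subflow_process_delegated() is expected to clear the
 * status bit via mptcp_subflow_delegated_done():
 *
 *	struct mptcp_delegated_action *delegated;
 *	struct mptcp_subflow_context *subflow;
 *
 *	delegated = container_of(napi, struct mptcp_delegated_action, napi);
 *	while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
 *		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 *
 *		bh_lock_sock_nested(ssk);
 *		if (!sock_owned_by_user(ssk) &&
 *		    mptcp_subflow_has_delegated_action(subflow))
 *			mptcp_subflow_process_delegated(ssk);
 *		bh_unlock_sock(ssk);
 *		sock_put(ssk);
 *	}
 */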

int mptcp_is_enabled(const struct net *net);
unsigned int mptcp_get_add_addr_timeout(const struct net *net);
int mptcp_is_checksum_enabled(const struct net *net);
int mptcp_allow_join_id0(const struct net *net);
unsigned int mptcp_stale_loss_cnt(const struct net *net);
void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt);
bool __mptcp_retransmit_pending_data(struct sock *sk);
void __mptcp_push_pending(struct sock *sk, unsigned int flags);
bool mptcp_subflow_data_available(struct sock *sk);
void __init mptcp_subflow_init(void);
void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how);
void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
		     struct mptcp_subflow_context *subflow);
void mptcp_subflow_reset(struct sock *ssk);
void mptcp_sock_graft(struct sock *sk, struct socket *parent);
struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);

/* called with sk socket lock held */
int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote);
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock);
void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
			 struct sockaddr_storage *addr,
			 unsigned short family);

static inline bool __mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
	struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

	/* can't send if the JOIN hasn't completed yet, i.e. the subflow
	 * is not yet usable for MPTCP
	 */
	if (subflow->request_join && !subflow->fully_established)
		return false;

	/* only send if our side has not closed yet */
	return ((1 << ssk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT));
}

void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow);

bool mptcp_subflow_active(struct mptcp_subflow_context *subflow);

static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
					      struct mptcp_subflow_context *ctx)
{
	sk->sk_data_ready = ctx->tcp_data_ready;
	sk->sk_state_change = ctx->tcp_state_change;
	sk->sk_write_space = ctx->tcp_write_space;
	sk->sk_error_report = ctx->tcp_error_report;

	inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops;
}

static inline bool mptcp_has_another_subflow(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk), *tmp;
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	mptcp_for_each_subflow(msk, tmp) {
		if (tmp != subflow)
			return true;
	}

	return false;
}

void __init mptcp_proto_init(void);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
int __init mptcp_proto_v6_init(void);
#endif

struct sock *mptcp_sk_clone(const struct sock *sk,
			    const struct mptcp_options_received *mp_opt,
			    struct request_sock *req);
void mptcp_get_options(const struct sock *sk,
		       const struct sk_buff *skb,
		       struct mptcp_options_received *mp_opt);

void mptcp_finish_connect(struct sock *sk);
void __mptcp_set_connected(struct sock *sk);
static inline bool mptcp_is_fully_established(struct sock *sk)
{
	return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
	       READ_ONCE(mptcp_sk(sk)->fully_established);
}
void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk);
void mptcp_data_ready(struct sock *sk, struct sock *ssk);
bool mptcp_finish_join(struct sock *sk);
bool mptcp_schedule_work(struct sock *sk);
int mptcp_setsockopt(struct sock *sk, int level, int optname,
		     sockptr_t optval, unsigned int optlen);
int mptcp_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *option);

u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq);
static inline u64 mptcp_expand_seq(u64 old_seq, u64 cur_seq, bool use_64bit)
{
	if (use_64bit)
		return cur_seq;

	return __mptcp_expand_seq(old_seq, cur_seq);
}
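
/* Illustrative expansion (sketch): given a 64-bit reference old_seq of
 * 0x1fffffff0 and a received 32-bit cur_seq of 0x00000010, the expanded
 * result is 0x200000010 -- the 64-bit value nearest to old_seq whose low
 * 32 bits equal cur_seq, i.e. a wrap of the 32-bit space is inferred.
 */
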
void __mptcp_check_push(struct sock *sk, struct sock *ssk);
void __mptcp_data_acked(struct sock *sk);
void __mptcp_error_report(struct sock *sk);
void mptcp_subflow_eof(struct sock *sk);
bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit);
void __mptcp_flush_join_list(struct mptcp_sock *msk);
static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
{
	return READ_ONCE(msk->snd_data_fin_enable) &&
	       READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
}

static inline bool mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
{
	if ((sk->sk_userlocks & SOCK_SNDBUF_LOCK) || ssk->sk_sndbuf <= READ_ONCE(sk->sk_sndbuf))
		return false;

	WRITE_ONCE(sk->sk_sndbuf, ssk->sk_sndbuf);
	return true;
}

static inline void mptcp_write_space(struct sock *sk)
{
	if (sk_stream_is_writeable(sk)) {
		/* pairs with memory barrier in mptcp_poll */
		smp_mb();
		if (test_and_clear_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags))
			sk_stream_write_space(sk);
	}
}

void mptcp_destroy_common(struct mptcp_sock *msk);

#define MPTCP_TOKEN_MAX_RETRIES	4

void __init mptcp_token_init(void);
static inline void mptcp_token_init_request(struct request_sock *req)
{
	mptcp_subflow_rsk(req)->token_node.pprev = NULL;
}

int mptcp_token_new_request(struct request_sock *req);
void mptcp_token_destroy_request(struct request_sock *req);
int mptcp_token_new_connect(struct sock *sk);
void mptcp_token_accept(struct mptcp_subflow_request_sock *r,
			struct mptcp_sock *msk);
bool mptcp_token_exists(u32 token);
struct mptcp_sock *mptcp_token_get_sock(u32 token);
struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,
					 long *s_num);
void mptcp_token_destroy(struct mptcp_sock *msk);

void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn);

void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac);

void __init mptcp_pm_init(void);
void mptcp_pm_data_init(struct mptcp_sock *msk);
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side);
void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp);
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk);
void mptcp_pm_connection_closed(struct mptcp_sock *msk);
void mptcp_pm_subflow_established(struct mptcp_sock *msk);
void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id);
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr);
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
			      struct mptcp_addr_info *addr);
void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
			       const struct mptcp_rm_list *rm_list);
void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup);
int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
				 struct mptcp_addr_info *addr,
				 u8 bkup);
void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq);
void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk);
struct mptcp_pm_add_entry *
mptcp_pm_del_add_timer(struct mptcp_sock *msk,
		       struct mptcp_addr_info *addr, bool check_id);
struct mptcp_pm_add_entry *
mptcp_lookup_anno_list_by_saddr(struct mptcp_sock *msk,
				struct mptcp_addr_info *addr);
int mptcp_pm_get_flags_and_ifindex_by_id(struct net *net, unsigned int id,
					 u8 *flags, int *ifindex);

int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo);
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);

void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
		 const struct sock *ssk, gfp_t gfp);
void mptcp_event_addr_announced(const struct mptcp_sock *msk, const struct mptcp_addr_info *info);
void mptcp_event_addr_removed(const struct mptcp_sock *msk, u8 id);

static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
{
	return READ_ONCE(msk->pm.addr_signal) &
		(BIT(MPTCP_ADD_ADDR_SIGNAL) | BIT(MPTCP_ADD_ADDR_ECHO));
}

static inline bool mptcp_pm_should_add_signal_addr(struct mptcp_sock *msk)
{
	return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_SIGNAL);
}

static inline bool mptcp_pm_should_add_signal_echo(struct mptcp_sock *msk)
{
	return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_ECHO);
}

static inline bool mptcp_pm_should_rm_signal(struct mptcp_sock *msk)
{
	return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_RM_ADDR_SIGNAL);
}

static inline unsigned int mptcp_add_addr_len(int family, bool echo, bool port)
{
	u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE;

	if (family == AF_INET6)
		len = TCPOLEN_MPTCP_ADD_ADDR6_BASE;
	if (!echo)
		len += MPTCPOPT_THMAC_LEN;
	/* account for 2 trailing 'nop' options */
	if (port)
		len += TCPOLEN_MPTCP_PORT_LEN + TCPOLEN_MPTCP_PORT_ALIGN;

	return len;
}
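
/* Worked example (illustrative): a signalled (non-echo) IPv4 ADD_ADDR
 * carrying a port:
 *
 *	8 (base) + 8 (truncated HMAC) + 2 (port) + 2 (nop padding) == 20
 *
 * while an IPv6 echo without port is just the 20-byte base.
 */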

static inline int mptcp_rm_addr_len(const struct mptcp_rm_list *rm_list)
{
	if (rm_list->nr == 0 || rm_list->nr > MPTCP_RM_IDS_MAX)
		return -EINVAL;

	return TCPOLEN_MPTCP_RM_ADDR_BASE + roundup(rm_list->nr - 1, 4) + 1;
}
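
/* Worked example (illustrative): the option carries 3 header bytes plus
 * one address id per entry, rounded up to 32-bit words:
 *
 *	nr == 1: 3 + roundup(0, 4) + 1 == 4 bytes
 *	nr == 2: 3 + roundup(1, 4) + 1 == 8 bytes
 */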

bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, struct sk_buff *skb,
			      unsigned int opt_size, unsigned int remaining,
			      struct mptcp_addr_info *addr, bool *echo,
			      bool *port, bool *drop_other_suboptions);
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     struct mptcp_rm_list *rm_list);
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);

void __init mptcp_pm_nl_init(void);
void mptcp_pm_nl_data_init(struct mptcp_sock *msk);
void mptcp_pm_nl_work(struct mptcp_sock *msk);
void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
				     const struct mptcp_rm_list *rm_list);
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
unsigned int mptcp_pm_get_add_addr_signal_max(struct mptcp_sock *msk);
unsigned int mptcp_pm_get_add_addr_accept_max(struct mptcp_sock *msk);
unsigned int mptcp_pm_get_subflows_max(struct mptcp_sock *msk);
unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk);

void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
void mptcp_sockopt_sync_all(struct mptcp_sock *msk);

static inline struct mptcp_ext *mptcp_get_ext(const struct sk_buff *skb)
{
	return (struct mptcp_ext *)skb_ext_find(skb, SKB_EXT_MPTCP);
}

void mptcp_diag_subflow_init(struct tcp_ulp_ops *ops);

static inline bool __mptcp_check_fallback(const struct mptcp_sock *msk)
{
	return test_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}

static inline bool mptcp_check_fallback(const struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	return __mptcp_check_fallback(msk);
}

static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
{
	if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) {
		pr_debug("TCP fallback already done (msk=%p)", msk);
		return;
	}
	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}

static inline void mptcp_do_fallback(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	__mptcp_do_fallback(msk);
}

#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)

static inline bool subflow_simultaneous_connect(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	return sk->sk_state == TCP_ESTABLISHED &&
	       !mptcp_sk(parent)->pm.server_side &&
	       !subflow->conn_finished;
}

#ifdef CONFIG_SYN_COOKIES
void subflow_init_req_cookie_join_save(const struct mptcp_subflow_request_sock *subflow_req,
				       struct sk_buff *skb);
bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subflow_req,
					struct sk_buff *skb);
void __init mptcp_join_cookie_init(void);
#else
static inline void
subflow_init_req_cookie_join_save(const struct mptcp_subflow_request_sock *subflow_req,
				  struct sk_buff *skb) {}
static inline bool
mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subflow_req,
				   struct sk_buff *skb)
{
	return false;
}

static inline void mptcp_join_cookie_init(void) {}
#endif

#endif /* __MPTCP_PROTOCOL_H */