xref: /linux/net/mptcp/pm.c (revision ed30aef3c864f99111e16d4ea5cf29488d99a278)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3  *
4  * Copyright (c) 2019, Intel Corporation.
5  */
6 #define pr_fmt(fmt) "MPTCP: " fmt
7 
8 #include <linux/kernel.h>
9 #include <net/tcp.h>
10 #include <net/mptcp.h>
11 #include "protocol.h"
12 
13 /* path manager command handlers */
14 
15 int mptcp_pm_announce_addr(struct mptcp_sock *msk,
16 			   const struct mptcp_addr_info *addr,
17 			   bool echo)
18 {
19 	pr_debug("msk=%p, local_id=%d", msk, addr->id);
20 
21 	msk->pm.local = *addr;
22 	WRITE_ONCE(msk->pm.add_addr_echo, echo);
23 	WRITE_ONCE(msk->pm.add_addr_signal, true);
24 	return 0;
25 }
26 
27 int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
28 {
29 	pr_debug("msk=%p, local_id=%d", msk, local_id);
30 
31 	msk->pm.rm_id = local_id;
32 	WRITE_ONCE(msk->pm.rm_addr_signal, true);
33 	return 0;
34 }
35 
36 int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id)
37 {
38 	pr_debug("msk=%p, local_id=%d", msk, local_id);
39 
40 	spin_lock_bh(&msk->pm.lock);
41 	mptcp_pm_nl_rm_subflow_received(msk, local_id);
42 	spin_unlock_bh(&msk->pm.lock);
43 	return 0;
44 }
45 
46 /* path manager event handlers */
47 
48 void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
49 {
50 	struct mptcp_pm_data *pm = &msk->pm;
51 
52 	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
53 
54 	WRITE_ONCE(pm->server_side, server_side);
55 }
56 
57 bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
58 {
59 	struct mptcp_pm_data *pm = &msk->pm;
60 	int ret = 0;
61 
62 	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
63 		 pm->subflows_max, READ_ONCE(pm->accept_subflow));
64 
65 	/* try to avoid acquiring the lock below */
66 	if (!READ_ONCE(pm->accept_subflow))
67 		return false;
68 
69 	spin_lock_bh(&pm->lock);
70 	if (READ_ONCE(pm->accept_subflow)) {
71 		ret = pm->subflows < pm->subflows_max;
72 		if (ret && ++pm->subflows == pm->subflows_max)
73 			WRITE_ONCE(pm->accept_subflow, false);
74 	}
75 	spin_unlock_bh(&pm->lock);
76 
77 	return ret;
78 }
79 
80 /* return true if the new status bit is currently cleared, that is, this event
81  * can be server, eventually by an already scheduled work
82  */
83 static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
84 				   enum mptcp_pm_status new_status)
85 {
86 	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
87 		 BIT(new_status));
88 	if (msk->pm.status & BIT(new_status))
89 		return false;
90 
91 	msk->pm.status |= BIT(new_status);
92 	mptcp_schedule_work((struct sock *)msk);
93 	return true;
94 }
95 
96 void mptcp_pm_fully_established(struct mptcp_sock *msk)
97 {
98 	struct mptcp_pm_data *pm = &msk->pm;
99 
100 	pr_debug("msk=%p", msk);
101 
102 	/* try to avoid acquiring the lock below */
103 	if (!READ_ONCE(pm->work_pending))
104 		return;
105 
106 	spin_lock_bh(&pm->lock);
107 
108 	if (READ_ONCE(pm->work_pending))
109 		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);
110 
111 	spin_unlock_bh(&pm->lock);
112 }
113 
/* No teardown work here yet: connection close is only traced. */
void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}
118 
119 void mptcp_pm_subflow_established(struct mptcp_sock *msk,
120 				  struct mptcp_subflow_context *subflow)
121 {
122 	struct mptcp_pm_data *pm = &msk->pm;
123 
124 	pr_debug("msk=%p", msk);
125 
126 	if (!READ_ONCE(pm->work_pending))
127 		return;
128 
129 	spin_lock_bh(&pm->lock);
130 
131 	if (READ_ONCE(pm->work_pending))
132 		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);
133 
134 	spin_unlock_bh(&pm->lock);
135 }
136 
/* No per-subflow cleanup here yet: the event is only traced. */
void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}
141 
142 void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
143 				const struct mptcp_addr_info *addr)
144 {
145 	struct mptcp_pm_data *pm = &msk->pm;
146 
147 	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
148 		 READ_ONCE(pm->accept_addr));
149 
150 	spin_lock_bh(&pm->lock);
151 
152 	if (!READ_ONCE(pm->accept_addr))
153 		mptcp_pm_announce_addr(msk, addr, true);
154 	else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
155 		pm->remote = *addr;
156 
157 	spin_unlock_bh(&pm->lock);
158 }
159 
160 void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
161 {
162 	struct mptcp_pm_data *pm = &msk->pm;
163 
164 	pr_debug("msk=%p remote_id=%d", msk, rm_id);
165 
166 	spin_lock_bh(&pm->lock);
167 	mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
168 	pm->rm_id = rm_id;
169 	spin_unlock_bh(&pm->lock);
170 }
171 
172 /* path manager helpers */
173 
174 bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
175 			      struct mptcp_addr_info *saddr, bool *echo)
176 {
177 	int ret = false;
178 
179 	spin_lock_bh(&msk->pm.lock);
180 
181 	/* double check after the lock is acquired */
182 	if (!mptcp_pm_should_add_signal(msk))
183 		goto out_unlock;
184 
185 	*echo = READ_ONCE(msk->pm.add_addr_echo);
186 
187 	if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo))
188 		goto out_unlock;
189 
190 	*saddr = msk->pm.local;
191 	WRITE_ONCE(msk->pm.add_addr_signal, false);
192 	ret = true;
193 
194 out_unlock:
195 	spin_unlock_bh(&msk->pm.lock);
196 	return ret;
197 }
198 
199 bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
200 			     u8 *rm_id)
201 {
202 	int ret = false;
203 
204 	spin_lock_bh(&msk->pm.lock);
205 
206 	/* double check after the lock is acquired */
207 	if (!mptcp_pm_should_rm_signal(msk))
208 		goto out_unlock;
209 
210 	if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
211 		goto out_unlock;
212 
213 	*rm_id = msk->pm.rm_id;
214 	WRITE_ONCE(msk->pm.rm_addr_signal, false);
215 	ret = true;
216 
217 out_unlock:
218 	spin_unlock_bh(&msk->pm.lock);
219 	return ret;
220 }
221 
/* Resolve the local address id for @skc by delegating to the netlink
 * path manager.
 */
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}
226 
/* Reset all path-manager state for a freshly created msk.
 * NOTE(review): presumably called before the pm can be accessed
 * concurrently (plain stores are mixed with WRITE_ONCE() and the lock
 * is initialized last) — confirm against the callers.
 */
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	/* accounting counters */
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	msk->pm.rm_id = 0;
	/* flags also read locklessly elsewhere, hence WRITE_ONCE() */
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.add_addr_signal, false);
	WRITE_ONCE(msk->pm.rm_addr_signal, false);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	WRITE_ONCE(msk->pm.add_addr_echo, false);
	msk->pm.status = 0;

	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);

	/* netlink PM specific initialization */
	mptcp_pm_nl_data_init(msk);
}
247 
/* Boot-time initialization: set up the netlink path manager. */
void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}
252