1 // SPDX-License-Identifier: GPL-2.0 2 /* Multipath TCP 3 * 4 * Copyright (c) 2019, Intel Corporation. 5 */ 6 #define pr_fmt(fmt) "MPTCP: " fmt 7 8 #include <linux/kernel.h> 9 #include <net/tcp.h> 10 #include <net/mptcp.h> 11 #include "protocol.h" 12 13 #include "mib.h" 14 15 /* path manager command handlers */ 16 17 int mptcp_pm_announce_addr(struct mptcp_sock *msk, 18 const struct mptcp_addr_info *addr, 19 bool echo) 20 { 21 u8 add_addr = READ_ONCE(msk->pm.addr_signal); 22 23 pr_debug("msk=%p, local_id=%d", msk, addr->id); 24 25 lockdep_assert_held(&msk->pm.lock); 26 27 if (add_addr) { 28 pr_warn("addr_signal error, add_addr=%d", add_addr); 29 return -EINVAL; 30 } 31 32 msk->pm.local = *addr; 33 add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL); 34 if (echo) 35 add_addr |= BIT(MPTCP_ADD_ADDR_ECHO); 36 if (addr->family == AF_INET6) 37 add_addr |= BIT(MPTCP_ADD_ADDR_IPV6); 38 if (addr->port) 39 add_addr |= BIT(MPTCP_ADD_ADDR_PORT); 40 WRITE_ONCE(msk->pm.addr_signal, add_addr); 41 return 0; 42 } 43 44 int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list) 45 { 46 u8 rm_addr = READ_ONCE(msk->pm.addr_signal); 47 48 pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr); 49 50 if (rm_addr) { 51 pr_warn("addr_signal error, rm_addr=%d", rm_addr); 52 return -EINVAL; 53 } 54 55 msk->pm.rm_list_tx = *rm_list; 56 rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL); 57 WRITE_ONCE(msk->pm.addr_signal, rm_addr); 58 mptcp_pm_nl_addr_send_ack(msk); 59 return 0; 60 } 61 62 int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list) 63 { 64 pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr); 65 66 spin_lock_bh(&msk->pm.lock); 67 mptcp_pm_nl_rm_subflow_received(msk, rm_list); 68 spin_unlock_bh(&msk->pm.lock); 69 return 0; 70 } 71 72 /* path manager event handlers */ 73 74 void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side) 75 { 76 struct mptcp_pm_data *pm = &msk->pm; 77 78 pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side); 79 80 WRITE_ONCE(pm->server_side, server_side); 81 mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC); 82 } 83 84 bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk) 85 { 86 struct mptcp_pm_data *pm = &msk->pm; 87 unsigned int subflows_max; 88 int ret = 0; 89 90 subflows_max = mptcp_pm_get_subflows_max(msk); 91 92 pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows, 93 subflows_max, READ_ONCE(pm->accept_subflow)); 94 95 /* try to avoid acquiring the lock below */ 96 if (!READ_ONCE(pm->accept_subflow)) 97 return false; 98 99 spin_lock_bh(&pm->lock); 100 if (READ_ONCE(pm->accept_subflow)) { 101 ret = pm->subflows < subflows_max; 102 if (ret && ++pm->subflows == subflows_max) 103 WRITE_ONCE(pm->accept_subflow, false); 104 } 105 spin_unlock_bh(&pm->lock); 106 107 return ret; 108 } 109 110 /* return true if the new status bit is currently cleared, that is, this event 111 * can be server, eventually by an already scheduled work 112 */ 113 static bool mptcp_pm_schedule_work(struct mptcp_sock *msk, 114 enum mptcp_pm_status new_status) 115 { 116 pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status, 117 BIT(new_status)); 118 if (msk->pm.status & BIT(new_status)) 119 return false; 120 121 msk->pm.status |= BIT(new_status); 122 mptcp_schedule_work((struct sock *)msk); 123 return true; 124 } 125 126 void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp) 127 { 128 struct mptcp_pm_data *pm = &msk->pm; 129 bool announce = false; 130 131 pr_debug("msk=%p", msk); 132 133 spin_lock_bh(&pm->lock); 134 135 /* mptcp_pm_fully_established() can be invoked by multiple 136 * racing paths - accept() and check_fully_established() 137 * be sure to serve this event only once. 138 */ 139 if (READ_ONCE(pm->work_pending) && 140 !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED))) 141 mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED); 142 143 if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0) 144 announce = true; 145 146 msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED); 147 spin_unlock_bh(&pm->lock); 148 149 if (announce) 150 mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, gfp); 151 } 152 153 void mptcp_pm_connection_closed(struct mptcp_sock *msk) 154 { 155 pr_debug("msk=%p", msk); 156 } 157 158 void mptcp_pm_subflow_established(struct mptcp_sock *msk) 159 { 160 struct mptcp_pm_data *pm = &msk->pm; 161 162 pr_debug("msk=%p", msk); 163 164 if (!READ_ONCE(pm->work_pending)) 165 return; 166 167 spin_lock_bh(&pm->lock); 168 169 if (READ_ONCE(pm->work_pending)) 170 mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED); 171 172 spin_unlock_bh(&pm->lock); 173 } 174 175 void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id) 176 { 177 pr_debug("msk=%p", msk); 178 } 179 180 void mptcp_pm_add_addr_received(struct mptcp_sock *msk, 181 const struct mptcp_addr_info *addr) 182 { 183 struct mptcp_pm_data *pm = &msk->pm; 184 185 pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id, 186 READ_ONCE(pm->accept_addr)); 187 188 mptcp_event_addr_announced(msk, addr); 189 190 spin_lock_bh(&pm->lock); 191 192 if (!READ_ONCE(pm->accept_addr)) { 193 mptcp_pm_announce_addr(msk, addr, true); 194 mptcp_pm_add_addr_send_ack(msk); 195 } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) { 196 pm->remote = *addr; 197 } 198 199 spin_unlock_bh(&pm->lock); 200 } 201 202 void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk, 203 struct mptcp_addr_info *addr) 204 { 205 struct mptcp_pm_data *pm = &msk->pm; 206 207 pr_debug("msk=%p", msk); 208 209 spin_lock_bh(&pm->lock); 210 211 if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending)) 212 mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED); 213 214 spin_unlock_bh(&pm->lock); 215 } 216 217 void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk) 218 { 219 if (!mptcp_pm_should_add_signal(msk)) 220 return; 221 222 mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK); 223 } 224 225 void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, 226 const struct mptcp_rm_list *rm_list) 227 { 228 struct mptcp_pm_data *pm = &msk->pm; 229 u8 i; 230 231 pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr); 232 233 for (i = 0; i < rm_list->nr; i++) 234 mptcp_event_addr_removed(msk, rm_list->ids[i]); 235 236 spin_lock_bh(&pm->lock); 237 mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED); 238 pm->rm_list_rx = *rm_list; 239 spin_unlock_bh(&pm->lock); 240 } 241 242 void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup) 243 { 244 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 245 246 pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup); 247 subflow->backup = bkup; 248 249 mptcp_event(MPTCP_EVENT_SUB_PRIORITY, mptcp_sk(subflow->conn), sk, GFP_ATOMIC); 250 } 251 252 /* path manager helpers */ 253 254 bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining, 255 struct mptcp_addr_info *saddr, bool *echo, bool *port) 256 { 257 int ret = false; 258 259 spin_lock_bh(&msk->pm.lock); 260 261 /* double check after the lock is acquired */ 262 if (!mptcp_pm_should_add_signal(msk)) 263 goto out_unlock; 264 265 *echo = mptcp_pm_should_add_signal_echo(msk); 266 *port = mptcp_pm_should_add_signal_port(msk); 267 268 if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo, *port)) 269 goto out_unlock; 270 271 *saddr = msk->pm.local; 272 WRITE_ONCE(msk->pm.addr_signal, 0); 273 ret = true; 274 275 out_unlock: 276 spin_unlock_bh(&msk->pm.lock); 277 return ret; 278 } 279 280 bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining, 281 struct mptcp_rm_list *rm_list) 282 { 283 int ret = false, len; 284 285 spin_lock_bh(&msk->pm.lock); 286 287 /* double check after the lock is acquired */ 288 if (!mptcp_pm_should_rm_signal(msk)) 289 goto out_unlock; 290 291 len = mptcp_rm_addr_len(&msk->pm.rm_list_tx); 292 if (len < 0) { 293 WRITE_ONCE(msk->pm.addr_signal, 0); 294 goto out_unlock; 295 } 296 if (remaining < len) 297 goto out_unlock; 298 299 *rm_list = msk->pm.rm_list_tx; 300 WRITE_ONCE(msk->pm.addr_signal, 0); 301 ret = true; 302 303 out_unlock: 304 spin_unlock_bh(&msk->pm.lock); 305 return ret; 306 } 307 308 int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) 309 { 310 return mptcp_pm_nl_get_local_id(msk, skc); 311 } 312 313 void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) 314 { 315 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); 316 u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp); 317 318 /* keep track of rtx periods with no progress */ 319 if (!subflow->stale_count) { 320 subflow->stale_rcv_tstamp = rcv_tstamp; 321 subflow->stale_count++; 322 } else if (subflow->stale_rcv_tstamp == rcv_tstamp) { 323 if (subflow->stale_count < U8_MAX) 324 subflow->stale_count++; 325 mptcp_pm_nl_subflow_chk_stale(msk, ssk); 326 } else { 327 subflow->stale_count = 0; 328 mptcp_subflow_set_active(subflow); 329 } 330 } 331 332 void mptcp_pm_data_init(struct mptcp_sock *msk) 333 { 334 msk->pm.add_addr_signaled = 0; 335 msk->pm.add_addr_accepted = 0; 336 msk->pm.local_addr_used = 0; 337 msk->pm.subflows = 0; 338 msk->pm.rm_list_tx.nr = 0; 339 msk->pm.rm_list_rx.nr = 0; 340 WRITE_ONCE(msk->pm.work_pending, false); 341 WRITE_ONCE(msk->pm.addr_signal, 0); 342 WRITE_ONCE(msk->pm.accept_addr, false); 343 WRITE_ONCE(msk->pm.accept_subflow, false); 344 WRITE_ONCE(msk->pm.remote_deny_join_id0, false); 345 msk->pm.status = 0; 346 347 spin_lock_init(&msk->pm.lock); 348 INIT_LIST_HEAD(&msk->pm.anno_list); 349 350 mptcp_pm_nl_data_init(msk); 351 } 352 353 void __init mptcp_pm_init(void) 354 { 355 mptcp_pm_nl_init(); 356 } 357