Lines Matching full:subflow
347 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_prep_synack() local
355 mptcp_fastopen_subflow_synack_set_params(subflow, req); in subflow_prep_synack()
414 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow) in subflow_thmac_valid() argument
419 subflow_generate_hmac(subflow->remote_key, subflow->local_key, in subflow_thmac_valid()
420 subflow->remote_nonce, subflow->local_nonce, in subflow_thmac_valid()
424 pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n", in subflow_thmac_valid()
425 subflow, subflow->token, thmac, subflow->thmac); in subflow_thmac_valid()
427 return thmac == subflow->thmac; in subflow_thmac_valid()
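A minimal userspace sketch (not kernel code, illustrative helper names) of the truncation-and-compare step subflow_thmac_valid() performs: only the leading 64 bits of the HMAC digest travel as the peer's thmac, read big-endian (the kernel side uses get_unaligned_be64()), so the locally recomputed digest is truncated the same way before the comparison on line 427 above.

	#include <stdbool.h>
	#include <stdint.h>

	/* Read the first 8 digest bytes as a big-endian 64-bit value. */
	static uint64_t hmac_head_be64(const uint8_t *digest)
	{
		uint64_t v = 0;

		for (int i = 0; i < 8; i++)
			v = (v << 8) | digest[i];
		return v;
	}

	/* Compare the locally computed digest against the peer's truncated thmac. */
	static bool thmac_matches(const uint8_t *digest, uint64_t peer_thmac)
	{
		return hmac_head_be64(digest) == peer_thmac;
	}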
432 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_reset() local
433 struct sock *sk = subflow->conn; in mptcp_subflow_reset()
459 struct mptcp_subflow_context *subflow; in __mptcp_sync_state() local
463 subflow = mptcp_subflow_ctx(ssk); in __mptcp_sync_state()
469 /* subflow->idsn is always available in TCP_SYN_SENT state, in __mptcp_sync_state()
472 WRITE_ONCE(msk->write_seq, subflow->idsn + 1); in __mptcp_sync_state()
480 struct mptcp_subflow_context *subflow, in subflow_set_remote_key() argument
483 /* active MPC subflow will reach here multiple times: in subflow_set_remote_key()
486 if (subflow->remote_key_valid) in subflow_set_remote_key()
489 subflow->remote_key_valid = 1; in subflow_set_remote_key()
490 subflow->remote_key = mp_opt->sndr_key; in subflow_set_remote_key()
491 mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn); in subflow_set_remote_key()
492 subflow->iasn++; in subflow_set_remote_key()
494 WRITE_ONCE(msk->remote_key, subflow->remote_key); in subflow_set_remote_key()
495 WRITE_ONCE(msk->ack_seq, subflow->iasn); in subflow_set_remote_key()
497 atomic64_set(&msk->rcv_wnd_sent, subflow->iasn); in subflow_set_remote_key()
501 struct mptcp_subflow_context *subflow, in mptcp_propagate_state() argument
511 WRITE_ONCE(msk->snd_una, subflow->idsn + 1); in mptcp_propagate_state()
512 WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd); in mptcp_propagate_state()
513 subflow_set_remote_key(msk, subflow, mp_opt); in mptcp_propagate_state()
527 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_finish_connect() local
529 struct sock *parent = subflow->conn; in subflow_finish_connect()
532 subflow->icsk_af_ops->sk_rx_dst_set(sk, skb); in subflow_finish_connect()
535 if (subflow->conn_finished) in subflow_finish_connect()
539 subflow->rel_write_seq = 1; in subflow_finish_connect()
540 subflow->conn_finished = 1; in subflow_finish_connect()
541 subflow->ssn_offset = TCP_SKB_CB(skb)->seq; in subflow_finish_connect()
542 pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset); in subflow_finish_connect()
545 if (subflow->request_mptcp) { in subflow_finish_connect()
561 subflow->mp_capable = 1; in subflow_finish_connect()
565 mptcp_propagate_state(parent, sk, subflow, &mp_opt); in subflow_finish_connect()
566 } else if (subflow->request_join) { in subflow_finish_connect()
570 subflow->reset_reason = MPTCP_RST_EMPTCP; in subflow_finish_connect()
574 subflow->backup = mp_opt.backup; in subflow_finish_connect()
575 subflow->thmac = mp_opt.thmac; in subflow_finish_connect()
576 subflow->remote_nonce = mp_opt.nonce; in subflow_finish_connect()
577 WRITE_ONCE(subflow->remote_id, mp_opt.join_id); in subflow_finish_connect()
578 pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n", in subflow_finish_connect()
579 subflow, subflow->thmac, subflow->remote_nonce, in subflow_finish_connect()
580 subflow->backup); in subflow_finish_connect()
582 if (!subflow_thmac_valid(subflow)) { in subflow_finish_connect()
584 subflow->reset_reason = MPTCP_RST_EMPTCP; in subflow_finish_connect()
591 subflow_generate_hmac(subflow->local_key, subflow->remote_key, in subflow_finish_connect()
592 subflow->local_nonce, in subflow_finish_connect()
593 subflow->remote_nonce, in subflow_finish_connect()
595 memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN); in subflow_finish_connect()
597 subflow->mp_join = 1; in subflow_finish_connect()
600 if (subflow->backup) in subflow_finish_connect()
611 if (subflow->mpc_drop) in subflow_finish_connect()
614 mptcp_propagate_state(parent, sk, subflow, NULL); in subflow_finish_connect()
619 subflow->reset_transient = 0; in subflow_finish_connect()
623 static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id) in subflow_set_local_id() argument
626 WRITE_ONCE(subflow->local_id, local_id); in subflow_set_local_id()
631 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_chk_local_id() local
632 struct mptcp_sock *msk = mptcp_sk(subflow->conn); in subflow_chk_local_id()
635 if (likely(subflow->local_id >= 0)) in subflow_chk_local_id()
642 subflow_set_local_id(subflow, err); in subflow_chk_local_id()
643 subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk); in subflow_chk_local_id()
675 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_v4_conn_request() local
677 pr_debug("subflow=%p\n", subflow); in subflow_v4_conn_request()
706 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_v6_conn_request() local
708 pr_debug("subflow=%p\n", subflow); in subflow_v6_conn_request()
797 struct mptcp_subflow_context *subflow, in __mptcp_subflow_fully_established() argument
800 subflow_set_remote_key(msk, subflow, mp_opt); in __mptcp_subflow_fully_established()
801 WRITE_ONCE(subflow->fully_established, true); in __mptcp_subflow_fully_established()
918 /* move the msk reference ownership to the subflow */ in subflow_syn_recv_sock()
935 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(child); in subflow_syn_recv_sock() local
937 subflow_add_reset_reason(skb, subflow->reset_reason); in subflow_syn_recv_sock()
985 static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn) in dbg_bad_map() argument
988 ssn, subflow->map_subflow_seq, subflow->map_data_len); in dbg_bad_map()
993 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in skb_is_fully_mapped() local
1002 return skb->len - skb_consumed <= subflow->map_data_len - in skb_is_fully_mapped()
1003 mptcp_subflow_get_map_offset(subflow); in skb_is_fully_mapped()
1008 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in validate_mapping() local
1009 u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; in validate_mapping()
1011 if (unlikely(before(ssn, subflow->map_subflow_seq))) { in validate_mapping()
1012 /* Mapping covers data later in the subflow stream, in validate_mapping()
1015 dbg_bad_map(subflow, ssn); in validate_mapping()
1018 if (unlikely(!before(ssn, subflow->map_subflow_seq + in validate_mapping()
1019 subflow->map_data_len))) { in validate_mapping()
1020 /* Mapping covers only past subflow data, invalid */ in validate_mapping()
1021 dbg_bad_map(subflow, ssn); in validate_mapping()
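A standalone sketch (userspace C, illustrative names) of the wraparound-safe window check validate_mapping() applies via before(): the relative subflow sequence ssn must fall inside [map_subflow_seq, map_subflow_seq + map_data_len), with all arithmetic done modulo 2^32, as in TCP sequence comparisons.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Same semantics as the kernel's before(): true if seq1 precedes seq2 modulo 2^32. */
	static bool before(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) < 0;
	}

	/* True when ssn lies inside the half-open window described by the mapping. */
	static bool mapping_covers(uint32_t ssn, uint32_t map_subflow_seq, uint32_t map_data_len)
	{
		if (before(ssn, map_subflow_seq))
			return false;	/* mapping starts later in the subflow stream */
		if (!before(ssn, map_subflow_seq + map_data_len))
			return false;	/* mapping covers only past subflow data */
		return true;
	}

	int main(void)
	{
		printf("%d\n", mapping_covers(10, 5, 20));		/* 1: inside the window */
		printf("%d\n", mapping_covers(30, 5, 20));		/* 0: beyond the window */
		printf("%d\n", mapping_covers(5, 0xfffffff0u, 32));	/* 1: wraps around 2^32 */
		return 0;
	}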
1030 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in validate_data_csum() local
1039 if (subflow->map_csum_len == subflow->map_data_len) in validate_data_csum()
1047 delta = subflow->map_data_len - subflow->map_csum_len; in validate_data_csum()
1049 seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len; in validate_data_csum()
1060 subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum, in validate_data_csum()
1061 subflow->map_csum_len); in validate_data_csum()
1064 subflow->map_csum_len += len; in validate_data_csum()
1070 /* if this subflow is closed, the partial mapping in validate_data_csum()
1093 csum = __mptcp_make_csum(subflow->map_seq, in validate_data_csum()
1094 subflow->map_subflow_seq, in validate_data_csum()
1095 subflow->map_data_len + subflow->map_data_fin, in validate_data_csum()
1096 subflow->map_data_csum); in validate_data_csum()
1102 subflow->valid_csum_seen = 1; in validate_data_csum()
1109 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in get_mapping_status() local
1125 if (!subflow->map_valid && !skb->len) { in get_mapping_status()
1140 if (!subflow->map_valid) in get_mapping_status()
1162 if (subflow->map_valid) { in get_mapping_status()
1195 WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64); in get_mapping_status()
1197 if (subflow->map_valid) { in get_mapping_status()
1199 if (subflow->map_seq == map_seq && in get_mapping_status()
1200 subflow->map_subflow_seq == mpext->subflow_seq && in get_mapping_status()
1201 subflow->map_data_len == data_len && in get_mapping_status()
1202 subflow->map_csum_reqd == mpext->csum_reqd) { in get_mapping_status()
1219 subflow->map_seq = map_seq; in get_mapping_status()
1220 subflow->map_subflow_seq = mpext->subflow_seq; in get_mapping_status()
1221 subflow->map_data_len = data_len; in get_mapping_status()
1222 subflow->map_valid = 1; in get_mapping_status()
1223 subflow->map_data_fin = mpext->data_fin; in get_mapping_status()
1224 subflow->mpc_map = mpext->mpc_map; in get_mapping_status()
1225 subflow->map_csum_reqd = mpext->csum_reqd; in get_mapping_status()
1226 subflow->map_csum_len = 0; in get_mapping_status()
1227 subflow->map_data_csum = csum_unfold(mpext->csum); in get_mapping_status()
1230 if (unlikely(subflow->map_csum_reqd != csum_reqd)) in get_mapping_status()
1234 subflow->map_seq, subflow->map_subflow_seq, in get_mapping_status()
1235 subflow->map_data_len, subflow->map_csum_reqd, in get_mapping_status()
1236 subflow->map_data_csum); in get_mapping_status()
1256 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_discard_data() local
1269 offset, subflow->map_subflow_seq); in mptcp_subflow_discard_data()
1276 if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) in mptcp_subflow_discard_data()
1277 subflow->map_valid = 0; in mptcp_subflow_discard_data()
1285 /* sched mptcp worker for subflow cleanup if no more data is pending */
1301 /* when the fallback subflow closes the rx side, trigger a 'dummy' in subflow_sched_work_if_closed()
1312 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_fail() local
1315 /* we are really failing, prevent any later subflow join */ in mptcp_subflow_fail()
1324 /* graceful failure can happen only on the MPC subflow */ in mptcp_subflow_fail()
1340 WRITE_ONCE(subflow->fail_tout, fail_tout); in mptcp_subflow_fail()
1343 mptcp_reset_tout_timer(msk, subflow->fail_tout); in mptcp_subflow_fail()
1349 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in subflow_check_data_avail() local
1355 WRITE_ONCE(subflow->data_avail, false); in subflow_check_data_avail()
1356 if (subflow->data_avail) in subflow_check_data_avail()
1359 msk = mptcp_sk(subflow->conn); in subflow_check_data_avail()
1381 ack_seq = mptcp_subflow_get_mapped_dsn(subflow); in subflow_check_data_avail()
1382 pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack, in subflow_check_data_avail()
1389 WRITE_ONCE(subflow->data_avail, true); in subflow_check_data_avail()
1402 (subflow->mp_join || subflow->valid_csum_seen)) { in subflow_check_data_avail()
1403 subflow->send_mp_fail = 1; in subflow_check_data_avail()
1406 subflow->reset_transient = 0; in subflow_check_data_avail()
1407 subflow->reset_reason = MPTCP_RST_EMIDDLEBOX; in subflow_check_data_avail()
1410 WRITE_ONCE(subflow->data_avail, true); in subflow_check_data_avail()
1418 subflow->reset_transient = 0; in subflow_check_data_avail()
1419 subflow->reset_reason = status == MAPPING_NODSS ? in subflow_check_data_avail()
1429 WRITE_ONCE(subflow->data_avail, false); in subflow_check_data_avail()
1435 subflow->map_valid = 1; in subflow_check_data_avail()
1436 subflow->map_seq = READ_ONCE(msk->ack_seq); in subflow_check_data_avail()
1437 subflow->map_data_len = skb->len; in subflow_check_data_avail()
1438 subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; in subflow_check_data_avail()
1439 WRITE_ONCE(subflow->data_avail, true); in subflow_check_data_avail()
1445 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in mptcp_subflow_data_available() local
1448 if (subflow->map_valid && in mptcp_subflow_data_available()
1449 mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) { in mptcp_subflow_data_available()
1450 subflow->map_valid = 0; in mptcp_subflow_data_available()
1451 WRITE_ONCE(subflow->data_avail, false); in mptcp_subflow_data_available()
1454 subflow->map_subflow_seq, in mptcp_subflow_data_available()
1455 subflow->map_data_len); in mptcp_subflow_data_available()
1472 const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_space() local
1473 const struct sock *sk = subflow->conn; in mptcp_space()
1500 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_data_ready() local
1502 struct sock *parent = subflow->conn; in subflow_data_ready()
1509 /* MPJ subflows are removed from the accept queue before reaching here, in subflow_data_ready()
1519 WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable && in subflow_data_ready()
1520 !subflow->mp_join && !(state & TCPF_CLOSE)); in subflow_data_ready()
1525 /* subflow-level lowat tests are not relevant. in subflow_data_ready()
1557 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in mptcpv6_handle_mapped() local
1563 pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n", in mptcpv6_handle_mapped()
1564 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped); in mptcpv6_handle_mapped()
1569 subflow->icsk_af_ops = icsk->icsk_af_ops; in mptcpv6_handle_mapped()
1609 struct mptcp_subflow_context *subflow; in __mptcp_subflow_connect() local
1632 subflow = mptcp_subflow_ctx(ssk); in __mptcp_subflow_connect()
1634 get_random_bytes(&subflow->local_nonce, sizeof(u32)); in __mptcp_subflow_connect()
1635 } while (!subflow->local_nonce); in __mptcp_subflow_connect()
1649 subflow_set_local_id(subflow, local_id); in __mptcp_subflow_connect()
1651 subflow->remote_key_valid = 1; in __mptcp_subflow_connect()
1652 subflow->remote_key = READ_ONCE(msk->remote_key); in __mptcp_subflow_connect()
1653 subflow->local_key = READ_ONCE(msk->local_key); in __mptcp_subflow_connect()
1654 subflow->token = msk->token; in __mptcp_subflow_connect()
1671 mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL); in __mptcp_subflow_connect()
1674 subflow->remote_token = remote_token; in __mptcp_subflow_connect()
1675 WRITE_ONCE(subflow->remote_id, remote_id); in __mptcp_subflow_connect()
1676 subflow->request_join = 1; in __mptcp_subflow_connect()
1677 subflow->request_bkup = !!(local->flags & MPTCP_PM_ADDR_FLAG_BACKUP); in __mptcp_subflow_connect()
1678 subflow->subflow_id = msk->subflow_id++; in __mptcp_subflow_connect()
1682 list_add_tail(&subflow->node, &msk->conn_list); in __mptcp_subflow_connect()
1693 /* discard the subflow socket */ in __mptcp_subflow_connect()
1700 list_del(&subflow->node); in __mptcp_subflow_connect()
1701 sock_put(mptcp_subflow_tcp_sock(subflow)); in __mptcp_subflow_connect()
1704 subflow->disposable = 1; in __mptcp_subflow_connect()
1757 struct mptcp_subflow_context *subflow; in mptcp_subflow_create_socket() local
1803 subflow = mptcp_subflow_ctx(sf->sk); in mptcp_subflow_create_socket()
1804 pr_debug("subflow=%p\n", subflow); in mptcp_subflow_create_socket()
1808 subflow->conn = sk; in mptcp_subflow_create_socket()
1833 pr_debug("subflow=%p\n", ctx); in subflow_create_ctx()
1854 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); in subflow_state_change() local
1855 struct sock *parent = subflow->conn; in subflow_state_change()
1861 subflow->conn_finished = 1; in subflow_state_change()
1862 mptcp_propagate_state(parent, sk, subflow, NULL); in subflow_state_change()
1865 /* as recvmsg() does not acquire the subflow socket for ssk selection in subflow_state_change()
1881 struct mptcp_subflow_context *subflow; in mptcp_subflow_queue_clean() local
1898 /* can't acquire the msk socket lock under the subflow one, in mptcp_subflow_queue_clean()
1908 subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_queue_clean()
1909 if (!subflow || !subflow->conn) in mptcp_subflow_queue_clean()
1912 sk = subflow->conn; in mptcp_subflow_queue_clean()
1969 pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family); in subflow_ulp_init()
2001 * when the subflow is still unaccepted in subflow_ulp_release()
2055 /* this is the first subflow, id is always 0 */ in subflow_ulp_clone()
2068 /* the subflow req id is valid, fetched via subflow_check_req() in subflow_ulp_clone()
2077 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in tcp_release_cb_override() local
2080 /* process and clear all the pending actions, but leave the subflow into in tcp_release_cb_override()
2084 status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0); in tcp_release_cb_override()
2093 /* closing a listener subflow requires a great deal of care. in tcp_abort_override()
2132 panic("MPTCP: failed to init subflow v4 request sock ops\n"); in mptcp_subflow_init()
2161 panic("MPTCP: failed to init subflow v6 request sock ops\n"); in mptcp_subflow_init()