Lines matching refs:msk — cross-reference hits for the MPTCP socket pointer `msk` in the Linux kernel's net/mptcp/protocol.c. Each entry shows the source line number, the matching line, and the enclosing function; the trailing "argument", "local" and "member" tags come from the indexer.

36 	struct mptcp_sock msk;  member
57 static u64 mptcp_wnd_end(const struct mptcp_sock *msk) in mptcp_wnd_end() argument
59 return READ_ONCE(msk->wnd_end); in mptcp_wnd_end()
74 bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib) in __mptcp_try_fallback() argument
76 struct net *net = sock_net((struct sock *)msk); in __mptcp_try_fallback()
78 if (__mptcp_check_fallback(msk)) in __mptcp_try_fallback()
85 if (!RB_EMPTY_ROOT(&msk->out_of_order_queue)) in __mptcp_try_fallback()
88 spin_lock_bh(&msk->fallback_lock); in __mptcp_try_fallback()
89 if (!msk->allow_infinite_fallback) { in __mptcp_try_fallback()
90 spin_unlock_bh(&msk->fallback_lock); in __mptcp_try_fallback()
94 msk->allow_subflows = false; in __mptcp_try_fallback()
95 set_bit(MPTCP_FALLBACK_DONE, &msk->flags); in __mptcp_try_fallback()
97 spin_unlock_bh(&msk->fallback_lock); in __mptcp_try_fallback()
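The fallback path above (__mptcp_try_fallback, lines 74-97) bails out early when data is already queued out of order, then re-checks allow_infinite_fallback under fallback_lock before committing, so a concurrent subflow join cannot race it. A minimal userspace sketch of that double-checked guard, with a pthread mutex standing in for the kernel spinlock (struct and function names here are illustrative, not the kernel API):

```c
#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-in for the msk fallback state. */
struct fb_state {
	pthread_mutex_t lock;        /* plays the role of fallback_lock */
	bool fallback_done;          /* MPTCP_FALLBACK_DONE */
	bool allow_infinite_fallback;
	bool allow_subflows;
	bool ooo_queue_empty;        /* RB_EMPTY_ROOT(&msk->out_of_order_queue) */
};

/* Try to fall back to plain TCP; mirrors the shape of __mptcp_try_fallback(). */
static bool try_fallback(struct fb_state *st)
{
	if (st->fallback_done)       /* already fallen back: nothing to do */
		return true;
	if (!st->ooo_queue_empty)    /* OOO data would be stranded: refuse */
		return false;

	pthread_mutex_lock(&st->lock);
	if (!st->allow_infinite_fallback) {  /* lost a race with a join */
		pthread_mutex_unlock(&st->lock);
		return false;
	}
	st->allow_subflows = false;  /* no new subflows after fallback */
	st->fallback_done = true;
	pthread_mutex_unlock(&st->lock);
	return true;
}
```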
101 static int __mptcp_socket_create(struct mptcp_sock *msk) in __mptcp_socket_create() argument
104 struct sock *sk = (struct sock *)msk; in __mptcp_socket_create()
112 msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio; in __mptcp_socket_create()
113 WRITE_ONCE(msk->first, ssock->sk); in __mptcp_socket_create()
115 list_add(&subflow->node, &msk->conn_list); in __mptcp_socket_create()
118 subflow->subflow_id = msk->subflow_id++; in __mptcp_socket_create()
122 mptcp_sock_graft(msk->first, sk->sk_socket); in __mptcp_socket_create()
131 struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk) in __mptcp_nmpc_sk() argument
133 struct sock *sk = (struct sock *)msk; in __mptcp_nmpc_sk()
139 if (!msk->first) { in __mptcp_nmpc_sk()
140 ret = __mptcp_socket_create(msk); in __mptcp_nmpc_sk()
145 return msk->first; in __mptcp_nmpc_sk()
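__mptcp_nmpc_sk() builds the first subflow lazily: only the first caller pays for __mptcp_socket_create(), later callers just return msk->first. A small create-on-first-use sketch of the same idea (userspace, illustrative names):

```c
#include <stdlib.h>

struct conn { int fd; };              /* illustrative subflow handle */

struct mp_sock {
	struct conn *first;           /* NULL until the first subflow exists */
};

/* Create-on-first-use, as in __mptcp_nmpc_sk(): only the first caller
 * allocates; later callers reuse msk->first. (Userspace sketch.) */
static struct conn *get_first_subflow(struct mp_sock *msk)
{
	if (!msk->first)
		msk->first = calloc(1, sizeof(*msk->first));
	return msk->first;            /* NULL if allocation failed */
}
```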
193 static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to, in mptcp_ooo_try_coalesce() argument
199 return mptcp_try_coalesce((struct sock *)msk, to, from); in mptcp_ooo_try_coalesce()
208 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_rcvbuf_grow() local
213 oldval = msk->rcvq_space.space; in mptcp_rcvbuf_grow()
214 msk->rcvq_space.space = newval; in mptcp_rcvbuf_grow()
227 if (!RB_EMPTY_ROOT(&msk->out_of_order_queue)) in mptcp_rcvbuf_grow()
228 rcvwin += MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq - msk->ack_seq; in mptcp_rcvbuf_grow()
244 static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb) in mptcp_data_queue_ofo() argument
246 struct sock *sk = (struct sock *)msk; in mptcp_data_queue_ofo()
253 max_seq = atomic64_read(&msk->rcv_wnd_sent); in mptcp_data_queue_ofo()
255 pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq, in mptcp_data_queue_ofo()
256 RB_EMPTY_ROOT(&msk->out_of_order_queue)); in mptcp_data_queue_ofo()
262 (unsigned long long)atomic64_read(&msk->rcv_wnd_sent)); in mptcp_data_queue_ofo()
267 p = &msk->out_of_order_queue.rb_node; in mptcp_data_queue_ofo()
269 if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) { in mptcp_data_queue_ofo()
271 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); in mptcp_data_queue_ofo()
272 msk->ooo_last_skb = skb; in mptcp_data_queue_ofo()
279 if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) { in mptcp_data_queue_ofo()
286 if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) { in mptcp_data_queue_ofo()
288 parent = &msk->ooo_last_skb->rbnode; in mptcp_data_queue_ofo()
320 &msk->out_of_order_queue); in mptcp_data_queue_ofo()
325 } else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) { in mptcp_data_queue_ofo()
335 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); in mptcp_data_queue_ofo()
342 rb_erase(&skb1->rbnode, &msk->out_of_order_queue); in mptcp_data_queue_ofo()
348 msk->ooo_last_skb = skb; in mptcp_data_queue_ofo()
355 mptcp_rcvbuf_grow(sk, msk->rcvq_space.space); in mptcp_data_queue_ofo()
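mptcp_data_queue_ofo() caches the highest-sequence entry in msk->ooo_last_skb so the common case, an out-of-order burst arriving in ascending order, never walks the rb-tree: it first tries to coalesce with that tail, then inserts as the new rightmost node, and only otherwise searches the tree and resolves overlaps. A sketch of the tail fast path over a plain sorted list (the kernel uses an rb-tree of skbs; struct seg and the queue here are illustrative):

```c
#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>

struct seg {
	uint64_t seq, end_seq;   /* [seq, end_seq) in 64-bit MPTCP data space */
	struct seg *next;
};

struct ooo_queue {
	struct seg *head;
	struct seg *tail;        /* cached highest segment: msk->ooo_last_skb */
};

/* Wrap-safe "a is not before b", as !before64(a, b) in the kernel. */
static bool not_before64(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) >= 0;
}

/* Tail fast path of mptcp_data_queue_ofo(): try the cached tail first. */
static bool ooo_queue_fastpath(struct ooo_queue *q, struct seg *s)
{
	if (!q->tail) {                       /* empty queue: s becomes tail */
		q->head = q->tail = s;
		s->next = NULL;
		return true;
	}
	if (s->seq == q->tail->end_seq) {     /* contiguous: coalesce with tail */
		q->tail->end_seq = s->end_seq;
		free(s);
		return true;
	}
	if (not_before64(s->seq, q->tail->end_seq)) {  /* beyond tail: append */
		q->tail->next = s;
		s->next = NULL;
		q->tail = s;
		return true;
	}
	return false;   /* slow path: walk the tree and handle overlaps */
}
```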
383 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_move_skb() local
388 if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) { in __mptcp_move_skb()
390 msk->bytes_received += copy_len; in __mptcp_move_skb()
391 WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len); in __mptcp_move_skb()
399 } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) { in __mptcp_move_skb()
400 mptcp_data_queue_ofo(msk, skb); in __mptcp_move_skb()
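__mptcp_move_skb() classifies each mapping against msk->ack_seq with wrap-safe 64-bit comparisons: an exact match extends the in-order stream, a future sequence goes to the OOO queue, anything else is a stale duplicate. The helpers are the classic serial-number trick (this matches the usual before64()/after64() definitions; standalone sketch below):

```c
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

/* Wrap-safe 64-bit sequence comparison (serial-number arithmetic). */
static bool after64(uint64_t seq1, uint64_t seq2)
{
	return (int64_t)(seq1 - seq2) > 0;
}

enum skb_class { IN_ORDER, OUT_OF_ORDER, STALE_DUP };

/* The dispatch decision made by __mptcp_move_skb(). */
static enum skb_class classify(uint64_t map_seq, uint64_t ack_seq)
{
	if (map_seq == ack_seq)
		return IN_ORDER;      /* tail-queue it, advance ack_seq */
	if (after64(map_seq, ack_seq))
		return OUT_OF_ORDER;  /* mptcp_data_queue_ofo() */
	return STALE_DUP;             /* already received: drop */
}

int main(void)
{
	assert(classify(100, 100) == IN_ORDER);
	assert(classify(200, 100) == OUT_OF_ORDER);
	/* After a 2^64 wrap, UINT64_MAX is 6 bytes *behind* 5. */
	assert(classify(UINT64_MAX, 5) == STALE_DUP);
	return 0;
}
```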
431 static void mptcp_shutdown_subflows(struct mptcp_sock *msk) in mptcp_shutdown_subflows() argument
435 mptcp_for_each_subflow(msk, subflow) { in mptcp_shutdown_subflows()
448 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_pending_data_fin_ack() local
452 msk->write_seq == READ_ONCE(msk->snd_una); in mptcp_pending_data_fin_ack()
457 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_check_data_fin_ack() local
461 WRITE_ONCE(msk->snd_data_fin_enable, 0); in mptcp_check_data_fin_ack()
469 mptcp_shutdown_subflows(msk); in mptcp_check_data_fin_ack()
481 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_pending_data_fin() local
483 if (READ_ONCE(msk->rcv_data_fin) && in mptcp_pending_data_fin()
486 u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq); in mptcp_pending_data_fin()
488 if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) { in mptcp_pending_data_fin()
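A DATA_FIN is acted on only once every byte before it has arrived: mptcp_pending_data_fin() compares the announced rcv_data_fin_seq against the current ack_seq. A compact sketch of that readiness test (field names mirror the listing; the kernel additionally gates this on socket state):

```c
#include <stdint.h>
#include <stdbool.h>

struct mp_rx_state {
	bool     rcv_data_fin;      /* peer announced DATA_FIN */
	uint64_t rcv_data_fin_seq;  /* data-level sequence it applies at */
	uint64_t ack_seq;           /* next in-order byte we expect */
};

/* DATA_FIN is "pending" only when the stream has caught up to it. */
static bool pending_data_fin(const struct mp_rx_state *st, uint64_t *seq)
{
	if (st->rcv_data_fin && st->ack_seq == st->rcv_data_fin_seq) {
		if (seq)
			*seq = st->rcv_data_fin_seq;
		return true;
	}
	return false;   /* either no DATA_FIN, or data still missing before it */
}
```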
554 static void mptcp_send_ack(struct mptcp_sock *msk) in mptcp_send_ack() argument
558 mptcp_for_each_subflow(msk, subflow) in mptcp_send_ack()
585 static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied) in mptcp_cleanup_rbuf() argument
587 int old_space = READ_ONCE(msk->old_wspace); in mptcp_cleanup_rbuf()
589 struct sock *sk = (struct sock *)msk; in mptcp_cleanup_rbuf()
596 mptcp_for_each_subflow(msk, subflow) { in mptcp_cleanup_rbuf()
606 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_check_data_fin() local
623 WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1); in mptcp_check_data_fin()
624 WRITE_ONCE(msk->rcv_data_fin, 0); in mptcp_check_data_fin()
637 mptcp_shutdown_subflows(msk); in mptcp_check_data_fin()
646 if (!__mptcp_check_fallback(msk)) in mptcp_check_data_fin()
647 mptcp_send_ack(msk); in mptcp_check_data_fin()
652 static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk) in mptcp_dss_corruption() argument
664 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_add_backlog() local
676 if (!list_empty(&msk->backlog_list)) in __mptcp_add_backlog()
677 tail = list_last_entry(&msk->backlog_list, struct sk_buff, list); in __mptcp_add_backlog()
688 list_add_tail(&skb->list, &msk->backlog_list); in __mptcp_add_backlog()
693 WRITE_ONCE(msk->backlog_len, msk->backlog_len + delta); in __mptcp_add_backlog()
699 msk->backlog_unaccounted += delta; in __mptcp_add_backlog()
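__mptcp_add_backlog() charges each queued skb's footprint to msk->backlog_len, and the listing also shows a backlog_unaccounted counter that mptcp_graft_subflows() later folds in and resets (lines 4160-4164). The split sketched below, charge now when the owner can be charged and defer otherwise, is an assumption drawn from this listing alone:

```c
#include <stddef.h>
#include <stdbool.h>

struct backlog_acct {
	size_t backlog_len;          /* bytes charged to the owning socket */
	size_t backlog_unaccounted;  /* queued before the owner was ready */
};

/* Charge one skb's footprint; defer if the owner can't be charged yet.
 * (Assumed split, based on the two counters in the listing.) */
static void backlog_charge(struct backlog_acct *a, size_t delta,
			   bool owner_ready)
{
	if (owner_ready)
		a->backlog_len += delta;
	else
		a->backlog_unaccounted += delta;
}

/* Later, once the owner exists, fold the deferred bytes in. */
static void backlog_settle(struct backlog_acct *a)
{
	a->backlog_len += a->backlog_unaccounted;
	a->backlog_unaccounted = 0;
}
```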
702 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, in __mptcp_move_skbs_from_subflow() argument
706 struct sock *sk = (struct sock *)msk; in __mptcp_move_skbs_from_subflow()
711 pr_debug("msk=%p ssk=%p\n", msk, ssk); in __mptcp_move_skbs_from_subflow()
727 if (__mptcp_check_fallback(msk)) { in __mptcp_move_skbs_from_subflow()
756 mptcp_dss_corruption(msk, ssk); in __mptcp_move_skbs_from_subflow()
761 mptcp_dss_corruption(msk, ssk); in __mptcp_move_skbs_from_subflow()
773 msk->last_data_recv = tcp_jiffies32; in __mptcp_move_skbs_from_subflow()
777 static bool __mptcp_ofo_queue(struct mptcp_sock *msk) in __mptcp_ofo_queue() argument
779 struct sock *sk = (struct sock *)msk; in __mptcp_ofo_queue()
785 p = rb_first(&msk->out_of_order_queue); in __mptcp_ofo_queue()
786 pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue)); in __mptcp_ofo_queue()
789 if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) in __mptcp_ofo_queue()
793 rb_erase(&skb->rbnode, &msk->out_of_order_queue); in __mptcp_ofo_queue()
796 msk->ack_seq))) { in __mptcp_ofo_queue()
804 if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) { in __mptcp_ofo_queue()
805 int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq; in __mptcp_ofo_queue()
809 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq, in __mptcp_ofo_queue()
815 msk->bytes_received += end_seq - msk->ack_seq; in __mptcp_ofo_queue()
816 WRITE_ONCE(msk->ack_seq, end_seq); in __mptcp_ofo_queue()
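__mptcp_ofo_queue() drains the tree in sequence order: stop at the first segment still beyond ack_seq, skip segments already fully covered, trim the leading delta = ack_seq - map_seq bytes off a partial overlap, and advance ack_seq to each delivered end_seq. A userspace sketch of that drain loop over a sorted array (skb and rb-tree machinery elided):

```c
#include <stdint.h>
#include <stdio.h>

struct oseg { uint64_t seq, end_seq; };  /* [seq, end_seq), sorted by seq */

/* Drain in-order-ready segments; returns the new ack_seq. */
static uint64_t ofo_drain(struct oseg *q, int n, uint64_t ack_seq)
{
	for (int i = 0; i < n; i++) {
		if ((int64_t)(q[i].seq - ack_seq) > 0)
			break;                    /* gap: stop draining */
		if ((int64_t)(q[i].end_seq - ack_seq) <= 0)
			continue;                 /* fully duplicate: drop */
		/* Partial overlap: trim the leading duplicate bytes. */
		uint64_t delta = ack_seq - q[i].seq;
		/* ...would advance the skb payload by delta bytes here... */
		(void)delta;
		ack_seq = q[i].end_seq;           /* deliver, advance stream */
	}
	return ack_seq;
}

int main(void)
{
	struct oseg q[] = { {100, 150}, {140, 200}, {300, 400} };
	printf("ack_seq=%llu\n",
	       (unsigned long long)ofo_drain(q, 3, 100)); /* prints 200 */
	return 0;
}
```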
855 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_error_report() local
857 mptcp_for_each_subflow(msk, subflow) in __mptcp_error_report()
865 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) in move_skbs_to_msk() argument
867 struct sock *sk = (struct sock *)msk; in move_skbs_to_msk()
870 moved = __mptcp_move_skbs_from_subflow(msk, ssk, true); in move_skbs_to_msk()
871 __mptcp_ofo_queue(msk); in move_skbs_to_msk()
888 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_data_ready() local
900 if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk)) in mptcp_data_ready()
903 __mptcp_move_skbs_from_subflow(msk, ssk, false); in mptcp_data_ready()
908 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk) in mptcp_subflow_joined() argument
910 mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq); in mptcp_subflow_joined()
911 msk->allow_infinite_fallback = false; in mptcp_subflow_joined()
912 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); in mptcp_subflow_joined()
915 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) in __mptcp_finish_join() argument
917 struct sock *sk = (struct sock *)msk; in __mptcp_finish_join()
922 spin_lock_bh(&msk->fallback_lock); in __mptcp_finish_join()
923 if (!msk->allow_subflows) { in __mptcp_finish_join()
924 spin_unlock_bh(&msk->fallback_lock); in __mptcp_finish_join()
927 mptcp_subflow_joined(msk, ssk); in __mptcp_finish_join()
928 spin_unlock_bh(&msk->fallback_lock); in __mptcp_finish_join()
930 mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++; in __mptcp_finish_join()
931 mptcp_sockopt_sync_locked(msk, ssk); in __mptcp_finish_join()
940 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_flush_join_list() local
946 list_move_tail(&subflow->node, &msk->conn_list); in __mptcp_flush_join_list()
947 if (!__mptcp_finish_join(msk, ssk)) in __mptcp_flush_join_list()
1007 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk, in mptcp_frag_can_collapse_to() argument
1014 df->data_seq + df->data_len == msk->write_seq; in mptcp_frag_can_collapse_to()
1035 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_clean_una() local
1039 snd_una = msk->snd_una; in __mptcp_clean_una()
1040 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) { in __mptcp_clean_una()
1044 if (unlikely(dfrag == msk->first_pending)) { in __mptcp_clean_una()
1046 if (WARN_ON_ONCE(!msk->recovery)) in __mptcp_clean_una()
1049 msk->first_pending = mptcp_send_next(sk); in __mptcp_clean_una()
1061 if (WARN_ON_ONCE(!msk->recovery)) in __mptcp_clean_una()
1077 if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt)) in __mptcp_clean_una()
1078 msk->recovery = false; in __mptcp_clean_una()
1081 if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) { in __mptcp_clean_una()
1082 if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk)) in __mptcp_clean_una()
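__mptcp_clean_una() releases every data fragment in rtx_queue that the peer's cumulative ack (snd_una) fully covers; a fragment the ack lands inside is kept. A sketch of the reclaim walk, with the list simplified to an array:

```c
#include <stdint.h>

struct dfrag {
	uint64_t data_seq;   /* first data-level byte of the fragment */
	uint32_t data_len;   /* bytes carried */
};

/* Return how many leading fragments are fully acked by snd_una. */
static int count_reclaimable(const struct dfrag *q, int n, uint64_t snd_una)
{
	int freed = 0;

	for (int i = 0; i < n; i++) {
		/* Fragment end beyond snd_una: partially acked, keep it. */
		if ((int64_t)(q[i].data_seq + q[i].data_len - snd_una) > 0)
			break;
		freed++;     /* fully acked: safe to free and unlink */
	}
	return freed;
}
```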
1110 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_enter_memory_pressure() local
1113 mptcp_for_each_subflow(msk, subflow) { in mptcp_enter_memory_pressure()
1140 mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag, in mptcp_carve_data_frag() argument
1148 dfrag->data_seq = msk->write_seq; in mptcp_carve_data_frag()
1166 static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk, in mptcp_check_allowed_size() argument
1169 u64 window_end = mptcp_wnd_end(msk); in mptcp_check_allowed_size()
1172 if (__mptcp_check_fallback(msk)) in mptcp_check_allowed_size()
1251 static void mptcp_update_infinite_map(struct mptcp_sock *msk, in mptcp_update_infinite_map() argument
1278 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_sendmsg_frag() local
1288 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent); in mptcp_sendmsg_frag()
1338 copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy); in mptcp_sendmsg_frag()
1340 u64 snd_una = READ_ONCE(msk->snd_una); in mptcp_sendmsg_frag()
1346 if (snd_una != msk->snd_nxt || skb->len || in mptcp_sendmsg_frag()
1401 if (READ_ONCE(msk->csum_enabled)) in mptcp_sendmsg_frag()
1407 if (READ_ONCE(msk->csum_enabled)) in mptcp_sendmsg_frag()
1410 mptcp_update_infinite_map(msk, ssk, mpext); in mptcp_sendmsg_frag()
1457 struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) in mptcp_subflow_get_send() argument
1461 struct sock *sk = (struct sock *)msk; in mptcp_subflow_get_send()
1474 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_get_send()
1520 burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt); in mptcp_subflow_get_send()
1529 msk->snd_burst = burst; in mptcp_subflow_get_send()
1539 static void mptcp_update_post_push(struct mptcp_sock *msk, in mptcp_update_post_push() argument
1547 msk->snd_burst -= sent; in mptcp_update_post_push()
1560 if (likely(after64(snd_nxt_new, msk->snd_nxt))) { in mptcp_update_post_push()
1561 msk->bytes_sent += snd_nxt_new - msk->snd_nxt; in mptcp_update_post_push()
1562 WRITE_ONCE(msk->snd_nxt, snd_nxt_new); in mptcp_update_post_push()
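After each push, mptcp_update_post_push() charges the sent bytes against the scheduler's burst and advances snd_nxt only forward: with several subflows pushing concurrently, an older injection can complete after newer data, so the after64() check keeps snd_nxt monotonic. The burst itself is clamped to the free window, wnd_end - snd_nxt, as at line 1520. A sketch of both updates (the burst constant and parameter layout are illustrative):

```c
#include <stdint.h>

#define SEND_BURST_SIZE (64 * 1024)   /* illustrative, not the kernel value */

struct tx_state {
	uint64_t snd_nxt;    /* highest data-level sequence pushed so far */
	uint64_t wnd_end;    /* right edge of the peer-announced window */
	int      snd_burst;  /* bytes left in the current scheduling burst */
};

/* Pick a burst no larger than the free window (cf. mptcp_subflow_get_send). */
static void start_burst(struct tx_state *t)
{
	uint64_t room = t->wnd_end - t->snd_nxt;

	t->snd_burst = room < SEND_BURST_SIZE ? (int)room : SEND_BURST_SIZE;
}

/* Account one push of `sent` bytes, `done` bytes into the fragment. */
static void update_post_push(struct tx_state *t, uint64_t data_seq,
			     uint32_t done, uint32_t sent)
{
	uint64_t snd_nxt_new = data_seq + done + sent;

	t->snd_burst -= sent;
	if ((int64_t)(snd_nxt_new - t->snd_nxt) > 0)  /* after64(): forward only */
		t->snd_nxt = snd_nxt_new;
}
```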
1578 struct mptcp_sock *msk = mptcp_sk(sk); in __subflow_push_pending() local
1599 mptcp_update_post_push(msk, dfrag, ret); in __subflow_push_pending()
1601 msk->first_pending = mptcp_send_next(sk); in __subflow_push_pending()
1603 if (msk->snd_burst <= 0 || in __subflow_push_pending()
1615 msk->last_data_sent = tcp_jiffies32; in __subflow_push_pending()
1622 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_push_pending() local
1633 if (mptcp_sched_get_send(msk)) in __mptcp_push_pending()
1638 mptcp_for_each_subflow(msk, subflow) { in __mptcp_push_pending()
1689 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_subflow_push_pending() local
1715 if (mptcp_sched_get_send(msk)) in __mptcp_subflow_push_pending()
1726 mptcp_for_each_subflow(msk, subflow) { in __mptcp_subflow_push_pending()
1748 if (msk->snd_data_fin_enable && in __mptcp_subflow_push_pending()
1749 msk->snd_nxt + 1 == msk->write_seq) in __mptcp_subflow_push_pending()
1760 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_sendmsg_fastopen() local
1771 ssk = __mptcp_nmpc_sk(msk); in mptcp_sendmsg_fastopen()
1775 if (!msk->first) in mptcp_sendmsg_fastopen()
1778 ssk = msk->first; in mptcp_sendmsg_fastopen()
1782 msk->fastopening = 1; in mptcp_sendmsg_fastopen()
1784 msk->fastopening = 0; in mptcp_sendmsg_fastopen()
1833 const struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_send_limit() local
1843 not_sent = msk->write_seq - msk->snd_nxt; in mptcp_send_limit()
1850 static void mptcp_rps_record_subflows(const struct mptcp_sock *msk) in mptcp_rps_record_subflows() argument
1857 mptcp_for_each_subflow(msk, subflow) { in mptcp_rps_record_subflows()
1866 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_sendmsg() local
1877 mptcp_rps_record_subflows(msk); in mptcp_sendmsg()
1921 dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag); in mptcp_sendmsg()
1926 dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset); in mptcp_sendmsg()
1954 WRITE_ONCE(msk->write_seq, msk->write_seq + psize); in mptcp_sendmsg()
1962 list_add_tail(&dfrag->list, &msk->rtx_queue); in mptcp_sendmsg()
1963 if (!msk->first_pending) in mptcp_sendmsg()
1964 msk->first_pending = dfrag; in mptcp_sendmsg()
1966 pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk, in mptcp_sendmsg()
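mptcp_sendmsg() extends the last pending fragment instead of allocating a new one whenever it can: mptcp_frag_can_collapse_to() (line 1007) permits that only when the fragment sits in the current page frag and ends exactly at msk->write_seq. A sketch of the collapse test and the bookkeeping after a successful append:

```c
#include <stdint.h>
#include <stdbool.h>

struct sdfrag {
	const void *page;     /* backing page of the fragment (illustrative) */
	uint64_t data_seq;
	uint32_t data_len;
};

/* Can new bytes at write_seq be appended to this fragment in place? */
static bool frag_can_collapse_to(const struct sdfrag *df, const void *cur_page,
				 uint64_t write_seq)
{
	return df->page == cur_page &&
	       df->data_seq + df->data_len == write_seq;
}

/* After copying `psize` user bytes, grow the fragment and the stream. */
static void frag_appended(struct sdfrag *df, uint64_t *write_seq,
			  uint32_t psize)
{
	df->data_len += psize;
	*write_seq += psize;  /* WRITE_ONCE(msk->write_seq, ...) in the kernel */
}
```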
1995 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
2002 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_recvmsg_mskq() local
2044 msk->bytes_consumed += count; in __mptcp_recvmsg_mskq()
2064 mptcp_rcv_space_adjust(msk, copied); in __mptcp_recvmsg_mskq()
2072 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) in mptcp_rcv_space_adjust() argument
2075 struct sock *sk = (struct sock *)msk; in mptcp_rcv_space_adjust()
2080 msk_owned_by_me(msk); in mptcp_rcv_space_adjust()
2085 if (!msk->rcvspace_init) in mptcp_rcv_space_adjust()
2086 mptcp_rcv_space_init(msk, msk->first); in mptcp_rcv_space_adjust()
2088 msk->rcvq_space.copied += copied; in mptcp_rcv_space_adjust()
2091 time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time); in mptcp_rcv_space_adjust()
2093 rtt_us = msk->rcvq_space.rtt_us; in mptcp_rcv_space_adjust()
2098 mptcp_for_each_subflow(msk, subflow) { in mptcp_rcv_space_adjust()
2113 msk->rcvq_space.rtt_us = rtt_us; in mptcp_rcv_space_adjust()
2114 msk->scaling_ratio = scaling_ratio; in mptcp_rcv_space_adjust()
2118 if (msk->rcvq_space.copied <= msk->rcvq_space.space) in mptcp_rcv_space_adjust()
2121 if (mptcp_rcvbuf_grow(sk, msk->rcvq_space.copied)) { in mptcp_rcv_space_adjust()
2127 mptcp_for_each_subflow(msk, subflow) { in mptcp_rcv_space_adjust()
2135 tcp_rcvbuf_grow(ssk, msk->rcvq_space.copied); in mptcp_rcv_space_adjust()
2141 msk->rcvq_space.copied = 0; in mptcp_rcv_space_adjust()
2142 msk->rcvq_space.time = mstamp; in mptcp_rcv_space_adjust()
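The block above is MPTCP's variant of TCP's dynamic right-sizing: roughly once per RTT (an estimate aggregated over all subflows in the loop at lines 2098-2113) it compares the bytes the application actually copied with the previous estimate, grows the receive buffer when the application outpaced it, and then propagates the result to every subflow (line 2135). A condensed sketch of the per-round decision (simplified bookkeeping; timing in plain microseconds):

```c
#include <stdint.h>
#include <stdbool.h>

struct rcvq_space {
	uint32_t copied;   /* bytes copied to userspace this round */
	uint32_t space;    /* estimate from the previous round */
	uint64_t time;     /* start of the measurement window (us) */
	uint32_t rtt_us;   /* RTT estimate aggregated across subflows */
};

/* One autotune step; returns true when the buffer should grow. */
static bool rcv_space_adjust(struct rcvq_space *rs, uint64_t now_us,
			     uint32_t subflow_rtt_us)
{
	bool grow = false;

	rs->rtt_us = subflow_rtt_us;
	/* Measure over at least one RTT; too early to judge otherwise. */
	if (rs->rtt_us == 0 || now_us - rs->time < rs->rtt_us)
		return false;

	/* The app consumed more than predicted: the window was too small. */
	if (rs->copied > rs->space) {
		rs->space = rs->copied;  /* new estimate */
		grow = true;             /* cf. mptcp_rcvbuf_grow(sk, copied) */
	}

	/* Start the next measurement round. */
	rs->copied = 0;
	rs->time = now_us;
	return grow;
}
```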
2148 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_move_skbs() local
2168 __mptcp_ofo_queue(msk); in __mptcp_move_skbs()
2170 mptcp_check_data_fin((struct sock *)msk); in __mptcp_move_skbs()
2176 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_can_spool_backlog() local
2181 DEBUG_NET_WARN_ON_ONCE(msk->backlog_unaccounted && sk->sk_socket && in mptcp_can_spool_backlog()
2185 if (list_empty(&msk->backlog_list) || in mptcp_can_spool_backlog()
2190 list_splice_init(&msk->backlog_list, skbs); in mptcp_can_spool_backlog()
2197 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_backlog_spooled() local
2199 WRITE_ONCE(msk->backlog_len, msk->backlog_len - moved); in mptcp_backlog_spooled()
2200 list_splice(skbs, &msk->backlog_list); in mptcp_backlog_spooled()
2223 const struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_inq_hint() local
2228 u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq; in mptcp_inq_hint()
2245 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_recvmsg() local
2261 mptcp_rps_record_subflows(msk); in mptcp_recvmsg()
2268 if (unlikely(msk->recvmsg_inq)) in mptcp_recvmsg()
2284 if (!list_empty(&msk->backlog_list) && mptcp_move_skbs(sk)) in mptcp_recvmsg()
2326 mptcp_cleanup_rbuf(msk, copied); in mptcp_recvmsg()
2334 mptcp_cleanup_rbuf(msk, copied); in mptcp_recvmsg()
2349 msk, skb_queue_empty(&sk->sk_receive_queue), copied); in mptcp_recvmsg()
2358 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_retransmit_timer() local
2363 if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags)) in mptcp_retransmit_timer()
2367 __set_bit(MPTCP_RETRANSMIT, &msk->cb_flags); in mptcp_retransmit_timer()
2388 struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk) in mptcp_subflow_get_retrans() argument
2394 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_get_retrans()
2402 mptcp_pm_subflow_chk_stale(msk, ssk); in mptcp_subflow_get_retrans()
2427 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_retransmit_pending_data() local
2429 if (__mptcp_check_fallback(msk)) in __mptcp_retransmit_pending_data()
2444 msk->recovery_snd_nxt = msk->snd_nxt; in __mptcp_retransmit_pending_data()
2445 msk->recovery = true; in __mptcp_retransmit_pending_data()
2448 msk->first_pending = rtx_head; in __mptcp_retransmit_pending_data()
2449 msk->snd_burst = 0; in __mptcp_retransmit_pending_data()
2452 list_for_each_entry(cur, &msk->rtx_queue, list) { in __mptcp_retransmit_pending_data()
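An RTO does not clone skbs here: __mptcp_retransmit_pending_data() records the current snd_nxt as recovery_snd_nxt, flags the connection as in recovery, and rewinds first_pending to the head of rtx_queue so the regular transmit path re-sends the data, possibly over a different subflow. Recovery ends once snd_una moves past recovery_snd_nxt (lines 1077-1078). A sketch of the two transitions:

```c
#include <stdint.h>
#include <stdbool.h>

struct recov_state {
	uint64_t snd_nxt, snd_una;
	uint64_t recovery_snd_nxt;  /* high-water mark to exit recovery */
	bool     recovery;
	int      snd_burst;
	const void *first_pending, *rtx_head;  /* opaque dfrag handles */
};

/* Enter recovery: rewind the pending pointer to the oldest unacked frag. */
static void enter_recovery(struct recov_state *s)
{
	s->recovery_snd_nxt = s->snd_nxt;
	s->recovery = true;
	s->first_pending = s->rtx_head;  /* re-send from here */
	s->snd_burst = 0;                /* force a fresh scheduling round */
}

/* Called from the clean-una path: leave recovery once fully re-acked. */
static void maybe_exit_recovery(struct recov_state *s)
{
	if (s->recovery && (int64_t)(s->snd_una - s->recovery_snd_nxt) > 0)
		s->recovery = false;
}
```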
2496 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_close_ssk() local
2523 if (msk->in_accept_queue && msk->first == ssk && in __mptcp_close_ssk()
2532 dispose_it = msk->free_first || ssk != msk->first; in __mptcp_close_ssk()
2541 __mptcp_subflow_disconnect(ssk, subflow, msk->fastclosing); in __mptcp_close_ssk()
2570 if (ssk == msk->first) in __mptcp_close_ssk()
2571 WRITE_ONCE(msk->first, NULL); in __mptcp_close_ssk()
2583 if (list_is_singular(&msk->conn_list) && msk->first && in __mptcp_close_ssk()
2584 inet_sk_state_load(msk->first) == TCP_CLOSE) { in __mptcp_close_ssk()
2586 msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) { in __mptcp_close_ssk()
2598 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_close_ssk() local
2613 list_for_each_entry(skb, &msk->backlog_list, list) { in mptcp_close_ssk()
2637 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_close_subflow() local
2641 mptcp_for_each_subflow_safe(msk, subflow, tmp) { in __mptcp_close_subflow()
2648 __mptcp_check_fallback(msk))) in __mptcp_close_subflow()
2670 static void mptcp_check_fastclose(struct mptcp_sock *msk) in mptcp_check_fastclose() argument
2673 struct sock *sk = (struct sock *)msk; in mptcp_check_fastclose()
2675 if (likely(!READ_ONCE(msk->rcv_fastclose))) in mptcp_check_fastclose()
2678 mptcp_token_destroy(msk); in mptcp_check_fastclose()
2680 mptcp_for_each_subflow_safe(msk, subflow, tmp) { in mptcp_check_fastclose()
2709 set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags); in mptcp_check_fastclose()
2722 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_retrans() local
2732 err = mptcp_sched_get_retrans(msk); in __mptcp_retrans()
2735 if (mptcp_data_fin_enabled(msk)) { in __mptcp_retrans()
2741 mptcp_send_ack(msk); in __mptcp_retrans()
2755 mptcp_for_each_subflow(msk, subflow) { in __mptcp_retrans()
2767 info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : in __mptcp_retrans()
2776 spin_lock_bh(&msk->fallback_lock); in __mptcp_retrans()
2777 if (__mptcp_check_fallback(msk) || in __mptcp_retrans()
2778 !msk->allow_subflows) { in __mptcp_retrans()
2779 spin_unlock_bh(&msk->fallback_lock); in __mptcp_retrans()
2797 msk->allow_infinite_fallback = false; in __mptcp_retrans()
2799 spin_unlock_bh(&msk->fallback_lock); in __mptcp_retrans()
2805 msk->bytes_retrans += len; in __mptcp_retrans()
2819 mptcp_for_each_subflow(msk, subflow) in __mptcp_retrans()
2827 void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout) in mptcp_reset_tout_timer() argument
2829 struct sock *sk = (struct sock *)msk; in mptcp_reset_tout_timer()
2846 static void mptcp_mp_fail_no_response(struct mptcp_sock *msk) in mptcp_mp_fail_no_response() argument
2848 struct sock *ssk = msk->first; in mptcp_mp_fail_no_response()
2864 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_backlog_purge() local
2869 list_splice_init(&msk->backlog_list, &backlog); in mptcp_backlog_purge()
2870 msk->backlog_len = 0; in mptcp_backlog_purge()
2883 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_do_fastclose() local
2887 msk->fastclosing = 1; in mptcp_do_fastclose()
2890 if (__mptcp_check_fallback(msk)) in mptcp_do_fastclose()
2893 mptcp_for_each_subflow_safe(msk, subflow, tmp) { in mptcp_do_fastclose()
2918 struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work); in mptcp_worker() local
2919 struct sock *sk = (struct sock *)msk; in mptcp_worker()
2928 mptcp_check_fastclose(msk); in mptcp_worker()
2930 mptcp_pm_worker(msk); in mptcp_worker()
2936 if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) in mptcp_worker()
2943 mptcp_for_each_subflow_safe(msk, subflow, tmp) in mptcp_worker()
2953 if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags)) in mptcp_worker()
2956 fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0; in mptcp_worker()
2958 mptcp_mp_fail_no_response(msk); in mptcp_worker()
2967 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_init_sock() local
2969 INIT_LIST_HEAD(&msk->conn_list); in __mptcp_init_sock()
2970 INIT_LIST_HEAD(&msk->join_list); in __mptcp_init_sock()
2971 INIT_LIST_HEAD(&msk->rtx_queue); in __mptcp_init_sock()
2972 INIT_LIST_HEAD(&msk->backlog_list); in __mptcp_init_sock()
2973 INIT_WORK(&msk->work, mptcp_worker); in __mptcp_init_sock()
2974 msk->out_of_order_queue = RB_ROOT; in __mptcp_init_sock()
2975 msk->first_pending = NULL; in __mptcp_init_sock()
2976 msk->timer_ival = TCP_RTO_MIN; in __mptcp_init_sock()
2977 msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO; in __mptcp_init_sock()
2978 msk->backlog_len = 0; in __mptcp_init_sock()
2980 WRITE_ONCE(msk->first, NULL); in __mptcp_init_sock()
2982 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); in __mptcp_init_sock()
2983 msk->allow_infinite_fallback = true; in __mptcp_init_sock()
2984 msk->allow_subflows = true; in __mptcp_init_sock()
2985 msk->recovery = false; in __mptcp_init_sock()
2986 msk->subflow_id = 1; in __mptcp_init_sock()
2987 msk->last_data_sent = tcp_jiffies32; in __mptcp_init_sock()
2988 msk->last_data_recv = tcp_jiffies32; in __mptcp_init_sock()
2989 msk->last_ack_recv = tcp_jiffies32; in __mptcp_init_sock()
2991 mptcp_pm_data_init(msk); in __mptcp_init_sock()
2992 spin_lock_init(&msk->fallback_lock); in __mptcp_init_sock()
2996 timer_setup(&msk->sk.mptcp_tout_timer, mptcp_tout_timer, 0); in __mptcp_init_sock()
3048 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_clear_xmit() local
3051 msk->first_pending = NULL; in __mptcp_clear_xmit()
3052 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) in __mptcp_clear_xmit()
3058 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_cancel_work() local
3060 if (cancel_work_sync(&msk->work)) in mptcp_cancel_work()
3152 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_check_send_data_fin() local
3155 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk), in mptcp_check_send_data_fin()
3156 msk->snd_nxt, msk->write_seq); in mptcp_check_send_data_fin()
3161 if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq || in mptcp_check_send_data_fin()
3165 WRITE_ONCE(msk->snd_nxt, msk->write_seq); in mptcp_check_send_data_fin()
3167 mptcp_for_each_subflow(msk, subflow) { in mptcp_check_send_data_fin()
3176 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_wr_shutdown() local
3179 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state, in __mptcp_wr_shutdown()
3183 WRITE_ONCE(msk->write_seq, msk->write_seq + 1); in __mptcp_wr_shutdown()
3184 WRITE_ONCE(msk->snd_data_fin_enable, 1); in __mptcp_wr_shutdown()
3191 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_destroy_sock() local
3193 pr_debug("msk=%p\n", msk); in __mptcp_destroy_sock()
3199 msk->pm.status = 0; in __mptcp_destroy_sock()
3200 mptcp_release_sched(msk); in __mptcp_destroy_sock()
3245 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_close() local
3257 if (mptcp_data_avail(msk) || timeout < 0) { in __mptcp_close()
3271 mptcp_for_each_subflow(msk, subflow) { in __mptcp_close()
3280 if (ssk == msk->first) in __mptcp_close()
3300 mptcp_pm_connection_closed(msk); in __mptcp_close()
3326 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) in mptcp_copy_inaddrs() argument
3330 struct ipv6_pinfo *msk6 = inet6_sk(msk); in mptcp_copy_inaddrs()
3332 msk->sk_v6_daddr = ssk->sk_v6_daddr; in mptcp_copy_inaddrs()
3333 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; in mptcp_copy_inaddrs()
3341 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; in mptcp_copy_inaddrs()
3342 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; in mptcp_copy_inaddrs()
3343 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; in mptcp_copy_inaddrs()
3344 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; in mptcp_copy_inaddrs()
3345 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; in mptcp_copy_inaddrs()
3346 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; in mptcp_copy_inaddrs()
3349 static void mptcp_destroy_common(struct mptcp_sock *msk) in mptcp_destroy_common() argument
3352 struct sock *sk = (struct sock *)msk; in mptcp_destroy_common()
3358 mptcp_for_each_subflow_safe(msk, subflow, tmp) in mptcp_destroy_common()
3362 skb_rbtree_purge(&msk->out_of_order_queue); in mptcp_destroy_common()
3367 mptcp_token_destroy(msk); in mptcp_destroy_common()
3368 mptcp_pm_destroy(msk); in mptcp_destroy_common()
3373 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_disconnect() local
3379 if (msk->fastopening) in mptcp_disconnect()
3388 mptcp_pm_connection_closed(msk); in mptcp_disconnect()
3394 mptcp_destroy_common(msk); in mptcp_disconnect()
3399 spin_lock_bh(&msk->fallback_lock); in mptcp_disconnect()
3400 msk->allow_subflows = true; in mptcp_disconnect()
3401 msk->allow_infinite_fallback = true; in mptcp_disconnect()
3402 WRITE_ONCE(msk->flags, 0); in mptcp_disconnect()
3403 spin_unlock_bh(&msk->fallback_lock); in mptcp_disconnect()
3405 msk->cb_flags = 0; in mptcp_disconnect()
3406 msk->recovery = false; in mptcp_disconnect()
3407 WRITE_ONCE(msk->can_ack, false); in mptcp_disconnect()
3408 WRITE_ONCE(msk->fully_established, false); in mptcp_disconnect()
3409 WRITE_ONCE(msk->rcv_data_fin, false); in mptcp_disconnect()
3410 WRITE_ONCE(msk->snd_data_fin_enable, false); in mptcp_disconnect()
3411 WRITE_ONCE(msk->rcv_fastclose, false); in mptcp_disconnect()
3412 WRITE_ONCE(msk->use_64bit_ack, false); in mptcp_disconnect()
3413 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); in mptcp_disconnect()
3414 mptcp_pm_data_reset(msk); in mptcp_disconnect()
3416 msk->bytes_consumed = 0; in mptcp_disconnect()
3417 msk->bytes_acked = 0; in mptcp_disconnect()
3418 msk->bytes_received = 0; in mptcp_disconnect()
3419 msk->bytes_sent = 0; in mptcp_disconnect()
3420 msk->bytes_retrans = 0; in mptcp_disconnect()
3421 msk->rcvspace_init = 0; in mptcp_disconnect()
3422 msk->fastclosing = 0; in mptcp_disconnect()
3425 WRITE_ONCE(msk->ack_seq, 0); in mptcp_disconnect()
3435 struct mptcp6_sock *msk6 = container_of(mptcp_sk(sk), struct mptcp6_sock, msk); in mptcp_inet6_sk()
3488 struct mptcp_sock *msk; in mptcp_sk_clone_init() local
3507 msk = mptcp_sk(nsk); in mptcp_sk_clone_init()
3508 WRITE_ONCE(msk->local_key, subflow_req->local_key); in mptcp_sk_clone_init()
3509 WRITE_ONCE(msk->token, subflow_req->token); in mptcp_sk_clone_init()
3510 msk->in_accept_queue = 1; in mptcp_sk_clone_init()
3511 WRITE_ONCE(msk->fully_established, false); in mptcp_sk_clone_init()
3513 WRITE_ONCE(msk->csum_enabled, true); in mptcp_sk_clone_init()
3515 WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1); in mptcp_sk_clone_init()
3516 WRITE_ONCE(msk->snd_nxt, msk->write_seq); in mptcp_sk_clone_init()
3517 WRITE_ONCE(msk->snd_una, msk->write_seq); in mptcp_sk_clone_init()
3518 WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd); in mptcp_sk_clone_init()
3519 msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq; in mptcp_sk_clone_init()
3520 mptcp_init_sched(msk, mptcp_sk(sk)->sched); in mptcp_sk_clone_init()
3523 msk->subflow_id = 2; in mptcp_sk_clone_init()
3534 WRITE_ONCE(msk->first, ssk); in mptcp_sk_clone_init()
3536 list_add(&subflow->node, &msk->conn_list); in mptcp_sk_clone_init()
3542 mptcp_token_accept(subflow_req, msk); in mptcp_sk_clone_init()
3550 mptcp_rcv_space_init(msk, ssk); in mptcp_sk_clone_init()
3553 __mptcp_subflow_fully_established(msk, subflow, mp_opt); in mptcp_sk_clone_init()
3560 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) in mptcp_rcv_space_init() argument
3564 msk->rcvspace_init = 1; in mptcp_rcv_space_init()
3565 msk->rcvq_space.copied = 0; in mptcp_rcv_space_init()
3566 msk->rcvq_space.rtt_us = 0; in mptcp_rcv_space_init()
3568 msk->rcvq_space.time = tp->tcp_mstamp; in mptcp_rcv_space_init()
3571 msk->rcvq_space.space = min_t(u32, tp->rcv_wnd, in mptcp_rcv_space_init()
3573 if (msk->rcvq_space.space == 0) in mptcp_rcv_space_init()
3574 msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; in mptcp_rcv_space_init()
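mptcp_rcv_space_init() seeds the first window estimate from the subflow's TCP receive window and falls back to TCP_INIT_CWND * TCP_MSS_DEFAULT when none is known, i.e. 10 segments of 536 bytes = 5360 bytes. Spelled out:

```c
#include <stdint.h>

#define TCP_INIT_CWND    10
#define TCP_MSS_DEFAULT  536   /* IPv4 minimum-MTU MSS */

/* Initial rcvq_space.space: the subflow's window, or a safe default. */
static uint32_t initial_rcv_space(uint32_t subflow_rcv_wnd)
{
	uint32_t space = subflow_rcv_wnd;   /* the kernel also clamps this */

	if (space == 0)
		space = TCP_INIT_CWND * TCP_MSS_DEFAULT;  /* 5360 bytes */
	return space;
}
```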
3579 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_destroy() local
3582 msk->free_first = 1; in mptcp_destroy()
3583 mptcp_destroy_common(msk); in mptcp_destroy()
3611 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_release_cb() local
3614 unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED); in mptcp_release_cb()
3624 list_splice_init(&msk->join_list, &join_list); in mptcp_release_cb()
3633 msk->cb_flags &= ~flags; in mptcp_release_cb()
3644 mptcp_cleanup_rbuf(msk, 0); in mptcp_release_cb()
3654 if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags)) in mptcp_release_cb()
3656 if (unlikely(msk->cb_flags)) { in mptcp_release_cb()
3661 if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first) in mptcp_release_cb()
3662 __mptcp_sync_state(sk, msk->pending_state); in mptcp_release_cb()
3663 if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags)) in mptcp_release_cb()
3665 if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags)) in mptcp_release_cb()
3737 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_get_port() local
3739 pr_debug("msk=%p, ssk=%p\n", msk, msk->first); in mptcp_get_port()
3740 if (WARN_ON_ONCE(!msk->first)) in mptcp_get_port()
3743 return inet_csk_get_port(msk->first, snum); in mptcp_get_port()
3749 struct mptcp_sock *msk; in mptcp_finish_connect() local
3754 msk = mptcp_sk(sk); in mptcp_finish_connect()
3764 WRITE_ONCE(msk->local_key, subflow->local_key); in mptcp_finish_connect()
3766 mptcp_pm_new_connection(msk, ssk, 0); in mptcp_finish_connect()
3797 struct mptcp_sock *msk = mptcp_sk(subflow->conn); in mptcp_finish_join() local
3798 struct sock *parent = (void *)msk; in mptcp_finish_join()
3801 pr_debug("msk=%p, subflow=%p\n", msk, subflow); in mptcp_finish_join()
3813 spin_lock_bh(&msk->fallback_lock); in mptcp_finish_join()
3814 if (!msk->allow_subflows) { in mptcp_finish_join()
3815 spin_unlock_bh(&msk->fallback_lock); in mptcp_finish_join()
3818 mptcp_subflow_joined(msk, ssk); in mptcp_finish_join()
3819 spin_unlock_bh(&msk->fallback_lock); in mptcp_finish_join()
3824 if (!mptcp_pm_allow_new_subflow(msk)) { in mptcp_finish_join()
3834 ret = __mptcp_finish_join(msk, ssk); in mptcp_finish_join()
3837 list_add_tail(&subflow->node, &msk->conn_list); in mptcp_finish_join()
3842 list_add_tail(&subflow->node, &msk->join_list); in mptcp_finish_join()
3843 __set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags); in mptcp_finish_join()
3869 static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v) in mptcp_ioctl_outq() argument
3871 const struct sock *sk = (void *)msk; in mptcp_ioctl_outq()
3880 delta = msk->write_seq - v; in mptcp_ioctl_outq()
3881 if (__mptcp_check_fallback(msk) && msk->first) { in mptcp_ioctl_outq()
3882 struct tcp_sock *tp = tcp_sk(msk->first); in mptcp_ioctl_outq()
3888 if (!((1 << msk->first->sk_state) & in mptcp_ioctl_outq()
3900 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_ioctl() local
3910 mptcp_cleanup_rbuf(msk, 0); in mptcp_ioctl()
3916 *karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una)); in mptcp_ioctl()
3921 *karg = mptcp_ioctl_outq(msk, msk->snd_nxt); in mptcp_ioctl()
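mptcp_ioctl_outq() answers both queue ioctls from the same data-level counters: SIOCOUTQ reports everything not yet acked (write_seq - snd_una) while SIOCOUTQNSD reports only what was never sent (write_seq - snd_nxt). A sketch of the two computations:

```c
#include <stdint.h>

struct outq_state {
	uint64_t write_seq;  /* next byte the app would write */
	uint64_t snd_nxt;    /* next byte to transmit */
	uint64_t snd_una;    /* oldest unacked byte */
};

/* SIOCOUTQ: bytes written but not yet acked by the peer. */
static uint64_t outq(const struct outq_state *s)
{
	return s->write_seq - s->snd_una;
}

/* SIOCOUTQNSD: bytes written but not yet sent at all. */
static uint64_t outq_nsd(const struct outq_state *s)
{
	return s->write_seq - s->snd_nxt;
}
```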
3935 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_connect() local
3939 ssk = __mptcp_nmpc_sk(msk); in mptcp_connect()
3950 mptcp_early_fallback(msk, subflow, MPTCP_MIB_MD5SIGFALLBACK); in mptcp_connect()
3954 mptcp_early_fallback(msk, subflow, in mptcp_connect()
3957 mptcp_early_fallback(msk, subflow, in mptcp_connect()
3961 WRITE_ONCE(msk->write_seq, subflow->idsn); in mptcp_connect()
3962 WRITE_ONCE(msk->snd_nxt, subflow->idsn); in mptcp_connect()
3963 WRITE_ONCE(msk->snd_una, subflow->idsn); in mptcp_connect()
3964 if (likely(!__mptcp_check_fallback(msk))) in mptcp_connect()
3970 if (!msk->fastopening) in mptcp_connect()
3992 if (!msk->fastopening) in mptcp_connect()
4000 mptcp_token_destroy(msk); in mptcp_connect()
4044 struct mptcp_sock *msk = mptcp_sk(sock->sk); in mptcp_bind() local
4049 ssk = __mptcp_nmpc_sk(msk); in mptcp_bind()
4071 struct mptcp_sock *msk = mptcp_sk(sock->sk); in mptcp_listen() local
4076 pr_debug("msk=%p\n", msk); in mptcp_listen()
4084 ssk = __mptcp_nmpc_sk(msk); in mptcp_listen()
4112 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_graft_subflows() local
4124 list_splice_init(&msk->join_list, &join_list); in mptcp_graft_subflows()
4130 mptcp_for_each_subflow(msk, subflow) { in mptcp_graft_subflows()
4160 msk->backlog_unaccounted + in mptcp_graft_subflows()
4164 msk->backlog_unaccounted = 0; in mptcp_graft_subflows()
4175 struct mptcp_sock *msk = mptcp_sk(sock->sk); in mptcp_stream_accept() local
4178 pr_debug("msk=%p\n", msk); in mptcp_stream_accept()
4183 ssk = READ_ONCE(msk->first); in mptcp_stream_accept()
4216 msk = mptcp_sk(newsk); in mptcp_stream_accept()
4217 msk->in_accept_queue = 0; in mptcp_stream_accept()
4220 mptcp_rps_record_subflows(msk); in mptcp_stream_accept()
4225 if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) { in mptcp_stream_accept()
4226 if (unlikely(list_is_singular(&msk->conn_list))) in mptcp_stream_accept()
4228 mptcp_close_ssk(newsk, msk->first, in mptcp_stream_accept()
4229 mptcp_subflow_ctx(msk->first)); in mptcp_stream_accept()
4250 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk) in mptcp_check_writeable() argument
4252 struct sock *sk = (struct sock *)msk; in mptcp_check_writeable()
4269 struct mptcp_sock *msk; in mptcp_poll() local
4274 msk = mptcp_sk(sk); in mptcp_poll()
4278 pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags); in mptcp_poll()
4280 struct sock *ssk = READ_ONCE(msk->first); in mptcp_poll()
4299 mask |= mptcp_check_writeable(msk); in mptcp_poll()