/linux/net/ipv4/
tcp_input.c
    266  if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)  in tcp_measure_rcv_mss()
    349  if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)  in tcp_ecn_accept_cwr()
    363  switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {  in __tcp_ecn_check_ce()
    722  if (TCP_SKB_CB(skb)->end_seq -  in tcp_rcv_rtt_measure_ts()
    723  TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) {  in tcp_rcv_rtt_measure_ts()
   1113  before(TCP_SKB_CB(skb)->seq,  in tcp_verify_retransmit_hint()
   1114  TCP_SKB_CB(tp->retransmit_skb_hint)->seq)))  in tcp_verify_retransmit_hint()
   1128  __u8 sacked = TCP_SKB_CB(skb)->sacked;  in tcp_mark_skb_lost()
   1138  TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;  in tcp_mark_skb_lost()
   1146  TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;  in tcp_mark_skb_lost()
  [all …]
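Most of these hits compare the 32-bit seq/end_seq fields with before()/after(). A minimal userspace sketch of that wraparound-safe comparison idiom (seq_before/seq_after and the sample values are illustrative, not the kernel helpers):

/* Sketch of the wraparound-safe 32-bit sequence comparison the hits above
 * rely on: ordering is taken from the sign of the 32-bit difference, so it
 * still holds when sequence numbers wrap past 2^32. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

static bool seq_after(uint32_t seq1, uint32_t seq2)
{
	return seq_before(seq2, seq1);
}

int main(void)
{
	uint32_t rcv_nxt = 0xfffffff0u;   /* receiver is about to wrap */
	uint32_t seq     = 0x00000010u;   /* 32 bytes "later", past the wrap */

	/* Both print 1: seq is still ordered after rcv_nxt despite the wrap. */
	printf("seq_after(seq, rcv_nxt)  = %d\n", seq_after(seq, rcv_nxt));
	printf("seq_before(rcv_nxt, seq) = %d\n", seq_before(rcv_nxt, seq));
	return 0;
}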
tcp_output.c
     73  WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);  in tcp_event_new_data_sent()
    324  TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;  in tcp_ecn_send_synack()
    326  TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;  in tcp_ecn_send_synack()
    350  TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;  in tcp_ecn_send_syn()
    363  TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);  in tcp_ecn_clear_syn()
    384  !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {  in tcp_ecn_send()
    407  TCP_SKB_CB(skb)->tcp_flags = flags;  in tcp_init_nondata_skb()
    411  TCP_SKB_CB(skb)->seq = seq;  in tcp_init_nondata_skb()
    414  TCP_SKB_CB(skb)->end_seq = seq;  in tcp_init_nondata_skb()
   1326  tcb = TCP_SKB_CB(skb);  in __tcp_transmit_skb()
  [all …]
tcp_minisocks.c
    127  !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,  in tcp_timewait_state_process()
    136  if (th->syn && !before(TCP_SKB_CB(skb)->seq, rcv_nxt))  in tcp_timewait_state_process()
    141  !after(TCP_SKB_CB(skb)->end_seq, rcv_nxt) ||  in tcp_timewait_state_process()
    142  TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {  in tcp_timewait_state_process()
    151  TCP_SKB_CB(skb)->end_seq != rcv_nxt + 1)  in tcp_timewait_state_process()
    156  twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq,  in tcp_timewait_state_process()
    188  (TCP_SKB_CB(skb)->seq == rcv_nxt &&  in tcp_timewait_state_process()
    189  (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {  in tcp_timewait_state_process()
    235  (after(TCP_SKB_CB(skb)->seq, rcv_nxt) ||  in tcp_timewait_state_process()
    684  if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&  in tcp_check_req()
  [all …]
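The TIME-WAIT hits feed seq/end_seq into a window-overlap test. A simplified stand-in for that kind of check, under the assumption that a segment is acceptable when [seq, end_seq) touches the receive window (segment_in_window is an illustrative name, not the kernel function):

/* Simplified sketch of a segment-vs-window overlap test, reusing the
 * wraparound-safe comparison idiom from the previous sketch. */
#include <stdbool.h>
#include <stdint.h>

static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

bool segment_in_window(uint32_t seq, uint32_t end_seq,
		       uint32_t s_win, uint32_t e_win)
{
	if (seq == s_win)
		return true;                    /* starts exactly at the window */
	if (seq_after(end_seq, s_win) && seq_before(seq, e_win))
		return true;                    /* overlaps the window body */
	return seq == e_win && end_seq == seq;  /* zero-length probe at the edge */
}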
tcp_rate.c
     65  TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;  in tcp_rate_skb_sent()
     66  TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;  in tcp_rate_skb_sent()
     67  TCP_SKB_CB(skb)->tx.delivered = tp->delivered;  in tcp_rate_skb_sent()
     68  TCP_SKB_CB(skb)->tx.delivered_ce = tp->delivered_ce;  in tcp_rate_skb_sent()
     69  TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0;  in tcp_rate_skb_sent()
     84  struct tcp_skb_cb *scb = TCP_SKB_CB(skb);  in tcp_rate_skb_delivered()
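At transmit time these hits snapshot the connection's delivery state into the skb's control block; when the skb is later ACKed, the delta against the current state yields a rate sample. A rough userspace sketch of that bookkeeping, with simplified stand-in types rather than the kernel's tcp_skb_cb/tcp_sock layout:

/* Sketch: snapshot delivery state at send time, turn the delta into a
 * packets-per-second sample at ACK time. */
#include <stdint.h>
#include <stdio.h>

struct tx_snapshot {
	uint64_t delivered_mstamp;	/* usec timestamp of the snapshot */
	uint32_t delivered;		/* packets delivered so far */
};

/* Called when the skb is sent: remember the sender's state at that moment. */
static void rate_skb_sent(struct tx_snapshot *snap,
			  uint32_t tp_delivered, uint64_t tp_delivered_mstamp)
{
	snap->delivered = tp_delivered;
	snap->delivered_mstamp = tp_delivered_mstamp;
}

/* Called when the skb is ACKed: packets per second over the sample interval. */
static double rate_skb_delivered(const struct tx_snapshot *snap,
				 uint32_t tp_delivered, uint64_t now_usec)
{
	uint64_t interval = now_usec - snap->delivered_mstamp;

	return interval ? (tp_delivered - snap->delivered) * 1e6 / interval : 0.0;
}

int main(void)
{
	struct tx_snapshot snap;

	rate_skb_sent(&snap, 100, 1000000);		 /* 100 pkts delivered at t=1s */
	printf("%.0f pkts/s\n",
	       rate_skb_delivered(&snap, 150, 1500000)); /* 50 more pkts in 0.5s */
	return 0;
}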
tcp_fastopen.c
    174  if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)  in tcp_fastopen_add_skb()
    194  TCP_SKB_CB(skb)->seq++;  in tcp_fastopen_add_skb()
    195  TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;  in tcp_fastopen_add_skb()
    197  tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;  in tcp_fastopen_add_skb()
    206  if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)  in tcp_fastopen_add_skb()
    285  tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;  in tcp_fastopen_create_child()
    353  bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;  in tcp_try_fastopen()
    567  if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {  in tcp_fastopen_active_disable_ofo_check()
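Hits 194-197 bump seq and clear the SYN bit because the SYN occupies one unit of sequence space, so the queued Fast Open payload logically starts at seq + 1. A small illustrative sketch of that adjustment (types and the flag value are stand-ins, not kernel definitions):

/* Sketch: consume the SYN so the control block describes only the payload. */
#include <stdint.h>

#define HDR_SYN	0x02	/* illustrative flag value */

struct cb {
	uint32_t seq;		/* first sequence number covered by this skb */
	uint32_t end_seq;	/* seq + SYN + FIN + payload length */
	uint8_t	 flags;
};

void strip_syn(struct cb *cb, uint32_t *rcv_nxt)
{
	cb->seq++;			/* payload starts one past the SYN */
	cb->flags &= ~HDR_SYN;
	*rcv_nxt = cb->end_seq;		/* everything up to end_seq is received */
}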
tcp_ipv4.c
    911  skb, &TCP_SKB_CB(skb)->header.h4.opt,  in tcp_v4_send_reset()
   1025  skb, &TCP_SKB_CB(skb)->header.h4.opt,  in tcp_v4_send_ack()
   2051  if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
   2052  TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
   2053  ((TCP_SKB_CB(tail)->tcp_flags |
   2054  TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
   2055  !((TCP_SKB_CB(tail)->tcp_flags &
   2056  TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
   2057  ((TCP_SKB_CB(tail)->tcp_flags ^
   2058  TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
  [all …]
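Hits 2051-2058 spell out when a freshly received segment may be coalesced with the backlog tail: contiguous sequence space, same DS field, and compatible flags. A sketch of that eligibility test, written in positive form and using simplified stand-in types and flag values:

/* Sketch of the coalescing test implied by the hits above. */
#include <stdbool.h>
#include <stdint.h>

#define F_SYN 0x02
#define F_RST 0x04
#define F_ACK 0x10
#define F_URG 0x20
#define F_ECE 0x40
#define F_CWR 0x80	/* illustrative flag values */

struct seg {
	uint32_t seq, end_seq;
	uint8_t  flags;
	uint8_t  dsfield;
};

bool can_coalesce(const struct seg *tail, const struct seg *skb)
{
	return tail->end_seq == skb->seq &&			 /* contiguous payload   */
	       tail->dsfield == skb->dsfield &&			 /* same TOS/ECN marking */
	       !((tail->flags | skb->flags) &
		 (F_SYN | F_RST | F_URG)) &&			 /* no exceptional flags */
	       ((tail->flags & skb->flags) & F_ACK) &&		 /* both carry an ACK    */
	       !((tail->flags ^ skb->flags) & (F_ECE | F_CWR));	 /* identical ECN bits   */
}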
tcp.c
    487  struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);  in tcp_tx_timestamp()
    493  shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;  in tcp_tx_timestamp()
    667  TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;  in tcp_mark_push()
    679  struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);  in tcp_skb_entail()
    955  if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {  in tcp_remove_empty_skb()
   1186  TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;  in tcp_sendmsg_locked()
   1286  TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;  in tcp_sendmsg_locked()
   1289  TCP_SKB_CB(skb)->end_seq += copy;  in tcp_sendmsg_locked()
   1295  TCP_SKB_CB(skb)->eor = 1;  in tcp_sendmsg_locked()
   1514  WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),  in tcp_cleanup_rbuf()
  [all …]
tcp_recovery.c
     68  struct tcp_skb_cb *scb = TCP_SKB_CB(skb);  in tcp_rack_detect_loss()
    227  if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)  in tcp_newreno_mark_lost()
syncookies.c
    286  treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;  in cookie_tcp_reqsk_init()
    400  struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;  in cookie_v4_check()
tcp_bpf.c
    214  if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)  in is_next_msg_fin()
tcp_ao.c
    813  struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);  in tcp_ao_transmit_skb()
/linux/net/mptcp/
syncookies.c
     48  seq = TCP_SKB_CB(skb)->seq;  in mptcp_join_entry_hash()
     50  seq = TCP_SKB_CB(skb)->seq - 1;  in mptcp_join_entry_hash()
options.c
     37  if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {  in mptcp_parse_option()
     44  if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK) {  in mptcp_parse_option()
    339  if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST))  in mptcp_parse_option()
    412  subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;  in mptcp_syn_options()
    465  subflow->snd_isn != TCP_SKB_CB(skb)->seq ||  in mptcp_established_options_mp()
    840  if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) {  in mptcp_established_options()
    938  if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 &&  in check_fully_established()
    939  TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&  in check_fully_established()
    951  if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) {  in check_fully_established()
   1209  if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {  in mptcp_incoming_options()
subflow.c
    191  subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;  in subflow_check_req()
    220  subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;  in subflow_check_req()
    292  subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;  in mptcp_subflow_init_cookie_req()
    298  subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;  in mptcp_subflow_init_cookie_req()
    540  subflow->ssn_offset = TCP_SKB_CB(skb)->seq;  in subflow_finish_connect()
    989  skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;  in skb_is_fully_mapped()
   1043  offset = seq - TCP_SKB_CB(skb)->seq;  in validate_data_csum()
   1123  if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))  in get_mapping_status()
   1125  TCP_SKB_CB(skb)->seq,  in get_mapping_status()
   1126  TCP_SKB_CB(skb)->end_seq,  in get_mapping_status()
  [all …]
fastopen.c
     49  MPTCP_SKB_CB(skb)->has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;  in mptcp_fastopen_subflow_synack_set_params()
protocol.c
    358  has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;  in __mptcp_move_skb()
    690  offset = seq - TCP_SKB_CB(skb)->seq;  in __mptcp_move_skbs_from_subflow()
    691  fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;  in __mptcp_move_skbs_from_subflow()
   1287  TCP_SKB_CB(skb)->eor = 1;  in mptcp_sendmsg_frag()
   1345  TCP_SKB_CB(skb)->end_seq += copy;  in mptcp_sendmsg_frag()
   1350  TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;  in mptcp_sendmsg_frag()
/linux/include/net/
tcp.h
    989  #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))  macro
    999  return TCP_SKB_CB(skb)->header.h6.iif;  in tcp_v6_iif()
   1004  bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);  in tcp_v6_iif_l3_slave()
   1006  return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;  in tcp_v6_iif_l3_slave()
   1013  if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))  in tcp_v6_sdif()
   1014  return TCP_SKB_CB(skb)->header.h6.iif;  in tcp_v6_sdif()
   1031  if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))  in tcp_v4_sdif()
   1032  return TCP_SKB_CB(skb)->header.h4.iif;  in tcp_v4_sdif()
   1042  return TCP_SKB_CB(skb)->tcp_gso_segs;  in tcp_skb_pcount()
   1047  TCP_SKB_CB(skb)->tcp_gso_segs = segs;  in tcp_skb_pcount_set()
  [all …]
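Hit 989 is the macro definition itself: the TCP layer overlays its per-packet state on the skb's generic cb[] scratch area with a cast. A userspace sketch of the same overlay pattern (struct names, fields and sizes are illustrative, not the kernel's definitions):

/* Sketch of the control-block overlay that TCP_SKB_CB() performs: a generic
 * buffer carries an opaque cb[] scratch area, and the layer that currently
 * owns the buffer casts it to its own per-packet state. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct sk_buff_like {
	char cb[48];		/* opaque per-layer scratch space */
	/* ... rest of the buffer metadata ... */
};

struct tcp_cb_like {
	uint32_t seq;		/* first sequence number of this segment */
	uint32_t end_seq;	/* seq + SYN + FIN + payload length */
	uint32_t ack_seq;	/* acknowledgment number from the header */
	uint8_t	 tcp_flags;	/* header flag byte */
};

#define TCP_CB(skb) ((struct tcp_cb_like *)&((skb)->cb[0]))

int main(void)
{
	struct sk_buff_like skb = { 0 };

	/* The overlaid state must fit in the scratch area. */
	static_assert(sizeof(struct tcp_cb_like) <= sizeof(skb.cb),
		      "control block too large");

	TCP_CB(&skb)->seq = 1000;
	TCP_CB(&skb)->end_seq = 1100;
	printf("segment covers %u bytes\n",
	       TCP_CB(&skb)->end_seq - TCP_CB(&skb)->seq);
	return 0;
}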
/linux/net/ipv6/
tcp_ipv6.c
    795  bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);  in tcp_v6_init_req()
    808  (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||  in tcp_v6_init_req()
   1342  memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
   1698  if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
   1709  if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
   1730  memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
   1734  TCP_SKB_CB(skb)->seq = ntohl(th->seq);
   1735  TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
   1737  TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
   1738  TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
  [all …]
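Hits 1734-1738 show the receive path caching header fields in the control block, with end_seq covering the sequence space consumed by SYN, FIN and the payload. A simplified sketch of that step (tcphdr_like/tcp_cb_like are stand-in types, not the kernel structures):

/* Sketch: convert header fields to host order once and cache them, and let
 * end_seq account for the sequence space used by SYN, FIN and the payload. */
#include <arpa/inet.h>		/* ntohl() */
#include <stdint.h>

struct tcphdr_like {
	uint32_t seq;		/* network byte order */
	uint32_t ack_seq;	/* network byte order */
	unsigned int syn:1, fin:1;
	uint8_t	 flags;		/* raw flag byte */
};

struct tcp_cb_like {
	uint32_t seq, end_seq, ack_seq;
	uint8_t	 tcp_flags;
};

void fill_cb(struct tcp_cb_like *cb, const struct tcphdr_like *th,
	     uint32_t payload_len)
{
	cb->seq = ntohl(th->seq);
	/* SYN and FIN each occupy one unit of sequence space. */
	cb->end_seq = cb->seq + th->syn + th->fin + payload_len;
	cb->ack_seq = ntohl(th->ack_seq);
	cb->tcp_flags = th->flags;
}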
syncookies.c
    208  if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||  in cookie_v6_check()
/linux/net/tls/
tls_strp.c
    435  seq = TCP_SKB_CB(first)->seq;  in tls_strp_check_queue_ok()
    445  if (TCP_SKB_CB(skb)->seq != seq)  in tls_strp_check_queue_ok()
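These two hits check that queued skbs form a contiguous byte stream by comparing each skb's cached seq against an expected value. A small sketch of that kind of walk, with stand-in types:

/* Sketch: each segment must start exactly where the previous one ended. */
#include <stdbool.h>
#include <stdint.h>

struct seg {
	uint32_t seq;		/* first sequence number of this segment */
	uint32_t len;		/* payload length */
	struct seg *next;
};

bool queue_is_contiguous(const struct seg *first)
{
	uint32_t expected = first->seq;

	for (const struct seg *s = first; s; s = s->next) {
		if (s->seq != expected)
			return false;	/* gap or overlap in the byte stream */
		expected = s->seq + s->len;
	}
	return true;
}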
tls_sw.c
   2409  TCP_SKB_CB(skb)->seq + strp->stm.offset);  in tls_rx_msg_size()