
Searched refs:end_seq (Results 1 – 25 of 25) sorted by relevance

/linux/net/ipv4/
tcp_input.c
349 if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) in tcp_ecn_accept_cwr()
722 if (TCP_SKB_CB(skb)->end_seq - in tcp_rcv_rtt_measure_ts()
1031 u32 end_seq, struct tcp_sacktag_state *state) in tcp_dsack_seen() argument
1035 if (!before(start_seq, end_seq)) in tcp_dsack_seen()
1038 seq_len = end_seq - start_seq; in tcp_dsack_seen()
1044 else if (tp->tlp_high_seq && tp->tlp_high_seq == end_seq) in tcp_dsack_seen()
1254 u32 start_seq, u32 end_seq) in tcp_is_sackblock_valid() argument
1257 if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) in tcp_is_sackblock_valid()
1274 if (after(end_seq, tp->snd_una)) in tcp_is_sackblock_valid()
1281 if (!after(end_seq, tp->undo_marker)) in tcp_is_sackblock_valid()
[all …]
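
The SACK-validity and DSACK checks excerpted above all go through the kernel's wrap-safe sequence comparisons. Below is a standalone sketch of those before()/after() helpers (the kernel's own versions live in include/net/tcp.h), written against plain stdint types so it compiles outside the kernel tree; the demo values are illustrative.

/* Wrap-safe 32-bit sequence comparison, mirroring before()/after()
 * from include/net/tcp.h.  The subtraction is taken modulo 2^32 and
 * reinterpreted as signed, so the ordering survives wrap-around.
 */
#include <stdint.h>
#include <stdio.h>

static int seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

static int seq_after(uint32_t seq2, uint32_t seq1)
{
	return seq_before(seq1, seq2);
}

int main(void)
{
	/* A SACK block [start_seq, end_seq) that wraps past 2^32 is still
	 * seen as ordered, which is what tcp_is_sackblock_valid() relies
	 * on when it rejects !before(start_seq, end_seq). */
	uint32_t start_seq = 0xfffffff0u, end_seq = 0x10u;

	printf("start before end: %d\n", seq_before(start_seq, end_seq)); /* 1 */
	printf("end after start:  %d\n", seq_after(end_seq, start_seq));  /* 1 */
	return 0;
}
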
tcp_recovery.c
78 tp->rack.end_seq, scb->end_seq)) in tcp_rack_detect_loss()
118 void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, in tcp_rack_advance() argument
140 end_seq, tp->rack.end_seq)) { in tcp_rack_advance()
142 tp->rack.end_seq = end_seq; in tcp_rack_advance()
tcp_minisocks.c
27 static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) in tcp_in_window() argument
31 if (after(end_seq, s_win) && before(seq, e_win)) in tcp_in_window()
33 return seq == e_win && seq == end_seq; in tcp_in_window()
127 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, in tcp_timewait_state_process()
141 !after(TCP_SKB_CB(skb)->end_seq, rcv_nxt) || in tcp_timewait_state_process()
142 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) { in tcp_timewait_state_process()
151 TCP_SKB_CB(skb)->end_seq != rcv_nxt + 1) in tcp_timewait_state_process()
156 twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq, in tcp_timewait_state_process()
189 (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) { in tcp_timewait_state_process()
792 TCP_SKB_CB(skb)->end_seq, in tcp_check_req()
[all …]
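
tcp_in_window(), shown in full at the top of this entry, is how the TIME_WAIT path decides whether a segment [seq, end_seq) overlaps the advertised window [s_win, e_win). A standalone restatement with the wrap-safe comparisons written out as signed subtractions:

/* Restatement of tcp_in_window() from tcp_minisocks.c: a segment
 * overlaps the window if it ends after the window start and begins
 * before the window end; a zero-length segment against a zero-size
 * window is accepted only when it sits exactly at the window edge.
 */
#include <stdint.h>

static int in_window(uint32_t seq, uint32_t end_seq, uint32_t s_win, uint32_t e_win)
{
	if ((int32_t)(s_win - end_seq) < 0 &&	/* after(end_seq, s_win) */
	    (int32_t)(seq - e_win) < 0)		/* before(seq, e_win)    */
		return 1;
	return seq == e_win && seq == end_seq;
}
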
tcp_output.c
73 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq); in tcp_event_new_data_sent()
414 TCP_SKB_CB(skb)->end_seq = seq; in tcp_init_nondata_skb()
727 *ptr++ = htonl(sp[this_sack].end_seq); in tcp_options_write()
1448 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) in __tcp_transmit_skb()
1498 WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq); in tcp_queue_skb()
1646 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_fragment()
1647 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tcp_fragment()
1673 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { in tcp_fragment()
1981 tp->snd_sml = TCP_SKB_CB(skb)->end_seq; in tcp_minshall_update()
2142 u32 end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_snd_wnd_test() local
[all …]
tcp_illinois.c
49 u32 end_seq; /* right edge of current RTT */ member
62 ca->end_seq = tp->snd_nxt; in rtt_reset()
265 if (after(ack, ca->end_seq)) in tcp_illinois_cong_avoid()
tcp_rate.c
93 scb->end_seq, rs->last_end_seq)) { in tcp_rate_skb_delivered()
99 rs->last_end_seq = scb->end_seq; in tcp_rate_skb_delivered()
tcp_cubic.c
102 u32 end_seq; /* end_seq of the round */ member
124 ca->end_seq = tp->snd_nxt; in bictcp_hystart_reset()
392 if (after(tp->snd_una, ca->end_seq)) in hystart_update()
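tcp_illinois.c and tcp_cubic.c (like the bpf_cubic.c selftest listed further down) use end_seq the same way: it is snapshotted to snd_nxt when an RTT round starts, and the round is over once the cumulative ACK moves past it. A small sketch of that pattern; the struct and helper names are illustrative, not the kernel's:

/* "One end_seq per round" pattern shared by the congestion-control
 * modules above.  Names are illustrative.
 */
#include <stdint.h>

struct rtt_round {
	uint32_t end_seq;	/* right edge of the current RTT round */
};

static void round_start(struct rtt_round *r, uint32_t snd_nxt)
{
	r->end_seq = snd_nxt;	/* everything sent so far belongs to this round */
}

static int round_elapsed(const struct rtt_round *r, uint32_t ack)
{
	/* after(ack, end_seq): the peer has acknowledged the whole round */
	return (int32_t)(r->end_seq - ack) < 0;
}
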
tcp_fastopen.c
174 if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt) in tcp_fastopen_add_skb()
197 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; in tcp_fastopen_add_skb()
353 bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1; in tcp_try_fastopen()
tcp.c
680 tcb->seq = tcb->end_seq = tp->write_seq; in tcp_skb_entail()
954 if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { in tcp_remove_empty_skb()
1288 TCP_SKB_CB(skb)->end_seq += copy; in tcp_sendmsg_locked()
1513 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), in tcp_cleanup_rbuf()
1515 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); in tcp_cleanup_rbuf()
3068 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; in __tcp_close()
3365 tp->duplicate_sack[0].end_seq = 0; in tcp_disconnect()
tcp_ipv4.c
2050 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
2074 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
2162 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
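The last tcp_ipv4.c hit (line 2162, with an identical line in tcp_ipv6.c further down) shows where end_seq is first computed as a segment enters the stack: the starting sequence number plus the payload length, plus one unit each for SYN and FIN, matching the "SEQ + FIN + SYN + datalen" comment on tcp_skb_cb in include/net/tcp.h. A standalone restatement (the helper name is illustrative):

/* end_seq = seq + SYN + FIN + payload length; SYN and FIN each occupy
 * one unit of sequence space even though they carry no data.
 */
#include <stdint.h>

static uint32_t tcp_segment_end_seq(uint32_t seq, uint32_t datalen, int syn, int fin)
{
	return seq + (syn ? 1 : 0) + (fin ? 1 : 0) + datalen;
}
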
/linux/net/mptcp/
fastopen.c
47 MPTCP_SKB_CB(skb)->end_seq = 0; in mptcp_fastopen_subflow_synack_set_params()
70 WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq); in __mptcp_fastopen_gen_msk_ackseq()
73 MPTCP_SKB_CB(skb)->end_seq, MPTCP_SKB_CB(skb)->end_seq + msk->ack_seq); in __mptcp_fastopen_gen_msk_ackseq()
75 MPTCP_SKB_CB(skb)->end_seq += msk->ack_seq; in __mptcp_fastopen_gen_msk_ackseq()
protocol.c
144 to->len, MPTCP_SKB_CB(from)->end_seq); in mptcp_try_coalesce()
145 MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq; in mptcp_try_coalesce()
161 if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq) in mptcp_ooo_try_coalesce()
213 u64 seq, end_seq, max_seq; in mptcp_data_queue_ofo() local
217 end_seq = MPTCP_SKB_CB(skb)->end_seq; in mptcp_data_queue_ofo()
222 if (after64(end_seq, max_seq)) { in mptcp_data_queue_ofo()
226 (unsigned long long)end_seq - (unsigned long)max_seq, in mptcp_data_queue_ofo()
251 if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) { in mptcp_data_queue_ofo()
267 if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) { in mptcp_data_queue_ofo()
268 if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) { in mptcp_data_queue_ofo()
[all …]
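
MPTCP carries its data-sequence space in 64 bits, so the coalescing and out-of-order queueing code above relies on before64()/after64() rather than the 32-bit helpers. A sketch of those comparisons (the kernel keeps its own versions alongside the MPTCP code):

/* 64-bit analogue of before()/after(), using the same signed
 * reinterpretation of an unsigned difference.
 */
#include <stdint.h>

static int seq_before64(uint64_t seq1, uint64_t seq2)
{
	return (int64_t)(seq1 - seq2) < 0;
}

static int seq_after64(uint64_t seq2, uint64_t seq1)
{
	return seq_before64(seq1, seq2);
}
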
options.c
412 subflow->snd_isn = TCP_SKB_CB(skb)->end_seq; in mptcp_syn_options()
939 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq && in check_fully_established()
1209 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { in mptcp_incoming_options()
subflow.c
1125 TCP_SKB_CB(skb)->end_seq, in get_mapping_status()
1266 if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq)) in mptcp_subflow_discard_data()
protocol.h
130 u64 end_seq; member
/linux/net/netfilter/
nf_conntrack_seqadj.c
94 if (after(ntohl(sack->end_seq) - seq->offset_before, in nf_ct_sack_block_adjust()
96 new_end_seq = htonl(ntohl(sack->end_seq) - in nf_ct_sack_block_adjust()
99 new_end_seq = htonl(ntohl(sack->end_seq) - in nf_ct_sack_block_adjust()
104 ntohl(sack->end_seq), ntohl(new_end_seq)); in nf_ct_sack_block_adjust()
109 sack->end_seq, new_end_seq, false); in nf_ct_sack_block_adjust()
111 sack->end_seq = new_end_seq; in nf_ct_sack_block_adjust()
/linux/net/tls/
tls_device.c
175 if (info && !before(acked_seq, info->end_seq)) in tls_icsk_clean_acked()
179 if (before(acked_seq, info->end_seq)) in tls_icsk_clean_acked()
288 record->end_seq = tp->write_seq + record->len; in tls_push_record()
618 before(seq, info->end_seq - info->len)) { in tls_get_record()
641 last->end_seq)) in tls_get_record()
650 if (before(seq, info->end_seq)) { in tls_get_record()
652 after(info->end_seq, in tls_get_record()
653 context->retransmit_hint->end_seq)) { in tls_get_record()
1124 start_marker_record->end_seq = tcp_sk(sk)->write_seq; in tls_set_device_offload()
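The tls_device.c hits track each offloaded TLS record by the TCP sequence number just past its last byte: tls_push_record() derives record->end_seq from write_seq plus the record length, and tls_record_start_seq() in include/net/tls.h (listed below) recovers the start as end_seq - len. A minimal sketch of that bookkeeping; the struct and helper names are illustrative:

/* A record covering the TCP sequence range [end_seq - len, end_seq). */
#include <stdint.h>

struct tls_rec_span {
	uint32_t end_seq;	/* first sequence number after the record */
	uint32_t len;		/* record length in bytes */
};

static uint32_t rec_start_seq(const struct tls_rec_span *rec)
{
	return rec->end_seq - rec->len;
}

static int rec_contains(const struct tls_rec_span *rec, uint32_t seq)
{
	/* !before(seq, start) && before(seq, end_seq), wrap-safe */
	return (int32_t)(seq - rec_start_seq(rec)) >= 0 &&
	       (int32_t)(seq - rec->end_seq) < 0;
}
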
/linux/include/linux/
tcp.h
99 __be32 end_seq; member
104 u32 end_seq; member
361 u32 end_seq; /* Ending TCP sequence of the skb */ member
/linux/tools/testing/selftests/bpf/progs/
bpf_cubic.c
97 __u32 end_seq; /* end_seq of the round */ member
175 ca->end_seq = tp->snd_nxt; in bictcp_hystart_reset()
395 if (hystart && after(ack, ca->end_seq)) in BPF_PROG()
/linux/include/net/
tls.h
143 u32 end_seq; member
340 return rec->end_seq - rec->len; in tls_record_start_seq()
tcp.h
946 __u32 end_seq; /* SEQ + FIN + SYN + datalen */ member
2400 extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
/linux/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/
chcr_ktls.c
1740 tx_info->prev_seq = record->end_seq; in chcr_end_part_handler()
1995 tls_end_offset = record->end_seq - tcp_seq; in chcr_ktls_xmit()
1998 tcp_seq, record->end_seq, tx_info->prev_seq, data_len); in chcr_ktls_xmit()
2005 tx_max = record->end_seq - in chcr_ktls_xmit()
2050 tcp_seq = record->end_seq; in chcr_ktls_xmit()
/linux/drivers/infiniband/hw/irdma/
puda.c
1157 u32 marker_seq, end_seq, blk_start; in irdma_ieq_get_fpdu_len() local
1181 end_seq = rcv_seq + total_len; in irdma_ieq_get_fpdu_len()
1182 while ((int)(marker_seq - end_seq) < 0) { in irdma_ieq_get_fpdu_len()
1184 end_seq += marker_len; in irdma_ieq_get_fpdu_len()
/linux/net/ipv6/
tcp_ipv6.c
1697 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1734 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
/linux/net/sched/
sch_cake.c
1011 u32 end_a = get_unaligned_be32(&sack_a->end_seq); in cake_tcph_sack_compare()
1023 u32 end_b = get_unaligned_be32(&sack_tmp->end_seq); in cake_tcph_sack_compare()
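
sch_cake.c reads the SACK block edges with get_unaligned_be32() and nf_conntrack_seqadj.c converts them with ntohl()/htonl() because the on-the-wire block (the __be32 end_seq member of struct tcp_sack_block_wire in include/linux/tcp.h above) keeps both edges in network byte order and need not be aligned inside the TCP option area. A small userspace parsing sketch under those assumptions:

/* Copy one SACK block out of the raw TCP option bytes and convert both
 * edges to host byte order; memcpy() sidesteps alignment issues.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct sack_block_wire {
	uint32_t start_seq;	/* network byte order */
	uint32_t end_seq;	/* network byte order */
};

static void parse_sack_block(const unsigned char *opt,
			     uint32_t *start_seq, uint32_t *end_seq)
{
	struct sack_block_wire blk;

	memcpy(&blk, opt, sizeof(blk));
	*start_seq = ntohl(blk.start_seq);
	*end_seq   = ntohl(blk.end_seq);
}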