/linux/net/netfilter/
  nf_conntrack_seqadj.c
       82  struct tcp_sack_block_wire *sack;  in nf_ct_sack_block_adjust() local
       85  sack = (void *)skb->data + sackoff;  in nf_ct_sack_block_adjust()
       86  if (after(ntohl(sack->start_seq) - seq->offset_before,  in nf_ct_sack_block_adjust()
       88  new_start_seq = htonl(ntohl(sack->start_seq) -  in nf_ct_sack_block_adjust()
       91  new_start_seq = htonl(ntohl(sack->start_seq) -  in nf_ct_sack_block_adjust()
       94  if (after(ntohl(sack->end_seq) - seq->offset_before,  in nf_ct_sack_block_adjust()
       96  new_end_seq = htonl(ntohl(sack->end_seq) -  in nf_ct_sack_block_adjust()
       99  new_end_seq = htonl(ntohl(sack->end_seq) -  in nf_ct_sack_block_adjust()
      103  ntohl(sack->start_seq), ntohl(new_start_seq),  in nf_ct_sack_block_adjust()
      104  ntohl(sack->end_seq), ntohl(new_end_seq));  in nf_ct_sack_block_adjust()
    [all …]
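The nf_conntrack_seqadj matches show NAT sequence-number corrections being propagated into the SACK blocks of reply packets: each block edge is shifted by the offset that applied before or after the correction point, depending on where the edge falls. Below is a minimal standalone sketch of that edge adjustment; struct seq_adjust and adjust_sack_edge() are illustrative names, not the kernel's (which keeps this state in struct nf_ct_seqadj and does the work in nf_ct_sack_block_adjust()).

#include <stdint.h>
#include <arpa/inet.h>		/* ntohl()/htonl() */

/* Wrap-safe "a comes after b" test in 32-bit sequence space. */
static int seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

/* How much the payload was resized before and after a correction point. */
struct seq_adjust {
	uint32_t correction_pos;
	int32_t  offset_before;
	int32_t  offset_after;
};

/* Shift one SACK block edge (network byte order in and out). */
static uint32_t adjust_sack_edge(uint32_t edge_be, const struct seq_adjust *adj)
{
	uint32_t edge = ntohl(edge_be);

	if (seq_after(edge - adj->offset_before, adj->correction_pos))
		return htonl(edge - adj->offset_after);
	return htonl(edge - adj->offset_before);
}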
  nf_conntrack_proto_tcp.c
      392  const struct tcphdr *tcph, __u32 *sack)  in tcp_sack() argument
      444  if (after(tmp, *sack))  in tcp_sack()
      445  *sack = tmp;  in tcp_sack()
      518  __u32 seq, ack, sack, end, win, swin;  in tcp_in_window() local
      527  ack = sack = ntohl(tcph->ack_seq);  in tcp_in_window()
      533  tcp_sack(skb, dataoff, tcph, &sack);  in tcp_in_window()
      538  sack -= receiver_offset;  in tcp_in_window()
      567  receiver->td_end = receiver->td_maxend = sack;  in tcp_in_window()
      568  } else if (sack == receiver->td_end + 1) {  in tcp_in_window()
      600  ack = sack = receiver->td_end;  in tcp_in_window()
    [all …]
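Here tcp_sack() scans the TCP options of a segment and records the highest SACK right edge it sees, which tcp_in_window() then folds into its window tracking alongside the plain ACK value. A self-contained sketch of that option walk, assuming opt/optlen describe only the options area of the TCP header; highest_sack_edge() and seq_after() are illustrative names rather than the kernel helpers.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define TCPOPT_EOL		0
#define TCPOPT_NOP		1
#define TCPOPT_SACK		5
#define TCPOLEN_SACK_BASE	2
#define TCPOLEN_SACK_PERBLOCK	8

/* Wrap-safe "a comes after b" test in 32-bit sequence space. */
static int seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

/* Raise *sack to the highest SACK right edge found in the option block. */
static void highest_sack_edge(const uint8_t *opt, unsigned int optlen,
			      uint32_t *sack)
{
	unsigned int i = 0;

	while (i < optlen) {
		uint8_t kind = opt[i];

		if (kind == TCPOPT_EOL)
			return;
		if (kind == TCPOPT_NOP) {
			i++;
			continue;
		}
		/* Every other option carries a length byte; bail out on junk. */
		if (i + 1 >= optlen || opt[i + 1] < 2 || i + opt[i + 1] > optlen)
			return;

		if (kind == TCPOPT_SACK &&
		    (opt[i + 1] - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK == 0) {
			unsigned int off;

			/* Skip kind, length and the first block's left edge. */
			for (off = i + TCPOLEN_SACK_BASE + 4;
			     off + 4 <= i + opt[i + 1];
			     off += TCPOLEN_SACK_PERBLOCK) {
				uint32_t end;

				memcpy(&end, opt + off, sizeof(end));
				end = ntohl(end);
				if (seq_after(end, *sack))
					*sack = end;
			}
		}
		i += opt[i + 1];
	}
}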
/linux/net/sctp/
  outqueue.c
       42  static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
       47  struct sctp_sackhdr *sack,
     1226  struct sctp_sackhdr *sack)  in sctp_sack_update_unack_data() argument
     1234  frags = (union sctp_sack_variable *)(sack + 1);  in sctp_sack_update_unack_data()
     1235  for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {  in sctp_sack_update_unack_data()
     1251  struct sctp_sackhdr *sack = chunk->subh.sack_hdr;  in sctp_outq_sack() local
     1273  sack_ctsn = ntohl(sack->cum_tsn_ack);  in sctp_outq_sack()
     1274  gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);  in sctp_outq_sack()
     1317  (union sctp_sack_variable *)(sack + 1);  in sctp_outq_sack()
     1330  sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);  in sctp_outq_sack()
    [all …]
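The outqueue matches show how SCTP walks the variable-length tail of a SACK chunk: the gap ack blocks begin immediately after the fixed header, hence the (sack + 1) pointer arithmetic, and every block removes the TSNs it covers from the count of unacknowledged data. A simplified sketch of that calculation; sack_hdr, gap_ack_block and unack_after_sack() are stand-ins for the kernel's sctp_sackhdr, sctp_gap_ack_block and sctp_sack_update_unack_data().

#include <stdint.h>
#include <arpa/inet.h>

/* Fixed part of an SCTP SACK chunk, simplified wire layout. */
struct sack_hdr {
	uint32_t cum_tsn_ack;
	uint32_t a_rwnd;
	uint16_t num_gap_ack_blocks;
	uint16_t num_dup_tsns;
};

/* One gap ack block: start/end offsets relative to the cumulative TSN ack. */
struct gap_ack_block {
	uint16_t start;
	uint16_t end;
};

/*
 * Count TSNs still unacknowledged after a SACK: everything past the
 * cumulative ack point, minus every TSN covered by a gap ack block.
 */
static uint16_t unack_after_sack(uint32_t next_tsn, const struct sack_hdr *hdr)
{
	const struct gap_ack_block *gab = (const struct gap_ack_block *)(hdr + 1);
	uint16_t unack = next_tsn - ntohl(hdr->cum_tsn_ack) - 1;
	uint16_t i, nblocks = ntohs(hdr->num_gap_ack_blocks);

	for (i = 0; i < nblocks; i++)
		unack -= ntohs(gab[i].end) - ntohs(gab[i].start) + 1;

	return unack;
}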
  output.c
      297  struct sctp_chunk *sack;  in sctp_packet_bundle_sack() local
      304  sack = sctp_make_sack(asoc);  in sctp_packet_bundle_sack()
      305  if (sack) {  in sctp_packet_bundle_sack()
      306  retval = __sctp_packet_append_chunk(pkt, sack);  in sctp_packet_bundle_sack()
      308  sctp_chunk_free(sack);  in sctp_packet_bundle_sack()
  sm_sideeffect.c
      143  struct sctp_chunk *sack;  in sctp_gen_sack() local
      206  sack = sctp_make_sack(asoc);  in sctp_gen_sack()
      207  if (!sack) {  in sctp_gen_sack()
      215  sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));  in sctp_gen_sack()
  associola.c
     1473  struct sctp_chunk *sack;  in sctp_assoc_rwnd_increase() local
     1514  sack = sctp_make_sack(asoc);  in sctp_assoc_rwnd_increase()
     1515  if (!sack)  in sctp_assoc_rwnd_increase()
     1520  sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);  in sctp_assoc_rwnd_increase()
  sm_make_chunk.c
      761  struct sctp_sackhdr sack;  in sctp_make_sack() local
      775  sack.cum_tsn_ack = htonl(ctsn);  in sctp_make_sack()
      776  sack.a_rwnd = htonl(asoc->a_rwnd);  in sctp_make_sack()
      777  sack.num_gap_ack_blocks = htons(num_gabs);  in sctp_make_sack()
      778  sack.num_dup_tsns = htons(num_dup_tsns);  in sctp_make_sack()
      780  len = sizeof(sack)  in sctp_make_sack()
      822  sctp_addto_chunk(retval, sizeof(sack), &sack);  in sctp_make_sack()
  sm_statefuns.c
     6304  struct sctp_sackhdr *sack;  in sctp_sm_pull_sack() local
     6312  sack = (struct sctp_sackhdr *) chunk->skb->data;  in sctp_sm_pull_sack()
     6314  num_blocks = ntohs(sack->num_gap_ack_blocks);  in sctp_sm_pull_sack()
     6315  num_dup_tsns = ntohs(sack->num_dup_tsns);  in sctp_sm_pull_sack()
     6323  return sack;  in sctp_sm_pull_sack()
/linux/net/rxrpc/
  input.c
      475  unsigned int sack = call->ackr_sack_base;  in rxrpc_input_data_one() local
      526  trace_rxrpc_sack(call, seq, sack, rxrpc_sack_none);  in rxrpc_input_data_one()
      529  trace_rxrpc_sack(call, seq, sack, rxrpc_sack_advance);  in rxrpc_input_data_one()
      530  sack = (sack + 1) % RXRPC_SACK_SIZE;  in rxrpc_input_data_one()
      548  call->ackr_sack_table[sack] = 0;  in rxrpc_input_data_one()
      549  trace_rxrpc_sack(call, seq, sack, rxrpc_sack_fill);  in rxrpc_input_data_one()
      550  sack = (sack + 1) % RXRPC_SACK_SIZE;  in rxrpc_input_data_one()
      557  call->ackr_sack_base = sack;  in rxrpc_input_data_one()
      564  sack = (sack + slot) % RXRPC_SACK_SIZE;  in rxrpc_input_data_one()
      566  if (call->ackr_sack_table[sack % RXRPC_SACK_SIZE]) {  in rxrpc_input_data_one()
    [all …]
  output.c
      159  unsigned int qsize, sack, wrap, to, max_mtu, if_mtu;  in rxrpc_fill_out_ack() local
      170  sack = call->ackr_sack_base % RXRPC_SACK_SIZE;  in rxrpc_fill_out_ack()
      203  wrap = RXRPC_SACK_SIZE - sack;  in rxrpc_fill_out_ack()
      206  if (sack + ack->nAcks <= RXRPC_SACK_SIZE) {  in rxrpc_fill_out_ack()
      207  memcpy(sackp, call->ackr_sack_table + sack, ack->nAcks);  in rxrpc_fill_out_ack()
      209  memcpy(sackp, call->ackr_sack_table + sack, wrap);  in rxrpc_fill_out_ack()
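rxrpc keeps the per-call SACK table as a small ring, so serialising it into an outgoing ACK takes at most two memcpy() calls: the run from the base index to the end of the ring, then the wrapped remainder from the start. A sketch of that pattern, with SACK_RING_SIZE and copy_sack_ring() as stand-ins for the kernel's RXRPC_SACK_SIZE and the copy done in rxrpc_fill_out_ack().

#include <string.h>

#define SACK_RING_SIZE 256	/* stand-in for RXRPC_SACK_SIZE */

/*
 * Copy "count" soft-ACK entries out of a circular table into a flat wire
 * buffer, starting at ring index "base".  One memcpy when the run fits
 * before the end of the ring, two when it wraps around.
 */
static void copy_sack_ring(unsigned char *dst, const unsigned char *ring,
			   unsigned int base, unsigned int count)
{
	unsigned int wrap = SACK_RING_SIZE - base;

	if (base + count <= SACK_RING_SIZE) {
		memcpy(dst, ring + base, count);
	} else {
		memcpy(dst, ring + base, wrap);
		memcpy(dst + wrap, ring, count - wrap);
	}
}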
/linux/drivers/net/ethernet/chelsio/cxgb/
  cpl5_cmd.h
      175  u8 sack:1;  member
      179  u8 sack:1;  member
/linux/tools/testing/selftests/net/netfilter/
  nft_synproxy.sh
       74  … meta iif veth0 meta l4proto tcp ct state untracked,invalid synproxy mss 1460 sack-perm timestamp
/linux/include/trace/events/
  rxrpc.h
     2292  unsigned int sack, enum rxrpc_sack_trace what),
     2294  TP_ARGS(call, seq, sack, what),
     2299  __field(unsigned int, sack)
     2306  __entry->sack = sack;
     2314  __entry->sack)
/linux/drivers/net/ethernet/chelsio/cxgb3/
  t3_cpl.h
      215  __u8 sack:1;  member
      219  __u8 sack:1;  member
/linux/net/ipv4/
  tcp_input.c
     3348  struct tcp_sacktag_state *sack, bool ece_ack)  in tcp_clean_rtx_queue() argument
     3415  tcp_rate_skb_delivered(sk, skb, sack->rate);  in tcp_clean_rtx_queue()
     3463  sack->rate->prior_delivered + 1 == tp->delivered &&  in tcp_clean_rtx_queue()
     3472  if (sack->first_sackt) {  in tcp_clean_rtx_queue()
     3473  sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt);  in tcp_clean_rtx_queue()
     3474  ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt);  in tcp_clean_rtx_queue()
     3477  ca_rtt_us, sack->rate);  in tcp_clean_rtx_queue()
     3519  .rtt_us = sack->rate->rtt_us };  in tcp_clean_rtx_queue()
     3522  (tp->delivered - sack->rate->prior_delivered);  in tcp_clean_rtx_queue()
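In tcp_clean_rtx_queue() the SACK scoreboard state carries the send timestamps of the first and last newly SACKed segments, so an RTT sample can still be taken on ACKs that advance nothing cumulatively. A simplified sketch of that sampling, using illustrative names (sack_state, stamp_us_delta() and sack_rtt_samples() stand in for struct tcp_sacktag_state, tcp_stamp_us_delta() and the logic above).

#include <stdint.h>

/* Microsecond delta clamped at zero: an RTT sample must never go negative. */
static int64_t stamp_us_delta(uint64_t now_us, uint64_t then_us)
{
	int64_t delta = (int64_t)(now_us - then_us);

	return delta > 0 ? delta : 0;
}

/* Per-ACK SACK processing state (illustrative subset). */
struct sack_state {
	uint64_t first_sackt;	/* send time of first newly SACKed skb, 0 if none */
	uint64_t last_sackt;	/* send time of last newly SACKed skb */
};

/*
 * Derive RTT samples from SACK timestamps: the first newly SACKed segment
 * gives the conservative sample fed into srtt/RTO, the last one feeds the
 * congestion control's RTT estimate.
 */
static void sack_rtt_samples(const struct sack_state *st, uint64_t now_us,
			     int64_t *sack_rtt_us, int64_t *ca_rtt_us)
{
	if (!st->first_sackt)
		return;
	*sack_rtt_us = stamp_us_delta(now_us, st->first_sackt);
	*ca_rtt_us = stamp_us_delta(now_us, st->last_sackt);
}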
/linux/drivers/net/ethernet/chelsio/cxgb4/
  t4_msg.h
      385  __u8 sack:1;  member
      389  __u8 sack:1;  member
/linux/Documentation/networking/
  snmp_counter.rst
      587  When the congestion control comes into Recovery state, if sack is
      588  used, TcpExtTCPSackRecovery increases 1, if sack is not used,
     1218  …ts sack cubic wscale:7,7 rto:204 rtt:0.98/0.49 mss:1448 pmtu:1500 rcvmss:536 advmss:1448 cwnd:10 b…
/linux/drivers/target/iscsi/cxgbit/
  cxgbit_cm.c
     1185  if (req->tcpopt.sack)  in cxgbit_pass_accept_rpl()
/linux/drivers/infiniband/hw/cxgb4/
  cm.c
     2460  if (enable_tcp_sack && req->tcpopt.sack)  in accept_cr()
     3992  req->tcpopt.sack = 1;  in build_cpl_pass_accept_req()
/linux/drivers/net/ethernet/chelsio/inline_crypto/chtls/
  chtls_cm.c
     1052  if (req->tcpopt.sack)  in chtls_pass_accept_rpl()