/linux/net/ipv4/
tcp_bic.c (matches in bictcp_update):
     83  static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
     85      if (ca->last_cwnd == cwnd &&
     89      ca->last_cwnd = cwnd;
     96      if (cwnd <= low_window) {
     97          ca->cnt = cwnd;
    102      if (cwnd < ca->last_max_cwnd) {
    103          __u32 dist = (ca->last_max_cwnd - cwnd)
    108          ca->cnt = cwnd / max_increment;
    111          ca->cnt = (cwnd * smooth_part) / BICTCP_B;
    114          ca->cnt = cwnd / dist;
    [all …]
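Taken together, these hits show BIC's core mechanism: ca->cnt is the number of ACKs required before cwnd grows by one segment, chosen by a binary search toward the last maximum window. A minimal standalone sketch of that selection logic follows; the constants mirror tcp_bic.c's defaults but are assumptions in this self-contained form.

    /* Sketch of BIC's per-ACK increment selection: cnt is how many ACKs
     * must arrive before cwnd grows by one segment. Constants mirror the
     * tcp_bic.c defaults but are assumptions in this standalone form.
     */
    #include <stdint.h>

    #define LOW_WINDOW    14U  /* below this, behave like plain Reno */
    #define MAX_INCREMENT 16U  /* cap growth at max_increment segs/RTT */
    #define SMOOTH_PART   20U  /* slow "creep" near the old maximum */
    #define BICTCP_B       4U  /* binary-search granularity */

    static uint32_t bic_cnt(uint32_t cwnd, uint32_t last_max_cwnd)
    {
        if (cwnd <= LOW_WINDOW)
            return cwnd;                     /* Reno: +1 segment per RTT */

        if (cwnd < last_max_cwnd) {          /* binary search toward wmax */
            uint32_t dist = (last_max_cwnd - cwnd) / BICTCP_B;

            if (dist > MAX_INCREMENT)        /* far away: linear increase */
                return cwnd / MAX_INCREMENT;
            if (dist <= 1)                   /* very close: grow slowly */
                return (cwnd * SMOOTH_PART) / BICTCP_B;
            return cwnd / dist;              /* the binary-search step */
        }
        return cwnd;  /* above wmax: max-probing phase, elided in the hits */
    }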
|
tcp_highspeed.c (struct member at line 17; matches in hstcp_cong_avoid):
     17  unsigned int cwnd;
    130      if (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd) {
    131          while (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd &&
    134      } else if (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd) {
    135          while (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd)
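HighSpeed TCP keeps a static table mapping cwnd thresholds to AIMD parameters; ca->ai tracks the highest row whose threshold the current cwnd has crossed, climbing as the window grows and descending after a loss. A simplified sketch of that index walk, with illustrative table values rather than the full RFC 3649 table:

    /* Simplified HSTCP table walk; the two-loop shape matches the hits
     * above, the values are illustrative, not the full RFC 3649 table.
     */
    struct hstcp_aimd_val {
        unsigned int cwnd;  /* threshold in segments */
        unsigned int md;    /* scaled multiplicative-decrease factor */
    };

    static const struct hstcp_aimd_val aimd_vals[] = {
        {  38, 128 }, { 118, 112 }, { 221, 104 }, { 347, 98 },
    };
    #define AIMD_MAX (sizeof(aimd_vals) / sizeof(aimd_vals[0]) - 1)

    static void hstcp_update_index(unsigned int cwnd, unsigned int *ai)
    {
        if (cwnd > aimd_vals[*ai].cwnd) {
            /* window grew past this row: climb to a more aggressive one */
            while (cwnd > aimd_vals[*ai].cwnd && *ai < AIMD_MAX)
                (*ai)++;
        } else if (*ai && cwnd <= aimd_vals[*ai - 1].cwnd) {
            /* window fell back (e.g. after loss): descend to a matching row */
            while (*ai && cwnd <= aimd_vals[*ai - 1].cwnd)
                (*ai)--;
        }
    }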
|
tcp_bbr.c (matches in bbr_quantization_budget and bbr_set_cwnd_to_recover_or_restore):
    395  static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
    400      cwnd += 3 * bbr_tso_segs_goal(sk);
    403      cwnd = (cwnd + 1) & ~1U;
    407      cwnd += 2;
    409      return cwnd;
    486      u32 cwnd = tcp_snd_cwnd(tp);
    493      cwnd = max_t(s32, cwnd - rs->losses, 1);
    500      cwnd = tcp_packets_in_flight(tp) + acked;
    503      cwnd = max(cwnd, bbr->prior_cwnd);
    509      *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
    [all …]
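The first block pads the cwnd so TSO batching and delayed ACKs cannot starve the pipe. A self-contained rendering of that budget, with tso_segs_goal standing in for bbr_tso_segs_goal(sk):

    /* Self-contained rendering of the quantization budget above;
     * tso_segs_goal stands in for bbr_tso_segs_goal(sk). */
    #include <stdint.h>

    static uint32_t quantization_budget(uint32_t cwnd, uint32_t tso_segs_goal,
                                        int probing_for_bw)
    {
        /* leave room for three full TSO bursts in flight */
        cwnd += 3 * tso_segs_goal;

        /* round up to an even count so delayed ACKs (one per two
         * packets) always have a pair to acknowledge */
        cwnd = (cwnd + 1) & ~1U;

        /* extra headroom while actively probing for more bandwidth */
        if (probing_for_bw)
            cwnd += 2;

        return cwnd;
    }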
|
/linux/net/sctp/
transport.c (matches in sctp_transport_raise_cwnd):
    557      __u32 cwnd, ssthresh, flight_size, pba, pmtu;
    559      cwnd = transport->cwnd;
    571      if (cwnd <= ssthresh) {
    594          if (flight_size < cwnd)
    598          cwnd += pmtu;
    600          cwnd += bytes_acked;
    604          __func__, transport, bytes_acked, cwnd, ssthresh,
    630          if (pba > cwnd && flight_size < cwnd)
    631              pba = cwnd;
    632          if (pba >= cwnd && flight_size >= cwnd) {
    [all …]
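These hits trace RFC 4960's two growth regimes: slow start adds min(bytes_acked, PMTU) per SACK when the window is fully used, while congestion avoidance accumulates acked bytes into partial_bytes_acked (pba) and adds one PMTU per cwnd's worth of data. A simplified model of that logic follows; field names shadow the kernel's, but this is not the kernel function (locking, max-burst, and idle handling are omitted):

    /* Simplified RFC 4960 cwnd raise; not the kernel function. */
    struct sctp_tp {
        unsigned int cwnd, ssthresh, flight_size, pba, pmtu;
    };

    static void raise_cwnd(struct sctp_tp *t, unsigned int bytes_acked)
    {
        if (t->cwnd <= t->ssthresh) {
            /* slow start: grow only if cwnd is actually the limiter */
            if (t->flight_size < t->cwnd)
                return;
            t->cwnd += bytes_acked < t->pmtu ? bytes_acked : t->pmtu;
        } else {
            /* congestion avoidance via partial bytes acked */
            t->pba += bytes_acked;
            if (t->pba > t->cwnd && t->flight_size < t->cwnd)
                t->pba = t->cwnd;        /* clamp while app-limited */
            if (t->pba >= t->cwnd && t->flight_size >= t->cwnd) {
                t->pba -= t->cwnd;       /* one full window acked ... */
                t->cwnd += t->pmtu;      /* ... buys one PMTU of growth */
            }
        }
    }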
|
/linux/tools/testing/selftests/bpf/progs/
bpf_cubic.c (CUBIC as a BPF congestion-control selftest; matches include bictcp_update):
     56  /* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
     57   * so K = cubic_root( (wmax-cwnd)*rtt/c )
     64   * cwnd < 1 million packets
     75  __u32 cnt;      /* increase cwnd by 1 after ACKs */
     85  __u32 tcp_cwnd; /* estimated tcp cwnd */
    198   * Shift epoch_start to keep cwnd growth to cubic curve.
    265  static void bictcp_update(struct bpf_bictcp *ca, __u32 cwnd, __u32 acked)
    272      if (ca->last_cwnd == cwnd &&
    277       * On all cwnd reduction events, ca->epoch_start is set to 0,
    283      ca->last_cwnd = cwnd;
    [all...]
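The comment at lines 56-57 is the heart of CUBIC: K is the time at which the cubic curve returns to the pre-loss maximum wmax. The BPF/kernel code computes it in fixed point with an integer cube root; here is a floating-point rendering of the same two formulas, using the file's own rtt-scaled form:

    /* Floating-point rendering of CUBIC's K and W(t); the kernel/BPF
     * code uses fixed point and an integer cube root instead. */
    #include <math.h>

    /* From the file's comment: (wmax - cwnd) = c/rtt * K^3 */
    static double cubic_k(double wmax, double cwnd, double rtt, double c)
    {
        return cbrt((wmax - cwnd) * rtt / c);
    }

    /* Window on the cubic curve t time units after the loss epoch:
     * W(t) = c/rtt * (t - K)^3 + wmax */
    static double cubic_w(double t, double k, double wmax, double rtt,
                          double c)
    {
        double d = t - k;

        return (c / rtt) * d * d * d + wmax;
    }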
/linux/tools/testing/selftests/net/packetdrill/
tcp_slow_start_slow-start-ack-per-2pkt-send-5pkt.pkt
      3  // less than the current cwnd, and not big enough to bump up cwnd.
     27  +0 %{ assert tcpi_snd_cwnd == 10, 'cwnd=%d' % tcpi_snd_cwnd }%
     30  +0 %{ assert tcpi_snd_cwnd == 10, 'cwnd=%d' % tcpi_snd_cwnd }%
     33  +0 %{ assert tcpi_snd_cwnd == 10, 'cwnd=%d' % tcpi_snd_cwnd }%
|
tcp_slow_start_slow-start-ack-per-2pkt-send-6pkt.pkt
      3  // less than the current cwnd, but still big enough that in slow
      4  // start we want to increase our cwnd a little.
     28  +0 %{ assert tcpi_snd_cwnd == 12, 'cwnd=%d' % tcpi_snd_cwnd }%
     31  +0 %{ assert tcpi_snd_cwnd == 12, 'cwnd=%d' % tcpi_snd_cwnd }%
     34  +0 %{ assert tcpi_snd_cwnd == 12, 'cwnd=%d' % tcpi_snd_cwnd }%
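The 5-packet/6-packet pair pins down Linux's app-limited check: in slow start, cwnd grows only while doubling it would still not cover the peak number of packets in flight. A sketch of that predicate, modeled loosely on the kernel's tcp_is_cwnd_limited() (simplified; the real check also consults a per-burst is_cwnd_limited flag):

    /* Why 5 packets leave cwnd at 10 but 6 packets grow it to exactly 12.
     * Modeled loosely on tcp_is_cwnd_limited(); simplified. */
    #include <stdbool.h>
    #include <stdint.h>

    static bool cwnd_limited_in_slow_start(uint32_t snd_cwnd,
                                           uint32_t max_packets_out)
    {
        return snd_cwnd < 2 * max_packets_out;
    }

    /* 5 pkts out:  10 < 10 is false -> never limited, cwnd stays 10.
     * 6 pkts out:  10 < 12 is true  -> the first ACK grows cwnd to 12,
     *              then 12 < 12 is false -> growth stops at exactly 12. */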
|
tcp_slow_start_slow-start-fq-ack-per-2pkt.pkt
      3  // the cwnd continues to grow, even if TSQ triggers.
     41  // (FQ commit allows an application/cwnd limited flow to get at most quantum/2 extra credit)
     63  +0 %{ assert tcpi_snd_cwnd == 20, 'cwnd=%d' % tcpi_snd_cwnd }%
|
tcp_slow_start_slow-start-app-limited-9-packets-out.pkt
      3  // with IW10, if we don't fully use our cwnd but instead
      4  // send just 9 packets, then cwnd should grow to twice that
|
tcp_slow_start_slow-start-after-idle.pkt
      3  // This test expects tso size to be at least initial cwnd * mss
     34  // If slow start after idle works properly, we should send 5 MSS here (cwnd/2)
|
tcp_slow_start_slow-start-app-limited.pkt
      3  // with IW10, if we send exactly 10 packets then cwnd should grow to 20.
|
tcp_slow_start_slow-start-ack-per-4pkt.pkt
      3  // the cwnd continues to grow.
|
tcp_slow_start_slow-start-ack-per-2pkt.pkt
      3  // the cwnd continues to grow.
|
tcp_slow_start_slow-start-after-win-update.pkt
      3  // This test expects tso size to be at least initial cwnd * mss
|
tcp_slow_start_slow-start-ack-per-1pkt.pkt
      3  // the cwnd continues to grow.
|
/linux/samples/bpf/
hbm_kern.h (struct member at line 74; matches in get_tcp_info, hbm_get_pkt_info, hbm_update_stats):
     74  int cwnd;
     94      pkti->cwnd = tp->snd_cwnd;
    102      pkti->cwnd = 0;
    114      pkti->cwnd = 0;
    187      if (pkti->cwnd) {
    189          pkti->cwnd);
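get_tcp_info() reads snd_cwnd from inside a cgroup skb BPF program via the bpf_tcp_sock() helper. A pared-down sketch of that access pattern (the program name is hypothetical; the sample's version also collects RTT and other fields):

    /* Pared-down sketch of reading snd_cwnd in a cgroup skb program,
     * after the pattern in get_tcp_info(). Names are hypothetical. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("cgroup_skb/egress")
    int read_cwnd(struct __sk_buff *skb)
    {
        int cwnd = 0;
        struct bpf_sock *sk = skb->sk;

        if (sk) {
            sk = bpf_sk_fullsock(sk);       /* helpers need a full socket */
            if (sk) {
                struct bpf_tcp_sock *tp = bpf_tcp_sock(sk);

                if (tp)
                    cwnd = tp->snd_cwnd;    /* read-only mirror field */
            }
        }
        return 1;  /* 1 = allow the packet; cwnd would feed the stats map */
    }

    char _license[] SEC("license") = "GPL";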
|
/linux/net/batman-adv/
tp_meter.c (matches in batadv_tp_update_cwnd, batadv_tp_sender_end, batadv_tp_sender_timeout, batadv_tp_recv_ack):
    148      if (tp_vars->cwnd <= tp_vars->ss_threshold) {
    150          tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);
    157          ((mss * mss) << 6) / (tp_vars->cwnd << 3));
    163          tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);
    417          tp_vars->cwnd, tp_vars->ss_threshold);
    508      tp_vars->ss_threshold = tp_vars->cwnd >> 1;
    514          tp_vars->other_end, tp_vars->cwnd, tp_vars->ss_threshold,
    517      tp_vars->cwnd = BATADV_TP_PLEN * 3;
    636      u32 rtt, recv_ack, cwnd;
    699      tp_vars->ss_threshold = tp_vars->cwnd >> 1;
    [all …]
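batman-adv's throughput meter reimplements Reno in miniature: slow start below ss_threshold, a scaled mss*mss/cwnd step above it, and on timeout ssthresh = cwnd/2 with cwnd reset to three payload lengths. A simplified byte-granularity model of the update (no locking or clamping against the peer's window, unlike the kernel code):

    /* Simplified byte-granularity Reno update after the hits above. */
    typedef unsigned int u32;

    /* grow cwnd by increment, but never return less than min_cwnd */
    static u32 tp_cwnd(u32 base, u32 increment, u32 min_cwnd)
    {
        u32 new_cwnd = base + increment;

        return new_cwnd < min_cwnd ? min_cwnd : new_cwnd;
    }

    static u32 tp_update_cwnd(u32 cwnd, u32 ss_threshold, u32 mss)
    {
        if (cwnd <= ss_threshold)
            /* slow start: one MSS of growth per acked MSS */
            return tp_cwnd(cwnd, mss, mss);

        /* congestion avoidance: a fixed-point mss*mss/cwnd step,
         * using the same <<6 / <<3 scaling as line 157 above */
        return tp_cwnd(cwnd, ((mss * mss) << 6) / (cwnd << 3), mss);
    }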
|
/linux/include/trace/events/
sctp.h (tracepoint field declaration, assignment, and printk argument):
     23  __field(__u32, cwnd)
     35  __entry->cwnd = sp->cwnd;
     45  __entry->ipaddr, __entry->state, __entry->cwnd,
|
/linux/include/linux/qed/
tcp_common.h (little-endian firmware struct members):
     92  __le32 cwnd;
    158  __le32 cwnd;
    238  __le32 cwnd;
|
/linux/net/dccp/ccids/
ccid2.c
/linux/net/sunrpc/
xprt.c (matches in xprt_adjust_cwnd):
    565      unsigned long cwnd = xprt->cwnd;
    567      if (result >= 0 && cwnd <= xprt->cong) {
    570          cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
    571          if (cwnd > RPC_MAXCWND(xprt))
    572              cwnd = RPC_MAXCWND(xprt);
    575          cwnd >>= 1;
    576          if (cwnd < RPC_CWNDSCALE)
    577              cwnd = RPC_CWNDSCALE;
    580          xprt->cong, xprt->cwnd, cwnd);
    581      xprt->cwnd = cwnd;
    [all …]
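SunRPC applies the same AIMD idea to RPC request slots: cwnd is scaled by RPC_CWNDSCALE, creeps up by roughly one scaled unit per window of successful replies, and halves on a timeout with a floor of one slot. A simplified model follows; the cap is an assumption here, since the kernel's RPC_MAXCWND depends on the transport's slot-table size:

    /* Simplified model of xprt_adjust_cwnd()'s AIMD step. */
    #include <errno.h>

    #define RPC_CWNDSHIFT  8U
    #define RPC_CWNDSCALE  (1UL << RPC_CWNDSHIFT)
    #define RPC_MAXCWND    (RPC_CWNDSCALE * 16)  /* assumed slot-table cap */

    static unsigned long rpc_adjust_cwnd(unsigned long cwnd, int result,
                                         unsigned long cong)
    {
        if (result >= 0 && cwnd <= cong) {
            /* success while congestion-limited: additive increase,
             * with (cwnd >> 1) acting as a rounding term */
            cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
            if (cwnd > RPC_MAXCWND)
                cwnd = RPC_MAXCWND;
        } else if (result == -ETIMEDOUT) {
            /* timeout: multiplicative decrease, floored at one slot */
            cwnd >>= 1;
            if (cwnd < RPC_CWNDSCALE)
                cwnd = RPC_CWNDSCALE;
        }
        return cwnd;
    }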
|
/linux/net/sunrpc/xprtrdma/
svc_rdma_backchannel.c (matches in svc_rdma_handle_bc_reply and xprt_rdma_bc_close):
     50  xprt->cwnd = credits << RPC_CWNDSHIFT;
    200  xprt->cwnd = RPC_CWNDSHIFT;
|
/linux/drivers/net/ethernet/qlogic/qed/
qed_nvmetcp.h (struct member):
     73  u32 cwnd;
|
qed_iscsi.c (struct member at line 80; matches in qed_sp_iscsi_conn_offload and qed_iscsi_offload_conn):
     80  u32 cwnd;
    364      p_tcp->cwnd = cpu_to_le32(p_conn->cwnd);
    439      p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd);
   1245      con->cwnd = conn_info->cwnd;
|
qed_nvmetcp.c (matches in qed_sp_nvmetcp_conn_offload and qed_nvmetcp_offload_conn):
    347      p_tcp->cwnd = cpu_to_le32(p_conn->cwnd);
    715      con->cwnd = conn_info->cwnd;
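All of the qed hits follow one pattern: cwnd is carried between driver structs as a host-endian u32 and converted with cpu_to_le32() only at the firmware boundary, matching the __le32 members in tcp_common.h above. A minimal kernel-style illustration; the struct here is hypothetical, the real layouts live in tcp_common.h:

    /* Minimal kernel-style illustration of the endianness boundary;
     * fw_tcp_params is hypothetical. cpu_to_le32() comes from the
     * kernel's byteorder headers. */
    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct fw_tcp_params {
        __le32 cwnd;   /* firmware expects little-endian, always */
    };

    static void fill_fw_cwnd(struct fw_tcp_params *p, u32 host_cwnd)
    {
        /* a no-op on little-endian hosts, a byte swap on big-endian ones */
        p->cwnd = cpu_to_le32(host_cwnd);
    }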
|