167fef78bSLawrence Stewart /*- 2fe267a55SPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3fe267a55SPedro F. Giffuni * 467fef78bSLawrence Stewart * Copyright (c) 2008-2010 Lawrence Stewart <lstewart@freebsd.org> 567fef78bSLawrence Stewart * Copyright (c) 2010 The FreeBSD Foundation 667fef78bSLawrence Stewart * All rights reserved. 767fef78bSLawrence Stewart * 867fef78bSLawrence Stewart * This software was developed by Lawrence Stewart while studying at the Centre 9891b8ed4SLawrence Stewart * for Advanced Internet Architectures, Swinburne University of Technology, made 10891b8ed4SLawrence Stewart * possible in part by a grant from the Cisco University Research Program Fund 11891b8ed4SLawrence Stewart * at Community Foundation Silicon Valley. 1267fef78bSLawrence Stewart * 1367fef78bSLawrence Stewart * Portions of this software were developed at the Centre for Advanced 1467fef78bSLawrence Stewart * Internet Architectures, Swinburne University of Technology, Melbourne, 1567fef78bSLawrence Stewart * Australia by David Hayes under sponsorship from the FreeBSD Foundation. 1667fef78bSLawrence Stewart * 1767fef78bSLawrence Stewart * Redistribution and use in source and binary forms, with or without 1867fef78bSLawrence Stewart * modification, are permitted provided that the following conditions 1967fef78bSLawrence Stewart * are met: 2067fef78bSLawrence Stewart * 1. Redistributions of source code must retain the above copyright 2167fef78bSLawrence Stewart * notice, this list of conditions and the following disclaimer. 2267fef78bSLawrence Stewart * 2. Redistributions in binary form must reproduce the above copyright 2367fef78bSLawrence Stewart * notice, this list of conditions and the following disclaimer in the 2467fef78bSLawrence Stewart * documentation and/or other materials provided with the distribution. 
2567fef78bSLawrence Stewart * 2667fef78bSLawrence Stewart * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 2767fef78bSLawrence Stewart * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 2867fef78bSLawrence Stewart * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 2967fef78bSLawrence Stewart * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 3067fef78bSLawrence Stewart * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 3167fef78bSLawrence Stewart * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 3267fef78bSLawrence Stewart * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 3367fef78bSLawrence Stewart * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 3467fef78bSLawrence Stewart * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 3567fef78bSLawrence Stewart * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 3667fef78bSLawrence Stewart * SUCH DAMAGE. 3767fef78bSLawrence Stewart */ 3867fef78bSLawrence Stewart 3967fef78bSLawrence Stewart /* 4067fef78bSLawrence Stewart * An implementation of the CUBIC congestion control algorithm for FreeBSD, 4167fef78bSLawrence Stewart * based on the Internet Draft "draft-rhee-tcpm-cubic-02" by Rhee, Xu and Ha. 4267fef78bSLawrence Stewart * Originally released as part of the NewTCP research project at Swinburne 43891b8ed4SLawrence Stewart * University of Technology's Centre for Advanced Internet Architectures, 44891b8ed4SLawrence Stewart * Melbourne, Australia, which was made possible in part by a grant from the 45891b8ed4SLawrence Stewart * Cisco University Research Program Fund at Community Foundation Silicon 46891b8ed4SLawrence Stewart * Valley. 
 * More details are available at:
 *   http://caia.swin.edu.au/urp/newtcp/
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/vnet.h>

#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_cubic.h>
#include <netinet/cc/cc_module.h>

static void	cubic_ack_received(struct cc_var *ccv, uint16_t type);
static void	cubic_cb_destroy(struct cc_var *ccv);
static int	cubic_cb_init(struct cc_var *ccv);
static void	cubic_cong_signal(struct cc_var *ccv, uint32_t type);
static void	cubic_conn_init(struct cc_var *ccv);
static int	cubic_mod_init(void);
static void	cubic_post_recovery(struct cc_var *ccv);
static void	cubic_record_rtt(struct cc_var *ccv);
static void	cubic_ssthresh_update(struct cc_var *ccv);

8267fef78bSLawrence Stewart struct cubic { 8367fef78bSLawrence Stewart /* Cubic K in fixed point form with CUBIC_SHIFT worth of precision. */ 8467fef78bSLawrence Stewart int64_t K; 8567fef78bSLawrence Stewart /* Sum of RTT samples across an epoch in ticks. */ 8667fef78bSLawrence Stewart int64_t sum_rtt_ticks; 8767fef78bSLawrence Stewart /* cwnd at the most recent congestion event. */ 8867fef78bSLawrence Stewart unsigned long max_cwnd; 8967fef78bSLawrence Stewart /* cwnd at the previous congestion event. */ 9067fef78bSLawrence Stewart unsigned long prev_max_cwnd; 9167fef78bSLawrence Stewart /* Number of congestion events. */ 9267fef78bSLawrence Stewart uint32_t num_cong_events; 9367fef78bSLawrence Stewart /* Minimum observed rtt in ticks. */ 9467fef78bSLawrence Stewart int min_rtt_ticks; 9567fef78bSLawrence Stewart /* Mean observed rtt between congestion epochs. */ 9667fef78bSLawrence Stewart int mean_rtt_ticks; 9767fef78bSLawrence Stewart /* ACKs since last congestion event. */ 9867fef78bSLawrence Stewart int epoch_ack_count; 9967fef78bSLawrence Stewart /* Time of last congestion event in ticks. 
*/ 10067fef78bSLawrence Stewart int t_last_cong; 10167fef78bSLawrence Stewart }; 10267fef78bSLawrence Stewart 1036bed196cSSergey Kandaurov static MALLOC_DEFINE(M_CUBIC, "cubic data", 10467fef78bSLawrence Stewart "Per connection data required for the CUBIC congestion control algorithm"); 10567fef78bSLawrence Stewart 10667fef78bSLawrence Stewart struct cc_algo cubic_cc_algo = { 10767fef78bSLawrence Stewart .name = "cubic", 10867fef78bSLawrence Stewart .ack_received = cubic_ack_received, 10967fef78bSLawrence Stewart .cb_destroy = cubic_cb_destroy, 11067fef78bSLawrence Stewart .cb_init = cubic_cb_init, 11167fef78bSLawrence Stewart .cong_signal = cubic_cong_signal, 11267fef78bSLawrence Stewart .conn_init = cubic_conn_init, 11367fef78bSLawrence Stewart .mod_init = cubic_mod_init, 11467fef78bSLawrence Stewart .post_recovery = cubic_post_recovery, 11567fef78bSLawrence Stewart }; 11667fef78bSLawrence Stewart 11767fef78bSLawrence Stewart static void 11867fef78bSLawrence Stewart cubic_ack_received(struct cc_var *ccv, uint16_t type) 11967fef78bSLawrence Stewart { 12067fef78bSLawrence Stewart struct cubic *cubic_data; 12167fef78bSLawrence Stewart unsigned long w_tf, w_cubic_next; 12267fef78bSLawrence Stewart int ticks_since_cong; 12367fef78bSLawrence Stewart 12467fef78bSLawrence Stewart cubic_data = ccv->cc_data; 12567fef78bSLawrence Stewart cubic_record_rtt(ccv); 12667fef78bSLawrence Stewart 12767fef78bSLawrence Stewart /* 12867fef78bSLawrence Stewart * Regular ACK and we're not in cong/fast recovery and we're cwnd 12967fef78bSLawrence Stewart * limited and we're either not doing ABC or are slow starting or are 13067fef78bSLawrence Stewart * doing ABC and we've sent a cwnd's worth of bytes. 
13167fef78bSLawrence Stewart */ 13267fef78bSLawrence Stewart if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) && 13367fef78bSLawrence Stewart (ccv->flags & CCF_CWND_LIMITED) && (!V_tcp_do_rfc3465 || 13467fef78bSLawrence Stewart CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) || 13567fef78bSLawrence Stewart (V_tcp_do_rfc3465 && ccv->flags & CCF_ABC_SENTAWND))) { 13667fef78bSLawrence Stewart /* Use the logic in NewReno ack_received() for slow start. */ 13767fef78bSLawrence Stewart if (CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) || 13867fef78bSLawrence Stewart cubic_data->min_rtt_ticks == TCPTV_SRTTBASE) 13967fef78bSLawrence Stewart newreno_cc_algo.ack_received(ccv, type); 14067fef78bSLawrence Stewart else { 14167fef78bSLawrence Stewart ticks_since_cong = ticks - cubic_data->t_last_cong; 14267fef78bSLawrence Stewart 14367fef78bSLawrence Stewart /* 14467fef78bSLawrence Stewart * The mean RTT is used to best reflect the equations in 14567fef78bSLawrence Stewart * the I-D. Using min_rtt in the tf_cwnd calculation 14667fef78bSLawrence Stewart * causes w_tf to grow much faster than it should if the 14767fef78bSLawrence Stewart * RTT is dominated by network buffering rather than 148a4641f4eSPedro F. Giffuni * propagation delay. 
14967fef78bSLawrence Stewart */ 15067fef78bSLawrence Stewart w_tf = tf_cwnd(ticks_since_cong, 15167fef78bSLawrence Stewart cubic_data->mean_rtt_ticks, cubic_data->max_cwnd, 15267fef78bSLawrence Stewart CCV(ccv, t_maxseg)); 15367fef78bSLawrence Stewart 15467fef78bSLawrence Stewart w_cubic_next = cubic_cwnd(ticks_since_cong + 15567fef78bSLawrence Stewart cubic_data->mean_rtt_ticks, cubic_data->max_cwnd, 15667fef78bSLawrence Stewart CCV(ccv, t_maxseg), cubic_data->K); 15767fef78bSLawrence Stewart 15867fef78bSLawrence Stewart ccv->flags &= ~CCF_ABC_SENTAWND; 15967fef78bSLawrence Stewart 16067fef78bSLawrence Stewart if (w_cubic_next < w_tf) 16167fef78bSLawrence Stewart /* 16267fef78bSLawrence Stewart * TCP-friendly region, follow tf 16367fef78bSLawrence Stewart * cwnd growth. 16467fef78bSLawrence Stewart */ 16551e712f8SHiren Panchasara CCV(ccv, snd_cwnd) = w_tf; 16667fef78bSLawrence Stewart 16767fef78bSLawrence Stewart else if (CCV(ccv, snd_cwnd) < w_cubic_next) { 16867fef78bSLawrence Stewart /* 16967fef78bSLawrence Stewart * Concave or convex region, follow CUBIC 17067fef78bSLawrence Stewart * cwnd growth. 17167fef78bSLawrence Stewart */ 17267fef78bSLawrence Stewart if (V_tcp_do_rfc3465) 17367fef78bSLawrence Stewart CCV(ccv, snd_cwnd) = w_cubic_next; 17467fef78bSLawrence Stewart else 17567fef78bSLawrence Stewart CCV(ccv, snd_cwnd) += ((w_cubic_next - 17667fef78bSLawrence Stewart CCV(ccv, snd_cwnd)) * 17767fef78bSLawrence Stewart CCV(ccv, t_maxseg)) / 17867fef78bSLawrence Stewart CCV(ccv, snd_cwnd); 17967fef78bSLawrence Stewart } 18067fef78bSLawrence Stewart 18167fef78bSLawrence Stewart /* 18267fef78bSLawrence Stewart * If we're not in slow start and we're probing for a 18367fef78bSLawrence Stewart * new cwnd limit at the start of a connection 18467fef78bSLawrence Stewart * (happens when hostcache has a relevant entry), 18567fef78bSLawrence Stewart * keep updating our current estimate of the 18667fef78bSLawrence Stewart * max_cwnd. 
18767fef78bSLawrence Stewart */ 18867fef78bSLawrence Stewart if (cubic_data->num_cong_events == 0 && 18951e712f8SHiren Panchasara cubic_data->max_cwnd < CCV(ccv, snd_cwnd)) 19067fef78bSLawrence Stewart cubic_data->max_cwnd = CCV(ccv, snd_cwnd); 19167fef78bSLawrence Stewart } 19267fef78bSLawrence Stewart } 19367fef78bSLawrence Stewart } 19467fef78bSLawrence Stewart 19567fef78bSLawrence Stewart static void 19667fef78bSLawrence Stewart cubic_cb_destroy(struct cc_var *ccv) 19767fef78bSLawrence Stewart { 19867fef78bSLawrence Stewart free(ccv->cc_data, M_CUBIC); 19967fef78bSLawrence Stewart } 20067fef78bSLawrence Stewart 20167fef78bSLawrence Stewart static int 20267fef78bSLawrence Stewart cubic_cb_init(struct cc_var *ccv) 20367fef78bSLawrence Stewart { 20467fef78bSLawrence Stewart struct cubic *cubic_data; 20567fef78bSLawrence Stewart 20667fef78bSLawrence Stewart cubic_data = malloc(sizeof(struct cubic), M_CUBIC, M_NOWAIT|M_ZERO); 20767fef78bSLawrence Stewart 20867fef78bSLawrence Stewart if (cubic_data == NULL) 20967fef78bSLawrence Stewart return (ENOMEM); 21067fef78bSLawrence Stewart 21167fef78bSLawrence Stewart /* Init some key variables with sensible defaults. */ 21267fef78bSLawrence Stewart cubic_data->t_last_cong = ticks; 21367fef78bSLawrence Stewart cubic_data->min_rtt_ticks = TCPTV_SRTTBASE; 21447f44cddSLawrence Stewart cubic_data->mean_rtt_ticks = 1; 21567fef78bSLawrence Stewart 21667fef78bSLawrence Stewart ccv->cc_data = cubic_data; 21767fef78bSLawrence Stewart 21867fef78bSLawrence Stewart return (0); 21967fef78bSLawrence Stewart } 22067fef78bSLawrence Stewart 22167fef78bSLawrence Stewart /* 22267fef78bSLawrence Stewart * Perform any necessary tasks before we enter congestion recovery. 
 */
static void
cubic_cong_signal(struct cc_var *ccv, uint32_t type)
{
	struct cubic *cubic_data;

	cubic_data = ccv->cc_data;

	switch (type) {
	case CC_NDUPACK:
		/*
		 * Duplicate-ACK threshold reached: snapshot the window state
		 * and enter fast recovery if we aren't already in it.
		 */
		if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
			if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
				cubic_ssthresh_update(ccv);
				cubic_data->num_cong_events++;
				cubic_data->prev_max_cwnd = cubic_data->max_cwnd;
				cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
			}
			ENTER_RECOVERY(CCV(ccv, t_flags));
		}
		break;

	case CC_ECN:
		/*
		 * ECN congestion signal: same bookkeeping as above, but the
		 * cwnd reduction is applied immediately (no loss recovery
		 * phase) and the congestion epoch is restarted here.
		 */
		if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
			cubic_ssthresh_update(ccv);
			cubic_data->num_cong_events++;
			cubic_data->prev_max_cwnd = cubic_data->max_cwnd;
			cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
			cubic_data->t_last_cong = ticks;
			CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
			ENTER_CONGRECOVERY(CCV(ccv, t_flags));
		}
		break;

	case CC_RTO:
		/*
		 * Grab the current time and record it so we know when the
		 * most recent congestion event was. Only record it when the
		 * timeout has fired more than once, as there is a reasonable
		 * chance the first one is a false alarm and may not indicate
		 * congestion.
		 */
		if (CCV(ccv, t_rxtshift) >= 2) {
			cubic_data->num_cong_events++;
			cubic_data->t_last_cong = ticks;
		}
		break;
	}
}

/*
 * Connection-establishment hook.
 */
static void
cubic_conn_init(struct cc_var *ccv)
{
	struct cubic *cubic_data;

	cubic_data = ccv->cc_data;

	/*
	 * Ensure we have a sane initial value for max_cwnd recorded. Without
	 * this here bad things happen when entries from the TCP hostcache
	 * get used.
	 */
	cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
}

/*
 * Module load hook: CUBIC provides no after_idle behaviour of its own, so
 * reuse NewReno's handler.
 */
static int
cubic_mod_init(void)
{

	cubic_cc_algo.after_idle = newreno_cc_algo.after_idle;

	return (0);
}

/*
 * Perform any necessary tasks before we exit congestion recovery.
 */
static void
cubic_post_recovery(struct cc_var *ccv)
{
	struct cubic *cubic_data;
	int pipe;

	cubic_data = ccv->cc_data;
	pipe = 0;

	/*
	 * Fast convergence heuristic: if max_cwnd shrank since the previous
	 * congestion event, shrink the recorded plateau further so a
	 * competing new flow can claim the released bandwidth sooner.
	 */
	if (cubic_data->max_cwnd < cubic_data->prev_max_cwnd)
		cubic_data->max_cwnd = (cubic_data->max_cwnd * CUBIC_FC_FACTOR)
		    >> CUBIC_SHIFT;

	if (IN_FASTRECOVERY(CCV(ccv, t_flags))) {
		/*
		 * If inflight data is less than ssthresh, set cwnd
		 * conservatively to avoid a burst of data, as suggested in
		 * the NewReno RFC. Otherwise, use the CUBIC method.
		 *
		 * XXXLAS: Find a way to do this without needing curack
		 */
		if (V_tcp_do_rfc6675_pipe)
			pipe = tcp_compute_pipe(ccv->ccvc.tcp);
		else
			pipe = CCV(ccv, snd_max) - ccv->curack;

		if (pipe < CCV(ccv, snd_ssthresh))
			/*
			 * Ensure that cwnd does not collapse to 1 MSS under
			 * adverse conditions. Implements RFC6582
			 */
			CCV(ccv, snd_cwnd) = max(pipe, CCV(ccv, t_maxseg)) +
			    CCV(ccv, t_maxseg);
		else
			/* Update cwnd based on beta and adjusted max_cwnd. */
			CCV(ccv, snd_cwnd) = max(1, ((CUBIC_BETA *
			    cubic_data->max_cwnd) >> CUBIC_SHIFT));
	}
	/* A new congestion epoch starts now. */
	cubic_data->t_last_cong = ticks;

	/* Calculate the average RTT between congestion epochs. */
	if (cubic_data->epoch_ack_count > 0 &&
	    cubic_data->sum_rtt_ticks >= cubic_data->epoch_ack_count) {
		cubic_data->mean_rtt_ticks = (int)(cubic_data->sum_rtt_ticks /
		    cubic_data->epoch_ack_count);
	}

	/* Reset the per-epoch RTT accumulators for the new epoch. */
	cubic_data->epoch_ack_count = 0;
	cubic_data->sum_rtt_ticks = 0;
	/* Recompute K for the (possibly reduced) max_cwnd plateau. */
	cubic_data->K = cubic_k(cubic_data->max_cwnd / CCV(ccv, t_maxseg));
}

/*
 * Record the min RTT and sum samples for the epoch average RTT calculation.
 */
static void
cubic_record_rtt(struct cc_var *ccv)
{
	struct cubic *cubic_data;
	int t_srtt_ticks;

	/* Ignore srtt until a min number of samples have been taken. */
	if (CCV(ccv, t_rttupdated) >= CUBIC_MIN_RTT_SAMPLES) {
		cubic_data = ccv->cc_data;
		/* Convert the scaled smoothed RTT to whole ticks. */
		t_srtt_ticks = CCV(ccv, t_srtt) / TCP_RTT_SCALE;

		/*
		 * Record the current SRTT as our minrtt if it's the smallest
		 * we've seen or minrtt is currently equal to its initialised
		 * value.
		 *
		 * XXXLAS: Should there be some hysteresis for minrtt?
		 */
		if ((t_srtt_ticks < cubic_data->min_rtt_ticks ||
		    cubic_data->min_rtt_ticks == TCPTV_SRTTBASE)) {
			/* Floor at 1 tick; min_rtt_ticks must stay nonzero. */
			cubic_data->min_rtt_ticks = max(1, t_srtt_ticks);

			/*
			 * If the connection is within its first congestion
			 * epoch, ensure we prime mean_rtt_ticks with a
			 * reasonable value until the epoch average RTT is
			 * calculated in cubic_post_recovery().
			 */
			if (cubic_data->min_rtt_ticks >
			    cubic_data->mean_rtt_ticks)
				cubic_data->mean_rtt_ticks =
				    cubic_data->min_rtt_ticks;
		}

		/* Sum samples for epoch average RTT calculation. */
		cubic_data->sum_rtt_ticks += t_srtt_ticks;
		cubic_data->epoch_ack_count++;
	}
}

/*
 * Update the ssthresh in the event of congestion.
39767fef78bSLawrence Stewart */ 39867fef78bSLawrence Stewart static void 39967fef78bSLawrence Stewart cubic_ssthresh_update(struct cc_var *ccv) 40067fef78bSLawrence Stewart { 40167fef78bSLawrence Stewart struct cubic *cubic_data; 40267fef78bSLawrence Stewart 40367fef78bSLawrence Stewart cubic_data = ccv->cc_data; 40467fef78bSLawrence Stewart 40567fef78bSLawrence Stewart /* 40667fef78bSLawrence Stewart * On the first congestion event, set ssthresh to cwnd * 0.5, on 40767fef78bSLawrence Stewart * subsequent congestion events, set it to cwnd * beta. 40867fef78bSLawrence Stewart */ 40967fef78bSLawrence Stewart if (cubic_data->num_cong_events == 0) 41067fef78bSLawrence Stewart CCV(ccv, snd_ssthresh) = CCV(ccv, snd_cwnd) >> 1; 41167fef78bSLawrence Stewart else 4123ac12506SJonathan T. Looney CCV(ccv, snd_ssthresh) = ((u_long)CCV(ccv, snd_cwnd) * 4133ac12506SJonathan T. Looney CUBIC_BETA) >> CUBIC_SHIFT; 41467fef78bSLawrence Stewart } 41567fef78bSLawrence Stewart 41667fef78bSLawrence Stewart 41767fef78bSLawrence Stewart DECLARE_CC_MODULE(cubic, &cubic_cc_algo); 418