/*-
 * Copyright (c) 2008-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Lawrence Stewart while studying at the Centre
 * for Advanced Internet Architectures, Swinburne University of Technology, made
 * possible in part by a grant from the Cisco University Research Program Fund
 * at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * An implementation of the CUBIC congestion control algorithm for FreeBSD,
 * based on the Internet Draft "draft-rhee-tcpm-cubic-02" by Rhee, Xu and Ha.
 * Originally released as part of the NewTCP research project at Swinburne
 * University of Technology's Centre for Advanced Internet Architectures,
 * Melbourne, Australia, which was made possible in part by a grant from the
 * Cisco University Research Program Fund at Community Foundation Silicon
 * Valley.
More details are available at: 4567fef78bSLawrence Stewart * http://caia.swin.edu.au/urp/newtcp/ 4667fef78bSLawrence Stewart */ 4767fef78bSLawrence Stewart 4867fef78bSLawrence Stewart #include <sys/cdefs.h> 4967fef78bSLawrence Stewart __FBSDID("$FreeBSD$"); 5067fef78bSLawrence Stewart 5167fef78bSLawrence Stewart #include <sys/param.h> 5267fef78bSLawrence Stewart #include <sys/kernel.h> 5367fef78bSLawrence Stewart #include <sys/malloc.h> 5467fef78bSLawrence Stewart #include <sys/module.h> 5567fef78bSLawrence Stewart #include <sys/socket.h> 5667fef78bSLawrence Stewart #include <sys/socketvar.h> 5767fef78bSLawrence Stewart #include <sys/sysctl.h> 5867fef78bSLawrence Stewart #include <sys/systm.h> 5967fef78bSLawrence Stewart 6067fef78bSLawrence Stewart #include <net/vnet.h> 6167fef78bSLawrence Stewart 62*2de3e790SGleb Smirnoff #include <netinet/tcp.h> 6367fef78bSLawrence Stewart #include <netinet/tcp_seq.h> 6467fef78bSLawrence Stewart #include <netinet/tcp_timer.h> 6567fef78bSLawrence Stewart #include <netinet/tcp_var.h> 66*2de3e790SGleb Smirnoff #include <netinet/tcp_cc.h> 6767fef78bSLawrence Stewart #include <netinet/cc/cc_cubic.h> 6867fef78bSLawrence Stewart #include <netinet/cc/cc_module.h> 6967fef78bSLawrence Stewart 7067fef78bSLawrence Stewart static void cubic_ack_received(struct cc_var *ccv, uint16_t type); 7167fef78bSLawrence Stewart static void cubic_cb_destroy(struct cc_var *ccv); 7267fef78bSLawrence Stewart static int cubic_cb_init(struct cc_var *ccv); 7367fef78bSLawrence Stewart static void cubic_cong_signal(struct cc_var *ccv, uint32_t type); 7467fef78bSLawrence Stewart static void cubic_conn_init(struct cc_var *ccv); 7567fef78bSLawrence Stewart static int cubic_mod_init(void); 7667fef78bSLawrence Stewart static void cubic_post_recovery(struct cc_var *ccv); 7767fef78bSLawrence Stewart static void cubic_record_rtt(struct cc_var *ccv); 7867fef78bSLawrence Stewart static void cubic_ssthresh_update(struct cc_var *ccv); 7967fef78bSLawrence Stewart 
8067fef78bSLawrence Stewart struct cubic { 8167fef78bSLawrence Stewart /* Cubic K in fixed point form with CUBIC_SHIFT worth of precision. */ 8267fef78bSLawrence Stewart int64_t K; 8367fef78bSLawrence Stewart /* Sum of RTT samples across an epoch in ticks. */ 8467fef78bSLawrence Stewart int64_t sum_rtt_ticks; 8567fef78bSLawrence Stewart /* cwnd at the most recent congestion event. */ 8667fef78bSLawrence Stewart unsigned long max_cwnd; 8767fef78bSLawrence Stewart /* cwnd at the previous congestion event. */ 8867fef78bSLawrence Stewart unsigned long prev_max_cwnd; 8967fef78bSLawrence Stewart /* Number of congestion events. */ 9067fef78bSLawrence Stewart uint32_t num_cong_events; 9167fef78bSLawrence Stewart /* Minimum observed rtt in ticks. */ 9267fef78bSLawrence Stewart int min_rtt_ticks; 9367fef78bSLawrence Stewart /* Mean observed rtt between congestion epochs. */ 9467fef78bSLawrence Stewart int mean_rtt_ticks; 9567fef78bSLawrence Stewart /* ACKs since last congestion event. */ 9667fef78bSLawrence Stewart int epoch_ack_count; 9767fef78bSLawrence Stewart /* Time of last congestion event in ticks. 
*/ 9867fef78bSLawrence Stewart int t_last_cong; 9967fef78bSLawrence Stewart }; 10067fef78bSLawrence Stewart 1016bed196cSSergey Kandaurov static MALLOC_DEFINE(M_CUBIC, "cubic data", 10267fef78bSLawrence Stewart "Per connection data required for the CUBIC congestion control algorithm"); 10367fef78bSLawrence Stewart 10467fef78bSLawrence Stewart struct cc_algo cubic_cc_algo = { 10567fef78bSLawrence Stewart .name = "cubic", 10667fef78bSLawrence Stewart .ack_received = cubic_ack_received, 10767fef78bSLawrence Stewart .cb_destroy = cubic_cb_destroy, 10867fef78bSLawrence Stewart .cb_init = cubic_cb_init, 10967fef78bSLawrence Stewart .cong_signal = cubic_cong_signal, 11067fef78bSLawrence Stewart .conn_init = cubic_conn_init, 11167fef78bSLawrence Stewart .mod_init = cubic_mod_init, 11267fef78bSLawrence Stewart .post_recovery = cubic_post_recovery, 11367fef78bSLawrence Stewart }; 11467fef78bSLawrence Stewart 11567fef78bSLawrence Stewart static void 11667fef78bSLawrence Stewart cubic_ack_received(struct cc_var *ccv, uint16_t type) 11767fef78bSLawrence Stewart { 11867fef78bSLawrence Stewart struct cubic *cubic_data; 11967fef78bSLawrence Stewart unsigned long w_tf, w_cubic_next; 12067fef78bSLawrence Stewart int ticks_since_cong; 12167fef78bSLawrence Stewart 12267fef78bSLawrence Stewart cubic_data = ccv->cc_data; 12367fef78bSLawrence Stewart cubic_record_rtt(ccv); 12467fef78bSLawrence Stewart 12567fef78bSLawrence Stewart /* 12667fef78bSLawrence Stewart * Regular ACK and we're not in cong/fast recovery and we're cwnd 12767fef78bSLawrence Stewart * limited and we're either not doing ABC or are slow starting or are 12867fef78bSLawrence Stewart * doing ABC and we've sent a cwnd's worth of bytes. 
12967fef78bSLawrence Stewart */ 13067fef78bSLawrence Stewart if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) && 13167fef78bSLawrence Stewart (ccv->flags & CCF_CWND_LIMITED) && (!V_tcp_do_rfc3465 || 13267fef78bSLawrence Stewart CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) || 13367fef78bSLawrence Stewart (V_tcp_do_rfc3465 && ccv->flags & CCF_ABC_SENTAWND))) { 13467fef78bSLawrence Stewart /* Use the logic in NewReno ack_received() for slow start. */ 13567fef78bSLawrence Stewart if (CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) || 13667fef78bSLawrence Stewart cubic_data->min_rtt_ticks == TCPTV_SRTTBASE) 13767fef78bSLawrence Stewart newreno_cc_algo.ack_received(ccv, type); 13867fef78bSLawrence Stewart else { 13967fef78bSLawrence Stewart ticks_since_cong = ticks - cubic_data->t_last_cong; 14067fef78bSLawrence Stewart 14167fef78bSLawrence Stewart /* 14267fef78bSLawrence Stewart * The mean RTT is used to best reflect the equations in 14367fef78bSLawrence Stewart * the I-D. Using min_rtt in the tf_cwnd calculation 14467fef78bSLawrence Stewart * causes w_tf to grow much faster than it should if the 14567fef78bSLawrence Stewart * RTT is dominated by network buffering rather than 14667fef78bSLawrence Stewart * propogation delay. 
14767fef78bSLawrence Stewart */ 14867fef78bSLawrence Stewart w_tf = tf_cwnd(ticks_since_cong, 14967fef78bSLawrence Stewart cubic_data->mean_rtt_ticks, cubic_data->max_cwnd, 15067fef78bSLawrence Stewart CCV(ccv, t_maxseg)); 15167fef78bSLawrence Stewart 15267fef78bSLawrence Stewart w_cubic_next = cubic_cwnd(ticks_since_cong + 15367fef78bSLawrence Stewart cubic_data->mean_rtt_ticks, cubic_data->max_cwnd, 15467fef78bSLawrence Stewart CCV(ccv, t_maxseg), cubic_data->K); 15567fef78bSLawrence Stewart 15667fef78bSLawrence Stewart ccv->flags &= ~CCF_ABC_SENTAWND; 15767fef78bSLawrence Stewart 15867fef78bSLawrence Stewart if (w_cubic_next < w_tf) 15967fef78bSLawrence Stewart /* 16067fef78bSLawrence Stewart * TCP-friendly region, follow tf 16167fef78bSLawrence Stewart * cwnd growth. 16267fef78bSLawrence Stewart */ 16367fef78bSLawrence Stewart CCV(ccv, snd_cwnd) = w_tf; 16467fef78bSLawrence Stewart 16567fef78bSLawrence Stewart else if (CCV(ccv, snd_cwnd) < w_cubic_next) { 16667fef78bSLawrence Stewart /* 16767fef78bSLawrence Stewart * Concave or convex region, follow CUBIC 16867fef78bSLawrence Stewart * cwnd growth. 16967fef78bSLawrence Stewart */ 17067fef78bSLawrence Stewart if (V_tcp_do_rfc3465) 17167fef78bSLawrence Stewart CCV(ccv, snd_cwnd) = w_cubic_next; 17267fef78bSLawrence Stewart else 17367fef78bSLawrence Stewart CCV(ccv, snd_cwnd) += ((w_cubic_next - 17467fef78bSLawrence Stewart CCV(ccv, snd_cwnd)) * 17567fef78bSLawrence Stewart CCV(ccv, t_maxseg)) / 17667fef78bSLawrence Stewart CCV(ccv, snd_cwnd); 17767fef78bSLawrence Stewart } 17867fef78bSLawrence Stewart 17967fef78bSLawrence Stewart /* 18067fef78bSLawrence Stewart * If we're not in slow start and we're probing for a 18167fef78bSLawrence Stewart * new cwnd limit at the start of a connection 18267fef78bSLawrence Stewart * (happens when hostcache has a relevant entry), 18367fef78bSLawrence Stewart * keep updating our current estimate of the 18467fef78bSLawrence Stewart * max_cwnd. 
18567fef78bSLawrence Stewart */ 18667fef78bSLawrence Stewart if (cubic_data->num_cong_events == 0 && 18767fef78bSLawrence Stewart cubic_data->max_cwnd < CCV(ccv, snd_cwnd)) 18867fef78bSLawrence Stewart cubic_data->max_cwnd = CCV(ccv, snd_cwnd); 18967fef78bSLawrence Stewart } 19067fef78bSLawrence Stewart } 19167fef78bSLawrence Stewart } 19267fef78bSLawrence Stewart 19367fef78bSLawrence Stewart static void 19467fef78bSLawrence Stewart cubic_cb_destroy(struct cc_var *ccv) 19567fef78bSLawrence Stewart { 19667fef78bSLawrence Stewart 19767fef78bSLawrence Stewart if (ccv->cc_data != NULL) 19867fef78bSLawrence Stewart free(ccv->cc_data, M_CUBIC); 19967fef78bSLawrence Stewart } 20067fef78bSLawrence Stewart 20167fef78bSLawrence Stewart static int 20267fef78bSLawrence Stewart cubic_cb_init(struct cc_var *ccv) 20367fef78bSLawrence Stewart { 20467fef78bSLawrence Stewart struct cubic *cubic_data; 20567fef78bSLawrence Stewart 20667fef78bSLawrence Stewart cubic_data = malloc(sizeof(struct cubic), M_CUBIC, M_NOWAIT|M_ZERO); 20767fef78bSLawrence Stewart 20867fef78bSLawrence Stewart if (cubic_data == NULL) 20967fef78bSLawrence Stewart return (ENOMEM); 21067fef78bSLawrence Stewart 21167fef78bSLawrence Stewart /* Init some key variables with sensible defaults. */ 21267fef78bSLawrence Stewart cubic_data->t_last_cong = ticks; 21367fef78bSLawrence Stewart cubic_data->min_rtt_ticks = TCPTV_SRTTBASE; 21447f44cddSLawrence Stewart cubic_data->mean_rtt_ticks = 1; 21567fef78bSLawrence Stewart 21667fef78bSLawrence Stewart ccv->cc_data = cubic_data; 21767fef78bSLawrence Stewart 21867fef78bSLawrence Stewart return (0); 21967fef78bSLawrence Stewart } 22067fef78bSLawrence Stewart 22167fef78bSLawrence Stewart /* 22267fef78bSLawrence Stewart * Perform any necessary tasks before we enter congestion recovery. 
22367fef78bSLawrence Stewart */ 22467fef78bSLawrence Stewart static void 22567fef78bSLawrence Stewart cubic_cong_signal(struct cc_var *ccv, uint32_t type) 22667fef78bSLawrence Stewart { 22767fef78bSLawrence Stewart struct cubic *cubic_data; 22867fef78bSLawrence Stewart 22967fef78bSLawrence Stewart cubic_data = ccv->cc_data; 23067fef78bSLawrence Stewart 23167fef78bSLawrence Stewart switch (type) { 23267fef78bSLawrence Stewart case CC_NDUPACK: 23367fef78bSLawrence Stewart if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) { 23467fef78bSLawrence Stewart if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) { 23567fef78bSLawrence Stewart cubic_ssthresh_update(ccv); 23667fef78bSLawrence Stewart cubic_data->num_cong_events++; 23767fef78bSLawrence Stewart cubic_data->prev_max_cwnd = cubic_data->max_cwnd; 23867fef78bSLawrence Stewart cubic_data->max_cwnd = CCV(ccv, snd_cwnd); 23967fef78bSLawrence Stewart } 24067fef78bSLawrence Stewart ENTER_RECOVERY(CCV(ccv, t_flags)); 24167fef78bSLawrence Stewart } 24267fef78bSLawrence Stewart break; 24367fef78bSLawrence Stewart 24467fef78bSLawrence Stewart case CC_ECN: 24567fef78bSLawrence Stewart if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) { 24667fef78bSLawrence Stewart cubic_ssthresh_update(ccv); 24767fef78bSLawrence Stewart cubic_data->num_cong_events++; 24867fef78bSLawrence Stewart cubic_data->prev_max_cwnd = cubic_data->max_cwnd; 24967fef78bSLawrence Stewart cubic_data->max_cwnd = CCV(ccv, snd_cwnd); 25067fef78bSLawrence Stewart cubic_data->t_last_cong = ticks; 25167fef78bSLawrence Stewart CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh); 25267fef78bSLawrence Stewart ENTER_CONGRECOVERY(CCV(ccv, t_flags)); 25367fef78bSLawrence Stewart } 25467fef78bSLawrence Stewart break; 25567fef78bSLawrence Stewart 25667fef78bSLawrence Stewart case CC_RTO: 25767fef78bSLawrence Stewart /* 25867fef78bSLawrence Stewart * Grab the current time and record it so we know when the 25967fef78bSLawrence Stewart * most recent congestion event was. 
Only record it when the 26067fef78bSLawrence Stewart * timeout has fired more than once, as there is a reasonable 26167fef78bSLawrence Stewart * chance the first one is a false alarm and may not indicate 26267fef78bSLawrence Stewart * congestion. 26367fef78bSLawrence Stewart */ 26467fef78bSLawrence Stewart if (CCV(ccv, t_rxtshift) >= 2) 26567fef78bSLawrence Stewart cubic_data->num_cong_events++; 26667fef78bSLawrence Stewart cubic_data->t_last_cong = ticks; 26767fef78bSLawrence Stewart break; 26867fef78bSLawrence Stewart } 26967fef78bSLawrence Stewart } 27067fef78bSLawrence Stewart 27167fef78bSLawrence Stewart static void 27267fef78bSLawrence Stewart cubic_conn_init(struct cc_var *ccv) 27367fef78bSLawrence Stewart { 27467fef78bSLawrence Stewart struct cubic *cubic_data; 27567fef78bSLawrence Stewart 27667fef78bSLawrence Stewart cubic_data = ccv->cc_data; 27767fef78bSLawrence Stewart 27867fef78bSLawrence Stewart /* 27967fef78bSLawrence Stewart * Ensure we have a sane initial value for max_cwnd recorded. Without 28067fef78bSLawrence Stewart * this here bad things happen when entries from the TCP hostcache 28167fef78bSLawrence Stewart * get used. 28267fef78bSLawrence Stewart */ 28367fef78bSLawrence Stewart cubic_data->max_cwnd = CCV(ccv, snd_cwnd); 28467fef78bSLawrence Stewart } 28567fef78bSLawrence Stewart 28667fef78bSLawrence Stewart static int 28767fef78bSLawrence Stewart cubic_mod_init(void) 28867fef78bSLawrence Stewart { 28967fef78bSLawrence Stewart 29067fef78bSLawrence Stewart cubic_cc_algo.after_idle = newreno_cc_algo.after_idle; 29167fef78bSLawrence Stewart 29267fef78bSLawrence Stewart return (0); 29367fef78bSLawrence Stewart } 29467fef78bSLawrence Stewart 29567fef78bSLawrence Stewart /* 29667fef78bSLawrence Stewart * Perform any necessary tasks before we exit congestion recovery. 
29767fef78bSLawrence Stewart */ 29867fef78bSLawrence Stewart static void 29967fef78bSLawrence Stewart cubic_post_recovery(struct cc_var *ccv) 30067fef78bSLawrence Stewart { 30167fef78bSLawrence Stewart struct cubic *cubic_data; 302f81bc34eSHiren Panchasara int pipe; 30367fef78bSLawrence Stewart 30467fef78bSLawrence Stewart cubic_data = ccv->cc_data; 305f81bc34eSHiren Panchasara pipe = 0; 30667fef78bSLawrence Stewart 30767fef78bSLawrence Stewart /* Fast convergence heuristic. */ 30867fef78bSLawrence Stewart if (cubic_data->max_cwnd < cubic_data->prev_max_cwnd) 30967fef78bSLawrence Stewart cubic_data->max_cwnd = (cubic_data->max_cwnd * CUBIC_FC_FACTOR) 31067fef78bSLawrence Stewart >> CUBIC_SHIFT; 31167fef78bSLawrence Stewart 31267fef78bSLawrence Stewart if (IN_FASTRECOVERY(CCV(ccv, t_flags))) { 31367fef78bSLawrence Stewart /* 31467fef78bSLawrence Stewart * If inflight data is less than ssthresh, set cwnd 31567fef78bSLawrence Stewart * conservatively to avoid a burst of data, as suggested in 31667fef78bSLawrence Stewart * the NewReno RFC. Otherwise, use the CUBIC method. 31767fef78bSLawrence Stewart * 31867fef78bSLawrence Stewart * XXXLAS: Find a way to do this without needing curack 31967fef78bSLawrence Stewart */ 320f81bc34eSHiren Panchasara if (V_tcp_do_rfc6675_pipe) 321f81bc34eSHiren Panchasara pipe = tcp_compute_pipe(ccv->ccvc.tcp); 322f81bc34eSHiren Panchasara else 323f81bc34eSHiren Panchasara pipe = CCV(ccv, snd_max) - ccv->curack; 324f81bc34eSHiren Panchasara 325f81bc34eSHiren Panchasara if (pipe < CCV(ccv, snd_ssthresh)) 326f81bc34eSHiren Panchasara CCV(ccv, snd_cwnd) = pipe + CCV(ccv, t_maxseg); 32767fef78bSLawrence Stewart else 32867fef78bSLawrence Stewart /* Update cwnd based on beta and adjusted max_cwnd. 
*/ 32967fef78bSLawrence Stewart CCV(ccv, snd_cwnd) = max(1, ((CUBIC_BETA * 33067fef78bSLawrence Stewart cubic_data->max_cwnd) >> CUBIC_SHIFT)); 33167fef78bSLawrence Stewart } 33267fef78bSLawrence Stewart cubic_data->t_last_cong = ticks; 33367fef78bSLawrence Stewart 33467fef78bSLawrence Stewart /* Calculate the average RTT between congestion epochs. */ 33547f44cddSLawrence Stewart if (cubic_data->epoch_ack_count > 0 && 33647f44cddSLawrence Stewart cubic_data->sum_rtt_ticks >= cubic_data->epoch_ack_count) { 33767fef78bSLawrence Stewart cubic_data->mean_rtt_ticks = (int)(cubic_data->sum_rtt_ticks / 33867fef78bSLawrence Stewart cubic_data->epoch_ack_count); 33947f44cddSLawrence Stewart } 34067fef78bSLawrence Stewart 34167fef78bSLawrence Stewart cubic_data->epoch_ack_count = 0; 34267fef78bSLawrence Stewart cubic_data->sum_rtt_ticks = 0; 34367fef78bSLawrence Stewart cubic_data->K = cubic_k(cubic_data->max_cwnd / CCV(ccv, t_maxseg)); 34467fef78bSLawrence Stewart } 34567fef78bSLawrence Stewart 34667fef78bSLawrence Stewart /* 34767fef78bSLawrence Stewart * Record the min RTT and sum samples for the epoch average RTT calculation. 34867fef78bSLawrence Stewart */ 34967fef78bSLawrence Stewart static void 35067fef78bSLawrence Stewart cubic_record_rtt(struct cc_var *ccv) 35167fef78bSLawrence Stewart { 35267fef78bSLawrence Stewart struct cubic *cubic_data; 35367fef78bSLawrence Stewart int t_srtt_ticks; 35467fef78bSLawrence Stewart 35567fef78bSLawrence Stewart /* Ignore srtt until a min number of samples have been taken. 
*/ 35667fef78bSLawrence Stewart if (CCV(ccv, t_rttupdated) >= CUBIC_MIN_RTT_SAMPLES) { 35767fef78bSLawrence Stewart cubic_data = ccv->cc_data; 35867fef78bSLawrence Stewart t_srtt_ticks = CCV(ccv, t_srtt) / TCP_RTT_SCALE; 35967fef78bSLawrence Stewart 36067fef78bSLawrence Stewart /* 36167fef78bSLawrence Stewart * Record the current SRTT as our minrtt if it's the smallest 36267fef78bSLawrence Stewart * we've seen or minrtt is currently equal to its initialised 36367fef78bSLawrence Stewart * value. 36467fef78bSLawrence Stewart * 36567fef78bSLawrence Stewart * XXXLAS: Should there be some hysteresis for minrtt? 36667fef78bSLawrence Stewart */ 36767fef78bSLawrence Stewart if ((t_srtt_ticks < cubic_data->min_rtt_ticks || 36847f44cddSLawrence Stewart cubic_data->min_rtt_ticks == TCPTV_SRTTBASE)) { 36967fef78bSLawrence Stewart cubic_data->min_rtt_ticks = max(1, t_srtt_ticks); 37067fef78bSLawrence Stewart 37147f44cddSLawrence Stewart /* 37247f44cddSLawrence Stewart * If the connection is within its first congestion 37347f44cddSLawrence Stewart * epoch, ensure we prime mean_rtt_ticks with a 37447f44cddSLawrence Stewart * reasonable value until the epoch average RTT is 37547f44cddSLawrence Stewart * calculated in cubic_post_recovery(). 37647f44cddSLawrence Stewart */ 37747f44cddSLawrence Stewart if (cubic_data->min_rtt_ticks > 37847f44cddSLawrence Stewart cubic_data->mean_rtt_ticks) 37947f44cddSLawrence Stewart cubic_data->mean_rtt_ticks = 38047f44cddSLawrence Stewart cubic_data->min_rtt_ticks; 38147f44cddSLawrence Stewart } 38247f44cddSLawrence Stewart 38367fef78bSLawrence Stewart /* Sum samples for epoch average RTT calculation. */ 38467fef78bSLawrence Stewart cubic_data->sum_rtt_ticks += t_srtt_ticks; 38567fef78bSLawrence Stewart cubic_data->epoch_ack_count++; 38667fef78bSLawrence Stewart } 38767fef78bSLawrence Stewart } 38867fef78bSLawrence Stewart 38967fef78bSLawrence Stewart /* 39067fef78bSLawrence Stewart * Update the ssthresh in the event of congestion. 
39167fef78bSLawrence Stewart */ 39267fef78bSLawrence Stewart static void 39367fef78bSLawrence Stewart cubic_ssthresh_update(struct cc_var *ccv) 39467fef78bSLawrence Stewart { 39567fef78bSLawrence Stewart struct cubic *cubic_data; 39667fef78bSLawrence Stewart 39767fef78bSLawrence Stewart cubic_data = ccv->cc_data; 39867fef78bSLawrence Stewart 39967fef78bSLawrence Stewart /* 40067fef78bSLawrence Stewart * On the first congestion event, set ssthresh to cwnd * 0.5, on 40167fef78bSLawrence Stewart * subsequent congestion events, set it to cwnd * beta. 40267fef78bSLawrence Stewart */ 40367fef78bSLawrence Stewart if (cubic_data->num_cong_events == 0) 40467fef78bSLawrence Stewart CCV(ccv, snd_ssthresh) = CCV(ccv, snd_cwnd) >> 1; 40567fef78bSLawrence Stewart else 40667fef78bSLawrence Stewart CCV(ccv, snd_ssthresh) = (CCV(ccv, snd_cwnd) * CUBIC_BETA) 40767fef78bSLawrence Stewart >> CUBIC_SHIFT; 40867fef78bSLawrence Stewart } 40967fef78bSLawrence Stewart 41067fef78bSLawrence Stewart 41167fef78bSLawrence Stewart DECLARE_CC_MODULE(cubic, &cubic_cc_algo); 412