xref: /freebsd/sys/netinet/cc/cc_htcp.c (revision 6bfca4dcab07dad45a805879d954876b353c0810)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2007-2008
5  * 	Swinburne University of Technology, Melbourne, Australia
6  * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
7  * Copyright (c) 2010 The FreeBSD Foundation
8  * All rights reserved.
9  *
10  * This software was developed at the Centre for Advanced Internet
11  * Architectures, Swinburne University of Technology, by Lawrence Stewart and
12  * James Healy, made possible in part by a grant from the Cisco University
13  * Research Program Fund at Community Foundation Silicon Valley.
14  *
15  * Portions of this software were developed at the Centre for Advanced
16  * Internet Architectures, Swinburne University of Technology, Melbourne,
17  * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
18  *
19  * Redistribution and use in source and binary forms, with or without
20  * modification, are permitted provided that the following conditions
21  * are met:
22  * 1. Redistributions of source code must retain the above copyright
23  *    notice, this list of conditions and the following disclaimer.
24  * 2. Redistributions in binary form must reproduce the above copyright
25  *    notice, this list of conditions and the following disclaimer in the
26  *    documentation and/or other materials provided with the distribution.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  */
40 
41 /*
42  * An implementation of the H-TCP congestion control algorithm for FreeBSD,
43  * based on the Internet Draft "draft-leith-tcp-htcp-06.txt" by Leith and
44  * Shorten. Originally released as part of the NewTCP research project at
45  * Swinburne University of Technology's Centre for Advanced Internet
46  * Architectures, Melbourne, Australia, which was made possible in part by a
47  * grant from the Cisco University Research Program Fund at Community Foundation
48  * Silicon Valley. More details are available at:
49  *   http://caia.swin.edu.au/urp/newtcp/
50  */
51 
52 #include <sys/param.h>
53 #include <sys/kernel.h>
54 #include <sys/limits.h>
55 #include <sys/malloc.h>
56 #include <sys/module.h>
57 #include <sys/socket.h>
58 #include <sys/socketvar.h>
59 #include <sys/sysctl.h>
60 #include <sys/systm.h>
61 
62 #include <net/vnet.h>
63 
64 #include <net/route.h>
65 #include <net/route/nhop.h>
66 
67 #include <netinet/in_pcb.h>
68 #include <netinet/tcp.h>
69 #include <netinet/tcp_seq.h>
70 #include <netinet/tcp_timer.h>
71 #include <netinet/tcp_var.h>
72 #include <netinet/cc/cc.h>
73 #include <netinet/cc/cc_module.h>
74 
75 /* Fixed point math shifts. */
76 #define HTCP_SHIFT 8
77 #define HTCP_ALPHA_INC_SHIFT 4
78 
79 #define HTCP_INIT_ALPHA 1
80 #define HTCP_DELTA_L hz		/* 1 sec in ticks. */
81 #define HTCP_MINBETA 128	/* 0.5 << HTCP_SHIFT. */
82 #define HTCP_MAXBETA 204	/* ~0.8 << HTCP_SHIFT. */
83 #define HTCP_MINROWE 26		/* ~0.1 << HTCP_SHIFT. */
84 #define HTCP_MAXROWE 512	/* 2 << HTCP_SHIFT. */
85 
86 /* RTT_ref (ms) used in the calculation of alpha if RTT scaling is enabled. */
87 #define HTCP_RTT_REF 100
88 
89 /* Don't trust SRTT until this many samples have been taken. */
90 #define HTCP_MIN_RTT_SAMPLES 8
91 
92 /*
93  * HTCP_CALC_ALPHA performs a fixed point math calculation to determine the
94  * value of alpha, based on the function defined in the HTCP spec.
95  *
96  * i.e. 1 + 10(delta - delta_l) + ((delta - delta_l) / 2) ^ 2
97  *
98  * "diff" is passed in to the macro as "delta - delta_l" and is expected to be
99  * in units of ticks.
100  *
 101  * The joyousness of fixed point maths means our function implementation looks a
102  * little funky...
103  *
104  * In order to maintain some precision in the calculations, a fixed point shift
105  * HTCP_ALPHA_INC_SHIFT is used to ensure the integer divisions don't
106  * truncate the results too badly.
107  *
108  * The "16" value is the "1" term in the alpha function shifted up by
109  * HTCP_ALPHA_INC_SHIFT
110  *
111  * The "160" value is the "10" multiplier in the alpha function multiplied by
112  * 2^HTCP_ALPHA_INC_SHIFT
113  *
114  * Specifying these as constants reduces the computations required. After
115  * up-shifting all the terms in the function and performing the required
116  * calculations, we down-shift the final result by HTCP_ALPHA_INC_SHIFT to
117  * ensure it is back in the correct range.
118  *
119  * The "hz" terms are required as kernels can be configured to run with
120  * different tick timers, which we have to adjust for in the alpha calculation
121  * (which originally was defined in terms of seconds).
122  *
123  * We also have to be careful to constrain the value of diff such that it won't
124  * overflow whilst performing the calculation. The middle term i.e. (160 * diff)
125  * / hz is the limiting factor in the calculation. We must constrain diff to be
126  * less than the max size of an int divided by the constant 160 figure
127  * i.e. diff < INT_MAX / 160
128  *
129  * NB: Changing HTCP_ALPHA_INC_SHIFT will require you to MANUALLY update the
130  * constants used in this function!
131  */
132 #define HTCP_CALC_ALPHA(diff) \
133 ((\
134 	(16) + \
135 	((160 * (diff)) / hz) + \
136 	(((diff) / hz) * (((diff) << HTCP_ALPHA_INC_SHIFT) / (4 * hz))) \
137 ) >> HTCP_ALPHA_INC_SHIFT)
138 
139 static void	htcp_ack_received(struct cc_var *ccv, uint16_t type);
140 static void	htcp_cb_destroy(struct cc_var *ccv);
141 static int	htcp_cb_init(struct cc_var *ccv, void *ptr);
142 static void	htcp_cong_signal(struct cc_var *ccv, uint32_t type);
143 static int	htcp_mod_init(void);
144 static void	htcp_post_recovery(struct cc_var *ccv);
145 static void	htcp_recalc_alpha(struct cc_var *ccv);
146 static void	htcp_recalc_beta(struct cc_var *ccv);
147 static void	htcp_record_rtt(struct cc_var *ccv);
148 static void	htcp_ssthresh_update(struct cc_var *ccv);
149 static size_t	htcp_data_sz(void);
150 
/*
 * Per-connection H-TCP state, hung off cc_var->cc_data.
 */
struct htcp {
	/* cwnd before entering cong recovery. */
	unsigned long	prev_cwnd;
	/* cwnd additive increase parameter (segments per RTT). */
	int		alpha;
	/* cwnd multiplicative decrease parameter, << HTCP_SHIFT fixed point. */
	int		beta;
	/* Largest rtt seen for the flow (same units as tcp_get_srtt() ticks). */
	int		maxrtt;
	/* Shortest rtt seen for the flow (same units as tcp_get_srtt() ticks). */
	int		minrtt;
	/* Time of last congestion event in ticks. */
	int		t_last_cong;
};
165 
166 static int htcp_rtt_ref;
167 /*
168  * The maximum number of ticks the value of diff can reach in
169  * htcp_recalc_alpha() before alpha will stop increasing due to overflow.
170  * See comment above HTCP_CALC_ALPHA for more info.
171  */
172 static int htcp_max_diff = INT_MAX / ((1 << HTCP_ALPHA_INC_SHIFT) * 10);
173 
174 /* Per-netstack vars. */
175 VNET_DEFINE_STATIC(u_int, htcp_adaptive_backoff) = 0;
176 VNET_DEFINE_STATIC(u_int, htcp_rtt_scaling) = 0;
177 #define	V_htcp_adaptive_backoff    VNET(htcp_adaptive_backoff)
178 #define	V_htcp_rtt_scaling    VNET(htcp_rtt_scaling)
179 
/*
 * H-TCP method table for the modular congestion control framework.
 * after_idle is delegated to the NewReno handler.
 */
struct cc_algo htcp_cc_algo = {
	.name = "htcp",
	.ack_received = htcp_ack_received,
	.cb_destroy = htcp_cb_destroy,
	.cb_init = htcp_cb_init,
	.cong_signal = htcp_cong_signal,
	.mod_init = htcp_mod_init,
	.post_recovery = htcp_post_recovery,
	.cc_data_sz = htcp_data_sz,
	.after_idle = newreno_cc_after_idle,
};
191 
/*
 * Per-ACK hook: record the current SRTT sample and grow cwnd.
 *
 * In slow start, or for the first HTCP_DELTA_L ticks after a flow start /
 * congestion event (while alpha == 1), growth falls through to the NewReno
 * handler. Otherwise cwnd increases by alpha segments per RTT: directly per
 * cwnd's worth of ACKed bytes under ABC (RFC 3465), or via the per-ACK
 * alpha/cwnd fixed-point approximation without it.
 */
static void
htcp_ack_received(struct cc_var *ccv, uint16_t type)
{
	struct htcp *htcp_data;

	htcp_data = ccv->cc_data;
	htcp_record_rtt(ccv);

	/*
	 * Regular ACK and we're not in cong/fast recovery and we're cwnd
	 * limited and we're either not doing ABC or are slow starting or are
	 * doing ABC and we've sent a cwnd's worth of bytes.
	 */
	if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
	    (ccv->flags & CCF_CWND_LIMITED) && (!V_tcp_do_rfc3465 ||
	    CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) ||
	    (V_tcp_do_rfc3465 && ccv->flags & CCF_ABC_SENTAWND))) {
		/* Refresh both control parameters before applying them. */
		htcp_recalc_beta(ccv);
		htcp_recalc_alpha(ccv);
		/*
		 * Use the logic in NewReno ack_received() for slow start and
		 * for the first HTCP_DELTA_L ticks after either the flow starts
		 * or a congestion event (when alpha equals 1).
		 */
		if (htcp_data->alpha == 1 ||
		    CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh))
			newreno_cc_ack_received(ccv, type);
		else {
			if (V_tcp_do_rfc3465) {
				/* Increment cwnd by alpha segments. */
				CCV(ccv, snd_cwnd) += htcp_data->alpha *
				    CCV(ccv, t_maxseg);
				ccv->flags &= ~CCF_ABC_SENTAWND;
			} else
				/*
				 * Increment cwnd by alpha/cwnd segments to
				 * approximate an increase of alpha segments
				 * per RTT.
				 */
				CCV(ccv, snd_cwnd) += (((htcp_data->alpha <<
				    HTCP_SHIFT) / (CCV(ccv, snd_cwnd) /
				    CCV(ccv, t_maxseg))) * CCV(ccv, t_maxseg))
				    >> HTCP_SHIFT;
		}
	}
}
238 
239 static void
240 htcp_cb_destroy(struct cc_var *ccv)
241 {
242 	free(ccv->cc_data, M_CC_MEM);
243 }
244 
245 static size_t
246 htcp_data_sz(void)
247 {
248 	return(sizeof(struct htcp));
249 }
250 
251 static int
252 htcp_cb_init(struct cc_var *ccv, void *ptr)
253 {
254 	struct htcp *htcp_data;
255 
256 	INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp));
257 	if (ptr == NULL) {
258 		htcp_data = malloc(sizeof(struct htcp), M_CC_MEM, M_NOWAIT);
259 		if (htcp_data == NULL)
260 			return (ENOMEM);
261 	} else
262 		htcp_data = ptr;
263 
264 	/* Init some key variables with sensible defaults. */
265 	htcp_data->alpha = HTCP_INIT_ALPHA;
266 	htcp_data->beta = HTCP_MINBETA;
267 	htcp_data->maxrtt = TCPTV_SRTTBASE;
268 	htcp_data->minrtt = TCPTV_SRTTBASE;
269 	htcp_data->prev_cwnd = 0;
270 	htcp_data->t_last_cong = ticks;
271 
272 	ccv->cc_data = htcp_data;
273 
274 	return (0);
275 }
276 
277 /*
278  * Perform any necessary tasks before we enter congestion recovery.
279  */
/*
 * Perform any necessary tasks before we enter congestion recovery.
 *
 * For NDUPACK/ECN signals the ssthresh update and congestion timestamp are
 * taken only on first entry into recovery; note the ordering — ssthresh is
 * recomputed before the recovery flags are set. For RTO, ssthresh is reset
 * per RFC 5681 and cwnd collapses to one segment.
 */
static void
htcp_cong_signal(struct cc_var *ccv, uint32_t type)
{
	struct htcp *htcp_data;
	u_int mss;

	htcp_data = ccv->cc_data;
	mss = tcp_maxseg(ccv->ccvc.tcp);

	switch (type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
			if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
				/*
				 * Apply hysteresis to maxrtt to ensure
				 * reductions in the RTT are reflected in our
				 * measurements.
				 */
				htcp_data->maxrtt = (htcp_data->minrtt +
				    (htcp_data->maxrtt - htcp_data->minrtt) *
				    95) / 100;
				htcp_ssthresh_update(ccv);
				htcp_data->t_last_cong = ticks;
				htcp_data->prev_cwnd = CCV(ccv, snd_cwnd);
			}
			ENTER_RECOVERY(CCV(ccv, t_flags));
		}
		break;

	case CC_ECN:
		if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
			/*
			 * Apply hysteresis to maxrtt to ensure reductions in
			 * the RTT are reflected in our measurements.
			 */
			htcp_data->maxrtt = (htcp_data->minrtt + (htcp_data->maxrtt -
			    htcp_data->minrtt) * 95) / 100;
			htcp_ssthresh_update(ccv);
			/* ECN reduces cwnd immediately; prev_cwnd records the reduced value. */
			CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
			htcp_data->t_last_cong = ticks;
			htcp_data->prev_cwnd = CCV(ccv, snd_cwnd);
			ENTER_CONGRECOVERY(CCV(ccv, t_flags));
		}
		break;

	case CC_RTO:
		/* ssthresh = max(FlightSize-ish window / 2, 2 segments), rounded to mss. */
		CCV(ccv, snd_ssthresh) = max(min(CCV(ccv, snd_wnd),
						 CCV(ccv, snd_cwnd)) / 2 / mss,
					     2) * mss;
		CCV(ccv, snd_cwnd) = mss;
		/*
		 * Grab the current time and record it so we know when the
		 * most recent congestion event was. Only record it when the
		 * timeout has fired more than once, as there is a reasonable
		 * chance the first one is a false alarm and may not indicate
		 * congestion.
		 */
		if (CCV(ccv, t_rxtshift) >= 2)
			htcp_data->t_last_cong = ticks;
		break;
	}
}
342 
343 static int
344 htcp_mod_init(void)
345 {
346 	/*
347 	 * HTCP_RTT_REF is defined in ms, and t_srtt in the tcpcb is stored in
348 	 * units of TCP_RTT_SCALE*hz. Scale HTCP_RTT_REF to be in the same units
349 	 * as t_srtt.
350 	 */
351 	htcp_rtt_ref = (HTCP_RTT_REF * TCP_RTT_SCALE * hz) / 1000;
352 	return (0);
353 }
354 
355 /*
356  * Perform any necessary tasks before we exit congestion recovery.
357  */
/*
 * Perform any necessary tasks before we exit congestion recovery.
 *
 * On leaving fast recovery, deflate cwnd: conservatively to pipe + 1 MSS
 * (RFC 6582) when little data is in flight, otherwise to beta * prev_cwnd
 * per H-TCP.
 */
static void
htcp_post_recovery(struct cc_var *ccv)
{
	int pipe;
	struct htcp *htcp_data;

	pipe = 0;
	htcp_data = ccv->cc_data;

	if (IN_FASTRECOVERY(CCV(ccv, t_flags))) {
		/*
		 * If inflight data is less than ssthresh, set cwnd
		 * conservatively to avoid a burst of data, as suggested in the
		 * NewReno RFC. Otherwise, use the HTCP method.
		 *
		 * XXXLAS: Find a way to do this without needing curack
		 */
		if (V_tcp_do_newsack)
			pipe = tcp_compute_pipe(ccv->ccvc.tcp);
		else
			pipe = CCV(ccv, snd_max) - ccv->curack;

		if (pipe < CCV(ccv, snd_ssthresh))
			/*
			 * Ensure that cwnd does not collapse to 1 MSS under
			 * adverse conditions. Implements RFC6582
			 */
			CCV(ccv, snd_cwnd) = max(pipe, CCV(ccv, t_maxseg)) +
			    CCV(ccv, t_maxseg);
		else
			/* beta is << HTCP_SHIFT fixed point; floor at 1 segment. */
			CCV(ccv, snd_cwnd) = max(1, ((htcp_data->beta *
			    htcp_data->prev_cwnd / CCV(ccv, t_maxseg))
			    >> HTCP_SHIFT)) * CCV(ccv, t_maxseg);
	}
}
393 
/*
 * Recompute the additive increase parameter alpha from the time elapsed
 * since the last congestion event, per the H-TCP spec (see the commentary
 * above HTCP_CALC_ALPHA). Optionally applies the adaptive backoff fairness
 * correction and RTT scaling adjustments.
 */
static void
htcp_recalc_alpha(struct cc_var *ccv)
{
	struct htcp *htcp_data;
	int alpha, diff, now;

	htcp_data = ccv->cc_data;
	now = ticks;

	/*
	 * If ticks has wrapped around (will happen approximately once every 49
	 * days on a machine with the default kern.hz=1000) and a flow straddles
	 * the wrap point, our alpha calcs will be completely wrong. We cut our
	 * losses and restart alpha from scratch by setting t_last_cong = now -
	 * HTCP_DELTA_L.
	 *
	 * This does not deflate our cwnd at all. It simply slows the rate cwnd
	 * is growing by until alpha regains the value it held prior to taking
	 * this drastic measure.
	 */
	if (now < htcp_data->t_last_cong)
		htcp_data->t_last_cong = now - HTCP_DELTA_L;

	/* Ticks elapsed beyond the HTCP_DELTA_L low-speed regime. */
	diff = now - htcp_data->t_last_cong - HTCP_DELTA_L;

	/* Cap alpha if the value of diff would overflow HTCP_CALC_ALPHA(). */
	if (diff < htcp_max_diff) {
		/*
		 * If it has been more than HTCP_DELTA_L ticks since congestion,
		 * increase alpha according to the function defined in the spec.
		 */
		if (diff > 0) {
			alpha = HTCP_CALC_ALPHA(diff);

			/*
			 * Adaptive backoff fairness adjustment:
			 * 2 * (1 - beta) * alpha_raw
			 */
			if (V_htcp_adaptive_backoff)
				alpha = max(1, (2 * ((1 << HTCP_SHIFT) -
				    htcp_data->beta) * alpha) >> HTCP_SHIFT);

			/*
			 * RTT scaling: (RTT / RTT_ref) * alpha
			 * alpha will be the raw value from HTCP_CALC_ALPHA() if
			 * adaptive backoff is off, or the adjusted value if
			 * adaptive backoff is on. The ratio is clamped to
			 * [HTCP_MINROWE, HTCP_MAXROWE].
			 */
			if (V_htcp_rtt_scaling)
				alpha = max(1, (min(max(HTCP_MINROWE,
				    (tcp_get_srtt(ccv->ccvc.tcp, TCP_TMR_GRANULARITY_TICKS) << HTCP_SHIFT) /
				    htcp_rtt_ref), HTCP_MAXROWE) * alpha)
				    >> HTCP_SHIFT);

		} else
			/* Within HTCP_DELTA_L of the last event: NewReno-like growth. */
			alpha = 1;

		htcp_data->alpha = alpha;
	}
}
454 
455 static void
456 htcp_recalc_beta(struct cc_var *ccv)
457 {
458 	struct htcp *htcp_data;
459 
460 	htcp_data = ccv->cc_data;
461 
462 	/*
463 	 * TCPTV_SRTTBASE is the initialised value of each connection's SRTT, so
464 	 * we only calc beta if the connection's SRTT has been changed from its
465 	 * initial value. beta is bounded to ensure it is always between
466 	 * HTCP_MINBETA and HTCP_MAXBETA.
467 	 */
468 	if (V_htcp_adaptive_backoff && htcp_data->minrtt != TCPTV_SRTTBASE &&
469 	    htcp_data->maxrtt != TCPTV_SRTTBASE)
470 		htcp_data->beta = min(max(HTCP_MINBETA,
471 		    (htcp_data->minrtt << HTCP_SHIFT) / htcp_data->maxrtt),
472 		    HTCP_MAXBETA);
473 	else
474 		htcp_data->beta = HTCP_MINBETA;
475 }
476 
477 /*
478  * Record the minimum and maximum RTT seen for the connection. These are used in
479  * the calculation of beta if adaptive backoff is enabled.
480  */
481 static void
482 htcp_record_rtt(struct cc_var *ccv)
483 {
484 	struct htcp *htcp_data;
485 
486 	htcp_data = ccv->cc_data;
487 
488 	/* XXXLAS: Should there be some hysteresis for minrtt? */
489 
490 	/*
491 	 * Record the current SRTT as our minrtt if it's the smallest we've seen
492 	 * or minrtt is currently equal to its initialised value. Ignore SRTT
493 	 * until a min number of samples have been taken.
494 	 */
495 	if ((tcp_get_srtt(ccv->ccvc.tcp, TCP_TMR_GRANULARITY_TICKS) < htcp_data->minrtt ||
496 	    htcp_data->minrtt == TCPTV_SRTTBASE) &&
497 	    (CCV(ccv, t_rttupdated) >= HTCP_MIN_RTT_SAMPLES))
498 		htcp_data->minrtt = tcp_get_srtt(ccv->ccvc.tcp, TCP_TMR_GRANULARITY_TICKS);
499 
500 	/*
501 	 * Record the current SRTT as our maxrtt if it's the largest we've
502 	 * seen. Ignore SRTT until a min number of samples have been taken.
503 	 */
504 	if (tcp_get_srtt(ccv->ccvc.tcp, TCP_TMR_GRANULARITY_TICKS) > htcp_data->maxrtt
505 	    && CCV(ccv, t_rttupdated) >= HTCP_MIN_RTT_SAMPLES)
506 		htcp_data->maxrtt = tcp_get_srtt(ccv->ccvc.tcp, TCP_TMR_GRANULARITY_TICKS);
507 }
508 
509 /*
510  * Update the ssthresh in the event of congestion.
511  */
512 static void
513 htcp_ssthresh_update(struct cc_var *ccv)
514 {
515 	struct htcp *htcp_data;
516 
517 	htcp_data = ccv->cc_data;
518 
519 	/*
520 	 * On the first congestion event, set ssthresh to cwnd * 0.5, on
521 	 * subsequent congestion events, set it to cwnd * beta.
522 	 */
523 	if (CCV(ccv, snd_ssthresh) == TCP_MAXWIN << TCP_MAX_WINSHIFT)
524 		CCV(ccv, snd_ssthresh) = ((u_long)CCV(ccv, snd_cwnd) *
525 		    HTCP_MINBETA) >> HTCP_SHIFT;
526 	else {
527 		htcp_recalc_beta(ccv);
528 		CCV(ccv, snd_ssthresh) = ((u_long)CCV(ccv, snd_cwnd) *
529 		    htcp_data->beta) >> HTCP_SHIFT;
530 	}
531 }
532 
533 SYSCTL_DECL(_net_inet_tcp_cc_htcp);
534 SYSCTL_NODE(_net_inet_tcp_cc, OID_AUTO, htcp, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
535     "H-TCP related settings");
536 SYSCTL_UINT(_net_inet_tcp_cc_htcp, OID_AUTO, adaptive_backoff,
537     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(htcp_adaptive_backoff), 0,
538     "enable H-TCP adaptive backoff");
539 SYSCTL_UINT(_net_inet_tcp_cc_htcp, OID_AUTO, rtt_scaling,
540     CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(htcp_rtt_scaling), 0,
541     "enable H-TCP RTT scaling");
542 
543 DECLARE_CC_MODULE(htcp, &htcp_cc_algo);
544 MODULE_VERSION(htcp, 2);
545