/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2007-2008
 * 	Swinburne University of Technology, Melbourne, Australia
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart and
 * James Healy, made possible in part by a grant from the Cisco University
 * Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * An implementation of the H-TCP congestion control algorithm for FreeBSD,
 * based on the Internet Draft "draft-leith-tcp-htcp-06.txt" by Leith and
 * Shorten. Originally released as part of the NewTCP research project at
 * Swinburne University of Technology's Centre for Advanced Internet
 * Architectures, Melbourne, Australia, which was made possible in part by a
 * grant from the Cisco University Research Program Fund at Community Foundation
 * Silicon Valley. More details are available at:
 *   http://caia.swin.edu.au/urp/newtcp/
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/vnet.h>

#include <net/route.h>
#include <net/route/nhop.h>

#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_module.h>

/* Fixed point math shifts. */
#define HTCP_SHIFT 8
#define HTCP_ALPHA_INC_SHIFT 4

#define HTCP_INIT_ALPHA 1
#define HTCP_DELTA_L hz		/* 1 sec in ticks. */
#define HTCP_MINBETA 128	/* 0.5 << HTCP_SHIFT. */
#define HTCP_MAXBETA 204	/* ~0.8 << HTCP_SHIFT. */
#define HTCP_MINROWE 26		/* ~0.1 << HTCP_SHIFT. */
#define HTCP_MAXROWE 512	/* 2 << HTCP_SHIFT. */
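
/*
 * Illustrative note (not from the original source): with HTCP_SHIFT == 8, a
 * stored value v encodes the real number v / 2^8, so HTCP_MINBETA == 128
 * represents 0.5 and HTCP_MAXBETA == 204 represents ~0.8. A fixed point
 * multiply is "multiply, then shift back down", e.g.
 * (cwnd * beta) >> HTCP_SHIFT approximates cwnd scaled by 0.5..0.8.
 */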

/* RTT_ref (ms) used in the calculation of alpha if RTT scaling is enabled. */
#define HTCP_RTT_REF 100

/* Don't trust SRTT until this many samples have been taken. */
#define HTCP_MIN_RTT_SAMPLES 8

/*
 * HTCP_CALC_ALPHA performs a fixed point math calculation to determine the
 * value of alpha, based on the function defined in the HTCP spec.
 *
 * i.e. 1 + 10(delta - delta_l) + ((delta - delta_l) / 2) ^ 2
 *
 * "diff" is passed in to the macro as "delta - delta_l" and is expected to be
 * in units of ticks.
 *
 * The joys of fixed point maths mean our function implementation looks a
 * little funky...
 *
 * In order to maintain some precision in the calculations, a fixed point shift
 * HTCP_ALPHA_INC_SHIFT is used to ensure the integer divisions don't
 * truncate the results too badly.
 *
 * The "16" value is the "1" term in the alpha function shifted up by
 * HTCP_ALPHA_INC_SHIFT.
 *
 * The "160" value is the "10" multiplier in the alpha function multiplied by
 * 2^HTCP_ALPHA_INC_SHIFT.
 *
 * Specifying these as constants reduces the computations required. After
 * up-shifting all the terms in the function and performing the required
 * calculations, we down-shift the final result by HTCP_ALPHA_INC_SHIFT to
 * ensure it is back in the correct range.
 *
 * The "hz" terms are required as kernels can be configured to run with
 * different tick timers, which we have to adjust for in the alpha calculation
 * (which was originally defined in terms of seconds).
 *
 * We also have to be careful to constrain the value of diff such that it won't
 * overflow whilst performing the calculation. The middle term, i.e.
 * (160 * diff) / hz, is the limiting factor in the calculation, so we must
 * constrain diff to be less than the maximum size of an int divided by the
 * constant 160, i.e. diff < INT_MAX / 160.
 *
 * NB: Changing HTCP_ALPHA_INC_SHIFT will require you to MANUALLY update the
 * constants used in this function!
 */
#define HTCP_CALC_ALPHA(diff) \
((\
	(16) + \
	((160 * (diff)) / hz) + \
	(((diff) / hz) * (((diff) << HTCP_ALPHA_INC_SHIFT) / (4 * hz))) \
) >> HTCP_ALPHA_INC_SHIFT)
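
/*
 * Worked example (illustrative, not from the original source): with hz=1000
 * and diff=1000 ticks (1s past delta_l) the three terms are 16, 160 and 4,
 * so HTCP_CALC_ALPHA(1000) = (16 + 160 + 4) >> 4 = 11, matching the spec's
 * 1 + 10*1 + (1/2)^2 = 11.25 after integer truncation.
 */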

static void	htcp_ack_received(struct cc_var *ccv, ccsignal_t type);
static void	htcp_cb_destroy(struct cc_var *ccv);
static int	htcp_cb_init(struct cc_var *ccv, void *ptr);
static void	htcp_cong_signal(struct cc_var *ccv, ccsignal_t type);
static int	htcp_mod_init(void);
static void	htcp_post_recovery(struct cc_var *ccv);
static void	htcp_recalc_alpha(struct cc_var *ccv);
static void	htcp_recalc_beta(struct cc_var *ccv);
static void	htcp_record_rtt(struct cc_var *ccv);
static void	htcp_ssthresh_update(struct cc_var *ccv);
static size_t	htcp_data_sz(void);

struct htcp {
	/* cwnd before entering cong recovery. */
	unsigned long	prev_cwnd;
	/* cwnd additive increase parameter. */
	int		alpha;
	/* cwnd multiplicative decrease parameter. */
	int		beta;
	/* Largest rtt seen for the flow. */
	int		maxrtt;
	/* Shortest rtt seen for the flow. */
	int		minrtt;
	/* Time of last congestion event in ticks. */
	int		t_last_cong;
};

static int htcp_rtt_ref;
/*
 * The maximum number of ticks the value of diff can reach in
 * htcp_recalc_alpha() before alpha will stop increasing due to overflow.
 * See comment above HTCP_CALC_ALPHA for more info.
 */
static int htcp_max_diff = INT_MAX / ((1 << HTCP_ALPHA_INC_SHIFT) * 10);
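
/*
 * Illustrative note (not from the original source): with a 32-bit INT_MAX
 * this works out to INT_MAX / 160 = 13,421,772 ticks, i.e. roughly 3.7 hours
 * at the default kern.hz=1000 before alpha stops growing.
 */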

/* Per-netstack vars. */
VNET_DEFINE_STATIC(u_int, htcp_adaptive_backoff) = 0;
VNET_DEFINE_STATIC(u_int, htcp_rtt_scaling) = 0;
#define	V_htcp_adaptive_backoff    VNET(htcp_adaptive_backoff)
#define	V_htcp_rtt_scaling    VNET(htcp_rtt_scaling)

struct cc_algo htcp_cc_algo = {
	.name = "htcp",
	.ack_received = htcp_ack_received,
	.cb_destroy = htcp_cb_destroy,
	.cb_init = htcp_cb_init,
	.cong_signal = htcp_cong_signal,
	.mod_init = htcp_mod_init,
	.post_recovery = htcp_post_recovery,
	.cc_data_sz = htcp_data_sz,
	.after_idle = newreno_cc_after_idle,
};

static void
htcp_ack_received(struct cc_var *ccv, ccsignal_t type)
{
	struct htcp *htcp_data;
	uint32_t mss = tcp_fixed_maxseg(ccv->tp);

	htcp_data = ccv->cc_data;
	htcp_record_rtt(ccv);

	/*
	 * Regular ACK, we're not in cong/fast recovery, we're cwnd limited,
	 * and we're either not doing ABC, or are slow starting, or are doing
	 * ABC and have sent a cwnd's worth of bytes.
	 */
	if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
	    (ccv->flags & CCF_CWND_LIMITED) && (!V_tcp_do_rfc3465 ||
	    CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) ||
	    (V_tcp_do_rfc3465 && ccv->flags & CCF_ABC_SENTAWND))) {
		htcp_recalc_beta(ccv);
		htcp_recalc_alpha(ccv);
		/*
		 * Use the logic in NewReno ack_received() for slow start and
		 * for the first HTCP_DELTA_L ticks after either the flow starts
		 * or a congestion event (when alpha equals 1).
		 */
		if (htcp_data->alpha == 1 ||
		    CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh))
			newreno_cc_ack_received(ccv, type);
		else {
			if (V_tcp_do_rfc3465) {
				/* Increment cwnd by alpha segments. */
				CCV(ccv, snd_cwnd) += htcp_data->alpha *
				    mss;
				ccv->flags &= ~CCF_ABC_SENTAWND;
			} else
				/*
				 * Increment cwnd by alpha/cwnd segments to
				 * approximate an increase of alpha segments
				 * per RTT.
				 */
				CCV(ccv, snd_cwnd) += (((htcp_data->alpha <<
				    HTCP_SHIFT) / (max(1,
				    CCV(ccv, snd_cwnd) / mss))) *
				    mss) >> HTCP_SHIFT;
		}
	}
}
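
/*
 * Worked example for the non-ABC increment above (illustrative numbers, not
 * from the original source): with alpha=11, mss=1448 and snd_cwnd=100*mss,
 * each ACK adds (((11 << 8) / 100) * 1448) >> 8 = (28 * 1448) >> 8 = 158
 * bytes. Over the ~100 ACKs received in one RTT (assuming one ACK per
 * segment) cwnd grows by roughly 11 segments, i.e. approximately alpha
 * segments per RTT as intended.
 */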

static void
htcp_cb_destroy(struct cc_var *ccv)
{
	free(ccv->cc_data, M_CC_MEM);
}

static size_t
htcp_data_sz(void)
{
	return (sizeof(struct htcp));
}

static int
htcp_cb_init(struct cc_var *ccv, void *ptr)
{
	struct htcp *htcp_data;

	INP_WLOCK_ASSERT(tptoinpcb(ccv->tp));
	if (ptr == NULL) {
		htcp_data = malloc(sizeof(struct htcp), M_CC_MEM, M_NOWAIT);
		if (htcp_data == NULL)
			return (ENOMEM);
	} else
		htcp_data = ptr;

	/* Init some key variables with sensible defaults. */
	htcp_data->alpha = HTCP_INIT_ALPHA;
	htcp_data->beta = HTCP_MINBETA;
	htcp_data->maxrtt = TCPTV_SRTTBASE;
	htcp_data->minrtt = TCPTV_SRTTBASE;
	htcp_data->prev_cwnd = 0;
	htcp_data->t_last_cong = ticks;

	ccv->cc_data = htcp_data;

	return (0);
}

/*
 * Perform any necessary tasks before we enter congestion recovery.
 */
static void
htcp_cong_signal(struct cc_var *ccv, ccsignal_t type)
{
	struct htcp *htcp_data;
	uint32_t mss, pipe;

	htcp_data = ccv->cc_data;
	mss = tcp_fixed_maxseg(ccv->tp);

	switch (type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
			if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
				/*
				 * Apply hysteresis to maxrtt to ensure
				 * reductions in the RTT are reflected in our
				 * measurements.
				 */
				htcp_data->maxrtt = (htcp_data->minrtt +
				    (htcp_data->maxrtt - htcp_data->minrtt) *
				    95) / 100;
				htcp_ssthresh_update(ccv);
				htcp_data->t_last_cong = ticks;
				htcp_data->prev_cwnd = CCV(ccv, snd_cwnd);
			}
			ENTER_RECOVERY(CCV(ccv, t_flags));
		}
		break;

	case CC_ECN:
		if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
			/*
			 * Apply hysteresis to maxrtt to ensure reductions in
			 * the RTT are reflected in our measurements.
			 */
			htcp_data->maxrtt = (htcp_data->minrtt + (htcp_data->maxrtt -
			    htcp_data->minrtt) * 95) / 100;
			htcp_ssthresh_update(ccv);
			CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
			htcp_data->t_last_cong = ticks;
			htcp_data->prev_cwnd = CCV(ccv, snd_cwnd);
			ENTER_CONGRECOVERY(CCV(ccv, t_flags));
		}
		break;

	case CC_RTO:
		if (CCV(ccv, t_rxtshift) == 1) {
			if (V_tcp_do_newsack) {
				pipe = tcp_compute_pipe(ccv->tp);
			} else {
				pipe = CCV(ccv, snd_max) -
					CCV(ccv, snd_fack) +
					CCV(ccv, sackhint.sack_bytes_rexmit);
			}
			CCV(ccv, snd_ssthresh) = max(2,
				min(CCV(ccv, snd_wnd), pipe) / 2 / mss) * mss;
		}
		CCV(ccv, snd_cwnd) = mss;
		/*
		 * Grab the current time and record it so we know when the
		 * most recent congestion event was. Only record it when the
		 * timeout has fired more than once, as there is a reasonable
		 * chance the first one is a false alarm and may not indicate
		 * congestion.
		 */
		if (CCV(ccv, t_rxtshift) >= 2)
			htcp_data->t_last_cong = ticks;
		break;
	default:
		break;
	}
}

static int
htcp_mod_init(void)
{
	/*
	 * HTCP_RTT_REF is defined in ms, and t_srtt in the tcpcb is stored in
	 * units of TCP_RTT_SCALE*hz. Scale HTCP_RTT_REF to be in the same units
	 * as t_srtt.
	 */
	htcp_rtt_ref = (HTCP_RTT_REF * TCP_RTT_SCALE * hz) / 1000;
	return (0);
}
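
/*
 * Illustrative note (not from the original source): assuming TCP_RTT_SCALE
 * is 32 and kern.hz=1000, the 100ms reference RTT scales to
 * htcp_rtt_ref = (100 * 32 * 1000) / 1000 = 3200, i.e. the same units as
 * the smoothed RTT value used in htcp_recalc_alpha().
 */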

/*
 * Perform any necessary tasks before we exit congestion recovery.
 */
static void
htcp_post_recovery(struct cc_var *ccv)
{
	int pipe;
	struct htcp *htcp_data;
	uint32_t mss = tcp_fixed_maxseg(ccv->tp);

	pipe = 0;
	htcp_data = ccv->cc_data;

	if (IN_FASTRECOVERY(CCV(ccv, t_flags))) {
		/*
		 * If inflight data is less than ssthresh, set cwnd
		 * conservatively to avoid a burst of data, as suggested in the
		 * NewReno RFC. Otherwise, use the HTCP method.
		 *
		 * XXXLAS: Find a way to do this without needing curack
		 */
		if (V_tcp_do_newsack)
			pipe = tcp_compute_pipe(ccv->tp);
		else
			pipe = CCV(ccv, snd_max) - ccv->curack;

		if (pipe < CCV(ccv, snd_ssthresh))
			/*
			 * Ensure that cwnd does not collapse to 1 MSS under
			 * adverse conditions. Implements RFC 6582.
			 */
			CCV(ccv, snd_cwnd) = max(pipe, mss) + mss;
		else
			CCV(ccv, snd_cwnd) = max(1, ((htcp_data->beta *
			    htcp_data->prev_cwnd / mss)
			    >> HTCP_SHIFT)) * mss;
	}
}
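
/*
 * Worked example for the H-TCP branch above (illustrative numbers, not from
 * the original source): with beta=204 (~0.8), mss=1448 and prev_cwnd=100*mss,
 * cwnd is reset to ((204 * 144800 / 1448) >> 8) * 1448 = 79 * 1448 = 114392
 * bytes, i.e. roughly 0.8 of the pre-congestion cwnd.
 */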

static void
htcp_recalc_alpha(struct cc_var *ccv)
{
	struct htcp *htcp_data;
	int alpha, diff, now;

	htcp_data = ccv->cc_data;
	now = ticks;

	/*
	 * If ticks has wrapped around (will happen approximately once every 49
	 * days on a machine with the default kern.hz=1000) and a flow straddles
	 * the wrap point, our alpha calcs will be completely wrong. We cut our
	 * losses and restart alpha from scratch by setting t_last_cong = now -
	 * HTCP_DELTA_L.
	 *
	 * This does not deflate our cwnd at all. It simply slows the rate cwnd
	 * is growing by until alpha regains the value it held prior to taking
	 * this drastic measure.
	 */
	if (now < htcp_data->t_last_cong)
		htcp_data->t_last_cong = now - HTCP_DELTA_L;

	diff = now - htcp_data->t_last_cong - HTCP_DELTA_L;

	/* Cap alpha if the value of diff would overflow HTCP_CALC_ALPHA(). */
	if (diff < htcp_max_diff) {
		/*
		 * If it has been more than HTCP_DELTA_L ticks since congestion,
		 * increase alpha according to the function defined in the spec.
		 */
		if (diff > 0) {
			alpha = HTCP_CALC_ALPHA(diff);

			/*
			 * Adaptive backoff fairness adjustment:
			 * 2 * (1 - beta) * alpha_raw
			 */
			if (V_htcp_adaptive_backoff)
				alpha = max(1, (2 * ((1 << HTCP_SHIFT) -
				    htcp_data->beta) * alpha) >> HTCP_SHIFT);

			/*
			 * RTT scaling: (RTT / RTT_ref) * alpha
			 * alpha will be the raw value from HTCP_CALC_ALPHA() if
			 * adaptive backoff is off, or the adjusted value if
			 * adaptive backoff is on.
			 */
			if (V_htcp_rtt_scaling)
				alpha = max(1, (min(max(HTCP_MINROWE,
				    (tcp_get_srtt(ccv->tp, TCP_TMR_GRANULARITY_TICKS) << HTCP_SHIFT) /
				    htcp_rtt_ref), HTCP_MAXROWE) * alpha)
				    >> HTCP_SHIFT);

		} else
			alpha = 1;

		htcp_data->alpha = alpha;
	}
}
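
/*
 * Illustrative check of the fairness adjustment above (not from the original
 * source): with beta=128 (0.5), (2 * (256 - 128) * alpha) >> 8 == alpha, so
 * the adjustment is a no-op; with beta=204 (~0.8) it scales alpha by
 * 2 * 52 / 256, roughly 0.4, compensating for the milder backoff.
 */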

static void
htcp_recalc_beta(struct cc_var *ccv)
{
	struct htcp *htcp_data;

	htcp_data = ccv->cc_data;

	/*
	 * TCPTV_SRTTBASE is the initialised value of each connection's SRTT, so
	 * we only calc beta if the connection's SRTT has been changed from its
	 * initial value. beta is bounded to ensure it is always between
	 * HTCP_MINBETA and HTCP_MAXBETA.
	 */
	if (V_htcp_adaptive_backoff && htcp_data->minrtt != TCPTV_SRTTBASE &&
	    htcp_data->maxrtt != TCPTV_SRTTBASE)
		htcp_data->beta = min(max(HTCP_MINBETA,
		    (htcp_data->minrtt << HTCP_SHIFT) / htcp_data->maxrtt),
		    HTCP_MAXBETA);
	else
		htcp_data->beta = HTCP_MINBETA;
}
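
/*
 * Worked example (illustrative numbers, not from the original source): with
 * minrtt=80 and maxrtt=100 (in the same SRTT units), beta =
 * (80 << 8) / 100 = 204, i.e. ~0.8; with minrtt=20 and maxrtt=100 the raw
 * ratio is 51, which is clamped up to HTCP_MINBETA (128, i.e. 0.5).
 */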

/*
 * Record the minimum and maximum RTT seen for the connection. These are used in
 * the calculation of beta if adaptive backoff is enabled.
 */
static void
htcp_record_rtt(struct cc_var *ccv)
{
	struct htcp *htcp_data;

	htcp_data = ccv->cc_data;

	/* XXXLAS: Should there be some hysteresis for minrtt? */

	/*
	 * Record the current SRTT as our minrtt if it's the smallest we've seen
	 * or minrtt is currently equal to its initialised value. Ignore SRTT
	 * until a min number of samples have been taken.
	 */
	if ((tcp_get_srtt(ccv->tp, TCP_TMR_GRANULARITY_TICKS) < htcp_data->minrtt ||
	    htcp_data->minrtt == TCPTV_SRTTBASE) &&
	    (CCV(ccv, t_rttupdated) >= HTCP_MIN_RTT_SAMPLES))
		htcp_data->minrtt = tcp_get_srtt(ccv->tp, TCP_TMR_GRANULARITY_TICKS);

	/*
	 * Record the current SRTT as our maxrtt if it's the largest we've
	 * seen. Ignore SRTT until a min number of samples have been taken.
	 */
	if (tcp_get_srtt(ccv->tp, TCP_TMR_GRANULARITY_TICKS) > htcp_data->maxrtt
	    && CCV(ccv, t_rttupdated) >= HTCP_MIN_RTT_SAMPLES)
		htcp_data->maxrtt = tcp_get_srtt(ccv->tp, TCP_TMR_GRANULARITY_TICKS);
}

/*
 * Update the ssthresh in the event of congestion.
 */
static void
htcp_ssthresh_update(struct cc_var *ccv)
{
	struct htcp *htcp_data;

	htcp_data = ccv->cc_data;

	/*
	 * On the first congestion event, set ssthresh to cwnd * 0.5, on
	 * subsequent congestion events, set it to cwnd * beta.
	 */
	if (CCV(ccv, snd_ssthresh) == TCP_MAXWIN << TCP_MAX_WINSHIFT)
		CCV(ccv, snd_ssthresh) = ((u_long)CCV(ccv, snd_cwnd) *
		    HTCP_MINBETA) >> HTCP_SHIFT;
	else {
		htcp_recalc_beta(ccv);
		CCV(ccv, snd_ssthresh) = ((u_long)CCV(ccv, snd_cwnd) *
		    htcp_data->beta) >> HTCP_SHIFT;
	}
}

SYSCTL_DECL(_net_inet_tcp_cc_htcp);
SYSCTL_NODE(_net_inet_tcp_cc, OID_AUTO, htcp, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    "H-TCP related settings");
SYSCTL_UINT(_net_inet_tcp_cc_htcp, OID_AUTO, adaptive_backoff,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(htcp_adaptive_backoff), 0,
    "enable H-TCP adaptive backoff");
SYSCTL_UINT(_net_inet_tcp_cc_htcp, OID_AUTO, rtt_scaling,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(htcp_rtt_scaling), 0,
    "enable H-TCP RTT scaling");
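
/*
 * Usage sketch (not part of the original source; typical FreeBSD commands,
 * assuming the module is built and installed as cc_htcp):
 *
 *   kldload cc_htcp                                  # load the module
 *   sysctl net.inet.tcp.cc.algorithm=htcp            # select H-TCP
 *   sysctl net.inet.tcp.cc.htcp.adaptive_backoff=1   # optional knobs
 *   sysctl net.inet.tcp.cc.htcp.rtt_scaling=1
 */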

DECLARE_CC_MODULE(htcp, &htcp_cc_algo);
MODULE_VERSION(htcp, 2);