xref: /freebsd/sys/netinet/sctp_cc_functions.c (revision ea8345d6a761ab0a253af82bc2b6c975160ad2f3)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <netinet/sctp_os.h>
32 #include <netinet/sctp_var.h>
33 #include <netinet/sctp_sysctl.h>
34 #include <netinet/sctp_pcb.h>
35 #include <netinet/sctp_header.h>
36 #include <netinet/sctputil.h>
37 #include <netinet/sctp_output.h>
38 #include <netinet/sctp_input.h>
39 #include <netinet/sctp_indata.h>
40 #include <netinet/sctp_uio.h>
41 #include <netinet/sctp_timer.h>
42 #include <netinet/sctp_auth.h>
43 #include <netinet/sctp_asconf.h>
44 #include <netinet/sctp_cc_functions.h>
45 #include <netinet/sctp_dtrace_declare.h>
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
48 
49 void
50 sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
51 {
52 	struct sctp_association *assoc;
53 	uint32_t cwnd_in_mtu;
54 
55 	assoc = &stcb->asoc;
56 	cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
57 	if (cwnd_in_mtu == 0) {
58 		/* A value of 0 means that the formula of RFC 4960 is used. */
59 		net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
60 	} else {
61 		/*
62 		 * We take the minimum of the burst limit and the initial
63 		 * congestion window.
64 		 */
65 		if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
66 			cwnd_in_mtu = assoc->max_burst;
67 		net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
68 	}
69 	if (stcb->asoc.sctp_cmt_on_off == 2) {
70 		/* In case of resource pooling, initialize appropriately */
71 		net->cwnd /= assoc->numnets;
72 		if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
73 			net->cwnd = net->mtu - sizeof(struct sctphdr);
74 		}
75 	}
76 	net->ssthresh = assoc->peers_rwnd;
77 
78 	SDT_PROBE(sctp, cwnd, net, init,
79 	    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
80 	    0, net->cwnd);
81 	if (SCTP_BASE_SYSCTL(sctp_logging_level) &
82 	    (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
83 		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
84 	}
85 }
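
/*
 * Worked example (illustrative; assumes a 1500-byte path MTU and the
 * RFC 4960 constant SCTP_INITIAL_CWND of 4380 bytes). With the default
 * sctp_initial_cwnd == 0:
 *
 *   cwnd = min(4 * 1500, max(2 * 1500, 4380)) = min(6000, 4380) = 4380
 *
 * With sctp_initial_cwnd == 10 and max_burst == 4, the sysctl path caps
 * the window at the burst limit instead (12-byte common header):
 *
 *   cwnd = (1500 - sizeof(struct sctphdr)) * 4 = 1488 * 4 = 5952
 */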
86 
87 void
88 sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
89     struct sctp_association *asoc)
90 {
91 	struct sctp_nets *net;
92 	uint32_t t_ssthresh, t_cwnd;
93 
94 	/* MT FIXME: Don't compute this over and over again */
95 	t_ssthresh = 0;
96 	t_cwnd = 0;
97 	if (asoc->sctp_cmt_on_off == 2) {
98 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
99 			t_ssthresh += net->ssthresh;
100 			t_cwnd += net->cwnd;
101 		}
102 	}
103 	/*-
104 	 * CMT fast recovery code. Needs debugging: should the condition also
105 	 * include ((sctp_cmt_on_off > 0) && (net->fast_retran_loss_recovery == 0))?
106 	 */
107 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
108 		if ((asoc->fast_retran_loss_recovery == 0) ||
109 		    (asoc->sctp_cmt_on_off > 0)) {
110 			/* out of an RFC 2582 fast recovery window? */
111 			if (net->net_ack > 0) {
112 				/*
113 				 * Per section 7.2.3, were there any
114 				 * destinations that had a fast retransmit
115 				 * sent to them? If so, we need to
116 				 * adjust ssthresh and cwnd.
117 				 */
118 				struct sctp_tmit_chunk *lchk;
119 				int old_cwnd = net->cwnd;
120 
121 				if (asoc->sctp_cmt_on_off == 2) {
122 					net->ssthresh = (uint32_t) (((uint64_t) 4 *
123 					    (uint64_t) net->mtu *
124 					    (uint64_t) net->ssthresh) /
125 					    (uint64_t) t_ssthresh);
126 					if ((net->cwnd > t_cwnd / 2) &&
127 					    (net->ssthresh < net->cwnd - t_cwnd / 2)) {
128 						net->ssthresh = net->cwnd - t_cwnd / 2;
129 					}
130 					if (net->ssthresh < net->mtu) {
131 						net->ssthresh = net->mtu;
132 					}
133 				} else {
134 					net->ssthresh = net->cwnd / 2;
135 					if (net->ssthresh < (net->mtu * 2)) {
136 						net->ssthresh = 2 * net->mtu;
137 					}
138 				}
139 				net->cwnd = net->ssthresh;
140 				SDT_PROBE(sctp, cwnd, net, fr,
141 				    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
142 				    old_cwnd, net->cwnd);
143 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
144 					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
145 					    SCTP_CWND_LOG_FROM_FR);
146 				}
147 				lchk = TAILQ_FIRST(&asoc->send_queue);
148 
149 				net->partial_bytes_acked = 0;
150 				/* Turn on fast recovery window */
151 				asoc->fast_retran_loss_recovery = 1;
152 				if (lchk == NULL) {
153 					/* Mark end of the window */
154 					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
155 				} else {
156 					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
157 				}
158 
159 				/*
160 				 * CMT fast recovery -- per destination
161 				 * recovery variable.
162 				 */
163 				net->fast_retran_loss_recovery = 1;
164 
165 				if (lchk == NULL) {
166 					/* Mark end of the window */
167 					net->fast_recovery_tsn = asoc->sending_seq - 1;
168 				} else {
169 					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
170 				}
171 
172 				/*
173 				 * Disable Nonce Sum Checking and store the
174 				 * resync tsn
175 				 */
176 				asoc->nonce_sum_check = 0;
177 				asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
178 
179 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
180 				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
181 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
182 				    stcb->sctp_ep, stcb, net);
183 			}
184 		} else if (net->net_ack > 0) {
185 			/*
186 			 * Mark a peg that we WOULD have done a cwnd
187 			 * reduction but RFC2582 prevented this action.
188 			 */
189 			SCTP_STAT_INCR(sctps_fastretransinrtt);
190 		}
191 	}
192 }
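
/*
 * Worked example of the CMT/RP backoff above (illustrative numbers):
 * two destinations with ssthresh values of 60000 and 20000 bytes and a
 * 1500-byte MTU give t_ssthresh = 80000. For the first destination,
 *
 *   ssthresh = 4 * 1500 * 60000 / 80000 = 4500 bytes,
 *
 * so each path's share of the pooled backoff is proportional to its own
 * ssthresh, with the subsequent checks flooring it at one MTU.
 */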
193 
194 void
195 sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
196     struct sctp_association *asoc,
197     int accum_moved, int reneged_all, int will_exit)
198 {
199 	struct sctp_nets *net;
200 	int old_cwnd;
201 	uint32_t t_ssthresh, t_cwnd, incr;
202 
203 	/* MT FIXME: Don't compute this over and over again */
204 	t_ssthresh = 0;
205 	t_cwnd = 0;
206 	if (stcb->asoc.sctp_cmt_on_off == 2) {
207 		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
208 			t_ssthresh += net->ssthresh;
209 			t_cwnd += net->cwnd;
210 		}
211 	}
212 	/******************************/
213 	/* update cwnd and Early FR   */
214 	/******************************/
215 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
216 
217 #ifdef JANA_CMT_FAST_RECOVERY
218 		/*
219 		 * CMT fast recovery code. Need to debug.
220 		 */
221 		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
222 			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
223 			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
224 				net->will_exit_fast_recovery = 1;
225 			}
226 		}
227 #endif
228 		if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
229 			/*
230 			 * So, first of all, do we need to have an Early FR
231 			 * timer running?
232 			 */
233 			if ((!TAILQ_EMPTY(&asoc->sent_queue) &&
234 			    (net->ref_count > 1) &&
235 			    (net->flight_size < net->cwnd)) ||
236 			    (reneged_all)) {
237 				/*
238 				 * yes, so in this case stop it if it's
239 				 * running, and then restart it. Reneging
240 				 * all is a special case where we want to
241 				 * run the Early FR timer and then force the
242 				 * last few unacked to be sent, causing us
243 				 * to elicit a sack with gaps to force out
244 				 * the others.
245 				 */
246 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
247 					SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
248 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
249 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
250 				}
251 				SCTP_STAT_INCR(sctps_earlyfrstrid);
252 				sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
253 			} else {
254 				/* No, stop it if it's running */
255 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
256 					SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
257 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
258 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
259 				}
260 			}
261 		}
262 		/* if nothing was acked on this destination skip it */
263 		if (net->net_ack == 0) {
264 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
265 				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
266 			}
267 			continue;
268 		}
269 		if (net->net_ack2 > 0) {
270 			/*
271 			 * Karn's rule applies to clearing the error count; this
272 			 * is optional.
273 			 */
274 			net->error_count = 0;
275 			if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
276 			    SCTP_ADDR_NOT_REACHABLE) {
277 				/* addr came good */
278 				net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
279 				net->dest_state |= SCTP_ADDR_REACHABLE;
280 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
281 				    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
282 				/* now was it the primary? if so restore */
283 				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
284 					(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
285 				}
286 			}
287 			/*
288 			 * JRS 5/14/07 - If CMT PF is on and the destination
289 			 * is in PF state, set the destination to active
290 			 * state and set the cwnd to one or two MTU's based
291 			 * on whether PF1 or PF2 is being used.
292 			 *
293 			 * Should we stop any running T3 timer here?
294 			 */
295 			if ((asoc->sctp_cmt_on_off > 0) &&
296 			    (asoc->sctp_cmt_pf > 0) &&
297 			    ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
298 				net->dest_state &= ~SCTP_ADDR_PF;
299 				old_cwnd = net->cwnd;
300 				net->cwnd = net->mtu * asoc->sctp_cmt_pf;
301 				SDT_PROBE(sctp, cwnd, net, ack,
302 				    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
303 				    old_cwnd, net->cwnd);
304 				SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
305 				    net, net->cwnd);
306 				/*
307 				 * Since the cwnd value is explicitly set,
308 				 * skip the code that updates the cwnd
309 				 * value.
310 				 */
311 				goto skip_cwnd_update;
312 			}
313 		}
314 #ifdef JANA_CMT_FAST_RECOVERY
315 		/*
316 		 * CMT fast recovery code
317 		 */
318 		/*
319 		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
320 		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
321 		 * } else if (sctp_cmt_on_off == 0 &&
322 		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
323 		 */
324 #endif
325 
326 		if (asoc->fast_retran_loss_recovery &&
327 		    (will_exit == 0) &&
328 		    (asoc->sctp_cmt_on_off == 0)) {
329 			/*
330 			 * If we are in loss recovery we skip any cwnd
331 			 * update
332 			 */
333 			goto skip_cwnd_update;
334 		}
335 		/*
336 		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
337 		 * moved.
338 		 */
339 		if (accum_moved ||
340 		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
341 			/* If the cumulative ack moved we can proceed */
342 			if (net->cwnd <= net->ssthresh) {
343 				/* We are in slow start */
344 				if (net->flight_size + net->net_ack >= net->cwnd) {
345 					old_cwnd = net->cwnd;
346 					if (stcb->asoc.sctp_cmt_on_off == 2) {
347 						uint32_t limit;
348 
349 						limit = (uint32_t) (((uint64_t) net->mtu *
350 						    (uint64_t) SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
351 						    (uint64_t) net->ssthresh) /
352 						    (uint64_t) t_ssthresh);
353 						incr = (uint32_t) (((uint64_t) net->net_ack *
354 						    (uint64_t) net->ssthresh) /
355 						    (uint64_t) t_ssthresh);
356 						if (incr > limit) {
357 							incr = limit;
358 						}
359 						if (incr == 0) {
360 							incr = 1;
361 						}
362 					} else {
363 						incr = net->net_ack;
364 						if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) {
365 							incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable);
366 						}
367 					}
368 					net->cwnd += incr;
369 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
370 						sctp_log_cwnd(stcb, net, incr,
371 						    SCTP_CWND_LOG_FROM_SS);
372 					}
373 					SDT_PROBE(sctp, cwnd, net, ack,
374 					    stcb->asoc.my_vtag,
375 					    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
376 					    net,
377 					    old_cwnd, net->cwnd);
378 				} else {
379 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
380 						sctp_log_cwnd(stcb, net, net->net_ack,
381 						    SCTP_CWND_LOG_NOADV_SS);
382 					}
383 				}
384 			} else {
385 				/* We are in congestion avoidance */
386 				uint32_t incr;
387 
388 				/*
389 				 * Add to pba
390 				 */
391 				net->partial_bytes_acked += net->net_ack;
392 
393 				if ((net->flight_size + net->net_ack >= net->cwnd) &&
394 				    (net->partial_bytes_acked >= net->cwnd)) {
395 					net->partial_bytes_acked -= net->cwnd;
396 					old_cwnd = net->cwnd;
397 					if (asoc->sctp_cmt_on_off == 2) {
398 						incr = (uint32_t) (((uint64_t) net->mtu *
399 						    (uint64_t) net->ssthresh) /
400 						    (uint64_t) t_ssthresh);
401 						if (incr == 0) {
402 							incr = 1;
403 						}
404 					} else {
405 						incr = net->mtu;
406 					}
407 					net->cwnd += incr;
408 					SDT_PROBE(sctp, cwnd, net, ack,
409 					    stcb->asoc.my_vtag,
410 					    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
411 					    net,
412 					    old_cwnd, net->cwnd);
413 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
414 						sctp_log_cwnd(stcb, net, net->mtu,
415 						    SCTP_CWND_LOG_FROM_CA);
416 					}
417 				} else {
418 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
419 						sctp_log_cwnd(stcb, net, net->net_ack,
420 						    SCTP_CWND_LOG_NOADV_CA);
421 					}
422 				}
423 			}
424 		} else {
425 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
426 				sctp_log_cwnd(stcb, net, net->mtu,
427 				    SCTP_CWND_LOG_NO_CUMACK);
428 			}
429 		}
430 skip_cwnd_update:
431 		/*
432 		 * Now, according to Karn's rule, do we need to restore the
433 		 * RTO timer? Check our net_ack2. If it is not set, then we
434 		 * have an ambiguity, i.e. all data ack'd was sent to more
435 		 * than one place.
436 		 */
437 		if (net->net_ack2) {
438 			/* restore any doubled timers */
439 			net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
440 			if (net->RTO < stcb->asoc.minrto) {
441 				net->RTO = stcb->asoc.minrto;
442 			}
443 			if (net->RTO > stcb->asoc.maxrto) {
444 				net->RTO = stcb->asoc.maxrto;
445 			}
446 		}
447 	}
448 }
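
/*
 * Worked example of the slow-start increase above (illustrative;
 * assumes sctp_L2_abc_variable == 1 and a 1500-byte MTU). Without
 * CMT/RP, a SACK newly acking net_ack = 4380 bytes gives
 *
 *   incr = min(net_ack, L * mtu) = min(4380, 1 * 1500) = 1500 bytes,
 *
 * i.e. Appropriate Byte Counting (RFC 3465) with a cap of L MTUs per
 * SACK. Under CMT/RP both the increment and its cap are additionally
 * scaled by this destination's share, ssthresh / t_ssthresh.
 */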
449 
450 void
451 sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
452 {
453 	int old_cwnd = net->cwnd;
454 	uint32_t t_ssthresh, t_cwnd;
455 
456 	/* MT FIXME: Don't compute this over and over again */
457 	t_ssthresh = 0;
458 	t_cwnd = 0;
459 	if (stcb->asoc.sctp_cmt_on_off == 2) {
460 		struct sctp_nets *lnet;
461 
462 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
463 			t_ssthresh += lnet->ssthresh;
464 			t_cwnd += lnet->cwnd;
465 		}
466 		net->ssthresh = (uint32_t) (((uint64_t) 4 *
467 		    (uint64_t) net->mtu *
468 		    (uint64_t) net->ssthresh) /
469 		    (uint64_t) t_ssthresh);
470 		if ((net->cwnd > t_cwnd / 2) &&
471 		    (net->ssthresh < net->cwnd - t_cwnd / 2)) {
472 			net->ssthresh = net->cwnd - t_cwnd / 2;
473 		}
474 		if (net->ssthresh < net->mtu) {
475 			net->ssthresh = net->mtu;
476 		}
477 	} else {
478 		net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
479 	}
480 	net->cwnd = net->mtu;
481 	net->partial_bytes_acked = 0;
482 	SDT_PROBE(sctp, cwnd, net, to,
483 	    stcb->asoc.my_vtag,
484 	    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
485 	    net,
486 	    old_cwnd, net->cwnd);
487 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
488 		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
489 	}
490 }
491 
492 void
493 sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net)
494 {
495 	int old_cwnd = net->cwnd;
496 
497 	SCTP_STAT_INCR(sctps_ecnereducedcwnd);
498 	net->ssthresh = net->cwnd / 2;
499 	if (net->ssthresh < net->mtu) {
500 		net->ssthresh = net->mtu;
501 		/* here back off the timer as well, to slow us down */
502 		net->RTO <<= 1;
503 	}
504 	net->cwnd = net->ssthresh;
505 	SDT_PROBE(sctp, cwnd, net, ecn,
506 	    stcb->asoc.my_vtag,
507 	    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
508 	    net,
509 	    old_cwnd, net->cwnd);
510 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
511 		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
512 	}
513 }
514 
515 void
516 sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
517     struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
518     uint32_t * bottle_bw, uint32_t * on_queue)
519 {
520 	uint32_t bw_avail;
521 	int rtt, incr;
522 	int old_cwnd = net->cwnd;
523 
524 	/* need real RTT for this calc */
525 	rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
526 	/* get bottle neck bw */
527 	*bottle_bw = ntohl(cp->bottle_bw);
528 	/* and what's on queue */
529 	*on_queue = ntohl(cp->current_onq);
530 	/*
531 	 * adjust the on-queue if our flight is more; it could be that the
532 	 * router has not yet gotten data "in-flight" to it
533 	 */
534 	if (*on_queue < net->flight_size)
535 		*on_queue = net->flight_size;
536 	/* calculate the available space */
537 	bw_avail = (*bottle_bw * rtt) / 1000;
538 	if (bw_avail > *bottle_bw) {
539 		/*
540 		 * Cap the growth to no more than the bottle neck. This can
541 		 * happen as RTT slides up due to queues. It also means if
542 		 * you have more than a 1 second RTT with an empty queue, you
543 		 * will be limited to the bottle_bw per second, no matter if
544 		 * other points have 1/2 the RTT and you could get more
545 		 * out...
546 		 */
547 		bw_avail = *bottle_bw;
548 	}
549 	if (*on_queue > bw_avail) {
550 		/*
551 		 * No room for anything else; don't allow anything else to be
552 		 * "added to the fire".
553 		 */
554 		int seg_inflight, seg_onqueue, my_portion;
555 
556 		net->partial_bytes_acked = 0;
557 
558 		/* how much are we over queue size? */
559 		incr = *on_queue - bw_avail;
560 		if (stcb->asoc.seen_a_sack_this_pkt) {
561 			/*
562 			 * undo any cwnd adjustment that the sack might have
563 			 * made
564 			 */
565 			net->cwnd = net->prev_cwnd;
566 		}
567 		/* Now how much of that is mine? */
568 		seg_inflight = net->flight_size / net->mtu;
569 		seg_onqueue = *on_queue / net->mtu;
570 		my_portion = (incr * seg_inflight) / seg_onqueue;
571 
572 		/* Have I made an adjustment already? */
573 		if (net->cwnd > net->flight_size) {
574 			/*
575 			 * for this flight I made an adjustment; we need to
576 			 * decrease the portion by a share of our previous
577 			 * adjustment.
578 			 */
579 			int diff_adj;
580 
581 			diff_adj = net->cwnd - net->flight_size;
582 			if (diff_adj > my_portion)
583 				my_portion = 0;
584 			else
585 				my_portion -= diff_adj;
586 		}
587 		/*
588 		 * back down to the previous cwnd (assume we have had a sack
589 		 * before this packet), minus whatever portion of the
590 		 * overage is my fault.
591 		 */
592 		net->cwnd -= my_portion;
593 
594 		/* we will NOT back down more than 1 MTU */
595 		if (net->cwnd <= net->mtu) {
596 			net->cwnd = net->mtu;
597 		}
598 		/* force into CA */
599 		net->ssthresh = net->cwnd - 1;
600 	} else {
601 		/*
602 		 * Take 1/4 of the space left or the max burst, whichever
603 		 * is less.
604 		 */
605 		incr = min((bw_avail - *on_queue) >> 2,
606 		    stcb->asoc.max_burst * net->mtu);
607 		net->cwnd += incr;
608 	}
609 	if (net->cwnd > bw_avail) {
610 		/* We can't exceed the pipe size */
611 		net->cwnd = bw_avail;
612 	}
613 	if (net->cwnd < net->mtu) {
614 		/* We always have 1 MTU */
615 		net->cwnd = net->mtu;
616 	}
617 	if (net->cwnd - old_cwnd != 0) {
618 		/* log only changes */
619 		SDT_PROBE(sctp, cwnd, net, pd,
620 		    stcb->asoc.my_vtag,
621 		    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
622 		    net,
623 		    old_cwnd, net->cwnd);
624 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
625 			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
626 			    SCTP_CWND_LOG_FROM_SAT);
627 		}
628 	}
629 }
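
/*
 * Worked example of the packet-drop handling above (illustrative
 * numbers; bottle_bw is taken as bytes/sec and the smoothed RTT as
 * msec, so bw_avail = bottle_bw * rtt / 1000 approximates the pipe in
 * bytes). With bottle_bw = 125000 (about 1 Mbit/s), rtt = 100 and
 * on_queue = 20000: bw_avail = 12500 and the overage is
 * incr = 20000 - 12500 = 7500 bytes. With flight_size = 15000 and a
 * 1500-byte MTU, seg_inflight = 10 and seg_onqueue = 13, so this path's
 * portion of the overage is 7500 * 10 / 13 = 5769 bytes, which is what
 * cwnd is reduced by (before the 1-MTU floor above).
 */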
630 
631 void
632 sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
633     struct sctp_nets *net, int burst_limit)
634 {
635 	int old_cwnd = net->cwnd;
636 
637 	if (net->ssthresh < net->cwnd)
638 		net->ssthresh = net->cwnd;
639 	net->cwnd = (net->flight_size + (burst_limit * net->mtu));
640 	SDT_PROBE(sctp, cwnd, net, bl,
641 	    stcb->asoc.my_vtag,
642 	    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
643 	    net,
644 	    old_cwnd, net->cwnd);
645 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
646 		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
647 	}
648 }
649 
650 void
651 sctp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
652     struct sctp_tcb *stcb, struct sctp_nets *net)
653 {
654 	int old_cwnd = net->cwnd;
655 
656 	sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
657 	/*
658 	 * make a small adjustment to cwnd and force to CA.
659 	 */
660 	if (net->cwnd > net->mtu)
661 		/* drop down one MTU after sending */
662 		net->cwnd -= net->mtu;
663 	if (net->cwnd < net->ssthresh)
664 		/* still in SS, move to CA */
665 		net->ssthresh = net->cwnd - 1;
666 	SDT_PROBE(sctp, cwnd, net, fr,
667 	    stcb->asoc.my_vtag,
668 	    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
669 	    net,
670 	    old_cwnd, net->cwnd);
671 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
672 		sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
673 	}
674 }
675 
676 struct sctp_hs_raise_drop {
677 	int32_t cwnd;
678 	int32_t increase;
679 	int32_t drop_percent;
680 };
681 
682 #define SCTP_HS_TABLE_SIZE 73
683 
684 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
685 	{38, 1, 50},		/* 0   */
686 	{118, 2, 44},		/* 1   */
687 	{221, 3, 41},		/* 2   */
688 	{347, 4, 38},		/* 3   */
689 	{495, 5, 37},		/* 4   */
690 	{663, 6, 35},		/* 5   */
691 	{851, 7, 34},		/* 6   */
692 	{1058, 8, 33},		/* 7   */
693 	{1284, 9, 32},		/* 8   */
694 	{1529, 10, 31},		/* 9   */
695 	{1793, 11, 30},		/* 10  */
696 	{2076, 12, 29},		/* 11  */
697 	{2378, 13, 28},		/* 12  */
698 	{2699, 14, 28},		/* 13  */
699 	{3039, 15, 27},		/* 14  */
700 	{3399, 16, 27},		/* 15  */
701 	{3778, 17, 26},		/* 16  */
702 	{4177, 18, 26},		/* 17  */
703 	{4596, 19, 25},		/* 18  */
704 	{5036, 20, 25},		/* 19  */
705 	{5497, 21, 24},		/* 20  */
706 	{5979, 22, 24},		/* 21  */
707 	{6483, 23, 23},		/* 22  */
708 	{7009, 24, 23},		/* 23  */
709 	{7558, 25, 22},		/* 24  */
710 	{8130, 26, 22},		/* 25  */
711 	{8726, 27, 22},		/* 26  */
712 	{9346, 28, 21},		/* 27  */
713 	{9991, 29, 21},		/* 28  */
714 	{10661, 30, 21},	/* 29  */
715 	{11358, 31, 20},	/* 30  */
716 	{12082, 32, 20},	/* 31  */
717 	{12834, 33, 20},	/* 32  */
718 	{13614, 34, 19},	/* 33  */
719 	{14424, 35, 19},	/* 34  */
720 	{15265, 36, 19},	/* 35  */
721 	{16137, 37, 19},	/* 36  */
722 	{17042, 38, 18},	/* 37  */
723 	{17981, 39, 18},	/* 38  */
724 	{18955, 40, 18},	/* 39  */
725 	{19965, 41, 17},	/* 40  */
726 	{21013, 42, 17},	/* 41  */
727 	{22101, 43, 17},	/* 42  */
728 	{23230, 44, 17},	/* 43  */
729 	{24402, 45, 16},	/* 44  */
730 	{25618, 46, 16},	/* 45  */
731 	{26881, 47, 16},	/* 46  */
732 	{28193, 48, 16},	/* 47  */
733 	{29557, 49, 15},	/* 48  */
734 	{30975, 50, 15},	/* 49  */
735 	{32450, 51, 15},	/* 50  */
736 	{33986, 52, 15},	/* 51  */
737 	{35586, 53, 14},	/* 52  */
738 	{37253, 54, 14},	/* 53  */
739 	{38992, 55, 14},	/* 54  */
740 	{40808, 56, 14},	/* 55  */
741 	{42707, 57, 13},	/* 56  */
742 	{44694, 58, 13},	/* 57  */
743 	{46776, 59, 13},	/* 58  */
744 	{48961, 60, 13},	/* 59  */
745 	{51258, 61, 13},	/* 60  */
746 	{53677, 62, 12},	/* 61  */
747 	{56230, 63, 12},	/* 62  */
748 	{58932, 64, 12},	/* 63  */
749 	{61799, 65, 12},	/* 64  */
750 	{64851, 66, 11},	/* 65  */
751 	{68113, 67, 11},	/* 66  */
752 	{71617, 68, 11},	/* 67  */
753 	{75401, 69, 10},	/* 68  */
754 	{79517, 70, 10},	/* 69  */
755 	{84035, 71, 10},	/* 70  */
756 	{89053, 72, 10},	/* 71  */
757 	{94717, 73, 9}		/* 72  */
758 };
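
/*
 * Note on units (illustrative): the lookup key below is
 * cur_val = net->cwnd >> 10, so the cwnd column above is in 1024-byte
 * units; the selected row's increase is applied as (increase << 10)
 * bytes per adjustment, and drop_percent is the multiplicative decrease
 * taken on loss. For example, cwnd = 5 MB gives cur_val = 5120, which
 * selects row 20 ({5497, 21, 24}): grow by 21 KB per update and drop
 * cwnd by 24% on a loss event.
 */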
759 
760 static void
761 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
762 {
763 	int cur_val, i, indx, incr;
764 
765 	cur_val = net->cwnd >> 10;
766 	indx = SCTP_HS_TABLE_SIZE - 1;
767 #ifdef SCTP_DEBUG
768 	printf("HS CC CAlled.\n");
769 #endif
770 	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
771 		/* normal mode */
772 		if (net->net_ack > net->mtu) {
773 			net->cwnd += net->mtu;
774 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
775 				sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS);
776 			}
777 		} else {
778 			net->cwnd += net->net_ack;
779 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
780 				sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
781 			}
782 		}
783 	} else {
784 		for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
785 			if (cur_val < sctp_cwnd_adjust[i].cwnd) {
786 				indx = i;
787 				break;
788 			}
789 		}
790 		net->last_hs_used = indx;
791 		incr = ((sctp_cwnd_adjust[indx].increase) << 10);
792 		net->cwnd += incr;
793 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
794 			sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS);
795 		}
796 	}
797 }
798 
799 static void
800 sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
801 {
802 	int cur_val, i, indx;
803 	int old_cwnd = net->cwnd;
804 
805 	cur_val = net->cwnd >> 10;
806 	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
807 		/* normal mode */
808 		net->ssthresh = net->cwnd / 2;
809 		if (net->ssthresh < (net->mtu * 2)) {
810 			net->ssthresh = 2 * net->mtu;
811 		}
812 		net->cwnd = net->ssthresh;
813 	} else {
814 		/* drop by the proper amount */
815 		net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
816 		    sctp_cwnd_adjust[net->last_hs_used].drop_percent);
817 		net->cwnd = net->ssthresh;
818 		/* now where are we */
819 		indx = net->last_hs_used;
820 		cur_val = net->cwnd >> 10;
821 		/* reset where we are in the table */
822 		if (cur_val < sctp_cwnd_adjust[0].cwnd) {
823 			/* fell out of HS */
824 			net->last_hs_used = 0;
825 		} else {
826 			for (i = indx; i >= 1; i--) {
827 				if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
828 					break;
829 				}
830 			}
831 			net->last_hs_used = indx;
832 		}
833 	}
834 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
835 		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
836 	}
837 }
838 
839 void
840 sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
841     struct sctp_association *asoc)
842 {
843 	struct sctp_nets *net;
844 
845 	/*
846 	 * CMT fast recovery code. Needs debugging: should the condition also
847 	 * include ((sctp_cmt_on_off > 0) && (net->fast_retran_loss_recovery == 0))?
848 	 */
849 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
850 		if ((asoc->fast_retran_loss_recovery == 0) ||
851 		    (asoc->sctp_cmt_on_off > 0)) {
852 			/* out of an RFC 2582 fast recovery window? */
853 			if (net->net_ack > 0) {
854 				/*
855 				 * Per section 7.2.3, were there any
856 				 * destinations that had a fast retransmit
857 				 * sent to them? If so, we need to
858 				 * adjust ssthresh and cwnd.
859 				 */
860 				struct sctp_tmit_chunk *lchk;
861 
862 				sctp_hs_cwnd_decrease(stcb, net);
863 
864 				lchk = TAILQ_FIRST(&asoc->send_queue);
865 
866 				net->partial_bytes_acked = 0;
867 				/* Turn on fast recovery window */
868 				asoc->fast_retran_loss_recovery = 1;
869 				if (lchk == NULL) {
870 					/* Mark end of the window */
871 					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
872 				} else {
873 					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
874 				}
875 
876 				/*
877 				 * CMT fast recovery -- per destination
878 				 * recovery variable.
879 				 */
880 				net->fast_retran_loss_recovery = 1;
881 
882 				if (lchk == NULL) {
883 					/* Mark end of the window */
884 					net->fast_recovery_tsn = asoc->sending_seq - 1;
885 				} else {
886 					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
887 				}
888 
889 				/*
890 				 * Disable Nonce Sum Checking and store the
891 				 * resync tsn
892 				 */
893 				asoc->nonce_sum_check = 0;
894 				asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
895 
896 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
897 				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
898 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
899 				    stcb->sctp_ep, stcb, net);
900 			}
901 		} else if (net->net_ack > 0) {
902 			/*
903 			 * Mark a peg that we WOULD have done a cwnd
904 			 * reduction but RFC2582 prevented this action.
905 			 */
906 			SCTP_STAT_INCR(sctps_fastretransinrtt);
907 		}
908 	}
909 }
910 
911 void
912 sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
913     struct sctp_association *asoc,
914     int accum_moved, int reneged_all, int will_exit)
915 {
916 	struct sctp_nets *net;
917 
918 	/******************************/
919 	/* update cwnd and Early FR   */
920 	/******************************/
921 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
922 
923 #ifdef JANA_CMT_FAST_RECOVERY
924 		/*
925 		 * CMT fast recovery code. Need to debug.
926 		 */
927 		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
928 			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
929 			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
930 				net->will_exit_fast_recovery = 1;
931 			}
932 		}
933 #endif
934 		if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
935 			/*
936 			 * So, first of all, do we need to have an Early FR
937 			 * timer running?
938 			 */
939 			if ((!TAILQ_EMPTY(&asoc->sent_queue) &&
940 			    (net->ref_count > 1) &&
941 			    (net->flight_size < net->cwnd)) ||
942 			    (reneged_all)) {
943 				/*
944 				 * yes, so in this case stop it if it's
945 				 * running, and then restart it. Reneging
946 				 * all is a special case where we want to
947 				 * run the Early FR timer and then force the
948 				 * last few unacked to be sent, causing us
949 				 * to elicit a sack with gaps to force out
950 				 * the others.
951 				 */
952 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
953 					SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
954 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
955 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
956 				}
957 				SCTP_STAT_INCR(sctps_earlyfrstrid);
958 				sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
959 			} else {
960 				/* No, stop it if it's running */
961 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
962 					SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
963 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
964 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
965 				}
966 			}
967 		}
968 		/* if nothing was acked on this destination skip it */
969 		if (net->net_ack == 0) {
970 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
971 				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
972 			}
973 			continue;
974 		}
975 		if (net->net_ack2 > 0) {
976 			/*
977 			 * Karn's rule applies to clearing the error count; this
978 			 * is optional.
979 			 */
980 			net->error_count = 0;
981 			if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
982 			    SCTP_ADDR_NOT_REACHABLE) {
983 				/* addr came good */
984 				net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
985 				net->dest_state |= SCTP_ADDR_REACHABLE;
986 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
987 				    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
988 				/* now was it the primary? if so restore */
989 				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
990 					(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
991 				}
992 			}
993 			/*
994 			 * JRS 5/14/07 - If CMT PF is on and the destination
995 			 * is in PF state, set the destination to active
996 			 * state and set the cwnd to one or two MTU's based
997 			 * on whether PF1 or PF2 is being used.
998 			 *
999 			 * Should we stop any running T3 timer here?
1000 			 */
1001 			if ((asoc->sctp_cmt_on_off > 0) &&
1002 			    (asoc->sctp_cmt_pf > 0) &&
1003 			    ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
1004 				net->dest_state &= ~SCTP_ADDR_PF;
1005 				net->cwnd = net->mtu * asoc->sctp_cmt_pf;
1006 				SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
1007 				    net, net->cwnd);
1008 				/*
1009 				 * Since the cwnd value is explicitly set,
1010 				 * skip the code that updates the cwnd
1011 				 * value.
1012 				 */
1013 				goto skip_cwnd_update;
1014 			}
1015 		}
1016 #ifdef JANA_CMT_FAST_RECOVERY
1017 		/*
1018 		 * CMT fast recovery code
1019 		 */
1020 		/*
1021 		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
1022 		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
1023 		 * } else if (sctp_cmt_on_off == 0 &&
1024 		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
1025 		 */
1026 #endif
1027 
1028 		if (asoc->fast_retran_loss_recovery &&
1029 		    (will_exit == 0) &&
1030 		    (asoc->sctp_cmt_on_off == 0)) {
1031 			/*
1032 			 * If we are in loss recovery we skip any cwnd
1033 			 * update
1034 			 */
1035 			goto skip_cwnd_update;
1036 		}
1037 		/*
1038 		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
1039 		 * moved.
1040 		 */
1041 		if (accum_moved ||
1042 		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
1043 			/* If the cumulative ack moved we can proceed */
1044 			if (net->cwnd <= net->ssthresh) {
1045 				/* We are in slow start */
1046 				if (net->flight_size + net->net_ack >= net->cwnd) {
1047 
1048 					sctp_hs_cwnd_increase(stcb, net);
1049 
1050 				} else {
1051 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
1052 						sctp_log_cwnd(stcb, net, net->net_ack,
1053 						    SCTP_CWND_LOG_NOADV_SS);
1054 					}
1055 				}
1056 			} else {
1057 				/* We are in congestion avoidance */
1058 				net->partial_bytes_acked += net->net_ack;
1059 				if ((net->flight_size + net->net_ack >= net->cwnd) &&
1060 				    (net->partial_bytes_acked >= net->cwnd)) {
1061 					net->partial_bytes_acked -= net->cwnd;
1062 					net->cwnd += net->mtu;
1063 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1064 						sctp_log_cwnd(stcb, net, net->mtu,
1065 						    SCTP_CWND_LOG_FROM_CA);
1066 					}
1067 				} else {
1068 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
1069 						sctp_log_cwnd(stcb, net, net->net_ack,
1070 						    SCTP_CWND_LOG_NOADV_CA);
1071 					}
1072 				}
1073 			}
1074 		} else {
1075 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
1076 				sctp_log_cwnd(stcb, net, net->mtu,
1077 				    SCTP_CWND_LOG_NO_CUMACK);
1078 			}
1079 		}
1080 skip_cwnd_update:
1081 		/*
1082 		 * Now, according to Karn's rule, do we need to restore the
1083 		 * RTO timer? Check our net_ack2. If it is not set, then we
1084 		 * have an ambiguity, i.e. all data ack'd was sent to more
1085 		 * than one place.
1086 		 */
1087 		if (net->net_ack2) {
1088 			/* restore any doubled timers */
1089 			net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
1090 			if (net->RTO < stcb->asoc.minrto) {
1091 				net->RTO = stcb->asoc.minrto;
1092 			}
1093 			if (net->RTO > stcb->asoc.maxrto) {
1094 				net->RTO = stcb->asoc.maxrto;
1095 			}
1096 		}
1097 	}
1098 }
1099 
1100 
1101 /*
1102  * H-TCP congestion control. The algorithm is detailed in:
1103  * R.N.Shorten, D.J.Leith:
1104  *   "H-TCP: TCP for high-speed and long-distance networks"
1105  *   Proc. PFLDnet, Argonne, 2004.
1106  * http://www.hamilton.ie/net/htcp3.pdf
1107  */
1108 
1109 
1110 static int use_rtt_scaling = 1;
1111 static int use_bandwidth_switch = 1;
1112 
1113 static inline int
1114 between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
1115 {
1116 	return seq3 - seq2 >= seq1 - seq2;
1117 }
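
/*
 * Note (illustrative): between() relies on unsigned wraparound, so it
 * answers "is seq1 within [seq2, seq3]" even across a 32-bit rollover.
 * For example, between(5, 0xfffffffe, 10) is true because, in uint32_t
 * arithmetic, 10 - 0xfffffffe == 12 and 5 - 0xfffffffe == 7.
 */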
1118 
1119 static inline uint32_t
1120 htcp_cong_time(struct htcp *ca)
1121 {
1122 	return sctp_get_tick_count() - ca->last_cong;
1123 }
1124 
1125 static inline uint32_t
1126 htcp_ccount(struct htcp *ca)
1127 {
1128 	return htcp_cong_time(ca) / ca->minRTT;
1129 }
1130 
1131 static inline void
1132 htcp_reset(struct htcp *ca)
1133 {
1134 	ca->undo_last_cong = ca->last_cong;
1135 	ca->undo_maxRTT = ca->maxRTT;
1136 	ca->undo_old_maxB = ca->old_maxB;
1137 	ca->last_cong = sctp_get_tick_count();
1138 }
1139 
1140 #ifdef SCTP_NOT_USED
1141 
1142 static uint32_t
1143 htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
1144 {
1145 	net->htcp_ca.last_cong = net->htcp_ca.undo_last_cong;
1146 	net->htcp_ca.maxRTT = net->htcp_ca.undo_maxRTT;
1147 	net->htcp_ca.old_maxB = net->htcp_ca.undo_old_maxB;
1148 	return max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->htcp_ca.beta) * net->mtu);
1149 }
1150 
1151 #endif
1152 
1153 static inline void
1154 measure_rtt(struct sctp_tcb *stcb, struct sctp_nets *net)
1155 {
1156 	uint32_t srtt = net->lastsa >> 3;
1157 
1158 	/* keep track of minimum RTT seen so far, minRTT is zero at first */
1159 	if (net->htcp_ca.minRTT > srtt || !net->htcp_ca.minRTT)
1160 		net->htcp_ca.minRTT = srtt;
1161 
1162 	/* max RTT */
1163 	if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->htcp_ca) > 3) {
1164 		if (net->htcp_ca.maxRTT < net->htcp_ca.minRTT)
1165 			net->htcp_ca.maxRTT = net->htcp_ca.minRTT;
1166 		if (net->htcp_ca.maxRTT < srtt && srtt <= net->htcp_ca.maxRTT + MSEC_TO_TICKS(20))
1167 			net->htcp_ca.maxRTT = srtt;
1168 	}
1169 }
1170 
1171 static void
1172 measure_achieved_throughput(struct sctp_tcb *stcb, struct sctp_nets *net)
1173 {
1174 	uint32_t now = sctp_get_tick_count();
1175 
1176 	if (net->fast_retran_ip == 0)
1177 		net->htcp_ca.bytes_acked = net->net_ack;
1178 
1179 	if (!use_bandwidth_switch)
1180 		return;
1181 
1182 	/* achieved throughput calculations */
1183 	/* JRS - not 100% sure of this statement */
1184 	if (net->fast_retran_ip == 1) {
1185 		net->htcp_ca.bytecount = 0;
1186 		net->htcp_ca.lasttime = now;
1187 		return;
1188 	}
1189 	net->htcp_ca.bytecount += net->net_ack;
1190 
1191 	if (net->htcp_ca.bytecount >= net->cwnd - ((net->htcp_ca.alpha >> 7 ? : 1) * net->mtu)
1192 	    && now - net->htcp_ca.lasttime >= net->htcp_ca.minRTT
1193 	    && net->htcp_ca.minRTT > 0) {
1194 		uint32_t cur_Bi = net->htcp_ca.bytecount / net->mtu * hz / (now - net->htcp_ca.lasttime);
1195 
1196 		if (htcp_ccount(&net->htcp_ca) <= 3) {
1197 			/* just after backoff */
1198 			net->htcp_ca.minB = net->htcp_ca.maxB = net->htcp_ca.Bi = cur_Bi;
1199 		} else {
1200 			net->htcp_ca.Bi = (3 * net->htcp_ca.Bi + cur_Bi) / 4;
1201 			if (net->htcp_ca.Bi > net->htcp_ca.maxB)
1202 				net->htcp_ca.maxB = net->htcp_ca.Bi;
1203 			if (net->htcp_ca.minB > net->htcp_ca.maxB)
1204 				net->htcp_ca.minB = net->htcp_ca.maxB;
1205 		}
1206 		net->htcp_ca.bytecount = 0;
1207 		net->htcp_ca.lasttime = now;
1208 	}
1209 }
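
/*
 * Note (illustrative): cur_Bi above is in segments per second;
 * bytecount / mtu converts the acked bytes to segments and
 * hz / (now - lasttime) converts the sample interval from ticks to
 * seconds. Bi then tracks a 3/4-old, 1/4-new weighted average of those
 * samples; maxB records the largest such estimate since the last
 * backoff.
 */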
1210 
1211 static inline void
1212 htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
1213 {
1214 	if (use_bandwidth_switch) {
1215 		uint32_t maxB = ca->maxB;
1216 		uint32_t old_maxB = ca->old_maxB;
1217 
1218 		ca->old_maxB = ca->maxB;
1219 
1220 		if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
1221 			ca->beta = BETA_MIN;
1222 			ca->modeswitch = 0;
1223 			return;
1224 		}
1225 	}
1226 	if (ca->modeswitch && minRTT > (uint32_t) MSEC_TO_TICKS(10) && maxRTT) {
1227 		ca->beta = (minRTT << 7) / maxRTT;
1228 		if (ca->beta < BETA_MIN)
1229 			ca->beta = BETA_MIN;
1230 		else if (ca->beta > BETA_MAX)
1231 			ca->beta = BETA_MAX;
1232 	} else {
1233 		ca->beta = BETA_MIN;
1234 		ca->modeswitch = 1;
1235 	}
1236 }
1237 
1238 static inline void
1239 htcp_alpha_update(struct htcp *ca)
1240 {
1241 	uint32_t minRTT = ca->minRTT;
1242 	uint32_t factor = 1;
1243 	uint32_t diff = htcp_cong_time(ca);
1244 
1245 	if (diff > (uint32_t) hz) {
1246 		diff -= hz;
1247 		factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
1248 	}
1249 	if (use_rtt_scaling && minRTT) {
1250 		uint32_t scale = (hz << 3) / (10 * minRTT);
1251 
1252 		scale = min(max(scale, 1U << 2), 10U << 3);	/* clamping ratio to
1253 								 * interval [0.5,10]<<3 */
1254 		factor = (factor << 3) / scale;
1255 		if (!factor)
1256 			factor = 1;
1257 	}
1258 	ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
1259 	if (!ca->alpha)
1260 		ca->alpha = ALPHA_BASE;
1261 }
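
/*
 * Worked example (illustrative; assumes hz = 1000): within one second
 * of the last congestion event factor starts at 1, and with
 * minRTT = 100 ticks the RTT-scaling step is neutral
 * (scale = (1000 << 3) / (10 * 100) = 8, factor = (1 << 3) / 8 = 1).
 * With beta = 76 this yields alpha = 2 * 1 * (128 - 76) = 104, roughly
 * 0.8 MTU of growth per RTT. Once diff exceeds hz, the quadratic factor
 * term ramps alpha up the longer the flow stays congestion-free, which
 * is the high-speed part of H-TCP.
 */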
1262 
1263 /* After we have the rtt data to calculate beta, we'd still prefer to wait one
1264  * rtt before we adjust our beta to ensure we are working from a consistent
1265  * set of data.
1266  *
1267  * This function should be called when we hit a congestion event since only at
1268  * that point do we really have a sense of maxRTT (the queues en route
1269  * were getting just too full now).
1270  */
1271 static void
1272 htcp_param_update(struct sctp_tcb *stcb, struct sctp_nets *net)
1273 {
1274 	uint32_t minRTT = net->htcp_ca.minRTT;
1275 	uint32_t maxRTT = net->htcp_ca.maxRTT;
1276 
1277 	htcp_beta_update(&net->htcp_ca, minRTT, maxRTT);
1278 	htcp_alpha_update(&net->htcp_ca);
1279 
1280 	/*
1281 	 * add slowly fading memory for maxRTT to accommodate routing
1282 	 * changes etc
1283 	 */
1284 	if (minRTT > 0 && maxRTT > minRTT)
1285 		net->htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
1286 }
1287 
1288 static uint32_t
1289 htcp_recalc_ssthresh(struct sctp_tcb *stcb, struct sctp_nets *net)
1290 {
1291 	htcp_param_update(stcb, net);
1292 	return max(((net->cwnd / net->mtu * net->htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu);
1293 }
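
/*
 * Worked example (illustrative): beta is a <<7 fixed-point fraction.
 * With minRTT = 60 and maxRTT = 100 ticks (and modeswitch set),
 * htcp_beta_update() gives beta = (60 << 7) / 100 = 76, roughly 0.59,
 * so the recalculated ssthresh is (cwnd / mtu * 76 >> 7) * mtu, about
 * 59% of cwnd and never less than 2 MTUs; a gentler backoff than the
 * fixed 1/2 of the default algorithm when maxRTT is close to minRTT.
 */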
1294 
1295 static void
1296 htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
1297 {
1298 	/*-
1299 	 * How to handle these functions?
1300 	 *	if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
1301 	 *		return;
1302 	 */
1303 	if (net->cwnd <= net->ssthresh) {
1304 		/* We are in slow start */
1305 		if (net->flight_size + net->net_ack >= net->cwnd) {
1306 			if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
1307 				net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
1308 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1309 					sctp_log_cwnd(stcb, net, net->mtu,
1310 					    SCTP_CWND_LOG_FROM_SS);
1311 				}
1312 			} else {
1313 				net->cwnd += net->net_ack;
1314 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1315 					sctp_log_cwnd(stcb, net, net->net_ack,
1316 					    SCTP_CWND_LOG_FROM_SS);
1317 				}
1318 			}
1319 		} else {
1320 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
1321 				sctp_log_cwnd(stcb, net, net->net_ack,
1322 				    SCTP_CWND_LOG_NOADV_SS);
1323 			}
1324 		}
1325 	} else {
1326 		measure_rtt(stcb, net);
1327 
1328 		/*
1329 		 * In dangerous area, increase slowly. In theory this is
1330 		 * net->cwnd += alpha / net->cwnd
1331 		 */
1332 		/* What is snd_cwnd_cnt?? */
1333 		if (((net->partial_bytes_acked / net->mtu * net->htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
1334 			/*-
1335 			 * Does SCTP have a cwnd clamp?
1336 			 * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
1337 			 */
1338 			net->cwnd += net->mtu;
1339 			net->partial_bytes_acked = 0;
1340 			htcp_alpha_update(&net->htcp_ca);
1341 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1342 				sctp_log_cwnd(stcb, net, net->mtu,
1343 				    SCTP_CWND_LOG_FROM_CA);
1344 			}
1345 		} else {
1346 			net->partial_bytes_acked += net->net_ack;
1347 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
1348 				sctp_log_cwnd(stcb, net, net->net_ack,
1349 				    SCTP_CWND_LOG_NOADV_CA);
1350 			}
1351 		}
1352 
1353 		net->htcp_ca.bytes_acked = net->mtu;
1354 	}
1355 }
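
/*
 * Worked example of the CA step above (illustrative): alpha is also a
 * <<7 fixed-point value, so the test
 *
 *   ((partial_bytes_acked / mtu * alpha) >> 7) * mtu >= cwnd
 *
 * fires once roughly cwnd * 128 / alpha bytes have been acked. With
 * alpha = 256 (i.e. 2.0), that is every cwnd / 2 acked bytes, giving
 * two 1-MTU increases per window, matching the paper's
 * cwnd += alpha / cwnd per-ACK rule.
 */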
1356 
1357 #ifdef SCTP_NOT_USED
1358 /* Lower bound on congestion window. */
1359 static uint32_t
1360 htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
1361 {
1362 	return net->ssthresh;
1363 }
1364 
1365 #endif
1366 
1367 static void
1368 htcp_init(struct sctp_tcb *stcb, struct sctp_nets *net)
1369 {
1370 	memset(&net->htcp_ca, 0, sizeof(struct htcp));
1371 	net->htcp_ca.alpha = ALPHA_BASE;
1372 	net->htcp_ca.beta = BETA_MIN;
1373 	net->htcp_ca.bytes_acked = net->mtu;
1374 	net->htcp_ca.last_cong = sctp_get_tick_count();
1375 }
1376 
1377 void
1378 sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
1379 {
1380 	/*
1381 	 * We take the max of two MTUs or the INITIAL_CWND, and then
1382 	 * limit this to 4 MTUs of sending.
1383 	 */
1384 	net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
1385 	net->ssthresh = stcb->asoc.peers_rwnd;
1386 	htcp_init(stcb, net);
1387 
1388 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
1389 		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
1390 	}
1391 }
1392 
1393 void
1394 sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
1395     struct sctp_association *asoc,
1396     int accum_moved, int reneged_all, int will_exit)
1397 {
1398 	struct sctp_nets *net;
1399 
1400 	/******************************/
1401 	/* update cwnd and Early FR   */
1402 	/******************************/
1403 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1404 
1405 #ifdef JANA_CMT_FAST_RECOVERY
1406 		/*
1407 		 * CMT fast recovery code. Need to debug.
1408 		 */
1409 		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
1410 			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
1411 			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
1412 				net->will_exit_fast_recovery = 1;
1413 			}
1414 		}
1415 #endif
1416 		if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
1417 			/*
1418 			 * So, first of all, do we need to have an Early FR
1419 			 * timer running?
1420 			 */
1421 			if ((!TAILQ_EMPTY(&asoc->sent_queue) &&
1422 			    (net->ref_count > 1) &&
1423 			    (net->flight_size < net->cwnd)) ||
1424 			    (reneged_all)) {
1425 				/*
1426 				 * yes, so in this case stop it if it's
1427 				 * running, and then restart it. Reneging
1428 				 * all is a special case where we want to
1429 				 * run the Early FR timer and then force the
1430 				 * last few unacked to be sent, causing us
1431 				 * to elicit a sack with gaps to force out
1432 				 * the others.
1433 				 */
1434 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
1435 					SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
1436 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
1437 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
1438 				}
1439 				SCTP_STAT_INCR(sctps_earlyfrstrid);
1440 				sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
1441 			} else {
1442 				/* No, stop it if it's running */
1443 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
1444 					SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
1445 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
1446 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
1447 				}
1448 			}
1449 		}
1450 		/* if nothing was acked on this destination skip it */
1451 		if (net->net_ack == 0) {
1452 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
1453 				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
1454 			}
1455 			continue;
1456 		}
1457 		if (net->net_ack2 > 0) {
1458 			/*
1459 			 * Karn's rule applies to clearing the error count; this
1460 			 * is optional.
1461 			 */
1462 			net->error_count = 0;
1463 			if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
1464 			    SCTP_ADDR_NOT_REACHABLE) {
1465 				/* addr came good */
1466 				net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
1467 				net->dest_state |= SCTP_ADDR_REACHABLE;
1468 				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
1469 				    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
1470 				/* now was it the primary? if so restore */
1471 				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
1472 					(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
1473 				}
1474 			}
1475 			/*
1476 			 * JRS 5/14/07 - If CMT PF is on and the destination
1477 			 * is in PF state, set the destination to active
1478 			 * state and set the cwnd to one or two MTU's based
1479 			 * on whether PF1 or PF2 is being used.
1480 			 *
1481 			 * Should we stop any running T3 timer here?
1482 			 */
1483 			if ((asoc->sctp_cmt_on_off > 0) &&
1484 			    (asoc->sctp_cmt_pf > 0) &&
1485 			    ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
1486 				net->dest_state &= ~SCTP_ADDR_PF;
1487 				net->cwnd = net->mtu * asoc->sctp_cmt_pf;
1488 				SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
1489 				    net, net->cwnd);
1490 				/*
1491 				 * Since the cwnd value is explicitly set,
1492 				 * skip the code that updates the cwnd
1493 				 * value.
1494 				 */
1495 				goto skip_cwnd_update;
1496 			}
1497 		}
1498 #ifdef JANA_CMT_FAST_RECOVERY
1499 		/*
1500 		 * CMT fast recovery code
1501 		 */
1502 		/*
1503 		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
1504 		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
1505 		 * } else if (sctp_cmt_on_off == 0 &&
1506 		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
1507 		 */
1508 #endif
1509 
1510 		if (asoc->fast_retran_loss_recovery &&
1511 		    will_exit == 0 &&
1512 		    (asoc->sctp_cmt_on_off == 0)) {
1513 			/*
1514 			 * If we are in loss recovery we skip any cwnd
1515 			 * update
1516 			 */
1517 			goto skip_cwnd_update;
1518 		}
1519 		/*
1520 		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
1521 		 * moved.
1522 		 */
1523 		if (accum_moved ||
1524 		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
1525 			htcp_cong_avoid(stcb, net);
1526 			measure_achieved_throughput(stcb, net);
1527 		} else {
1528 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
1529 				sctp_log_cwnd(stcb, net, net->mtu,
1530 				    SCTP_CWND_LOG_NO_CUMACK);
1531 			}
1532 		}
1533 skip_cwnd_update:
1534 		/*
1535 		 * Now, according to Karn's rule, do we need to restore the
1536 		 * RTO timer? Check our net_ack2. If it is not set, then we
1537 		 * have an ambiguity, i.e. all data ack'd was sent to more
1538 		 * than one place.
1539 		 */
1540 		if (net->net_ack2) {
1541 			/* restore any doubled timers */
1542 			net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
1543 			if (net->RTO < stcb->asoc.minrto) {
1544 				net->RTO = stcb->asoc.minrto;
1545 			}
1546 			if (net->RTO > stcb->asoc.maxrto) {
1547 				net->RTO = stcb->asoc.maxrto;
1548 			}
1549 		}
1550 	}
1551 }
1552 
1553 void
1554 sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
1555     struct sctp_association *asoc)
1556 {
1557 	struct sctp_nets *net;
1558 
1559 	/*
1560 	 * CMT fast recovery code. Needs debugging: should the condition also
1561 	 * include ((sctp_cmt_on_off > 0) && (net->fast_retran_loss_recovery == 0))?
1562 	 */
1563 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1564 		if ((asoc->fast_retran_loss_recovery == 0) ||
1565 		    (asoc->sctp_cmt_on_off > 0)) {
1566 			/* out of an RFC 2582 fast recovery window? */
1567 			if (net->net_ack > 0) {
1568 				/*
1569 				 * Per section 7.2.3, were there any
1570 				 * destinations that had a fast retransmit
1571 				 * sent to them? If so, we need to
1572 				 * adjust ssthresh and cwnd.
1573 				 */
1574 				struct sctp_tmit_chunk *lchk;
1575 				int old_cwnd = net->cwnd;
1576 
1577 				/* JRS - reset as if state were changed */
1578 				htcp_reset(&net->htcp_ca);
1579 				net->ssthresh = htcp_recalc_ssthresh(stcb, net);
1580 				net->cwnd = net->ssthresh;
1581 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1582 					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
1583 					    SCTP_CWND_LOG_FROM_FR);
1584 				}
1585 				lchk = TAILQ_FIRST(&asoc->send_queue);
1586 
1587 				net->partial_bytes_acked = 0;
1588 				/* Turn on fast recovery window */
1589 				asoc->fast_retran_loss_recovery = 1;
1590 				if (lchk == NULL) {
1591 					/* Mark end of the window */
1592 					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
1593 				} else {
1594 					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
1595 				}
1596 
1597 				/*
1598 				 * CMT fast recovery -- per destination
1599 				 * recovery variable.
1600 				 */
1601 				net->fast_retran_loss_recovery = 1;
1602 
1603 				if (lchk == NULL) {
1604 					/* Mark end of the window */
1605 					net->fast_recovery_tsn = asoc->sending_seq - 1;
1606 				} else {
1607 					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
1608 				}
1609 
1610 				/*
1611 				 * Disable Nonce Sum Checking and store the
1612 				 * resync tsn
1613 				 */
1614 				asoc->nonce_sum_check = 0;
1615 				asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
1616 
1617 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
1618 				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
1619 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
1620 				    stcb->sctp_ep, stcb, net);
1621 			}
1622 		} else if (net->net_ack > 0) {
1623 			/*
1624 			 * Mark a peg that we WOULD have done a cwnd
1625 			 * reduction but RFC2582 prevented this action.
1626 			 */
1627 			SCTP_STAT_INCR(sctps_fastretransinrtt);
1628 		}
1629 	}
1630 }
1631 
1632 void
1633 sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
1634     struct sctp_nets *net)
1635 {
1636 	int old_cwnd = net->cwnd;
1637 
1638 	/* JRS - reset as if the state were being changed to timeout */
1639 	htcp_reset(&net->htcp_ca);
1640 	net->ssthresh = htcp_recalc_ssthresh(stcb, net);
1641 	net->cwnd = net->mtu;
1642 	net->partial_bytes_acked = 0;
1643 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1644 		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
1645 	}
1646 }
1647 
1648 void
1649 sctp_htcp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
1650     struct sctp_tcb *stcb, struct sctp_nets *net)
1651 {
1652 	int old_cwnd;
1653 
1654 	old_cwnd = net->cwnd;
1655 
1656 	sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
1657 	net->htcp_ca.last_cong = sctp_get_tick_count();
1658 	/*
1659 	 * make a small adjustment to cwnd and force to CA.
1660 	 */
1661 	if (net->cwnd > net->mtu)
1662 		/* drop down one MTU after sending */
1663 		net->cwnd -= net->mtu;
1664 	if (net->cwnd < net->ssthresh)
1665 		/* still in SS, move to CA */
1666 		net->ssthresh = net->cwnd - 1;
1667 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1668 		sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
1669 	}
1670 }
1671 
1672 void
1673 sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
1674     struct sctp_nets *net)
1675 {
1676 	int old_cwnd;
1677 
1678 	old_cwnd = net->cwnd;
1679 
1680 	/* JRS - reset htcp as if state changed */
1681 	htcp_reset(&net->htcp_ca);
1682 	SCTP_STAT_INCR(sctps_ecnereducedcwnd);
1683 	net->ssthresh = htcp_recalc_ssthresh(stcb, net);
1684 	if (net->ssthresh < net->mtu) {
1685 		net->ssthresh = net->mtu;
1686 		/* here back off the timer as well, to slow us down */
1687 		net->RTO <<= 1;
1688 	}
1689 	net->cwnd = net->ssthresh;
1690 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1691 		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
1692 	}
1693 }
1694