1 /* SCTP kernel implementation 2 * Copyright (c) 1999-2000 Cisco, Inc. 3 * Copyright (c) 1999-2001 Motorola, Inc. 4 * Copyright (c) 2001-2003 International Business Machines Corp. 5 * Copyright (c) 2001 Intel Corp. 6 * Copyright (c) 2001 La Monte H.P. Yarroll 7 * 8 * This file is part of the SCTP kernel implementation 9 * 10 * This module provides the abstraction for an SCTP tranport representing 11 * a remote transport address. For local transport addresses, we just use 12 * union sctp_addr. 13 * 14 * This SCTP implementation is free software; 15 * you can redistribute it and/or modify it under the terms of 16 * the GNU General Public License as published by 17 * the Free Software Foundation; either version 2, or (at your option) 18 * any later version. 19 * 20 * This SCTP implementation is distributed in the hope that it 21 * will be useful, but WITHOUT ANY WARRANTY; without even the implied 22 * ************************ 23 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 24 * See the GNU General Public License for more details. 25 * 26 * You should have received a copy of the GNU General Public License 27 * along with GNU CC; see the file COPYING. If not, write to 28 * the Free Software Foundation, 59 Temple Place - Suite 330, 29 * Boston, MA 02111-1307, USA. 30 * 31 * Please send any bug reports or fixes you make to the 32 * email address(es): 33 * lksctp developers <lksctp-developers@lists.sourceforge.net> 34 * 35 * Or submit a bug report through the following website: 36 * http://www.sf.net/projects/lksctp 37 * 38 * Written or modified by: 39 * La Monte H.P. Yarroll <piggy@acm.org> 40 * Karl Knutson <karl@athena.chicago.il.us> 41 * Jon Grimm <jgrimm@us.ibm.com> 42 * Xingang Guo <xingang.guo@intel.com> 43 * Hui Huang <hui.huang@nokia.com> 44 * Sridhar Samudrala <sri@us.ibm.com> 45 * Ardelle Fan <ardelle.fan@intel.com> 46 * 47 * Any bugs reported given to us we will try to fix... any fixes shared will 48 * be incorporated into the next SCTP release. 49 */ 50 51 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 52 53 #include <linux/slab.h> 54 #include <linux/types.h> 55 #include <linux/random.h> 56 #include <net/sctp/sctp.h> 57 #include <net/sctp/sm.h> 58 59 /* 1st Level Abstractions. */ 60 61 /* Initialize a new transport from provided memory. */ 62 static struct sctp_transport *sctp_transport_init(struct net *net, 63 struct sctp_transport *peer, 64 const union sctp_addr *addr, 65 gfp_t gfp) 66 { 67 /* Copy in the address. */ 68 peer->ipaddr = *addr; 69 peer->af_specific = sctp_get_af_specific(addr->sa.sa_family); 70 memset(&peer->saddr, 0, sizeof(union sctp_addr)); 71 72 peer->sack_generation = 0; 73 74 /* From 6.3.1 RTO Calculation: 75 * 76 * C1) Until an RTT measurement has been made for a packet sent to the 77 * given destination transport address, set RTO to the protocol 78 * parameter 'RTO.Initial'. 79 */ 80 peer->rto = msecs_to_jiffies(net->sctp.rto_initial); 81 82 peer->last_time_heard = jiffies; 83 peer->last_time_ecne_reduced = jiffies; 84 85 peer->param_flags = SPP_HB_DISABLE | 86 SPP_PMTUD_ENABLE | 87 SPP_SACKDELAY_ENABLE; 88 89 /* Initialize the default path max_retrans. */ 90 peer->pathmaxrxt = net->sctp.max_retrans_path; 91 peer->pf_retrans = net->sctp.pf_retrans; 92 93 INIT_LIST_HEAD(&peer->transmitted); 94 INIT_LIST_HEAD(&peer->send_ready); 95 INIT_LIST_HEAD(&peer->transports); 96 97 setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 98 (unsigned long)peer); 99 setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, 100 (unsigned long)peer); 101 setup_timer(&peer->proto_unreach_timer, 102 sctp_generate_proto_unreach_event, (unsigned long)peer); 103 104 /* Initialize the 64-bit random nonce sent with heartbeat. */ 105 get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); 106 107 atomic_set(&peer->refcnt, 1); 108 109 return peer; 110 } 111 112 /* Allocate and initialize a new transport. */ 113 struct sctp_transport *sctp_transport_new(struct net *net, 114 const union sctp_addr *addr, 115 gfp_t gfp) 116 { 117 struct sctp_transport *transport; 118 119 transport = t_new(struct sctp_transport, gfp); 120 if (!transport) 121 goto fail; 122 123 if (!sctp_transport_init(net, transport, addr, gfp)) 124 goto fail_init; 125 126 SCTP_DBG_OBJCNT_INC(transport); 127 128 return transport; 129 130 fail_init: 131 kfree(transport); 132 133 fail: 134 return NULL; 135 } 136 137 /* This transport is no longer needed. Free up if possible, or 138 * delay until it last reference count. 139 */ 140 void sctp_transport_free(struct sctp_transport *transport) 141 { 142 transport->dead = 1; 143 144 /* Try to delete the heartbeat timer. */ 145 if (del_timer(&transport->hb_timer)) 146 sctp_transport_put(transport); 147 148 /* Delete the T3_rtx timer if it's active. 149 * There is no point in not doing this now and letting 150 * structure hang around in memory since we know 151 * the tranport is going away. 152 */ 153 if (del_timer(&transport->T3_rtx_timer)) 154 sctp_transport_put(transport); 155 156 /* Delete the ICMP proto unreachable timer if it's active. */ 157 if (del_timer(&transport->proto_unreach_timer)) 158 sctp_association_put(transport->asoc); 159 160 sctp_transport_put(transport); 161 } 162 163 static void sctp_transport_destroy_rcu(struct rcu_head *head) 164 { 165 struct sctp_transport *transport; 166 167 transport = container_of(head, struct sctp_transport, rcu); 168 169 dst_release(transport->dst); 170 kfree(transport); 171 SCTP_DBG_OBJCNT_DEC(transport); 172 } 173 174 /* Destroy the transport data structure. 175 * Assumes there are no more users of this structure. 176 */ 177 static void sctp_transport_destroy(struct sctp_transport *transport) 178 { 179 SCTP_ASSERT(transport->dead, "Transport is not dead", return); 180 181 call_rcu(&transport->rcu, sctp_transport_destroy_rcu); 182 183 sctp_packet_free(&transport->packet); 184 185 if (transport->asoc) 186 sctp_association_put(transport->asoc); 187 } 188 189 /* Start T3_rtx timer if it is not already running and update the heartbeat 190 * timer. This routine is called every time a DATA chunk is sent. 191 */ 192 void sctp_transport_reset_timers(struct sctp_transport *transport) 193 { 194 /* RFC 2960 6.3.2 Retransmission Timer Rules 195 * 196 * R1) Every time a DATA chunk is sent to any address(including a 197 * retransmission), if the T3-rtx timer of that address is not running 198 * start it running so that it will expire after the RTO of that 199 * address. 200 */ 201 202 if (!timer_pending(&transport->T3_rtx_timer)) 203 if (!mod_timer(&transport->T3_rtx_timer, 204 jiffies + transport->rto)) 205 sctp_transport_hold(transport); 206 207 /* When a data chunk is sent, reset the heartbeat interval. */ 208 if (!mod_timer(&transport->hb_timer, 209 sctp_transport_timeout(transport))) 210 sctp_transport_hold(transport); 211 } 212 213 /* This transport has been assigned to an association. 214 * Initialize fields from the association or from the sock itself. 215 * Register the reference count in the association. 216 */ 217 void sctp_transport_set_owner(struct sctp_transport *transport, 218 struct sctp_association *asoc) 219 { 220 transport->asoc = asoc; 221 sctp_association_hold(asoc); 222 } 223 224 /* Initialize the pmtu of a transport. */ 225 void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) 226 { 227 /* If we don't have a fresh route, look one up */ 228 if (!transport->dst || transport->dst->obsolete) { 229 dst_release(transport->dst); 230 transport->af_specific->get_dst(transport, &transport->saddr, 231 &transport->fl, sk); 232 } 233 234 if (transport->dst) { 235 transport->pathmtu = dst_mtu(transport->dst); 236 } else 237 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 238 } 239 240 void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu) 241 { 242 struct dst_entry *dst; 243 244 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { 245 pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", 246 __func__, pmtu, 247 SCTP_DEFAULT_MINSEGMENT); 248 /* Use default minimum segment size and disable 249 * pmtu discovery on this transport. 250 */ 251 t->pathmtu = SCTP_DEFAULT_MINSEGMENT; 252 } else { 253 t->pathmtu = pmtu; 254 } 255 256 dst = sctp_transport_dst_check(t); 257 if (!dst) 258 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); 259 260 if (dst) { 261 dst->ops->update_pmtu(dst, sk, NULL, pmtu); 262 263 dst = sctp_transport_dst_check(t); 264 if (!dst) 265 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); 266 } 267 } 268 269 /* Caches the dst entry and source address for a transport's destination 270 * address. 271 */ 272 void sctp_transport_route(struct sctp_transport *transport, 273 union sctp_addr *saddr, struct sctp_sock *opt) 274 { 275 struct sctp_association *asoc = transport->asoc; 276 struct sctp_af *af = transport->af_specific; 277 278 af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt)); 279 280 if (saddr) 281 memcpy(&transport->saddr, saddr, sizeof(union sctp_addr)); 282 else 283 af->get_saddr(opt, transport, &transport->fl); 284 285 if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) { 286 return; 287 } 288 if (transport->dst) { 289 transport->pathmtu = dst_mtu(transport->dst); 290 291 /* Initialize sk->sk_rcv_saddr, if the transport is the 292 * association's active path for getsockname(). 293 */ 294 if (asoc && (!asoc->peer.primary_path || 295 (transport == asoc->peer.active_path))) 296 opt->pf->af->to_sk_saddr(&transport->saddr, 297 asoc->base.sk); 298 } else 299 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 300 } 301 302 /* Hold a reference to a transport. */ 303 void sctp_transport_hold(struct sctp_transport *transport) 304 { 305 atomic_inc(&transport->refcnt); 306 } 307 308 /* Release a reference to a transport and clean up 309 * if there are no more references. 310 */ 311 void sctp_transport_put(struct sctp_transport *transport) 312 { 313 if (atomic_dec_and_test(&transport->refcnt)) 314 sctp_transport_destroy(transport); 315 } 316 317 /* Update transport's RTO based on the newly calculated RTT. */ 318 void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) 319 { 320 /* Check for valid transport. */ 321 SCTP_ASSERT(tp, "NULL transport", return); 322 323 /* We should not be doing any RTO updates unless rto_pending is set. */ 324 SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return); 325 326 if (tp->rttvar || tp->srtt) { 327 struct net *net = sock_net(tp->asoc->base.sk); 328 /* 6.3.1 C3) When a new RTT measurement R' is made, set 329 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'| 330 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R' 331 */ 332 333 /* Note: The above algorithm has been rewritten to 334 * express rto_beta and rto_alpha as inverse powers 335 * of two. 336 * For example, assuming the default value of RTO.Alpha of 337 * 1/8, rto_alpha would be expressed as 3. 338 */ 339 tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta) 340 + (((__u32)abs64((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta); 341 tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha) 342 + (rtt >> net->sctp.rto_alpha); 343 } else { 344 /* 6.3.1 C2) When the first RTT measurement R is made, set 345 * SRTT <- R, RTTVAR <- R/2. 346 */ 347 tp->srtt = rtt; 348 tp->rttvar = rtt >> 1; 349 } 350 351 /* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then 352 * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY. 353 */ 354 if (tp->rttvar == 0) 355 tp->rttvar = SCTP_CLOCK_GRANULARITY; 356 357 /* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */ 358 tp->rto = tp->srtt + (tp->rttvar << 2); 359 360 /* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min 361 * seconds then it is rounded up to RTO.Min seconds. 362 */ 363 if (tp->rto < tp->asoc->rto_min) 364 tp->rto = tp->asoc->rto_min; 365 366 /* 6.3.1 C7) A maximum value may be placed on RTO provided it is 367 * at least RTO.max seconds. 368 */ 369 if (tp->rto > tp->asoc->rto_max) 370 tp->rto = tp->asoc->rto_max; 371 372 sctp_max_rto(tp->asoc, tp); 373 tp->rtt = rtt; 374 375 /* Reset rto_pending so that a new RTT measurement is started when a 376 * new data chunk is sent. 377 */ 378 tp->rto_pending = 0; 379 380 SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d " 381 "rttvar: %d, rto: %ld\n", __func__, 382 tp, rtt, tp->srtt, tp->rttvar, tp->rto); 383 } 384 385 /* This routine updates the transport's cwnd and partial_bytes_acked 386 * parameters based on the bytes acked in the received SACK. 387 */ 388 void sctp_transport_raise_cwnd(struct sctp_transport *transport, 389 __u32 sack_ctsn, __u32 bytes_acked) 390 { 391 struct sctp_association *asoc = transport->asoc; 392 __u32 cwnd, ssthresh, flight_size, pba, pmtu; 393 394 cwnd = transport->cwnd; 395 flight_size = transport->flight_size; 396 397 /* See if we need to exit Fast Recovery first */ 398 if (asoc->fast_recovery && 399 TSN_lte(asoc->fast_recovery_exit, sack_ctsn)) 400 asoc->fast_recovery = 0; 401 402 /* The appropriate cwnd increase algorithm is performed if, and only 403 * if the cumulative TSN whould advanced and the congestion window is 404 * being fully utilized. 405 */ 406 if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) || 407 (flight_size < cwnd)) 408 return; 409 410 ssthresh = transport->ssthresh; 411 pba = transport->partial_bytes_acked; 412 pmtu = transport->asoc->pathmtu; 413 414 if (cwnd <= ssthresh) { 415 /* RFC 4960 7.2.1 416 * o When cwnd is less than or equal to ssthresh, an SCTP 417 * endpoint MUST use the slow-start algorithm to increase 418 * cwnd only if the current congestion window is being fully 419 * utilized, an incoming SACK advances the Cumulative TSN 420 * Ack Point, and the data sender is not in Fast Recovery. 421 * Only when these three conditions are met can the cwnd be 422 * increased; otherwise, the cwnd MUST not be increased. 423 * If these conditions are met, then cwnd MUST be increased 424 * by, at most, the lesser of 1) the total size of the 425 * previously outstanding DATA chunk(s) acknowledged, and 426 * 2) the destination's path MTU. This upper bound protects 427 * against the ACK-Splitting attack outlined in [SAVAGE99]. 428 */ 429 if (asoc->fast_recovery) 430 return; 431 432 if (bytes_acked > pmtu) 433 cwnd += pmtu; 434 else 435 cwnd += bytes_acked; 436 SCTP_DEBUG_PRINTK("%s: SLOW START: transport: %p, " 437 "bytes_acked: %d, cwnd: %d, ssthresh: %d, " 438 "flight_size: %d, pba: %d\n", 439 __func__, 440 transport, bytes_acked, cwnd, 441 ssthresh, flight_size, pba); 442 } else { 443 /* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh, 444 * upon each SACK arrival that advances the Cumulative TSN Ack 445 * Point, increase partial_bytes_acked by the total number of 446 * bytes of all new chunks acknowledged in that SACK including 447 * chunks acknowledged by the new Cumulative TSN Ack and by 448 * Gap Ack Blocks. 449 * 450 * When partial_bytes_acked is equal to or greater than cwnd 451 * and before the arrival of the SACK the sender had cwnd or 452 * more bytes of data outstanding (i.e., before arrival of the 453 * SACK, flightsize was greater than or equal to cwnd), 454 * increase cwnd by MTU, and reset partial_bytes_acked to 455 * (partial_bytes_acked - cwnd). 456 */ 457 pba += bytes_acked; 458 if (pba >= cwnd) { 459 cwnd += pmtu; 460 pba = ((cwnd < pba) ? (pba - cwnd) : 0); 461 } 462 SCTP_DEBUG_PRINTK("%s: CONGESTION AVOIDANCE: " 463 "transport: %p, bytes_acked: %d, cwnd: %d, " 464 "ssthresh: %d, flight_size: %d, pba: %d\n", 465 __func__, 466 transport, bytes_acked, cwnd, 467 ssthresh, flight_size, pba); 468 } 469 470 transport->cwnd = cwnd; 471 transport->partial_bytes_acked = pba; 472 } 473 474 /* This routine is used to lower the transport's cwnd when congestion is 475 * detected. 476 */ 477 void sctp_transport_lower_cwnd(struct sctp_transport *transport, 478 sctp_lower_cwnd_t reason) 479 { 480 struct sctp_association *asoc = transport->asoc; 481 482 switch (reason) { 483 case SCTP_LOWER_CWND_T3_RTX: 484 /* RFC 2960 Section 7.2.3, sctpimpguide 485 * When the T3-rtx timer expires on an address, SCTP should 486 * perform slow start by: 487 * ssthresh = max(cwnd/2, 4*MTU) 488 * cwnd = 1*MTU 489 * partial_bytes_acked = 0 490 */ 491 transport->ssthresh = max(transport->cwnd/2, 492 4*asoc->pathmtu); 493 transport->cwnd = asoc->pathmtu; 494 495 /* T3-rtx also clears fast recovery */ 496 asoc->fast_recovery = 0; 497 break; 498 499 case SCTP_LOWER_CWND_FAST_RTX: 500 /* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the 501 * destination address(es) to which the missing DATA chunks 502 * were last sent, according to the formula described in 503 * Section 7.2.3. 504 * 505 * RFC 2960 7.2.3, sctpimpguide Upon detection of packet 506 * losses from SACK (see Section 7.2.4), An endpoint 507 * should do the following: 508 * ssthresh = max(cwnd/2, 4*MTU) 509 * cwnd = ssthresh 510 * partial_bytes_acked = 0 511 */ 512 if (asoc->fast_recovery) 513 return; 514 515 /* Mark Fast recovery */ 516 asoc->fast_recovery = 1; 517 asoc->fast_recovery_exit = asoc->next_tsn - 1; 518 519 transport->ssthresh = max(transport->cwnd/2, 520 4*asoc->pathmtu); 521 transport->cwnd = transport->ssthresh; 522 break; 523 524 case SCTP_LOWER_CWND_ECNE: 525 /* RFC 2481 Section 6.1.2. 526 * If the sender receives an ECN-Echo ACK packet 527 * then the sender knows that congestion was encountered in the 528 * network on the path from the sender to the receiver. The 529 * indication of congestion should be treated just as a 530 * congestion loss in non-ECN Capable TCP. That is, the TCP 531 * source halves the congestion window "cwnd" and reduces the 532 * slow start threshold "ssthresh". 533 * A critical condition is that TCP does not react to 534 * congestion indications more than once every window of 535 * data (or more loosely more than once every round-trip time). 536 */ 537 if (time_after(jiffies, transport->last_time_ecne_reduced + 538 transport->rtt)) { 539 transport->ssthresh = max(transport->cwnd/2, 540 4*asoc->pathmtu); 541 transport->cwnd = transport->ssthresh; 542 transport->last_time_ecne_reduced = jiffies; 543 } 544 break; 545 546 case SCTP_LOWER_CWND_INACTIVE: 547 /* RFC 2960 Section 7.2.1, sctpimpguide 548 * When the endpoint does not transmit data on a given 549 * transport address, the cwnd of the transport address 550 * should be adjusted to max(cwnd/2, 4*MTU) per RTO. 551 * NOTE: Although the draft recommends that this check needs 552 * to be done every RTO interval, we do it every hearbeat 553 * interval. 554 */ 555 transport->cwnd = max(transport->cwnd/2, 556 4*asoc->pathmtu); 557 break; 558 } 559 560 transport->partial_bytes_acked = 0; 561 SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: " 562 "%d ssthresh: %d\n", __func__, 563 transport, reason, 564 transport->cwnd, transport->ssthresh); 565 } 566 567 /* Apply Max.Burst limit to the congestion window: 568 * sctpimpguide-05 2.14.2 569 * D) When the time comes for the sender to 570 * transmit new DATA chunks, the protocol parameter Max.Burst MUST 571 * first be applied to limit how many new DATA chunks may be sent. 572 * The limit is applied by adjusting cwnd as follows: 573 * if ((flightsize+ Max.Burst * MTU) < cwnd) 574 * cwnd = flightsize + Max.Burst * MTU 575 */ 576 577 void sctp_transport_burst_limited(struct sctp_transport *t) 578 { 579 struct sctp_association *asoc = t->asoc; 580 u32 old_cwnd = t->cwnd; 581 u32 max_burst_bytes; 582 583 if (t->burst_limited) 584 return; 585 586 max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu); 587 if (max_burst_bytes < old_cwnd) { 588 t->cwnd = max_burst_bytes; 589 t->burst_limited = old_cwnd; 590 } 591 } 592 593 /* Restore the old cwnd congestion window, after the burst had it's 594 * desired effect. 595 */ 596 void sctp_transport_burst_reset(struct sctp_transport *t) 597 { 598 if (t->burst_limited) { 599 t->cwnd = t->burst_limited; 600 t->burst_limited = 0; 601 } 602 } 603 604 /* What is the next timeout value for this transport? */ 605 unsigned long sctp_transport_timeout(struct sctp_transport *t) 606 { 607 unsigned long timeout; 608 timeout = t->rto + sctp_jitter(t->rto); 609 if ((t->state != SCTP_UNCONFIRMED) && 610 (t->state != SCTP_PF)) 611 timeout += t->hbinterval; 612 timeout += jiffies; 613 return timeout; 614 } 615 616 /* Reset transport variables to their initial values */ 617 void sctp_transport_reset(struct sctp_transport *t) 618 { 619 struct sctp_association *asoc = t->asoc; 620 621 /* RFC 2960 (bis), Section 5.2.4 622 * All the congestion control parameters (e.g., cwnd, ssthresh) 623 * related to this peer MUST be reset to their initial values 624 * (see Section 6.2.1) 625 */ 626 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); 627 t->burst_limited = 0; 628 t->ssthresh = asoc->peer.i.a_rwnd; 629 t->rto = asoc->rto_initial; 630 sctp_max_rto(asoc, t); 631 t->rtt = 0; 632 t->srtt = 0; 633 t->rttvar = 0; 634 635 /* Reset these additional varibles so that we have a clean 636 * slate. 637 */ 638 t->partial_bytes_acked = 0; 639 t->flight_size = 0; 640 t->error_count = 0; 641 t->rto_pending = 0; 642 t->hb_sent = 0; 643 644 /* Initialize the state information for SFR-CACC */ 645 t->cacc.changeover_active = 0; 646 t->cacc.cycling_changeover = 0; 647 t->cacc.next_tsn_at_change = 0; 648 t->cacc.cacc_saw_newack = 0; 649 } 650 651 /* Schedule retransmission on the given transport */ 652 void sctp_transport_immediate_rtx(struct sctp_transport *t) 653 { 654 /* Stop pending T3_rtx_timer */ 655 if (del_timer(&t->T3_rtx_timer)) 656 sctp_transport_put(t); 657 658 sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX); 659 if (!timer_pending(&t->T3_rtx_timer)) { 660 if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto)) 661 sctp_transport_hold(t); 662 } 663 return; 664 } 665