/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement the sctp_outq class.  The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Perry Melange         <pmelange@null.cc.uic.edu>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/list.h>   /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <net/sock.h>	  /* For skb_set_owner_w */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Declare internal functions here.  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn,
			      int count_of_newacks);

static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);

static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);

/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}

/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
	struct sctp_chunk *ch = NULL;

	if (!list_empty(&q->out_chunk_list)) {
		struct list_head *entry = q->out_chunk_list.next;

		ch = list_entry(entry, struct sctp_chunk, list);
		list_del_init(entry);
		q->out_qlen -= ch->skb->len;
	}
	return ch;
}

/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add_tail(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}

/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks >= 2 && transport != primary)
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent. If cacc_saw_newack
 * is 0 for destination d, then the sender MUST NOT
 * increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks < 2 &&
	    (transport && !transport->cacc.cacc_saw_newack))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack
 */
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
				     struct sctp_transport *transport,
				     int count_of_newacks)
{
	if (!primary->cacc.cycling_changeover) {
		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
			return 1;
		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
			return 1;
		return 0;
	}
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
	if (primary->cacc.cycling_changeover &&
	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented according to [RFC2960] and
 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
 * then the sender MUST further execute steps 3.1 and
 * 3.2 to determine if the missing report count for
 * TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 * report count for t should not be incremented, then
 * the sender SHOULD increment missing report count for
 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 */
static inline int sctp_cacc_skip(struct sctp_transport *primary,
				 struct sctp_transport *transport,
				 int count_of_newacks,
				 __u32 tsn)
{
	if (primary->cacc.changeover_active &&
	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
	     sctp_cacc_skip_3_2(primary, tsn)))
		return 1;
	return 0;
}

/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
	memset(q, 0, sizeof(struct sctp_outq));

	q->asoc = asoc;
	INIT_LIST_HEAD(&q->out_chunk_list);
	INIT_LIST_HEAD(&q->control_chunk_list);
	INIT_LIST_HEAD(&q->retransmit);
	INIT_LIST_HEAD(&q->sacked);
	INIT_LIST_HEAD(&q->abandoned);

	q->empty = 1;
}

/* Free the outqueue structure and any related pending chunks.
 */
static void __sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *temp;
	struct sctp_chunk *chunk, *tmp;

	/* Throw away unacknowledged chunks. */
	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
			    transports) {
		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_chunk_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks that are in the abandoned queue. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks. */
	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {

		/* Mark as send failure. */
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover control chunks. */
	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

void sctp_outq_teardown(struct sctp_outq *q)
{
	__sctp_outq_teardown(q);
	sctp_outq_init(q->asoc, q);
}

/* Free the outqueue structure and any related pending chunks.  */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	__sctp_outq_teardown(q);
}
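
/*
 * Rough usage sketch (illustrative only; the real callers, locking and
 * exact ordering live in the state machine code, not in this file):
 *
 *	sctp_outq_init(asoc, q);
 *	sctp_outq_tail(q, chunk);	queue or send an outgoing chunk
 *	sctp_outq_sack(q, sack_chunk);	credit/free chunks covered by a SACK
 *	sctp_outq_free(q);		drop anything still pending
 */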
313 */ 314 switch (q->asoc->state) { 315 case SCTP_STATE_CLOSED: 316 case SCTP_STATE_SHUTDOWN_PENDING: 317 case SCTP_STATE_SHUTDOWN_SENT: 318 case SCTP_STATE_SHUTDOWN_RECEIVED: 319 case SCTP_STATE_SHUTDOWN_ACK_SENT: 320 /* Cannot send after transport endpoint shutdown */ 321 error = -ESHUTDOWN; 322 break; 323 324 default: 325 pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n", 326 __func__, q, chunk, chunk && chunk->chunk_hdr ? 327 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : 328 "illegal chunk"); 329 330 sctp_outq_tail_data(q, chunk); 331 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) 332 SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); 333 else 334 SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS); 335 q->empty = 0; 336 break; 337 } 338 } else { 339 list_add_tail(&chunk->list, &q->control_chunk_list); 340 SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); 341 } 342 343 if (error < 0) 344 return error; 345 346 if (!q->cork) 347 error = sctp_outq_flush(q, 0); 348 349 return error; 350 } 351 352 /* Insert a chunk into the sorted list based on the TSNs. The retransmit list 353 * and the abandoned list are in ascending order. 354 */ 355 static void sctp_insert_list(struct list_head *head, struct list_head *new) 356 { 357 struct list_head *pos; 358 struct sctp_chunk *nchunk, *lchunk; 359 __u32 ntsn, ltsn; 360 int done = 0; 361 362 nchunk = list_entry(new, struct sctp_chunk, transmitted_list); 363 ntsn = ntohl(nchunk->subh.data_hdr->tsn); 364 365 list_for_each(pos, head) { 366 lchunk = list_entry(pos, struct sctp_chunk, transmitted_list); 367 ltsn = ntohl(lchunk->subh.data_hdr->tsn); 368 if (TSN_lt(ntsn, ltsn)) { 369 list_add(new, pos->prev); 370 done = 1; 371 break; 372 } 373 } 374 if (!done) 375 list_add_tail(new, head); 376 } 377 378 /* Mark all the eligible packets on a transport for retransmission. */ 379 void sctp_retransmit_mark(struct sctp_outq *q, 380 struct sctp_transport *transport, 381 __u8 reason) 382 { 383 struct list_head *lchunk, *ltemp; 384 struct sctp_chunk *chunk; 385 386 /* Walk through the specified transmitted queue. */ 387 list_for_each_safe(lchunk, ltemp, &transport->transmitted) { 388 chunk = list_entry(lchunk, struct sctp_chunk, 389 transmitted_list); 390 391 /* If the chunk is abandoned, move it to abandoned list. */ 392 if (sctp_chunk_abandoned(chunk)) { 393 list_del_init(lchunk); 394 sctp_insert_list(&q->abandoned, lchunk); 395 396 /* If this chunk has not been previousely acked, 397 * stop considering it 'outstanding'. Our peer 398 * will most likely never see it since it will 399 * not be retransmitted 400 */ 401 if (!chunk->tsn_gap_acked) { 402 if (chunk->transport) 403 chunk->transport->flight_size -= 404 sctp_data_size(chunk); 405 q->outstanding_bytes -= sctp_data_size(chunk); 406 q->asoc->peer.rwnd += sctp_data_size(chunk); 407 } 408 continue; 409 } 410 411 /* If we are doing retransmission due to a timeout or pmtu 412 * discovery, only the chunks that are not yet acked should 413 * be added to the retransmit queue. 414 */ 415 if ((reason == SCTP_RTXR_FAST_RTX && 416 (chunk->fast_retransmit == SCTP_NEED_FRTX)) || 417 (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) { 418 /* RFC 2960 6.2.1 Processing a Received SACK 419 * 420 * C) Any time a DATA chunk is marked for 421 * retransmission (via either T3-rtx timer expiration 422 * (Section 6.3.3) or via fast retransmit 423 * (Section 7.2.4)), add the data size of those 424 * chunks to the rwnd. 
425 */ 426 q->asoc->peer.rwnd += sctp_data_size(chunk); 427 q->outstanding_bytes -= sctp_data_size(chunk); 428 if (chunk->transport) 429 transport->flight_size -= sctp_data_size(chunk); 430 431 /* sctpimpguide-05 Section 2.8.2 432 * M5) If a T3-rtx timer expires, the 433 * 'TSN.Missing.Report' of all affected TSNs is set 434 * to 0. 435 */ 436 chunk->tsn_missing_report = 0; 437 438 /* If a chunk that is being used for RTT measurement 439 * has to be retransmitted, we cannot use this chunk 440 * anymore for RTT measurements. Reset rto_pending so 441 * that a new RTT measurement is started when a new 442 * data chunk is sent. 443 */ 444 if (chunk->rtt_in_progress) { 445 chunk->rtt_in_progress = 0; 446 transport->rto_pending = 0; 447 } 448 449 chunk->resent = 1; 450 451 /* Move the chunk to the retransmit queue. The chunks 452 * on the retransmit queue are always kept in order. 453 */ 454 list_del_init(lchunk); 455 sctp_insert_list(&q->retransmit, lchunk); 456 } 457 } 458 459 pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, " 460 "flight_size:%d, pba:%d\n", __func__, transport, reason, 461 transport->cwnd, transport->ssthresh, transport->flight_size, 462 transport->partial_bytes_acked); 463 } 464 465 /* Mark all the eligible packets on a transport for retransmission and force 466 * one packet out. 467 */ 468 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, 469 sctp_retransmit_reason_t reason) 470 { 471 struct net *net = sock_net(q->asoc->base.sk); 472 int error = 0; 473 474 switch(reason) { 475 case SCTP_RTXR_T3_RTX: 476 SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS); 477 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX); 478 /* Update the retran path if the T3-rtx timer has expired for 479 * the current retran path. 480 */ 481 if (transport == transport->asoc->peer.retran_path) 482 sctp_assoc_update_retran_path(transport->asoc); 483 transport->asoc->rtx_data_chunks += 484 transport->asoc->unack_data; 485 break; 486 case SCTP_RTXR_FAST_RTX: 487 SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS); 488 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX); 489 q->fast_rtx = 1; 490 break; 491 case SCTP_RTXR_PMTUD: 492 SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS); 493 break; 494 case SCTP_RTXR_T1_RTX: 495 SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS); 496 transport->asoc->init_retries++; 497 break; 498 default: 499 BUG(); 500 } 501 502 sctp_retransmit_mark(q, transport, reason); 503 504 /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination, 505 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by 506 * following the procedures outlined in C1 - C5. 507 */ 508 if (reason == SCTP_RTXR_T3_RTX) 509 sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point); 510 511 /* Flush the queues only on timeout, since fast_rtx is only 512 * triggered during sack processing and the queue 513 * will be flushed at the end. 514 */ 515 if (reason != SCTP_RTXR_FAST_RTX) 516 error = sctp_outq_flush(q, /* rtx_timeout */ 1); 517 518 if (error) 519 q->asoc->base.sk->sk_err = -error; 520 } 521 522 /* 523 * Transmit DATA chunks on the retransmit queue. Upon return from 524 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which 525 * need to be transmitted by the caller. 526 * We assume that pkt->transport has already been set. 527 * 528 * The return value is a normal kernel error return value. 
529 */ 530 static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, 531 int rtx_timeout, int *start_timer) 532 { 533 struct list_head *lqueue; 534 struct sctp_transport *transport = pkt->transport; 535 sctp_xmit_t status; 536 struct sctp_chunk *chunk, *chunk1; 537 int fast_rtx; 538 int error = 0; 539 int timer = 0; 540 int done = 0; 541 542 lqueue = &q->retransmit; 543 fast_rtx = q->fast_rtx; 544 545 /* This loop handles time-out retransmissions, fast retransmissions, 546 * and retransmissions due to opening of whindow. 547 * 548 * RFC 2960 6.3.3 Handle T3-rtx Expiration 549 * 550 * E3) Determine how many of the earliest (i.e., lowest TSN) 551 * outstanding DATA chunks for the address for which the 552 * T3-rtx has expired will fit into a single packet, subject 553 * to the MTU constraint for the path corresponding to the 554 * destination transport address to which the retransmission 555 * is being sent (this may be different from the address for 556 * which the timer expires [see Section 6.4]). Call this value 557 * K. Bundle and retransmit those K DATA chunks in a single 558 * packet to the destination endpoint. 559 * 560 * [Just to be painfully clear, if we are retransmitting 561 * because a timeout just happened, we should send only ONE 562 * packet of retransmitted data.] 563 * 564 * For fast retransmissions we also send only ONE packet. However, 565 * if we are just flushing the queue due to open window, we'll 566 * try to send as much as possible. 567 */ 568 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) { 569 /* If the chunk is abandoned, move it to abandoned list. */ 570 if (sctp_chunk_abandoned(chunk)) { 571 list_del_init(&chunk->transmitted_list); 572 sctp_insert_list(&q->abandoned, 573 &chunk->transmitted_list); 574 continue; 575 } 576 577 /* Make sure that Gap Acked TSNs are not retransmitted. A 578 * simple approach is just to move such TSNs out of the 579 * way and into a 'transmitted' queue and skip to the 580 * next chunk. 581 */ 582 if (chunk->tsn_gap_acked) { 583 list_move_tail(&chunk->transmitted_list, 584 &transport->transmitted); 585 continue; 586 } 587 588 /* If we are doing fast retransmit, ignore non-fast_rtransmit 589 * chunks 590 */ 591 if (fast_rtx && !chunk->fast_retransmit) 592 continue; 593 594 redo: 595 /* Attempt to append this chunk to the packet. */ 596 status = sctp_packet_append_chunk(pkt, chunk); 597 598 switch (status) { 599 case SCTP_XMIT_PMTU_FULL: 600 if (!pkt->has_data && !pkt->has_cookie_echo) { 601 /* If this packet did not contain DATA then 602 * retransmission did not happen, so do it 603 * again. We'll ignore the error here since 604 * control chunks are already freed so there 605 * is nothing we can do. 606 */ 607 sctp_packet_transmit(pkt); 608 goto redo; 609 } 610 611 /* Send this packet. */ 612 error = sctp_packet_transmit(pkt); 613 614 /* If we are retransmitting, we should only 615 * send a single packet. 616 * Otherwise, try appending this chunk again. 617 */ 618 if (rtx_timeout || fast_rtx) 619 done = 1; 620 else 621 goto redo; 622 623 /* Bundle next chunk in the next round. */ 624 break; 625 626 case SCTP_XMIT_RWND_FULL: 627 /* Send this packet. */ 628 error = sctp_packet_transmit(pkt); 629 630 /* Stop sending DATA as there is no more room 631 * at the receiver. 632 */ 633 done = 1; 634 break; 635 636 case SCTP_XMIT_NAGLE_DELAY: 637 /* Send this packet. */ 638 error = sctp_packet_transmit(pkt); 639 640 /* Stop sending DATA because of nagle delay. 
			done = 1;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
				chunk->fast_retransmit = SCTP_DONT_FRTX;

			q->empty = 0;
			q->asoc->stats.rtxchunks++;
			break;
		}

		/* Set the timer if there were no errors */
		if (!error && !timer)
			timer = 1;

		if (done)
			break;
	}

	/* If we are here due to a retransmit timeout or a fast
	 * retransmit and if there are any chunks left in the retransmit
	 * queue that could not fit in the PMTU sized packet, they need
	 * to be marked as ineligible for a subsequent fast retransmit.
	 */
	if (rtx_timeout || fast_rtx) {
		list_for_each_entry(chunk1, lqueue, transmitted_list) {
			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
				chunk1->fast_retransmit = SCTP_DONT_FRTX;
		}
	}

	*start_timer = timer;

	/* Clear fast retransmit hint */
	if (fast_rtx)
		q->fast_rtx = 0;

	return error;
}

/* Uncork the outqueue: flush anything that was queued up while the queue
 * was corked.
 */
int sctp_outq_uncork(struct sctp_outq *q)
{
	if (q->cork)
		q->cork = 0;

	return sctp_outq_flush(q, 0);
}
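
/*
 * Rough corking sketch (illustrative only; the actual corking is driven by
 * the state machine side effects, which also hold the socket lock):
 *
 *	q->cork = 1;			defer flushing
 *	sctp_outq_tail(q, chunk1);	DATA piles up instead of going out
 *	sctp_outq_tail(q, chunk2);
 *	sctp_outq_uncork(q);		clears the cork and flushes the queue
 */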

/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 * Note: This function can be called from multiple contexts so appropriate
 * locking concerns must be made.  Today we use the sock lock to protect
 * this function.
 */
static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
	struct sctp_packet *packet;
	struct sctp_packet singleton;
	struct sctp_association *asoc = q->asoc;
	__u16 sport = asoc->base.bind_addr.port;
	__u16 dport = asoc->peer.port;
	__u32 vtag = asoc->peer.i.init_tag;
	struct sctp_transport *transport = NULL;
	struct sctp_transport *new_transport;
	struct sctp_chunk *chunk, *tmp;
	sctp_xmit_t status;
	int error = 0;
	int start_timer = 0;
	int one_packet = 0;

	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct list_head *ltransport;

	INIT_LIST_HEAD(&transport_list);
	packet = NULL;

	/*
	 * 6.10 Bundling
	 *   ...
	 *   When bundling control chunks with DATA chunks, an
	 *   endpoint MUST place control chunks first in the outbound
	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
	 *   within a SCTP packet in increasing order of TSN.
	 *   ...
	 */

	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		/* RFC 5061, 5.3
		 * F1) This means that until such time as the ASCONF
		 * containing the add is acknowledged, the sender MUST
		 * NOT use the new IP address as a source for ANY SCTP
		 * packet except on carrying an ASCONF Chunk.
		 */
		if (asoc->src_out_of_asoc_ok &&
		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
			continue;

		list_del_init(&chunk->list);

		/* Pick the right transport to use. */
		new_transport = chunk->transport;

		if (!new_transport) {
			/*
			 * If we have a prior transport pointer, see if
			 * the destination address of the chunk
			 * matches the destination address of the
			 * current transport.  If not a match, then
			 * try to look up the transport with a given
			 * destination address.  We do this because
			 * after processing ASCONFs, we may have new
			 * transports created.
			 */
			if (transport &&
			    sctp_cmp_addr_exact(&chunk->dest,
						&transport->ipaddr))
				new_transport = transport;
			else
				new_transport = sctp_assoc_lookup_paddr(asoc,
								&chunk->dest);

			/* if we still don't have a new transport, then
			 * use the current active path.
			 */
			if (!new_transport)
				new_transport = asoc->peer.active_path;
		} else if ((new_transport->state == SCTP_INACTIVE) ||
			   (new_transport->state == SCTP_UNCONFIRMED) ||
			   (new_transport->state == SCTP_PF)) {
			/* If the chunk is Heartbeat or Heartbeat Ack,
			 * send it to chunk->transport, even if it's
			 * inactive.
			 *
			 * 3.3.6 Heartbeat Acknowledgement:
			 * ...
			 * A HEARTBEAT ACK is always sent to the source IP
			 * address of the IP datagram containing the
			 * HEARTBEAT chunk to which this ack is responding.
			 * ...
			 *
			 * ASCONF_ACKs also must be sent to the source.
			 */
			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
				new_transport = asoc->peer.active_path;
		}

		/* Are we switching transports?
		 * Take care of transport locks.
		 */
		if (new_transport != transport) {
			transport = new_transport;
			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}
			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		}

		switch (chunk->chunk_hdr->type) {
		/*
		 * 6.10 Bundling
		 *   ...
		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 *   COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			sctp_packet_init(&singleton, transport, sport, dport);
			sctp_packet_config(&singleton, vtag, 0);
			sctp_packet_append_chunk(&singleton, chunk);
			error = sctp_packet_transmit(&singleton);
			if (error < 0)
				return error;
			break;

		case SCTP_CID_ABORT:
			if (sctp_test_T_bit(chunk)) {
				packet->vtag = asoc->c.my_vtag;
			}
		/* The following chunks are "response" chunks, i.e.
		 * they are generated in response to something we
		 * received.  If we are sending these, then we can
		 * send only 1 packet containing these chunks.
		 */
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_ERROR:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF_ACK:
			one_packet = 1;
			/* Fall through */

		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ASCONF:
		case SCTP_CID_FWD_TSN:
			status = sctp_packet_transmit_chunk(packet, chunk,
							    one_packet);
			if (status != SCTP_XMIT_OK) {
				/* put the chunk back */
				list_add(&chunk->list, &q->control_chunk_list);
			} else {
				asoc->stats.octrlchunks++;
				/* PR-SCTP C5) If a FORWARD TSN is sent, the
				 * sender MUST assure that at least one T3-rtx
				 * timer is running.
				 */
				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
					sctp_transport_reset_timers(transport);
			}
			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}

	if (q->asoc->src_out_of_asoc_ok)
		goto sctp_flush_out;

	/* Is it OK to send data chunks?  */
	switch (asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!packet || !packet->has_cookie_echo)
			break;

		/* fallthru */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		/*
		 * RFC 2960 6.1  Transmission of DATA Chunks
		 *
		 * C) When the time comes for the sender to transmit,
		 * before sending new DATA chunks, the sender MUST
		 * first transmit any outstanding DATA chunks which
		 * are marked for retransmission (limited by the
		 * current cwnd).
		 */
		if (!list_empty(&q->retransmit)) {
			if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
				goto sctp_flush_out;
			if (transport == asoc->peer.retran_path)
				goto retran;

			/* Switch transports & prepare the packet.  */

			transport = asoc->peer.retran_path;

			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}

			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		retran:
			error = sctp_outq_flush_rtx(q, packet,
						    rtx_timeout, &start_timer);

			if (start_timer)
				sctp_transport_reset_timers(transport);

			/* This can happen on COOKIE-ECHO resend.  Only
			 * one chunk can get bundled with a COOKIE-ECHO.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;

			/* Don't send new data if there is still data
			 * waiting to retransmit.
			 */
			if (!list_empty(&q->retransmit))
				goto sctp_flush_out;
		}

		/* Apply Max.Burst limitation to the current transport in
		 * case it will be used for new data.  We are going to
		 * reset it before we return, but we want to apply the limit
		 * to the currently queued data.
		 */
		if (transport)
			sctp_transport_burst_limited(transport);

		/* Finally, transmit new packets.  */
		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
			 * stream identifier.
			 */
			if (chunk->sinfo.sinfo_stream >=
			    asoc->c.sinit_num_ostreams) {

				/* Mark as failed send. */
				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
				sctp_chunk_free(chunk);
				continue;
			}

			/* Has this chunk expired? */
			if (sctp_chunk_abandoned(chunk)) {
				sctp_chunk_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			/* If there is a specified transport, use it.
			 * Otherwise, we want to use the active path.
			 */
			new_transport = chunk->transport;
			if (!new_transport ||
			    ((new_transport->state == SCTP_INACTIVE) ||
			     (new_transport->state == SCTP_UNCONFIRMED) ||
			     (new_transport->state == SCTP_PF)))
				new_transport = asoc->peer.active_path;
			if (new_transport->state == SCTP_UNCONFIRMED)
				continue;

			/* Change packets if necessary.  */
			if (new_transport != transport) {
				transport = new_transport;

				/* Schedule to have this transport's
				 * packet flushed.
				 */
				if (list_empty(&transport->send_ready)) {
					list_add_tail(&transport->send_ready,
						      &transport_list);
				}

				packet = &transport->packet;
				sctp_packet_config(packet, vtag,
						   asoc->peer.ecn_capable);
				/* We've switched transports, so apply the
				 * Burst limit to the new transport.
				 */
				sctp_transport_burst_limited(transport);
			}

			pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
				 "skb->users:%d\n",
				 __func__, q, chunk, chunk && chunk->chunk_hdr ?
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
				 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
				 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
				 atomic_read(&chunk->skb->users) : -1);

			/* Add the chunk to the packet.  */
			status = sctp_packet_transmit_chunk(packet, chunk, 0);

			switch (status) {
			case SCTP_XMIT_PMTU_FULL:
			case SCTP_XMIT_RWND_FULL:
			case SCTP_XMIT_NAGLE_DELAY:
				/* We could not append this chunk, so put
				 * the chunk back on the output queue.
				 */
				pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
					 __func__, ntohl(chunk->subh.data_hdr->tsn),
					 status);

				sctp_outq_head_data(q, chunk);
				goto sctp_flush_out;
				break;

			case SCTP_XMIT_OK:
				/* If the sender is in the SHUTDOWN-PENDING
				 * state, it MAY set the I-bit in the DATA
				 * chunk header.
				 */
				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
				if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
					asoc->stats.ouodchunks++;
				else
					asoc->stats.oodchunks++;

				break;

			default:
				BUG();
			}

			/* BUG: We assume that the sctp_packet_transmit()
			 * call below will succeed all the time and add the
			 * chunk to the transmitted list and restart the
			 * timers.
			 * It is possible that the call can fail under OOM
			 * conditions.
			 *
			 * Is this really a problem?  Won't this behave
			 * like a lost TSN?
			 */
			list_add_tail(&chunk->transmitted_list,
				      &transport->transmitted);

			sctp_transport_reset_timers(transport);

			q->empty = 0;

			/* Only let one DATA chunk get bundled with a
			 * COOKIE-ECHO chunk.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;
		}
		break;

	default:
		/* Do nothing. */
		break;
	}

sctp_flush_out:

	/* Before returning, examine all the transports touched in
	 * this call.  Right now, we bluntly force clear all the
	 * transports.  Things might change after we implement Nagle.
	 * But such an examination is still required.
	 *
	 * --xguo
	 */
	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
		struct sctp_transport *t = list_entry(ltransport,
						      struct sctp_transport,
						      send_ready);
		packet = &t->packet;
		if (!sctp_packet_empty(packet))
			error = sctp_packet_transmit(packet);

		/* Clear the burst limited state, if any */
		sctp_transport_burst_reset(t);
	}

	return error;
}

/* Update unack_data based on the incoming SACK chunk */
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
					struct sctp_sackhdr *sack)
{
	sctp_sack_variable_t *frags;
	__u16 unack_data;
	int i;

	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;

	frags = sack->variable;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
		unack_data -= ((ntohs(frags[i].gab.end) -
				ntohs(frags[i].gab.start) + 1));
	}

	assoc->unack_data = unack_data;
}
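
/*
 * Worked example of the arithmetic above (illustrative numbers only):
 * with next_tsn = 110 and ctsn_ack_point = 100, TSNs 101..109 are
 * unacknowledged, so unack_data starts at 110 - 100 - 1 = 9.  A single
 * gap ack block with start = 3 and end = 5 covers TSNs 103..105, that is
 * 5 - 3 + 1 = 3 chunks, leaving unack_data = 6.
 */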

/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *temp;
	sctp_sack_variable_t *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned int outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;
	int gap_ack_blocks;
	u8 accum_moved = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);
	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
	asoc->stats.gapcnt += gap_ack_blocks;
	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 *
	 * Only bother if changeover_active is set. Otherwise, this is
	 * totally suboptimal to do on every SACK.
	 */
	if (primary->cacc.changeover_active) {
		u8 clear_cycling = 0;

		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
			primary->cacc.changeover_active = 0;
			clear_cycling = 1;
		}

		if (clear_cycling || gap_ack_blocks) {
			list_for_each_entry(transport, transport_list,
					    transports) {
				if (clear_cycling)
					transport->cacc.cycling_changeover = 0;
				if (gap_ack_blocks)
					transport->cacc.cacc_saw_newack = 0;
			}
		}
	}

	/* Get the highest TSN in the sack. */
	highest_tsn = sack_ctsn;
	if (gap_ack_blocks)
		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn))
		asoc->highest_sacked = highest_tsn;

	highest_new_tsn = sack_ctsn;

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each_entry(transport, transport_list, transports) {
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, &chunk->source, sack,
				       &highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	/* Move the Cumulative TSN Ack Point if appropriate.  */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
		asoc->ctsn_ack_point = sack_ctsn;
		accum_moved = 1;
	}

	if (gap_ack_blocks) {

		if (asoc->fast_recovery && accum_moved)
			highest_new_tsn = highest_tsn;

		list_for_each_entry(transport, transport_list, transports)
			sctp_mark_missing(q, &transport->transmitted, transport,
					  highest_new_tsn, count_of_newacks);
	}

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away stuff rotting on the sack queue.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(&tchunk->transmitted_list);
			sctp_chunk_free(tchunk);
		}
	}

	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 *     number of bytes still outstanding after processing the
	 *     Cumulative TSN Ack and the Gap Ack Blocks.
	 */

	sack_a_rwnd = ntohl(sack->a_rwnd);
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;

	sctp_generate_fwdtsn(q, sack_ctsn);

	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
		 asoc->adv_peer_ack_point);

	/* See if all chunks are acked.
	 * Make sure the empty queue handler will get run later.
	 */
	q->empty = (list_empty(&q->out_chunk_list) &&
		    list_empty(&q->retransmit));
	if (!q->empty)
		goto finish;

	list_for_each_entry(transport, transport_list, transports) {
		q->empty = q->empty && list_empty(&transport->transmitted);
		if (!q->empty)
			goto finish;
	}

	pr_debug("%s: sack queue is empty\n", __func__);
finish:
	return q->empty;
}

/* Is the outqueue empty?  */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
	return q->empty;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
 * The retransmit list will not have an associated transport.
 *
 * I added coherent debug information output.	--xguo
 *
 * Instead of printing 'sacked' or 'kept' for each TSN on the
 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
 * KEPT TSN6-TSN7, etc.
 */
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn_in_sack)
{
	struct list_head *lchunk;
	struct sctp_chunk *tchunk;
	struct list_head tlist;
	__u32 tsn;
	__u32 sack_ctsn;
	__u32 rtt;
	__u8 restart_timer = 0;
	int bytes_acked = 0;
	int migrate_bytes = 0;
	bool forward_progress = false;

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	INIT_LIST_HEAD(&tlist);

	/* The while loop will skip empty transmitted queues. */
	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);

		if (sctp_chunk_abandoned(tchunk)) {
			/* Move the chunk to abandoned list. */
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been acked, stop
			 * considering it as 'outstanding'.
			 */
			if (!tchunk->tsn_gap_acked) {
				if (tchunk->transport)
					tchunk->transport->flight_size -=
							sctp_data_size(tchunk);
				q->outstanding_bytes -= sctp_data_size(tchunk);
			}
			continue;
		}

		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (sctp_acked(sack, tsn)) {
			/* If this queue is the retransmit queue, the
			 * retransmit timer has already reclaimed
			 * the outstanding bytes for this chunk, so only
			 * count bytes associated with a transport.
			 */
			if (transport) {
				/* If this chunk is being used for RTT
				 * measurement, calculate the RTT and update
				 * the RTO using this value.
				 *
				 * 6.3.1 C5) Karn's algorithm: RTT measurements
				 * MUST NOT be made using packets that were
				 * retransmitted (and thus for which it is
				 * ambiguous whether the reply was for the
				 * first instance of the packet or a later
				 * instance).
				 */
				if (!tchunk->tsn_gap_acked &&
				    !tchunk->resent &&
				    tchunk->rtt_in_progress) {
					tchunk->rtt_in_progress = 0;
					rtt = jiffies - tchunk->sent_at;
					sctp_transport_update_rto(transport,
								  rtt);
				}
			}

			/* If the chunk hasn't been marked as ACKED,
			 * mark it and account bytes_acked if the
			 * chunk had a valid transport (it will not
			 * have a transport if ASCONF had deleted it
			 * while DATA was outstanding).
			 */
			if (!tchunk->tsn_gap_acked) {
				tchunk->tsn_gap_acked = 1;
				if (TSN_lt(*highest_new_tsn_in_sack, tsn))
					*highest_new_tsn_in_sack = tsn;
				bytes_acked += sctp_data_size(tchunk);
				if (!tchunk->transport)
					migrate_bytes += sctp_data_size(tchunk);
				forward_progress = true;
			}

			if (TSN_lte(tsn, sack_ctsn)) {
				/* RFC 2960  6.3.2 Retransmission Timer Rules
				 *
				 * R3) Whenever a SACK is received
				 * that acknowledges the DATA chunk
				 * with the earliest outstanding TSN
				 * for that address, restart T3-rtx
				 * timer for that address with its
				 * current RTO.
				 */
1414 */ 1415 restart_timer = 1; 1416 forward_progress = true; 1417 1418 if (!tchunk->tsn_gap_acked) { 1419 /* 1420 * SFR-CACC algorithm: 1421 * 2) If the SACK contains gap acks 1422 * and the flag CHANGEOVER_ACTIVE is 1423 * set the receiver of the SACK MUST 1424 * take the following action: 1425 * 1426 * B) For each TSN t being acked that 1427 * has not been acked in any SACK so 1428 * far, set cacc_saw_newack to 1 for 1429 * the destination that the TSN was 1430 * sent to. 1431 */ 1432 if (transport && 1433 sack->num_gap_ack_blocks && 1434 q->asoc->peer.primary_path->cacc. 1435 changeover_active) 1436 transport->cacc.cacc_saw_newack 1437 = 1; 1438 } 1439 1440 list_add_tail(&tchunk->transmitted_list, 1441 &q->sacked); 1442 } else { 1443 /* RFC2960 7.2.4, sctpimpguide-05 2.8.2 1444 * M2) Each time a SACK arrives reporting 1445 * 'Stray DATA chunk(s)' record the highest TSN 1446 * reported as newly acknowledged, call this 1447 * value 'HighestTSNinSack'. A newly 1448 * acknowledged DATA chunk is one not 1449 * previously acknowledged in a SACK. 1450 * 1451 * When the SCTP sender of data receives a SACK 1452 * chunk that acknowledges, for the first time, 1453 * the receipt of a DATA chunk, all the still 1454 * unacknowledged DATA chunks whose TSN is 1455 * older than that newly acknowledged DATA 1456 * chunk, are qualified as 'Stray DATA chunks'. 1457 */ 1458 list_add_tail(lchunk, &tlist); 1459 } 1460 } else { 1461 if (tchunk->tsn_gap_acked) { 1462 pr_debug("%s: receiver reneged on data TSN:0x%x\n", 1463 __func__, tsn); 1464 1465 tchunk->tsn_gap_acked = 0; 1466 1467 if (tchunk->transport) 1468 bytes_acked -= sctp_data_size(tchunk); 1469 1470 /* RFC 2960 6.3.2 Retransmission Timer Rules 1471 * 1472 * R4) Whenever a SACK is received missing a 1473 * TSN that was previously acknowledged via a 1474 * Gap Ack Block, start T3-rtx for the 1475 * destination address to which the DATA 1476 * chunk was originally 1477 * transmitted if it is not already running. 1478 */ 1479 restart_timer = 1; 1480 } 1481 1482 list_add_tail(lchunk, &tlist); 1483 } 1484 } 1485 1486 if (transport) { 1487 if (bytes_acked) { 1488 struct sctp_association *asoc = transport->asoc; 1489 1490 /* We may have counted DATA that was migrated 1491 * to this transport due to DEL-IP operation. 1492 * Subtract those bytes, since the were never 1493 * send on this transport and shouldn't be 1494 * credited to this transport. 1495 */ 1496 bytes_acked -= migrate_bytes; 1497 1498 /* 8.2. When an outstanding TSN is acknowledged, 1499 * the endpoint shall clear the error counter of 1500 * the destination transport address to which the 1501 * DATA chunk was last sent. 1502 * The association's overall error counter is 1503 * also cleared. 1504 */ 1505 transport->error_count = 0; 1506 transport->asoc->overall_error_count = 0; 1507 forward_progress = true; 1508 1509 /* 1510 * While in SHUTDOWN PENDING, we may have started 1511 * the T5 shutdown guard timer after reaching the 1512 * retransmission limit. Stop that timer as soon 1513 * as the receiver acknowledged any data. 1514 */ 1515 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING && 1516 del_timer(&asoc->timers 1517 [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD])) 1518 sctp_association_put(asoc); 1519 1520 /* Mark the destination transport address as 1521 * active if it is not so marked. 
1522 */ 1523 if ((transport->state == SCTP_INACTIVE || 1524 transport->state == SCTP_UNCONFIRMED) && 1525 sctp_cmp_addr_exact(&transport->ipaddr, saddr)) { 1526 sctp_assoc_control_transport( 1527 transport->asoc, 1528 transport, 1529 SCTP_TRANSPORT_UP, 1530 SCTP_RECEIVED_SACK); 1531 } 1532 1533 sctp_transport_raise_cwnd(transport, sack_ctsn, 1534 bytes_acked); 1535 1536 transport->flight_size -= bytes_acked; 1537 if (transport->flight_size == 0) 1538 transport->partial_bytes_acked = 0; 1539 q->outstanding_bytes -= bytes_acked + migrate_bytes; 1540 } else { 1541 /* RFC 2960 6.1, sctpimpguide-06 2.15.2 1542 * When a sender is doing zero window probing, it 1543 * should not timeout the association if it continues 1544 * to receive new packets from the receiver. The 1545 * reason is that the receiver MAY keep its window 1546 * closed for an indefinite time. 1547 * A sender is doing zero window probing when the 1548 * receiver's advertised window is zero, and there is 1549 * only one data chunk in flight to the receiver. 1550 * 1551 * Allow the association to timeout while in SHUTDOWN 1552 * PENDING or SHUTDOWN RECEIVED in case the receiver 1553 * stays in zero window mode forever. 1554 */ 1555 if (!q->asoc->peer.rwnd && 1556 !list_empty(&tlist) && 1557 (sack_ctsn+2 == q->asoc->next_tsn) && 1558 q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) { 1559 pr_debug("%s: sack received for zero window " 1560 "probe:%u\n", __func__, sack_ctsn); 1561 1562 q->asoc->overall_error_count = 0; 1563 transport->error_count = 0; 1564 } 1565 } 1566 1567 /* RFC 2960 6.3.2 Retransmission Timer Rules 1568 * 1569 * R2) Whenever all outstanding data sent to an address have 1570 * been acknowledged, turn off the T3-rtx timer of that 1571 * address. 1572 */ 1573 if (!transport->flight_size) { 1574 if (del_timer(&transport->T3_rtx_timer)) 1575 sctp_transport_put(transport); 1576 } else if (restart_timer) { 1577 if (!mod_timer(&transport->T3_rtx_timer, 1578 jiffies + transport->rto)) 1579 sctp_transport_hold(transport); 1580 } 1581 1582 if (forward_progress) { 1583 if (transport->dst) 1584 dst_confirm(transport->dst); 1585 } 1586 } 1587 1588 list_splice(&tlist, transmitted_queue); 1589 } 1590 1591 /* Mark chunks as missing and consequently may get retransmitted. */ 1592 static void sctp_mark_missing(struct sctp_outq *q, 1593 struct list_head *transmitted_queue, 1594 struct sctp_transport *transport, 1595 __u32 highest_new_tsn_in_sack, 1596 int count_of_newacks) 1597 { 1598 struct sctp_chunk *chunk; 1599 __u32 tsn; 1600 char do_fast_retransmit = 0; 1601 struct sctp_association *asoc = q->asoc; 1602 struct sctp_transport *primary = asoc->peer.primary_path; 1603 1604 list_for_each_entry(chunk, transmitted_queue, transmitted_list) { 1605 1606 tsn = ntohl(chunk->subh.data_hdr->tsn); 1607 1608 /* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all 1609 * 'Unacknowledged TSN's', if the TSN number of an 1610 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack' 1611 * value, increment the 'TSN.Missing.Report' count on that 1612 * chunk if it has NOT been fast retransmitted or marked for 1613 * fast retransmit already. 1614 */ 1615 if (chunk->fast_retransmit == SCTP_CAN_FRTX && 1616 !chunk->tsn_gap_acked && 1617 TSN_lt(tsn, highest_new_tsn_in_sack)) { 1618 1619 /* SFR-CACC may require us to skip marking 1620 * this chunk as missing. 
1621 */ 1622 if (!transport || !sctp_cacc_skip(primary, 1623 chunk->transport, 1624 count_of_newacks, tsn)) { 1625 chunk->tsn_missing_report++; 1626 1627 pr_debug("%s: tsn:0x%x missing counter:%d\n", 1628 __func__, tsn, chunk->tsn_missing_report); 1629 } 1630 } 1631 /* 1632 * M4) If any DATA chunk is found to have a 1633 * 'TSN.Missing.Report' 1634 * value larger than or equal to 3, mark that chunk for 1635 * retransmission and start the fast retransmit procedure. 1636 */ 1637 1638 if (chunk->tsn_missing_report >= 3) { 1639 chunk->fast_retransmit = SCTP_NEED_FRTX; 1640 do_fast_retransmit = 1; 1641 } 1642 } 1643 1644 if (transport) { 1645 if (do_fast_retransmit) 1646 sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX); 1647 1648 pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, " 1649 "flight_size:%d, pba:%d\n", __func__, transport, 1650 transport->cwnd, transport->ssthresh, 1651 transport->flight_size, transport->partial_bytes_acked); 1652 } 1653 } 1654 1655 /* Is the given TSN acked by this packet? */ 1656 static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn) 1657 { 1658 int i; 1659 sctp_sack_variable_t *frags; 1660 __u16 gap; 1661 __u32 ctsn = ntohl(sack->cum_tsn_ack); 1662 1663 if (TSN_lte(tsn, ctsn)) 1664 goto pass; 1665 1666 /* 3.3.4 Selective Acknowledgement (SACK) (3): 1667 * 1668 * Gap Ack Blocks: 1669 * These fields contain the Gap Ack Blocks. They are repeated 1670 * for each Gap Ack Block up to the number of Gap Ack Blocks 1671 * defined in the Number of Gap Ack Blocks field. All DATA 1672 * chunks with TSNs greater than or equal to (Cumulative TSN 1673 * Ack + Gap Ack Block Start) and less than or equal to 1674 * (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack 1675 * Block are assumed to have been received correctly. 1676 */ 1677 1678 frags = sack->variable; 1679 gap = tsn - ctsn; 1680 for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) { 1681 if (TSN_lte(ntohs(frags[i].gab.start), gap) && 1682 TSN_lte(gap, ntohs(frags[i].gab.end))) 1683 goto pass; 1684 } 1685 1686 return 0; 1687 pass: 1688 return 1; 1689 } 1690 1691 static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist, 1692 int nskips, __be16 stream) 1693 { 1694 int i; 1695 1696 for (i = 0; i < nskips; i++) { 1697 if (skiplist[i].stream == stream) 1698 return i; 1699 } 1700 return i; 1701 } 1702 1703 /* Create and add a fwdtsn chunk to the outq's control queue if needed. */ 1704 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn) 1705 { 1706 struct sctp_association *asoc = q->asoc; 1707 struct sctp_chunk *ftsn_chunk = NULL; 1708 struct sctp_fwdtsn_skip ftsn_skip_arr[10]; 1709 int nskips = 0; 1710 int skip_pos = 0; 1711 __u32 tsn; 1712 struct sctp_chunk *chunk; 1713 struct list_head *lchunk, *temp; 1714 1715 if (!asoc->peer.prsctp_capable) 1716 return; 1717 1718 /* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the 1719 * received SACK. 1720 * 1721 * If (Advanced.Peer.Ack.Point < SackCumAck), then update 1722 * Advanced.Peer.Ack.Point to be equal to SackCumAck. 
1723 */ 1724 if (TSN_lt(asoc->adv_peer_ack_point, ctsn)) 1725 asoc->adv_peer_ack_point = ctsn; 1726 1727 /* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point" 1728 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as 1729 * the chunk next in the out-queue space is marked as "abandoned" as 1730 * shown in the following example: 1731 * 1732 * Assuming that a SACK arrived with the Cumulative TSN ACK 102 1733 * and the Advanced.Peer.Ack.Point is updated to this value: 1734 * 1735 * out-queue at the end of ==> out-queue after Adv.Ack.Point 1736 * normal SACK processing local advancement 1737 * ... ... 1738 * Adv.Ack.Pt-> 102 acked 102 acked 1739 * 103 abandoned 103 abandoned 1740 * 104 abandoned Adv.Ack.P-> 104 abandoned 1741 * 105 105 1742 * 106 acked 106 acked 1743 * ... ... 1744 * 1745 * In this example, the data sender successfully advanced the 1746 * "Advanced.Peer.Ack.Point" from 102 to 104 locally. 1747 */ 1748 list_for_each_safe(lchunk, temp, &q->abandoned) { 1749 chunk = list_entry(lchunk, struct sctp_chunk, 1750 transmitted_list); 1751 tsn = ntohl(chunk->subh.data_hdr->tsn); 1752 1753 /* Remove any chunks in the abandoned queue that are acked by 1754 * the ctsn. 1755 */ 1756 if (TSN_lte(tsn, ctsn)) { 1757 list_del_init(lchunk); 1758 sctp_chunk_free(chunk); 1759 } else { 1760 if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) { 1761 asoc->adv_peer_ack_point = tsn; 1762 if (chunk->chunk_hdr->flags & 1763 SCTP_DATA_UNORDERED) 1764 continue; 1765 skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], 1766 nskips, 1767 chunk->subh.data_hdr->stream); 1768 ftsn_skip_arr[skip_pos].stream = 1769 chunk->subh.data_hdr->stream; 1770 ftsn_skip_arr[skip_pos].ssn = 1771 chunk->subh.data_hdr->ssn; 1772 if (skip_pos == nskips) 1773 nskips++; 1774 if (nskips == 10) 1775 break; 1776 } else 1777 break; 1778 } 1779 } 1780 1781 /* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point" 1782 * is greater than the Cumulative TSN ACK carried in the received 1783 * SACK, the data sender MUST send the data receiver a FORWARD TSN 1784 * chunk containing the latest value of the 1785 * "Advanced.Peer.Ack.Point". 1786 * 1787 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD 1788 * list each stream and sequence number in the forwarded TSN. This 1789 * information will enable the receiver to easily find any 1790 * stranded TSN's waiting on stream reorder queues. Each stream 1791 * SHOULD only be reported once; this means that if multiple 1792 * abandoned messages occur in the same stream then only the 1793 * highest abandoned stream sequence number is reported. If the 1794 * total size of the FORWARD TSN does NOT fit in a single MTU then 1795 * the sender of the FORWARD TSN SHOULD lower the 1796 * Advanced.Peer.Ack.Point to the last TSN that will fit in a 1797 * single MTU. 1798 */ 1799 if (asoc->adv_peer_ack_point > ctsn) 1800 ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point, 1801 nskips, &ftsn_skip_arr[0]); 1802 1803 if (ftsn_chunk) { 1804 list_add_tail(&ftsn_chunk->list, &q->control_chunk_list); 1805 SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS); 1806 } 1807 } 1808