/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel reference Implementation
 *
 * These functions implement the sctp_outq class.  The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Perry Melange         <pmelange@null.cc.uic.edu>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/list.h>   /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <net/sock.h>	  /* For skb_set_owner_w */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Declare internal functions here.  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   struct sctp_sackhdr *sack,
				   __u32 highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn,
			      int count_of_newacks);

static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);

/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	__skb_queue_head(&q->out, (struct sk_buff *)ch);
	q->out_qlen += ch->skb->len;
}

/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
	struct sctp_chunk *ch;

	ch = (struct sctp_chunk *)__skb_dequeue(&q->out);
	if (ch)
		q->out_qlen -= ch->skb->len;
	return ch;
}
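/* A note on the (struct sk_buff *) casts used by these queue helpers:
 * they rely on struct sctp_chunk mirroring the initial queueing fields
 * of struct sk_buff, which lets the outqueue reuse the generic skb queue
 * primitives (__skb_queue_head() and friends) for lists of chunks.  That
 * layout invariant lives in the definition of struct sctp_chunk, not
 * here, so treat the casts as part of the chunk's ABI.
 */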
/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	__skb_queue_tail(&q->out, (struct sk_buff *)ch);
	q->out_qlen += ch->skb->len;
}

/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks >= 2 && transport != primary)
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent. If cacc_saw_newack
 * is 0 for destination d, then the sender MUST NOT
 * increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks < 2 && !transport->cacc.cacc_saw_newack)
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack
 */
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
				     struct sctp_transport *transport,
				     int count_of_newacks)
{
	if (!primary->cacc.cycling_changeover) {
		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
			return 1;
		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
			return 1;
	}
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
	if (primary->cacc.cycling_changeover &&
	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented according to [RFC2960] and
 * [SCTP_STEWART_2002], and CHANGEOVER_ACTIVE is set,
 * then the sender MUST further execute steps 3.1 and
 * 3.2 to determine if the missing report count for
 * TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 * report count for t should not be incremented, then
 * the sender SHOULD increment missing report count for
 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 */
static inline int sctp_cacc_skip(struct sctp_transport *primary,
				 struct sctp_transport *transport,
				 int count_of_newacks,
				 __u32 tsn)
{
	if (primary->cacc.changeover_active &&
	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
	     sctp_cacc_skip_3_2(primary, tsn)))
		return 1;
	return 0;
}
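/* Illustration (hypothetical numbers): suppose the association has just
 * switched primary paths, so changeover_active is set.  A SACK arrives
 * whose gap ack blocks newly acknowledge TSNs on two different
 * destinations (count_of_newacks == 2).  For a TSN t that was sent on a
 * non-primary transport, sctp_cacc_skip_3_1_d() fires and t's missing
 * report count is NOT incremented -- the "new" acks may simply be
 * reordering caused by the changeover rather than loss.  Once the
 * cumulative ack passes next_tsn_at_change, sctp_outq_sack() clears
 * changeover_active and normal fast-retransmit accounting resumes.
 */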
/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
	q->asoc = asoc;
	skb_queue_head_init(&q->out);
	skb_queue_head_init(&q->control);
	INIT_LIST_HEAD(&q->retransmit);
	INIT_LIST_HEAD(&q->sacked);
	INIT_LIST_HEAD(&q->abandoned);

	q->outstanding_bytes = 0;
	q->empty = 1;
	q->cork = 0;

	q->malloced = 0;
	q->out_qlen = 0;
}

/* Free the outqueue structure and any related pending chunks.
 */
void sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *pos, *temp;
	struct sctp_chunk *chunk;

	/* Throw away unacknowledged chunks. */
	list_for_each(pos, &q->asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_chunk_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed. */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks that are in the abandoned queue. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks. */
	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
		/* Mark as send failure. */
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	q->error = 0;

	/* Throw away any leftover control chunks. */
	while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)) != NULL)
		sctp_chunk_free(chunk);
}

/* Free the outqueue structure and any related pending chunks. */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	sctp_outq_teardown(q);

	/* If we were kmalloc()'d, free the memory.  */
	if (q->malloced)
		kfree(q);
}
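/* Typical lifecycle of an outqueue, sketched for orientation (this is a
 * hypothetical caller; the real call sites live in the association and
 * state machine code):
 *
 *	sctp_outq_init(asoc, q);	// set up the empty queues
 *	...
 *	sctp_outq_tail(q, chunk);	// queue (or immediately send) a chunk
 *	sctp_outq_sack(q, sack);	// reconcile state with a peer SACK
 *	...
 *	sctp_outq_free(q);		// teardown, kfree() if malloced
 */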
/* Put a new chunk in an sctp_outq.  */
int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	int error = 0;

	SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
			  q, chunk, chunk && chunk->chunk_hdr ?
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
			  : "Illegal Chunk");

	/* If it is data, queue it up, otherwise, send it
	 * immediately.
	 */
	if (SCTP_CID_DATA == chunk->chunk_hdr->type) {
		/* Is it OK to queue data chunks?  */
		/* From 9. Termination of Association
		 *
		 * When either endpoint performs a shutdown, the
		 * association on each peer will stop accepting new
		 * data from its user and only deliver data in queue
		 * at the time of sending or receiving the SHUTDOWN
		 * chunk.
		 */
		switch (q->asoc->state) {
		case SCTP_STATE_EMPTY:
		case SCTP_STATE_CLOSED:
		case SCTP_STATE_SHUTDOWN_PENDING:
		case SCTP_STATE_SHUTDOWN_SENT:
		case SCTP_STATE_SHUTDOWN_RECEIVED:
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			/* Cannot send after transport endpoint shutdown */
			error = -ESHUTDOWN;
			break;

		default:
			SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n",
					  q, chunk, chunk && chunk->chunk_hdr ?
					  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
					  : "Illegal Chunk");

			sctp_outq_tail_data(q, chunk);
			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS);
			else
				SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
			q->empty = 0;
			break;
		}
	} else {
		__skb_queue_tail(&q->control, (struct sk_buff *) chunk);
		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
	}

	if (error < 0)
		return error;

	if (!q->cork)
		error = sctp_outq_flush(q, 0);

	return error;
}

/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
 * and the abandoned list are in ascending order.
 */
static void sctp_insert_list(struct list_head *head, struct list_head *new)
{
	struct list_head *pos;
	struct sctp_chunk *nchunk, *lchunk;
	__u32 ntsn, ltsn;
	int done = 0;

	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
	ntsn = ntohl(nchunk->subh.data_hdr->tsn);

	list_for_each(pos, head) {
		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
		if (TSN_lt(ntsn, ltsn)) {
			list_add(new, pos->prev);
			done = 1;
			break;
		}
	}
	if (!done)
		list_add_tail(new, head);
}
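/* Example (made-up TSNs): inserting TSN 105 into a retransmit list that
 * already holds 103, 104, 107 walks forward until TSN_lt(105, 107) holds
 * and links the new entry before 107, giving 103, 104, 105, 107.  TSN_lt()
 * uses serial number arithmetic (RFC 1982 style, per RFC 2960 1.6), so the
 * ordering stays correct across the 32-bit TSN wrap.
 */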
/* Mark all the eligible packets on a transport for retransmission.  */
void sctp_retransmit_mark(struct sctp_outq *q,
			  struct sctp_transport *transport,
			  __u8 fast_retransmit)
{
	struct list_head *lchunk, *ltemp;
	struct sctp_chunk *chunk;

	/* Walk through the specified transmitted queue.  */
	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(lchunk);
			sctp_insert_list(&q->abandoned, lchunk);
			continue;
		}

		/* If we are doing retransmission due to a fast retransmit,
		 * only the chunks that are marked for fast retransmit
		 * should be added to the retransmit queue.  If we are doing
		 * retransmission due to a timeout or pmtu discovery, only the
		 * chunks that are not yet acked should be added to the
		 * retransmit queue.
		 */
		if ((fast_retransmit && chunk->fast_retransmit) ||
		    (!fast_retransmit && !chunk->tsn_gap_acked)) {
			/* RFC 2960 6.2.1 Processing a Received SACK
			 *
			 * C) Any time a DATA chunk is marked for
			 * retransmission (via either T3-rtx timer expiration
			 * (Section 6.3.3) or via fast retransmit
			 * (Section 7.2.4)), add the data size of those
			 * chunks to the rwnd.
			 */
			q->asoc->peer.rwnd += sctp_data_size(chunk);
			q->outstanding_bytes -= sctp_data_size(chunk);
			transport->flight_size -= sctp_data_size(chunk);

			/* sctpimpguide-05 Section 2.8.2
			 * M5) If a T3-rtx timer expires, the
			 * 'TSN.Missing.Report' of all affected TSNs is set
			 * to 0.
			 */
			chunk->tsn_missing_report = 0;

			/* If a chunk that is being used for RTT measurement
			 * has to be retransmitted, we cannot use this chunk
			 * anymore for RTT measurements.  Reset rto_pending so
			 * that a new RTT measurement is started when a new
			 * data chunk is sent.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue.  The chunks
			 * on the retransmit queue are always kept in order.
			 */
			list_del_init(lchunk);
			sctp_insert_list(&q->retransmit, lchunk);
		}
	}

	SCTP_DEBUG_PRINTK("%s: transport: %p, fast_retransmit: %d, "
			  "cwnd: %d, ssthresh: %d, flight_size: %d, "
			  "pba: %d\n", __FUNCTION__,
			  transport, fast_retransmit,
			  transport->cwnd, transport->ssthresh,
			  transport->flight_size,
			  transport->partial_bytes_acked);
}
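/* The three retransmission triggers handled below differ in their side
 * effects (summarized from the code that follows and from RFC 2960
 * 7.2.3/7.2.4):
 *
 *   SCTP_RTXR_T3_RTX   - T3-rtx timeout: cwnd collapses for the transport
 *                        and, if the timer fired on the current
 *                        retransmission path, a new retran path is chosen.
 *   SCTP_RTXR_FAST_RTX - fast retransmit: cwnd is lowered per the fast
 *                        retransmit rule, and only chunks already marked
 *                        fast_retransmit are queued for resend.
 *   SCTP_RTXR_PMTUD    - path MTU discovery: repackage only; the
 *                        congestion state is left alone.
 */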
/* Mark all the eligible packets on a transport for retransmission and force
 * one packet out.
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     sctp_retransmit_reason_t reason)
{
	int error = 0;
	__u8 fast_retransmit = 0;

	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		break;
	case SCTP_RTXR_FAST_RTX:
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		fast_retransmit = 1;
		break;
	case SCTP_RTXR_PMTUD:
	default:
		break;
	}

	sctp_retransmit_mark(q, transport, fast_retransmit);

	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
	 * following the procedures outlined in C1 - C5.
	 */
	sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);

	error = sctp_outq_flush(q, /* rtx_timeout */ 1);

	if (error)
		q->asoc->base.sk->sk_err = -error;
}

/*
 * Transmit DATA chunks on the retransmit queue.  Upon return from
 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * The return value is a normal kernel error return value.
 */
static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			       int rtx_timeout, int *start_timer)
{
	struct list_head *lqueue;
	struct list_head *lchunk, *lchunk1;
	struct sctp_transport *transport = pkt->transport;
	sctp_xmit_t status;
	struct sctp_chunk *chunk, *chunk1;
	struct sctp_association *asoc;
	int error = 0;

	asoc = q->asoc;
	lqueue = &q->retransmit;

	/* RFC 2960 6.3.3 Handle T3-rtx Expiration
	 *
	 * E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]).  Call this value
	 * K.  Bundle and retransmit those K DATA chunks in a single
	 * packet to the destination endpoint.
	 *
	 * [Just to be painfully clear, if we are retransmitting
	 * because a timeout just happened, we should send only ONE
	 * packet of retransmitted data.]
	 */
	lchunk = sctp_list_dequeue(lqueue);

	while (lchunk) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* Make sure that Gap Acked TSNs are not retransmitted.  A
		 * simple approach is just to move such TSNs out of the
		 * way and into a 'transmitted' queue and skip to the
		 * next chunk.
		 */
		if (chunk->tsn_gap_acked) {
			list_add_tail(lchunk, &transport->transmitted);
			lchunk = sctp_list_dequeue(lqueue);
			continue;
		}

		/* Attempt to append this chunk to the packet. */
		status = sctp_packet_append_chunk(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			/* Send this packet.  */
			if ((error = sctp_packet_transmit(pkt)) == 0)
				*start_timer = 1;

			/* If we are retransmitting, we should only
			 * send a single packet.
			 */
			if (rtx_timeout) {
				list_add(lchunk, lqueue);
				lchunk = NULL;
			}

			/* Bundle lchunk in the next round.  */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			if ((error = sctp_packet_transmit(pkt)) == 0)
				*start_timer = 1;

			/* Stop sending DATA as there is no more room
			 * at the receiver.
			 */
			list_add(lchunk, lqueue);
			lchunk = NULL;
			break;

		case SCTP_XMIT_NAGLE_DELAY:
			/* Send this packet. */
			if ((error = sctp_packet_transmit(pkt)) == 0)
				*start_timer = 1;

			/* Stop sending DATA because of Nagle delay. */
			list_add(lchunk, lqueue);
			lchunk = NULL;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_add_tail(lchunk, &transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			chunk->fast_retransmit = 0;

			*start_timer = 1;
			q->empty = 0;

			/* Retrieve a new chunk to bundle. */
			lchunk = sctp_list_dequeue(lqueue);
			break;
		}

		/* If we are here due to a retransmit timeout or a fast
		 * retransmit and if there are any chunks left in the
		 * retransmit queue that could not fit in the PMTU sized
		 * packet, they need to be marked as ineligible for a
		 * subsequent fast retransmit.
		 */
		if (rtx_timeout && !lchunk) {
			list_for_each(lchunk1, lqueue) {
				chunk1 = list_entry(lchunk1, struct sctp_chunk,
						    transmitted_list);
				chunk1->fast_retransmit = 0;
			}
		}
	}

	return error;
}

/* Uncork the outqueue: flush anything that was queued up while corked. */
int sctp_outq_uncork(struct sctp_outq *q)
{
	int error = 0;

	if (q->cork) {
		q->cork = 0;
		error = sctp_outq_flush(q, 0);
	}
	return error;
}
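/* Corking lets a caller queue several chunks and then emit them in as few
 * packets as possible.  A sketch of the pattern (hypothetical caller; in
 * the kernel this is driven by the side-effect processing code):
 *
 *	q->cork = 1;			// sctp_outq_tail() now only queues
 *	sctp_outq_tail(q, chunk1);
 *	sctp_outq_tail(q, chunk2);
 *	sctp_outq_uncork(q);		// one flush, bundling permitted
 */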
/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 *
 * Note: This function can be called from multiple contexts, so appropriate
 * locking is required.  Today we use the sock lock to protect
 * this function.
 */
int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
	struct sctp_packet *packet;
	struct sctp_packet singleton;
	struct sctp_association *asoc = q->asoc;
	__u16 sport = asoc->base.bind_addr.port;
	__u16 dport = asoc->peer.port;
	__u32 vtag = asoc->peer.i.init_tag;
	struct sk_buff_head *queue;
	struct sctp_transport *transport = NULL;
	struct sctp_transport *new_transport;
	struct sctp_chunk *chunk;
	sctp_xmit_t status;
	int error = 0;
	int start_timer = 0;

	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct list_head *ltransport;

	INIT_LIST_HEAD(&transport_list);
	packet = NULL;

	/*
	 * 6.10 Bundling
	 *   ...
	 *   When bundling control chunks with DATA chunks, an
	 *   endpoint MUST place control chunks first in the outbound
	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
	 *   within a SCTP packet in increasing order of TSN.
	 *   ...
	 */

	queue = &q->control;
	while ((chunk = (struct sctp_chunk *)skb_dequeue(queue)) != NULL) {
		/* Pick the right transport to use. */
		new_transport = chunk->transport;

		if (!new_transport) {
			new_transport = asoc->peer.active_path;
		} else if (new_transport->state == SCTP_INACTIVE) {
			/* If the chunk is Heartbeat or Heartbeat Ack,
			 * send it to chunk->transport, even if it's
			 * inactive.
			 *
			 * 3.3.6 Heartbeat Acknowledgement:
			 * ...
			 * A HEARTBEAT ACK is always sent to the source IP
			 * address of the IP datagram containing the
			 * HEARTBEAT chunk to which this ack is responding.
			 * ...
			 */
			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK)
				new_transport = asoc->peer.active_path;
		}

		/* Are we switching transports?
		 * Take care of transport locks.
		 */
		if (new_transport != transport) {
			transport = new_transport;
			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}
			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		}

		switch (chunk->chunk_hdr->type) {
		/*
		 * 6.10 Bundling
		 *   ...
		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 *   COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			sctp_packet_init(&singleton, transport, sport, dport);
			sctp_packet_config(&singleton, vtag, 0);
			sctp_packet_append_chunk(&singleton, chunk);
			error = sctp_packet_transmit(&singleton);
			if (error < 0)
				return error;
			break;

		case SCTP_CID_ABORT:
		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_ERROR:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF:
		case SCTP_CID_ASCONF_ACK:
		case SCTP_CID_FWD_TSN:
			sctp_packet_transmit_chunk(packet, chunk);
			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}

	/* Is it OK to send data chunks?  */
	switch (asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!packet || !packet->has_cookie_echo)
			break;

		/* fallthru */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		/*
		 * RFC 2960 6.1 Transmission of DATA Chunks
		 *
		 * C) When the time comes for the sender to transmit,
		 * before sending new DATA chunks, the sender MUST
		 * first transmit any outstanding DATA chunks which
		 * are marked for retransmission (limited by the
		 * current cwnd).
		 */
		if (!list_empty(&q->retransmit)) {
			if (transport == asoc->peer.retran_path)
				goto retran;

			/* Switch transports & prepare the packet.  */
			transport = asoc->peer.retran_path;

			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}

			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		retran:
			error = sctp_outq_flush_rtx(q, packet,
						    rtx_timeout, &start_timer);

			if (start_timer)
				sctp_transport_reset_timers(transport);

			/* This can happen on COOKIE-ECHO resend.  Only
			 * one chunk can get bundled with a COOKIE-ECHO.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;

			/* Don't send new data if there is still data
			 * waiting to retransmit.
			 */
			if (!list_empty(&q->retransmit))
				goto sctp_flush_out;
		}

		/* Finally, transmit new packets.  */
		start_timer = 0;
		queue = &q->out;

		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
			 * stream identifier.
			 */
			if (chunk->sinfo.sinfo_stream >=
			    asoc->c.sinit_num_ostreams) {
				/* Mark as failed send. */
				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
				sctp_chunk_free(chunk);
				continue;
			}

			/* Has this chunk expired? */
			if (sctp_chunk_abandoned(chunk)) {
				sctp_chunk_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			/* If there is a specified transport, use it.
			 * Otherwise, we want to use the active path.
			 */
			new_transport = chunk->transport;
			if (!new_transport ||
			    new_transport->state == SCTP_INACTIVE)
				new_transport = asoc->peer.active_path;

			/* Change packets if necessary.  */
			if (new_transport != transport) {
				transport = new_transport;

				/* Schedule to have this transport's
				 * packet flushed.
				 */
				if (list_empty(&transport->send_ready)) {
					list_add_tail(&transport->send_ready,
						      &transport_list);
				}

				packet = &transport->packet;
				sctp_packet_config(packet, vtag,
						   asoc->peer.ecn_capable);
			}

			SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ",
					  q, chunk,
					  chunk && chunk->chunk_hdr ?
					  sctp_cname(SCTP_ST_CHUNK(
						  chunk->chunk_hdr->type))
					  : "Illegal Chunk");

			SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head "
					  "%p skb->users %d.\n",
					  ntohl(chunk->subh.data_hdr->tsn),
					  chunk->skb ? chunk->skb->head : NULL,
					  chunk->skb ?
					  atomic_read(&chunk->skb->users) : -1);

			/* Add the chunk to the packet.  */
			status = sctp_packet_transmit_chunk(packet, chunk);

			switch (status) {
			case SCTP_XMIT_PMTU_FULL:
			case SCTP_XMIT_RWND_FULL:
			case SCTP_XMIT_NAGLE_DELAY:
				/* We could not append this chunk, so put
				 * the chunk back on the output queue.
				 */
				SCTP_DEBUG_PRINTK("sctp_outq_flush: could "
					"not transmit TSN: 0x%x, status: %d\n",
					ntohl(chunk->subh.data_hdr->tsn),
					status);
				sctp_outq_head_data(q, chunk);
				goto sctp_flush_out;

			case SCTP_XMIT_OK:
				break;

			default:
				BUG();
			}

			/* BUG: We assume that the sctp_packet_transmit()
			 * call below will succeed all the time and add the
			 * chunk to the transmitted list and restart the
			 * timers.
			 * It is possible that the call can fail under OOM
			 * conditions.
			 *
			 * Is this really a problem?  Won't this behave
			 * like a lost TSN?
			 */
			list_add_tail(&chunk->transmitted_list,
				      &transport->transmitted);

			sctp_transport_reset_timers(transport);

			q->empty = 0;

			/* Only let one DATA chunk get bundled with a
			 * COOKIE-ECHO chunk.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;
		}
		break;

	default:
		/* Do nothing. */
		break;
	}

sctp_flush_out:

	/* Before returning, examine all the transports touched in
	 * this call.  Right now, we bluntly force clear all the
	 * transports.  Things might change after we implement Nagle.
	 * But such an examination is still required.
	 *
	 * --xguo
	 */
	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
		struct sctp_transport *t = list_entry(ltransport,
						      struct sctp_transport,
						      send_ready);
		packet = &t->packet;
		if (!sctp_packet_empty(packet))
			error = sctp_packet_transmit(packet);
	}

	return error;
}

/* Update unack_data based on the incoming SACK chunk */
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
					struct sctp_sackhdr *sack)
{
	sctp_sack_variable_t *frags;
	__u16 unack_data;
	int i;

	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;

	frags = sack->variable;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
		unack_data -= ((ntohs(frags[i].gab.end) -
				ntohs(frags[i].gab.start) + 1));
	}

	assoc->unack_data = unack_data;
}

/* Return the highest new tsn that is acknowledged by the given SACK chunk. */
static __u32 sctp_highest_new_tsn(struct sctp_sackhdr *sack,
				  struct sctp_association *asoc)
{
	struct list_head *ltransport, *lchunk;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__u32 highest_new_tsn, tsn;
	struct list_head *transport_list = &asoc->peer.transport_addr_list;

	highest_new_tsn = ntohl(sack->cum_tsn_ack);

	list_for_each(ltransport, transport_list) {
		transport = list_entry(ltransport, struct sctp_transport,
				       transports);
		list_for_each(lchunk, &transport->transmitted) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			tsn = ntohl(chunk->subh.data_hdr->tsn);

			if (!chunk->tsn_gap_acked &&
			    TSN_lt(highest_new_tsn, tsn) &&
			    sctp_acked(sack, tsn))
				highest_new_tsn = tsn;
		}
	}

	return highest_new_tsn;
}
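/* Worked example (hypothetical TSNs): cum_tsn_ack = 100 with one gap ack
 * block covering offsets 3-4, i.e. TSNs 103-104.  If 103 was already
 * gap-acked by an earlier SACK but 104 was not, then 104 is the only
 * newly acknowledged TSN and sctp_highest_new_tsn() returns 104.  This
 * value is the 'HighestTSNinSack' used by sctp_mark_missing() below.
 */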
/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *pos, *temp;
	sctp_sack_variable_t *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 */
	if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
		primary->cacc.changeover_active = 0;
		list_for_each(pos, transport_list) {
			transport = list_entry(pos, struct sctp_transport,
					       transports);
			transport->cacc.cycling_changeover = 0;
		}
	}

	/*
	 * SFR-CACC algorithm:
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 */
	if (sack->num_gap_ack_blocks > 0 &&
	    primary->cacc.changeover_active) {
		list_for_each(pos, transport_list) {
			transport = list_entry(pos, struct sctp_transport,
					       transports);
			transport->cacc.cacc_saw_newack = 0;
		}
	}

	/* Get the highest TSN in the sack. */
	highest_tsn = sack_ctsn;
	if (sack->num_gap_ack_blocks)
		highest_tsn +=
		    ntohs(frags[ntohs(sack->num_gap_ack_blocks) - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn)) {
		highest_new_tsn = highest_tsn;
		asoc->highest_sacked = highest_tsn;
	} else {
		highest_new_tsn = sctp_highest_new_tsn(sack, asoc);
	}

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, sack, highest_new_tsn);
	sctp_mark_missing(q, &q->retransmit, NULL, highest_new_tsn, 0);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each(pos, transport_list) {
		transport = list_entry(pos, struct sctp_transport,
				       transports);
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, sack, highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	list_for_each(pos, transport_list) {
		transport = list_entry(pos, struct sctp_transport,
				       transports);
		sctp_mark_missing(q, &transport->transmitted, transport,
				  highest_new_tsn, count_of_newacks);
	}
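	/* At this point every queue has been reconciled against the SACK:
	 * acked chunks credited or moved by sctp_check_transmitted(), and
	 * missing reports bumped by sctp_mark_missing().  What remains is
	 * association-level bookkeeping: advance the cumulative ack point,
	 * recompute unack_data and the peer's rwnd, and decide whether the
	 * outqueue is now empty.
	 */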
	/* Move the Cumulative TSN Ack Point if appropriate.  */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn))
		asoc->ctsn_ack_point = sack_ctsn;

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away stuff rotting on the sack queue.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn))
			sctp_chunk_free(tchunk);
	}

	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 *     number of bytes still outstanding after processing the
	 *     Cumulative TSN Ack and the Gap Ack Blocks.
	 */
	sack_a_rwnd = ntohl(sack->a_rwnd);
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;

	sctp_generate_fwdtsn(q, sack_ctsn);

	SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
			  __FUNCTION__, sack_ctsn);
	SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, "
			  "%p is 0x%x. Adv peer ack point: 0x%x\n",
			  __FUNCTION__, asoc, ctsn, asoc->adv_peer_ack_point);

	/* See if all chunks are acked.
	 * Make sure the empty queue handler will get run later.
	 */
	q->empty = skb_queue_empty(&q->out) && skb_queue_empty(&q->control) &&
		   list_empty(&q->retransmit);
	if (!q->empty)
		goto finish;

	list_for_each(pos, transport_list) {
		transport = list_entry(pos, struct sctp_transport,
				       transports);
		q->empty = q->empty && list_empty(&transport->transmitted);
		if (!q->empty)
			goto finish;
	}

	SCTP_DEBUG_PRINTK("sack queue is empty.\n");
finish:
	return q->empty;
}

/* Is the outqueue empty?  */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
	return q->empty;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/
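/* How the helpers below fit together: for each SACK, sctp_outq_sack()
 * calls sctp_check_transmitted() once per transmitted queue (crediting
 * acked bytes, measuring RTT, managing the T3-rtx timer), then
 * sctp_mark_missing() to bump TSN.Missing.Report counters and, at the
 * threshold, kick off fast retransmit via sctp_retransmit().
 * sctp_acked() is the primitive that answers "does this SACK cover
 * TSN t?".
 */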
/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
 * The retransmit list will not have an associated transport.
 *
 * I added coherent debug information output.	--xguo
 *
 * Instead of printing 'sacked' or 'kept' for each TSN on the
 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
 * KEPT TSN6-TSN7, etc.
 */
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   struct sctp_sackhdr *sack,
				   __u32 highest_new_tsn_in_sack)
{
	struct list_head *lchunk;
	struct sctp_chunk *tchunk;
	struct list_head tlist;
	__u32 tsn;
	__u32 sack_ctsn;
	__u32 rtt;
	__u8 restart_timer = 0;
	int bytes_acked = 0;

	/* These state variables are for coherent debug output. --xguo */

#if SCTP_DEBUG
	__u32 dbg_ack_tsn = 0;	     /* An ACKed TSN range starts here... */
	__u32 dbg_last_ack_tsn = 0;  /* ...and finishes here.	          */
	__u32 dbg_kept_tsn = 0;	     /* An un-ACKed range starts here...  */
	__u32 dbg_last_kept_tsn = 0; /* ...and finishes here.	          */

	/*  0 : The last TSN was ACKed.
	 *  1 : The last TSN was NOT ACKed (i.e. KEPT).
	 * -1 : We need to initialize.
	 */
	int dbg_prt_state = -1;
#endif /* SCTP_DEBUG */

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	INIT_LIST_HEAD(&tlist);

	/* The while loop will skip empty transmitted queues. */
	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);

		if (sctp_chunk_abandoned(tchunk)) {
			/* Move the chunk to abandoned list. */
			sctp_insert_list(&q->abandoned, lchunk);
			continue;
		}

		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (sctp_acked(sack, tsn)) {
			/* If this queue is the retransmit queue, the
			 * retransmit timer has already reclaimed
			 * the outstanding bytes for this chunk, so only
			 * count bytes associated with a transport.
			 */
			if (transport) {
				/* If this chunk is being used for RTT
				 * measurement, calculate the RTT and update
				 * the RTO using this value.
				 *
				 * 6.3.1 C5) Karn's algorithm: RTT measurements
				 * MUST NOT be made using packets that were
				 * retransmitted (and thus for which it is
				 * ambiguous whether the reply was for the
				 * first instance of the packet or a later
				 * instance).
				 */
				if (!tchunk->tsn_gap_acked &&
				    !tchunk->resent &&
				    tchunk->rtt_in_progress) {
					rtt = jiffies - tchunk->sent_at;
					sctp_transport_update_rto(transport,
								  rtt);
				}
			}
			if (TSN_lte(tsn, sack_ctsn)) {
				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R3) Whenever a SACK is received
				 * that acknowledges the DATA chunk
				 * with the earliest outstanding TSN
				 * for that address, restart T3-rtx
				 * timer for that address with its
				 * current RTO.
				 */
				restart_timer = 1;

				if (!tchunk->tsn_gap_acked) {
					tchunk->tsn_gap_acked = 1;
					bytes_acked += sctp_data_size(tchunk);
					/*
					 * SFR-CACC algorithm:
					 * 2) If the SACK contains gap acks
					 * and the flag CHANGEOVER_ACTIVE is
					 * set the receiver of the SACK MUST
					 * take the following action:
					 *
					 * B) For each TSN t being acked that
					 * has not been acked in any SACK so
					 * far, set cacc_saw_newack to 1 for
					 * the destination that the TSN was
					 * sent to.
					 */
					if (transport &&
					    sack->num_gap_ack_blocks &&
					    q->asoc->peer.primary_path->cacc.
					    changeover_active)
						transport->cacc.cacc_saw_newack
							= 1;
				}

				list_add_tail(&tchunk->transmitted_list,
					      &q->sacked);
			} else {
				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
				 * M2) Each time a SACK arrives reporting
				 * 'Stray DATA chunk(s)' record the highest TSN
				 * reported as newly acknowledged, call this
				 * value 'HighestTSNinSack'. A newly
				 * acknowledged DATA chunk is one not
				 * previously acknowledged in a SACK.
				 *
				 * When the SCTP sender of data receives a SACK
				 * chunk that acknowledges, for the first time,
				 * the receipt of a DATA chunk, all the still
				 * unacknowledged DATA chunks whose TSN is
				 * older than that newly acknowledged DATA
				 * chunk, are qualified as 'Stray DATA chunks'.
				 */
				if (!tchunk->tsn_gap_acked) {
					tchunk->tsn_gap_acked = 1;
					bytes_acked += sctp_data_size(tchunk);
				}
				list_add_tail(lchunk, &tlist);
			}
#if SCTP_DEBUG
			switch (dbg_prt_state) {
			case 0:	/* last TSN was ACKed */
				if (dbg_last_ack_tsn + 1 == tsn) {
					/* This TSN belongs to the
					 * current ACK range.
					 */
					break;
				}

				if (dbg_last_ack_tsn != dbg_ack_tsn) {
					/* Display the end of the
					 * current range.
					 */
					SCTP_DEBUG_PRINTK("-%08x",
							  dbg_last_ack_tsn);
				}

				/* Start a new range.  */
				SCTP_DEBUG_PRINTK(",%08x", tsn);
				dbg_ack_tsn = tsn;
				break;

			case 1:	/* The last TSN was NOT ACKed. */
				if (dbg_last_kept_tsn != dbg_kept_tsn) {
					/* Display the end of current range. */
					SCTP_DEBUG_PRINTK("-%08x",
							  dbg_last_kept_tsn);
				}

				SCTP_DEBUG_PRINTK("\n");

				/* FALL THROUGH... */
			default:
				/* This is the first-ever TSN we examined.  */
				/* Start a new range of ACK-ed TSNs.  */
				SCTP_DEBUG_PRINTK("ACKed: %08x", tsn);
				dbg_prt_state = 0;
				dbg_ack_tsn = tsn;
			}

			dbg_last_ack_tsn = tsn;
#endif /* SCTP_DEBUG */

		} else {
			if (tchunk->tsn_gap_acked) {
				SCTP_DEBUG_PRINTK("%s: Receiver reneged on "
						  "data TSN: 0x%x\n",
						  __FUNCTION__,
						  tsn);
				tchunk->tsn_gap_acked = 0;

				bytes_acked -= sctp_data_size(tchunk);

				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R4) Whenever a SACK is received missing a
				 * TSN that was previously acknowledged via a
				 * Gap Ack Block, start T3-rtx for the
				 * destination address to which the DATA
				 * chunk was originally
				 * transmitted if it is not already running.
				 */
				restart_timer = 1;
			}

			list_add_tail(lchunk, &tlist);

#if SCTP_DEBUG
			/* See the above comments on ACK-ed TSNs. */
			switch (dbg_prt_state) {
			case 1:
				if (dbg_last_kept_tsn + 1 == tsn)
					break;

				if (dbg_last_kept_tsn != dbg_kept_tsn)
					SCTP_DEBUG_PRINTK("-%08x",
							  dbg_last_kept_tsn);

				SCTP_DEBUG_PRINTK(",%08x", tsn);
				dbg_kept_tsn = tsn;
				break;

			case 0:
				if (dbg_last_ack_tsn != dbg_ack_tsn)
					SCTP_DEBUG_PRINTK("-%08x",
							  dbg_last_ack_tsn);
				SCTP_DEBUG_PRINTK("\n");

				/* FALL THROUGH... */
			default:
				SCTP_DEBUG_PRINTK("KEPT: %08x", tsn);
				dbg_prt_state = 1;
				dbg_kept_tsn = tsn;
			}

			dbg_last_kept_tsn = tsn;
#endif /* SCTP_DEBUG */
		}
	}

#if SCTP_DEBUG
	/* Finish off the last range, displaying its ending TSN.  */
	switch (dbg_prt_state) {
	case 0:
		if (dbg_last_ack_tsn != dbg_ack_tsn) {
			SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_ack_tsn);
		} else {
			SCTP_DEBUG_PRINTK("\n");
		}
		break;

	case 1:
		if (dbg_last_kept_tsn != dbg_kept_tsn) {
			SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_kept_tsn);
		} else {
			SCTP_DEBUG_PRINTK("\n");
		}
	}
#endif /* SCTP_DEBUG */
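	/* With the queue walk done, fold the results into the transport:
	 * clear error counters on forward progress (8.2), grow cwnd for
	 * newly acked bytes, and apply the T3-rtx timer rules.  Note the
	 * hold/put pairing below: a pending T3-rtx timer owns a reference
	 * on the transport, so cancelling it drops that reference and
	 * (re)arming it takes one.
	 */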
	if (transport) {
		if (bytes_acked) {
			/* 8.2. When an outstanding TSN is acknowledged,
			 * the endpoint shall clear the error counter of
			 * the destination transport address to which the
			 * DATA chunk was last sent.
			 * The association's overall error counter is
			 * also cleared.
			 */
			transport->error_count = 0;
			transport->asoc->overall_error_count = 0;

			/* Mark the destination transport address as
			 * active if it is not so marked.
			 */
			if (transport->state == SCTP_INACTIVE) {
				sctp_assoc_control_transport(
					transport->asoc,
					transport,
					SCTP_TRANSPORT_UP,
					SCTP_RECEIVED_SACK);
			}

			sctp_transport_raise_cwnd(transport, sack_ctsn,
						  bytes_acked);

			transport->flight_size -= bytes_acked;
			q->outstanding_bytes -= bytes_acked;
		} else {
			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
			 * When a sender is doing zero window probing, it
			 * should not timeout the association if it continues
			 * to receive new packets from the receiver. The
			 * reason is that the receiver MAY keep its window
			 * closed for an indefinite time.
			 * A sender is doing zero window probing when the
			 * receiver's advertised window is zero, and there is
			 * only one data chunk in flight to the receiver.
			 */
			if (!q->asoc->peer.rwnd &&
			    !list_empty(&tlist) &&
			    (sack_ctsn + 2 == q->asoc->next_tsn)) {
				SCTP_DEBUG_PRINTK("%s: SACK received for zero "
						  "window probe: %u\n",
						  __FUNCTION__, sack_ctsn);
				q->asoc->overall_error_count = 0;
				transport->error_count = 0;
			}
		}

		/* RFC 2960 6.3.2 Retransmission Timer Rules
		 *
		 * R2) Whenever all outstanding data sent to an address have
		 * been acknowledged, turn off the T3-rtx timer of that
		 * address.
		 */
		if (!transport->flight_size) {
			if (timer_pending(&transport->T3_rtx_timer) &&
			    del_timer(&transport->T3_rtx_timer)) {
				sctp_transport_put(transport);
			}
		} else if (restart_timer) {
			if (!mod_timer(&transport->T3_rtx_timer,
				       jiffies + transport->rto))
				sctp_transport_hold(transport);
		}
	}

	list_splice(&tlist, transmitted_queue);
}
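/* Example of the missing-report machinery (hypothetical trace): TSN 110
 * is outstanding while successive SACKs newly acknowledge 111, 112, 113
 * and 114.  Each such SACK raises 110's TSN.Missing.Report by one (unless
 * SFR-CACC decides the apparent loss may just be changeover reordering);
 * at a count of 4 the chunk is flagged fast_retransmit and
 * sctp_retransmit() is invoked with SCTP_RTXR_FAST_RTX.
 */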
/* Mark chunks as missing; as a consequence they may get retransmitted.  */
static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn_in_sack,
			      int count_of_newacks)
{
	struct sctp_chunk *chunk;
	struct list_head *pos;
	__u32 tsn;
	char do_fast_retransmit = 0;
	struct sctp_transport *primary = q->asoc->peer.primary_path;

	list_for_each(pos, transmitted_queue) {
		chunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
		 * 'Unacknowledged TSN's', if the TSN number of an
		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
		 * value, increment the 'TSN.Missing.Report' count on that
		 * chunk if it has NOT been fast retransmitted or marked for
		 * fast retransmit already.
		 */
		if (!chunk->fast_retransmit &&
		    !chunk->tsn_gap_acked &&
		    TSN_lt(tsn, highest_new_tsn_in_sack)) {

			/* SFR-CACC may require us to skip marking
			 * this chunk as missing.
			 */
			if (!transport || !sctp_cacc_skip(primary, transport,
						count_of_newacks, tsn)) {
				chunk->tsn_missing_report++;

				SCTP_DEBUG_PRINTK(
					"%s: TSN 0x%x missing counter: %d\n",
					__FUNCTION__, tsn,
					chunk->tsn_missing_report);
			}
		}
		/*
		 * M4) If any DATA chunk is found to have a
		 * 'TSN.Missing.Report' value larger than or equal to 4,
		 * mark that chunk for retransmission and start the fast
		 * retransmit procedure.
		 */
		if (chunk->tsn_missing_report >= 4) {
			chunk->fast_retransmit = 1;
			do_fast_retransmit = 1;
		}
	}

	if (transport) {
		if (do_fast_retransmit)
			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

		SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, pba: %d\n",
				  __FUNCTION__, transport, transport->cwnd,
				  transport->ssthresh, transport->flight_size,
				  transport->partial_bytes_acked);
	}
}

/* Is the given TSN acked by this packet?  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
	int i;
	sctp_sack_variable_t *frags;
	__u16 gap;
	__u32 ctsn = ntohl(sack->cum_tsn_ack);

	if (TSN_lte(tsn, ctsn))
		goto pass;

	/* 3.3.4 Selective Acknowledgement (SACK) (3):
	 *
	 * Gap Ack Blocks:
	 *  These fields contain the Gap Ack Blocks. They are repeated
	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
	 *  defined in the Number of Gap Ack Blocks field. All DATA
	 *  chunks with TSNs greater than or equal to (Cumulative TSN
	 *  Ack + Gap Ack Block Start) and less than or equal to
	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
	 *  Block are assumed to have been received correctly.
	 */

	frags = sack->variable;
	gap = tsn - ctsn;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
		if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
		    TSN_lte(gap, ntohs(frags[i].gab.end)))
			goto pass;
	}

	return 0;
pass:
	return 1;
}
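/* Worked example for sctp_acked() (hypothetical SACK): cum_tsn_ack = 100
 * with one gap ack block {start = 3, end = 5}.  For tsn = 104 the offset
 * is gap = 104 - 100 = 4, and 3 <= 4 <= 5 holds, so the TSN is reported
 * as acked; tsn = 102 gives gap = 2, which falls in no block, so 102 is
 * still outstanding.  Gap ack offsets are relative to the cumulative ack,
 * which is what lets the wire format keep them to 16 bits.
 */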
static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
				    int nskips, __u16 stream)
{
	int i;

	for (i = 0; i < nskips; i++) {
		if (skiplist[i].stream == stream)
			return i;
	}
	return i;
}

/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
	int nskips = 0;
	int skip_pos = 0;
	__u32 tsn;
	struct sctp_chunk *chunk;
	struct list_head *lchunk, *temp;

	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
	 * received SACK.
	 *
	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
	 */
	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
	 * the chunk next in the out-queue space is marked as "abandoned" as
	 * shown in the following example:
	 *
	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
	 * and the Advanced.Peer.Ack.Point is updated to this value:
	 *
	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
	 *   normal SACK processing         local advancement
	 *                ...                           ...
	 *   Adv.Ack.Pt-> 102 acked                     102 acked
	 *                103 abandoned                 103 abandoned
	 *                104 abandoned   Adv.Ack.P->   104 abandoned
	 *                105                           105
	 *                106 acked                     106 acked
	 *                ...                           ...
	 *
	 * In this example, the data sender successfully advanced the
	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
	 */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* Remove any chunks in the abandoned queue that are acked by
		 * the ctsn.
		 */
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			if (!chunk->tsn_gap_acked) {
				chunk->transport->flight_size -=
					sctp_data_size(chunk);
				q->outstanding_bytes -= sctp_data_size(chunk);
			}
			sctp_chunk_free(chunk);
		} else {
			if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
				asoc->adv_peer_ack_point = tsn;
				if (chunk->chunk_hdr->flags &
				    SCTP_DATA_UNORDERED)
					continue;
				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
							     nskips,
							     chunk->subh.data_hdr->stream);
				ftsn_skip_arr[skip_pos].stream =
					chunk->subh.data_hdr->stream;
				ftsn_skip_arr[skip_pos].ssn =
					chunk->subh.data_hdr->ssn;
				if (skip_pos == nskips)
					nskips++;
				if (nskips == 10)
					break;
			} else
				break;
		}
	}

	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
	 * is greater than the Cumulative TSN ACK carried in the received
	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
	 * chunk containing the latest value of the
	 * "Advanced.Peer.Ack.Point".
	 *
	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
	 * list each stream and sequence number in the forwarded TSN. This
	 * information will enable the receiver to easily find any
	 * stranded TSN's waiting on stream reorder queues. Each stream
	 * SHOULD only be reported once; this means that if multiple
	 * abandoned messages occur in the same stream then only the
	 * highest abandoned stream sequence number is reported. If the
	 * total size of the FORWARD TSN does NOT fit in a single MTU then
	 * the sender of the FORWARD TSN SHOULD lower the
	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
	 * single MTU.
	 */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
					      nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		__skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk);
		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
	}
}