/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle output processing.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>

/* Forward declarations for private helpers. */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len);

static void sctp_packet_reset(struct sctp_packet *packet)
{
	/* sctp_packet_transmit() relies on this to reset size to the
	 * current overhead after sending packets.
	 */
	packet->size = packet->overhead;

	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->has_data = 0;
	packet->has_auth = 0;
	packet->ipfragok = 0;
	packet->auth = NULL;
}
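/* Usage sketch (illustrative, not part of this file): a caller such as
 * sctp_outq_flush() drives a packet through init -> config -> append ->
 * transmit, roughly:
 *
 *	sctp_packet_init(&pkt, transport, sport, dport);
 *	sctp_packet_config(&pkt, vtag, ecn_capable);
 *	sctp_packet_transmit_chunk(&pkt, chunk, 0, gfp);
 *	sctp_packet_transmit(&pkt, gfp);
 *
 * The vtag, ecn_capable and gfp values here stand in for whatever the
 * caller has at hand.
 */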
/* Config a packet.
 * This appears to be a followup set of initializations.
 */
void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
			int ecn_capable)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_sock *sp = NULL;
	struct sock *sk;

	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
	packet->vtag = vtag;

	/* do the following jobs only once for a flush schedule */
	if (!sctp_packet_empty(packet))
		return;

	/* set packet max_size with pathmtu, then calculate overhead */
	packet->max_size = tp->pathmtu;

	if (asoc) {
		sk = asoc->base.sk;
		sp = sctp_sk(sk);
	}
	packet->overhead = sctp_mtu_payload(sp, 0, 0);
	packet->size = packet->overhead;

	if (!asoc)
		return;

	/* update dst or transport pathmtu if in need */
	if (!sctp_transport_dst_check(tp)) {
		sctp_transport_route(tp, NULL, sp);
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	}

	/* If there is a prepend chunk, stick it on the list before
	 * any other chunks get appended.
	 */
	if (ecn_capable) {
		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);

		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	if (!tp->dst)
		return;

	/* set packet max_size with gso_max_size if gso is enabled */
	rcu_read_lock();
	if (__sk_dst_get(sk) != tp->dst) {
		dst_hold(tp->dst);
		sk_setup_caps(sk, tp->dst);
	}
	packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
					  : asoc->pathmtu;
	rcu_read_unlock();
}

/* Initialize the packet structure. */
void sctp_packet_init(struct sctp_packet *packet,
		      struct sctp_transport *transport,
		      __u16 sport, __u16 dport)
{
	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	/* The overhead will be calculated by sctp_packet_config() */
	packet->overhead = 0;
	sctp_packet_reset(packet);
	packet->vtag = 0;
}

/* Free a packet. */
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	pr_debug("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}
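/* Worked example for the overhead computed in sctp_packet_config() above
 * (assuming no UDP encapsulation): sctp_mtu_payload(sp, 0, 0) is called
 * with an mtu of 0, which makes it return the per-packet header overhead
 * itself, i.e.
 *
 *	IPv4: sizeof(struct iphdr) + sizeof(struct sctphdr) = 20 + 12 = 32
 *	IPv6: sizeof(struct ipv6hdr) + sizeof(struct sctphdr) = 40 + 12 = 52
 *
 * so with a path MTU of 1500, an IPv4 packet has 1468 bytes left for
 * chunks.
 */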
/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk
 * is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
					  struct sctp_chunk *chunk,
					  int one_packet, gfp_t gfp)
{
	enum sctp_xmit retval;

	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);

	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			int error = 0;

			error = sctp_packet_transmit(packet, gfp);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_DELAY:
		break;
	}

	return retval;
}

/* Try to bundle an auth chunk into the packet. */
static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	enum sctp_xmit retval = SCTP_XMIT_OK;
	struct sctp_chunk *auth;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc, chunk->shkey->key_id);
	if (!auth)
		return retval;

	auth->shkey = chunk->shkey;
	sctp_auth_shkey_hold(auth->shkey);

	retval = __sctp_packet_append_chunk(pkt, auth);

	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}

/* Try to bundle a SACK with the packet. */
static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one in to the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;

		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				asoc->peer.sack_needed = 0;
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}
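/* Illustrative on-the-wire layout after the bundling above (sketch): an
 * AUTH chunk, when needed, is appended ahead of the chunk it covers, and a
 * pending SACK is bundled ahead of new DATA, so a flushed packet can look
 * like:
 *
 *	[SCTP common header][AUTH][SACK][DATA][DATA]...
 *
 * with each chunk padded out to a 4-byte boundary (SCTP_PAD4).
 */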
/* Append a chunk to the offered packet, reporting back any inability to do
 * so.
 */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk)
{
	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
	case SCTP_CID_I_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that the packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		/* Mainly used for prsctp RTX policy */
		chunk->sent_count++;
		break;
	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk. */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;
finish:
	return retval;
}

/* Append a chunk to the offered packet, reporting back any inability to do
 * so.
 */
enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
					struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	/* Data chunks are special. Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);

finish:
	return retval;
}

static void sctp_packet_release_owner(struct sk_buff *skb)
{
	sk_free(skb->sk);
}
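/* Note on the destructor pairing here (informational): sctp_packet_release_owner()
 * above runs as the skb destructor and, via sk_free(), drops the
 * sk_wmem_alloc reference taken by sctp_packet_set_owner_w() below; once
 * that count reaches zero the socket itself is finally freed, so the
 * socket is guaranteed to outlive the packet.
 */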
static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sctp_packet_release_owner;

	/*
	 * The data chunks have already been accounted for in sctp_sendmsg(),
	 * therefore only reserve a single byte to keep socket around until
	 * the packet has been transmitted.
	 */
	refcount_inc(&sk->sk_wmem_alloc);
}

static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
{
	if (SCTP_OUTPUT_CB(head)->last == head)
		skb_shinfo(head)->frag_list = skb;
	else
		SCTP_OUTPUT_CB(head)->last->next = skb;
	SCTP_OUTPUT_CB(head)->last = skb;

	head->truesize += skb->truesize;
	head->data_len += skb->len;
	head->len += skb->len;

	__skb_header_release(skb);
}
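/* Shape of a GSO'd packet built by the code below (sketch): the head skb
 * carries only the SCTP common header, and each PMTU-sized group of chunks
 * becomes one skb chained off its frag_list:
 *
 *	head [sctphdr]
 *	  `- frag_list: nskb1 [chunks] -> nskb2 [chunks] -> ...
 *
 * gso_size is set to GSO_BY_FRAGS so the stack segments exactly at these
 * frag_list boundaries.
 */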
static int sctp_packet_pack(struct sctp_packet *packet,
			    struct sk_buff *head, int gso, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count = 0, pkt_size;
	struct sock *sk = head->sk;
	struct sk_buff *nskb;
	int auth_len = 0;

	if (gso) {
		skb_shinfo(head)->gso_type = sk->sk_gso_type;
		SCTP_OUTPUT_CB(head)->last = head;
	} else {
		nskb = head;
		pkt_size = packet->size;
		goto merge;
	}

	do {
		/* calculate the pkt_size and alloc nskb */
		pkt_size = packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
					 list) {
			int padded = SCTP_PAD4(chunk->skb->len);

			if (chunk == packet->auth)
				auth_len = padded;
			else if (auth_len + padded + packet->overhead >
				 tp->pathmtu)
				return 0;
			else if (pkt_size + padded > tp->pathmtu)
				break;
			pkt_size += padded;
		}
		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
		if (!nskb)
			return 0;
		skb_reserve(nskb, packet->overhead + MAX_HEADER);

merge:
		/* merge chunks into nskb and append nskb into head list */
		pkt_size -= packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
			int padding;

			list_del_init(&chunk->list);
			if (sctp_chunk_is_data(chunk)) {
				if (!sctp_chunk_retransmitted(chunk) &&
				    !tp->rto_pending) {
					chunk->rtt_in_progress = 1;
					tp->rto_pending = 1;
				}
			}

			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
			if (padding)
				skb_put_zero(chunk->skb, padding);

			if (chunk == packet->auth)
				auth = (struct sctp_auth_chunk *)
						skb_tail_pointer(nskb);

			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);

			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
				 chunk,
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
				 chunk->has_tsn ? "TSN" : "No TSN",
				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
				 chunk->rtt_in_progress);

			pkt_size -= SCTP_PAD4(chunk->skb->len);

			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
				sctp_chunk_free(chunk);

			if (!pkt_size)
				break;
		}

		if (auth) {
			sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
						 packet->auth->shkey, gfp);
			/* free auth if no more chunks, or add it back */
			if (list_empty(&packet->chunk_list))
				sctp_chunk_free(packet->auth);
			else
				list_add(&packet->auth->list,
					 &packet->chunk_list);
		}

		if (gso)
			sctp_packet_gso_append(head, nskb);

		pkt_count++;
	} while (!list_empty(&packet->chunk_list));

	if (gso) {
		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
					sizeof(struct inet6_skb_parm)));
		skb_shinfo(head)->gso_segs = pkt_count;
		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
		rcu_read_lock();
		if (skb_dst(head) != tp->dst) {
			dst_hold(tp->dst);
			sk_setup_caps(sk, tp->dst);
		}
		rcu_read_unlock();
		goto chksum;
	}

	if (sctp_checksum_disable)
		return 1;

	if (!(skb_dst(head)->dev->features & NETIF_F_SCTP_CRC) ||
	    dst_xfrm(skb_dst(head)) || packet->ipfragok) {
		struct sctphdr *sh =
			(struct sctphdr *)skb_transport_header(head);

		sh->checksum = sctp_compute_cksum(head, 0);
	} else {
chksum:
		head->ip_summed = CHECKSUM_PARTIAL;
		head->csum_not_inet = 1;
		head->csum_start = skb_transport_header(head) - head->head;
		head->csum_offset = offsetof(struct sctphdr, checksum);
	}

	return pkt_count;
}
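/* Checksum decision above, summarized (informational): the CRC32c is
 * computed in software only when the device lacks NETIF_F_SCTP_CRC, the
 * route goes through xfrm, or IP-level fragmentation was allowed;
 * otherwise the skb is handed down with CHECKSUM_PARTIAL so the CRC32c is
 * filled in later, per segment in the GSO case.
 */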
/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is always 0 for now.
 */
int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count, gso = 0;
	struct dst_entry *dst;
	struct sk_buff *head;
	struct sctphdr *sh;
	struct sock *sk;

	pr_debug("%s: packet:%p\n", __func__, packet);
	if (list_empty(&packet->chunk_list))
		return 0;
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* check gso */
	if (packet->size > tp->pathmtu && !packet->ipfragok) {
		if (!sk_can_gso(sk)) {
			pr_err_once("Trying to GSO but underlying device doesn't support it.");
			goto out;
		}
		gso = 1;
	}

	/* alloc head skb */
	head = alloc_skb((gso ? packet->overhead : packet->size) +
			 MAX_HEADER, gfp);
	if (!head)
		goto out;
	skb_reserve(head, packet->overhead + MAX_HEADER);
	sctp_packet_set_owner_w(head, sk);

	/* set sctp header */
	sh = skb_push(head, sizeof(struct sctphdr));
	skb_reset_transport_header(head);
	sh->source = htons(packet->source_port);
	sh->dest = htons(packet->destination_port);
	sh->vtag = htonl(packet->vtag);
	sh->checksum = 0;

	/* drop packet if no dst */
	dst = dst_clone(tp->dst);
	if (!dst) {
		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		kfree_skb(head);
		goto out;
	}
	skb_dst_set(head, dst);

	/* pack up chunks */
	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
	if (!pkt_count) {
		kfree_skb(head);
		goto out;
	}
	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);

	/* start autoclose timer */
	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
		struct timer_list *timer =
			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
		unsigned long timeout =
			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

		if (!mod_timer(timer, jiffies + timeout))
			sctp_association_hold(asoc);
	}

	/* sctp xmit */
	tp->af_specific->ecn_capable(sk);
	if (asoc) {
		asoc->stats.opackets += pkt_count;
		if (asoc->peer.last_sent_to != tp)
			asoc->peer.last_sent_to = tp;
	}
	head->ignore_df = packet->ipfragok;
	if (tp->dst_pending_confirm)
		skb_set_dst_pending_confirm(head, 1);
	/* neighbour should be confirmed on successful transmission or
	 * positive error
	 */
	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
	    tp->dst_pending_confirm)
		tp->dst_pending_confirm = 0;

out:
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	sctp_packet_reset(packet);
	return 0;
}
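/* Example of the GSO decision above (hypothetical numbers): with a path
 * MTU of 1500, flushing roughly 4000 bytes of queued chunks makes
 * packet->size exceed tp->pathmtu, so if the socket can GSO the head skb
 * is built with room for only the SCTP header and sctp_packet_pack()
 * chains three sub-MTU skbs off its frag_list; without GSO support the
 * flush is abandoned rather than sending an over-sized datagram.
 */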
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This private function checks whether a chunk can be added */
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk)
{
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd && inflight > 0)
		/* We have (at least) one data chunk in flight,
		 * so we can't fall back to rule 6.1 B).
		 */
		return SCTP_XMIT_RWND_FULL;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 2960 7.2.4 & the Implementers Guide 2.8.
	 *
	 * 3) ...
	 * When a Fast Retransmit is being performed the sender SHOULD
	 * ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
	    flight_size >= transport->cwnd)
		return SCTP_XMIT_RWND_FULL;

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */

	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
	    !asoc->force_delay)
		/* Nothing unacked */
		return SCTP_XMIT_OK;

	if (!sctp_packet_empty(packet))
		/* Append to packet */
		return SCTP_XMIT_OK;

	if (!sctp_state(asoc, ESTABLISHED))
		return SCTP_XMIT_OK;

	/* Check whether this chunk and all the rest of the pending data will
	 * fit, or delay in hopes of bundling a full-sized packet.
	 */
	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
		/* Enough data queued to fill a packet */
		return SCTP_XMIT_OK;

	/* Don't delay large message writes that may have been fragmented */
	if (!chunk->msg->can_delay)
		return SCTP_XMIT_OK;

	/* Defer until all data acked or packet full */
	return SCTP_XMIT_DELAY;
}

/* This private function does management things when adding DATA chunk */
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	sctp_chunk_assign_tsn(chunk);
	asoc->stream.si->assign_number(chunk);
}
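/* Worked example for the accounting above (hypothetical numbers): a
 * 100-byte DATA payload with peer.rwnd == 1000 leaves peer.rwnd at 900 and
 * bumps both flight_size and outstanding_bytes by 100.  Once peer.rwnd is
 * exhausted, sctp_packet_can_append_data() returns SCTP_XMIT_RWND_FULL for
 * new data while anything is still in flight.
 */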
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;
	size_t psize, pmtu, maxsize;

	/* Don't bundle in this packet if this chunk's auth key doesn't
	 * match other chunks already enqueued on this packet.  Also,
	 * don't bundle the chunk with auth key if other chunks in this
	 * packet don't have auth key.
	 */
	if ((packet->auth && chunk->shkey != packet->auth->shkey) ||
	    (!packet->auth && chunk->shkey &&
	     chunk->chunk_hdr->type != SCTP_CID_AUTH))
		return SCTP_XMIT_PMTU_FULL;

	psize = packet->size;
	if (packet->transport->asoc)
		pmtu = packet->transport->asoc->pathmtu;
	else
		pmtu = packet->transport->pathmtu;

	/* Decide if we need to fragment or resubmit later. */
	if (psize + chunk_len > pmtu) {
		/* It's OK to fragment at IP level if any one of the following
		 * is true:
		 *	1. The packet is empty (meaning this chunk is greater
		 *	   than the MTU)
		 *	2. The packet doesn't have any data in it yet and data
		 *	   requires authentication.
		 */
		if (sctp_packet_empty(packet) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
			goto out;
		}

		/* Similarly, if this chunk was built before a PMTU
		 * reduction, we have to fragment it at IP level now.  So
		 * if the packet already contains something, we need to
		 * flush.
		 */
		maxsize = pmtu - packet->overhead;
		if (packet->auth)
			maxsize -= SCTP_PAD4(packet->auth->skb->len);
		if (chunk_len > maxsize)
			retval = SCTP_XMIT_PMTU_FULL;

		/* It is also okay to fragment if the chunk we are
		 * adding is a control chunk, but only if current packet
		 * is not a GSO one otherwise it causes fragmentation of
		 * a large frame.  So in this case we allow the
		 * fragmentation by forcing it to be in a new packet.
		 */
		if (!sctp_chunk_is_data(chunk) && packet->has_data)
			retval = SCTP_XMIT_PMTU_FULL;

		if (psize + chunk_len > packet->max_size)
			/* Hit GSO/PMTU limit, gotta flush */
			retval = SCTP_XMIT_PMTU_FULL;

		if (!packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->cwnd >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;

		if (packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->burst_limited >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of original cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;
		/* Otherwise it will fit in the GSO packet */
	}

out:
	return retval;
}
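/* Worked example for sctp_packet_will_fit() above (hypothetical numbers):
 * with pmtu 1500, overhead 32 and an empty packet (psize == 32), a
 * 2000-byte chunk exceeds maxsize (1468) but the packet is empty, so
 * ipfragok is set and the chunk is accepted for IP-level fragmentation.
 * The same chunk offered to a packet already holding DATA gets
 * SCTP_XMIT_PMTU_FULL instead, forcing a flush first.
 */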