// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP association.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Kevin Gao             <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>

#include <linux/slab.h>
#include <linux/in.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal functions. */
static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);

/* 1st Level Abstractions. */

/* Initialize a new association from provided memory. */
static struct sctp_association *sctp_association_init(
                                        struct sctp_association *asoc,
                                        const struct sctp_endpoint *ep,
                                        const struct sock *sk,
                                        enum sctp_scope scope, gfp_t gfp)
{
        struct net *net = sock_net(sk);
        struct sctp_sock *sp;
        struct sctp_paramhdr *p;
        int i;

        /* Retrieve the SCTP per socket area. */
        sp = sctp_sk((struct sock *)sk);

        /* Discarding const is appropriate here. */
        asoc->ep = (struct sctp_endpoint *)ep;
        asoc->base.sk = (struct sock *)sk;

        sctp_endpoint_hold(asoc->ep);
        sock_hold(asoc->base.sk);

        /* Initialize the common base substructure. */
        asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

        /* Initialize the object handling fields. */
        refcount_set(&asoc->base.refcnt, 1);

        /* Initialize the bind addr area. */
        sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

        asoc->state = SCTP_STATE_CLOSED;
        asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
        asoc->user_frag = sp->user_frag;

        /* Set the association max_retrans and RTO values from the
         * socket values.
         */
        asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
        asoc->pf_retrans = sp->pf_retrans;

        asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
        asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
        asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

        /* Initialize the association's heartbeat interval based on the
         * sock configured value.
         */
        asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
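
        /* Initialize path max retrans value (Path.Max.Retrans, whose
         * recommended default is 5 attempts per destination; see
         * RFC 4960, Section 15).
         */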
        asoc->pathmaxrxt = sp->pathmaxrxt;

        asoc->flowlabel = sp->flowlabel;
        asoc->dscp = sp->dscp;

        /* Set association default SACK delay */
        asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
        asoc->sackfreq = sp->sackfreq;

        /* Set the association default flags controlling
         * Heartbeat, SACK delay, and Path MTU Discovery.
         */
        asoc->param_flags = sp->param_flags;

        /* Initialize the maximum number of new data packets that can be sent
         * in a burst.
         */
        asoc->max_burst = sp->max_burst;

        asoc->subscribe = sp->subscribe;

        /* initialize association timers */
        asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;

        /* sctpimpguide Section 2.12.2
         * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
         * recommended value of 5 times 'RTO.Max'.
         */
        asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
                = 5 * asoc->rto_max;

        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;

        /* Initializes the timers */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
                timer_setup(&asoc->timers[i], sctp_timer_events[i], 0);

        /* Pull default initialization values from the sock options.
         * Note: This assumes that the values have already been
         * validated in the sock.
         */
        asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
        asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
        asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;

        asoc->max_init_timeo =
                msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

        /* Set the local window size for receive.
         * This is also the rcvbuf space per association.
         * RFC 2960, Section 6.1 - An SCTP receiver MUST be able to
         * receive a minimum of 1500 bytes in one SCTP packet.
         */
        if ((sk->sk_rcvbuf / 2) < SCTP_DEFAULT_MINWINDOW)
                asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
        else
                asoc->rwnd = sk->sk_rcvbuf / 2;

        asoc->a_rwnd = asoc->rwnd;

        /* Use my own max window until I learn something better. */
        asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

        /* Initialize the receive memory counter */
        atomic_set(&asoc->rmem_alloc, 0);

        init_waitqueue_head(&asoc->wait);

        asoc->c.my_vtag = sctp_generate_tag(ep);
        asoc->c.my_port = ep->base.bind_addr.port;

        asoc->c.initial_tsn = sctp_generate_tsn(ep);

        asoc->next_tsn = asoc->c.initial_tsn;

        asoc->ctsn_ack_point = asoc->next_tsn - 1;
        asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
        asoc->highest_sacked = asoc->ctsn_ack_point;
        asoc->last_cwr_tsn = asoc->ctsn_ack_point;

        /* ADDIP Section 4.1 Asconf Chunk Procedures
         *
         * When an endpoint has an ASCONF signaled change to be sent to the
         * remote endpoint it should do the following:
         * ...
         * A2) a serial number should be assigned to the chunk. The serial
         * number SHOULD be a monotonically increasing number. The serial
         * numbers SHOULD be initialized at the start of the
         * association to the same value as the initial TSN.
         */
        asoc->addip_serial = asoc->c.initial_tsn;
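        /* Stream reconfiguration (RFC 6525) request sequence numbers
         * likewise start from the initial TSN.
         */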
        asoc->strreset_outseq = asoc->c.initial_tsn;

        INIT_LIST_HEAD(&asoc->addip_chunk_list);
        INIT_LIST_HEAD(&asoc->asconf_ack_list);

        /* Make an empty list of remote transport addresses. */
        INIT_LIST_HEAD(&asoc->peer.transport_addr_list);

        /* RFC 2960 5.1 Normal Establishment of an Association
         *
         * After the reception of the first data chunk in an
         * association the endpoint must immediately respond with a
         * sack to acknowledge the data chunk. Subsequent
         * acknowledgements should be done as described in Section
         * 6.2.
         *
         * [We implement this by telling a new association that it
         * already received one packet.]
         */
        asoc->peer.sack_needed = 1;
        asoc->peer.sack_generation = 1;

        /* Assume that the peer will tell us if he recognizes ASCONF
         * as part of INIT exchange.
         * The sctp_addip_noauth option is there for backward compatibility
         * and will revert to the old behavior.
         */
        if (net->sctp.addip_noauth)
                asoc->peer.asconf_capable = 1;

        /* Create an input queue. */
        sctp_inq_init(&asoc->base.inqueue);
        sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

        /* Create an output queue. */
        sctp_outq_init(asoc, &asoc->outqueue);

        if (!sctp_ulpq_init(&asoc->ulpq, asoc))
                goto fail_init;

        if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
                             0, gfp))
                goto fail_init;

        /* Initialize default path MTU. */
        asoc->pathmtu = sp->pathmtu;
        sctp_assoc_update_frag_point(asoc);

        /* Assume that peer would support both address types unless we are
         * told otherwise.
         */
        asoc->peer.ipv4_address = 1;
        if (asoc->base.sk->sk_family == PF_INET6)
                asoc->peer.ipv6_address = 1;
        INIT_LIST_HEAD(&asoc->asocs);

        asoc->default_stream = sp->default_stream;
        asoc->default_ppid = sp->default_ppid;
        asoc->default_flags = sp->default_flags;
        asoc->default_context = sp->default_context;
        asoc->default_timetolive = sp->default_timetolive;
        asoc->default_rcv_context = sp->default_rcv_context;

        /* AUTH related initializations */
        INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
        if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
                goto stream_free;

        asoc->active_key_id = ep->active_key_id;
        asoc->prsctp_enable = ep->prsctp_enable;
        asoc->reconf_enable = ep->reconf_enable;
        asoc->strreset_enable = ep->strreset_enable;

        /* Save the hmacs and chunks list into this association */
        if (ep->auth_hmacs_list)
                memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
                        ntohs(ep->auth_hmacs_list->param_hdr.length));
        if (ep->auth_chunk_list)
                memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
                        ntohs(ep->auth_chunk_list->param_hdr.length));

        /* Get the AUTH random number for this association */
        p = (struct sctp_paramhdr *)asoc->c.auth_random;
        p->type = SCTP_PARAM_RANDOM;
        p->length = htons(sizeof(*p) + SCTP_AUTH_RANDOM_LENGTH);
        get_random_bytes(p + 1, SCTP_AUTH_RANDOM_LENGTH);

        return asoc;

stream_free:
        sctp_stream_free(&asoc->stream);
fail_init:
        sock_put(asoc->base.sk);
        sctp_endpoint_put(asoc->ep);
        return NULL;
}

/* Allocate and initialize a new association */
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
                                              const struct sock *sk,
                                              enum sctp_scope scope, gfp_t gfp)
{
        struct sctp_association *asoc;

        asoc = kzalloc(sizeof(*asoc), gfp);
        if (!asoc)
                goto fail;

        if (!sctp_association_init(asoc, ep, sk, scope, gfp))
                goto fail_init;

        SCTP_DBG_OBJCNT_INC(assoc);

        pr_debug("Created asoc %p\n", asoc);

        return asoc;

fail_init:
        kfree(asoc);
fail:
        return NULL;
}

/* Free this association if possible. There may still be users, so
 * the actual deallocation may be delayed.
 */
void sctp_association_free(struct sctp_association *asoc)
{
        struct sock *sk = asoc->base.sk;
        struct sctp_transport *transport;
        struct list_head *pos, *temp;
        int i;

        /* Only real associations count against the endpoint, so
         * don't bother if this is a temporary association.
         */
        if (!list_empty(&asoc->asocs)) {
                list_del(&asoc->asocs);

                /* Decrement the backlog value for a TCP-style listening
                 * socket.
                 */
                if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
                        sk->sk_ack_backlog--;
        }

        /* Mark as dead, so other users can know this structure is
         * going away.
         */
        asoc->base.dead = true;

        /* Dispose of any data lying around in the outqueue. */
        sctp_outq_free(&asoc->outqueue);

        /* Dispose of any pending messages for the upper layer. */
        sctp_ulpq_free(&asoc->ulpq);

        /* Dispose of any pending chunks on the inqueue. */
        sctp_inq_free(&asoc->base.inqueue);

        sctp_tsnmap_free(&asoc->peer.tsn_map);

        /* Free stream information. */
        sctp_stream_free(&asoc->stream);

        if (asoc->strreset_chunk)
                sctp_chunk_free(asoc->strreset_chunk);

        /* Clean up the bound address list. */
        sctp_bind_addr_free(&asoc->base.bind_addr);

        /* Do we need to go through all of our timers and
         * delete them? To be safe we will try to delete all, but we
         * should be able to go through and make a guess based
         * on our state.
         */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
                if (del_timer(&asoc->timers[i]))
                        sctp_association_put(asoc);
        }

        /* Free peer's cached cookie. */
        kfree(asoc->peer.cookie);
        kfree(asoc->peer.peer_random);
        kfree(asoc->peer.peer_chunks);
        kfree(asoc->peer.peer_hmacs);

        /* Release the transport structures. */
        list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
                transport = list_entry(pos, struct sctp_transport, transports);
                list_del_rcu(pos);
                sctp_unhash_transport(transport);
                sctp_transport_free(transport);
        }

        asoc->peer.transport_count = 0;

        sctp_asconf_queue_teardown(asoc);

        /* Free pending address space being deleted */
        kfree(asoc->asconf_addr_del_pending);

        /* AUTH - Free the endpoint shared keys */
        sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

        /* AUTH - Free the association shared key */
        sctp_auth_key_put(asoc->asoc_shared_key);

        sctp_association_put(asoc);
}
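
/* Cleanup and free up an association.
 *
 * Reached only via sctp_association_put() once the last reference
 * has been dropped, so no concurrent users can remain.
 */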
static void sctp_association_destroy(struct sctp_association *asoc)
{
        if (unlikely(!asoc->base.dead)) {
                WARN(1, "Attempt to destroy undead association %p!\n", asoc);
                return;
        }

        sctp_endpoint_put(asoc->ep);
        sock_put(asoc->base.sk);

        if (asoc->assoc_id != 0) {
                spin_lock_bh(&sctp_assocs_id_lock);
                idr_remove(&sctp_assocs_id, asoc->assoc_id);
                spin_unlock_bh(&sctp_assocs_id_lock);
        }

        WARN_ON(atomic_read(&asoc->rmem_alloc));

        kfree_rcu(asoc, rcu);
        SCTP_DBG_OBJCNT_DEC(assoc);
}

/* Change the primary destination address for the peer. */
void sctp_assoc_set_primary(struct sctp_association *asoc,
                            struct sctp_transport *transport)
{
        int changeover = 0;

        /* It's a changeover only if we already have a primary path
         * that we are changing.
         */
        if (asoc->peer.primary_path != NULL &&
            asoc->peer.primary_path != transport)
                changeover = 1;

        asoc->peer.primary_path = transport;

        /* Set a default msg_name for events. */
        memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
               sizeof(union sctp_addr));

        /* If the primary path is changing, assume that the
         * user wants to use this new path.
         */
        if ((transport->state == SCTP_ACTIVE) ||
            (transport->state == SCTP_UNKNOWN))
                asoc->peer.active_path = transport;

        /*
         * SFR-CACC algorithm:
         * Upon the receipt of a request to change the primary
         * destination address, on the data structure for the new
         * primary destination, the sender MUST do the following:
         *
         * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
         * to this destination address earlier. The sender MUST set
         * CYCLING_CHANGEOVER to indicate that this switch is a
         * double switch to the same destination address.
         *
         * Really, only bother if we have data queued or outstanding on
         * the association.
         */
        if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
                return;

        if (transport->cacc.changeover_active)
                transport->cacc.cycling_changeover = changeover;

        /* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
         * a changeover has occurred.
         */
        transport->cacc.changeover_active = changeover;

        /* 3) The sender MUST store the next TSN to be sent in
         * next_tsn_at_change.
         */
        transport->cacc.next_tsn_at_change = asoc->next_tsn;
}

/* Remove a transport from an association. */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
                        struct sctp_transport *peer)
{
        struct sctp_transport *transport;
        struct list_head *pos;
        struct sctp_chunk *ch;

        pr_debug("%s: association:%p addr:%pISpc\n",
                 __func__, asoc, &peer->ipaddr.sa);

        /* If we are to remove the current retran_path, update it
         * to the next peer before removing this peer from the list.
         */
        if (asoc->peer.retran_path == peer)
                sctp_assoc_update_retran_path(asoc);

        /* Remove this peer from the list. */
        list_del_rcu(&peer->transports);
        /* Remove this peer from the transport hashtable */
        sctp_unhash_transport(peer);

        /* Get the first transport of asoc. */
        pos = asoc->peer.transport_addr_list.next;
        transport = list_entry(pos, struct sctp_transport, transports);

        /* Update any entries that match the peer to be deleted. */
        if (asoc->peer.primary_path == peer)
                sctp_assoc_set_primary(asoc, transport);
        if (asoc->peer.active_path == peer)
                asoc->peer.active_path = transport;
        if (asoc->peer.retran_path == peer)
                asoc->peer.retran_path = transport;
        if (asoc->peer.last_data_from == peer)
                asoc->peer.last_data_from = transport;

        if (asoc->strreset_chunk &&
            asoc->strreset_chunk->transport == peer) {
                asoc->strreset_chunk->transport = transport;
                sctp_transport_reset_reconf_timer(transport);
        }

        /* If we remove the transport an INIT was last sent to, set it to
         * NULL. Combined with the update of the retran path above, this
         * will cause the next INIT to be sent to the next available
         * transport, maintaining the cycle.
         */
        if (asoc->init_last_sent_to == peer)
                asoc->init_last_sent_to = NULL;

        /* If we remove the transport an SHUTDOWN was last sent to, set it
         * to NULL. Combined with the update of the retran path above, this
         * will cause the next SHUTDOWN to be sent to the next available
         * transport, maintaining the cycle.
         */
        if (asoc->shutdown_last_sent_to == peer)
                asoc->shutdown_last_sent_to = NULL;

        /* If we remove the transport an ASCONF was last sent to, set it to
         * NULL.
         */
        if (asoc->addip_last_asconf &&
            asoc->addip_last_asconf->transport == peer)
                asoc->addip_last_asconf->transport = NULL;

        /* If we have something on the transmitted list, we have to
         * save it off. The best place is the active path.
         */
        if (!list_empty(&peer->transmitted)) {
                struct sctp_transport *active = asoc->peer.active_path;

                /* Reset the transport of each chunk on this list */
                list_for_each_entry(ch, &peer->transmitted,
                                    transmitted_list) {
                        ch->transport = NULL;
                        ch->rtt_in_progress = 0;
                }

                list_splice_tail_init(&peer->transmitted,
                                      &active->transmitted);

                /* Start a T3 timer here in case it wasn't running so
                 * that these migrated packets have a chance to get
                 * retransmitted.
                 */
                if (!timer_pending(&active->T3_rtx_timer))
                        if (!mod_timer(&active->T3_rtx_timer,
                                       jiffies + active->rto))
                                sctp_transport_hold(active);
        }

        list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
                if (ch->transport == peer)
                        ch->transport = NULL;

        asoc->peer.transport_count--;

        sctp_transport_free(peer);
}

/* Add a transport address to an association. */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
                                           const union sctp_addr *addr,
                                           const gfp_t gfp,
                                           const int peer_state)
{
        struct net *net = sock_net(asoc->base.sk);
        struct sctp_transport *peer;
        struct sctp_sock *sp;
        unsigned short port;

        sp = sctp_sk(asoc->base.sk);

        /* AF_INET and AF_INET6 share common port field. */
        port = ntohs(addr->v4.sin_port);

        pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
                 asoc, &addr->sa, peer_state);

        /* Set the port if it has not been set yet. */
        if (0 == asoc->peer.port)
                asoc->peer.port = port;

        /* Check to see if this is a duplicate. */
        peer = sctp_assoc_lookup_paddr(asoc, addr);
        if (peer) {
                /* An UNKNOWN state is only set on transports added by
                 * user in sctp_connectx() call. Such transports should be
                 * considered CONFIRMED per RFC 4960, Section 5.4.
                 */
                if (peer->state == SCTP_UNKNOWN)
                        peer->state = SCTP_ACTIVE;
                return peer;
        }

        peer = sctp_transport_new(net, addr, gfp);
        if (!peer)
                return NULL;

        sctp_transport_set_owner(peer, asoc);

        /* Initialize the peer's heartbeat interval based on the
         * association configured value.
         */
        peer->hbinterval = asoc->hbinterval;

        /* Set the path max_retrans. */
        peer->pathmaxrxt = asoc->pathmaxrxt;

        /* And the partial failure retrans threshold */
        peer->pf_retrans = asoc->pf_retrans;

        /* Initialize the peer's SACK delay timeout based on the
         * association configured value.
         */
        peer->sackdelay = asoc->sackdelay;
        peer->sackfreq = asoc->sackfreq;

        if (addr->sa.sa_family == AF_INET6) {
                __be32 info = addr->v6.sin6_flowinfo;

                if (info) {
                        peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
                        peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
                } else {
                        peer->flowlabel = asoc->flowlabel;
                }
        }
        peer->dscp = asoc->dscp;

        /* Enable/disable heartbeat, SACK delay, and path MTU discovery
         * based on association setting.
         */
        peer->param_flags = asoc->param_flags;

        /* Initialize the pmtu of the transport. */
        sctp_transport_route(peer, NULL, sp);

        /* If this is the first transport addr on this association,
         * initialize the association PMTU to the peer's PMTU.
         * If not and the current association PMTU is higher than the new
         * peer's PMTU, reset the association PMTU to the new peer's PMTU.
         */
        sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
                                  min_t(int, peer->pathmtu, asoc->pathmtu) :
                                  peer->pathmtu);

        peer->pmtu_pending = 0;

        /* The asoc->peer.port might not be meaningful yet, but
         * initialize the packet structure anyway.
         */
        sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
                         asoc->peer.port);

        /* 7.2.1 Slow-Start
         *
         * o The initial cwnd before DATA transmission or after a sufficiently
         *   long idle period MUST be set to
         *      min(4*MTU, max(2*MTU, 4380 bytes))
         *
         * o The initial value of ssthresh MAY be arbitrarily high
         *   (for example, implementations MAY use the size of the
         *   receiver advertised window).
         */
        peer->cwnd = min(4 * asoc->pathmtu, max_t(__u32, 2 * asoc->pathmtu, 4380));

        /* At this point, we may not have the receiver's advertised window,
         * so initialize ssthresh to the default value and it will be set
         * later when we process the INIT.
         */
        peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

        peer->partial_bytes_acked = 0;
        peer->flight_size = 0;
        peer->burst_limited = 0;

        /* Set the transport's RTO.initial value */
        peer->rto = asoc->rto_initial;
        sctp_max_rto(asoc, peer);

        /* Set the peer's active state. */
        peer->state = peer_state;

        /* Add this peer into the transport hashtable */
        if (sctp_hash_transport(peer)) {
                sctp_transport_free(peer);
                return NULL;
        }

        /* Attach the remote transport to our asoc. */
        list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
        asoc->peer.transport_count++;

        /* If we do not yet have a primary path, set one. */
        if (!asoc->peer.primary_path) {
                sctp_assoc_set_primary(asoc, peer);
                asoc->peer.retran_path = peer;
        }
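
        /* If the active path has doubled as the retran path so far,
         * prefer this newly added (confirmed) peer for retransmissions,
         * so that data and retransmissions can take divergent paths in
         * the spirit of RFC 4960, Section 6.4.
         */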
        if (asoc->peer.active_path == asoc->peer.retran_path &&
            peer->state != SCTP_UNCONFIRMED) {
                asoc->peer.retran_path = peer;
        }

        return peer;
}

/* Delete a transport address from an association. */
void sctp_assoc_del_peer(struct sctp_association *asoc,
                         const union sctp_addr *addr)
{
        struct list_head *pos;
        struct list_head *temp;
        struct sctp_transport *transport;

        list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
                transport = list_entry(pos, struct sctp_transport, transports);
                if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
                        /* Do book keeping for removing the peer and free it. */
                        sctp_assoc_rm_peer(asoc, transport);
                        break;
                }
        }
}

/* Lookup a transport by address. */
struct sctp_transport *sctp_assoc_lookup_paddr(
                                        const struct sctp_association *asoc,
                                        const union sctp_addr *address)
{
        struct sctp_transport *t;

        /* Cycle through all transports searching for a peer address. */

        list_for_each_entry(t, &asoc->peer.transport_addr_list,
                            transports) {
                if (sctp_cmp_addr_exact(address, &t->ipaddr))
                        return t;
        }

        return NULL;
}

/* Remove all transports except a given one */
void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
                                     struct sctp_transport *primary)
{
        struct sctp_transport *temp;
        struct sctp_transport *t;

        list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
                                 transports) {
                /* if the current transport is not the primary one, delete it */
                if (t != primary)
                        sctp_assoc_rm_peer(asoc, t);
        }
}

/* Engage in transport control operations.
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
                                  struct sctp_transport *transport,
                                  enum sctp_transport_cmd command,
                                  sctp_sn_error_t error)
{
        struct sctp_ulpevent *event;
        struct sockaddr_storage addr;
        int spc_state = 0;
        bool ulp_notify = true;

        /* Record the transition on the transport. */
        switch (command) {
        case SCTP_TRANSPORT_UP:
                /* If we are moving from UNCONFIRMED state due
                 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
                 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
                 */
                if (SCTP_UNCONFIRMED == transport->state &&
                    SCTP_HEARTBEAT_SUCCESS == error)
                        spc_state = SCTP_ADDR_CONFIRMED;
                else
                        spc_state = SCTP_ADDR_AVAILABLE;
                /* Don't inform ULP about transition from PF to
                 * active state and set cwnd to 1 MTU, see SCTP
                 * Quick failover draft section 5.1, point 5
                 */
                if (transport->state == SCTP_PF) {
                        ulp_notify = false;
                        transport->cwnd = asoc->pathmtu;
                }
                transport->state = SCTP_ACTIVE;
                break;

        case SCTP_TRANSPORT_DOWN:
                /* If the transport was never confirmed, do not transition it
                 * to inactive state. Also, release the cached route since
                 * there may be a better route next time.
                 */
                if (transport->state != SCTP_UNCONFIRMED)
                        transport->state = SCTP_INACTIVE;
                else {
                        sctp_transport_dst_release(transport);
                        ulp_notify = false;
                }

                spc_state = SCTP_ADDR_UNREACHABLE;
                break;

        case SCTP_TRANSPORT_PF:
                transport->state = SCTP_PF;
                ulp_notify = false;
                break;

        default:
                return;
        }

        /* Generate and send a SCTP_PEER_ADDR_CHANGE notification
         * to the user.
         */
        if (ulp_notify) {
                memset(&addr, 0, sizeof(struct sockaddr_storage));
                memcpy(&addr, &transport->ipaddr,
                       transport->af_specific->sockaddr_len);

                event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
                                        0, spc_state, error, GFP_ATOMIC);
                if (event)
                        asoc->stream.si->enqueue_event(&asoc->ulpq, event);
        }

        /* Select new active and retran paths. */
        sctp_select_active_and_retran_path(asoc);
}

/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
        refcount_inc(&asoc->base.refcnt);
}

/* Release a reference to an association and cleanup
 * if there are no more references.
 */
void sctp_association_put(struct sctp_association *asoc)
{
        if (refcount_dec_and_test(&asoc->base.refcnt))
                sctp_association_destroy(asoc);
}

/* Allocate the next TSN, Transmission Sequence Number, for the given
 * association.
 */
__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
{
        /* From Section 1.6 Serial Number Arithmetic:
         * Transmission Sequence Numbers wrap around when they reach
         * 2**32 - 1. That is, the next TSN a DATA chunk MUST use
         * after transmitting TSN = 2**32 - 1 is TSN = 0.
         */
        __u32 retval = asoc->next_tsn;

        asoc->next_tsn++;
        asoc->unack_data++;

        return retval;
}

/* Compare two addresses to see if they match. Wildcard addresses
 * only match themselves.
 */
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
                        const union sctp_addr *ss2)
{
        struct sctp_af *af;

        af = sctp_get_af_specific(ss1->sa.sa_family);
        if (unlikely(!af))
                return 0;

        return af->cmp_addr(ss1, ss2);
}

/* Return an ecne chunk to get prepended to a packet.
 * Note: We are sly and return a shared, prealloced chunk. FIXME:
 * No we don't, but we could/should.
 */
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
        if (!asoc->need_ecne)
                return NULL;

        /* Send ECNE if needed.
         * Not being able to allocate a chunk here is not deadly.
         */
        return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
}

/*
 * Find which transport this TSN was sent on.
 */
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
                                             __u32 tsn)
{
        struct sctp_transport *active;
        struct sctp_transport *match;
        struct sctp_transport *transport;
        struct sctp_chunk *chunk;
        __be32 key = htonl(tsn);

        match = NULL;

        /*
         * FIXME: In general, find a more efficient data structure for
         * searching.
         */

        /*
         * The general strategy is to search each transport's transmitted
         * list. Return which transport this TSN lives on.
         *
         * Let's be hopeful and check the active_path first.
         * Another optimization would be to know if there is only one
         * outbound path and not have to look for the TSN at all.
         */
953 * 954 */ 955 956 active = asoc->peer.active_path; 957 958 list_for_each_entry(chunk, &active->transmitted, 959 transmitted_list) { 960 961 if (key == chunk->subh.data_hdr->tsn) { 962 match = active; 963 goto out; 964 } 965 } 966 967 /* If not found, go search all the other transports. */ 968 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 969 transports) { 970 971 if (transport == active) 972 continue; 973 list_for_each_entry(chunk, &transport->transmitted, 974 transmitted_list) { 975 if (key == chunk->subh.data_hdr->tsn) { 976 match = transport; 977 goto out; 978 } 979 } 980 } 981 out: 982 return match; 983 } 984 985 /* Do delayed input processing. This is scheduled by sctp_rcv(). */ 986 static void sctp_assoc_bh_rcv(struct work_struct *work) 987 { 988 struct sctp_association *asoc = 989 container_of(work, struct sctp_association, 990 base.inqueue.immediate); 991 struct net *net = sock_net(asoc->base.sk); 992 union sctp_subtype subtype; 993 struct sctp_endpoint *ep; 994 struct sctp_chunk *chunk; 995 struct sctp_inq *inqueue; 996 int first_time = 1; /* is this the first time through the loop */ 997 int error = 0; 998 int state; 999 1000 /* The association should be held so we should be safe. */ 1001 ep = asoc->ep; 1002 1003 inqueue = &asoc->base.inqueue; 1004 sctp_association_hold(asoc); 1005 while (NULL != (chunk = sctp_inq_pop(inqueue))) { 1006 state = asoc->state; 1007 subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); 1008 1009 /* If the first chunk in the packet is AUTH, do special 1010 * processing specified in Section 6.3 of SCTP-AUTH spec 1011 */ 1012 if (first_time && subtype.chunk == SCTP_CID_AUTH) { 1013 struct sctp_chunkhdr *next_hdr; 1014 1015 next_hdr = sctp_inq_peek(inqueue); 1016 if (!next_hdr) 1017 goto normal; 1018 1019 /* If the next chunk is COOKIE-ECHO, skip the AUTH 1020 * chunk while saving a pointer to it so we can do 1021 * Authentication later (during cookie-echo 1022 * processing). 1023 */ 1024 if (next_hdr->type == SCTP_CID_COOKIE_ECHO) { 1025 chunk->auth_chunk = skb_clone(chunk->skb, 1026 GFP_ATOMIC); 1027 chunk->auth = 1; 1028 continue; 1029 } 1030 } 1031 1032 normal: 1033 /* SCTP-AUTH, Section 6.3: 1034 * The receiver has a list of chunk types which it expects 1035 * to be received only after an AUTH-chunk. This list has 1036 * been sent to the peer during the association setup. It 1037 * MUST silently discard these chunks if they are not placed 1038 * after an AUTH chunk in the packet. 1039 */ 1040 if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth) 1041 continue; 1042 1043 /* Remember where the last DATA chunk came from so we 1044 * know where to send the SACK. 1045 */ 1046 if (sctp_chunk_is_data(chunk)) 1047 asoc->peer.last_data_from = chunk->transport; 1048 else { 1049 SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS); 1050 asoc->stats.ictrlchunks++; 1051 if (chunk->chunk_hdr->type == SCTP_CID_SACK) 1052 asoc->stats.isacks++; 1053 } 1054 1055 if (chunk->transport) 1056 chunk->transport->last_time_heard = ktime_get(); 1057 1058 /* Run through the state machine. */ 1059 error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, 1060 state, ep, asoc, chunk, GFP_ATOMIC); 1061 1062 /* Check to see if the association is freed in response to 1063 * the incoming chunk. If so, get out of the while loop. 1064 */ 1065 if (asoc->base.dead) 1066 break; 1067 1068 /* If there is an error on chunk, discard this packet. 
                if (error && chunk)
                        chunk->pdiscard = 1;

                if (first_time)
                        first_time = 0;
        }
        sctp_association_put(asoc);
}

/* This routine moves an association from its old sk to a new sk. */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
        struct sctp_sock *newsp = sctp_sk(newsk);
        struct sock *oldsk = assoc->base.sk;

        /* Delete the association from the old endpoint's list of
         * associations.
         */
        list_del_init(&assoc->asocs);

        /* Decrement the backlog value for a TCP-style socket. */
        if (sctp_style(oldsk, TCP))
                oldsk->sk_ack_backlog--;

        /* Release references to the old endpoint and the sock. */
        sctp_endpoint_put(assoc->ep);
        sock_put(assoc->base.sk);

        /* Get a reference to the new endpoint. */
        assoc->ep = newsp->ep;
        sctp_endpoint_hold(assoc->ep);

        /* Get a reference to the new sock. */
        assoc->base.sk = newsk;
        sock_hold(assoc->base.sk);

        /* Add the association to the new endpoint's list of associations. */
        sctp_endpoint_add_asoc(newsp->ep, assoc);
}

/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
int sctp_assoc_update(struct sctp_association *asoc,
                      struct sctp_association *new)
{
        struct sctp_transport *trans;
        struct list_head *pos, *temp;

        /* Copy in new parameters of peer. */
        asoc->c = new->c;
        asoc->peer.rwnd = new->peer.rwnd;
        asoc->peer.sack_needed = new->peer.sack_needed;
        asoc->peer.auth_capable = new->peer.auth_capable;
        asoc->peer.i = new->peer.i;

        if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
                              asoc->peer.i.initial_tsn, GFP_ATOMIC))
                return -ENOMEM;

        /* Remove any peer addresses not present in the new association. */
        list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
                trans = list_entry(pos, struct sctp_transport, transports);
                if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
                        sctp_assoc_rm_peer(asoc, trans);
                        continue;
                }

                if (asoc->state >= SCTP_STATE_ESTABLISHED)
                        sctp_transport_reset(trans);
        }

        /* If the case is A (association restart), use
         * initial_tsn as next_tsn. If the case is B, use
         * current next_tsn in case data sent to peer
         * has been discarded and needs retransmission.
         */
        if (asoc->state >= SCTP_STATE_ESTABLISHED) {
                asoc->next_tsn = new->next_tsn;
                asoc->ctsn_ack_point = new->ctsn_ack_point;
                asoc->adv_peer_ack_point = new->adv_peer_ack_point;

                /* Reinitialize SSN for both local streams
                 * and peer's streams.
                 */
                sctp_stream_clear(&asoc->stream);

                /* Flush the ULP reassembly and ordered queue.
                 * Any data there will now be stale and will
                 * cause problems.
                 */
                sctp_ulpq_flush(&asoc->ulpq);

                /* reset the overall association error count so
                 * that the restarted association doesn't get torn
                 * down on the next retransmission timer.
                 */
                asoc->overall_error_count = 0;

        } else {
                /* Add any peer addresses from the new association. */
                list_for_each_entry(trans, &new->peer.transport_addr_list,
                                    transports)
                        if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
                            !sctp_assoc_add_peer(asoc, &trans->ipaddr,
                                                 GFP_ATOMIC, trans->state))
                                return -ENOMEM;

                asoc->ctsn_ack_point = asoc->next_tsn - 1;
                asoc->adv_peer_ack_point = asoc->ctsn_ack_point;

                if (sctp_state(asoc, COOKIE_WAIT))
                        sctp_stream_update(&asoc->stream, &new->stream);

                /* get a new assoc id if we don't have one yet. */
                if (sctp_assoc_set_id(asoc, GFP_ATOMIC))
                        return -ENOMEM;
        }

        /* SCTP-AUTH: Save the peer parameters from the new associations
         * and also move the association shared keys over
         */
        kfree(asoc->peer.peer_random);
        asoc->peer.peer_random = new->peer.peer_random;
        new->peer.peer_random = NULL;

        kfree(asoc->peer.peer_chunks);
        asoc->peer.peer_chunks = new->peer.peer_chunks;
        new->peer.peer_chunks = NULL;

        kfree(asoc->peer.peer_hmacs);
        asoc->peer.peer_hmacs = new->peer.peer_hmacs;
        new->peer.peer_hmacs = NULL;

        return sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}

/* Update the retran path for sending a retransmitted packet.
 * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
 *
 *   When there is outbound data to send and the primary path
 *   becomes inactive (e.g., due to failures), or where the
 *   SCTP user explicitly requests to send data to an
 *   inactive destination transport address, before reporting
 *   an error to its ULP, the SCTP endpoint should try to send
 *   the data to an alternate active destination transport
 *   address if one exists.
 *
 *   When retransmitting data that timed out, if the endpoint
 *   is multihomed, it should consider each source-destination
 *   address pair in its retransmission selection policy.
 *   When retransmitting timed-out data, the endpoint should
 *   attempt to pick the most divergent source-destination
 *   pair from the original source-destination pair to which
 *   the packet was transmitted.
 *
 *   Note: Rules for picking the most divergent source-destination
 *   pair are an implementation decision and are not specified
 *   within this document.
 *
 * Our basic strategy is to round-robin transports in priorities
 * according to sctp_trans_score() e.g., if no such
 * transport with state SCTP_ACTIVE exists, round-robin through
 * SCTP_UNKNOWN, etc. You get the picture.
 */
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
        switch (trans->state) {
        case SCTP_ACTIVE:
                return 3;       /* best case */
        case SCTP_UNKNOWN:
                return 2;
        case SCTP_PF:
                return 1;
        default: /* case SCTP_INACTIVE */
                return 0;       /* worst case */
        }
}
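
/* Break a tie between two transports with the same state score:
 * the one with fewer errors wins; if the error counts also match,
 * the transport heard from most recently wins.
 */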
static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
                                                   struct sctp_transport *trans2)
{
        if (trans1->error_count > trans2->error_count) {
                return trans2;
        } else if (trans1->error_count == trans2->error_count &&
                   ktime_after(trans2->last_time_heard,
                               trans1->last_time_heard)) {
                return trans2;
        } else {
                return trans1;
        }
}

static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
                                                    struct sctp_transport *best)
{
        u8 score_curr, score_best;

        if (best == NULL || curr == best)
                return curr;

        score_curr = sctp_trans_score(curr);
        score_best = sctp_trans_score(best);

        /* First, try a score-based selection if both transport states
         * differ. If we're in a tie, let's try to make a more clever
         * decision here based on error counts and last time heard.
         */
        if (score_curr > score_best)
                return curr;
        else if (score_curr == score_best)
                return sctp_trans_elect_tie(best, curr);
        else
                return best;
}

void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
        struct sctp_transport *trans = asoc->peer.retran_path;
        struct sctp_transport *trans_next = NULL;

        /* We're done as we only have the one and only path. */
        if (asoc->peer.transport_count == 1)
                return;
        /* If active_path and retran_path are the same and active,
         * then this is the only active path. Use it.
         */
        if (asoc->peer.active_path == asoc->peer.retran_path &&
            asoc->peer.active_path->state == SCTP_ACTIVE)
                return;

        /* Iterate from retran_path's successor back to retran_path. */
        for (trans = list_next_entry(trans, transports); 1;
             trans = list_next_entry(trans, transports)) {
                /* Manually skip the head element. */
                if (&trans->transports == &asoc->peer.transport_addr_list)
                        continue;
                if (trans->state == SCTP_UNCONFIRMED)
                        continue;
                trans_next = sctp_trans_elect_best(trans, trans_next);
                /* Active is good enough for immediate return. */
                if (trans_next->state == SCTP_ACTIVE)
                        break;
                /* We've reached the end, time to update path. */
                if (trans == asoc->peer.retran_path)
                        break;
        }

        asoc->peer.retran_path = trans_next;

        pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
                 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
}

static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
{
        struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
        struct sctp_transport *trans_pf = NULL;

        /* Look for the two most recently used active transports. */
        list_for_each_entry(trans, &asoc->peer.transport_addr_list,
                            transports) {
                /* Skip uninteresting transports. */
                if (trans->state == SCTP_INACTIVE ||
                    trans->state == SCTP_UNCONFIRMED)
                        continue;
                /* Keep track of the best PF transport from our
                 * list in case we don't find an active one.
                 */
                if (trans->state == SCTP_PF) {
                        trans_pf = sctp_trans_elect_best(trans, trans_pf);
                        continue;
                }
                /* For active transports, pick the most recent ones. */
                if (trans_pri == NULL ||
                    ktime_after(trans->last_time_heard,
                                trans_pri->last_time_heard)) {
                        trans_sec = trans_pri;
                        trans_pri = trans;
                } else if (trans_sec == NULL ||
                           ktime_after(trans->last_time_heard,
                                       trans_sec->last_time_heard)) {
                        trans_sec = trans;
                }
        }

        /* RFC 2960 6.4 Multi-Homed SCTP Endpoints
         *
         * By default, an endpoint should always transmit to the primary
         * path, unless the SCTP user explicitly specifies the
         * destination transport address (and possibly source transport
         * address) to use. [If the primary is active but not most recent,
         * bump the most recently used transport.]
         */
        if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
             asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
            asoc->peer.primary_path != trans_pri) {
                trans_sec = trans_pri;
                trans_pri = asoc->peer.primary_path;
        }

        /* We did not find anything useful for a possible retransmission
         * path; either the primary path that we found is the same as
         * the current one, or we didn't generally find an active one.
         */
        if (trans_sec == NULL)
                trans_sec = trans_pri;

        /* If we failed to find a usable transport, just camp on the
         * active or pick a PF iff it's the better choice.
         */
        if (trans_pri == NULL) {
                trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
                trans_sec = trans_pri;
        }

        /* Set the active and retran transports. */
        asoc->peer.active_path = trans_pri;
        asoc->peer.retran_path = trans_sec;
}

struct sctp_transport *
sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
                                  struct sctp_transport *last_sent_to)
{
        /* If this is the first time packet is sent, use the active path,
         * else use the retran path. If the last packet was sent over the
         * retran path, update the retran path and use it.
         */
        if (last_sent_to == NULL) {
                return asoc->peer.active_path;
        } else {
                if (last_sent_to == asoc->peer.retran_path)
                        sctp_assoc_update_retran_path(asoc);

                return asoc->peer.retran_path;
        }
}

void sctp_assoc_update_frag_point(struct sctp_association *asoc)
{
        int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
                                    sctp_datachk_len(&asoc->stream));

        if (asoc->user_frag)
                frag = min_t(int, frag, asoc->user_frag);

        frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
                                sctp_datachk_len(&asoc->stream));

        asoc->frag_point = SCTP_TRUNC4(frag);
}

void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
{
        if (asoc->pathmtu != pmtu) {
                asoc->pathmtu = pmtu;
                sctp_assoc_update_frag_point(asoc);
        }

        pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
                 asoc->pathmtu, asoc->frag_point);
}

/* Update the association's pmtu and frag_point by going through all the
 * transports. This routine is called when a transport's PMTU has changed.
 */
void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
{
        struct sctp_transport *t;
        __u32 pmtu = 0;

        if (!asoc)
                return;
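
        /* Get the lowest pmtu of all the transports. A PMTU update that
         * arrived via ICMP while the socket was owned by the user is
         * parked in mtu_info with pmtu_pending set, so apply it first
         * to make sure the sample below reflects the latest value.
         */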
        list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
                if (t->pmtu_pending && t->dst) {
                        sctp_transport_update_pmtu(t,
                                                   atomic_read(&t->mtu_info));
                        t->pmtu_pending = 0;
                }
                if (!pmtu || (t->pathmtu < pmtu))
                        pmtu = t->pathmtu;
        }

        sctp_assoc_set_pmtu(asoc, pmtu);
}

/* Should we send a SACK to update our peer? */
static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
{
        struct net *net = sock_net(asoc->base.sk);

        switch (asoc->state) {
        case SCTP_STATE_ESTABLISHED:
        case SCTP_STATE_SHUTDOWN_PENDING:
        case SCTP_STATE_SHUTDOWN_RECEIVED:
        case SCTP_STATE_SHUTDOWN_SENT:
                if ((asoc->rwnd > asoc->a_rwnd) &&
                    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
                           (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
                           asoc->pathmtu)))
                        return true;
                break;
        default:
                break;
        }
        return false;
}

/* Increase asoc's rwnd by len and send any window update SACK if needed. */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
        struct sctp_chunk *sack;
        struct timer_list *timer;

        if (asoc->rwnd_over) {
                if (asoc->rwnd_over >= len) {
                        asoc->rwnd_over -= len;
                } else {
                        asoc->rwnd += (len - asoc->rwnd_over);
                        asoc->rwnd_over = 0;
                }
        } else {
                asoc->rwnd += len;
        }

        /* If we had window pressure, start recovering it
         * once our rwnd had reached the accumulated pressure
         * threshold. The idea is to recover slowly, but up
         * to the initial advertised window.
         */
        if (asoc->rwnd_press) {
                int change = min(asoc->pathmtu, asoc->rwnd_press);

                asoc->rwnd += change;
                asoc->rwnd_press -= change;
        }

        pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
                 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
                 asoc->a_rwnd);

        /* Send a window update SACK if the rwnd has increased by at least the
         * minimum of the association's PMTU and half of the receive buffer.
         * The algorithm used is similar to the one described in
         * Section 4.2.3.3 of RFC 1122.
         */
        if (sctp_peer_needs_update(asoc)) {
                asoc->a_rwnd = asoc->rwnd;

                pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
                         "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
                         asoc->a_rwnd);

                sack = sctp_make_sack(asoc);
                if (!sack)
                        return;

                asoc->peer.sack_needed = 0;

                sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);

                /* Stop the SACK timer. */
                timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
                if (del_timer(timer))
                        sctp_association_put(asoc);
        }
}

/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
        int rx_count;
        int over = 0;

        if (unlikely(!asoc->rwnd || asoc->rwnd_over))
                pr_debug("%s: association:%p has asoc->rwnd:%u, "
                         "asoc->rwnd_over:%u!\n", __func__, asoc,
                         asoc->rwnd, asoc->rwnd_over);

        if (asoc->ep->rcvbuf_policy)
                rx_count = atomic_read(&asoc->rmem_alloc);
        else
                rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

        /* If we've reached or overflowed our receive buffer, announce
         * a 0 rwnd if rwnd would still be positive. Store the
         * potential pressure overflow so that the window can be restored
         * back to the original value.
         */
        if (rx_count >= asoc->base.sk->sk_rcvbuf)
                over = 1;

        if (asoc->rwnd >= len) {
                asoc->rwnd -= len;
                if (over) {
                        asoc->rwnd_press += asoc->rwnd;
                        asoc->rwnd = 0;
                }
        } else {
                asoc->rwnd_over += len - asoc->rwnd;
                asoc->rwnd = 0;
        }

        pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
                 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
                 asoc->rwnd_press);
}

/* Build the bind address list for the association based on info from the
 * local endpoint and the remote peer.
 */
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
                                     enum sctp_scope scope, gfp_t gfp)
{
        int flags;

        /* Use scoping rules to determine the subset of addresses from
         * the endpoint.
         */
        flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
        if (asoc->peer.ipv4_address)
                flags |= SCTP_ADDR4_PEERSUPP;
        if (asoc->peer.ipv6_address)
                flags |= SCTP_ADDR6_PEERSUPP;

        return sctp_bind_addr_copy(sock_net(asoc->base.sk),
                                   &asoc->base.bind_addr,
                                   &asoc->ep->base.bind_addr,
                                   scope, gfp, flags);
}

/* Build the association's bind address list from the cookie. */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
                                         struct sctp_cookie *cookie,
                                         gfp_t gfp)
{
        int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
        int var_size3 = cookie->raw_addr_list_len;
        __u8 *raw = (__u8 *)cookie->peer_init + var_size2;

        return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
                                      asoc->ep->base.bind_addr.port, gfp);
}

/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
                            const union sctp_addr *laddr)
{
        int found = 0;

        if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
            sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
                                 sctp_sk(asoc->base.sk)))
                found = 1;

        return found;
}

/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
        bool preload = gfpflags_allow_blocking(gfp);
        int ret;

        /* If the id is already assigned, keep it. */
        if (asoc->assoc_id)
                return 0;

        if (preload)
                idr_preload(gfp);
        spin_lock_bh(&sctp_assocs_id_lock);
        /* 0, 1, 2 are used as SCTP_FUTURE_ASSOC, SCTP_CURRENT_ASSOC and
         * SCTP_ALL_ASSOC, so an available id must be > SCTP_ALL_ASSOC.
         */
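        /* GFP_NOWAIT is safe under the spinlock: when the caller's gfp
         * allows blocking, idr_preload() above has already pre-allocated
         * the needed memory outside the lock.
         */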
        ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, SCTP_ALL_ASSOC + 1, 0,
                               GFP_NOWAIT);
        spin_unlock_bh(&sctp_assocs_id_lock);
        if (preload)
                idr_preload_end();
        if (ret < 0)
                return ret;

        asoc->assoc_id = (sctp_assoc_t)ret;
        return 0;
}

/* Free the ASCONF queue */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
        struct sctp_chunk *asconf;
        struct sctp_chunk *tmp;

        list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
                list_del_init(&asconf->list);
                sctp_chunk_free(asconf);
        }
}

/* Free asconf_ack cache */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
        struct sctp_chunk *ack;
        struct sctp_chunk *tmp;

        list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
                                 transmitted_list) {
                list_del_init(&ack->transmitted_list);
                sctp_chunk_free(ack);
        }
}

/* Clean up the ASCONF_ACK queue */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
        struct sctp_chunk *ack;
        struct sctp_chunk *tmp;

        /* We can remove all the entries from the queue up to
         * the "Peer-Sequence-Number".
         */
        list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
                                 transmitted_list) {
                if (ack->subh.addip_hdr->serial ==
                    htonl(asoc->peer.addip_serial))
                        break;

                list_del_init(&ack->transmitted_list);
                sctp_chunk_free(ack);
        }
}

/* Find the ASCONF_ACK whose serial number matches ASCONF */
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
                                        const struct sctp_association *asoc,
                                        __be32 serial)
{
        struct sctp_chunk *ack;

        /* Walk through the list of cached ASCONF-ACKs and find the
         * ack chunk whose serial number matches that of the request.
         */
        list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
                if (sctp_chunk_pending(ack))
                        continue;
                if (ack->subh.addip_hdr->serial == serial) {
                        sctp_chunk_hold(ack);
                        return ack;
                }
        }

        return NULL;
}

void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
        /* Free any cached ASCONF_ACK chunk. */
        sctp_assoc_free_asconf_acks(asoc);

        /* Free the ASCONF queue. */
        sctp_assoc_free_asconf_queue(asoc);

        /* Free any cached ASCONF chunk. */
        if (asoc->addip_last_asconf)
                sctp_chunk_free(asoc->addip_last_asconf);
}