/*-
 * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_structs.h,v 1.13 2005/03/06 16:04:18 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifndef __sctp_structs_h__
#define __sctp_structs_h__

#include <sys/queue.h>

#include <sys/callout.h>
#include <sys/socket.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netkey/key.h>
#endif

#include <netinet/sctp_header.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>

struct sctp_timer {
	struct callout timer;
	int type;
	/*
	 * Depending on the timer type these will be setup and cast with the
	 * appropriate entity.
	 */
	void *ep;
	void *tcb;
	void *net;

	/* for sanity checking */
	void *self;
	uint32_t ticks;
};

struct sctp_nonpad_sndrcvinfo {
	uint16_t sinfo_stream;
	uint16_t sinfo_ssn;
	uint16_t sinfo_flags;
	uint32_t sinfo_ppid;
	uint32_t sinfo_context;
	uint32_t sinfo_timetolive;
	uint32_t sinfo_tsn;
	uint32_t sinfo_cumtsn;
	sctp_assoc_t sinfo_assoc_id;
};


/*
 * This is the information we track on each interface that we know about from
 * the distant end.
 */
TAILQ_HEAD(sctpnetlisthead, sctp_nets);

struct sctp_stream_reset_list {
	TAILQ_ENTRY(sctp_stream_reset_list) next_resp;
	uint32_t tsn;
	int number_entries;
	struct sctp_stream_reset_out_request req;
};

TAILQ_HEAD(sctp_resethead, sctp_stream_reset_list);

/*
 * Users of the iterator need to malloc an iterator with a call to
 * sctp_initiate_iterator(inp_func, assoc_func, pcb_flags, pcb_features,
 * asoc_state, void-ptr-arg, uint32-arg, end_func, inp);
 *
 * Use the following two defines if you don't care what pcb flags are on the
 * EP and/or you don't care what state the association is in.
 *
 * Note that if you specify an INP as the last argument then ONLY each
 * association of that single INP will be executed upon. Note that the pcb
 * flags STILL apply, so if the inp you specify has different pcb_flags than
 * what you put in pcb_flags, nothing will happen. Use SCTP_PCB_ANY_FLAGS to
 * assure the inp you specify gets treated.
 */
#define SCTP_PCB_ANY_FLAGS	0x00000000
#define SCTP_PCB_ANY_FEATURES	0x00000000
#define SCTP_ASOC_ANY_STATE	0x00000000

typedef void (*asoc_func) (struct sctp_inpcb *, struct sctp_tcb *, void *ptr,
    uint32_t val);
typedef void (*inp_func) (struct sctp_inpcb *, void *ptr, uint32_t val);
typedef void (*end_func) (void *ptr, uint32_t val);
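
/*
 * Illustrative sketch only (not part of the original header): given the
 * calling convention described above, a caller that wanted to visit every
 * association regardless of endpoint flags, features, or association state
 * might do something like the following.  sctp_initiate_iterator() itself is
 * declared elsewhere; the callback names here are made up for the example.
 *
 *	static void
 *	count_one_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
 *	    void *ptr, uint32_t val)
 *	{
 *		(*(uint32_t *)ptr)++;
 *	}
 *
 *	static void
 *	count_done(void *ptr, uint32_t val)
 *	{
 *		printf("iterator saw %u associations\n", *(uint32_t *)ptr);
 *	}
 *
 *	static uint32_t assoc_count;
 *
 *	(void)sctp_initiate_iterator(NULL, count_one_assoc,
 *	    SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE,
 *	    &assoc_count, 0, count_done, NULL);
 */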

struct sctp_iterator {
	LIST_ENTRY(sctp_iterator) sctp_nxt_itr;
	struct sctp_timer tmr;
	struct sctp_inpcb *inp;		/* current endpoint */
	struct sctp_tcb *stcb;		/* current assoc */
	asoc_func function_assoc;	/* per assoc function */
	inp_func function_inp;		/* per endpoint function */
	end_func function_atend;	/* iterator completion function */
	void *pointer;			/* pointer for apply func to use */
	uint32_t val;			/* value for apply func to use */
	uint32_t pcb_flags;		/* endpoint flags being checked */
	uint32_t pcb_features;		/* endpoint features being checked */
	uint32_t asoc_state;		/* assoc state being checked */
	uint32_t iterator_flags;
	uint8_t no_chunk_output;
};

/* iterator_flags values */
#define SCTP_ITERATOR_DO_ALL_INP	0x00000001
#define SCTP_ITERATOR_DO_SINGLE_INP	0x00000002

LIST_HEAD(sctpiterators, sctp_iterator);

struct sctp_copy_all {
	struct sctp_inpcb *inp;	/* ep */
	struct mbuf *m;
	struct sctp_sndrcvinfo sndrcv;
	int sndlen;
	int cnt_sent;
	int cnt_failed;
};

struct sctp_nets {
	TAILQ_ENTRY(sctp_nets) sctp_next;	/* next link */

	/*
	 * Things on the top half may be able to be split into a common
	 * structure shared by all.
	 */
	struct sctp_timer pmtu_timer;

	/*
	 * The following two in combination equate to a route entry for v6
	 * or v4.
	 */
	struct sctp_route {
		struct rtentry *ro_rt;
		union sctp_sockstore _l_addr;	/* remote peer addr */
		union sctp_sockstore _s_addr;	/* our selected src addr */
	} ro;
	/* mtu discovered so far */
	uint32_t mtu;
	uint32_t ssthresh;	/* not sure about this one for split */

	/* smoothed average things for RTT and RTO itself */
	int lastsa;
	int lastsv;
	unsigned int RTO;
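
	/*
	 * For reference (not from the original header): lastsa/lastsv hold
	 * the smoothed RTT estimate and its variance, and RTO is derived
	 * from them along the lines of RFC 2988 and RFC 2960, section 6.3.1:
	 *
	 *	SRTT   = (1 - 1/8) * SRTT + (1/8) * R'
	 *	RTTVAR = (1 - 1/4) * RTTVAR + (1/4) * |SRTT - R'|
	 *	RTO    = SRTT + 4 * RTTVAR, clamped to [RTO.Min, RTO.Max]
	 *
	 * where R' is the latest RTT measurement.  The exact fixed-point
	 * scaling of lastsa/lastsv is an implementation detail and is not
	 * spelled out here.
	 */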

	/* This is used for SHUTDOWN/SHUTDOWN-ACK/SEND or INIT timers */
	struct sctp_timer rxt_timer;
	struct sctp_timer fr_timer;	/* for early fr */

	/* last time in seconds I sent to it */
	struct timeval last_sent_time;
	int ref_count;

	/* Congestion stats per destination */
	/*
	 * flight size variables and such, sorry Vern, I could not avoid
	 * this if I wanted performance :>
	 */
	uint32_t flight_size;
	uint32_t cwnd;		/* actual cwnd */
	uint32_t prev_cwnd;	/* cwnd before any processing */
	uint32_t partial_bytes_acked;	/* in CA tracks when to incr an MTU */
	uint32_t rtt_variance;
	uint32_t prev_rtt;
	/* tracking variables to avoid the alloc/free in sack processing */
	unsigned int net_ack;
	unsigned int net_ack2;

	/*
	 * CMT variables (iyengar@cis.udel.edu)
	 */
	uint32_t this_sack_highest_newack;	/* tracks highest TSN newly
						 * acked for a given dest in
						 * the current SACK. Used in
						 * SFR and HTNA algos */
	uint32_t pseudo_cumack;	/* CMT CUC algorithm. Maintains next expected
				 * pseudo-cumack for this destination */
	uint32_t rtx_pseudo_cumack;	/* CMT CUC algorithm. Maintains next
					 * expected pseudo-cumack for this
					 * destination */

	/* CMT fast recovery variables */
	uint32_t fast_recovery_tsn;
	uint32_t heartbeat_random1;
	uint32_t heartbeat_random2;
	uint32_t tos_flowlabel;

	/* if this guy is ok or not ... status */
	uint16_t dest_state;
	/* number of transmit failures to down this guy */
	uint16_t failure_threshold;
	/* error stats on destination */
	uint16_t error_count;

	uint8_t fast_retran_loss_recovery;
	uint8_t will_exit_fast_recovery;
	/* Flags that probably can be combined into dest_state */
	uint8_t rto_variance_dir;	/* increase = 1, decreasing = 0 */
	uint8_t rto_pending;	/* is segment marked for RTO update ** if we
				 * split? */
	uint8_t fast_retran_ip;	/* fast retransmit in progress */
	uint8_t hb_responded;
	uint8_t saw_newack;	/* CMT's SFR algorithm flag */
	uint8_t src_addr_selected;	/* if we split we move */
	uint8_t indx_of_eligible_next_to_use;
	uint8_t addr_is_local;	/* it's a local address (if known); could move
				 * in split */

	/*
	 * CMT variables (iyengar@cis.udel.edu)
	 */
	uint8_t find_pseudo_cumack;	/* CMT CUC algorithm. Flag used to
					 * find a new pseudo-cumack. This flag
					 * is set after a new pseudo-cumack
					 * has been received and indicates
					 * that the sender should find the
					 * next pseudo-cumack expected for
					 * this destination */
	uint8_t find_rtx_pseudo_cumack;	/* CMT CUCv2 algorithm. Flag used to
					 * find a new rtx-pseudo-cumack. This
					 * flag is set after a new
					 * rtx-pseudo-cumack has been received
					 * and indicates that the sender
					 * should find the next
					 * rtx-pseudo-cumack expected for this
					 * destination */
	uint8_t new_pseudo_cumack;	/* CMT CUC algorithm. Flag used to
					 * indicate if a new pseudo-cumack or
					 * rtx-pseudo-cumack has been received */
#ifdef SCTP_HIGH_SPEED
	uint8_t last_hs_used;	/* index into the last HS table entry we used */
#endif
};


struct sctp_data_chunkrec {
	uint32_t TSN_seq;	/* the TSN of this transmit */
	uint16_t stream_seq;	/* the stream sequence number of this transmit */
	uint16_t stream_number;	/* the stream number of this guy */
	uint32_t payloadtype;
	uint32_t context;	/* from send */

	/* ECN Nonce: Nonce Value for this chunk */
	uint8_t ect_nonce;

	/*
	 * part of the Highest sacked algorithm to be able to stroke counts
	 * on ones that are FR'd.
	 */
	uint32_t fast_retran_tsn;	/* sending_seq at the time of FR */
	struct timeval timetodrop;	/* time we drop it from queue */
	uint8_t doing_fast_retransmit;
	uint8_t rcv_flags;	/* flags pulled from data chunk on inbound;
				 * for outbound holds sending flags. */
	uint8_t state_flags;
	uint8_t chunk_was_revoked;
};

TAILQ_HEAD(sctpchunk_listhead, sctp_tmit_chunk);

/* The lower byte is used to enumerate PR_SCTP policies */
#define CHUNK_FLAGS_PR_SCTP_TTL	SCTP_PR_SCTP_TTL
#define CHUNK_FLAGS_PR_SCTP_BUF	SCTP_PR_SCTP_BUF
#define CHUNK_FLAGS_PR_SCTP_RTX	SCTP_PR_SCTP_RTX

/* The upper byte is used as a bit mask */
#define CHUNK_FLAGS_FRAGMENT_OK	0x0100
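
/*
 * Illustrative sketch only (not from the original header): with the split
 * above, the PR-SCTP policy and the flag bits of a chunk's "flags" word can
 * be pulled apart roughly like this ("chk" stands for a pointer to the
 * struct sctp_tmit_chunk defined below):
 *
 *	uint16_t policy  = chk->flags & 0x00ff;	  // one of CHUNK_FLAGS_PR_SCTP_x
 *	int      frag_ok = (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) != 0;
 */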

struct chk_id {
	uint16_t id;
	uint16_t can_take_data;
};


struct sctp_tmit_chunk {
	union {
		struct sctp_data_chunkrec data;
		struct chk_id chunk_id;
	} rec;
	struct sctp_association *asoc;	/* bp to asoc this belongs to */
	struct timeval sent_rcv_time;	/* filled in if RTT being calculated */
	struct mbuf *data;	/* pointer to mbuf chain of data */
	struct mbuf *last_mbuf;	/* pointer to last mbuf in chain */
	struct sctp_nets *whoTo;
	TAILQ_ENTRY(sctp_tmit_chunk) sctp_next;	/* next link */
	int32_t sent;		/* the send status */
	uint16_t snd_count;	/* number of times I sent */
	uint16_t flags;		/* flags, such as FRAGMENT_OK */
	uint16_t send_size;
	uint16_t book_size;
	uint16_t mbcnt;
	uint8_t pad_inplace;
	uint8_t do_rtt;
	uint8_t book_size_scale;
	uint8_t addr_over;	/* flag which is set if the dest address for
				 * this chunk is overridden by the user. Used
				 * for CMT (iyengar@cis.udel.edu, 2005/06/21) */
	uint8_t no_fr_allowed;
	uint8_t pr_sctp_on;
	uint8_t copy_by_ref;
};

/*
 * The first part of this structure MUST be the entire sinfo structure. Maybe
 * I should have made it a sub structure... we can circle back later and do
 * that if we want.
 */
struct sctp_queued_to_read {	/* sinfo structure plus more */
	uint16_t sinfo_stream;	/* off the wire */
	uint16_t sinfo_ssn;	/* off the wire */
	uint16_t sinfo_flags;	/* SCTP_UNORDERED from wire; use SCTP_EOF for
				 * EOR */
	uint32_t sinfo_ppid;	/* off the wire */
	uint32_t sinfo_context;	/* pick this up from assoc def context? */
	uint32_t sinfo_timetolive;	/* not used by kernel */
	uint32_t sinfo_tsn;	/* Use this in reassembly as first TSN */
	uint32_t sinfo_cumtsn;	/* Use this in reassembly as last TSN */
	sctp_assoc_t sinfo_assoc_id;	/* our assoc id */
	/* Non sinfo stuff */
	uint32_t length;	/* length of data */
	uint32_t held_length;	/* length held in sb */
	struct sctp_nets *whoFrom;	/* where it came from */
	struct mbuf *data;	/* front of the mbuf chain of data with
				 * PKT_HDR */
	struct mbuf *tail_mbuf;	/* used for multi-part data */
	struct sctp_tcb *stcb;	/* assoc, used for window update */
	TAILQ_ENTRY(sctp_queued_to_read) next;
	uint16_t port_from;
	uint8_t do_not_ref_stcb;
	uint8_t end_added;
	uint8_t pdapi_aborted;
};
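
/*
 * Illustrative sketch only (not from the original header): the "sinfo
 * prefix" requirement above could be spot-checked at compile time with
 * FreeBSD's CTASSERT() and offsetof(), for example:
 *
 *	CTASSERT(offsetof(struct sctp_queued_to_read, sinfo_stream) ==
 *	    offsetof(struct sctp_sndrcvinfo, sinfo_stream));
 *	CTASSERT(offsetof(struct sctp_queued_to_read, sinfo_assoc_id) ==
 *	    offsetof(struct sctp_sndrcvinfo, sinfo_assoc_id));
 */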

/*
 * This data structure will be on the outbound stream queues. Data will be
 * pulled off from the front of the mbuf data and chunk-ified by the output
 * routines. We will custom fit every chunk we pull to the send/sent queue to
 * make up the next full packet if we can. An entry cannot be removed from
 * the stream_out queue until the msg_is_complete flag is set. This means at
 * times data/tail_mbuf MIGHT be NULL. If that occurs it happens for one of
 * two reasons: either the user is blocked on a send() call and has not
 * awoken to copy more data down, OR the user is in the explicit MSG_EOR mode
 * and wrote some data, but has not completed sending.
 */
struct sctp_stream_queue_pending {
	struct mbuf *data;
	struct mbuf *tail_mbuf;
	struct timeval ts;
	struct sctp_nets *net;
	TAILQ_ENTRY(sctp_stream_queue_pending) next;
	uint32_t length;
	uint32_t timetolive;
	uint32_t ppid;
	uint32_t context;
	uint16_t sinfo_flags;
	uint16_t stream;
	uint16_t strseq;
	uint8_t msg_is_complete;
	uint8_t some_taken;
	uint8_t addr_over;
	uint8_t act_flags;
	uint8_t pr_sctp_on;
	uint8_t resv;
};

/*
 * this struct contains info that is used to track inbound stream data and
 * help with ordering.
 */
TAILQ_HEAD(sctpwheelunrel_listhead, sctp_stream_in);
struct sctp_stream_in {
	struct sctp_readhead inqueue;
	TAILQ_ENTRY(sctp_stream_in) next_spoke;
	uint16_t stream_no;
	uint16_t last_sequence_delivered;	/* used for re-order */
};

/* This struct is used to track the traffic on outbound streams */
TAILQ_HEAD(sctpwheel_listhead, sctp_stream_out);
struct sctp_stream_out {
	struct sctp_streamhead outqueue;
	TAILQ_ENTRY(sctp_stream_out) next_spoke;	/* next link in wheel */
	uint16_t stream_no;
	uint16_t next_sequence_sent;	/* next one I expect to send out */
	uint8_t last_msg_incomplete;
};

/* used to keep track of the addresses yet to try to add/delete */
TAILQ_HEAD(sctp_asconf_addrhead, sctp_asconf_addr);
struct sctp_asconf_addr {
	TAILQ_ENTRY(sctp_asconf_addr) next;
	struct sctp_asconf_addr_param ap;
	struct ifaddr *ifa;	/* save the ifa for add/del ip */
	uint8_t sent;		/* has this been sent yet? */
};

struct sctp_scoping {
	uint8_t ipv4_addr_legal;
	uint8_t ipv6_addr_legal;
	uint8_t loopback_scope;
	uint8_t ipv4_local_scope;
	uint8_t local_scope;
	uint8_t site_scope;
};

/*
 * Here we have information about each individual association that we track.
 * We probably in production would be more dynamic. But for ease of
 * implementation we will have a fixed array that we hunt for in a linear
 * fashion.
 */
struct sctp_association {
	/* association state */
	int state;
	/* queue of pending addrs to add/delete */
	struct sctp_asconf_addrhead asconf_queue;
	struct timeval time_entered;	/* time we entered state */
	struct timeval time_last_rcvd;
	struct timeval time_last_sent;
	struct timeval time_last_sat_advance;
	struct sctp_sndrcvinfo def_send;	/* default send parameters */

	/* timers and such */
	struct sctp_timer hb_timer;	/* hb timer */
	struct sctp_timer dack_timer;	/* Delayed ack timer */
	struct sctp_timer asconf_timer;	/* Asconf */
	struct sctp_timer strreset_timer;	/* stream reset */
	struct sctp_timer shut_guard_timer;	/* guard */
	struct sctp_timer autoclose_timer;	/* automatic close timer */
	struct sctp_timer delayed_event_timer;	/* timer for delayed events */

	/* list of local addresses when add/del in progress */
	struct sctpladdr sctp_local_addr_list;
	struct sctpnetlisthead nets;

	/* Free chunk list */
	struct sctpchunk_listhead free_chunks;

	/* Free stream output control list */
	struct sctp_streamhead free_strmoq;

	/* Control chunk queue */
	struct sctpchunk_listhead control_send_queue;

	/*
	 * Once a TSN hits the wire it is moved to the sent_queue. We
	 * maintain two counts here (don't know if any but retran_cnt is
	 * needed). The idea is that the sent_queue_retran_cnt reflects how
	 * many chunks have been marked for retransmission by either T3-rxt
	 * or FR.
	 */
	struct sctpchunk_listhead sent_queue;
	struct sctpchunk_listhead send_queue;


	/* re-assembly queue for fragmented chunks on the inbound path */
	struct sctpchunk_listhead reasmqueue;

	/*
	 * this queue is used when we reach a condition that we can NOT put
	 * data into the socket buffer. We track the size of this queue and
	 * set our rwnd to the space in the socket minus also the
	 * size_on_delivery_queue.
	 */
	struct sctpwheel_listhead out_wheel;

	/*
	 * This pointer will be set to NULL most of the time. But when we
	 * have a fragmented message, where we could not get out all of the
	 * message at the last send, then this will point to the stream to go
	 * get data from.
	 */
	struct sctp_stream_out *locked_on_sending;

	/* If an iterator is looking at me, this is it */
	struct sctp_iterator *stcb_starting_point_for_iterator;

	/* ASCONF destination address last sent to */
	/* struct sctp_nets *asconf_last_sent_to; */
	/*
	 * Peter, grepping for the above shows only one strange set.
	 * I don't think we need it so I have commented it out.
	 */

	/* ASCONF save the last ASCONF-ACK so we can resend it if necessary */
	struct mbuf *last_asconf_ack_sent;

	/*
	 * pointer to last stream reset queued to control queue by us with
	 * requests.
	 */
	struct sctp_tmit_chunk *str_reset;
	/*
	 * if Source Address Selection happening, this will rotate through
	 * the link list.
	 */
	struct sctp_laddr *last_used_address;

	/* stream arrays */
	struct sctp_stream_in *strmin;
	struct sctp_stream_out *strmout;
	uint8_t *mapping_array;
	/* primary destination to use */
	struct sctp_nets *primary_destination;
	/* For CMT */
	struct sctp_nets *last_net_data_came_from;
	/* last place I got a data chunk from */
	struct sctp_nets *last_data_chunk_from;
	/* last place I got a control from */
	struct sctp_nets *last_control_chunk_from;

	/* circular looking for output selection */
	struct sctp_stream_out *last_out_stream;

	/*
	 * wait to the point the cum-ack passes req->send_reset_at_tsn for
	 * any req on the list.
	 */
	struct sctp_resethead resetHead;

	/* queue of chunks waiting to be sent into the local stack */
	struct sctp_readhead pending_reply_queue;

	uint32_t cookie_preserve_req;
	/* ASCONF next seq I am sending out, inits at init-tsn */
	uint32_t asconf_seq_out;
	/* ASCONF last received ASCONF from peer, starts at peer's TSN-1 */
	uint32_t asconf_seq_in;

	/* next seq I am sending in str reset messages */
	uint32_t str_reset_seq_out;

	/* next seq I am expecting in str reset messages */
	uint32_t str_reset_seq_in;


	/* various verification tag information */
	uint32_t my_vtag;	/* The tag to be used. If the assoc is
				 * re-initiated by the remote end, and I have
				 * unlocked, this will be regenerated to a new
				 * random value. */
	uint32_t peer_vtag;	/* The peer's last tag */

	uint32_t my_vtag_nonce;
	uint32_t peer_vtag_nonce;

	uint32_t assoc_id;

	/* This is the SCTP fragmentation threshold */
	uint32_t smallest_mtu;

	/*
	 * Special hook for Fast retransmit, allows us to track the highest
	 * TSN that is NEW in this SACK if gap ack blocks are present.
	 */
	uint32_t this_sack_highest_gap;

	/*
	 * The highest consecutive TSN that has been acked by the peer on my
	 * sends
	 */
	uint32_t last_acked_seq;

	/* The next TSN that I will use in sending. */
	uint32_t sending_seq;

	/* Original seq number I used ??questionable to keep?? */
	uint32_t init_seq_number;


	/* The Advanced Peer Ack Point, as required by PR-SCTP */
	/* (A1 in Section 4.2) */
	uint32_t advanced_peer_ack_point;

	/*
	 * The highest consecutive TSN at the bottom of the mapping array
	 * (for his sends).
	 */
	uint32_t cumulative_tsn;
	/*
	 * Used to track the mapping array and its offset bits. This MAY be
	 * lower than cumulative_tsn.
	 */
	uint32_t mapping_array_base_tsn;
	/*
	 * used to track the highest TSN we have received and is listed in
	 * the mapping array.
	 */
	uint32_t highest_tsn_inside_map;

	uint32_t last_echo_tsn;
	uint32_t last_cwr_tsn;
	uint32_t fast_recovery_tsn;
	uint32_t sat_t3_recovery_tsn;
	uint32_t tsn_last_delivered;
	/*
	 * For the pd-api we should re-write this a bit more efficiently. We
	 * could have multiple sctp_queued_to_read's that we are building at
	 * once. Now we only do this when we get ready to deliver to the
	 * socket buffer. Note that we depend on the fact that the struct is
	 * "stuck" on the read queue until we finish all the pd-api.
	 */
	struct sctp_queued_to_read *control_pdapi;

	uint32_t tsn_of_pdapi_last_delivered;
	uint32_t pdapi_ppid;
	uint32_t context;
	uint32_t last_reset_action[SCTP_MAX_RESET_PARAMS];
	uint32_t last_sending_seq[SCTP_MAX_RESET_PARAMS];
	uint32_t last_base_tsnsent[SCTP_MAX_RESET_PARAMS];
	/*
	 * window state information and smallest MTU that I use to bound
	 * segmentation
	 */
	uint32_t peers_rwnd;
	uint32_t my_rwnd;
	uint32_t my_last_reported_rwnd;
	uint32_t my_rwnd_control_len;

	uint32_t total_output_queue_size;

	uint32_t sb_cc;		/* shadow of sb_cc in one-2-one */
	uint32_t sb_mbcnt;	/* shadow of sb_mbcnt in one-2-one */
	/* 32 bit nonce stuff */
	uint32_t nonce_resync_tsn;
	uint32_t nonce_wait_tsn;
	uint32_t default_flowlabel;
	uint32_t pr_sctp_cnt;
	int ctrl_queue_cnt;	/* could be removed REM */
	/*
	 * All outbound datagrams queue into this list from the individual
	 * stream queue. Here they get assigned a TSN and then await
	 * sending. The stream seq comes when it is first put in the
	 * individual str queue.
	 */
	unsigned int stream_queue_cnt;
	unsigned int send_queue_cnt;
	unsigned int sent_queue_cnt;
	unsigned int sent_queue_cnt_removeable;
	/*
	 * Number on sent queue that are marked for retran; until this value
	 * is 0 we only send one packet of retran'ed data.
	 */
	unsigned int sent_queue_retran_cnt;

	unsigned int size_on_reasm_queue;
	unsigned int cnt_on_reasm_queue;
	/* amount of data (bytes) currently in flight (on all destinations) */
	unsigned int total_flight;
	/* Total book size in flight */
	unsigned int total_flight_count;	/* count of chunks used with
						 * book total */
	/* count of destination nets and list of destination nets */
	unsigned int numnets;

	/* Total error count on this association */
	unsigned int overall_error_count;

	unsigned int cnt_msg_on_sb;

	/* All stream count of chunks for delivery */
	unsigned int size_on_all_streams;
	unsigned int cnt_on_all_streams;

	/* Heart Beat delay in ticks */
	unsigned int heart_beat_delay;

	/* autoclose */
	unsigned int sctp_autoclose_ticks;

	/* how many preopen streams we have */
	unsigned int pre_open_streams;

	/* How many streams I support coming into me */
	unsigned int max_inbound_streams;

	/* the cookie life I award for any cookie, in seconds */
	unsigned int cookie_life;
	/* time to delay acks for */
	unsigned int delayed_ack;

	unsigned int numduptsns;
	int dup_tsns[SCTP_MAX_DUP_TSNS];
	unsigned int initial_init_rto_max;	/* initial RTO for INITs */
	unsigned int initial_rto;	/* initial send RTO */
	unsigned int minrto;	/* per assoc RTO-MIN */
	unsigned int maxrto;	/* per assoc RTO-MAX */

	/* authentication fields */
	sctp_auth_chklist_t *local_auth_chunks;
	sctp_auth_chklist_t *peer_auth_chunks;
	sctp_hmaclist_t *local_hmacs;	/* local HMACs supported */
	sctp_hmaclist_t *peer_hmacs;	/* peer HMACs supported */
	struct sctp_keyhead shared_keys;	/* assoc's shared keys */
	sctp_authinfo_t authinfo;	/* randoms, cached keys */
	/*
	 * refcnt to block freeing when a sender or receiver is off copying
	 * user data in.
	 */
	uint32_t refcnt;
	uint32_t chunks_on_out_queue;	/* total chunks floating around,
					 * locked by send socket buffer */

	uint16_t peer_hmac_id;	/* peer HMAC id to send */

	/*
	 * Being that we have no bag to collect stale cookies, and that we
	 * really would not want to anyway... we will count them in this
	 * counter. We of course feed them to the pigeons right away (I have
	 * always thought of pigeons as flying rats).
	 */
	uint16_t stale_cookie_count;

	/*
	 * For the partial delivery API, if up, invoked this is what last
	 * TSN I delivered
	 */
	uint16_t str_of_pdapi;
	uint16_t ssn_of_pdapi;

	/* counts of actual built streams. Allocation may be more however */
	/* could re-arrange to optimize space here. */
	uint16_t streamincnt;
	uint16_t streamoutcnt;

	/* my maximum number of retrans of INIT and SEND */
	/* copied from SCTP but should be individually settable */
	uint16_t max_init_times;
	uint16_t max_send_times;

	uint16_t def_net_failure;

	/*
	 * lock flag: 0 is ok to send, 1+ (duals as a retran count) is
	 * awaiting ACK
	 */
	uint16_t asconf_sent;	/* possibly removable REM */
	uint16_t mapping_array_size;

	uint16_t last_strm_seq_delivered;
	uint16_t last_strm_no_delivered;

	uint16_t last_revoke_count;
	int16_t num_send_timers_up;

	uint16_t stream_locked_on;
	uint16_t ecn_echo_cnt_onq;

	uint16_t free_chunk_cnt;
	uint16_t free_strmoq_cnt;

	uint8_t stream_locked;
	uint8_t authenticated;	/* packet authenticated ok */
	/*
	 * This flag indicates that we need to send the first SACK. If in
	 * place it says we have NOT yet sent a SACK and need to.
	 */
	uint8_t first_ack_sent;

	/* max burst after fast retransmit completes */
	uint8_t max_burst;

	uint8_t sat_network;	/* RTT is in range of sat net or greater */
	uint8_t sat_network_lockout;	/* lockout code */
	uint8_t burst_limit_applied;	/* Burst limit in effect at last send? */
	/* flag goes on when we are doing a partial delivery api */
	uint8_t hb_random_values[4];
	uint8_t fragmented_delivery_inprogress;
	uint8_t fragment_flags;
	uint8_t last_flags_delivered;
	uint8_t hb_ect_randombit;
	uint8_t hb_random_idx;
	uint8_t hb_is_disabled;	/* is the hb disabled? */
	uint8_t default_tos;

	/* ECN Nonce stuff */
	uint8_t receiver_nonce_sum;	/* nonce I sum and put in my sack */
	uint8_t ecn_nonce_allowed;	/* Tells us if ECN nonce is on */
	uint8_t nonce_sum_check;	/* On/off switch used during re-sync */
	uint8_t nonce_wait_for_ecne;	/* flag when we expect an ECN */
	uint8_t peer_supports_ecn_nonce;

	/*
	 * This value, plus all other ack'd but above cum-ack is added
	 * together to cross check against the bit that we have yet to
	 * define (probably in the SACK). When the cum-ack is updated, this
	 * sum is updated as well.
	 */
	uint8_t nonce_sum_expect_base;
	/* Flag to tell if ECN is allowed */
	uint8_t ecn_allowed;

	/* flag to indicate if peer can do asconf */
	uint8_t peer_supports_asconf;
	/* pr-sctp support flag */
	uint8_t peer_supports_prsctp;
	/* peer authentication support flag */
	uint8_t peer_supports_auth;
	/* stream resets are supported by the peer */
	uint8_t peer_supports_strreset;

	/*
	 * packet drops are supported by the peer, we don't really care
	 * about this but we bookkeep it anyway.
	 */
	uint8_t peer_supports_pktdrop;

	/* Do we allow V6/V4? */
	uint8_t ipv4_addr_legal;
	uint8_t ipv6_addr_legal;
	/* Address scoping flags */
	/* scope value for IPv4 */
	uint8_t ipv4_local_scope;
	/* scope values for IPv6 */
	uint8_t local_scope;
	uint8_t site_scope;
	/* loopback scope */
	uint8_t loopback_scope;
	/* flags to handle send alternate net tracking */
	uint8_t used_alt_onsack;
	uint8_t used_alt_asconfack;
	uint8_t fast_retran_loss_recovery;
	uint8_t sat_t3_loss_recovery;
	uint8_t dropped_special_cnt;
	uint8_t seen_a_sack_this_pkt;
	uint8_t stream_reset_outstanding;
	uint8_t stream_reset_out_is_outstanding;
	uint8_t delayed_connection;
	uint8_t ifp_had_enobuf;
	uint8_t saw_sack_with_frags;
	uint8_t in_restart_hash;
	uint8_t assoc_up_sent;
	/* CMT variables */
	uint8_t cmt_dac_pkts_rcvd;
	uint8_t sctp_cmt_on_off;
	uint8_t iam_blocking;
	/*
	 * The mapping array is used to track out of order sequences above
	 * last_acked_seq. 0 indicates packet missing, 1 indicates packet
	 * rec'd. We slide it up every time we raise last_acked_seq and zero
	 * out the trailing locations. If I get a TSN above the array size
	 * (mappingArraySz), I discard the datagram and let retransmit
	 * happen.
	 */
};
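
/*
 * Illustrative sketch only (not from the original header): the mapping
 * array described at the end of struct sctp_association is a bitmap indexed
 * by a TSN's offset from mapping_array_base_tsn, so marking a newly
 * received TSN as present would look roughly like:
 *
 *	uint32_t gap = tsn - asoc->mapping_array_base_tsn;
 *	asoc->mapping_array[gap >> 3] |= (1 << (gap & 0x07));
 */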

#endif