/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010-2012, by Michael Tuexen. All rights reserved.
 * Copyright (c) 2010-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2010-2012, by Robin Seggelmann. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_pcb.h>

/*
 * Default simple round-robin algorithm.
 * Just iterates the streams in the order they appear.
 */

static void
sctp_ss_default_add(struct sctp_tcb *, struct sctp_association *,
    struct sctp_stream_out *,
    struct sctp_stream_queue_pending *);

static void
sctp_ss_default_remove(struct sctp_tcb *, struct sctp_association *,
    struct sctp_stream_out *,
    struct sctp_stream_queue_pending *);

static void
sctp_ss_default_init(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint16_t i;

	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	asoc->ss_data.locked_on_sending = NULL;
	asoc->ss_data.last_out_stream = NULL;
	TAILQ_INIT(&asoc->ss_data.out.wheel);
	/*
	 * If there is data in the stream queues already, the scheduler of
	 * an existing association has been changed. We need to add all
	 * stream queues to the wheel.
	 */
	for (i = 0; i < asoc->streamoutcnt; i++) {
		stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc,
		    &asoc->strmout[i],
		    NULL);
	}
	return;
}

static void
sctp_ss_default_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
    bool clear_values SCTP_UNUSED)
{
	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
		struct sctp_stream_out *strq;

		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
		KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
		strq->ss_params.scheduled = false;
	}
	asoc->ss_data.last_out_stream = NULL;
	return;
}

static void
sctp_ss_default_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
{
	if (with_strq != NULL) {
		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
			stcb->asoc.ss_data.locked_on_sending = strq;
		}
		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
			stcb->asoc.ss_data.last_out_stream = strq;
		}
	}
	strq->ss_params.scheduled = false;
	return;
}

static void
sctp_ss_default_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq,
    struct sctp_stream_queue_pending *sp SCTP_UNUSED)
{
	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	/* Add to wheel if not already on it and stream queue not empty */
	if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
		TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel,
		    strq, ss_params.ss.rr.next_spoke);
		strq->ss_params.scheduled = true;
	}
	return;
}

static bool
sctp_ss_default_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
{
	return (TAILQ_EMPTY(&asoc->ss_data.out.wheel));
}

static void
sctp_ss_default_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq,
    struct sctp_stream_queue_pending *sp SCTP_UNUSED)
{
	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	/*
	 * Remove from wheel if stream queue is empty and actually is on the
	 * wheel
	 */
	if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
		if (asoc->ss_data.last_out_stream == strq) {
			asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
			    sctpwheel_listhead,
			    ss_params.ss.rr.next_spoke);
			if (asoc->ss_data.last_out_stream == NULL) {
				asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
				    sctpwheel_listhead);
			}
			if (asoc->ss_data.last_out_stream == strq) {
				asoc->ss_data.last_out_stream = NULL;
			}
		}
		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
		strq->ss_params.scheduled = false;
	}
	return;
}

static struct sctp_stream_out *
sctp_ss_default_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
    struct sctp_association *asoc)
{
	struct sctp_stream_out *strq, *strqt;

	if (asoc->ss_data.locked_on_sending) {
		KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
		    ("strq %p not scheduled",
		    (void *)asoc->ss_data.locked_on_sending));
		return (asoc->ss_data.locked_on_sending);
	}
	strqt = asoc->ss_data.last_out_stream;
default_again:
	/* Find the next stream to use */
	if (strqt == NULL) {
		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
	} else {
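		/*
		 * Advance to the spoke after the previous candidate and wrap
		 * around to the head of the wheel when the end is reached.
		 */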
		KASSERT(strqt->ss_params.scheduled,
		    ("strq %p not scheduled", (void *)strqt));
		strq = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
		if (strq == NULL) {
			strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
		}
	}
	KASSERT(strq == NULL || strq->ss_params.scheduled,
	    ("strq %p not scheduled", (void *)strq));

	/*
	 * If CMT is off, we must validate that the stream in question has
	 * its first item pointed towards the network destination requested
	 * by the caller. Note that if we turn out to be locked to a stream
	 * (assigning TSNs), then we must stop, since we cannot look for
	 * another stream with data to send to that destination. In CMT's
	 * case, by skipping this check, we will send one data packet
	 * towards the requested net.
	 */
	if (net != NULL && strq != NULL &&
	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
		if (TAILQ_FIRST(&strq->outqueue) &&
		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
		    TAILQ_FIRST(&strq->outqueue)->net != net) {
			if (strq == asoc->ss_data.last_out_stream) {
				return (NULL);
			} else {
				strqt = strq;
				goto default_again;
			}
		}
	}
	return (strq);
}

static void
sctp_ss_default_scheduled(struct sctp_tcb *stcb,
    struct sctp_nets *net SCTP_UNUSED,
    struct sctp_association *asoc,
    struct sctp_stream_out *strq,
    int moved_how_much SCTP_UNUSED)
{
	struct sctp_stream_queue_pending *sp;

	KASSERT(strq != NULL, ("strq is NULL"));
	KASSERT(strq->ss_params.scheduled, ("strq %p is not scheduled", (void *)strq));
	asoc->ss_data.last_out_stream = strq;
	if (asoc->idata_supported == 0) {
		sp = TAILQ_FIRST(&strq->outqueue);
		if ((sp != NULL) && (sp->some_taken == 1)) {
			asoc->ss_data.locked_on_sending = strq;
		} else {
			asoc->ss_data.locked_on_sending = NULL;
		}
	} else {
		asoc->ss_data.locked_on_sending = NULL;
	}
	return;
}

static void
sctp_ss_default_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
    struct sctp_association *asoc SCTP_UNUSED)
{
	/* Nothing to be done here */
	return;
}

static int
sctp_ss_default_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
    struct sctp_stream_out *strq SCTP_UNUSED, uint16_t *value SCTP_UNUSED)
{
	/* Nothing to be done here */
	return (-1);
}

static int
sctp_ss_default_set_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
    struct sctp_stream_out *strq SCTP_UNUSED, uint16_t value SCTP_UNUSED)
{
	/* Nothing to be done here */
	return (-1);
}

static bool
sctp_ss_default_is_user_msgs_incomplete(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
{
	struct sctp_stream_out *strq;
	struct sctp_stream_queue_pending *sp;

	if (asoc->stream_queue_cnt != 1) {
		return (false);
	}
	strq = asoc->ss_data.locked_on_sending;
	if (strq == NULL) {
		return (false);
	}
	sp = TAILQ_FIRST(&strq->outqueue);
	if (sp == NULL) {
		return (false);
	}
	return (sp->msg_is_complete == 0);
}

/*
 * Real round-robin algorithm.
 * Always iterates the streams in ascending order.
 */
static void
sctp_ss_rr_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq,
    struct sctp_stream_queue_pending *sp SCTP_UNUSED)
{
	struct sctp_stream_out *strqt;

	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
		if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
			TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
		} else {
			strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
			while (strqt != NULL && (strqt->sid < strq->sid)) {
				strqt = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
			}
			if (strqt != NULL) {
				TAILQ_INSERT_BEFORE(strqt, strq, ss_params.ss.rr.next_spoke);
			} else {
				TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
			}
		}
		strq->ss_params.scheduled = true;
	}
	return;
}

/*
 * Real round-robin per packet algorithm.
 * Always iterates the streams in ascending order and
 * only fills messages of the same stream in a packet.
 */
static struct sctp_stream_out *
sctp_ss_rrp_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
    struct sctp_association *asoc)
{
	return (asoc->ss_data.last_out_stream);
}

static void
sctp_ss_rrp_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
    struct sctp_association *asoc)
{
	struct sctp_stream_out *strq, *strqt;

	strqt = asoc->ss_data.last_out_stream;
rrp_again:
	/* Find the next stream to use */
	if (strqt == NULL) {
		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
	} else {
		KASSERT(strqt->ss_params.scheduled,
		    ("strq %p not scheduled", (void *)strqt));
		strq = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
		if (strq == NULL) {
			strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
		}
	}
	KASSERT(strq == NULL || strq->ss_params.scheduled,
	    ("strq %p not scheduled", (void *)strq));

	/*
	 * If CMT is off, we must validate that the stream in question has
	 * its first item pointed towards the network destination requested
	 * by the caller. Note that if we turn out to be locked to a stream
	 * (assigning TSNs), then we must stop, since we cannot look for
	 * another stream with data to send to that destination. In CMT's
	 * case, by skipping this check, we will send one data packet
	 * towards the requested net.
	 */
	if (net != NULL && strq != NULL &&
	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
		if (TAILQ_FIRST(&strq->outqueue) &&
		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
		    TAILQ_FIRST(&strq->outqueue)->net != net) {
			if (strq == asoc->ss_data.last_out_stream) {
				strq = NULL;
			} else {
				strqt = strq;
				goto rrp_again;
			}
		}
	}
	asoc->ss_data.last_out_stream = strq;
	return;
}

/*
 * Priority algorithm.
 * Always prefers streams based on their priority id.
 */
static void
sctp_ss_prio_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
    bool clear_values)
{
	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
		struct sctp_stream_out *strq;

		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
		KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
		if (clear_values) {
			strq->ss_params.ss.prio.priority = 0;
		}
		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
		strq->ss_params.scheduled = false;
	}
	asoc->ss_data.last_out_stream = NULL;
	return;
}

static void
sctp_ss_prio_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
{
	if (with_strq != NULL) {
		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
			stcb->asoc.ss_data.locked_on_sending = strq;
		}
		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
			stcb->asoc.ss_data.last_out_stream = strq;
		}
	}
	strq->ss_params.scheduled = false;
	if (with_strq != NULL) {
		strq->ss_params.ss.prio.priority = with_strq->ss_params.ss.prio.priority;
	} else {
		strq->ss_params.ss.prio.priority = 0;
	}
	return;
}

static void
sctp_ss_prio_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
{
	struct sctp_stream_out *strqt;

	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	/* Add to wheel if not already on it and stream queue not empty */
	if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
		if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
			TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
		} else {
			strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
			while (strqt != NULL && strqt->ss_params.ss.prio.priority < strq->ss_params.ss.prio.priority) {
				strqt = TAILQ_NEXT(strqt, ss_params.ss.prio.next_spoke);
			}
			if (strqt != NULL) {
				TAILQ_INSERT_BEFORE(strqt, strq, ss_params.ss.prio.next_spoke);
			} else {
				TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
			}
		}
		strq->ss_params.scheduled = true;
	}
	return;
}

static void
sctp_ss_prio_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
{
	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	/*
	 * Remove from wheel if stream queue is empty and actually is on the
	 * wheel
	 */
	if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
		if (asoc->ss_data.last_out_stream == strq) {
			asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
			    sctpwheel_listhead,
			    ss_params.ss.prio.next_spoke);
			if (asoc->ss_data.last_out_stream == NULL) {
				asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
				    sctpwheel_listhead);
			}
			if (asoc->ss_data.last_out_stream == strq) {
				asoc->ss_data.last_out_stream = NULL;
			}
		}
		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
		strq->ss_params.scheduled = false;
	}
	return;
}

static struct sctp_stream_out *
sctp_ss_prio_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
    struct sctp_association *asoc)
{
	struct sctp_stream_out *strq, *strqt, *strqn;

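	/*
	 * If a message has only been partially sent on some stream, TSNs
	 * must keep coming from that stream, so stay locked to it until
	 * the message is complete.
	 */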
	if (asoc->ss_data.locked_on_sending) {
		KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
		    ("strq %p not scheduled",
		    (void *)asoc->ss_data.locked_on_sending));
		return (asoc->ss_data.locked_on_sending);
	}
	strqt = asoc->ss_data.last_out_stream;
prio_again:
	/* Find the next stream to use */
	if (strqt == NULL) {
		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
	} else {
		KASSERT(strqt->ss_params.scheduled,
		    ("strq %p not scheduled", (void *)strqt));
		strqn = TAILQ_NEXT(strqt, ss_params.ss.prio.next_spoke);
		if (strqn != NULL &&
		    strqn->ss_params.ss.prio.priority == strqt->ss_params.ss.prio.priority) {
			strq = strqn;
		} else {
			strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
		}
	}
	KASSERT(strq == NULL || strq->ss_params.scheduled,
	    ("strq %p not scheduled", (void *)strq));

	/*
	 * If CMT is off, we must validate that the stream in question has
	 * its first item pointed towards the network destination requested
	 * by the caller. Note that if we turn out to be locked to a stream
	 * (assigning TSNs), then we must stop, since we cannot look for
	 * another stream with data to send to that destination. In CMT's
	 * case, by skipping this check, we will send one data packet
	 * towards the requested net.
	 */
	if (net != NULL && strq != NULL &&
	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
		if (TAILQ_FIRST(&strq->outqueue) &&
		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
		    TAILQ_FIRST(&strq->outqueue)->net != net) {
			if (strq == asoc->ss_data.last_out_stream) {
				return (NULL);
			} else {
				strqt = strq;
				goto prio_again;
			}
		}
	}
	return (strq);
}

static int
sctp_ss_prio_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
    struct sctp_stream_out *strq, uint16_t *value)
{
	if (strq == NULL) {
		return (-1);
	}
	*value = strq->ss_params.ss.prio.priority;
	return (1);
}

static int
sctp_ss_prio_set_value(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq, uint16_t value)
{
	if (strq == NULL) {
		return (-1);
	}
	strq->ss_params.ss.prio.priority = value;
	sctp_ss_prio_remove(stcb, asoc, strq, NULL);
	sctp_ss_prio_add(stcb, asoc, strq, NULL);
	return (1);
}

/*
 * Fair bandwidth algorithm.
 * Maintains an equal throughput per stream.
 */
static void
sctp_ss_fb_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
    bool clear_values)
{
	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
		struct sctp_stream_out *strq;

		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
		KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
		if (clear_values) {
			strq->ss_params.ss.fb.rounds = -1;
		}
		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
		strq->ss_params.scheduled = false;
	}
	asoc->ss_data.last_out_stream = NULL;
	return;
}

static void
sctp_ss_fb_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
{
	if (with_strq != NULL) {
		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
			stcb->asoc.ss_data.locked_on_sending = strq;
		}
		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
			stcb->asoc.ss_data.last_out_stream = strq;
		}
	}
	strq->ss_params.scheduled = false;
	if (with_strq != NULL) {
		strq->ss_params.ss.fb.rounds = with_strq->ss_params.ss.fb.rounds;
	} else {
		strq->ss_params.ss.fb.rounds = -1;
	}
	return;
}

static void
sctp_ss_fb_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
{
	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
		if (strq->ss_params.ss.fb.rounds < 0)
			strq->ss_params.ss.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
		TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
		strq->ss_params.scheduled = true;
	}
	return;
}

static void
sctp_ss_fb_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
{
	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	/*
	 * Remove from wheel if stream queue is empty and actually is on the
	 * wheel
	 */
	if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
		if (asoc->ss_data.last_out_stream == strq) {
			asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
			    sctpwheel_listhead,
			    ss_params.ss.fb.next_spoke);
			if (asoc->ss_data.last_out_stream == NULL) {
				asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
				    sctpwheel_listhead);
			}
			if (asoc->ss_data.last_out_stream == strq) {
				asoc->ss_data.last_out_stream = NULL;
			}
		}
		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
		strq->ss_params.scheduled = false;
	}
	return;
}

static struct sctp_stream_out *
sctp_ss_fb_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
    struct sctp_association *asoc)
{
	struct sctp_stream_out *strq = NULL, *strqt;

	if (asoc->ss_data.locked_on_sending) {
		KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
		    ("strq %p not scheduled",
		    (void *)asoc->ss_data.locked_on_sending));
		return (asoc->ss_data.locked_on_sending);
	}
	if (asoc->ss_data.last_out_stream == NULL ||
	    TAILQ_FIRST(&asoc->ss_data.out.wheel) == TAILQ_LAST(&asoc->ss_data.out.wheel, sctpwheel_listhead)) {
		strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
	} else {
		strqt = TAILQ_NEXT(asoc->ss_data.last_out_stream, ss_params.ss.fb.next_spoke);
	}
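	/*
	 * Walk the wheel once, starting at the stream after the one used
	 * last, and pick the scheduled stream with the smallest rounds
	 * value that may send towards the requested net (any stream
	 * qualifies when CMT is on or no net was given).
	 */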
	do {
		if ((strqt != NULL) &&
		    ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) > 0) ||
		    (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0 &&
		    (net == NULL || (TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net == NULL) ||
		    (net != NULL && TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net != NULL &&
		    TAILQ_FIRST(&strqt->outqueue)->net == net))))) {
			if ((strqt->ss_params.ss.fb.rounds >= 0) &&
			    ((strq == NULL) ||
			    (strqt->ss_params.ss.fb.rounds < strq->ss_params.ss.fb.rounds))) {
				strq = strqt;
			}
		}
		if (strqt != NULL) {
			strqt = TAILQ_NEXT(strqt, ss_params.ss.fb.next_spoke);
		} else {
			strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
		}
	} while (strqt != strq);
	return (strq);
}

static void
sctp_ss_fb_scheduled(struct sctp_tcb *stcb, struct sctp_nets *net SCTP_UNUSED,
    struct sctp_association *asoc, struct sctp_stream_out *strq,
    int moved_how_much SCTP_UNUSED)
{
	struct sctp_stream_queue_pending *sp;
	struct sctp_stream_out *strqt;
	int subtract;

	if (asoc->idata_supported == 0) {
		sp = TAILQ_FIRST(&strq->outqueue);
		if ((sp != NULL) && (sp->some_taken == 1)) {
			asoc->ss_data.locked_on_sending = strq;
		} else {
			asoc->ss_data.locked_on_sending = NULL;
		}
	} else {
		asoc->ss_data.locked_on_sending = NULL;
	}
	subtract = strq->ss_params.ss.fb.rounds;
	TAILQ_FOREACH(strqt, &asoc->ss_data.out.wheel, ss_params.ss.fb.next_spoke) {
		strqt->ss_params.ss.fb.rounds -= subtract;
		if (strqt->ss_params.ss.fb.rounds < 0)
			strqt->ss_params.ss.fb.rounds = 0;
	}
	if (TAILQ_FIRST(&strq->outqueue)) {
		strq->ss_params.ss.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
	} else {
		strq->ss_params.ss.fb.rounds = -1;
	}
	asoc->ss_data.last_out_stream = strq;
	return;
}

/*
 * First-come, first-serve algorithm.
 * Maintains the order provided by the application.
 */
static void
sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq SCTP_UNUSED,
    struct sctp_stream_queue_pending *sp);

static void
sctp_ss_fcfs_init(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t x, n = 0, add_more = 1;
	struct sctp_stream_queue_pending *sp;
	uint16_t i;

	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	TAILQ_INIT(&asoc->ss_data.out.list);
	/*
	 * If there is data in the stream queues already, the scheduler of
	 * an existing association has been changed. We can only cycle
	 * through the stream queues and add everything to the FCFS queue.
	 */
	while (add_more) {
		add_more = 0;
		for (i = 0; i < asoc->streamoutcnt; i++) {
			sp = TAILQ_FIRST(&asoc->strmout[i].outqueue);
			x = 0;
			/* Find the n-th message in the current stream queue */
			while (sp != NULL && x < n) {
				sp = TAILQ_NEXT(sp, next);
				x++;
			}
			if (sp != NULL) {
				sctp_ss_fcfs_add(stcb, asoc, &asoc->strmout[i], sp);
				add_more = 1;
			}
		}
		n++;
	}
	return;
}

static void
sctp_ss_fcfs_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
    bool clear_values SCTP_UNUSED)
{
	struct sctp_stream_queue_pending *sp;

	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	while (!TAILQ_EMPTY(&asoc->ss_data.out.list)) {
		sp = TAILQ_FIRST(&asoc->ss_data.out.list);
		KASSERT(sp->scheduled, ("sp %p not scheduled", (void *)sp));
		TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
		sp->scheduled = false;
	}
	asoc->ss_data.last_out_stream = NULL;
	return;
}

static void
sctp_ss_fcfs_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
{
	if (with_strq != NULL) {
		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
			stcb->asoc.ss_data.locked_on_sending = strq;
		}
		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
			stcb->asoc.ss_data.last_out_stream = strq;
		}
	}
	strq->ss_params.scheduled = false;
	return;
}

static void
sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp)
{
	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	if (!sp->scheduled) {
		TAILQ_INSERT_TAIL(&asoc->ss_data.out.list, sp, ss_next);
		sp->scheduled = true;
	}
	return;
}

static bool
sctp_ss_fcfs_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
{
	return (TAILQ_EMPTY(&asoc->ss_data.out.list));
}

static void
sctp_ss_fcfs_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp)
{
	SCTP_TCB_SEND_LOCK_ASSERT(stcb);

	if (sp->scheduled) {
		TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
		sp->scheduled = false;
	}
	return;
}

static struct sctp_stream_out *
sctp_ss_fcfs_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
    struct sctp_association *asoc)
{
	struct sctp_stream_out *strq;
	struct sctp_stream_queue_pending *sp;

	if (asoc->ss_data.locked_on_sending) {
		return (asoc->ss_data.locked_on_sending);
	}
	sp = TAILQ_FIRST(&asoc->ss_data.out.list);
default_again:
	if (sp != NULL) {
		strq = &asoc->strmout[sp->sid];
	} else {
		strq = NULL;
	}

	/*
	 * If CMT is off, we must validate that the stream in question has
	 * its first item pointed towards the network destination requested
	 * by the caller. Note that if we turn out to be locked to a stream
	 * (assigning TSNs), then we must stop, since we cannot look for
	 * another stream with data to send to that destination. In CMT's
	 * case, by skipping this check, we will send one data packet
	 * towards the requested net.
	 */
	if (net != NULL && strq != NULL &&
	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
		if (TAILQ_FIRST(&strq->outqueue) &&
		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
		    TAILQ_FIRST(&strq->outqueue)->net != net) {
			sp = TAILQ_NEXT(sp, ss_next);
			goto default_again;
		}
	}
	return (strq);
}

static void
sctp_ss_fcfs_scheduled(struct sctp_tcb *stcb,
    struct sctp_nets *net SCTP_UNUSED,
    struct sctp_association *asoc,
    struct sctp_stream_out *strq,
    int moved_how_much SCTP_UNUSED)
{
	struct sctp_stream_queue_pending *sp;

	KASSERT(strq != NULL, ("strq is NULL"));
	asoc->ss_data.last_out_stream = strq;
	if (asoc->idata_supported == 0) {
		sp = TAILQ_FIRST(&strq->outqueue);
		if ((sp != NULL) && (sp->some_taken == 1)) {
			asoc->ss_data.locked_on_sending = strq;
		} else {
			asoc->ss_data.locked_on_sending = NULL;
		}
	} else {
		asoc->ss_data.locked_on_sending = NULL;
	}
	return;
}

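/*
 * Dispatch table mapping each scheduler, indexed by its SCTP_SS_* constant,
 * to the set of entry points implementing it.
 */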
const struct sctp_ss_functions sctp_ss_functions[] = {
	/* SCTP_SS_DEFAULT */
	{
		.sctp_ss_init = sctp_ss_default_init,
		.sctp_ss_clear = sctp_ss_default_clear,
		.sctp_ss_init_stream = sctp_ss_default_init_stream,
		.sctp_ss_add_to_stream = sctp_ss_default_add,
		.sctp_ss_is_empty = sctp_ss_default_is_empty,
		.sctp_ss_remove_from_stream = sctp_ss_default_remove,
		.sctp_ss_select_stream = sctp_ss_default_select,
		.sctp_ss_scheduled = sctp_ss_default_scheduled,
		.sctp_ss_packet_done = sctp_ss_default_packet_done,
		.sctp_ss_get_value = sctp_ss_default_get_value,
		.sctp_ss_set_value = sctp_ss_default_set_value,
		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
	},
	/* SCTP_SS_ROUND_ROBIN */
	{
		.sctp_ss_init = sctp_ss_default_init,
		.sctp_ss_clear = sctp_ss_default_clear,
		.sctp_ss_init_stream = sctp_ss_default_init_stream,
		.sctp_ss_add_to_stream = sctp_ss_rr_add,
		.sctp_ss_is_empty = sctp_ss_default_is_empty,
		.sctp_ss_remove_from_stream = sctp_ss_default_remove,
		.sctp_ss_select_stream = sctp_ss_default_select,
		.sctp_ss_scheduled = sctp_ss_default_scheduled,
		.sctp_ss_packet_done = sctp_ss_default_packet_done,
		.sctp_ss_get_value = sctp_ss_default_get_value,
		.sctp_ss_set_value = sctp_ss_default_set_value,
		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
	},
	/* SCTP_SS_ROUND_ROBIN_PACKET */
	{
		.sctp_ss_init = sctp_ss_default_init,
		.sctp_ss_clear = sctp_ss_default_clear,
		.sctp_ss_init_stream = sctp_ss_default_init_stream,
		.sctp_ss_add_to_stream = sctp_ss_rr_add,
		.sctp_ss_is_empty = sctp_ss_default_is_empty,
		.sctp_ss_remove_from_stream = sctp_ss_default_remove,
		.sctp_ss_select_stream = sctp_ss_rrp_select,
		.sctp_ss_scheduled = sctp_ss_default_scheduled,
		.sctp_ss_packet_done = sctp_ss_rrp_packet_done,
		.sctp_ss_get_value = sctp_ss_default_get_value,
		.sctp_ss_set_value = sctp_ss_default_set_value,
		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
	},
	/* SCTP_SS_PRIORITY */
	{
		.sctp_ss_init = sctp_ss_default_init,
		.sctp_ss_clear = sctp_ss_prio_clear,
		.sctp_ss_init_stream = sctp_ss_prio_init_stream,
		.sctp_ss_add_to_stream = sctp_ss_prio_add,
		.sctp_ss_is_empty = sctp_ss_default_is_empty,
		.sctp_ss_remove_from_stream = sctp_ss_prio_remove,
		.sctp_ss_select_stream = sctp_ss_prio_select,
		.sctp_ss_scheduled = sctp_ss_default_scheduled,
		.sctp_ss_packet_done = sctp_ss_default_packet_done,
		.sctp_ss_get_value = sctp_ss_prio_get_value,
		.sctp_ss_set_value = sctp_ss_prio_set_value,
		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
	},
	/* SCTP_SS_FAIR_BANDWITH */
	{
		.sctp_ss_init = sctp_ss_default_init,
		.sctp_ss_clear = sctp_ss_fb_clear,
		.sctp_ss_init_stream = sctp_ss_fb_init_stream,
		.sctp_ss_add_to_stream = sctp_ss_fb_add,
		.sctp_ss_is_empty = sctp_ss_default_is_empty,
		.sctp_ss_remove_from_stream = sctp_ss_fb_remove,
		.sctp_ss_select_stream = sctp_ss_fb_select,
		.sctp_ss_scheduled = sctp_ss_fb_scheduled,
		.sctp_ss_packet_done = sctp_ss_default_packet_done,
		.sctp_ss_get_value = sctp_ss_default_get_value,
		.sctp_ss_set_value = sctp_ss_default_set_value,
		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
	},
	/* SCTP_SS_FIRST_COME */
	{
		.sctp_ss_init = sctp_ss_fcfs_init,
		.sctp_ss_clear = sctp_ss_fcfs_clear,
		.sctp_ss_init_stream = sctp_ss_fcfs_init_stream,
		.sctp_ss_add_to_stream = sctp_ss_fcfs_add,
		.sctp_ss_is_empty = sctp_ss_fcfs_is_empty,
		.sctp_ss_remove_from_stream = sctp_ss_fcfs_remove,
		.sctp_ss_select_stream = sctp_ss_fcfs_select,
		.sctp_ss_scheduled = sctp_ss_fcfs_scheduled,
		.sctp_ss_packet_done = sctp_ss_default_packet_done,
		.sctp_ss_get_value = sctp_ss_default_get_value,
		.sctp_ss_set_value = sctp_ss_default_set_value,
		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
	}
};
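/*
 * Usage sketch (not compiled into the kernel): a userland application would
 * normally pick one of the schedulers above through the SCTP socket API.
 * The option and structure names below (SCTP_PLUGGABLE_SS, SCTP_SS_VALUE,
 * struct sctp_assoc_value, struct sctp_stream_value) are assumed to match
 * what this stack exports via <netinet/sctp.h> and <netinet/sctp_uio.h>;
 * treat this as an illustrative example, not a reference.
 *
 *	struct sctp_assoc_value av;
 *	struct sctp_stream_value sv;
 *
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_id = SCTP_FUTURE_ASSOC;
 *	av.assoc_value = SCTP_SS_PRIORITY;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_PLUGGABLE_SS,
 *	    &av, sizeof(av)) < 0)
 *		err(1, "setsockopt(SCTP_PLUGGABLE_SS)");
 *
 *	memset(&sv, 0, sizeof(sv));
 *	sv.assoc_id = SCTP_FUTURE_ASSOC;
 *	sv.stream_id = 0;
 *	sv.stream_value = 1;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_SS_VALUE,
 *	    &sv, sizeof(sv)) < 0)
 *		err(1, "setsockopt(SCTP_SS_VALUE)");
 */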