1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2010-2012, by Michael Tuexen. All rights reserved. 5 * Copyright (c) 2010-2012, by Randall Stewart. All rights reserved. 6 * Copyright (c) 2010-2012, by Robin Seggelmann. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * a) Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 14 * b) Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <netinet/sctp_pcb.h> 35 36 /* 37 * Default simple round-robin algorithm. 38 * Just interates the streams in the order they appear. 39 */ 40 41 static void 42 sctp_ss_default_add(struct sctp_tcb *, struct sctp_association *, 43 struct sctp_stream_out *, 44 struct sctp_stream_queue_pending *); 45 46 static void 47 sctp_ss_default_remove(struct sctp_tcb *, struct sctp_association *, 48 struct sctp_stream_out *, 49 struct sctp_stream_queue_pending *); 50 51 static void 52 sctp_ss_default_init(struct sctp_tcb *stcb, struct sctp_association *asoc) 53 { 54 uint16_t i; 55 56 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 57 58 asoc->ss_data.locked_on_sending = NULL; 59 asoc->ss_data.last_out_stream = NULL; 60 TAILQ_INIT(&asoc->ss_data.out.wheel); 61 /* 62 * If there is data in the stream queues already, the scheduler of 63 * an existing association has been changed. We need to add all 64 * stream queues to the wheel. 65 */ 66 for (i = 0; i < asoc->streamoutcnt; i++) { 67 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, 68 &asoc->strmout[i], 69 NULL); 70 } 71 return; 72 } 73 74 static void 75 sctp_ss_default_clear(struct sctp_tcb *stcb, struct sctp_association *asoc, 76 bool clear_values SCTP_UNUSED) 77 { 78 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 79 80 while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) { 81 struct sctp_stream_out *strq; 82 83 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel); 84 KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq)); 85 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke); 86 strq->ss_params.scheduled = false; 87 } 88 asoc->ss_data.last_out_stream = NULL; 89 return; 90 } 91 92 static void 93 sctp_ss_default_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq) 94 { 95 if (with_strq != NULL) { 96 if (stcb->asoc.ss_data.locked_on_sending == with_strq) { 97 stcb->asoc.ss_data.locked_on_sending = strq; 98 } 99 if (stcb->asoc.ss_data.last_out_stream == with_strq) { 100 stcb->asoc.ss_data.last_out_stream = strq; 101 } 102 } 103 strq->ss_params.scheduled = false; 104 return; 105 } 106 107 static void 108 sctp_ss_default_add(struct sctp_tcb *stcb, struct sctp_association *asoc, 109 struct sctp_stream_out *strq, 110 struct sctp_stream_queue_pending *sp SCTP_UNUSED) 111 { 112 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 113 114 /* Add to wheel if not already on it and stream queue not empty */ 115 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) { 116 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, 117 strq, ss_params.ss.rr.next_spoke); 118 strq->ss_params.scheduled = true; 119 } 120 return; 121 } 122 123 static bool 124 sctp_ss_default_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc) 125 { 126 return (TAILQ_EMPTY(&asoc->ss_data.out.wheel)); 127 } 128 129 static void 130 sctp_ss_default_remove(struct sctp_tcb *stcb, struct sctp_association *asoc, 131 struct sctp_stream_out *strq, 132 struct sctp_stream_queue_pending *sp SCTP_UNUSED) 133 { 134 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 135 136 /* 137 * Remove from wheel if stream queue is empty and actually is on the 138 * wheel 139 */ 140 if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) { 141 if (asoc->ss_data.last_out_stream == strq) { 142 asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream, 143 sctpwheel_listhead, 144 ss_params.ss.rr.next_spoke); 145 if (asoc->ss_data.last_out_stream == NULL) { 146 asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel, 147 sctpwheel_listhead); 148 } 149 if (asoc->ss_data.last_out_stream == strq) { 150 asoc->ss_data.last_out_stream = NULL; 151 } 152 } 153 if (asoc->ss_data.locked_on_sending == strq) { 154 asoc->ss_data.locked_on_sending = NULL; 155 } 156 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke); 157 strq->ss_params.scheduled = false; 158 } 159 return; 160 } 161 162 static struct sctp_stream_out * 163 sctp_ss_default_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, 164 struct sctp_association *asoc) 165 { 166 struct sctp_stream_out *strq, *strqt; 167 168 if (asoc->ss_data.locked_on_sending != NULL) { 169 KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled, 170 ("locked_on_sending %p not scheduled", 171 (void *)asoc->ss_data.locked_on_sending)); 172 return (asoc->ss_data.locked_on_sending); 173 } 174 strqt = asoc->ss_data.last_out_stream; 175 KASSERT(strqt == NULL || strqt->ss_params.scheduled, 176 ("last_out_stream %p not scheduled", (void *)strqt)); 177 default_again: 178 /* Find the next stream to use */ 179 if (strqt == NULL) { 180 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel); 181 } else { 182 strq = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke); 183 if (strq == NULL) { 184 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel); 185 } 186 } 187 KASSERT(strq == NULL || strq->ss_params.scheduled, 188 ("strq %p not scheduled", (void *)strq)); 189 190 /* 191 * If CMT is off, we must validate that the stream in question has 192 * the first item pointed towards are network destination requested 193 * by the caller. Note that if we turn out to be locked to a stream 194 * (assigning TSN's then we must stop, since we cannot look for 195 * another stream with data to send to that destination). In CMT's 196 * case, by skipping this check, we will send one data packet 197 * towards the requested net. 198 */ 199 if (net != NULL && strq != NULL && 200 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) { 201 if (TAILQ_FIRST(&strq->outqueue) && 202 TAILQ_FIRST(&strq->outqueue)->net != NULL && 203 TAILQ_FIRST(&strq->outqueue)->net != net) { 204 if (strq == asoc->ss_data.last_out_stream) { 205 return (NULL); 206 } else { 207 strqt = strq; 208 goto default_again; 209 } 210 } 211 } 212 return (strq); 213 } 214 215 static void 216 sctp_ss_default_scheduled(struct sctp_tcb *stcb, 217 struct sctp_nets *net SCTP_UNUSED, 218 struct sctp_association *asoc, 219 struct sctp_stream_out *strq, 220 int moved_how_much SCTP_UNUSED) 221 { 222 struct sctp_stream_queue_pending *sp; 223 224 KASSERT(strq != NULL, ("strq is NULL")); 225 KASSERT(strq->ss_params.scheduled, ("strq %p is not scheduled", (void *)strq)); 226 asoc->ss_data.last_out_stream = strq; 227 if (asoc->idata_supported == 0) { 228 sp = TAILQ_FIRST(&strq->outqueue); 229 if ((sp != NULL) && (sp->some_taken == 1)) { 230 asoc->ss_data.locked_on_sending = strq; 231 } else { 232 asoc->ss_data.locked_on_sending = NULL; 233 } 234 } else { 235 asoc->ss_data.locked_on_sending = NULL; 236 } 237 return; 238 } 239 240 static void 241 sctp_ss_default_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED, 242 struct sctp_association *asoc SCTP_UNUSED) 243 { 244 /* Nothing to be done here */ 245 return; 246 } 247 248 static int 249 sctp_ss_default_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED, 250 struct sctp_stream_out *strq SCTP_UNUSED, uint16_t *value SCTP_UNUSED) 251 { 252 /* Nothing to be done here */ 253 return (-1); 254 } 255 256 static int 257 sctp_ss_default_set_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED, 258 struct sctp_stream_out *strq SCTP_UNUSED, uint16_t value SCTP_UNUSED) 259 { 260 /* Nothing to be done here */ 261 return (-1); 262 } 263 264 static bool 265 sctp_ss_default_is_user_msgs_incomplete(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc) 266 { 267 struct sctp_stream_out *strq; 268 struct sctp_stream_queue_pending *sp; 269 270 if (asoc->stream_queue_cnt != 1) { 271 return (false); 272 } 273 strq = asoc->ss_data.locked_on_sending; 274 if (strq == NULL) { 275 return (false); 276 } 277 sp = TAILQ_FIRST(&strq->outqueue); 278 if (sp == NULL) { 279 return (false); 280 } 281 return (sp->msg_is_complete == 0); 282 } 283 284 /* 285 * Real round-robin algorithm. 286 * Always interates the streams in ascending order. 287 */ 288 static void 289 sctp_ss_rr_add(struct sctp_tcb *stcb, struct sctp_association *asoc, 290 struct sctp_stream_out *strq, 291 struct sctp_stream_queue_pending *sp SCTP_UNUSED) 292 { 293 struct sctp_stream_out *strqt; 294 295 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 296 297 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) { 298 if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) { 299 TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke); 300 } else { 301 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel); 302 while (strqt != NULL && (strqt->sid < strq->sid)) { 303 strqt = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke); 304 } 305 if (strqt != NULL) { 306 TAILQ_INSERT_BEFORE(strqt, strq, ss_params.ss.rr.next_spoke); 307 } else { 308 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke); 309 } 310 } 311 strq->ss_params.scheduled = true; 312 } 313 return; 314 } 315 316 /* 317 * Real round-robin per packet algorithm. 318 * Always interates the streams in ascending order and 319 * only fills messages of the same stream in a packet. 320 */ 321 static struct sctp_stream_out * 322 sctp_ss_rrp_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED, 323 struct sctp_association *asoc) 324 { 325 return (asoc->ss_data.last_out_stream); 326 } 327 328 static void 329 sctp_ss_rrp_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, 330 struct sctp_association *asoc) 331 { 332 struct sctp_stream_out *strq, *strqt; 333 334 strqt = asoc->ss_data.last_out_stream; 335 KASSERT(strqt == NULL || strqt->ss_params.scheduled, 336 ("last_out_stream %p not scheduled", (void *)strqt)); 337 rrp_again: 338 /* Find the next stream to use */ 339 if (strqt == NULL) { 340 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel); 341 } else { 342 strq = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke); 343 if (strq == NULL) { 344 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel); 345 } 346 } 347 KASSERT(strq == NULL || strq->ss_params.scheduled, 348 ("strq %p not scheduled", (void *)strq)); 349 350 /* 351 * If CMT is off, we must validate that the stream in question has 352 * the first item pointed towards are network destination requested 353 * by the caller. Note that if we turn out to be locked to a stream 354 * (assigning TSN's then we must stop, since we cannot look for 355 * another stream with data to send to that destination). In CMT's 356 * case, by skipping this check, we will send one data packet 357 * towards the requested net. 358 */ 359 if (net != NULL && strq != NULL && 360 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) { 361 if (TAILQ_FIRST(&strq->outqueue) && 362 TAILQ_FIRST(&strq->outqueue)->net != NULL && 363 TAILQ_FIRST(&strq->outqueue)->net != net) { 364 if (strq == asoc->ss_data.last_out_stream) { 365 strq = NULL; 366 } else { 367 strqt = strq; 368 goto rrp_again; 369 } 370 } 371 } 372 asoc->ss_data.last_out_stream = strq; 373 return; 374 } 375 376 /* 377 * Priority algorithm. 378 * Always prefers streams based on their priority id. 379 */ 380 static void 381 sctp_ss_prio_clear(struct sctp_tcb *stcb, struct sctp_association *asoc, 382 bool clear_values) 383 { 384 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 385 386 while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) { 387 struct sctp_stream_out *strq; 388 389 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel); 390 KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq)); 391 if (clear_values) { 392 strq->ss_params.ss.prio.priority = 0; 393 } 394 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke); 395 strq->ss_params.scheduled = false; 396 } 397 asoc->ss_data.last_out_stream = NULL; 398 return; 399 } 400 401 static void 402 sctp_ss_prio_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq) 403 { 404 if (with_strq != NULL) { 405 if (stcb->asoc.ss_data.locked_on_sending == with_strq) { 406 stcb->asoc.ss_data.locked_on_sending = strq; 407 } 408 if (stcb->asoc.ss_data.last_out_stream == with_strq) { 409 stcb->asoc.ss_data.last_out_stream = strq; 410 } 411 } 412 strq->ss_params.scheduled = false; 413 if (with_strq != NULL) { 414 strq->ss_params.ss.prio.priority = with_strq->ss_params.ss.prio.priority; 415 } else { 416 strq->ss_params.ss.prio.priority = 0; 417 } 418 return; 419 } 420 421 static void 422 sctp_ss_prio_add(struct sctp_tcb *stcb, struct sctp_association *asoc, 423 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED) 424 { 425 struct sctp_stream_out *strqt; 426 427 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 428 429 /* Add to wheel if not already on it and stream queue not empty */ 430 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) { 431 if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) { 432 TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke); 433 } else { 434 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel); 435 while (strqt != NULL && strqt->ss_params.ss.prio.priority < strq->ss_params.ss.prio.priority) { 436 strqt = TAILQ_NEXT(strqt, ss_params.ss.prio.next_spoke); 437 } 438 if (strqt != NULL) { 439 TAILQ_INSERT_BEFORE(strqt, strq, ss_params.ss.prio.next_spoke); 440 } else { 441 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke); 442 } 443 } 444 strq->ss_params.scheduled = true; 445 } 446 return; 447 } 448 449 static void 450 sctp_ss_prio_remove(struct sctp_tcb *stcb, struct sctp_association *asoc, 451 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED) 452 { 453 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 454 455 /* 456 * Remove from wheel if stream queue is empty and actually is on the 457 * wheel 458 */ 459 if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) { 460 if (asoc->ss_data.last_out_stream == strq) { 461 asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream, 462 sctpwheel_listhead, 463 ss_params.ss.prio.next_spoke); 464 if (asoc->ss_data.last_out_stream == NULL) { 465 asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel, 466 sctpwheel_listhead); 467 } 468 if (asoc->ss_data.last_out_stream == strq) { 469 asoc->ss_data.last_out_stream = NULL; 470 } 471 } 472 if (asoc->ss_data.locked_on_sending == strq) { 473 asoc->ss_data.locked_on_sending = NULL; 474 } 475 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke); 476 strq->ss_params.scheduled = false; 477 } 478 return; 479 } 480 481 static struct sctp_stream_out * 482 sctp_ss_prio_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, 483 struct sctp_association *asoc) 484 { 485 struct sctp_stream_out *strq, *strqt, *strqn; 486 487 if (asoc->ss_data.locked_on_sending != NULL) { 488 KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled, 489 ("locked_on_sending %p not scheduled", 490 (void *)asoc->ss_data.locked_on_sending)); 491 return (asoc->ss_data.locked_on_sending); 492 } 493 strqt = asoc->ss_data.last_out_stream; 494 KASSERT(strqt == NULL || strqt->ss_params.scheduled, 495 ("last_out_stream %p not scheduled", (void *)strqt)); 496 prio_again: 497 /* Find the next stream to use */ 498 if (strqt == NULL) { 499 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel); 500 } else { 501 strqn = TAILQ_NEXT(strqt, ss_params.ss.prio.next_spoke); 502 if (strqn != NULL && 503 strqn->ss_params.ss.prio.priority == strqt->ss_params.ss.prio.priority) { 504 strq = strqn; 505 } else { 506 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel); 507 } 508 } 509 KASSERT(strq == NULL || strq->ss_params.scheduled, 510 ("strq %p not scheduled", (void *)strq)); 511 512 /* 513 * If CMT is off, we must validate that the stream in question has 514 * the first item pointed towards are network destination requested 515 * by the caller. Note that if we turn out to be locked to a stream 516 * (assigning TSN's then we must stop, since we cannot look for 517 * another stream with data to send to that destination). In CMT's 518 * case, by skipping this check, we will send one data packet 519 * towards the requested net. 520 */ 521 if (net != NULL && strq != NULL && 522 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) { 523 if (TAILQ_FIRST(&strq->outqueue) && 524 TAILQ_FIRST(&strq->outqueue)->net != NULL && 525 TAILQ_FIRST(&strq->outqueue)->net != net) { 526 if (strq == asoc->ss_data.last_out_stream) { 527 return (NULL); 528 } else { 529 strqt = strq; 530 goto prio_again; 531 } 532 } 533 } 534 return (strq); 535 } 536 537 static int 538 sctp_ss_prio_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED, 539 struct sctp_stream_out *strq, uint16_t *value) 540 { 541 if (strq == NULL) { 542 return (-1); 543 } 544 *value = strq->ss_params.ss.prio.priority; 545 return (1); 546 } 547 548 static int 549 sctp_ss_prio_set_value(struct sctp_tcb *stcb, struct sctp_association *asoc, 550 struct sctp_stream_out *strq, uint16_t value) 551 { 552 if (strq == NULL) { 553 return (-1); 554 } 555 strq->ss_params.ss.prio.priority = value; 556 sctp_ss_prio_remove(stcb, asoc, strq, NULL); 557 sctp_ss_prio_add(stcb, asoc, strq, NULL); 558 return (1); 559 } 560 561 /* 562 * Fair bandwidth algorithm. 563 * Maintains an equal throughput per stream. 564 */ 565 static void 566 sctp_ss_fb_clear(struct sctp_tcb *stcb, struct sctp_association *asoc, 567 bool clear_values) 568 { 569 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 570 571 while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) { 572 struct sctp_stream_out *strq; 573 574 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel); 575 KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq)); 576 if (clear_values) { 577 strq->ss_params.ss.fb.rounds = -1; 578 } 579 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke); 580 strq->ss_params.scheduled = false; 581 } 582 asoc->ss_data.last_out_stream = NULL; 583 return; 584 } 585 586 static void 587 sctp_ss_fb_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq) 588 { 589 if (with_strq != NULL) { 590 if (stcb->asoc.ss_data.locked_on_sending == with_strq) { 591 stcb->asoc.ss_data.locked_on_sending = strq; 592 } 593 if (stcb->asoc.ss_data.last_out_stream == with_strq) { 594 stcb->asoc.ss_data.last_out_stream = strq; 595 } 596 } 597 strq->ss_params.scheduled = false; 598 if (with_strq != NULL) { 599 strq->ss_params.ss.fb.rounds = with_strq->ss_params.ss.fb.rounds; 600 } else { 601 strq->ss_params.ss.fb.rounds = -1; 602 } 603 return; 604 } 605 606 static void 607 sctp_ss_fb_add(struct sctp_tcb *stcb, struct sctp_association *asoc, 608 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED) 609 { 610 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 611 612 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) { 613 if (strq->ss_params.ss.fb.rounds < 0) 614 strq->ss_params.ss.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length; 615 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke); 616 strq->ss_params.scheduled = true; 617 } 618 return; 619 } 620 621 static void 622 sctp_ss_fb_remove(struct sctp_tcb *stcb, struct sctp_association *asoc, 623 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED) 624 { 625 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 626 627 /* 628 * Remove from wheel if stream queue is empty and actually is on the 629 * wheel 630 */ 631 if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) { 632 if (asoc->ss_data.last_out_stream == strq) { 633 asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream, 634 sctpwheel_listhead, 635 ss_params.ss.fb.next_spoke); 636 if (asoc->ss_data.last_out_stream == NULL) { 637 asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel, 638 sctpwheel_listhead); 639 } 640 if (asoc->ss_data.last_out_stream == strq) { 641 asoc->ss_data.last_out_stream = NULL; 642 } 643 } 644 if (asoc->ss_data.locked_on_sending == strq) { 645 asoc->ss_data.locked_on_sending = NULL; 646 } 647 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke); 648 strq->ss_params.scheduled = false; 649 } 650 return; 651 } 652 653 static struct sctp_stream_out * 654 sctp_ss_fb_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, 655 struct sctp_association *asoc) 656 { 657 struct sctp_stream_out *strq = NULL, *strqt; 658 659 if (asoc->ss_data.locked_on_sending != NULL) { 660 KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled, 661 ("locked_on_sending %p not scheduled", 662 (void *)asoc->ss_data.locked_on_sending)); 663 return (asoc->ss_data.locked_on_sending); 664 } 665 if (asoc->ss_data.last_out_stream == NULL || 666 TAILQ_FIRST(&asoc->ss_data.out.wheel) == TAILQ_LAST(&asoc->ss_data.out.wheel, sctpwheel_listhead)) { 667 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel); 668 } else { 669 strqt = TAILQ_NEXT(asoc->ss_data.last_out_stream, ss_params.ss.fb.next_spoke); 670 } 671 do { 672 if ((strqt != NULL) && 673 ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) > 0) || 674 (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0 && 675 (net == NULL || (TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net == NULL) || 676 (net != NULL && TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net != NULL && 677 TAILQ_FIRST(&strqt->outqueue)->net == net))))) { 678 if ((strqt->ss_params.ss.fb.rounds >= 0) && 679 ((strq == NULL) || 680 (strqt->ss_params.ss.fb.rounds < strq->ss_params.ss.fb.rounds))) { 681 strq = strqt; 682 } 683 } 684 if (strqt != NULL) { 685 strqt = TAILQ_NEXT(strqt, ss_params.ss.fb.next_spoke); 686 } else { 687 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel); 688 } 689 } while (strqt != strq); 690 return (strq); 691 } 692 693 static void 694 sctp_ss_fb_scheduled(struct sctp_tcb *stcb, struct sctp_nets *net SCTP_UNUSED, 695 struct sctp_association *asoc, struct sctp_stream_out *strq, 696 int moved_how_much SCTP_UNUSED) 697 { 698 struct sctp_stream_queue_pending *sp; 699 struct sctp_stream_out *strqt; 700 int subtract; 701 702 if (asoc->idata_supported == 0) { 703 sp = TAILQ_FIRST(&strq->outqueue); 704 if ((sp != NULL) && (sp->some_taken == 1)) { 705 asoc->ss_data.locked_on_sending = strq; 706 } else { 707 asoc->ss_data.locked_on_sending = NULL; 708 } 709 } else { 710 asoc->ss_data.locked_on_sending = NULL; 711 } 712 subtract = strq->ss_params.ss.fb.rounds; 713 TAILQ_FOREACH(strqt, &asoc->ss_data.out.wheel, ss_params.ss.fb.next_spoke) { 714 strqt->ss_params.ss.fb.rounds -= subtract; 715 if (strqt->ss_params.ss.fb.rounds < 0) 716 strqt->ss_params.ss.fb.rounds = 0; 717 } 718 if (TAILQ_FIRST(&strq->outqueue)) { 719 strq->ss_params.ss.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length; 720 } else { 721 strq->ss_params.ss.fb.rounds = -1; 722 } 723 asoc->ss_data.last_out_stream = strq; 724 return; 725 } 726 727 /* 728 * First-come, first-serve algorithm. 729 * Maintains the order provided by the application. 730 */ 731 static void 732 sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc, 733 struct sctp_stream_out *strq SCTP_UNUSED, 734 struct sctp_stream_queue_pending *sp); 735 736 static void 737 sctp_ss_fcfs_init(struct sctp_tcb *stcb, struct sctp_association *asoc) 738 { 739 uint32_t x, n = 0, add_more = 1; 740 struct sctp_stream_queue_pending *sp; 741 uint16_t i; 742 743 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 744 745 TAILQ_INIT(&asoc->ss_data.out.list); 746 /* 747 * If there is data in the stream queues already, the scheduler of 748 * an existing association has been changed. We can only cycle 749 * through the stream queues and add everything to the FCFS queue. 750 */ 751 while (add_more) { 752 add_more = 0; 753 for (i = 0; i < asoc->streamoutcnt; i++) { 754 sp = TAILQ_FIRST(&asoc->strmout[i].outqueue); 755 x = 0; 756 /* Find n. message in current stream queue */ 757 while (sp != NULL && x < n) { 758 sp = TAILQ_NEXT(sp, next); 759 x++; 760 } 761 if (sp != NULL) { 762 sctp_ss_fcfs_add(stcb, asoc, &asoc->strmout[i], sp); 763 add_more = 1; 764 } 765 } 766 n++; 767 } 768 return; 769 } 770 771 static void 772 sctp_ss_fcfs_clear(struct sctp_tcb *stcb, struct sctp_association *asoc, 773 bool clear_values SCTP_UNUSED) 774 { 775 struct sctp_stream_queue_pending *sp; 776 777 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 778 779 while (!TAILQ_EMPTY(&asoc->ss_data.out.list)) { 780 sp = TAILQ_FIRST(&asoc->ss_data.out.list); 781 KASSERT(sp->scheduled, ("sp %p not scheduled", (void *)sp)); 782 TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next); 783 sp->scheduled = false; 784 } 785 asoc->ss_data.last_out_stream = NULL; 786 return; 787 } 788 789 static void 790 sctp_ss_fcfs_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq) 791 { 792 if (with_strq != NULL) { 793 if (stcb->asoc.ss_data.locked_on_sending == with_strq) { 794 stcb->asoc.ss_data.locked_on_sending = strq; 795 } 796 if (stcb->asoc.ss_data.last_out_stream == with_strq) { 797 stcb->asoc.ss_data.last_out_stream = strq; 798 } 799 } 800 strq->ss_params.scheduled = false; 801 return; 802 } 803 804 static void 805 sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc, 806 struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp) 807 { 808 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 809 810 if (!sp->scheduled) { 811 TAILQ_INSERT_TAIL(&asoc->ss_data.out.list, sp, ss_next); 812 sp->scheduled = true; 813 } 814 return; 815 } 816 817 static bool 818 sctp_ss_fcfs_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc) 819 { 820 return (TAILQ_EMPTY(&asoc->ss_data.out.list)); 821 } 822 823 static void 824 sctp_ss_fcfs_remove(struct sctp_tcb *stcb, struct sctp_association *asoc, 825 struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp) 826 { 827 SCTP_TCB_SEND_LOCK_ASSERT(stcb); 828 829 if (sp->scheduled) { 830 TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next); 831 sp->scheduled = false; 832 } 833 return; 834 } 835 836 static struct sctp_stream_out * 837 sctp_ss_fcfs_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, 838 struct sctp_association *asoc) 839 { 840 struct sctp_stream_out *strq; 841 struct sctp_stream_queue_pending *sp; 842 843 if (asoc->ss_data.locked_on_sending) { 844 return (asoc->ss_data.locked_on_sending); 845 } 846 sp = TAILQ_FIRST(&asoc->ss_data.out.list); 847 default_again: 848 if (sp != NULL) { 849 strq = &asoc->strmout[sp->sid]; 850 } else { 851 strq = NULL; 852 } 853 854 /* 855 * If CMT is off, we must validate that the stream in question has 856 * the first item pointed towards are network destination requested 857 * by the caller. Note that if we turn out to be locked to a stream 858 * (assigning TSN's then we must stop, since we cannot look for 859 * another stream with data to send to that destination). In CMT's 860 * case, by skipping this check, we will send one data packet 861 * towards the requested net. 862 */ 863 if (net != NULL && strq != NULL && 864 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) { 865 if (TAILQ_FIRST(&strq->outqueue) && 866 TAILQ_FIRST(&strq->outqueue)->net != NULL && 867 TAILQ_FIRST(&strq->outqueue)->net != net) { 868 sp = TAILQ_NEXT(sp, ss_next); 869 goto default_again; 870 } 871 } 872 return (strq); 873 } 874 875 static void 876 sctp_ss_fcfs_scheduled(struct sctp_tcb *stcb, 877 struct sctp_nets *net SCTP_UNUSED, 878 struct sctp_association *asoc, 879 struct sctp_stream_out *strq, 880 int moved_how_much SCTP_UNUSED) 881 { 882 struct sctp_stream_queue_pending *sp; 883 884 KASSERT(strq != NULL, ("strq is NULL")); 885 asoc->ss_data.last_out_stream = strq; 886 if (asoc->idata_supported == 0) { 887 sp = TAILQ_FIRST(&strq->outqueue); 888 if ((sp != NULL) && (sp->some_taken == 1)) { 889 asoc->ss_data.locked_on_sending = strq; 890 } else { 891 asoc->ss_data.locked_on_sending = NULL; 892 } 893 } else { 894 asoc->ss_data.locked_on_sending = NULL; 895 } 896 return; 897 } 898 899 const struct sctp_ss_functions sctp_ss_functions[] = { 900 /* SCTP_SS_DEFAULT */ 901 { 902 .sctp_ss_init = sctp_ss_default_init, 903 .sctp_ss_clear = sctp_ss_default_clear, 904 .sctp_ss_init_stream = sctp_ss_default_init_stream, 905 .sctp_ss_add_to_stream = sctp_ss_default_add, 906 .sctp_ss_is_empty = sctp_ss_default_is_empty, 907 .sctp_ss_remove_from_stream = sctp_ss_default_remove, 908 .sctp_ss_select_stream = sctp_ss_default_select, 909 .sctp_ss_scheduled = sctp_ss_default_scheduled, 910 .sctp_ss_packet_done = sctp_ss_default_packet_done, 911 .sctp_ss_get_value = sctp_ss_default_get_value, 912 .sctp_ss_set_value = sctp_ss_default_set_value, 913 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete 914 }, 915 /* SCTP_SS_ROUND_ROBIN */ 916 { 917 .sctp_ss_init = sctp_ss_default_init, 918 .sctp_ss_clear = sctp_ss_default_clear, 919 .sctp_ss_init_stream = sctp_ss_default_init_stream, 920 .sctp_ss_add_to_stream = sctp_ss_rr_add, 921 .sctp_ss_is_empty = sctp_ss_default_is_empty, 922 .sctp_ss_remove_from_stream = sctp_ss_default_remove, 923 .sctp_ss_select_stream = sctp_ss_default_select, 924 .sctp_ss_scheduled = sctp_ss_default_scheduled, 925 .sctp_ss_packet_done = sctp_ss_default_packet_done, 926 .sctp_ss_get_value = sctp_ss_default_get_value, 927 .sctp_ss_set_value = sctp_ss_default_set_value, 928 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete 929 }, 930 /* SCTP_SS_ROUND_ROBIN_PACKET */ 931 { 932 .sctp_ss_init = sctp_ss_default_init, 933 .sctp_ss_clear = sctp_ss_default_clear, 934 .sctp_ss_init_stream = sctp_ss_default_init_stream, 935 .sctp_ss_add_to_stream = sctp_ss_rr_add, 936 .sctp_ss_is_empty = sctp_ss_default_is_empty, 937 .sctp_ss_remove_from_stream = sctp_ss_default_remove, 938 .sctp_ss_select_stream = sctp_ss_rrp_select, 939 .sctp_ss_scheduled = sctp_ss_default_scheduled, 940 .sctp_ss_packet_done = sctp_ss_rrp_packet_done, 941 .sctp_ss_get_value = sctp_ss_default_get_value, 942 .sctp_ss_set_value = sctp_ss_default_set_value, 943 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete 944 }, 945 /* SCTP_SS_PRIORITY */ 946 { 947 .sctp_ss_init = sctp_ss_default_init, 948 .sctp_ss_clear = sctp_ss_prio_clear, 949 .sctp_ss_init_stream = sctp_ss_prio_init_stream, 950 .sctp_ss_add_to_stream = sctp_ss_prio_add, 951 .sctp_ss_is_empty = sctp_ss_default_is_empty, 952 .sctp_ss_remove_from_stream = sctp_ss_prio_remove, 953 .sctp_ss_select_stream = sctp_ss_prio_select, 954 .sctp_ss_scheduled = sctp_ss_default_scheduled, 955 .sctp_ss_packet_done = sctp_ss_default_packet_done, 956 .sctp_ss_get_value = sctp_ss_prio_get_value, 957 .sctp_ss_set_value = sctp_ss_prio_set_value, 958 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete 959 }, 960 /* SCTP_SS_FAIR_BANDWITH */ 961 { 962 .sctp_ss_init = sctp_ss_default_init, 963 .sctp_ss_clear = sctp_ss_fb_clear, 964 .sctp_ss_init_stream = sctp_ss_fb_init_stream, 965 .sctp_ss_add_to_stream = sctp_ss_fb_add, 966 .sctp_ss_is_empty = sctp_ss_default_is_empty, 967 .sctp_ss_remove_from_stream = sctp_ss_fb_remove, 968 .sctp_ss_select_stream = sctp_ss_fb_select, 969 .sctp_ss_scheduled = sctp_ss_fb_scheduled, 970 .sctp_ss_packet_done = sctp_ss_default_packet_done, 971 .sctp_ss_get_value = sctp_ss_default_get_value, 972 .sctp_ss_set_value = sctp_ss_default_set_value, 973 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete 974 }, 975 /* SCTP_SS_FIRST_COME */ 976 { 977 .sctp_ss_init = sctp_ss_fcfs_init, 978 .sctp_ss_clear = sctp_ss_fcfs_clear, 979 .sctp_ss_init_stream = sctp_ss_fcfs_init_stream, 980 .sctp_ss_add_to_stream = sctp_ss_fcfs_add, 981 .sctp_ss_is_empty = sctp_ss_fcfs_is_empty, 982 .sctp_ss_remove_from_stream = sctp_ss_fcfs_remove, 983 .sctp_ss_select_stream = sctp_ss_fcfs_select, 984 .sctp_ss_scheduled = sctp_ss_fcfs_scheduled, 985 .sctp_ss_packet_done = sctp_ss_default_packet_done, 986 .sctp_ss_get_value = sctp_ss_default_get_value, 987 .sctp_ss_set_value = sctp_ss_default_set_value, 988 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete 989 } 990 }; 991