xref: /freebsd/sys/netinet/sctp_ss_functions.c (revision 0d66206fff44f864ea8a4b220c3a53b4caa959a0)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2010-2012, by Michael Tuexen. All rights reserved.
5  * Copyright (c) 2010-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2010-2012, by Robin Seggelmann. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <netinet/sctp_pcb.h>
35 
36 /*
37  * Default simple round-robin algorithm.
38  * Iterates over the streams in the order in which they appear on the wheel.
39  */
40 
41 static void
42 sctp_ss_default_add(struct sctp_tcb *, struct sctp_association *,
43     struct sctp_stream_out *,
44     struct sctp_stream_queue_pending *);
45 
46 static void
47 sctp_ss_default_remove(struct sctp_tcb *, struct sctp_association *,
48     struct sctp_stream_out *,
49     struct sctp_stream_queue_pending *);
50 
51 static void
52 sctp_ss_default_init(struct sctp_tcb *stcb, struct sctp_association *asoc)
53 {
54 	uint16_t i;
55 
56 	SCTP_TCB_LOCK_ASSERT(stcb);
57 
58 	asoc->ss_data.locked_on_sending = NULL;
59 	asoc->ss_data.last_out_stream = NULL;
60 	TAILQ_INIT(&asoc->ss_data.out.wheel);
61 	/*
62 	 * If there is data in the stream queues already, the scheduler of
63 	 * an existing association has been changed. We need to add all
64 	 * stream queues to the wheel.
65 	 */
66 	for (i = 0; i < asoc->streamoutcnt; i++) {
67 		stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc,
68 		    &asoc->strmout[i],
69 		    NULL);
70 	}
71 	return;
72 }
73 
74 static void
75 sctp_ss_default_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
76     bool clear_values SCTP_UNUSED)
77 {
78 	SCTP_TCB_LOCK_ASSERT(stcb);
79 
80 	while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
81 		struct sctp_stream_out *strq;
82 
83 		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
84 		KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
85 		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
86 		strq->ss_params.scheduled = false;
87 	}
88 	asoc->ss_data.last_out_stream = NULL;
89 	return;
90 }
91 
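/*
 * Initialize the per-stream scheduler state.  If this stream takes the
 * place of with_strq (for example when the stream array is reallocated),
 * move the scheduler's locked_on_sending and last_out_stream references
 * over to the new stream.
 */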
92 static void
93 sctp_ss_default_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
94 {
95 	SCTP_TCB_LOCK_ASSERT(stcb);
96 
97 	if (with_strq != NULL) {
98 		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
99 			stcb->asoc.ss_data.locked_on_sending = strq;
100 		}
101 		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
102 			stcb->asoc.ss_data.last_out_stream = strq;
103 		}
104 	}
105 	strq->ss_params.scheduled = false;
106 	return;
107 }
108 
109 static void
110 sctp_ss_default_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
111     struct sctp_stream_out *strq,
112     struct sctp_stream_queue_pending *sp SCTP_UNUSED)
113 {
114 	SCTP_TCB_LOCK_ASSERT(stcb);
115 
116 	/* Add to wheel if not already on it and stream queue not empty */
117 	if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
118 		TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel,
119 		    strq, ss_params.ss.rr.next_spoke);
120 		strq->ss_params.scheduled = true;
121 	}
122 	return;
123 }
124 
125 static bool
126 sctp_ss_default_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
127 {
128 	SCTP_TCB_LOCK_ASSERT(stcb);
129 
130 	return (TAILQ_EMPTY(&asoc->ss_data.out.wheel));
131 }
132 
133 static void
134 sctp_ss_default_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
135     struct sctp_stream_out *strq,
136     struct sctp_stream_queue_pending *sp SCTP_UNUSED)
137 {
138 	SCTP_TCB_LOCK_ASSERT(stcb);
139 
140 	/*
141 	 * Remove the stream from the wheel if its queue is empty and it is
142 	 * actually on the wheel.
143 	 */
144 	if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
145 		if (asoc->ss_data.last_out_stream == strq) {
146 			asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
147 			    sctpwheel_listhead,
148 			    ss_params.ss.rr.next_spoke);
149 			if (asoc->ss_data.last_out_stream == NULL) {
150 				asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
151 				    sctpwheel_listhead);
152 			}
153 			if (asoc->ss_data.last_out_stream == strq) {
154 				asoc->ss_data.last_out_stream = NULL;
155 			}
156 		}
157 		if (asoc->ss_data.locked_on_sending == strq) {
158 			asoc->ss_data.locked_on_sending = NULL;
159 		}
160 		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
161 		strq->ss_params.scheduled = false;
162 	}
163 	return;
164 }
165 
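/*
 * Select the stream to send from next.  If the scheduler is locked on a
 * stream (an incomplete message is in flight), that stream is returned.
 * Otherwise the scan resumes after last_out_stream and, when CMT is off,
 * skips streams whose first message is bound to a net other than the
 * requested one.
 */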
166 static struct sctp_stream_out *
167 sctp_ss_default_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
168     struct sctp_association *asoc)
169 {
170 	struct sctp_stream_out *strq, *strqt;
171 
172 	SCTP_TCB_LOCK_ASSERT(stcb);
173 
174 	if (asoc->ss_data.locked_on_sending != NULL) {
175 		KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
176 		    ("locked_on_sending %p not scheduled",
177 		    (void *)asoc->ss_data.locked_on_sending));
178 		return (asoc->ss_data.locked_on_sending);
179 	}
180 	strqt = asoc->ss_data.last_out_stream;
181 	KASSERT(strqt == NULL || strqt->ss_params.scheduled,
182 	    ("last_out_stream %p not scheduled", (void *)strqt));
183 default_again:
184 	/* Find the next stream to use */
185 	if (strqt == NULL) {
186 		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
187 	} else {
188 		strq = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
189 		if (strq == NULL) {
190 			strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
191 		}
192 	}
193 	KASSERT(strq == NULL || strq->ss_params.scheduled,
194 	    ("strq %p not scheduled", (void *)strq));
195 
196 	/*
197 	 * If CMT is off, we must validate that the stream in question has
198 	 * its first item pointed towards the network destination requested
199 	 * by the caller. Note that if we turn out to be locked to a stream
200 	 * (assigning TSNs), then we must stop, since we cannot look for
201 	 * another stream with data to send to that destination. In the CMT
202 	 * case, by skipping this check, we will send one data packet
203 	 * towards the requested net.
204 	 */
205 	if (net != NULL && strq != NULL &&
206 	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
207 		if (TAILQ_FIRST(&strq->outqueue) &&
208 		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
209 		    TAILQ_FIRST(&strq->outqueue)->net != net) {
210 			if (strq == asoc->ss_data.last_out_stream) {
211 				return (NULL);
212 			} else {
213 				strqt = strq;
214 				goto default_again;
215 			}
216 		}
217 	}
218 	return (strq);
219 }
220 
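/*
 * Record that data has been moved from strq.  Without I-DATA support a
 * partially taken message (some_taken) locks the scheduler onto this
 * stream, so the remaining parts of that message are sent before any
 * other stream is scheduled.
 */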
221 static void
222 sctp_ss_default_scheduled(struct sctp_tcb *stcb,
223     struct sctp_nets *net SCTP_UNUSED,
224     struct sctp_association *asoc,
225     struct sctp_stream_out *strq,
226     int moved_how_much SCTP_UNUSED)
227 {
228 	struct sctp_stream_queue_pending *sp;
229 
230 	KASSERT(strq != NULL, ("strq is NULL"));
231 	KASSERT(strq->ss_params.scheduled, ("strq %p is not scheduled", (void *)strq));
232 	SCTP_TCB_LOCK_ASSERT(stcb);
233 
234 	asoc->ss_data.last_out_stream = strq;
235 	if (asoc->idata_supported == 0) {
236 		sp = TAILQ_FIRST(&strq->outqueue);
237 		if ((sp != NULL) && (sp->some_taken == 1)) {
238 			asoc->ss_data.locked_on_sending = strq;
239 		} else {
240 			asoc->ss_data.locked_on_sending = NULL;
241 		}
242 	} else {
243 		asoc->ss_data.locked_on_sending = NULL;
244 	}
245 	return;
246 }
247 
248 static void
249 sctp_ss_default_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
250     struct sctp_association *asoc SCTP_UNUSED)
251 {
252 	SCTP_TCB_LOCK_ASSERT(stcb);
253 
254 	/* Nothing to be done here */
255 	return;
256 }
257 
258 static int
259 sctp_ss_default_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
260     struct sctp_stream_out *strq SCTP_UNUSED, uint16_t *value SCTP_UNUSED)
261 {
262 	SCTP_TCB_LOCK_ASSERT(stcb);
263 
264 	/* Nothing to be done here */
265 	return (-1);
266 }
267 
268 static int
269 sctp_ss_default_set_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
270     struct sctp_stream_out *strq SCTP_UNUSED, uint16_t value SCTP_UNUSED)
271 {
272 	SCTP_TCB_LOCK_ASSERT(stcb);
273 
274 	/* Nothing to be done here */
275 	return (-1);
276 }
277 
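/*
 * Report whether the only remaining queued message is still incomplete,
 * i.e. the application has not yet provided all of its data: exactly one
 * stream queue entry exists, the scheduler is locked on its stream, and
 * the first message there is not marked complete.
 */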
278 static bool
279 sctp_ss_default_is_user_msgs_incomplete(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
280 {
281 	struct sctp_stream_out *strq;
282 	struct sctp_stream_queue_pending *sp;
283 
284 	SCTP_TCB_LOCK_ASSERT(stcb);
285 
286 	if (asoc->stream_queue_cnt != 1) {
287 		return (false);
288 	}
289 	strq = asoc->ss_data.locked_on_sending;
290 	if (strq == NULL) {
291 		return (false);
292 	}
293 	sp = TAILQ_FIRST(&strq->outqueue);
294 	if (sp == NULL) {
295 		return (false);
296 	}
297 	return (sp->msg_is_complete == 0);
298 }
299 
300 /*
301  * Real round-robin algorithm.
302  * Always iterates over the streams in ascending stream id order.
303  */
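/*
 * Insert the stream into the wheel sorted by stream id, so that the
 * default selection logic visits the streams in ascending sid order.
 */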
304 static void
305 sctp_ss_rr_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
306     struct sctp_stream_out *strq,
307     struct sctp_stream_queue_pending *sp SCTP_UNUSED)
308 {
309 	struct sctp_stream_out *strqt;
310 
311 	SCTP_TCB_LOCK_ASSERT(stcb);
312 
313 	if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
314 		if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
315 			TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
316 		} else {
317 			strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
318 			while (strqt != NULL && (strqt->sid < strq->sid)) {
319 				strqt = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
320 			}
321 			if (strqt != NULL) {
322 				TAILQ_INSERT_BEFORE(strqt, strq, ss_params.ss.rr.next_spoke);
323 			} else {
324 				TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
325 			}
326 		}
327 		strq->ss_params.scheduled = true;
328 	}
329 	return;
330 }
331 
332 /*
333  * Real round-robin per packet algorithm.
334  * Always iterates over the streams in ascending stream id order and
335  * fills each packet with messages from a single stream only.
336  */
337 static struct sctp_stream_out *
338 sctp_ss_rrp_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
339     struct sctp_association *asoc)
340 {
341 	SCTP_TCB_LOCK_ASSERT(stcb);
342 
343 	return (asoc->ss_data.last_out_stream);
344 }
345 
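/*
 * The per-packet variant keeps returning last_out_stream from its select
 * hook.  Once a packet is done, advance last_out_stream to the next
 * schedulable stream so that the following packet is filled from it.
 * When CMT is off, streams whose first message is bound to a different
 * net than the requested one are skipped.
 */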
346 static void
347 sctp_ss_rrp_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
348     struct sctp_association *asoc)
349 {
350 	struct sctp_stream_out *strq, *strqt;
351 
352 	SCTP_TCB_LOCK_ASSERT(stcb);
353 
354 	strqt = asoc->ss_data.last_out_stream;
355 	KASSERT(strqt == NULL || strqt->ss_params.scheduled,
356 	    ("last_out_stream %p not scheduled", (void *)strqt));
357 rrp_again:
358 	/* Find the next stream to use */
359 	if (strqt == NULL) {
360 		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
361 	} else {
362 		strq = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
363 		if (strq == NULL) {
364 			strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
365 		}
366 	}
367 	KASSERT(strq == NULL || strq->ss_params.scheduled,
368 	    ("strq %p not scheduled", (void *)strq));
369 
370 	/*
371 	 * If CMT is off, we must validate that the stream in question has
372 	 * its first item pointed towards the network destination requested
373 	 * by the caller. Note that if we turn out to be locked to a stream
374 	 * (assigning TSNs), then we must stop, since we cannot look for
375 	 * another stream with data to send to that destination. In the CMT
376 	 * case, by skipping this check, we will send one data packet
377 	 * towards the requested net.
378 	 */
379 	if (net != NULL && strq != NULL &&
380 	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
381 		if (TAILQ_FIRST(&strq->outqueue) &&
382 		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
383 		    TAILQ_FIRST(&strq->outqueue)->net != net) {
384 			if (strq == asoc->ss_data.last_out_stream) {
385 				strq = NULL;
386 			} else {
387 				strqt = strq;
388 				goto rrp_again;
389 			}
390 		}
391 	}
392 	asoc->ss_data.last_out_stream = strq;
393 	return;
394 }
395 
396 /*
397  * Priority algorithm.
398  * Always prefers streams based on their priority value (lower is served first).
399  */
400 static void
401 sctp_ss_prio_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
402     bool clear_values)
403 {
404 	SCTP_TCB_LOCK_ASSERT(stcb);
405 
406 	while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
407 		struct sctp_stream_out *strq;
408 
409 		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
410 		KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
411 		if (clear_values) {
412 			strq->ss_params.ss.prio.priority = 0;
413 		}
414 		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
415 		strq->ss_params.scheduled = false;
416 	}
417 	asoc->ss_data.last_out_stream = NULL;
418 	return;
419 }
420 
421 static void
422 sctp_ss_prio_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
423 {
424 	SCTP_TCB_LOCK_ASSERT(stcb);
425 
426 	if (with_strq != NULL) {
427 		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
428 			stcb->asoc.ss_data.locked_on_sending = strq;
429 		}
430 		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
431 			stcb->asoc.ss_data.last_out_stream = strq;
432 		}
433 	}
434 	strq->ss_params.scheduled = false;
435 	if (with_strq != NULL) {
436 		strq->ss_params.ss.prio.priority = with_strq->ss_params.ss.prio.priority;
437 	} else {
438 		strq->ss_params.ss.prio.priority = 0;
439 	}
440 	return;
441 }
442 
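/*
 * Insert the stream into the wheel sorted by ascending priority value,
 * so that streams with numerically lower priorities sit at the front.
 */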
443 static void
444 sctp_ss_prio_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
445     struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
446 {
447 	struct sctp_stream_out *strqt;
448 
449 	SCTP_TCB_LOCK_ASSERT(stcb);
450 
451 	/* Add to wheel if not already on it and stream queue not empty */
452 	if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
453 		if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
454 			TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
455 		} else {
456 			strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
457 			while (strqt != NULL && strqt->ss_params.ss.prio.priority < strq->ss_params.ss.prio.priority) {
458 				strqt = TAILQ_NEXT(strqt, ss_params.ss.prio.next_spoke);
459 			}
460 			if (strqt != NULL) {
461 				TAILQ_INSERT_BEFORE(strqt, strq, ss_params.ss.prio.next_spoke);
462 			} else {
463 				TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
464 			}
465 		}
466 		strq->ss_params.scheduled = true;
467 	}
468 	return;
469 }
470 
471 static void
472 sctp_ss_prio_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
473     struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
474 {
475 	SCTP_TCB_LOCK_ASSERT(stcb);
476 
477 	/*
478 	 * Remove the stream from the wheel if its queue is empty and it is
479 	 * actually on the wheel.
480 	 */
481 	if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
482 		if (asoc->ss_data.last_out_stream == strq) {
483 			asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
484 			    sctpwheel_listhead,
485 			    ss_params.ss.prio.next_spoke);
486 			if (asoc->ss_data.last_out_stream == NULL) {
487 				asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
488 				    sctpwheel_listhead);
489 			}
490 			if (asoc->ss_data.last_out_stream == strq) {
491 				asoc->ss_data.last_out_stream = NULL;
492 			}
493 		}
494 		if (asoc->ss_data.locked_on_sending == strq) {
495 			asoc->ss_data.locked_on_sending = NULL;
496 		}
497 		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
498 		strq->ss_params.scheduled = false;
499 	}
500 	return;
501 }
502 
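/*
 * Select the next stream.  The scan only moves on to the following
 * stream if it has the same priority as the previously used one;
 * otherwise it restarts at the front of the wheel, so the streams with
 * the lowest priority value are always served first.
 */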
503 static struct sctp_stream_out *
504 sctp_ss_prio_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
505     struct sctp_association *asoc)
506 {
507 	struct sctp_stream_out *strq, *strqt, *strqn;
508 
509 	SCTP_TCB_LOCK_ASSERT(stcb);
510 
511 	if (asoc->ss_data.locked_on_sending != NULL) {
512 		KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
513 		    ("locked_on_sending %p not scheduled",
514 		    (void *)asoc->ss_data.locked_on_sending));
515 		return (asoc->ss_data.locked_on_sending);
516 	}
517 	strqt = asoc->ss_data.last_out_stream;
518 	KASSERT(strqt == NULL || strqt->ss_params.scheduled,
519 	    ("last_out_stream %p not scheduled", (void *)strqt));
520 prio_again:
521 	/* Find the next stream to use */
522 	if (strqt == NULL) {
523 		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
524 	} else {
525 		strqn = TAILQ_NEXT(strqt, ss_params.ss.prio.next_spoke);
526 		if (strqn != NULL &&
527 		    strqn->ss_params.ss.prio.priority == strqt->ss_params.ss.prio.priority) {
528 			strq = strqn;
529 		} else {
530 			strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
531 		}
532 	}
533 	KASSERT(strq == NULL || strq->ss_params.scheduled,
534 	    ("strq %p not scheduled", (void *)strq));
535 
536 	/*
537 	 * If CMT is off, we must validate that the stream in question has
538 	 * its first item pointed towards the network destination requested
539 	 * by the caller. Note that if we turn out to be locked to a stream
540 	 * (assigning TSNs), then we must stop, since we cannot look for
541 	 * another stream with data to send to that destination. In the CMT
542 	 * case, by skipping this check, we will send one data packet
543 	 * towards the requested net.
544 	 */
545 	if (net != NULL && strq != NULL &&
546 	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
547 		if (TAILQ_FIRST(&strq->outqueue) &&
548 		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
549 		    TAILQ_FIRST(&strq->outqueue)->net != net) {
550 			if (strq == asoc->ss_data.last_out_stream) {
551 				return (NULL);
552 			} else {
553 				strqt = strq;
554 				goto prio_again;
555 			}
556 		}
557 	}
558 	return (strq);
559 }
560 
561 static int
562 sctp_ss_prio_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
563     struct sctp_stream_out *strq, uint16_t *value)
564 {
565 	SCTP_TCB_LOCK_ASSERT(stcb);
566 
567 	if (strq == NULL) {
568 		return (-1);
569 	}
570 	*value = strq->ss_params.ss.prio.priority;
571 	return (1);
572 }
573 
574 static int
575 sctp_ss_prio_set_value(struct sctp_tcb *stcb, struct sctp_association *asoc,
576     struct sctp_stream_out *strq, uint16_t value)
577 {
578 	SCTP_TCB_LOCK_ASSERT(stcb);
579 
580 	if (strq == NULL) {
581 		return (-1);
582 	}
583 	strq->ss_params.ss.prio.priority = value;
584 	sctp_ss_prio_remove(stcb, asoc, strq, NULL);
585 	sctp_ss_prio_add(stcb, asoc, strq, NULL);
586 	return (1);
587 }
588 
589 /*
590  * Fair bandwidth algorithm.
591  * Maintains an equal throughput per stream.
592  */
593 static void
594 sctp_ss_fb_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
595     bool clear_values)
596 {
597 	SCTP_TCB_LOCK_ASSERT(stcb);
598 
599 	while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
600 		struct sctp_stream_out *strq;
601 
602 		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
603 		KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
604 		if (clear_values) {
605 			strq->ss_params.ss.fb.rounds = -1;
606 		}
607 		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
608 		strq->ss_params.scheduled = false;
609 	}
610 	asoc->ss_data.last_out_stream = NULL;
611 	return;
612 }
613 
614 static void
615 sctp_ss_fb_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
616 {
617 	SCTP_TCB_LOCK_ASSERT(stcb);
618 
619 	if (with_strq != NULL) {
620 		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
621 			stcb->asoc.ss_data.locked_on_sending = strq;
622 		}
623 		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
624 			stcb->asoc.ss_data.last_out_stream = strq;
625 		}
626 	}
627 	strq->ss_params.scheduled = false;
628 	if (with_strq != NULL) {
629 		strq->ss_params.ss.fb.rounds = with_strq->ss_params.ss.fb.rounds;
630 	} else {
631 		strq->ss_params.ss.fb.rounds = -1;
632 	}
633 	return;
634 }
635 
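/*
 * Put the stream on the wheel (if it has queued data and is not already
 * scheduled) and initialize its rounds counter with the length of its
 * first queued message; a negative value means the counter is not yet
 * valid.
 */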
636 static void
637 sctp_ss_fb_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
638     struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
639 {
640 	SCTP_TCB_LOCK_ASSERT(stcb);
641 
642 	if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
643 		if (strq->ss_params.ss.fb.rounds < 0)
644 			strq->ss_params.ss.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
645 		TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
646 		strq->ss_params.scheduled = true;
647 	}
648 	return;
649 }
650 
651 static void
652 sctp_ss_fb_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
653     struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
654 {
655 	SCTP_TCB_LOCK_ASSERT(stcb);
656 
657 	/*
658 	 * Remove the stream from the wheel if its queue is empty and it is
659 	 * actually on the wheel.
660 	 */
661 	if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
662 		if (asoc->ss_data.last_out_stream == strq) {
663 			asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
664 			    sctpwheel_listhead,
665 			    ss_params.ss.fb.next_spoke);
666 			if (asoc->ss_data.last_out_stream == NULL) {
667 				asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
668 				    sctpwheel_listhead);
669 			}
670 			if (asoc->ss_data.last_out_stream == strq) {
671 				asoc->ss_data.last_out_stream = NULL;
672 			}
673 		}
674 		if (asoc->ss_data.locked_on_sending == strq) {
675 			asoc->ss_data.locked_on_sending = NULL;
676 		}
677 		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
678 		strq->ss_params.scheduled = false;
679 	}
680 	return;
681 }
682 
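/*
 * Pick the schedulable stream with the smallest rounds value, starting
 * the scan after last_out_stream.  When CMT is off, only streams whose
 * first message may be sent towards the requested net are considered.
 */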
683 static struct sctp_stream_out *
684 sctp_ss_fb_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
685     struct sctp_association *asoc)
686 {
687 	struct sctp_stream_out *strq = NULL, *strqt;
688 
689 	SCTP_TCB_LOCK_ASSERT(stcb);
690 
691 	if (asoc->ss_data.locked_on_sending != NULL) {
692 		KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
693 		    ("locked_on_sending %p not scheduled",
694 		    (void *)asoc->ss_data.locked_on_sending));
695 		return (asoc->ss_data.locked_on_sending);
696 	}
697 	if (asoc->ss_data.last_out_stream == NULL ||
698 	    TAILQ_FIRST(&asoc->ss_data.out.wheel) == TAILQ_LAST(&asoc->ss_data.out.wheel, sctpwheel_listhead)) {
699 		strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
700 	} else {
701 		strqt = TAILQ_NEXT(asoc->ss_data.last_out_stream, ss_params.ss.fb.next_spoke);
702 	}
703 	do {
704 		if ((strqt != NULL) &&
705 		    ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) > 0) ||
706 		    (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0 &&
707 		    (net == NULL || (TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net == NULL) ||
708 		    (net != NULL && TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net != NULL &&
709 		    TAILQ_FIRST(&strqt->outqueue)->net == net))))) {
710 			if ((strqt->ss_params.ss.fb.rounds >= 0) &&
711 			    ((strq == NULL) ||
712 			    (strqt->ss_params.ss.fb.rounds < strq->ss_params.ss.fb.rounds))) {
713 				strq = strqt;
714 			}
715 		}
716 		if (strqt != NULL) {
717 			strqt = TAILQ_NEXT(strqt, ss_params.ss.fb.next_spoke);
718 		} else {
719 			strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
720 		}
721 	} while (strqt != strq);
722 	return (strq);
723 }
724 
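/*
 * Account for the data just sent: subtract the chosen stream's rounds
 * value from every stream on the wheel (clamping at zero) and reload the
 * chosen stream's counter with the length of its next queued message, or
 * mark it invalid if its queue is now empty.  This keeps the per-stream
 * throughput balanced over time.
 */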
725 static void
726 sctp_ss_fb_scheduled(struct sctp_tcb *stcb, struct sctp_nets *net SCTP_UNUSED,
727     struct sctp_association *asoc, struct sctp_stream_out *strq,
728     int moved_how_much SCTP_UNUSED)
729 {
730 	struct sctp_stream_queue_pending *sp;
731 	struct sctp_stream_out *strqt;
732 	int subtract;
733 
734 	SCTP_TCB_LOCK_ASSERT(stcb);
735 
736 	if (asoc->idata_supported == 0) {
737 		sp = TAILQ_FIRST(&strq->outqueue);
738 		if ((sp != NULL) && (sp->some_taken == 1)) {
739 			asoc->ss_data.locked_on_sending = strq;
740 		} else {
741 			asoc->ss_data.locked_on_sending = NULL;
742 		}
743 	} else {
744 		asoc->ss_data.locked_on_sending = NULL;
745 	}
746 	subtract = strq->ss_params.ss.fb.rounds;
747 	TAILQ_FOREACH(strqt, &asoc->ss_data.out.wheel, ss_params.ss.fb.next_spoke) {
748 		strqt->ss_params.ss.fb.rounds -= subtract;
749 		if (strqt->ss_params.ss.fb.rounds < 0)
750 			strqt->ss_params.ss.fb.rounds = 0;
751 	}
752 	if (TAILQ_FIRST(&strq->outqueue)) {
753 		strq->ss_params.ss.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
754 	} else {
755 		strq->ss_params.ss.fb.rounds = -1;
756 	}
757 	asoc->ss_data.last_out_stream = strq;
758 	return;
759 }
760 
761 /*
762  * First-come, first-served algorithm.
763  * Maintains the order provided by the application.
764  */
765 static void
766 sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
767     struct sctp_stream_out *strq SCTP_UNUSED,
768     struct sctp_stream_queue_pending *sp);
769 
770 static void
771 sctp_ss_fcfs_init(struct sctp_tcb *stcb, struct sctp_association *asoc)
772 {
773 	uint32_t x, n = 0, add_more = 1;
774 	struct sctp_stream_queue_pending *sp;
775 	uint16_t i;
776 
777 	SCTP_TCB_LOCK_ASSERT(stcb);
778 
779 	TAILQ_INIT(&asoc->ss_data.out.list);
780 	/*
781 	 * If there is data in the stream queues already, the scheduler of
782 	 * an existing association has been changed. We can only cycle
783 	 * through the stream queues and add everything to the FCFS queue.
784 	 */
785 	while (add_more) {
786 		add_more = 0;
787 		for (i = 0; i < asoc->streamoutcnt; i++) {
788 			sp = TAILQ_FIRST(&asoc->strmout[i].outqueue);
789 			x = 0;
790 			/* Find the n-th message in the current stream queue */
791 			while (sp != NULL && x < n) {
792 				sp = TAILQ_NEXT(sp, next);
793 				x++;
794 			}
795 			if (sp != NULL) {
796 				sctp_ss_fcfs_add(stcb, asoc, &asoc->strmout[i], sp);
797 				add_more = 1;
798 			}
799 		}
800 		n++;
801 	}
802 	return;
803 }
804 
805 static void
806 sctp_ss_fcfs_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
807     bool clear_values SCTP_UNUSED)
808 {
809 	struct sctp_stream_queue_pending *sp;
810 
811 	SCTP_TCB_LOCK_ASSERT(stcb);
812 
813 	while (!TAILQ_EMPTY(&asoc->ss_data.out.list)) {
814 		sp = TAILQ_FIRST(&asoc->ss_data.out.list);
815 		KASSERT(sp->scheduled, ("sp %p not scheduled", (void *)sp));
816 		TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
817 		sp->scheduled = false;
818 	}
819 	asoc->ss_data.last_out_stream = NULL;
820 	return;
821 }
822 
823 static void
824 sctp_ss_fcfs_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
825 {
826 	SCTP_TCB_LOCK_ASSERT(stcb);
827 
828 	if (with_strq != NULL) {
829 		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
830 			stcb->asoc.ss_data.locked_on_sending = strq;
831 		}
832 		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
833 			stcb->asoc.ss_data.last_out_stream = strq;
834 		}
835 	}
836 	strq->ss_params.scheduled = false;
837 	return;
838 }
839 
840 static void
841 sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
842     struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp)
843 {
844 	SCTP_TCB_LOCK_ASSERT(stcb);
845 
846 	if (!sp->scheduled) {
847 		TAILQ_INSERT_TAIL(&asoc->ss_data.out.list, sp, ss_next);
848 		sp->scheduled = true;
849 	}
850 	return;
851 }
852 
853 static bool
854 sctp_ss_fcfs_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
855 {
856 	SCTP_TCB_LOCK_ASSERT(stcb);
857 
858 	return (TAILQ_EMPTY(&asoc->ss_data.out.list));
859 }
860 
861 static void
862 sctp_ss_fcfs_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
863     struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp)
864 {
865 	SCTP_TCB_LOCK_ASSERT(stcb);
866 
867 	if (sp->scheduled) {
868 		TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
869 		sp->scheduled = false;
870 	}
871 	return;
872 }
873 
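/*
 * The stream owning the message at the head of the FCFS list is
 * selected.  When CMT is off, entries whose stream currently has its
 * first message bound to a different net are skipped.
 */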
874 static struct sctp_stream_out *
875 sctp_ss_fcfs_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
876     struct sctp_association *asoc)
877 {
878 	struct sctp_stream_out *strq;
879 	struct sctp_stream_queue_pending *sp;
880 
881 	SCTP_TCB_LOCK_ASSERT(stcb);
882 
883 	if (asoc->ss_data.locked_on_sending) {
884 		return (asoc->ss_data.locked_on_sending);
885 	}
886 	sp = TAILQ_FIRST(&asoc->ss_data.out.list);
887 default_again:
888 	if (sp != NULL) {
889 		strq = &asoc->strmout[sp->sid];
890 	} else {
891 		strq = NULL;
892 	}
893 
894 	/*
895 	 * If CMT is off, we must validate that the stream in question has
896 	 * its first item pointed towards the network destination requested
897 	 * by the caller. Note that if we turn out to be locked to a stream
898 	 * (assigning TSNs), then we must stop, since we cannot look for
899 	 * another stream with data to send to that destination. In the CMT
900 	 * case, by skipping this check, we will send one data packet
901 	 * towards the requested net.
902 	 */
903 	if (net != NULL && strq != NULL &&
904 	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
905 		if (TAILQ_FIRST(&strq->outqueue) &&
906 		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
907 		    TAILQ_FIRST(&strq->outqueue)->net != net) {
908 			sp = TAILQ_NEXT(sp, ss_next);
909 			goto default_again;
910 		}
911 	}
912 	return (strq);
913 }
914 
915 static void
916 sctp_ss_fcfs_scheduled(struct sctp_tcb *stcb,
917     struct sctp_nets *net SCTP_UNUSED,
918     struct sctp_association *asoc,
919     struct sctp_stream_out *strq,
920     int moved_how_much SCTP_UNUSED)
921 {
922 	struct sctp_stream_queue_pending *sp;
923 
924 	KASSERT(strq != NULL, ("strq is NULL"));
925 	asoc->ss_data.last_out_stream = strq;
926 	if (asoc->idata_supported == 0) {
927 		sp = TAILQ_FIRST(&strq->outqueue);
928 		if ((sp != NULL) && (sp->some_taken == 1)) {
929 			asoc->ss_data.locked_on_sending = strq;
930 		} else {
931 			asoc->ss_data.locked_on_sending = NULL;
932 		}
933 	} else {
934 		asoc->ss_data.locked_on_sending = NULL;
935 	}
936 	return;
937 }
938 
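/*
 * Dispatch table for the stream schedulers.  The entries are indexed by
 * the SCTP_SS_* constants named in the comments below; every scheduler
 * that needs no specialized hook falls back to the default
 * implementation above.
 */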
939 const struct sctp_ss_functions sctp_ss_functions[] = {
940 /* SCTP_SS_DEFAULT */
941 	{
942 		.sctp_ss_init = sctp_ss_default_init,
943 		.sctp_ss_clear = sctp_ss_default_clear,
944 		.sctp_ss_init_stream = sctp_ss_default_init_stream,
945 		.sctp_ss_add_to_stream = sctp_ss_default_add,
946 		.sctp_ss_is_empty = sctp_ss_default_is_empty,
947 		.sctp_ss_remove_from_stream = sctp_ss_default_remove,
948 		.sctp_ss_select_stream = sctp_ss_default_select,
949 		.sctp_ss_scheduled = sctp_ss_default_scheduled,
950 		.sctp_ss_packet_done = sctp_ss_default_packet_done,
951 		.sctp_ss_get_value = sctp_ss_default_get_value,
952 		.sctp_ss_set_value = sctp_ss_default_set_value,
953 		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
954 	},
955 /* SCTP_SS_RR */
956 	{
957 		.sctp_ss_init = sctp_ss_default_init,
958 		.sctp_ss_clear = sctp_ss_default_clear,
959 		.sctp_ss_init_stream = sctp_ss_default_init_stream,
960 		.sctp_ss_add_to_stream = sctp_ss_rr_add,
961 		.sctp_ss_is_empty = sctp_ss_default_is_empty,
962 		.sctp_ss_remove_from_stream = sctp_ss_default_remove,
963 		.sctp_ss_select_stream = sctp_ss_default_select,
964 		.sctp_ss_scheduled = sctp_ss_default_scheduled,
965 		.sctp_ss_packet_done = sctp_ss_default_packet_done,
966 		.sctp_ss_get_value = sctp_ss_default_get_value,
967 		.sctp_ss_set_value = sctp_ss_default_set_value,
968 		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
969 	},
970 /* SCTP_SS_RR_PKT */
971 	{
972 		.sctp_ss_init = sctp_ss_default_init,
973 		.sctp_ss_clear = sctp_ss_default_clear,
974 		.sctp_ss_init_stream = sctp_ss_default_init_stream,
975 		.sctp_ss_add_to_stream = sctp_ss_rr_add,
976 		.sctp_ss_is_empty = sctp_ss_default_is_empty,
977 		.sctp_ss_remove_from_stream = sctp_ss_default_remove,
978 		.sctp_ss_select_stream = sctp_ss_rrp_select,
979 		.sctp_ss_scheduled = sctp_ss_default_scheduled,
980 		.sctp_ss_packet_done = sctp_ss_rrp_packet_done,
981 		.sctp_ss_get_value = sctp_ss_default_get_value,
982 		.sctp_ss_set_value = sctp_ss_default_set_value,
983 		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
984 	},
985 /* SCTP_SS_PRIO */
986 	{
987 		.sctp_ss_init = sctp_ss_default_init,
988 		.sctp_ss_clear = sctp_ss_prio_clear,
989 		.sctp_ss_init_stream = sctp_ss_prio_init_stream,
990 		.sctp_ss_add_to_stream = sctp_ss_prio_add,
991 		.sctp_ss_is_empty = sctp_ss_default_is_empty,
992 		.sctp_ss_remove_from_stream = sctp_ss_prio_remove,
993 		.sctp_ss_select_stream = sctp_ss_prio_select,
994 		.sctp_ss_scheduled = sctp_ss_default_scheduled,
995 		.sctp_ss_packet_done = sctp_ss_default_packet_done,
996 		.sctp_ss_get_value = sctp_ss_prio_get_value,
997 		.sctp_ss_set_value = sctp_ss_prio_set_value,
998 		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
999 	},
1000 /* SCTP_SS_FB */
1001 	{
1002 		.sctp_ss_init = sctp_ss_default_init,
1003 		.sctp_ss_clear = sctp_ss_fb_clear,
1004 		.sctp_ss_init_stream = sctp_ss_fb_init_stream,
1005 		.sctp_ss_add_to_stream = sctp_ss_fb_add,
1006 		.sctp_ss_is_empty = sctp_ss_default_is_empty,
1007 		.sctp_ss_remove_from_stream = sctp_ss_fb_remove,
1008 		.sctp_ss_select_stream = sctp_ss_fb_select,
1009 		.sctp_ss_scheduled = sctp_ss_fb_scheduled,
1010 		.sctp_ss_packet_done = sctp_ss_default_packet_done,
1011 		.sctp_ss_get_value = sctp_ss_default_get_value,
1012 		.sctp_ss_set_value = sctp_ss_default_set_value,
1013 		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
1014 	},
1015 /* SCTP_SS_FCFS */
1016 	{
1017 		.sctp_ss_init = sctp_ss_fcfs_init,
1018 		.sctp_ss_clear = sctp_ss_fcfs_clear,
1019 		.sctp_ss_init_stream = sctp_ss_fcfs_init_stream,
1020 		.sctp_ss_add_to_stream = sctp_ss_fcfs_add,
1021 		.sctp_ss_is_empty = sctp_ss_fcfs_is_empty,
1022 		.sctp_ss_remove_from_stream = sctp_ss_fcfs_remove,
1023 		.sctp_ss_select_stream = sctp_ss_fcfs_select,
1024 		.sctp_ss_scheduled = sctp_ss_fcfs_scheduled,
1025 		.sctp_ss_packet_done = sctp_ss_default_packet_done,
1026 		.sctp_ss_get_value = sctp_ss_default_get_value,
1027 		.sctp_ss_set_value = sctp_ss_default_set_value,
1028 		.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
1029 	}
1030 };
1031