xref: /linux/net/sctp/stream_sched.c (revision 1b0975ee3bdd3eb19a47371c26fd7ef8f7f6b599)
// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions manipulate SCTP stream queueing and scheduling.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/list.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>

/* First Come First Serve (a.k.a. FIFO)
 * RFC DRAFT ndata Section 3.1
 */
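/* FCFS keeps no per-stream scheduling state: the per-stream set/get/init
 * callbacks below are intentionally no-ops (the per-stream value is always
 * reported as 0), and dequeueing simply follows the order of the
 * outqueue's out_chunk_list.
 */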
static int sctp_sched_fcfs_set(struct sctp_stream *stream, __u16 sid,
			       __u16 value, gfp_t gfp)
{
	return 0;
}

static int sctp_sched_fcfs_get(struct sctp_stream *stream, __u16 sid,
			       __u16 *value)
{
	*value = 0;
	return 0;
}

static int sctp_sched_fcfs_init(struct sctp_stream *stream)
{
	return 0;
}

static int sctp_sched_fcfs_init_sid(struct sctp_stream *stream, __u16 sid,
				    gfp_t gfp)
{
	return 0;
}

static void sctp_sched_fcfs_free_sid(struct sctp_stream *stream, __u16 sid)
{
}

static void sctp_sched_fcfs_enqueue(struct sctp_outq *q,
				    struct sctp_datamsg *msg)
{
}

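/* Pick the next chunk to transmit: if a multi-chunk message is still in
 * flight, out_curr is set and the next fragment is taken from that
 * stream's per-stream queue so the fragments stay together; otherwise the
 * chunk at the head of out_chunk_list is used.
 */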
static struct sctp_chunk *sctp_sched_fcfs_dequeue(struct sctp_outq *q)
{
	struct sctp_stream *stream = &q->asoc->stream;
	struct sctp_chunk *ch = NULL;
	struct list_head *entry;

	if (list_empty(&q->out_chunk_list))
		goto out;

	if (stream->out_curr) {
		ch = list_entry(stream->out_curr->ext->outq.next,
				struct sctp_chunk, stream_list);
	} else {
		entry = q->out_chunk_list.next;
		ch = list_entry(entry, struct sctp_chunk, list);
	}

	sctp_sched_dequeue_common(q, ch);

out:
	return ch;
}

static void sctp_sched_fcfs_dequeue_done(struct sctp_outq *q,
					 struct sctp_chunk *chunk)
{
}

static void sctp_sched_fcfs_sched_all(struct sctp_stream *stream)
{
}

static void sctp_sched_fcfs_unsched_all(struct sctp_stream *stream)
{
}

static struct sctp_sched_ops sctp_sched_fcfs = {
	.set = sctp_sched_fcfs_set,
	.get = sctp_sched_fcfs_get,
	.init = sctp_sched_fcfs_init,
	.init_sid = sctp_sched_fcfs_init_sid,
	.free_sid = sctp_sched_fcfs_free_sid,
	.enqueue = sctp_sched_fcfs_enqueue,
	.dequeue = sctp_sched_fcfs_dequeue,
	.dequeue_done = sctp_sched_fcfs_dequeue_done,
	.sched_all = sctp_sched_fcfs_sched_all,
	.unsched_all = sctp_sched_fcfs_unsched_all,
};

static void sctp_sched_ops_fcfs_init(void)
{
	sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs);
}

/* API to other parts of the stack */

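/* Registry of available schedulers, indexed by enum sctp_sched_type.
 * Entries are filled in through sctp_sched_ops_register() at init time.
 */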
static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1];

void sctp_sched_ops_register(enum sctp_sched_type sched,
			     struct sctp_sched_ops *sched_ops)
{
	sctp_sched_ops[sched] = sched_ops;
}

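/* Populate the registry with every built-in scheduler. */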
void sctp_sched_ops_init(void)
{
	sctp_sched_ops_fcfs_init();
	sctp_sched_ops_prio_init();
	sctp_sched_ops_rr_init();
	sctp_sched_ops_fc_init();
	sctp_sched_ops_wfq_init();
}

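/* Tear down the current scheduler's per-stream state so another scheduler
 * can take over: unschedule everything, free each stream's scheduler data
 * and zero the part of the ext area past 'outq'.
 */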
static void sctp_sched_free_sched(struct sctp_stream *stream)
{
	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
	struct sctp_stream_out_ext *soute;
	int i;

	sched->unsched_all(stream);
	for (i = 0; i < stream->outcnt; i++) {
		soute = SCTP_SO(stream, i)->ext;
		if (!soute)
			continue;
		sched->free_sid(stream, i);
		/* Give the next scheduler a clean slate. */
		memset_after(soute, 0, outq);
	}
}

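/* Switch the association to a different scheduler (e.g. from the
 * SCTP_STREAM_SCHEDULER setsockopt path): free the old scheduler's state,
 * initialize the new one for every stream that already has ext state, and
 * re-enqueue each datamsg still sitting in the outqueue.  On failure the
 * association falls back to FCFS.
 */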
int sctp_sched_set_sched(struct sctp_association *asoc,
			 enum sctp_sched_type sched)
{
	struct sctp_sched_ops *old = asoc->outqueue.sched;
	struct sctp_datamsg *msg = NULL;
	struct sctp_sched_ops *n;
	struct sctp_chunk *ch;
	int i, ret = 0;

	if (sched > SCTP_SS_MAX)
		return -EINVAL;

	n = sctp_sched_ops[sched];
	if (old == n)
		return ret;

	if (old)
		sctp_sched_free_sched(&asoc->stream);

	asoc->outqueue.sched = n;
	n->init(&asoc->stream);
	for (i = 0; i < asoc->stream.outcnt; i++) {
		if (!SCTP_SO(&asoc->stream, i)->ext)
			continue;

		ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC);
		if (ret)
			goto err;
	}

	/* Requeue all chunks already queued, one enqueue per datamsg. */
	list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) {
		if (ch->msg == msg)
			continue;
		msg = ch->msg;
		n->enqueue(&asoc->outqueue, msg);
	}

	return ret;

err:
	sctp_sched_free_sched(&asoc->stream);
	asoc->outqueue.sched = &sctp_sched_fcfs; /* Always safe */

	return ret;
}

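/* Report which scheduler is in use as its index in the registry
 * (i.e. the enum sctp_sched_type value).
 */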
int sctp_sched_get_sched(struct sctp_association *asoc)
{
	int i;

	for (i = 0; i <= SCTP_SS_MAX; i++)
		if (asoc->outqueue.sched == sctp_sched_ops[i])
			return i;

	return 0;
}

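/* Set a stream's scheduling value (its meaning is scheduler specific,
 * e.g. a priority or a weight), allocating the stream's ext state on
 * first use.
 */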
int sctp_sched_set_value(struct sctp_association *asoc, __u16 sid,
			 __u16 value, gfp_t gfp)
{
	if (sid >= asoc->stream.outcnt)
		return -EINVAL;

	if (!SCTP_SO(&asoc->stream, sid)->ext) {
		int ret;

		ret = sctp_stream_init_ext(&asoc->stream, sid);
		if (ret)
			return ret;
	}

	return asoc->outqueue.sched->set(&asoc->stream, sid, value, gfp);
}

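/* Read back a stream's scheduling value.  Streams that never had ext
 * state allocated are skipped: 0 is returned without touching *value.
 */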
int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
			 __u16 *value)
{
	if (sid >= asoc->stream.outcnt)
		return -EINVAL;

	if (!SCTP_SO(&asoc->stream, sid)->ext)
		return 0;

	return asoc->outqueue.sched->get(&asoc->stream, sid, value);
}

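/* Called once a chunk has been taken off the outqueue for transmission.
 * While a multi-chunk message is still being sent and the peer cannot
 * interleave messages, its stream is pinned as out_curr so the remaining
 * fragments go out back to back; only when the message completes (or the
 * peer supports interleaving) is the scheduler's dequeue_done invoked.
 */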
void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
{
	if (!list_is_last(&ch->frag_list, &ch->msg->chunks) &&
	    !q->asoc->peer.intl_capable) {
		struct sctp_stream_out *sout;
		__u16 sid;

		/* The datamsg is not finished yet, so save this stream as
		 * the current one, in case the application switches
		 * schedulers or a higher priority stream comes in.
		 */
		sid = sctp_chunk_stream_no(ch);
		sout = SCTP_SO(&q->asoc->stream, sid);
		q->asoc->stream.out_curr = sout;
		return;
	}

	q->asoc->stream.out_curr = NULL;
	q->sched->dequeue_done(q, ch);
}

/* Auxiliary functions for the schedulers */
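/* Unlink a chunk from both the outqueue and its stream's queue and
 * update the queued-bytes accounting.
 */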
void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch)
{
	list_del_init(&ch->list);
	list_del_init(&ch->stream_list);
	q->out_qlen -= ch->skb->len;
}

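/* Prepare a newly allocated stream's ext area for scheduling: reset its
 * queue head and let the current scheduler set up its per-stream data.
 */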
int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp)
{
	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
	struct sctp_stream_out_ext *ext = SCTP_SO(stream, sid)->ext;

	INIT_LIST_HEAD(&ext->outq);
	return sched->init_sid(stream, sid, gfp);
}

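/* The stream struct is embedded in the association, so container_of()
 * gets us from the stream back to the outqueue's scheduler ops.
 */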
struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);

	return asoc->outqueue.sched;
}