// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions manipulate sctp stream queue/scheduling.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/list.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>

/* First Come First Serve (a.k.a. FIFO)
 * RFC DRAFT ndata Section 3.1
 *
 * FCFS keeps no per-stream state, so most of these callbacks are no-ops.
 */
static int sctp_sched_fcfs_set(struct sctp_stream *stream, __u16 sid,
			       __u16 value, gfp_t gfp)
{
	return 0;
}

static int sctp_sched_fcfs_get(struct sctp_stream *stream, __u16 sid,
			       __u16 *value)
{
	*value = 0;
	return 0;
}

static int sctp_sched_fcfs_init(struct sctp_stream *stream)
{
	return 0;
}

static int sctp_sched_fcfs_init_sid(struct sctp_stream *stream, __u16 sid,
				    gfp_t gfp)
{
	return 0;
}

static void sctp_sched_fcfs_free_sid(struct sctp_stream *stream, __u16 sid)
{
}

static void sctp_sched_fcfs_enqueue(struct sctp_outq *q,
				    struct sctp_datamsg *msg)
{
}

static struct sctp_chunk *sctp_sched_fcfs_dequeue(struct sctp_outq *q)
{
	struct sctp_stream *stream = &q->asoc->stream;
	struct sctp_chunk *ch = NULL;
	struct list_head *entry;

	if (list_empty(&q->out_chunk_list))
		goto out;

	/* If a message is only partially sent, its remaining fragments must
	 * go out first; otherwise just take the oldest queued chunk.
	 */
	if (stream->out_curr) {
		ch = list_entry(stream->out_curr->ext->outq.next,
				struct sctp_chunk, stream_list);
	} else {
		entry = q->out_chunk_list.next;
		ch = list_entry(entry, struct sctp_chunk, list);
	}

	sctp_sched_dequeue_common(q, ch);

out:
	return ch;
}

static void sctp_sched_fcfs_dequeue_done(struct sctp_outq *q,
					 struct sctp_chunk *chunk)
{
}

static void sctp_sched_fcfs_sched_all(struct sctp_stream *stream)
{
}

static void sctp_sched_fcfs_unsched_all(struct sctp_stream *stream)
{
}

static struct sctp_sched_ops sctp_sched_fcfs = {
	.set = sctp_sched_fcfs_set,
	.get = sctp_sched_fcfs_get,
	.init = sctp_sched_fcfs_init,
	.init_sid = sctp_sched_fcfs_init_sid,
	.free_sid = sctp_sched_fcfs_free_sid,
	.enqueue = sctp_sched_fcfs_enqueue,
	.dequeue = sctp_sched_fcfs_dequeue,
	.dequeue_done = sctp_sched_fcfs_dequeue_done,
	.sched_all = sctp_sched_fcfs_sched_all,
	.unsched_all = sctp_sched_fcfs_unsched_all,
};

static void sctp_sched_ops_fcfs_init(void)
{
	sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs);
}

/* API to other parts of the stack */

/* All registered schedulers, indexed by enum sctp_sched_type. */
static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1];

void sctp_sched_ops_register(enum sctp_sched_type sched,
			     struct sctp_sched_ops *sched_ops)
{
	sctp_sched_ops[sched] = sched_ops;
}

void sctp_sched_ops_init(void)
{
	sctp_sched_ops_fcfs_init();
	sctp_sched_ops_prio_init();
	sctp_sched_ops_rr_init();
	sctp_sched_ops_fc_init();
	sctp_sched_ops_wfq_init();
}

static void sctp_sched_free_sched(struct sctp_stream *stream)
{
	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
	struct sctp_stream_out_ext *soute;
	int i;

	sched->unsched_all(stream);
	for (i = 0; i < stream->outcnt; i++) {
		soute = SCTP_SO(stream, i)->ext;
		if (!soute)
			continue;
		sched->free_sid(stream, i);
		/* Give the next scheduler a clean slate. */
		memset_after(soute, 0, outq);
	}
}

int sctp_sched_set_sched(struct sctp_association *asoc,
			 enum sctp_sched_type sched)
{
	struct sctp_sched_ops *old = asoc->outqueue.sched;
	struct sctp_datamsg *msg = NULL;
	struct sctp_sched_ops *n;
	struct sctp_chunk *ch;
	int i, ret = 0;

	/* Validate the index before touching sctp_sched_ops[]. */
	if (sched > SCTP_SS_MAX)
		return -EINVAL;

	n = sctp_sched_ops[sched];
	if (old == n)
		return ret;

	if (old)
		sctp_sched_free_sched(&asoc->stream);

	asoc->outqueue.sched = n;
	n->init(&asoc->stream);
	for (i = 0; i < asoc->stream.outcnt; i++) {
		if (!SCTP_SO(&asoc->stream, i)->ext)
			continue;

		ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC);
		if (ret)
			goto err;
	}

	/* We have to requeue all chunks already queued. */
	list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) {
		if (ch->msg == msg)
			continue;
		msg = ch->msg;
		n->enqueue(&asoc->outqueue, msg);
	}

	return ret;

err:
	sctp_sched_free_sched(&asoc->stream);
	asoc->outqueue.sched = &sctp_sched_fcfs; /* Always safe */

	return ret;
}

int sctp_sched_get_sched(struct sctp_association *asoc)
{
	int i;

	for (i = 0; i <= SCTP_SS_MAX; i++)
		if (asoc->outqueue.sched == sctp_sched_ops[i])
			return i;

	return 0;
}

int sctp_sched_set_value(struct sctp_association *asoc, __u16 sid,
			 __u16 value, gfp_t gfp)
{
	if (sid >= asoc->stream.outcnt)
		return -EINVAL;

	if (!SCTP_SO(&asoc->stream, sid)->ext) {
		int ret;

		ret = sctp_stream_init_ext(&asoc->stream, sid);
		if (ret)
			return ret;
	}

	return asoc->outqueue.sched->set(&asoc->stream, sid, value, gfp);
}

int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
			 __u16 *value)
{
	if (sid >= asoc->stream.outcnt)
		return -EINVAL;

	if (!SCTP_SO(&asoc->stream, sid)->ext)
		return 0;

	return asoc->outqueue.sched->get(&asoc->stream, sid, value);
}

void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
{
	if (!list_is_last(&ch->frag_list, &ch->msg->chunks) &&
	    !q->asoc->peer.intl_capable) {
		struct sctp_stream_out *sout;
		__u16 sid;

		/* The datamsg is not finished yet, so save its stream as the
		 * current one, in case the application switches schedulers
		 * or a higher priority stream comes in.
		 */
		sid = sctp_chunk_stream_no(ch);
		sout = SCTP_SO(&q->asoc->stream, sid);
		q->asoc->stream.out_curr = sout;
		return;
	}

	q->asoc->stream.out_curr = NULL;
	q->sched->dequeue_done(q, ch);
}

/* Auxiliary functions for the schedulers */
void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch)
{
	list_del_init(&ch->list);
	list_del_init(&ch->stream_list);
	q->out_qlen -= ch->skb->len;
}

int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp)
{
	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
	struct sctp_stream_out_ext *ext = SCTP_SO(stream, sid)->ext;

	INIT_LIST_HEAD(&ext->outq);
	return sched->init_sid(stream, sid, gfp);
}

struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);

	return asoc->outqueue.sched;
}
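
/* Usage sketch (illustrative only, not part of this file): from userspace,
 * the scheduler for an association is selected with the
 * SCTP_STREAM_SCHEDULER socket option (handled by sctp_sched_set_sched()
 * above), and a per-stream value is set with SCTP_STREAM_SCHEDULER_VALUE
 * (handled by sctp_sched_set_value()). The snippet assumes a
 * <netinet/sctp.h> that exposes struct sctp_assoc_value,
 * struct sctp_stream_value and the SCTP_SS_* constants (e.g. from
 * lksctp-tools); "sd" and "assoc_id" are placeholder names.
 *
 *	struct sctp_assoc_value av = {
 *		.assoc_id    = assoc_id,
 *		.assoc_value = SCTP_SS_PRIO,
 *	};
 *	struct sctp_stream_value sv = {
 *		.assoc_id     = assoc_id,
 *		.stream_id    = 1,
 *		.stream_value = 5,	// per-stream value (priority) for stream 1
 *	};
 *
 *	// Switch the association to the priority scheduler.
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
 *		       &av, sizeof(av)) < 0)
 *		perror("SCTP_STREAM_SCHEDULER");
 *
 *	// Assign the scheduler-specific value to stream 1.
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER_VALUE,
 *		       &sv, sizeof(sv)) < 0)
 *		perror("SCTP_STREAM_SCHEDULER_VALUE");
 */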