// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2002 International Business Machines, Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions are the methods for accessing the SCTP inqueue.
 *
 * An SCTP inqueue is a queue into which you push SCTP packets
 * (which might be bundles or fragments of chunks) and out of which you
 * pop SCTP whole chunks.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 */
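
/* Rough usage sketch (illustrative only; the real callers are the
 * endpoint/association receive paths, e.g. sctp_endpoint_bh_rcv() and
 * sctp_assoc_bh_rcv(), and the names below are placeholders):
 *
 *	sctp_inq_init(&rcvr->inqueue);
 *	sctp_inq_set_th_handler(&rcvr->inqueue, my_bh_handler);
 *
 *	// input path: queue a received packet and kick the handler
 *	sctp_inq_push(&rcvr->inqueue, chunk);
 *
 *	// inside my_bh_handler(): drain whole chunks
 *	while ((chunk = sctp_inq_pop(&rcvr->inqueue)) != NULL)
 *		...hand the chunk to the state machine...
 */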

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

/* Initialize an SCTP inqueue. */
void sctp_inq_init(struct sctp_inq *queue)
{
	INIT_LIST_HEAD(&queue->in_chunk_list);
	queue->in_progress = NULL;

	/* Create a task for delivering data; the actual handler is
	 * installed later via sctp_inq_set_th_handler().
	 */
	INIT_WORK(&queue->immediate, NULL);
}

/* Properly release the chunk which is being worked on. */
static inline void sctp_inq_chunk_free(struct sctp_chunk *chunk)
{
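	/* If this chunk came from a GSO aggregate, chunk->skb may point
	 * at an skb on the head skb's frag_list.  Restore the head skb
	 * before freeing so the whole aggregate is released, not just
	 * the fragment we happened to stop on.
	 */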
	if (chunk->head_skb)
		chunk->skb = chunk->head_skb;
	sctp_chunk_free(chunk);
}

/* Release the memory associated with an SCTP inqueue. */
void sctp_inq_free(struct sctp_inq *queue)
{
	struct sctp_chunk *chunk, *tmp;

	/* Empty the queue. */
	list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}

	/* If there is a packet which is currently being worked on,
	 * free it as well.
	 */
	if (queue->in_progress) {
		sctp_inq_chunk_free(queue->in_progress);
		queue->in_progress = NULL;
	}
}

/* Put a new packet in an SCTP inqueue.
 * We assume that packet->sctp_hdr is set and in host byte order.
 */
void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
{
	/* If the receiver (endpoint or association) is already dead,
	 * there is nobody to deliver to; just drop the chunk.
	 */
	if (chunk->rcvr->dead) {
		sctp_chunk_free(chunk);
		return;
	}

	/* We are now calling this either from the soft interrupt
	 * or from the backlog processing.
	 * Eventually, we should clean up inqueue to not rely
	 * on the BH related data structures.
	 */
	list_add_tail(&chunk->list, &q->in_chunk_list);
	if (chunk->asoc)
		chunk->asoc->stats.ipackets++;
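	/* Directly call the packet handling routine: the work item is
	 * never scheduled, its callback (installed by the owner through
	 * sctp_inq_set_th_handler(), e.g. sctp_endpoint_bh_rcv() or
	 * sctp_assoc_bh_rcv()) is invoked synchronously here.
	 */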
	q->immediate.func(&q->immediate);
}

/* Peek at the next chunk on the inqueue. */
struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)
{
	struct sctp_chunk *chunk;
	struct sctp_chunkhdr *ch = NULL;

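	/* Callers may only peek while a packet is being processed;
	 * queue->in_progress is dereferenced without a NULL check.
	 */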
	chunk = queue->in_progress;
	/* If there are no more chunks in this packet, say so. */
	if (chunk->singleton ||
	    chunk->end_of_packet ||
	    chunk->pdiscard)
		return NULL;

	ch = (struct sctp_chunkhdr *)chunk->chunk_end;

	return ch;
}


/* Extract a chunk from an SCTP inqueue.
 *
 * WARNING: If you need to put the chunk on another queue, you need to
 * make a shallow copy (clone) of it.
 */
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
{
	struct sctp_chunk *chunk;
	struct sctp_chunkhdr *ch = NULL;

	/* The assumption is that we are safe to process the chunks
	 * at this time.
	 */

	chunk = queue->in_progress;
	if (chunk) {
		/* There is a packet that we have been working on.
		 * Any post processing work to do before we move on?
		 */
		if (chunk->singleton ||
		    chunk->end_of_packet ||
		    chunk->pdiscard) {
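			/* The previous chunk finished this skb.  For a GSO
			 * aggregate, first step from the head skb onto its
			 * frag_list, then walk the frag_list, before giving
			 * up on the in-progress packet altogether.
			 */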
			if (chunk->head_skb == chunk->skb) {
				chunk->skb = skb_shinfo(chunk->skb)->frag_list;
				goto new_skb;
			}
			if (chunk->skb->next) {
				chunk->skb = chunk->skb->next;
				goto new_skb;
			}

			sctp_inq_chunk_free(chunk);
			chunk = queue->in_progress = NULL;
		} else {
			/* Nothing to do. Next chunk in the packet, please. */
			ch = (struct sctp_chunkhdr *)chunk->chunk_end;
			/* Force chunk->skb->data to chunk->chunk_end. */
			skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
			/* We are guaranteed to pull an SCTP header. */
		}
	}

	/* Do we need to take the next packet out of the queue to process? */
	if (!chunk) {
		struct list_head *entry;

next_chunk:
		/* Is the queue empty? */
		entry = sctp_list_dequeue(&queue->in_chunk_list);
		if (!entry)
			return NULL;

		chunk = list_entry(entry, struct sctp_chunk, list);

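		/* For a GSO/GRO aggregated packet, the original SCTP
		 * packets are queued on the head skb's frag_list; the head
		 * skb itself is remembered in chunk->head_skb so it can be
		 * used for the shared control block and freed at the end.
		 */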
		if (skb_is_gso(chunk->skb) && skb_is_gso_sctp(chunk->skb)) {
			/* GSO-marked skbs without a frag_list are handled
			 * normally, so only remember the head skb when there
			 * are fragments to walk.
			 */
			if (skb_shinfo(chunk->skb)->frag_list)
				chunk->head_skb = chunk->skb;

			/* If the head skb is only a "cover letter" (all of
			 * its data sits in the frag_list), start with the
			 * first fragment.
			 */
			if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
				chunk->skb = skb_shinfo(chunk->skb)->frag_list;

			if (WARN_ON(!chunk->skb)) {
				__SCTP_INC_STATS(dev_net(chunk->skb->dev),
						 SCTP_MIB_IN_PKT_DISCARDS);
				sctp_chunk_free(chunk);
				goto next_chunk;
			}
		}

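		/* Remember the flow hash on the socket so Receive Flow
		 * Steering keeps follow-up processing on the same CPU.
		 */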
		if (chunk->asoc)
			sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb);

		queue->in_progress = chunk;

new_skb:
		/* This is the first chunk in the packet. */
		ch = (struct sctp_chunkhdr *)chunk->skb->data;
		chunk->singleton = 1;
		chunk->data_accepted = 0;
		chunk->pdiscard = 0;
		chunk->auth = 0;
		chunk->has_asconf = 0;
		chunk->end_of_packet = 0;
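		/* When walking a GSO frag_list, the per-fragment skb
		 * inherits the input control block (parsed chunk and
		 * address family) from the head skb.
		 */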
		if (chunk->head_skb) {
			struct sctp_input_cb
				*cb = SCTP_INPUT_CB(chunk->skb),
				*head_cb = SCTP_INPUT_CB(chunk->head_skb);

			cb->chunk = head_cb->chunk;
			cb->af = head_cb->af;
		}
	}

	chunk->chunk_hdr = ch;
	chunk->chunk_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
	skb_pull(chunk->skb, sizeof(*ch));
	chunk->subh.v = NULL; /* Subheader is no longer valid. */

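	/* Classify what we just parsed: more chunks follow in this skb,
	 * the chunk claims more data than the skb actually holds (discard
	 * it in the state machine), or it is the last chunk of the packet.
	 */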
	if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) {
		/* This is not a singleton */
		chunk->singleton = 0;
	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
		/* Discard inside state machine. */
		chunk->pdiscard = 1;
		chunk->chunk_end = skb_tail_pointer(chunk->skb);
	} else {
		/* We are at the end of the packet, so mark the chunk
		 * in case we need to send a SACK.
		 */
		chunk->end_of_packet = 1;
	}

	pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n",
		 chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
		 ntohs(chunk->chunk_hdr->length), chunk->skb->len);

	return chunk;
}

/* Set a top-half handler.
 *
 * Originally, the top-half handler was scheduled as a BH.  We now
 * call the handler directly in sctp_inq_push() at a time that
 * we know we are lock safe.
 * The intent is that this routine will pull stuff out of the
 * inqueue and process it.
 */
void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
{
	INIT_WORK(&q->immediate, callback);
}