xref: /linux/net/x25/x25_in.c (revision a1822cb524e89b4cd2cf0b82e484a2335496a6d9)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	X.25 Packet Layer release 002
4  *
5  *	This is ALPHA test software. This code may break your machine,
6  *	randomly fail to work with new releases, misbehave and/or generally
7  *	screw up. It might even work.
8  *
9  *	This code REQUIRES 2.1.15 or higher
10  *
11  *	History
12  *	X.25 001	Jonathan Naylor	  Started coding.
13  *	X.25 002	Jonathan Naylor	  Centralised disconnection code.
14  *					  New timer architecture.
15  *	2000-03-20	Daniela Squassoni Disabling/enabling of facilities
16  *					  negotiation.
17  *	2000-11-10	Henner Eisen	  Check and reset for out-of-sequence
18  *					  i-frames.
19  */
20 
21 #define pr_fmt(fmt) "X25: " fmt
22 
23 #include <linux/slab.h>
24 #include <linux/errno.h>
25 #include <linux/kernel.h>
26 #include <linux/string.h>
27 #include <linux/skbuff.h>
28 #include <net/sock.h>
29 #include <net/tcp_states.h>
30 #include <net/x25.h>
31 
/*
 * Queue a received Data packet on the socket, first reassembling a
 * sequence of M-bit (more data) fragments into one skb.
 *
 * Returns 0 when the skb was consumed (queued for the reader or held
 * on the fragment queue), 1 on failure (reassembly length overflow or
 * allocation failure) — in the failure case the caller still owns skb
 * and resets the virtual circuit.
 */
static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
{
	struct sk_buff *skbo, *skbn = skb;
	struct x25_sock *x25 = x25_sk(sk);

	/* make sure we don't overflow */
	if (x25->fraglen + skb->len > USHRT_MAX)
		return 1;

	if (more) {
		/* Not the last fragment: park it and wait for the rest. */
		x25->fraglen += skb->len;
		skb_queue_tail(&x25->fragment_queue, skb);
		skb_set_owner_r(skb, sk);
		return 0;
	}

	if (x25->fraglen > 0) {	/* End of fragment */
		int len = x25->fraglen + skb->len;

		skbn = alloc_skb(len, GFP_ATOMIC);
		if (!skbn)
			return 1;

		/* Append the final piece, then drain the whole queue. */
		skb_queue_tail(&x25->fragment_queue, skb);

		skb_reset_transport_header(skbn);

		/*
		 * The first fragment is copied in full, including its
		 * packet header (presumably stripped later when the data
		 * is delivered to the user — confirm against af_x25.c).
		 */
		skbo = skb_dequeue(&x25->fragment_queue);
		skb_copy_from_linear_data(skbo, skb_put(skbn, skbo->len),
					  skbo->len);
		kfree_skb(skbo);

		/*
		 * Subsequent fragments have their per-packet X.25 header
		 * pulled (length depends on standard vs extended modulo
		 * operation) so only payload is appended.
		 */
		while ((skbo =
			skb_dequeue(&x25->fragment_queue)) != NULL) {
			skb_pull(skbo, (x25->neighbour->extended) ?
					X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
			skb_copy_from_linear_data(skbo,
						  skb_put(skbn, skbo->len),
						  skbo->len);
			kfree_skb(skbo);
		}

		x25->fraglen = 0;
	}

	/* Hand the (possibly reassembled) skb to the socket reader. */
	skb_set_owner_r(skbn, sk);
	skb_queue_tail(&sk->sk_receive_queue, skbn);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	return 0;
}
84 
/*
 * State machine for state 1, Awaiting Call Accepted State.
 * The handling of the timer(s) is in file x25_timer.c.
 * Handling of state 0 and connection release is in af_x25.c.
 *
 * Always returns 0: the skb is never kept, so the caller remains
 * responsible for freeing it.
 */
static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	struct x25_address source_addr, dest_addr;
	int len;
	struct x25_sock *x25 = x25_sk(sk);

	switch (frametype) {
	case X25_CALL_ACCEPTED: {

		/* Call is up: reset all flow-control state, enter state 3. */
		x25_stop_timer(sk);
		x25->condition = 0x00;
		x25->vs        = 0;
		x25->va        = 0;
		x25->vr        = 0;
		x25->vl        = 0;
		x25->state     = X25_STATE_3;
		sk->sk_state   = TCP_ESTABLISHED;
		/*
		 *	Parse the data in the frame.
		 */
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
			goto out_clear;
		skb_pull(skb, X25_STD_MIN_LEN);

		/* Optional address block; negative length means malformed. */
		len = x25_parse_address_block(skb, &source_addr,
					      &dest_addr);
		if (len > 0)
			skb_pull(skb, len);
		else if (len < 0)
			goto out_clear;

		/* Optional facilities block, same convention. */
		len = x25_parse_facilities(skb, &x25->facilities,
					   &x25->dte_facilities,
					   &x25->vc_facil_mask);
		if (len > 0)
			skb_pull(skb, len);
		else if (len < 0)
			goto out_clear;
		/*
		 *	Copy any Call User Data.
		 */
		if (skb->len > 0) {
			if (skb->len > X25_MAX_CUD_LEN)
				goto out_clear;

			skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
				skb->len);
			x25->calluserdata.cudlength = skb->len;
		}
		/* Wake up anyone blocked in connect(). */
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		break;
	}
	case X25_CALL_REQUEST:
		/* call collision */
		x25->causediag.cause      = 0x01;
		x25->causediag.diagnostic = 0x48;

		x25_write_internal(sk, X25_CLEAR_REQUEST);
		x25_disconnect(sk, EISCONN, 0x01, 0x48);
		break;

	case X25_CLEAR_REQUEST:
		/* Need the cause and diagnostic bytes past the header. */
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
			goto out_clear;

		x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
		x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
		break;

	default:
		break;
	}

	return 0;

out_clear:
	/* Malformed frame: clear the call ourselves and await confirmation. */
	x25_write_internal(sk, X25_CLEAR_REQUEST);
	x25->state = X25_STATE_2;
	x25_start_t23timer(sk);
	return 0;
}
172 
173 /*
174  * State machine for state 2, Awaiting Clear Confirmation State.
175  * The handling of the timer(s) is in file x25_timer.c
176  * Handling of state 0 and connection release is in af_x25.c.
177  */
178 static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype)
179 {
180 	switch (frametype) {
181 
182 		case X25_CLEAR_REQUEST:
183 			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
184 				goto out_clear;
185 
186 			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
187 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
188 			break;
189 
190 		case X25_CLEAR_CONFIRMATION:
191 			x25_disconnect(sk, 0, 0, 0);
192 			break;
193 
194 		default:
195 			break;
196 	}
197 
198 	return 0;
199 
200 out_clear:
201 	x25_write_internal(sk, X25_CLEAR_REQUEST);
202 	x25_start_t23timer(sk);
203 	return 0;
204 }
205 
/*
 * State machine for state 3, Connected State.
 * The handling of the timer(s) is in file x25_timer.c
 * Handling of state 0 and connection release is in af_x25.c.
 *
 * Returns nonzero when the skb has been queued (taken over) by this
 * layer, 0 when the caller should free it.
 */
static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
{
	int queued = 0;
	int modulus;
	struct x25_sock *x25 = x25_sk(sk);

	/* Sequence numbers wrap at 8 (standard) or 128 (extended). */
	modulus = (x25->neighbour->extended) ? X25_EMODULUS : X25_SMODULUS;

	switch (frametype) {

		case X25_RESET_REQUEST:
			/* Peer reset: confirm, zero sequence state, resend
			 * anything that was in flight. */
			x25_write_internal(sk, X25_RESET_CONFIRMATION);
			x25_stop_timer(sk);
			x25->condition = 0x00;
			x25->vs        = 0;
			x25->vr        = 0;
			x25->va        = 0;
			x25->vl        = 0;
			x25_requeue_frames(sk);
			break;

		case X25_CLEAR_REQUEST:
			/* Need the cause and diagnostic bytes past the
			 * header. */
			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
				goto out_clear;

			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
			break;

		case X25_RR:
		case X25_RNR:
			/* Invalid N(R): reset the circuit and go to state 4
			 * to await the reset confirmation. */
			if (!x25_validate_nr(sk, nr)) {
				x25_clear_queues(sk);
				x25_write_internal(sk, X25_RESET_REQUEST);
				x25_start_t22timer(sk);
				x25->condition = 0x00;
				x25->vs        = 0;
				x25->vr        = 0;
				x25->va        = 0;
				x25->vl        = 0;
				x25->state     = X25_STATE_4;
			} else {
				x25_frames_acked(sk, nr);
				/* RNR = peer busy; RR clears that state. */
				if (frametype == X25_RNR) {
					x25->condition |= X25_COND_PEER_RX_BUSY;
				} else {
					x25->condition &= ~X25_COND_PEER_RX_BUSY;
				}
			}
			break;

		case X25_DATA:	/* XXX */
			x25->condition &= ~X25_COND_PEER_RX_BUSY;
			/* Out-of-sequence N(S) or invalid N(R): reset. */
			if ((ns != x25->vr) || !x25_validate_nr(sk, nr)) {
				x25_clear_queues(sk);
				x25_write_internal(sk, X25_RESET_REQUEST);
				x25_start_t22timer(sk);
				x25->condition = 0x00;
				x25->vs        = 0;
				x25->vr        = 0;
				x25->va        = 0;
				x25->vl        = 0;
				x25->state     = X25_STATE_4;
				break;
			}
			x25_frames_acked(sk, nr);
			if (ns == x25->vr) {
				/* In sequence: accept and advance V(R). */
				if (x25_queue_rx_frame(sk, skb, m) == 0) {
					x25->vr = (x25->vr + 1) % modulus;
					queued = 1;
				} else {
					/* Should never happen */
					x25_clear_queues(sk);
					x25_write_internal(sk, X25_RESET_REQUEST);
					x25_start_t22timer(sk);
					x25->condition = 0x00;
					x25->vs        = 0;
					x25->vr        = 0;
					x25->va        = 0;
					x25->vl        = 0;
					x25->state     = X25_STATE_4;
					break;
				}
				/* Receive buffer over half full: flag our own
				 * RX-busy so we slow the peer down. */
				if (atomic_read(&sk->sk_rmem_alloc) >
				    (sk->sk_rcvbuf >> 1))
					x25->condition |= X25_COND_OWN_RX_BUSY;
			}
			/*
			 *	If the window is full Ack it immediately, else
			 *	start the holdback timer.
			 */
			if (((x25->vl + x25->facilities.winsize_in) % modulus) == x25->vr) {
				x25->condition &= ~X25_COND_ACK_PENDING;
				x25_stop_timer(sk);
				x25_enquiry_response(sk);
			} else {
				x25->condition |= X25_COND_ACK_PENDING;
				x25_start_t2timer(sk);
			}
			break;

		case X25_INTERRUPT_CONFIRMATION:
			clear_bit(X25_INTERRUPT_FLAG, &x25->flags);
			break;

		case X25_INTERRUPT:
			/* Urgent data: inline per SO_OOBINLINE, else keep it
			 * on the dedicated interrupt queue. */
			if (sock_flag(sk, SOCK_URGINLINE))
				queued = !sock_queue_rcv_skb(sk, skb);
			else {
				skb_set_owner_r(skb, sk);
				skb_queue_tail(&x25->interrupt_in_queue, skb);
				queued = 1;
			}
			sk_send_sigurg(sk);
			x25_write_internal(sk, X25_INTERRUPT_CONFIRMATION);
			break;

		default:
			pr_warn("unknown %02X in state 3\n", frametype);
			break;
	}

	return queued;

out_clear:
	/* Malformed frame: clear the call and await confirmation. */
	x25_write_internal(sk, X25_CLEAR_REQUEST);
	x25->state = X25_STATE_2;
	x25_start_t23timer(sk);
	return 0;
}
341 
342 /*
343  * State machine for state 4, Awaiting Reset Confirmation State.
344  * The handling of the timer(s) is in file x25_timer.c
345  * Handling of state 0 and connection release is in af_x25.c.
346  */
347 static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
348 {
349 	struct x25_sock *x25 = x25_sk(sk);
350 
351 	switch (frametype) {
352 
353 		case X25_RESET_REQUEST:
354 			x25_write_internal(sk, X25_RESET_CONFIRMATION);
355 			fallthrough;
356 		case X25_RESET_CONFIRMATION: {
357 			x25_stop_timer(sk);
358 			x25->condition = 0x00;
359 			x25->va        = 0;
360 			x25->vr        = 0;
361 			x25->vs        = 0;
362 			x25->vl        = 0;
363 			x25->state     = X25_STATE_3;
364 			x25_requeue_frames(sk);
365 			break;
366 		}
367 		case X25_CLEAR_REQUEST:
368 			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
369 				goto out_clear;
370 
371 			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
372 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
373 			break;
374 
375 		default:
376 			break;
377 	}
378 
379 	return 0;
380 
381 out_clear:
382 	x25_write_internal(sk, X25_CLEAR_REQUEST);
383 	x25->state = X25_STATE_2;
384 	x25_start_t23timer(sk);
385 	return 0;
386 }
387 
388 /*
389  * State machine for state 5, Call Accepted / Call Connected pending (X25_ACCPT_APPRV_FLAG).
390  * The handling of the timer(s) is in file x25_timer.c
391  * Handling of state 0 and connection release is in af_x25.c.
392  */
393 static int x25_state5_machine(struct sock *sk, struct sk_buff *skb, int frametype)
394 {
395 	struct x25_sock *x25 = x25_sk(sk);
396 
397 	switch (frametype) {
398 		case X25_CLEAR_REQUEST:
399 			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) {
400 				x25_write_internal(sk, X25_CLEAR_REQUEST);
401 				x25->state = X25_STATE_2;
402 				x25_start_t23timer(sk);
403 				return 0;
404 			}
405 
406 			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
407 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
408 			break;
409 
410 		default:
411 			break;
412 	}
413 
414 	return 0;
415 }
416 
417 /* Higher level upcall for a LAPB frame */
418 int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb)
419 {
420 	struct x25_sock *x25 = x25_sk(sk);
421 	int queued = 0, frametype, ns, nr, q, d, m;
422 
423 	if (x25->state == X25_STATE_0)
424 		return 0;
425 
426 	frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m);
427 
428 	switch (x25->state) {
429 	case X25_STATE_1:
430 		queued = x25_state1_machine(sk, skb, frametype);
431 		break;
432 	case X25_STATE_2:
433 		queued = x25_state2_machine(sk, skb, frametype);
434 		break;
435 	case X25_STATE_3:
436 		queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
437 		break;
438 	case X25_STATE_4:
439 		queued = x25_state4_machine(sk, skb, frametype);
440 		break;
441 	case X25_STATE_5:
442 		queued = x25_state5_machine(sk, skb, frametype);
443 		break;
444 	}
445 
446 	x25_kick(sk);
447 
448 	return queued;
449 }
450 
int x25_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* The state machines return nonzero when they kept the skb;
	 * otherwise it is ours to free. */
	if (!x25_process_rx_frame(sk, skb))
		kfree_skb(skb);

	return 0;
}
460