// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

/*
 *	Given a fragment, queue it on the fragment queue and, once the
 *	final fragment has arrived, reassemble the frame and pass it back
 *	to ax25_rx_iframe.
 */
static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
{
	struct sk_buff *skbn, *skbo;

	if (ax25->fragno != 0) {
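		/*
		 *	A reassembly is already in progress: only accept this
		 *	fragment if it is not marked as a first fragment and
		 *	its remaining-fragment count is exactly one less than
		 *	that of the previous fragment.
		 */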
		if (!(*skb->data & AX25_SEG_FIRST)) {
			if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) {
				/* Enqueue fragment */
				ax25->fragno = *skb->data & AX25_SEG_REM;
				skb_pull(skb, 1);	/* skip fragno */
				ax25->fraglen += skb->len;
				skb_queue_tail(&ax25->frag_queue, skb);

				/* Last fragment received ? */
				if (ax25->fragno == 0) {
					skbn = alloc_skb(AX25_MAX_HEADER_LEN +
							 ax25->fraglen,
							 GFP_ATOMIC);
					if (!skbn) {
						skb_queue_purge(&ax25->frag_queue);
						return 1;
					}

					skb_reserve(skbn, AX25_MAX_HEADER_LEN);

					skbn->dev   = ax25->ax25_dev->dev;
					skb_reset_network_header(skbn);
					skb_reset_transport_header(skbn);

					/* Copy data from the fragments */
					while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) {
						skb_copy_from_linear_data(skbo,
							  skb_put(skbn, skbo->len),
									  skbo->len);
						kfree_skb(skbo);
					}

					ax25->fraglen = 0;

					if (ax25_rx_iframe(ax25, skbn) == 0)
						kfree_skb(skbn);
				}

				return 1;
			}
		}
	} else {
		/* First fragment received */
		if (*skb->data & AX25_SEG_FIRST) {
			skb_queue_purge(&ax25->frag_queue);
			ax25->fragno = *skb->data & AX25_SEG_REM;
			skb_pull(skb, 1);		/* skip fragno */
			ax25->fraglen = skb->len;
			skb_queue_tail(&ax25->frag_queue, skb);
			return 1;
		}
	}

	return 0;
}

/*
 *	All valid I frames are sent here, to be dispatched to whichever
 *	protocol layer requires them.
 */
int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
	int (*func)(struct sk_buff *, ax25_cb *);
	unsigned char pid;
	int queued = 0;

	if (skb == NULL) return 0;

	ax25_start_idletimer(ax25);

	pid = *skb->data;

	if (pid == AX25_P_IP) {
		/* working around a TCP bug to keep additional listeners
		 * happy. TCP re-uses the buffer and destroys the original
		 * content.
		 */
		struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
		if (skbn != NULL) {
			kfree_skb(skb);
			skb = skbn;
		}

		skb_pull(skb, 1);	/* Remove PID */
		skb->mac_header = skb->network_header;
		skb_reset_network_header(skb);
		skb->dev      = ax25->ax25_dev->dev;
		skb->pkt_type = PACKET_HOST;
		skb->protocol = htons(ETH_P_IP);
		netif_rx(skb);
		return 1;
	}
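	/*
	 *	An AX.25 segmented frame: strip the PID byte and hand the
	 *	fragment to the reassembler above.
	 */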
	if (pid == AX25_P_SEGMENT) {
		skb_pull(skb, 1);	/* Remove PID */
		return ax25_rx_fragment(ax25, skb);
	}

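	/*
	 *	A layer 3/4 protocol (NET/ROM or ROSE, for example) may have
	 *	registered a receive function for this PID; if so, give it
	 *	the frame.
	 */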
	if ((func = ax25_protocol_function(pid)) != NULL) {
		skb_pull(skb, 1);	/* Remove PID */
		return (*func)(skb, ax25);
	}

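	/*
	 *	No protocol handler matched: queue the data on the attached
	 *	socket, but only if the device's connected-mode setting
	 *	permits it (AX25_VALUES_CONMODE == 2) and either the socket
	 *	expects the PID to be included or the PID matches the
	 *	socket's protocol. A full receive queue marks our own
	 *	receiver as busy.
	 */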
	if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
		if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
		    ax25->pidincl) {
			if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
				queued = 1;
			else
				ax25->condition |= AX25_COND_OWN_RX_BUSY;
		}
	}

	return queued;
}

/*
 *	Higher level upcall for a LAPB frame
 */
static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama)
{
	int queued = 0;

	if (ax25->state == AX25_STATE_0)
		return 0;

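	/*
	 *	Dispatch to the standard AX.25 state machine or, when the
	 *	device is configured for DAMA slave operation, to the DAMA
	 *	variant.
	 */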
	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		queued = ax25_std_frame_in(ax25, skb, type);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (dama || ax25->ax25_dev->dama.slave)
			queued = ax25_ds_frame_in(ax25, skb, type);
		else
			queued = ax25_std_frame_in(ax25, skb, type);
		break;
#endif
	}

	return queued;
}

static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
	ax25_address *dev_addr, struct packet_type *ptype)
{
	ax25_address src, dest, *next_digi = NULL;
	int type = 0, mine = 0, dama;
	struct sock *make, *sk;
	ax25_digi dp, reverse_dp;
	ax25_cb *ax25;
	ax25_dev *ax25_dev;

	/*
	 *	Process the AX.25/LAPB frame.
	 */

	skb_reset_transport_header(skb);

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		goto free;

	/*
	 *	Parse the address header.
	 */

	if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL)
		goto free;

	/*
	 *	Ours perhaps ?
	 */
	if (dp.lastrepeat + 1 < dp.ndigi)		/* Not yet digipeated completely */
		next_digi = &dp.calls[dp.lastrepeat + 1];

	/*
	 *	Pull off the AX.25 headers, leaving the CTRL/PID bytes.
	 */
	skb_pull(skb, ax25_addr_size(&dp));

	/* For our port addresses ? */
	if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* Also match on any registered callsign from L3/4 */
	if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* UI frame - bypass LAPB processing */
	if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
		skb_set_transport_header(skb, 2); /* skip control and pid */

		ax25_send_to_raw(&dest, skb, skb->data[1]);

		if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0)
			goto free;

		/* Now we are pointing at the pid byte */
		switch (skb->data[1]) {
		case AX25_P_IP:
			skb_pull(skb, 2);		/* drop PID/CTRL */
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_IP);
			netif_rx(skb);
			break;

		case AX25_P_ARP:
			skb_pull(skb, 2);
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_ARP);
			netif_rx(skb);
			break;
		case AX25_P_TEXT:
			/* Now find a suitable dgram socket */
			sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
			if (sk != NULL) {
				bh_lock_sock(sk);
				if (atomic_read(&sk->sk_rmem_alloc) >=
				    sk->sk_rcvbuf) {
					kfree_skb(skb);
				} else {
					/*
					 *	Remove the control and PID.
					 */
					skb_pull(skb, 2);
					if (sock_queue_rcv_skb(sk, skb) != 0)
						kfree_skb(skb);
				}
				bh_unlock_sock(sk);
				sock_put(sk);
			} else {
				kfree_skb(skb);
			}
			break;

		default:
			kfree_skb(skb);	/* Will scan SOCK_AX25 RAW sockets */
			break;
		}

		return 0;
	}

	/*
	 *	Is connected mode supported on this device ?
	 *	If not, should we DM the incoming frame (except DMs) or
	 *	silently ignore it? For now we stay quiet.
	 */
	if (ax25_dev->values[AX25_VALUES_CONMODE] == 0)
		goto free;

	/* LAPB */

	/* AX.25 state 1-4 */

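	/*
	 *	The control block stores the digipeater path in the
	 *	direction we transmit, so reverse the received path before
	 *	looking for an existing connection for this address pair on
	 *	this device.
	 */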
	ax25_digi_invert(&dp, &reverse_dp);

	if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
		/*
		 *	Process the frame. If it is queued up internally it
		 *	returns one; otherwise we free it immediately. This
		 *	routine itself wakes the user context layers, so no
		 *	further work is needed here.
		 */
		if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
			kfree_skb(skb);

		ax25_cb_put(ax25);
		return 0;
	}

	/* AX.25 state 0 (disconnected) */

	/* a) received frame is not a SABM(E) */

	if ((*skb->data & ~AX25_PF) != AX25_SABM &&
	    (*skb->data & ~AX25_PF) != AX25_SABME) {
		/*
		 *	Never reply to a DM. Also ignore any frames for
		 *	addresses that are neither one of our interfaces nor
		 *	a registered socket.
		 */
		if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
			ax25_return_dm(dev, &src, &dest, &dp);

		goto free;
	}

	/* b) received frame is a SABM(E) */

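	/*
	 *	Look for a listening SOCK_SEQPACKET socket: either on the
	 *	final destination address or, if the frame still has to be
	 *	digipeated, on the next digipeater in the path.
	 */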
	if (dp.lastrepeat + 1 == dp.ndigi)
		sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
	else
		sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);

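	/*
	 *	A listener exists: unless its accept queue is full, create a
	 *	new socket for the incoming connection and queue this SABM
	 *	skb on the listener so that accept() can pick it up later.
	 */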
	if (sk != NULL) {
		bh_lock_sock(sk);
		if (sk_acceptq_is_full(sk) ||
		    (make = ax25_make_new(sk, ax25_dev)) == NULL) {
			if (mine)
				ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			bh_unlock_sock(sk);
			sock_put(sk);

			return 0;
		}

		ax25 = sk_to_ax25(make);
		skb_set_owner_r(skb, make);
		skb_queue_head(&sk->sk_receive_queue, skb);

		make->sk_state = TCP_ESTABLISHED;

		sk_acceptq_added(sk);
		bh_unlock_sock(sk);
	} else {
		if (!mine)
			goto free;

		if ((ax25 = ax25_create_cb()) == NULL) {
			ax25_return_dm(dev, &src, &dest, &dp);
			goto free;
		}

		ax25_fillin_cb(ax25, ax25_dev);
	}

	ax25->source_addr = dest;
	ax25->dest_addr   = src;

	/*
	 *	Sort out any digipeated paths.
	 */
	if (dp.ndigi && !ax25->digipeat &&
	    (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
		kfree_skb(skb);
		ax25_destroy_socket(ax25);
		if (sk)
			sock_put(sk);
		return 0;
	}

	if (dp.ndigi == 0) {
		kfree(ax25->digipeat);
		ax25->digipeat = NULL;
	} else {
		/* Reverse the source SABM's path */
		memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
	}

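	/*
	 *	A SABME requests extended (modulo 128) operation, a plain
	 *	SABM standard modulo 8 operation, each with its own window
	 *	size.
	 */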
	if ((*skb->data & ~AX25_PF) == AX25_SABME) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_EWINDOW];
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_WINDOW];
	}

	ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);

#ifdef CONFIG_AX25_DAMA_SLAVE
	if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
		ax25_dama_on(ax25);
#endif

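	/*
	 *	The connection is accepted: move to the connected state,
	 *	register the control block and start the heartbeat, T3 and
	 *	idle timers.
	 */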
	ax25->state = AX25_STATE_3;

	ax25_cb_add(ax25);

	ax25_start_heartbeat(ax25);
	ax25_start_t3timer(ax25);
	ax25_start_idletimer(ax25);

	if (sk) {
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk);
		sock_put(sk);
	} else {
free:
		kfree_skb(skb);
	}
	return 0;
}

/*
 *	Receive an AX.25 frame via a KISS (SLIP framed) interface.
 */
int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	skb_orphan(skb);

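	/* AX.25 is only supported in the initial network namespace. */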
	if (!net_eq(dev_net(dev), &init_net)) {
		kfree_skb(skb);
		return 0;
	}

	if ((*skb->data & 0x0F) != 0) {
		kfree_skb(skb);	/* Not a KISS data frame */
		return 0;
	}

	skb_pull(skb, AX25_KISS_HEADER_LEN);	/* Remove the KISS byte */

	return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype);
}
452