xref: /linux/net/x25/x25_link.c (revision 7f356166aebb0d956d367dfe55e19d7783277d09)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	X.25 Packet Layer release 002
4  *
5  *	This is ALPHA test software. This code may break your machine,
6  *	randomly fail to work with new releases, misbehave and/or generally
7  *	screw up. It might even work.
8  *
9  *	This code REQUIRES 2.1.15 or higher
10  *
11  *	History
12  *	X.25 001	Jonathan Naylor	  Started coding.
13  *	X.25 002	Jonathan Naylor	  New timer architecture.
14  *	mar/20/00	Daniela Squassoni Disabling/enabling of facilities
15  *					  negotiation.
16  *	2000-09-04	Henner Eisen	  dev_hold() / dev_put() for x25_neigh.
17  */
18 
19 #define pr_fmt(fmt) "X25: " fmt
20 
21 #include <linux/kernel.h>
22 #include <linux/jiffies.h>
23 #include <linux/timer.h>
24 #include <linux/slab.h>
25 #include <linux/netdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/uaccess.h>
28 #include <linux/init.h>
29 #include <net/x25.h>
30 
/* All known X.25 neighbours (one per network device). */
LIST_HEAD(x25_neigh_list);
/* Protects x25_neigh_list and the mutable fields of each x25_neigh. */
DEFINE_RWLOCK(x25_neigh_list_lock);
33 
/* Forward declarations: the T20 timer expiry handler and the link
 * state machine below retransmit via these helpers.
 */
static void x25_t20timer_expiry(struct timer_list *);

static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);
38 
39 /*
40  *	Linux set/reset timer routines
41  */
42 static inline void x25_start_t20timer(struct x25_neigh *nb)
43 {
44 	mod_timer(&nb->t20timer, jiffies + nb->t20);
45 }
46 
/* T20 expired without a Restart Confirmation arriving: retransmit the
 * Restart Request and re-arm the timer.  Registered via timer_setup()
 * in x25_link_device_up(), so this runs in timer context.
 */
static void x25_t20timer_expiry(struct timer_list *t)
{
	struct x25_neigh *nb = from_timer(nb, t, t20timer);

	x25_transmit_restart_request(nb);

	x25_start_t20timer(nb);
}
55 
/* Cancel the T20 timer; harmless if it is not currently armed. */
static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
	del_timer(&nb->t20timer);
}
60 
/* Nonzero while the T20 timer is armed, i.e. while our own Restart
 * Request is outstanding and awaiting confirmation.
 */
static inline int x25_t20timer_pending(struct x25_neigh *nb)
{
	return timer_pending(&nb->t20timer);
}
65 
/*
 *	This handles all restart and diagnostic frames.
 */
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		      unsigned short frametype)
{
	struct sk_buff *skbn;
	int confirm;

	switch (frametype) {
	case X25_RESTART_REQUEST:
		switch (nb->state) {
		case X25_LINK_STATE_2:
			/* Restart collision: both ends sent a Restart
			 * Request.  Only send a confirmation if our own
			 * request is no longer outstanding; the pending
			 * state must be sampled before the timer is
			 * stopped.
			 */
			confirm = !x25_t20timer_pending(nb);
			x25_stop_t20timer(nb);
			nb->state = X25_LINK_STATE_3;
			if (confirm)
				x25_transmit_restart_confirmation(nb);
			break;
		case X25_LINK_STATE_3:
			/* clear existing virtual calls */
			x25_kill_by_neigh(nb);

			x25_transmit_restart_confirmation(nb);
			break;
		}
		break;

	case X25_RESTART_CONFIRMATION:
		switch (nb->state) {
		case X25_LINK_STATE_2:
			if (x25_t20timer_pending(nb)) {
				/* Expected reply to our Restart Request:
				 * the packet layer is now up.
				 */
				x25_stop_t20timer(nb);
				nb->state = X25_LINK_STATE_3;
			} else {
				/* Unsolicited confirmation: redo the
				 * restart handshake from scratch.
				 */
				x25_transmit_restart_request(nb);
				x25_start_t20timer(nb);
			}
			break;
		case X25_LINK_STATE_3:
			/* clear existing virtual calls */
			x25_kill_by_neigh(nb);

			x25_transmit_restart_request(nb);
			nb->state = X25_LINK_STATE_2;
			x25_start_t20timer(nb);
			break;
		}
		break;

	case X25_DIAGNOSTIC:
		/* Make sure the four diagnostic bytes (data[3..6]) read
		 * below are actually present in the linear skb area.
		 */
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
			break;

		pr_warn("diagnostic #%d - %02X %02X %02X\n",
		       skb->data[3], skb->data[4],
		       skb->data[5], skb->data[6]);
		break;

	default:
		pr_warn("received unknown %02X with LCI 000\n",
		       frametype);
		break;
	}

	/* Once the link is up, flush any frames that were queued while
	 * it was being established.
	 */
	if (nb->state == X25_LINK_STATE_3)
		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
			x25_send_frame(skbn, nb);
}
135 
136 /*
137  *	This routine is called when a Restart Request is needed
138  */
139 static void x25_transmit_restart_request(struct x25_neigh *nb)
140 {
141 	unsigned char *dptr;
142 	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
143 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
144 
145 	if (!skb)
146 		return;
147 
148 	skb_reserve(skb, X25_MAX_L2_LEN);
149 
150 	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
151 
152 	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
153 	*dptr++ = 0x00;
154 	*dptr++ = X25_RESTART_REQUEST;
155 	*dptr++ = 0x00;
156 	*dptr++ = 0;
157 
158 	skb->sk = NULL;
159 
160 	x25_send_frame(skb, nb);
161 }
162 
163 /*
164  * This routine is called when a Restart Confirmation is needed
165  */
166 static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
167 {
168 	unsigned char *dptr;
169 	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
170 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
171 
172 	if (!skb)
173 		return;
174 
175 	skb_reserve(skb, X25_MAX_L2_LEN);
176 
177 	dptr = skb_put(skb, X25_STD_MIN_LEN);
178 
179 	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
180 	*dptr++ = 0x00;
181 	*dptr++ = X25_RESTART_CONFIRMATION;
182 
183 	skb->sk = NULL;
184 
185 	x25_send_frame(skb, nb);
186 }
187 
188 /*
189  *	This routine is called when a Clear Request is needed outside of the context
190  *	of a connected socket.
191  */
192 void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
193 				unsigned char cause)
194 {
195 	unsigned char *dptr;
196 	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
197 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
198 
199 	if (!skb)
200 		return;
201 
202 	skb_reserve(skb, X25_MAX_L2_LEN);
203 
204 	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
205 
206 	*dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
207 					 X25_GFI_EXTSEQ :
208 					 X25_GFI_STDSEQ);
209 	*dptr++ = (lci >> 0) & 0xFF;
210 	*dptr++ = X25_CLEAR_REQUEST;
211 	*dptr++ = cause;
212 	*dptr++ = 0x00;
213 
214 	skb->sk = NULL;
215 
216 	x25_send_frame(skb, nb);
217 }
218 
/*
 * Queue or transmit a frame towards the neighbour depending on the
 * current link state: buffer while the link is down or coming up,
 * send directly once it is established.
 */
void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		/* Link down: queue the frame and start bringing it up. */
		skb_queue_tail(&nb->queue, skb);
		nb->state = X25_LINK_STATE_1;
		x25_establish_link(nb);
		break;
	case X25_LINK_STATE_1:
	case X25_LINK_STATE_2:
		/* Establishment/restart in progress: just queue. */
		skb_queue_tail(&nb->queue, skb);
		break;
	case X25_LINK_STATE_3:
		/* Link up: transmit immediately. */
		x25_send_frame(skb, nb);
		break;
	}
}
236 
237 /*
238  *	Called when the link layer has become established.
239  */
240 void x25_link_established(struct x25_neigh *nb)
241 {
242 	switch (nb->state) {
243 	case X25_LINK_STATE_0:
244 	case X25_LINK_STATE_1:
245 		x25_transmit_restart_request(nb);
246 		nb->state = X25_LINK_STATE_2;
247 		x25_start_t20timer(nb);
248 		break;
249 	}
250 }
251 
/*
 *	Called when the link layer has terminated, or an establishment
 *	request has failed.
 */

void x25_link_terminated(struct x25_neigh *nb)
{
	/* Drop back to idle, discard everything queued for transmission
	 * and cancel any pending Restart Request retransmission.
	 */
	nb->state = X25_LINK_STATE_0;
	skb_queue_purge(&nb->queue);
	x25_stop_t20timer(nb);

	/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
	x25_kill_by_neigh(nb);
}
266 
267 /*
268  *	Add a new device.
269  */
270 void x25_link_device_up(struct net_device *dev)
271 {
272 	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);
273 
274 	if (!nb)
275 		return;
276 
277 	skb_queue_head_init(&nb->queue);
278 	timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);
279 
280 	dev_hold(dev);
281 	nb->dev      = dev;
282 	nb->state    = X25_LINK_STATE_0;
283 	nb->extended = 0;
284 	/*
285 	 * Enables negotiation
286 	 */
287 	nb->global_facil_mask = X25_MASK_REVERSE |
288 				       X25_MASK_THROUGHPUT |
289 				       X25_MASK_PACKET_SIZE |
290 				       X25_MASK_WINDOW_SIZE;
291 	nb->t20      = sysctl_x25_restart_request_timeout;
292 	refcount_set(&nb->refcnt, 1);
293 
294 	write_lock_bh(&x25_neigh_list_lock);
295 	list_add(&nb->node, &x25_neigh_list);
296 	write_unlock_bh(&x25_neigh_list_lock);
297 }
298 
299 /**
300  *	__x25_remove_neigh - remove neighbour from x25_neigh_list
301  *	@nb: - neigh to remove
302  *
303  *	Remove neighbour from x25_neigh_list. If it was there.
304  *	Caller must hold x25_neigh_list_lock.
305  */
306 static void __x25_remove_neigh(struct x25_neigh *nb)
307 {
308 	if (nb->node.next) {
309 		list_del(&nb->node);
310 		x25_neigh_put(nb);
311 	}
312 }
313 
314 /*
315  *	A device has been removed, remove its links.
316  */
317 void x25_link_device_down(struct net_device *dev)
318 {
319 	struct x25_neigh *nb;
320 	struct list_head *entry, *tmp;
321 
322 	write_lock_bh(&x25_neigh_list_lock);
323 
324 	list_for_each_safe(entry, tmp, &x25_neigh_list) {
325 		nb = list_entry(entry, struct x25_neigh, node);
326 
327 		if (nb->dev == dev) {
328 			__x25_remove_neigh(nb);
329 			dev_put(dev);
330 		}
331 	}
332 
333 	write_unlock_bh(&x25_neigh_list_lock);
334 }
335 
336 /*
337  *	Given a device, return the neighbour address.
338  */
339 struct x25_neigh *x25_get_neigh(struct net_device *dev)
340 {
341 	struct x25_neigh *nb, *use = NULL;
342 	struct list_head *entry;
343 
344 	read_lock_bh(&x25_neigh_list_lock);
345 	list_for_each(entry, &x25_neigh_list) {
346 		nb = list_entry(entry, struct x25_neigh, node);
347 
348 		if (nb->dev == dev) {
349 			use = nb;
350 			break;
351 		}
352 	}
353 
354 	if (use)
355 		x25_neigh_hold(use);
356 	read_unlock_bh(&x25_neigh_list_lock);
357 	return use;
358 }
359 
360 /*
361  *	Handle the ioctls that control the subscription functions.
362  */
363 int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
364 {
365 	struct x25_subscrip_struct x25_subscr;
366 	struct x25_neigh *nb;
367 	struct net_device *dev;
368 	int rc = -EINVAL;
369 
370 	if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
371 		goto out;
372 
373 	rc = -EFAULT;
374 	if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
375 		goto out;
376 
377 	rc = -EINVAL;
378 	if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
379 		goto out;
380 
381 	if ((nb = x25_get_neigh(dev)) == NULL)
382 		goto out_dev_put;
383 
384 	dev_put(dev);
385 
386 	if (cmd == SIOCX25GSUBSCRIP) {
387 		read_lock_bh(&x25_neigh_list_lock);
388 		x25_subscr.extended	     = nb->extended;
389 		x25_subscr.global_facil_mask = nb->global_facil_mask;
390 		read_unlock_bh(&x25_neigh_list_lock);
391 		rc = copy_to_user(arg, &x25_subscr,
392 				  sizeof(x25_subscr)) ? -EFAULT : 0;
393 	} else {
394 		rc = -EINVAL;
395 		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
396 			rc = 0;
397 			write_lock_bh(&x25_neigh_list_lock);
398 			nb->extended	     = x25_subscr.extended;
399 			nb->global_facil_mask = x25_subscr.global_facil_mask;
400 			write_unlock_bh(&x25_neigh_list_lock);
401 		}
402 	}
403 	x25_neigh_put(nb);
404 out:
405 	return rc;
406 out_dev_put:
407 	dev_put(dev);
408 	goto out;
409 }
410 
411 
412 /*
413  *	Release all memory associated with X.25 neighbour structures.
414  */
415 void __exit x25_link_free(void)
416 {
417 	struct x25_neigh *nb;
418 	struct list_head *entry, *tmp;
419 
420 	write_lock_bh(&x25_neigh_list_lock);
421 
422 	list_for_each_safe(entry, tmp, &x25_neigh_list) {
423 		struct net_device *dev;
424 
425 		nb = list_entry(entry, struct x25_neigh, node);
426 		dev = nb->dev;
427 		__x25_remove_neigh(nb);
428 		dev_put(dev);
429 	}
430 	write_unlock_bh(&x25_neigh_list_lock);
431 }
432