1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
5  * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
6  * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
7  * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
8  */
9 
10 #include <linux/capability.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/init.h>
14 #include <linux/errno.h>
15 #include <linux/types.h>
16 #include <linux/socket.h>
17 #include <linux/in.h>
18 #include <linux/slab.h>
19 #include <linux/kernel.h>
20 #include <linux/sched/signal.h>
21 #include <linux/spinlock.h>
22 #include <linux/timer.h>
23 #include <linux/string.h>
24 #include <linux/sockios.h>
25 #include <linux/net.h>
26 #include <linux/stat.h>
27 #include <net/net_namespace.h>
28 #include <net/ax25.h>
29 #include <linux/inet.h>
30 #include <linux/netdevice.h>
31 #include <linux/if_arp.h>
32 #include <linux/skbuff.h>
33 #include <net/sock.h>
34 #include <linux/uaccess.h>
35 #include <linux/fcntl.h>
36 #include <linux/termios.h>
37 #include <linux/mm.h>
38 #include <linux/interrupt.h>
39 #include <linux/notifier.h>
40 #include <net/rose.h>
41 #include <linux/proc_fs.h>
42 #include <linux/seq_file.h>
43 #include <net/tcp_states.h>
44 #include <net/ip.h>
45 #include <net/arp.h>
46 
47 static int rose_ndevs = 10;
48 
49 int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
50 int sysctl_rose_call_request_timeout    = ROSE_DEFAULT_T1;
51 int sysctl_rose_reset_request_timeout   = ROSE_DEFAULT_T2;
52 int sysctl_rose_clear_request_timeout   = ROSE_DEFAULT_T3;
53 int sysctl_rose_no_activity_timeout     = ROSE_DEFAULT_IDLE;
54 int sysctl_rose_ack_hold_back_timeout   = ROSE_DEFAULT_HB;
55 int sysctl_rose_routing_control         = ROSE_DEFAULT_ROUTING;
56 int sysctl_rose_link_fail_timeout       = ROSE_DEFAULT_FAIL_TIMEOUT;
57 int sysctl_rose_maximum_vcs             = ROSE_DEFAULT_MAXVC;
58 int sysctl_rose_window_size             = ROSE_DEFAULT_WINDOW_SIZE;
59 
60 static HLIST_HEAD(rose_list);
61 static DEFINE_SPINLOCK(rose_list_lock);
62 
63 static const struct proto_ops rose_proto_ops;
64 
65 ax25_address rose_callsign;
66 
67 /*
68  * ROSE network devices are virtual network devices encapsulating ROSE
69  * frames into AX.25 which will be sent through an AX.25 device, so form a
70  * special "super class" of normal net devices; split their locks off into a
71  * separate class since they always nest.
72  */
73 static struct lock_class_key rose_netdev_xmit_lock_key;
74 static struct lock_class_key rose_netdev_addr_lock_key;
75 
76 static void rose_set_lockdep_one(struct net_device *dev,
77 				 struct netdev_queue *txq,
78 				 void *_unused)
79 {
80 	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
81 }
82 
83 static void rose_set_lockdep_key(struct net_device *dev)
84 {
85 	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
86 	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
87 }
88 
89 /*
90  *	Convert a ROSE address into text.
91  */
92 char *rose2asc(char *buf, const rose_address *addr)
93 {
94 	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
95 	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
96 	    addr->rose_addr[4] == 0x00) {
97 		strcpy(buf, "*");
98 	} else {
99 		sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
100 						addr->rose_addr[1] & 0xFF,
101 						addr->rose_addr[2] & 0xFF,
102 						addr->rose_addr[3] & 0xFF,
103 						addr->rose_addr[4] & 0xFF);
104 	}
105 
106 	return buf;
107 }
108 
109 /*
110  *	Compare two ROSE addresses, 0 == equal.
111  */
112 int rosecmp(const rose_address *addr1, const rose_address *addr2)
113 {
114 	int i;
115 
116 	for (i = 0; i < 5; i++)
117 		if (addr1->rose_addr[i] != addr2->rose_addr[i])
118 			return 1;
119 
120 	return 0;
121 }
122 
123 /*
124  *	Compare two ROSE addresses for only mask digits, 0 == equal.
125  */
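/*
 *	ROSE addresses hold two BCD digits per byte, so an even digit index
 *	selects the high nibble of its byte and an odd index the low nibble.
 */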
126 int rosecmpm(const rose_address *addr1, const rose_address *addr2,
127 	     unsigned short mask)
128 {
129 	unsigned int i, j;
130 
131 	if (mask > 10)
132 		return 1;
133 
134 	for (i = 0; i < mask; i++) {
135 		j = i / 2;
136 
137 		if ((i % 2) != 0) {
138 			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
139 				return 1;
140 		} else {
141 			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
142 				return 1;
143 		}
144 	}
145 
146 	return 0;
147 }
148 
149 /*
150  *	Socket removal during an interrupt is now safe.
151  */
152 static void rose_remove_socket(struct sock *sk)
153 {
154 	spin_lock_bh(&rose_list_lock);
155 	sk_del_node_init(sk);
156 	spin_unlock_bh(&rose_list_lock);
157 }
158 
159 /*
160  *	Kill all bound sockets on a broken link layer connection to a
161  *	particular neighbour.
162  */
163 void rose_kill_by_neigh(struct rose_neigh *neigh)
164 {
165 	struct sock *s;
166 
167 	spin_lock_bh(&rose_list_lock);
168 	sk_for_each(s, &rose_list) {
169 		struct rose_sock *rose = rose_sk(s);
170 
171 		if (rose->neighbour == neigh) {
172 			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
173 			rose_neigh_put(rose->neighbour);
174 			rose->neighbour = NULL;
175 		}
176 	}
177 	spin_unlock_bh(&rose_list_lock);
178 }
179 
180 /*
181  *	Kill all bound sockets on a dropped device.
182  */
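/*
 *	Matching sockets are collected in batches of up to 16 under the list
 *	spinlock, then disconnected one at a time with the socket lock held
 *	and the list lock retaken briefly; if more sockets matched than fit
 *	in the array, the scan is restarted.
 */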
183 static void rose_kill_by_device(struct net_device *dev)
184 {
185 	struct sock *sk, *array[16];
186 	struct rose_sock *rose;
187 	bool rescan;
188 	int i, cnt;
189 
190 start:
191 	rescan = false;
192 	cnt = 0;
193 	spin_lock_bh(&rose_list_lock);
194 	sk_for_each(sk, &rose_list) {
195 		rose = rose_sk(sk);
196 		if (rose->device == dev) {
197 			if (cnt == ARRAY_SIZE(array)) {
198 				rescan = true;
199 				break;
200 			}
201 			sock_hold(sk);
202 			array[cnt++] = sk;
203 		}
204 	}
205 	spin_unlock_bh(&rose_list_lock);
206 
207 	for (i = 0; i < cnt; i++) {
208 		sk = array[i];
209 		rose = rose_sk(sk);
210 		lock_sock(sk);
211 		spin_lock_bh(&rose_list_lock);
212 		if (rose->device == dev) {
213 			rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
214 			if (rose->neighbour)
215 				rose_neigh_put(rose->neighbour);
216 			netdev_put(rose->device, &rose->dev_tracker);
217 			rose->device = NULL;
218 		}
219 		spin_unlock_bh(&rose_list_lock);
220 		release_sock(sk);
221 		sock_put(sk);
222 		cond_resched();
223 	}
224 	if (rescan)
225 		goto start;
226 }
227 
228 /*
229  *	Handle device status changes.
230  */
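/*
 *	Only NETDEV_DOWN in the initial namespace is of interest: a ROSE
 *	device going down kills its bound sockets, while an underlying AX.25
 *	device going down tears down the links and routes that use it.
 */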
231 static int rose_device_event(struct notifier_block *this,
232 			     unsigned long event, void *ptr)
233 {
234 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
235 
236 	if (!net_eq(dev_net(dev), &init_net))
237 		return NOTIFY_DONE;
238 
239 	if (event != NETDEV_DOWN)
240 		return NOTIFY_DONE;
241 
242 	switch (dev->type) {
243 	case ARPHRD_ROSE:
244 		rose_kill_by_device(dev);
245 		break;
246 	case ARPHRD_AX25:
247 		rose_link_device_down(dev);
248 		rose_rt_device_down(dev);
249 		break;
250 	}
251 
252 	return NOTIFY_DONE;
253 }
254 
255 /*
256  *	Add a socket to the bound sockets list.
257  */
258 static void rose_insert_socket(struct sock *sk)
259 {
260 
261 	spin_lock_bh(&rose_list_lock);
262 	sk_add_node(sk, &rose_list);
263 	spin_unlock_bh(&rose_list_lock);
264 }
265 
266 /*
267  *	Find a socket that wants to accept the Call Request we just
268  *	received.
269  */
270 static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
271 {
272 	struct sock *s;
273 
274 	spin_lock_bh(&rose_list_lock);
275 	sk_for_each(s, &rose_list) {
276 		struct rose_sock *rose = rose_sk(s);
277 
278 		if (!rosecmp(&rose->source_addr, addr) &&
279 		    !ax25cmp(&rose->source_call, call) &&
280 		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
281 			goto found;
282 	}
283 
284 	sk_for_each(s, &rose_list) {
285 		struct rose_sock *rose = rose_sk(s);
286 
287 		if (!rosecmp(&rose->source_addr, addr) &&
288 		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
289 		    s->sk_state == TCP_LISTEN)
290 			goto found;
291 	}
292 	s = NULL;
293 found:
294 	spin_unlock_bh(&rose_list_lock);
295 	return s;
296 }
297 
298 /*
299  *	Find a connected ROSE socket given my LCI and device.
300  */
301 struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
302 {
303 	struct sock *s;
304 
305 	spin_lock_bh(&rose_list_lock);
306 	sk_for_each(s, &rose_list) {
307 		struct rose_sock *rose = rose_sk(s);
308 
309 		if (rose->lci == lci && rose->neighbour == neigh)
310 			goto found;
311 	}
312 	s = NULL;
313 found:
314 	spin_unlock_bh(&rose_list_lock);
315 	return s;
316 }
317 
318 /*
319  *	Find a unique LCI for a given device.
320  */
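/*
 *	As is conventional for X.25, the DCE side searches upwards from LCI 1
 *	and the DTE side downwards from the maximum, which keeps the two ends
 *	of a link from racing for the same channel; 0 means none is free.
 */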
321 unsigned int rose_new_lci(struct rose_neigh *neigh)
322 {
323 	int lci;
324 
325 	if (neigh->dce_mode) {
326 		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
327 			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
328 				return lci;
329 	} else {
330 		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
331 			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
332 				return lci;
333 	}
334 
335 	return 0;
336 }
337 
338 /*
339  *	Deferred destroy.
340  */
341 void rose_destroy_socket(struct sock *);
342 
343 /*
344  *	Handler for deferred kills.
345  */
346 static void rose_destroy_timer(struct timer_list *t)
347 {
348 	struct sock *sk = timer_container_of(sk, t, sk_timer);
349 
350 	rose_destroy_socket(sk);
351 }
352 
353 /*
354  *	This is called from user mode and the timers. Thus it protects itself
355  *	against interrupt users but doesn't worry about being called during
356  *	work.  Once it is removed from the queue no interrupt or bottom half
357  *	will touch it and we are (fairly 8-) ) safe.
358  */
359 void rose_destroy_socket(struct sock *sk)
360 {
361 	struct sk_buff *skb;
362 
363 	rose_remove_socket(sk);
364 	rose_stop_heartbeat(sk);
365 	rose_stop_idletimer(sk);
366 	rose_stop_timer(sk);
367 
368 	rose_clear_queues(sk);		/* Flush the queues */
369 
370 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
371 		if (skb->sk != sk) {	/* A pending connection */
372 			/* Queue the unaccepted socket for death */
373 			sock_set_flag(skb->sk, SOCK_DEAD);
374 			rose_start_heartbeat(skb->sk);
375 			rose_sk(skb->sk)->state = ROSE_STATE_0;
376 		}
377 
378 		kfree_skb(skb);
379 	}
380 
381 	if (sk_has_allocations(sk)) {
382 		/* Defer: outstanding buffers */
383 		timer_setup(&sk->sk_timer, rose_destroy_timer, 0);
384 		sk->sk_timer.expires  = jiffies + 10 * HZ;
385 		add_timer(&sk->sk_timer);
386 	} else
387 		sock_put(sk);
388 }
389 
390 /*
391  *	Handling for system calls applied via the various interfaces to a
392  *	ROSE socket object.
393  */
394 
395 static int rose_setsockopt(struct socket *sock, int level, int optname,
396 		sockptr_t optval, unsigned int optlen)
397 {
398 	struct sock *sk = sock->sk;
399 	struct rose_sock *rose = rose_sk(sk);
400 	unsigned int opt;
401 
402 	if (level != SOL_ROSE)
403 		return -ENOPROTOOPT;
404 
405 	if (optlen < sizeof(unsigned int))
406 		return -EINVAL;
407 
408 	if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
409 		return -EFAULT;
410 
411 	switch (optname) {
412 	case ROSE_DEFER:
413 		rose->defer = opt ? 1 : 0;
414 		return 0;
415 
416 	case ROSE_T1:
417 		if (opt < 1 || opt > UINT_MAX / HZ)
418 			return -EINVAL;
419 		rose->t1 = opt * HZ;
420 		return 0;
421 
422 	case ROSE_T2:
423 		if (opt < 1 || opt > UINT_MAX / HZ)
424 			return -EINVAL;
425 		rose->t2 = opt * HZ;
426 		return 0;
427 
428 	case ROSE_T3:
429 		if (opt < 1 || opt > UINT_MAX / HZ)
430 			return -EINVAL;
431 		rose->t3 = opt * HZ;
432 		return 0;
433 
434 	case ROSE_HOLDBACK:
435 		if (opt < 1 || opt > UINT_MAX / HZ)
436 			return -EINVAL;
437 		rose->hb = opt * HZ;
438 		return 0;
439 
440 	case ROSE_IDLE:
441 		if (opt > UINT_MAX / (60 * HZ))
442 			return -EINVAL;
443 		rose->idle = opt * 60 * HZ;
444 		return 0;
445 
446 	case ROSE_QBITINCL:
447 		rose->qbitincl = opt ? 1 : 0;
448 		return 0;
449 
450 	default:
451 		return -ENOPROTOOPT;
452 	}
453 }
454 
455 static int rose_getsockopt(struct socket *sock, int level, int optname,
456 	char __user *optval, int __user *optlen)
457 {
458 	struct sock *sk = sock->sk;
459 	struct rose_sock *rose = rose_sk(sk);
460 	int val = 0;
461 	int len;
462 
463 	if (level != SOL_ROSE)
464 		return -ENOPROTOOPT;
465 
466 	if (get_user(len, optlen))
467 		return -EFAULT;
468 
469 	if (len < 0)
470 		return -EINVAL;
471 
472 	switch (optname) {
473 	case ROSE_DEFER:
474 		val = rose->defer;
475 		break;
476 
477 	case ROSE_T1:
478 		val = rose->t1 / HZ;
479 		break;
480 
481 	case ROSE_T2:
482 		val = rose->t2 / HZ;
483 		break;
484 
485 	case ROSE_T3:
486 		val = rose->t3 / HZ;
487 		break;
488 
489 	case ROSE_HOLDBACK:
490 		val = rose->hb / HZ;
491 		break;
492 
493 	case ROSE_IDLE:
494 		val = rose->idle / (60 * HZ);
495 		break;
496 
497 	case ROSE_QBITINCL:
498 		val = rose->qbitincl;
499 		break;
500 
501 	default:
502 		return -ENOPROTOOPT;
503 	}
504 
505 	len = min_t(unsigned int, len, sizeof(int));
506 
507 	if (put_user(len, optlen))
508 		return -EFAULT;
509 
510 	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
511 }
512 
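/*
 *	Move a bound socket into the listening state.  The destination
 *	address, call and digipeaters are cleared so that rose_find_listener()
 *	matches incoming Call Requests against the source side only.
 */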
513 static int rose_listen(struct socket *sock, int backlog)
514 {
515 	struct sock *sk = sock->sk;
516 
517 	lock_sock(sk);
518 	if (sock->state != SS_UNCONNECTED) {
519 		release_sock(sk);
520 		return -EINVAL;
521 	}
522 
523 	if (sk->sk_state != TCP_LISTEN) {
524 		struct rose_sock *rose = rose_sk(sk);
525 
526 		rose->dest_ndigis = 0;
527 		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
528 		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
529 		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
530 		sk->sk_max_ack_backlog = backlog;
531 		sk->sk_state           = TCP_LISTEN;
532 		release_sock(sk);
533 		return 0;
534 	}
535 	release_sock(sk);
536 
537 	return -EOPNOTSUPP;
538 }
539 
540 static struct proto rose_proto = {
541 	.name	  = "ROSE",
542 	.owner	  = THIS_MODULE,
543 	.obj_size = sizeof(struct rose_sock),
544 };
545 
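/*
 *	Create a new ROSE socket.  Only SOCK_SEQPACKET with protocol 0 is
 *	supported, and only in the initial network namespace; the per-socket
 *	timers are initialised from the sysctl defaults above.
 */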
546 static int rose_create(struct net *net, struct socket *sock, int protocol,
547 		       int kern)
548 {
549 	struct sock *sk;
550 	struct rose_sock *rose;
551 
552 	if (!net_eq(net, &init_net))
553 		return -EAFNOSUPPORT;
554 
555 	if (sock->type != SOCK_SEQPACKET || protocol != 0)
556 		return -ESOCKTNOSUPPORT;
557 
558 	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, kern);
559 	if (sk == NULL)
560 		return -ENOMEM;
561 
562 	rose = rose_sk(sk);
563 
564 	sock_init_data(sock, sk);
565 
566 	skb_queue_head_init(&rose->ack_queue);
567 #ifdef M_BIT
568 	skb_queue_head_init(&rose->frag_queue);
569 	rose->fraglen    = 0;
570 #endif
571 
572 	sock->ops    = &rose_proto_ops;
573 	sk->sk_protocol = protocol;
574 
575 	timer_setup(&rose->timer, NULL, 0);
576 	timer_setup(&rose->idletimer, NULL, 0);
577 
578 	rose->t1   = msecs_to_jiffies(sysctl_rose_call_request_timeout);
579 	rose->t2   = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
580 	rose->t3   = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
581 	rose->hb   = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
582 	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
583 
584 	rose->state = ROSE_STATE_0;
585 
586 	return 0;
587 }
588 
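/*
 *	Clone a listening socket for an incoming call.  The new socket
 *	inherits the parent's timer settings, buffer sizes, flags and device
 *	reference, and starts life in the TCP_ESTABLISHED state.
 */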
589 static struct sock *rose_make_new(struct sock *osk)
590 {
591 	struct sock *sk;
592 	struct rose_sock *rose, *orose;
593 
594 	if (osk->sk_type != SOCK_SEQPACKET)
595 		return NULL;
596 
597 	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto, 0);
598 	if (sk == NULL)
599 		return NULL;
600 
601 	rose = rose_sk(sk);
602 
603 	sock_init_data(NULL, sk);
604 
605 	skb_queue_head_init(&rose->ack_queue);
606 #ifdef M_BIT
607 	skb_queue_head_init(&rose->frag_queue);
608 	rose->fraglen  = 0;
609 #endif
610 
611 	sk->sk_type     = osk->sk_type;
612 	sk->sk_priority = READ_ONCE(osk->sk_priority);
613 	sk->sk_protocol = osk->sk_protocol;
614 	sk->sk_rcvbuf   = osk->sk_rcvbuf;
615 	sk->sk_sndbuf   = osk->sk_sndbuf;
616 	sk->sk_state    = TCP_ESTABLISHED;
617 	sock_copy_flags(sk, osk);
618 
619 	timer_setup(&rose->timer, NULL, 0);
620 	timer_setup(&rose->idletimer, NULL, 0);
621 
622 	orose		= rose_sk(osk);
623 	rose->t1	= orose->t1;
624 	rose->t2	= orose->t2;
625 	rose->t3	= orose->t3;
626 	rose->hb	= orose->hb;
627 	rose->idle	= orose->idle;
628 	rose->defer	= orose->defer;
629 	rose->device	= orose->device;
630 	if (rose->device)
631 		netdev_hold(rose->device, &rose->dev_tracker, GFP_ATOMIC);
632 	rose->qbitincl	= orose->qbitincl;
633 
634 	return sk;
635 }
636 
637 static int rose_release(struct socket *sock)
638 {
639 	struct sock *sk = sock->sk;
640 	struct rose_sock *rose;
641 
642 	if (sk == NULL) return 0;
643 
644 	sock_hold(sk);
645 	sock_orphan(sk);
646 	lock_sock(sk);
647 	rose = rose_sk(sk);
648 
649 	switch (rose->state) {
650 	case ROSE_STATE_0:
651 		release_sock(sk);
652 		rose_disconnect(sk, 0, -1, -1);
653 		lock_sock(sk);
654 		rose_destroy_socket(sk);
655 		break;
656 
657 	case ROSE_STATE_2:
658 		rose_neigh_put(rose->neighbour);
659 		release_sock(sk);
660 		rose_disconnect(sk, 0, -1, -1);
661 		lock_sock(sk);
662 		rose_destroy_socket(sk);
663 		break;
664 
665 	case ROSE_STATE_1:
666 	case ROSE_STATE_3:
667 	case ROSE_STATE_4:
668 	case ROSE_STATE_5:
669 		rose_clear_queues(sk);
670 		rose_stop_idletimer(sk);
671 		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
672 		rose_start_t3timer(sk);
673 		rose->state  = ROSE_STATE_2;
674 		sk->sk_state    = TCP_CLOSE;
675 		sk->sk_shutdown |= SEND_SHUTDOWN;
676 		sk->sk_state_change(sk);
677 		sock_set_flag(sk, SOCK_DEAD);
678 		sock_set_flag(sk, SOCK_DESTROY);
679 		break;
680 
681 	default:
682 		break;
683 	}
684 
685 	spin_lock_bh(&rose_list_lock);
686 	netdev_put(rose->device, &rose->dev_tracker);
687 	rose->device = NULL;
688 	spin_unlock_bh(&rose_list_lock);
689 	sock->sk = NULL;
690 	release_sock(sk);
691 	sock_put(sk);
692 
693 	return 0;
694 }
695 
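/*
 *	Bind a socket to a local ROSE address.  The address must belong to an
 *	existing ROSE device; the source callsign is taken from the AX.25 UID
 *	map when one exists, otherwise the caller-supplied callsign is used,
 *	subject to CAP_NET_BIND_SERVICE when the AX.25 UID policy is in force.
 */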
696 static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
697 {
698 	struct sock *sk = sock->sk;
699 	struct rose_sock *rose = rose_sk(sk);
700 	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
701 	struct net_device *dev;
702 	ax25_address *source;
703 	ax25_uid_assoc *user;
704 	int err = -EINVAL;
705 	int n;
706 
707 	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
708 		return -EINVAL;
709 
710 	if (addr->srose_family != AF_ROSE)
711 		return -EINVAL;
712 
713 	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
714 		return -EINVAL;
715 
716 	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
717 		return -EINVAL;
718 
719 	lock_sock(sk);
720 
721 	if (!sock_flag(sk, SOCK_ZAPPED))
722 		goto out_release;
723 
724 	err = -EADDRNOTAVAIL;
725 	dev = rose_dev_get(&addr->srose_addr);
726 	if (!dev)
727 		goto out_release;
728 
729 	source = &addr->srose_call;
730 
731 	user = ax25_findbyuid(current_euid());
732 	if (user) {
733 		rose->source_call = user->call;
734 		ax25_uid_put(user);
735 	} else {
736 		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
737 			dev_put(dev);
738 			err = -EACCES;
739 			goto out_release;
740 		}
741 		rose->source_call   = *source;
742 	}
743 
744 	rose->source_addr   = addr->srose_addr;
745 	rose->device        = dev;
746 	netdev_tracker_alloc(rose->device, &rose->dev_tracker, GFP_KERNEL);
747 	rose->source_ndigis = addr->srose_ndigis;
748 
749 	if (addr_len == sizeof(struct full_sockaddr_rose)) {
750 		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
751 		for (n = 0 ; n < addr->srose_ndigis ; n++)
752 			rose->source_digis[n] = full_addr->srose_digis[n];
753 	} else {
754 		if (rose->source_ndigis == 1) {
755 			rose->source_digis[0] = addr->srose_digi;
756 		}
757 	}
758 
759 	rose_insert_socket(sk);
760 
761 	sock_reset_flag(sk, SOCK_ZAPPED);
762 	err = 0;
763 out_release:
764 	release_sock(sk);
765 	return err;
766 }
767 
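/*
 *	Connect to a remote ROSE address: find a neighbour that routes to the
 *	destination, allocate an LCI on it, autobind if the socket is still
 *	unbound, send a Call Request and then, unless O_NONBLOCK is set, wait
 *	for the call to be accepted or cleared.
 */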
768 static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
769 {
770 	struct sock *sk = sock->sk;
771 	struct rose_sock *rose = rose_sk(sk);
772 	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
773 	unsigned char cause, diagnostic;
774 	ax25_uid_assoc *user;
775 	int n, err = 0;
776 
777 	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
778 		return -EINVAL;
779 
780 	if (addr->srose_family != AF_ROSE)
781 		return -EINVAL;
782 
783 	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
784 		return -EINVAL;
785 
786 	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
787 		return -EINVAL;
788 
789 	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
790 	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
791 		return -EINVAL;
792 
793 	lock_sock(sk);
794 
795 	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
796 		/* Connect completed during a ERESTARTSYS event */
797 		sock->state = SS_CONNECTED;
798 		goto out_release;
799 	}
800 
801 	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
802 		sock->state = SS_UNCONNECTED;
803 		err = -ECONNREFUSED;
804 		goto out_release;
805 	}
806 
807 	if (sk->sk_state == TCP_ESTABLISHED) {
808 		/* No reconnect on a seqpacket socket */
809 		err = -EISCONN;
810 		goto out_release;
811 	}
812 
813 	sk->sk_state   = TCP_CLOSE;
814 	sock->state = SS_UNCONNECTED;
815 
816 	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
817 					 &diagnostic, 0);
818 	if (!rose->neighbour) {
819 		err = -ENETUNREACH;
820 		goto out_release;
821 	}
822 
823 	rose->lci = rose_new_lci(rose->neighbour);
824 	if (!rose->lci) {
825 		err = -ENETUNREACH;
826 		rose_neigh_put(rose->neighbour);
827 		goto out_release;
828 	}
829 
830 	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
831 		struct net_device *dev;
832 
833 		sock_reset_flag(sk, SOCK_ZAPPED);
834 
835 		dev = rose_dev_first();
836 		if (!dev) {
837 			err = -ENETUNREACH;
838 			rose_neigh_put(rose->neighbour);
839 			goto out_release;
840 		}
841 
842 		user = ax25_findbyuid(current_euid());
843 		if (!user) {
844 			err = -EINVAL;
845 			rose_neigh_put(rose->neighbour);
846 			dev_put(dev);
847 			goto out_release;
848 		}
849 
850 		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
851 		rose->source_call = user->call;
852 		rose->device      = dev;
853 		netdev_tracker_alloc(rose->device, &rose->dev_tracker,
854 				     GFP_KERNEL);
855 		ax25_uid_put(user);
856 
857 		rose_insert_socket(sk);		/* Finish the bind */
858 	}
859 	rose->dest_addr   = addr->srose_addr;
860 	rose->dest_call   = addr->srose_call;
861 	rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
862 	rose->dest_ndigis = addr->srose_ndigis;
863 
864 	if (addr_len == sizeof(struct full_sockaddr_rose)) {
865 		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
866 		for (n = 0 ; n < addr->srose_ndigis ; n++)
867 			rose->dest_digis[n] = full_addr->srose_digis[n];
868 	} else {
869 		if (rose->dest_ndigis == 1) {
870 			rose->dest_digis[0] = addr->srose_digi;
871 		}
872 	}
873 
874 	/* Move to connecting socket, start sending Connect Requests */
875 	sock->state   = SS_CONNECTING;
876 	sk->sk_state     = TCP_SYN_SENT;
877 
878 	rose->state = ROSE_STATE_1;
879 
880 	rose_write_internal(sk, ROSE_CALL_REQUEST);
881 	rose_start_heartbeat(sk);
882 	rose_start_t1timer(sk);
883 
884 	/* Now the loop */
885 	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
886 		err = -EINPROGRESS;
887 		goto out_release;
888 	}
889 
890 	/*
891 	 * A Connect Ack with Choke or timeout or failed routing will go to
892 	 * closed.
893 	 */
894 	if (sk->sk_state == TCP_SYN_SENT) {
895 		DEFINE_WAIT(wait);
896 
897 		for (;;) {
898 			prepare_to_wait(sk_sleep(sk), &wait,
899 					TASK_INTERRUPTIBLE);
900 			if (sk->sk_state != TCP_SYN_SENT)
901 				break;
902 			if (!signal_pending(current)) {
903 				release_sock(sk);
904 				schedule();
905 				lock_sock(sk);
906 				continue;
907 			}
908 			err = -ERESTARTSYS;
909 			break;
910 		}
911 		finish_wait(sk_sleep(sk), &wait);
912 
913 		if (err)
914 			goto out_release;
915 	}
916 
917 	if (sk->sk_state != TCP_ESTABLISHED) {
918 		sock->state = SS_UNCONNECTED;
919 		err = sock_error(sk);	/* Always set at this point */
920 		goto out_release;
921 	}
922 
923 	sock->state = SS_CONNECTED;
924 
925 out_release:
926 	release_sock(sk);
927 
928 	return err;
929 }
930 
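/*
 *	Accept a connection.  Completed calls are queued on the listener's
 *	receive queue by rose_rx_call_request(); dequeue one (blocking unless
 *	O_NONBLOCK) and graft its already-established socket onto newsock.
 */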
931 static int rose_accept(struct socket *sock, struct socket *newsock,
932 		       struct proto_accept_arg *arg)
933 {
934 	struct sk_buff *skb;
935 	struct sock *newsk;
936 	DEFINE_WAIT(wait);
937 	struct sock *sk;
938 	int err = 0;
939 
940 	if ((sk = sock->sk) == NULL)
941 		return -EINVAL;
942 
943 	lock_sock(sk);
944 	if (sk->sk_type != SOCK_SEQPACKET) {
945 		err = -EOPNOTSUPP;
946 		goto out_release;
947 	}
948 
949 	if (sk->sk_state != TCP_LISTEN) {
950 		err = -EINVAL;
951 		goto out_release;
952 	}
953 
954 	/*
955 	 *	The receive queue at this point is holding sockets ready to use
956 	 *	hooked onto the Call Request we saved
957 	 */
958 	for (;;) {
959 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
960 
961 		skb = skb_dequeue(&sk->sk_receive_queue);
962 		if (skb)
963 			break;
964 
965 		if (arg->flags & O_NONBLOCK) {
966 			err = -EWOULDBLOCK;
967 			break;
968 		}
969 		if (!signal_pending(current)) {
970 			release_sock(sk);
971 			schedule();
972 			lock_sock(sk);
973 			continue;
974 		}
975 		err = -ERESTARTSYS;
976 		break;
977 	}
978 	finish_wait(sk_sleep(sk), &wait);
979 	if (err)
980 		goto out_release;
981 
982 	newsk = skb->sk;
983 	sock_graft(newsk, newsock);
984 
985 	/* Now attach up the new socket */
986 	skb->sk = NULL;
987 	kfree_skb(skb);
988 	sk_acceptq_removed(sk);
989 
990 out_release:
991 	release_sock(sk);
992 
993 	return err;
994 }
995 
996 static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
997 	int peer)
998 {
999 	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
1000 	struct sock *sk = sock->sk;
1001 	struct rose_sock *rose = rose_sk(sk);
1002 	int n;
1003 
1004 	memset(srose, 0, sizeof(*srose));
1005 	if (peer != 0) {
1006 		if (sk->sk_state != TCP_ESTABLISHED)
1007 			return -ENOTCONN;
1008 		srose->srose_family = AF_ROSE;
1009 		srose->srose_addr   = rose->dest_addr;
1010 		srose->srose_call   = rose->dest_call;
1011 		srose->srose_ndigis = rose->dest_ndigis;
1012 		for (n = 0; n < rose->dest_ndigis; n++)
1013 			srose->srose_digis[n] = rose->dest_digis[n];
1014 	} else {
1015 		srose->srose_family = AF_ROSE;
1016 		srose->srose_addr   = rose->source_addr;
1017 		srose->srose_call   = rose->source_call;
1018 		srose->srose_ndigis = rose->source_ndigis;
1019 		for (n = 0; n < rose->source_ndigis; n++)
1020 			srose->srose_digis[n] = rose->source_digis[n];
1021 	}
1022 
1023 	return sizeof(struct full_sockaddr_rose);
1024 }
1025 
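/*
 *	Handle an incoming Call Request from the ROSE network layer.  Parse
 *	the facilities, find a matching listener, clone it with
 *	rose_make_new() and either accept the call immediately or, when
 *	ROSE_DEFER is set, park it in state 5 until SIOCRSACCEPT.  Returns 1
 *	if the call was taken, 0 if it was cleared.
 */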
1026 int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
1027 {
1028 	struct sock *sk;
1029 	struct sock *make;
1030 	struct rose_sock *make_rose;
1031 	struct rose_facilities_struct facilities;
1032 	int n;
1033 
1034 	skb->sk = NULL;		/* Initially we don't know who it's for */
1035 
1036 	/*
1037 	 *	skb->data points to the rose frame start
1038 	 */
1039 	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
1040 
1041 	if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
1042 				   skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
1043 				   &facilities)) {
1044 		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
1045 		return 0;
1046 	}
1047 
1048 	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
1049 
1050 	/*
1051 	 * We can't accept the Call Request.
1052 	 */
1053 	if (sk == NULL || sk_acceptq_is_full(sk) ||
1054 	    (make = rose_make_new(sk)) == NULL) {
1055 		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
1056 		return 0;
1057 	}
1058 
1059 	skb->sk     = make;
1060 	make->sk_state = TCP_ESTABLISHED;
1061 	make_rose = rose_sk(make);
1062 
1063 	make_rose->lci           = lci;
1064 	make_rose->dest_addr     = facilities.dest_addr;
1065 	make_rose->dest_call     = facilities.dest_call;
1066 	make_rose->dest_ndigis   = facilities.dest_ndigis;
1067 	for (n = 0 ; n < facilities.dest_ndigis ; n++)
1068 		make_rose->dest_digis[n] = facilities.dest_digis[n];
1069 	make_rose->source_addr   = facilities.source_addr;
1070 	make_rose->source_call   = facilities.source_call;
1071 	make_rose->source_ndigis = facilities.source_ndigis;
1072 	for (n = 0 ; n < facilities.source_ndigis ; n++)
1073 		make_rose->source_digis[n] = facilities.source_digis[n];
1074 	make_rose->neighbour     = neigh;
1075 	make_rose->device        = dev;
1076 	/* Caller got a reference for us. */
1077 	netdev_tracker_alloc(make_rose->device, &make_rose->dev_tracker,
1078 			     GFP_ATOMIC);
1079 	make_rose->facilities    = facilities;
1080 
1081 	rose_neigh_hold(make_rose->neighbour);
1082 
1083 	if (rose_sk(sk)->defer) {
1084 		make_rose->state = ROSE_STATE_5;
1085 	} else {
1086 		rose_write_internal(make, ROSE_CALL_ACCEPTED);
1087 		make_rose->state = ROSE_STATE_3;
1088 		rose_start_idletimer(make);
1089 	}
1090 
1091 	make_rose->condition = 0x00;
1092 	make_rose->vs        = 0;
1093 	make_rose->va        = 0;
1094 	make_rose->vr        = 0;
1095 	make_rose->vl        = 0;
1096 	sk_acceptq_added(sk);
1097 
1098 	rose_insert_socket(make);
1099 
1100 	skb_queue_head(&sk->sk_receive_queue, skb);
1101 
1102 	rose_start_heartbeat(make);
1103 
1104 	if (!sock_flag(sk, SOCK_DEAD))
1105 		sk->sk_data_ready(sk);
1106 
1107 	return 1;
1108 }
1109 
1110 static int rose_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1111 {
1112 	struct sock *sk = sock->sk;
1113 	struct rose_sock *rose = rose_sk(sk);
1114 	DECLARE_SOCKADDR(struct sockaddr_rose *, usrose, msg->msg_name);
1115 	int err;
1116 	struct full_sockaddr_rose srose;
1117 	struct sk_buff *skb;
1118 	unsigned char *asmptr;
1119 	int n, size, qbit = 0;
1120 
1121 	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1122 		return -EINVAL;
1123 
1124 	if (sock_flag(sk, SOCK_ZAPPED))
1125 		return -EADDRNOTAVAIL;
1126 
1127 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
1128 		send_sig(SIGPIPE, current, 0);
1129 		return -EPIPE;
1130 	}
1131 
1132 	if (rose->neighbour == NULL || rose->device == NULL)
1133 		return -ENETUNREACH;
1134 
1135 	if (usrose != NULL) {
1136 		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
1137 			return -EINVAL;
1138 		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
1139 		memcpy(&srose, usrose, msg->msg_namelen);
1140 		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
1141 		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
1142 			return -EISCONN;
1143 		if (srose.srose_ndigis != rose->dest_ndigis)
1144 			return -EISCONN;
1145 		if (srose.srose_ndigis == rose->dest_ndigis) {
1146 			for (n = 0 ; n < srose.srose_ndigis ; n++)
1147 				if (ax25cmp(&rose->dest_digis[n],
1148 					    &srose.srose_digis[n]))
1149 					return -EISCONN;
1150 		}
1151 		if (srose.srose_family != AF_ROSE)
1152 			return -EINVAL;
1153 	} else {
1154 		if (sk->sk_state != TCP_ESTABLISHED)
1155 			return -ENOTCONN;
1156 
1157 		srose.srose_family = AF_ROSE;
1158 		srose.srose_addr   = rose->dest_addr;
1159 		srose.srose_call   = rose->dest_call;
1160 		srose.srose_ndigis = rose->dest_ndigis;
1161 		for (n = 0 ; n < rose->dest_ndigis ; n++)
1162 			srose.srose_digis[n] = rose->dest_digis[n];
1163 	}
1164 
1165 	/* Build a packet */
1166 	/* Sanity check the packet size */
1167 	if (len > 65535)
1168 		return -EMSGSIZE;
1169 
1170 	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
1171 
1172 	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
1173 		return err;
1174 
1175 	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
1176 
1177 	/*
1178 	 *	Put the data on the end
1179 	 */
1180 
1181 	skb_reset_transport_header(skb);
1182 	skb_put(skb, len);
1183 
1184 	err = memcpy_from_msg(skb_transport_header(skb), msg, len);
1185 	if (err) {
1186 		kfree_skb(skb);
1187 		return err;
1188 	}
1189 
1190 	/*
1191 	 *	If the Q BIT Include socket option is in force, the first
1192 	 *	byte of the user data is the logical value of the Q Bit.
1193 	 */
1194 	if (rose->qbitincl) {
1195 		qbit = skb->data[0];
1196 		skb_pull(skb, 1);
1197 	}
1198 
1199 	/*
1200 	 *	Push down the ROSE header
1201 	 */
1202 	asmptr = skb_push(skb, ROSE_MIN_LEN);
1203 
1204 	/* Build a ROSE Network header */
1205 	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1206 	asmptr[1] = (rose->lci >> 0) & 0xFF;
1207 	asmptr[2] = ROSE_DATA;
1208 
1209 	if (qbit)
1210 		asmptr[0] |= ROSE_Q_BIT;
1211 
1212 	if (sk->sk_state != TCP_ESTABLISHED) {
1213 		kfree_skb(skb);
1214 		return -ENOTCONN;
1215 	}
1216 
1217 #ifdef M_BIT
1218 #define ROSE_PACLEN (256-ROSE_MIN_LEN)
1219 	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
1220 		unsigned char header[ROSE_MIN_LEN];
1221 		struct sk_buff *skbn;
1222 		int frontlen;
1223 		int lg;
1224 
1225 		/* Save a copy of the Header */
1226 		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
1227 		skb_pull(skb, ROSE_MIN_LEN);
1228 
1229 		frontlen = skb_headroom(skb);
1230 
1231 		while (skb->len > 0) {
1232 			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
1233 				kfree_skb(skb);
1234 				return err;
1235 			}
1236 
1237 			skbn->sk   = sk;
1238 			skbn->free = 1;
1239 			skbn->arp  = 1;
1240 
1241 			skb_reserve(skbn, frontlen);
1242 
1243 			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
1244 
1245 			/* Copy the user data */
1246 			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
1247 			skb_pull(skb, lg);
1248 
1249 			/* Duplicate the Header */
1250 			skb_push(skbn, ROSE_MIN_LEN);
1251 			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
1252 
1253 			if (skb->len > 0)
1254 				skbn->data[2] |= M_BIT;
1255 
1256 			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
1257 		}
1258 
1259 		skb->free = 1;
1260 		kfree_skb(skb);
1261 	} else {
1262 		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
1263 	}
1264 #else
1265 	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
1266 #endif
1267 
1268 	rose_kick(sk);
1269 
1270 	return len;
1271 }
1272 
1273 
1274 static int rose_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1275 			int flags)
1276 {
1277 	struct sock *sk = sock->sk;
1278 	struct rose_sock *rose = rose_sk(sk);
1279 	size_t copied;
1280 	unsigned char *asmptr;
1281 	struct sk_buff *skb;
1282 	int n, er, qbit;
1283 
1284 	/*
1285 	 * This works for seqpacket too. The receiver has ordered the queue for
1286 	 * us! We do one quick check first though
1287 	 */
1288 	if (sk->sk_state != TCP_ESTABLISHED)
1289 		return -ENOTCONN;
1290 
1291 	/* Now we can treat all alike */
1292 	skb = skb_recv_datagram(sk, flags, &er);
1293 	if (!skb)
1294 		return er;
1295 
1296 	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
1297 
1298 	skb_pull(skb, ROSE_MIN_LEN);
1299 
1300 	if (rose->qbitincl) {
1301 		asmptr  = skb_push(skb, 1);
1302 		*asmptr = qbit;
1303 	}
1304 
1305 	skb_reset_transport_header(skb);
1306 	copied     = skb->len;
1307 
1308 	if (copied > size) {
1309 		copied = size;
1310 		msg->msg_flags |= MSG_TRUNC;
1311 	}
1312 
1313 	skb_copy_datagram_msg(skb, 0, msg, copied);
1314 
1315 	if (msg->msg_name) {
1316 		struct sockaddr_rose *srose;
1317 		DECLARE_SOCKADDR(struct full_sockaddr_rose *, full_srose,
1318 				 msg->msg_name);
1319 
1320 		memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
1321 		srose = msg->msg_name;
1322 		srose->srose_family = AF_ROSE;
1323 		srose->srose_addr   = rose->dest_addr;
1324 		srose->srose_call   = rose->dest_call;
1325 		srose->srose_ndigis = rose->dest_ndigis;
1326 		for (n = 0 ; n < rose->dest_ndigis ; n++)
1327 			full_srose->srose_digis[n] = rose->dest_digis[n];
1328 		msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1329 	}
1330 
1331 	skb_free_datagram(sk, skb);
1332 
1333 	return copied;
1334 }
1335 
1336 
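/*
 *	ROSE-specific ioctls: queue sizes (TIOCOUTQ/TIOCINQ), routing table
 *	maintenance (CAP_NET_ADMIN only), clear cause/diagnostic access, the
 *	layer 2 listening callsign, and SIOCRSACCEPT for deferred calls.
 */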
1337 static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1338 {
1339 	struct sock *sk = sock->sk;
1340 	struct rose_sock *rose = rose_sk(sk);
1341 	void __user *argp = (void __user *)arg;
1342 
1343 	switch (cmd) {
1344 	case TIOCOUTQ: {
1345 		long amount;
1346 
1347 		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1348 		if (amount < 0)
1349 			amount = 0;
1350 		return put_user(amount, (unsigned int __user *) argp);
1351 	}
1352 
1353 	case TIOCINQ: {
1354 		struct sk_buff *skb;
1355 		long amount = 0L;
1356 
1357 		spin_lock_irq(&sk->sk_receive_queue.lock);
1358 		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1359 			amount = skb->len;
1360 		spin_unlock_irq(&sk->sk_receive_queue.lock);
1361 		return put_user(amount, (unsigned int __user *) argp);
1362 	}
1363 
1364 	case SIOCGIFADDR:
1365 	case SIOCSIFADDR:
1366 	case SIOCGIFDSTADDR:
1367 	case SIOCSIFDSTADDR:
1368 	case SIOCGIFBRDADDR:
1369 	case SIOCSIFBRDADDR:
1370 	case SIOCGIFNETMASK:
1371 	case SIOCSIFNETMASK:
1372 	case SIOCGIFMETRIC:
1373 	case SIOCSIFMETRIC:
1374 		return -EINVAL;
1375 
1376 	case SIOCADDRT:
1377 	case SIOCDELRT:
1378 	case SIOCRSCLRRT:
1379 		if (!capable(CAP_NET_ADMIN))
1380 			return -EPERM;
1381 		return rose_rt_ioctl(cmd, argp);
1382 
1383 	case SIOCRSGCAUSE: {
1384 		struct rose_cause_struct rose_cause;
1385 		rose_cause.cause      = rose->cause;
1386 		rose_cause.diagnostic = rose->diagnostic;
1387 		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
1388 	}
1389 
1390 	case SIOCRSSCAUSE: {
1391 		struct rose_cause_struct rose_cause;
1392 		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
1393 			return -EFAULT;
1394 		rose->cause      = rose_cause.cause;
1395 		rose->diagnostic = rose_cause.diagnostic;
1396 		return 0;
1397 	}
1398 
1399 	case SIOCRSSL2CALL:
1400 		if (!capable(CAP_NET_ADMIN)) return -EPERM;
1401 		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1402 			ax25_listen_release(&rose_callsign, NULL);
1403 		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
1404 			return -EFAULT;
1405 		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1406 			return ax25_listen_register(&rose_callsign, NULL);
1407 
1408 		return 0;
1409 
1410 	case SIOCRSGL2CALL:
1411 		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
1412 
1413 	case SIOCRSACCEPT:
1414 		if (rose->state == ROSE_STATE_5) {
1415 			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
1416 			rose_start_idletimer(sk);
1417 			rose->condition = 0x00;
1418 			rose->vs        = 0;
1419 			rose->va        = 0;
1420 			rose->vr        = 0;
1421 			rose->vl        = 0;
1422 			rose->state     = ROSE_STATE_3;
1423 		}
1424 		return 0;
1425 
1426 	default:
1427 		return -ENOIOCTLCMD;
1428 	}
1429 
1430 	return 0;
1431 }
1432 
1433 #ifdef CONFIG_PROC_FS
1434 static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1435 	__acquires(rose_list_lock)
1436 {
1437 	spin_lock_bh(&rose_list_lock);
1438 	return seq_hlist_start_head(&rose_list, *pos);
1439 }
1440 
1441 static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1442 {
1443 	return seq_hlist_next(v, &rose_list, pos);
1444 }
1445 
1446 static void rose_info_stop(struct seq_file *seq, void *v)
1447 	__releases(rose_list_lock)
1448 {
1449 	spin_unlock_bh(&rose_list_lock);
1450 }
1451 
1452 static int rose_info_show(struct seq_file *seq, void *v)
1453 {
1454 	char buf[11], rsbuf[11];
1455 
1456 	if (v == SEQ_START_TOKEN)
1457 		seq_puts(seq,
1458 			 "dest_addr  dest_call src_addr   src_call  dev   lci neigh st vs vr va   t  t1  t2  t3  hb    idle Snd-Q Rcv-Q inode\n");
1459 
1460 	else {
1461 		struct sock *s = sk_entry(v);
1462 		struct rose_sock *rose = rose_sk(s);
1463 		const char *devname, *callsign;
1464 		const struct net_device *dev = rose->device;
1465 
1466 		if (!dev)
1467 			devname = "???";
1468 		else
1469 			devname = dev->name;
1470 
1471 		seq_printf(seq, "%-10s %-9s ",
1472 			   rose2asc(rsbuf, &rose->dest_addr),
1473 			   ax2asc(buf, &rose->dest_call));
1474 
1475 		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
1476 			callsign = "??????-?";
1477 		else
1478 			callsign = ax2asc(buf, &rose->source_call);
1479 
1480 		seq_printf(seq,
1481 			   "%-10s %-9s %-5s %3.3X %05d  %d  %d  %d  %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
1482 			rose2asc(rsbuf, &rose->source_addr),
1483 			callsign,
1484 			devname,
1485 			rose->lci & 0x0FFF,
1486 			(rose->neighbour) ? rose->neighbour->number : 0,
1487 			rose->state,
1488 			rose->vs,
1489 			rose->vr,
1490 			rose->va,
1491 			ax25_display_timer(&rose->timer) / HZ,
1492 			rose->t1 / HZ,
1493 			rose->t2 / HZ,
1494 			rose->t3 / HZ,
1495 			rose->hb / HZ,
1496 			ax25_display_timer(&rose->idletimer) / (60 * HZ),
1497 			rose->idle / (60 * HZ),
1498 			sk_wmem_alloc_get(s),
1499 			sk_rmem_alloc_get(s),
1500 			s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
1501 	}
1502 
1503 	return 0;
1504 }
1505 
1506 static const struct seq_operations rose_info_seqops = {
1507 	.start = rose_info_start,
1508 	.next = rose_info_next,
1509 	.stop = rose_info_stop,
1510 	.show = rose_info_show,
1511 };
1512 #endif	/* CONFIG_PROC_FS */
1513 
1514 static const struct net_proto_family rose_family_ops = {
1515 	.family		=	PF_ROSE,
1516 	.create		=	rose_create,
1517 	.owner		=	THIS_MODULE,
1518 };
1519 
1520 static const struct proto_ops rose_proto_ops = {
1521 	.family		=	PF_ROSE,
1522 	.owner		=	THIS_MODULE,
1523 	.release	=	rose_release,
1524 	.bind		=	rose_bind,
1525 	.connect	=	rose_connect,
1526 	.socketpair	=	sock_no_socketpair,
1527 	.accept		=	rose_accept,
1528 	.getname	=	rose_getname,
1529 	.poll		=	datagram_poll,
1530 	.ioctl		=	rose_ioctl,
1531 	.gettstamp	=	sock_gettstamp,
1532 	.listen		=	rose_listen,
1533 	.shutdown	=	sock_no_shutdown,
1534 	.setsockopt	=	rose_setsockopt,
1535 	.getsockopt	=	rose_getsockopt,
1536 	.sendmsg	=	rose_sendmsg,
1537 	.recvmsg	=	rose_recvmsg,
1538 	.mmap		=	sock_no_mmap,
1539 };
1540 
1541 static struct notifier_block rose_dev_notifier = {
1542 	.notifier_call	=	rose_device_event,
1543 };
1544 
1545 static struct net_device **dev_rose;
1546 
1547 static struct ax25_protocol rose_pid = {
1548 	.pid	= AX25_P_ROSE,
1549 	.func	= rose_route_frame
1550 };
1551 
1552 static struct ax25_linkfail rose_linkfail_notifier = {
1553 	.func	= rose_link_failed
1554 };
1555 
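/*
 *	Module initialisation: register the protocol and socket family,
 *	create the rose_ndevs virtual devices, hook into AX.25 via the PID
 *	and link-failure handlers, and set up sysctl, loopback and the
 *	/proc/net entries.  Everything is unwound on failure.
 */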
1556 static int __init rose_proto_init(void)
1557 {
1558 	int i;
1559 	int rc;
1560 
1561 	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
1562 		printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
1563 		rc = -EINVAL;
1564 		goto out;
1565 	}
1566 
1567 	rc = proto_register(&rose_proto, 0);
1568 	if (rc != 0)
1569 		goto out;
1570 
1571 	rose_callsign = null_ax25_address;
1572 
1573 	dev_rose = kcalloc(rose_ndevs, sizeof(struct net_device *),
1574 			   GFP_KERNEL);
1575 	if (dev_rose == NULL) {
1576 		printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
1577 		rc = -ENOMEM;
1578 		goto out_proto_unregister;
1579 	}
1580 
1581 	for (i = 0; i < rose_ndevs; i++) {
1582 		struct net_device *dev;
1583 		char name[IFNAMSIZ];
1584 
1585 		sprintf(name, "rose%d", i);
1586 		dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, rose_setup);
1587 		if (!dev) {
1588 			printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
1589 			rc = -ENOMEM;
1590 			goto fail;
1591 		}
1592 		rc = register_netdev(dev);
1593 		if (rc) {
1594 			printk(KERN_ERR "ROSE: netdevice registration failed\n");
1595 			free_netdev(dev);
1596 			goto fail;
1597 		}
1598 		rose_set_lockdep_key(dev);
1599 		dev_rose[i] = dev;
1600 	}
1601 
1602 	sock_register(&rose_family_ops);
1603 	register_netdevice_notifier(&rose_dev_notifier);
1604 
1605 	ax25_register_pid(&rose_pid);
1606 	ax25_linkfail_register(&rose_linkfail_notifier);
1607 
1608 #ifdef CONFIG_SYSCTL
1609 	rose_register_sysctl();
1610 #endif
1611 	rose_loopback_init();
1612 
1613 	rose_add_loopback_neigh();
1614 
1615 	proc_create_seq("rose", 0444, init_net.proc_net, &rose_info_seqops);
1616 	proc_create_seq("rose_neigh", 0444, init_net.proc_net,
1617 		    &rose_neigh_seqops);
1618 	proc_create_seq("rose_nodes", 0444, init_net.proc_net,
1619 		    &rose_node_seqops);
1620 	proc_create_seq("rose_routes", 0444, init_net.proc_net,
1621 		    &rose_route_seqops);
1622 out:
1623 	return rc;
1624 fail:
1625 	while (--i >= 0) {
1626 		unregister_netdev(dev_rose[i]);
1627 		free_netdev(dev_rose[i]);
1628 	}
1629 	kfree(dev_rose);
1630 out_proto_unregister:
1631 	proto_unregister(&rose_proto);
1632 	goto out;
1633 }
1634 module_init(rose_proto_init);
1635 
1636 module_param(rose_ndevs, int, 0);
1637 MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
1638 
1639 MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
1640 MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
1641 MODULE_LICENSE("GPL");
1642 MODULE_ALIAS_NETPROTO(PF_ROSE);
1643 
1644 static void __exit rose_exit(void)
1645 {
1646 	int i;
1647 
1648 	remove_proc_entry("rose", init_net.proc_net);
1649 	remove_proc_entry("rose_neigh", init_net.proc_net);
1650 	remove_proc_entry("rose_nodes", init_net.proc_net);
1651 	remove_proc_entry("rose_routes", init_net.proc_net);
1652 	rose_loopback_clear();
1653 
1654 	rose_rt_free();
1655 
1656 	ax25_protocol_release(AX25_P_ROSE);
1657 	ax25_linkfail_release(&rose_linkfail_notifier);
1658 
1659 	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1660 		ax25_listen_release(&rose_callsign, NULL);
1661 
1662 #ifdef CONFIG_SYSCTL
1663 	rose_unregister_sysctl();
1664 #endif
1665 	unregister_netdevice_notifier(&rose_dev_notifier);
1666 
1667 	sock_unregister(PF_ROSE);
1668 
1669 	for (i = 0; i < rose_ndevs; i++) {
1670 		struct net_device *dev = dev_rose[i];
1671 
1672 		if (dev) {
1673 			unregister_netdev(dev);
1674 			free_netdev(dev);
1675 		}
1676 	}
1677 
1678 	kfree(dev_rose);
1679 	proto_unregister(&rose_proto);
1680 }
1681 
1682 module_exit(rose_exit);
1683