xref: /linux/net/x25/x25_forward.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
1 /*
2  *	This module:
3  *		This module is free software; you can redistribute it and/or
4  *		modify it under the terms of the GNU General Public License
5  *		as published by the Free Software Foundation; either version
6  *		2 of the License, or (at your option) any later version.
7  *
8  *	History
9  *	03-01-2007	Added forwarding for x.25	Andrew Hendry
10  */
11 #include <linux/if_arp.h>
12 #include <linux/init.h>
13 #include <net/x25.h>
14 
/* Registry of active forwarded calls: each entry pairs the two devices
 * (dev1/dev2) carrying a forwarded call on a given lci.
 */
15 struct list_head x25_forward_list = LIST_HEAD_INIT(x25_forward_list);
/* Guards x25_forward_list; every user takes it with BHs disabled
 * (read_lock_bh/write_lock_bh), as the list is touched from softirq paths.
 */
16 DEFINE_RWLOCK(x25_forward_list_lock);
17 
18 int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
19 			struct sk_buff *skb, int lci)
20 {
21 	struct x25_route *rt;
22 	struct x25_neigh *neigh_new = NULL;
23 	struct list_head *entry;
24 	struct x25_forward *x25_frwd, *new_frwd;
25 	struct sk_buff *skbn;
26 	short same_lci = 0;
27 	int rc = 0;
28 
29 	if ((rt = x25_get_route(dest_addr)) != NULL) {
30 
31 		if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
32 			/* This shouldnt happen, if it occurs somehow
33 			 * do something sensible
34 			 */
35 			goto out_put_route;
36 		}
37 
38 		/* Avoid a loop. This is the normal exit path for a
39 		 * system with only one x.25 iface and default route
40 		 */
41 		if (rt->dev == from->dev) {
42 			goto out_put_nb;
43 		}
44 
45 		/* Remote end sending a call request on an already
46 		 * established LCI? It shouldnt happen, just in case..
47 		 */
48 		read_lock_bh(&x25_forward_list_lock);
49 		list_for_each(entry, &x25_forward_list) {
50 			x25_frwd = list_entry(entry, struct x25_forward, node);
51 			if (x25_frwd->lci == lci) {
52 				printk(KERN_WARNING "X.25: call request for lci which is already registered!, transmitting but not registering new pair\n");
53 				same_lci = 1;
54 			}
55 		}
56 		read_unlock_bh(&x25_forward_list_lock);
57 
58 		/* Save the forwarding details for future traffic */
59 		if (!same_lci){
60 			if ((new_frwd = kmalloc(sizeof(struct x25_forward),
61 							GFP_ATOMIC)) == NULL){
62 				rc = -ENOMEM;
63 				goto out_put_nb;
64 			}
65 			new_frwd->lci = lci;
66 			new_frwd->dev1 = rt->dev;
67 			new_frwd->dev2 = from->dev;
68 			write_lock_bh(&x25_forward_list_lock);
69 			list_add(&new_frwd->node, &x25_forward_list);
70 			write_unlock_bh(&x25_forward_list_lock);
71 		}
72 
73 		/* Forward the call request */
74 		if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
75 			goto out_put_nb;
76 		}
77 		x25_transmit_link(skbn, neigh_new);
78 		rc = 1;
79 	}
80 
81 
82 out_put_nb:
83 	x25_neigh_put(neigh_new);
84 
85 out_put_route:
86 	x25_route_put(rt);
87 	return rc;
88 }
89 
90 
91 int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {
92 
93 	struct x25_forward *frwd;
94 	struct list_head *entry;
95 	struct net_device *peer = NULL;
96 	struct x25_neigh *nb;
97 	struct sk_buff *skbn;
98 	int rc = 0;
99 
100 	read_lock_bh(&x25_forward_list_lock);
101 	list_for_each(entry, &x25_forward_list) {
102 		frwd = list_entry(entry, struct x25_forward, node);
103 		if (frwd->lci == lci) {
104 			/* The call is established, either side can send */
105 			if (from->dev == frwd->dev1) {
106 				peer = frwd->dev2;
107 			} else {
108 				peer = frwd->dev1;
109 			}
110 			break;
111 		}
112 	}
113 	read_unlock_bh(&x25_forward_list_lock);
114 
115 	if ( (nb = x25_get_neigh(peer)) == NULL)
116 		goto out;
117 
118 	if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
119 		goto out;
120 
121 	}
122 	x25_transmit_link(skbn, nb);
123 
124 	x25_neigh_put(nb);
125 	rc = 1;
126 out:
127 	return rc;
128 }
129 
130 void x25_clear_forward_by_lci(unsigned int lci)
131 {
132 	struct x25_forward *fwd;
133 	struct list_head *entry, *tmp;
134 
135 	write_lock_bh(&x25_forward_list_lock);
136 
137 	list_for_each_safe(entry, tmp, &x25_forward_list) {
138 		fwd = list_entry(entry, struct x25_forward, node);
139 		if (fwd->lci == lci) {
140 			list_del(&fwd->node);
141 			kfree(fwd);
142 		}
143 	}
144 	write_unlock_bh(&x25_forward_list_lock);
145 }
146 
147 
148 void x25_clear_forward_by_dev(struct net_device *dev)
149 {
150 	struct x25_forward *fwd;
151 	struct list_head *entry, *tmp;
152 
153 	write_lock_bh(&x25_forward_list_lock);
154 
155 	list_for_each_safe(entry, tmp, &x25_forward_list) {
156 		fwd = list_entry(entry, struct x25_forward, node);
157 		if ((fwd->dev1 == dev) || (fwd->dev2 == dev)){
158 			list_del(&fwd->node);
159 			kfree(fwd);
160 		}
161 	}
162 	write_unlock_bh(&x25_forward_list_lock);
163 }
164