xref: /linux/drivers/net/wan/hdlc_cisco.c (revision b7019ac550eb3916f34d79db583e9b7ea2524afa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Generic HDLC support routines for Linux
4  * Cisco HDLC support
5  *
6  * Copyright (C) 2000 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
7  */
8 
9 #include <linux/errno.h>
10 #include <linux/hdlc.h>
11 #include <linux/if_arp.h>
12 #include <linux/inetdevice.h>
13 #include <linux/init.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/pkt_sched.h>
17 #include <linux/poll.h>
18 #include <linux/rtnetlink.h>
19 #include <linux/skbuff.h>
20 
#undef DEBUG_HARD_HEADER

/* On-wire constants for Cisco HDLC framing and its keepalive protocol. */
#define CISCO_MULTICAST		0x8F	/* Cisco multicast address */
#define CISCO_UNICAST		0x0F	/* Cisco unicast address */
#define CISCO_KEEPALIVE		0x8035	/* Cisco keepalive protocol */
#define CISCO_SYS_INFO		0x2000	/* Cisco interface/system info */
#define CISCO_ADDR_REQ		0	/* Cisco address request */
#define CISCO_ADDR_REPLY	1	/* Cisco address reply */
#define CISCO_KEEPALIVE_REQ	2	/* Cisco keepalive request */
31 
/* On-wire Cisco HDLC frame header; __packed keeps it exactly 4 bytes. */
struct hdlc_header {
	u8 address;		/* CISCO_MULTICAST or CISCO_UNICAST */
	u8 control;		/* always 0 */
	__be16 protocol;	/* EtherType-style protocol ID, network order */
}__packed;
37 
38 
/*
 * Keepalive packet payload, sent on the wire immediately after
 * struct hdlc_header; __packed preserves the 18-byte wire layout.
 */
struct cisco_packet {
	__be32 type;		/* code */
	__be32 par1;		/* address (ADDR_REPLY) or TX seq (KEEPALIVE_REQ) */
	__be32 par2;		/* netmask (ADDR_REPLY) or RX seq (KEEPALIVE_REQ) */
	__be16 rel;		/* reliability */
	__be32 time;		/* sender uptime in milliseconds */
}__packed;
/* Two payload sizes are accepted on receive (see cisco_rx()). */
#define	CISCO_PACKET_LEN	18
#define	CISCO_BIG_PACKET_LEN	20
48 
49 
/* Per-device protocol state, allocated by attach_hdlc_protocol(). */
struct cisco_state {
	cisco_proto settings;	/* interval/timeout copied in via ioctl */

	struct timer_list timer;	/* periodic keepalive timer */
	struct net_device *dev;		/* back-pointer for the timer callback */
	spinlock_t lock;		/* protects up, txseq, rxseq, last_poll */
	unsigned long last_poll;	/* jiffies of last acknowledged keepalive */
	int up;				/* nonzero while the peer answers keepalives */
	u32 txseq; /* TX sequence number, 0 = none */
	u32 rxseq; /* RX sequence number */
};
61 
62 
/* Forward declaration — referenced by the hdlc_proto ops table below. */
static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr);
64 
65 
66 static inline struct cisco_state* state(hdlc_device *hdlc)
67 {
68 	return (struct cisco_state *)hdlc->state;
69 }
70 
71 
72 static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
73 			     u16 type, const void *daddr, const void *saddr,
74 			     unsigned int len)
75 {
76 	struct hdlc_header *data;
77 #ifdef DEBUG_HARD_HEADER
78 	printk(KERN_DEBUG "%s: cisco_hard_header called\n", dev->name);
79 #endif
80 
81 	skb_push(skb, sizeof(struct hdlc_header));
82 	data = (struct hdlc_header*)skb->data;
83 	if (type == CISCO_KEEPALIVE)
84 		data->address = CISCO_MULTICAST;
85 	else
86 		data->address = CISCO_UNICAST;
87 	data->control = 0;
88 	data->protocol = htons(type);
89 
90 	return sizeof(struct hdlc_header);
91 }
92 
93 
94 
95 static void cisco_keepalive_send(struct net_device *dev, u32 type,
96 				 __be32 par1, __be32 par2)
97 {
98 	struct sk_buff *skb;
99 	struct cisco_packet *data;
100 
101 	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
102 			    sizeof(struct cisco_packet));
103 	if (!skb) {
104 		netdev_warn(dev, "Memory squeeze on cisco_keepalive_send()\n");
105 		return;
106 	}
107 	skb_reserve(skb, 4);
108 	cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
109 	data = (struct cisco_packet*)(skb->data + 4);
110 
111 	data->type = htonl(type);
112 	data->par1 = par1;
113 	data->par2 = par2;
114 	data->rel = cpu_to_be16(0xFFFF);
115 	/* we will need do_div here if 1000 % HZ != 0 */
116 	data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ));
117 
118 	skb_put(skb, sizeof(struct cisco_packet));
119 	skb->priority = TC_PRIO_CONTROL;
120 	skb->dev = dev;
121 	skb_reset_network_header(skb);
122 
123 	dev_queue_xmit(skb);
124 }
125 
126 
127 
128 static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
129 {
130 	struct hdlc_header *data = (struct hdlc_header*)skb->data;
131 
132 	if (skb->len < sizeof(struct hdlc_header))
133 		return cpu_to_be16(ETH_P_HDLC);
134 
135 	if (data->address != CISCO_MULTICAST &&
136 	    data->address != CISCO_UNICAST)
137 		return cpu_to_be16(ETH_P_HDLC);
138 
139 	switch (data->protocol) {
140 	case cpu_to_be16(ETH_P_IP):
141 	case cpu_to_be16(ETH_P_IPX):
142 	case cpu_to_be16(ETH_P_IPV6):
143 		skb_pull(skb, sizeof(struct hdlc_header));
144 		return data->protocol;
145 	default:
146 		return cpu_to_be16(ETH_P_HDLC);
147 	}
148 }
149 
150 
/*
 * hdlc_proto->netif_rx hook: consume Cisco control traffic.
 *
 * Handles SYS_INFO (silently dropped), address requests (answered with
 * this interface's IPv4 address/netmask) and keepalive requests
 * (sequence numbers checked to drive the link up/down state machine).
 * Always consumes the skb; returns NET_RX_SUCCESS or NET_RX_DROP.
 */
static int cisco_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	struct hdlc_header *data = (struct hdlc_header*)skb->data;
	struct cisco_packet *cisco_data;
	struct in_device *in_dev;
	__be32 addr, mask;
	u32 ack;

	/* Frame must at least hold the HDLC header and carry a valid
	 * address byte, otherwise count it as an RX error. */
	if (skb->len < sizeof(struct hdlc_header))
		goto rx_error;

	if (data->address != CISCO_MULTICAST &&
	    data->address != CISCO_UNICAST)
		goto rx_error;

	switch (ntohs(data->protocol)) {
	case CISCO_SYS_INFO:
		/* Packet is not needed, drop it. */
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;

	case CISCO_KEEPALIVE:
		/* Both 18- and 20-byte control payloads occur on the wire. */
		if ((skb->len != sizeof(struct hdlc_header) +
		     CISCO_PACKET_LEN) &&
		    (skb->len != sizeof(struct hdlc_header) +
		     CISCO_BIG_PACKET_LEN)) {
			netdev_info(dev, "Invalid length of Cisco control packet (%d bytes)\n",
				    skb->len);
			goto rx_error;
		}

		cisco_data = (struct cisco_packet*)(skb->data + sizeof
						    (struct hdlc_header));

		switch (ntohl (cisco_data->type)) {
		case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
			rcu_read_lock();
			in_dev = __in_dev_get_rcu(dev);
			addr = 0;
			/* Default reply: address 0, all-ones mask. */
			mask = ~cpu_to_be32(0); /* is the mask correct? */

			if (in_dev != NULL) {
				/* Walk the IPv4 address list for the entry
				 * labelled with this device's name.
				 * NOTE(review): newer kernels traverse
				 * ifa_list with in_dev_for_each_ifa_rcu();
				 * confirm this open-coded walk is still the
				 * intended pattern for this kernel version.
				 */
				struct in_ifaddr **ifap = &in_dev->ifa_list;

				while (*ifap != NULL) {
					if (strcmp(dev->name,
						   (*ifap)->ifa_label) == 0) {
						addr = (*ifap)->ifa_local;
						mask = (*ifap)->ifa_mask;
						break;
					}
					ifap = &(*ifap)->ifa_next;
				}

				cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
						     addr, mask);
			}
			rcu_read_unlock();
			dev_kfree_skb_any(skb);
			return NET_RX_SUCCESS;

		case CISCO_ADDR_REPLY:
			netdev_info(dev, "Unexpected Cisco IP address reply\n");
			goto rx_error;

		case CISCO_KEEPALIVE_REQ:
			spin_lock(&st->lock);
			st->rxseq = ntohl(cisco_data->par1);
			ack = ntohl(cisco_data->par2);
			/* Peer acked either our latest REQ or the previous
			 * one (a new REQ may still be in flight). */
			if (ack && (ack == st->txseq ||
				    /* our current REQ may be in transit */
				    ack == st->txseq - 1)) {
				st->last_poll = jiffies;
				if (!st->up) {
					/* Peer's uptime (ms) broken down for
					 * the "Link up" log message. */
					u32 sec, min, hrs, days;
					sec = ntohl(cisco_data->time) / 1000;
					min = sec / 60; sec -= min * 60;
					hrs = min / 60; min -= hrs * 60;
					days = hrs / 24; hrs -= days * 24;
					netdev_info(dev, "Link up (peer uptime %ud%uh%um%us)\n",
						    days, hrs, min, sec);
					netif_dormant_off(dev);
					st->up = 1;
				}
			}
			spin_unlock(&st->lock);

			dev_kfree_skb_any(skb);
			return NET_RX_SUCCESS;
		} /* switch (keepalive type) */
	} /* switch (protocol) */

	netdev_info(dev, "Unsupported protocol %x\n", ntohs(data->protocol));
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;

rx_error:
	dev->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}
255 
256 
257 
258 static void cisco_timer(struct timer_list *t)
259 {
260 	struct cisco_state *st = from_timer(st, t, timer);
261 	struct net_device *dev = st->dev;
262 
263 	spin_lock(&st->lock);
264 	if (st->up &&
265 	    time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) {
266 		st->up = 0;
267 		netdev_info(dev, "Link down\n");
268 		netif_dormant_on(dev);
269 	}
270 
271 	cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
272 			     htonl(st->rxseq));
273 	spin_unlock(&st->lock);
274 
275 	st->timer.expires = jiffies + st->settings.interval * HZ;
276 	add_timer(&st->timer);
277 }
278 
279 
280 
281 static void cisco_start(struct net_device *dev)
282 {
283 	hdlc_device *hdlc = dev_to_hdlc(dev);
284 	struct cisco_state *st = state(hdlc);
285 	unsigned long flags;
286 
287 	spin_lock_irqsave(&st->lock, flags);
288 	st->up = st->txseq = st->rxseq = 0;
289 	spin_unlock_irqrestore(&st->lock, flags);
290 
291 	st->dev = dev;
292 	timer_setup(&st->timer, cisco_timer, 0);
293 	st->timer.expires = jiffies + HZ; /* First poll after 1 s */
294 	add_timer(&st->timer);
295 }
296 
297 
298 
299 static void cisco_stop(struct net_device *dev)
300 {
301 	hdlc_device *hdlc = dev_to_hdlc(dev);
302 	struct cisco_state *st = state(hdlc);
303 	unsigned long flags;
304 
305 	del_timer_sync(&st->timer);
306 
307 	spin_lock_irqsave(&st->lock, flags);
308 	netif_dormant_on(dev);
309 	st->up = st->txseq = 0;
310 	spin_unlock_irqrestore(&st->lock, flags);
311 }
312 
313 
/* Hooks registered with the generic HDLC layer (hdlc.c). */
static struct hdlc_proto proto = {
	.start		= cisco_start,
	.stop		= cisco_stop,
	.type_trans	= cisco_type_trans,
	.ioctl		= cisco_ioctl,
	.netif_rx	= cisco_rx,
	.module		= THIS_MODULE,
};
322 
/* Installed on the netdev while the Cisco protocol is attached. */
static const struct header_ops cisco_header_ops = {
	.create = cisco_hard_header,
};
326 
327 static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
328 {
329 	cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
330 	const size_t size = sizeof(cisco_proto);
331 	cisco_proto new_settings;
332 	hdlc_device *hdlc = dev_to_hdlc(dev);
333 	int result;
334 
335 	switch (ifr->ifr_settings.type) {
336 	case IF_GET_PROTO:
337 		if (dev_to_hdlc(dev)->proto != &proto)
338 			return -EINVAL;
339 		ifr->ifr_settings.type = IF_PROTO_CISCO;
340 		if (ifr->ifr_settings.size < size) {
341 			ifr->ifr_settings.size = size; /* data size wanted */
342 			return -ENOBUFS;
343 		}
344 		if (copy_to_user(cisco_s, &state(hdlc)->settings, size))
345 			return -EFAULT;
346 		return 0;
347 
348 	case IF_PROTO_CISCO:
349 		if (!capable(CAP_NET_ADMIN))
350 			return -EPERM;
351 
352 		if (dev->flags & IFF_UP)
353 			return -EBUSY;
354 
355 		if (copy_from_user(&new_settings, cisco_s, size))
356 			return -EFAULT;
357 
358 		if (new_settings.interval < 1 ||
359 		    new_settings.timeout < 2)
360 			return -EINVAL;
361 
362 		result = hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
363 		if (result)
364 			return result;
365 
366 		result = attach_hdlc_protocol(dev, &proto,
367 					      sizeof(struct cisco_state));
368 		if (result)
369 			return result;
370 
371 		memcpy(&state(hdlc)->settings, &new_settings, size);
372 		spin_lock_init(&state(hdlc)->lock);
373 		dev->header_ops = &cisco_header_ops;
374 		dev->type = ARPHRD_CISCO;
375 		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
376 		netif_dormant_on(dev);
377 		return 0;
378 	}
379 
380 	return -EINVAL;
381 }
382 
383 
384 static int __init mod_init(void)
385 {
386 	register_hdlc_protocol(&proto);
387 	return 0;
388 }
389 
390 
391 
/* Module exit: unregister the protocol from the generic HDLC layer. */
static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}
396 
397 
398 module_init(mod_init);
399 module_exit(mod_exit);
400 
401 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
402 MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC");
403 MODULE_LICENSE("GPL v2");
404