// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic HDLC support routines for Linux
 * Cisco HDLC support
 *
 * Copyright (C) 2000 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 */

#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

#undef DEBUG_HARD_HEADER

#define CISCO_MULTICAST		0x8F	/* Cisco multicast address */
#define CISCO_UNICAST		0x0F	/* Cisco unicast address */
#define CISCO_KEEPALIVE		0x8035	/* Cisco keepalive protocol */
#define CISCO_SYS_INFO		0x2000	/* Cisco interface/system info */
#define CISCO_ADDR_REQ		0	/* Cisco address request */
#define CISCO_ADDR_REPLY	1	/* Cisco address reply */
#define CISCO_KEEPALIVE_REQ	2	/* Cisco keepalive request */

struct hdlc_header {
	u8 address;
	u8 control;
	__be16 protocol;
} __packed;

struct cisco_packet {
	__be32 type;		/* code */
	__be32 par1;
	__be32 par2;
	__be16 rel;		/* reliability */
	__be32 time;
} __packed;
#define CISCO_PACKET_LEN	18
#define CISCO_BIG_PACKET_LEN	20

struct cisco_state {
	cisco_proto settings;

	struct timer_list timer;
	struct net_device *dev;
	spinlock_t lock;
	unsigned long last_poll;
	int up;
	u32 txseq; /* TX sequence number, 0 = none */
	u32 rxseq; /* RX sequence number */
};

static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs);

static inline struct cisco_state *state(hdlc_device *hdlc)
{
	return (struct cisco_state *)hdlc->state;
}

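/* Prepend the 4-byte Cisco HDLC header (address, control, protocol);
 * keepalive frames use the multicast address, everything else unicast.
 */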
static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
			     u16 type, const void *daddr, const void *saddr,
			     unsigned int len)
{
	struct hdlc_header *data;
#ifdef DEBUG_HARD_HEADER
	netdev_dbg(dev, "%s called\n", __func__);
#endif

	skb_push(skb, sizeof(struct hdlc_header));
	data = (struct hdlc_header *)skb->data;
	if (type == CISCO_KEEPALIVE)
		data->address = CISCO_MULTICAST;
	else
		data->address = CISCO_UNICAST;
	data->control = 0;
	data->protocol = htons(type);

	return sizeof(struct hdlc_header);
}

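/* Build and queue a Cisco keepalive/address-reply control packet:
 * type, two parameters, reliability field and an uptime timestamp.
 */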
static void cisco_keepalive_send(struct net_device *dev, u32 type,
				 __be32 par1, __be32 par2)
{
	struct sk_buff *skb;
	struct cisco_packet *data;

	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
			    sizeof(struct cisco_packet));
	if (!skb)
		return;

	skb_reserve(skb, 4);
	cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
	data = (struct cisco_packet *)(skb->data + 4);

	data->type = htonl(type);
	data->par1 = par1;
	data->par2 = par2;
	data->rel = cpu_to_be16(0xFFFF);
	/* we will need do_div here if 1000 % HZ != 0 */
	data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ));

	skb_put(skb, sizeof(struct cisco_packet));
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb->protocol = htons(ETH_P_HDLC);
	skb_reset_network_header(skb);

	dev_queue_xmit(skb);
}

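/* Map the protocol field of a received frame to a packet type for the
 * stack; anything unrecognized is left as ETH_P_HDLC.
 */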
static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct hdlc_header *data = (struct hdlc_header *)skb->data;

	if (skb->len < sizeof(struct hdlc_header))
		return cpu_to_be16(ETH_P_HDLC);

	if (data->address != CISCO_MULTICAST &&
	    data->address != CISCO_UNICAST)
		return cpu_to_be16(ETH_P_HDLC);

	switch (data->protocol) {
	case cpu_to_be16(ETH_P_IP):
	case cpu_to_be16(ETH_P_IPX):
	case cpu_to_be16(ETH_P_IPV6):
		skb_pull(skb, sizeof(struct hdlc_header));
		return data->protocol;
	default:
		return cpu_to_be16(ETH_P_HDLC);
	}
}

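/* Receive handler for Cisco control traffic: answers address requests
 * with the interface address and uses keepalive sequence numbers to
 * track whether the peer is alive.
 */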
static int cisco_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	struct hdlc_header *data = (struct hdlc_header *)skb->data;
	struct cisco_packet *cisco_data;
	struct in_device *in_dev;
	__be32 addr, mask;
	u32 ack;

	if (skb->len < sizeof(struct hdlc_header))
		goto rx_error;

	if (data->address != CISCO_MULTICAST &&
	    data->address != CISCO_UNICAST)
		goto rx_error;

	switch (ntohs(data->protocol)) {
	case CISCO_SYS_INFO:
		/* Packet is not needed, drop it. */
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;

	case CISCO_KEEPALIVE:
		if ((skb->len != sizeof(struct hdlc_header) +
		     CISCO_PACKET_LEN) &&
		    (skb->len != sizeof(struct hdlc_header) +
		     CISCO_BIG_PACKET_LEN)) {
			netdev_info(dev, "Invalid length of Cisco control packet (%d bytes)\n",
				    skb->len);
			goto rx_error;
		}

		cisco_data = (struct cisco_packet *)(skb->data + sizeof
						     (struct hdlc_header));

		switch (ntohl(cisco_data->type)) {
		case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
			rcu_read_lock();
			in_dev = __in_dev_get_rcu(dev);
			addr = 0;
			mask = ~cpu_to_be32(0); /* is the mask correct? */

			if (in_dev != NULL) {
				const struct in_ifaddr *ifa;

				in_dev_for_each_ifa_rcu(ifa, in_dev) {
					if (strcmp(dev->name,
						   ifa->ifa_label) == 0) {
						addr = ifa->ifa_local;
						mask = ifa->ifa_mask;
						break;
					}
				}

				cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
						     addr, mask);
			}
			rcu_read_unlock();
			dev_kfree_skb_any(skb);
			return NET_RX_SUCCESS;

		case CISCO_ADDR_REPLY:
			netdev_info(dev, "Unexpected Cisco IP address reply\n");
			goto rx_error;

		case CISCO_KEEPALIVE_REQ:
			spin_lock(&st->lock);
			st->rxseq = ntohl(cisco_data->par1);
			ack = ntohl(cisco_data->par2);
			if (ack && (ack == st->txseq ||
				    /* our current REQ may be in transit */
				    ack == st->txseq - 1)) {
				st->last_poll = jiffies;
				if (!st->up) {
					u32 sec, min, hrs, days;

					sec = ntohl(cisco_data->time) / 1000;
					min = sec / 60; sec -= min * 60;
					hrs = min / 60; min -= hrs * 60;
					days = hrs / 24; hrs -= days * 24;
					netdev_info(dev, "Link up (peer uptime %ud%uh%um%us)\n",
						    days, hrs, min, sec);
					netif_dormant_off(dev);
					st->up = 1;
				}
			}
			spin_unlock(&st->lock);

			dev_kfree_skb_any(skb);
			return NET_RX_SUCCESS;
		} /* switch (keepalive type) */
	} /* switch (protocol) */

	netdev_info(dev, "Unsupported protocol %x\n", ntohs(data->protocol));
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;

rx_error:
	dev->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}

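/* Periodic keepalive timer: declare the link down if the peer has not
 * acknowledged within the configured timeout, then send the next request
 * and re-arm the timer.
 */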
static void cisco_timer(struct timer_list *t)
{
	struct cisco_state *st = from_timer(st, t, timer);
	struct net_device *dev = st->dev;

	spin_lock(&st->lock);
	if (st->up &&
	    time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) {
		st->up = 0;
		netdev_info(dev, "Link down\n");
		netif_dormant_on(dev);
	}

	cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
			     htonl(st->rxseq));
	spin_unlock(&st->lock);

	st->timer.expires = jiffies + st->settings.interval * HZ;
	add_timer(&st->timer);
}

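/* Called when the interface is brought up: reset the protocol state and
 * start the keepalive timer.
 */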
static void cisco_start(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	unsigned long flags;

	spin_lock_irqsave(&st->lock, flags);
	st->up = st->txseq = st->rxseq = 0;
	spin_unlock_irqrestore(&st->lock, flags);

	st->dev = dev;
	timer_setup(&st->timer, cisco_timer, 0);
	st->timer.expires = jiffies + HZ; /* First poll after 1 s */
	add_timer(&st->timer);
}

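/* Called when the interface goes down: stop the keepalive timer and mark
 * the link dormant.
 */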
static void cisco_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	unsigned long flags;

	del_timer_sync(&st->timer);

	spin_lock_irqsave(&st->lock, flags);
	netif_dormant_on(dev);
	st->up = st->txseq = 0;
	spin_unlock_irqrestore(&st->lock, flags);
}

static struct hdlc_proto proto = {
	.start		= cisco_start,
	.stop		= cisco_stop,
	.type_trans	= cisco_type_trans,
	.ioctl		= cisco_ioctl,
	.netif_rx	= cisco_rx,
	.module		= THIS_MODULE,
};

static const struct header_ops cisco_header_ops = {
	.create = cisco_hard_header,
};

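/* IF_GET_PROTO returns the current cisco_proto settings to user space;
 * IF_PROTO_CISCO validates new settings, attaches the hardware driver and
 * this protocol, and switches the device to ARPHRD_CISCO.
 */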
static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs)
{
	cisco_proto __user *cisco_s = ifs->ifs_ifsu.cisco;
	const size_t size = sizeof(cisco_proto);
	cisco_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int result;

	switch (ifs->type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto)
			return -EINVAL;
		ifs->type = IF_PROTO_CISCO;
		if (ifs->size < size) {
			ifs->size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(cisco_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_CISCO:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, cisco_s, size))
			return -EFAULT;

		if (new_settings.interval < 1 ||
		    new_settings.timeout < 2)
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		result = attach_hdlc_protocol(dev, &proto,
					      sizeof(struct cisco_state));
		if (result)
			return result;

		memcpy(&state(hdlc)->settings, &new_settings, size);
		spin_lock_init(&state(hdlc)->lock);
		dev->header_ops = &cisco_header_ops;
		dev->hard_header_len = sizeof(struct hdlc_header);
		dev->type = ARPHRD_CISCO;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		netif_dormant_on(dev);
		return 0;
	}

	return -EINVAL;
}

static int __init hdlc_cisco_init(void)
{
	register_hdlc_protocol(&proto);
	return 0;
}

static void __exit hdlc_cisco_exit(void)
{
	unregister_hdlc_protocol(&proto);
}

module_init(hdlc_cisco_init);
module_exit(hdlc_cisco_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");