xref: /linux/drivers/net/wan/hdlc_fr.c (revision 43347d56c8d9dd732cee2f8efd384ad21dd1f6c4)
1 /*
2  * Generic HDLC support routines for Linux
3  * Frame Relay support
4  *
5  * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms of version 2 of the GNU General Public License
9  * as published by the Free Software Foundation.
10  *
11 
12             Theory of PVC state
13 
14  DCE mode:
15 
16  (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
17          0,x -> 1,1 if "link reliable" when sending FULL STATUS
18          1,1 -> 1,0 if received FULL STATUS ACK
19 
20  (active)    -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
21              -> 1 when "PVC up" and (exist,new) = 1,0
22 
23  DTE mode:
24  (exist,new,active) = FULL STATUS if "link reliable"
25 		    = 0, 0, 0 if "link unreliable"
26  No LMI:
27  active = open and "link reliable"
28  exist = new = not used
29 
30  CCITT LMI: ITU-T Q.933 Annex A
31  ANSI LMI: ANSI T1.617 Annex D
32  CISCO LMI: the original, aka "Gang of Four" LMI
33 
34 */
35 
36 #include <linux/errno.h>
37 #include <linux/etherdevice.h>
38 #include <linux/hdlc.h>
39 #include <linux/if_arp.h>
40 #include <linux/inetdevice.h>
41 #include <linux/init.h>
42 #include <linux/kernel.h>
43 #include <linux/module.h>
44 #include <linux/pkt_sched.h>
45 #include <linux/poll.h>
46 #include <linux/rtnetlink.h>
47 #include <linux/skbuff.h>
48 #include <linux/slab.h>
49 
50 #undef DEBUG_PKT
51 #undef DEBUG_ECN
52 #undef DEBUG_LINK
53 #undef DEBUG_PROTO
54 #undef DEBUG_PVC
55 
56 #define FR_UI			0x03
57 #define FR_PAD			0x00
58 
59 #define NLPID_IP		0xCC
60 #define NLPID_IPV6		0x8E
61 #define NLPID_SNAP		0x80
62 #define NLPID_PAD		0x00
63 #define NLPID_CCITT_ANSI_LMI	0x08
64 #define NLPID_CISCO_LMI		0x09
65 
66 
67 #define LMI_CCITT_ANSI_DLCI	   0 /* LMI DLCI */
68 #define LMI_CISCO_DLCI		1023
69 
70 #define LMI_CALLREF		0x00 /* Call Reference */
71 #define LMI_ANSI_LOCKSHIFT	0x95 /* ANSI locking shift */
72 #define LMI_ANSI_CISCO_REPTYPE	0x01 /* report type */
73 #define LMI_CCITT_REPTYPE	0x51
74 #define LMI_ANSI_CISCO_ALIVE	0x03 /* keep alive */
75 #define LMI_CCITT_ALIVE		0x53
76 #define LMI_ANSI_CISCO_PVCSTAT	0x07 /* PVC status */
77 #define LMI_CCITT_PVCSTAT	0x57
78 
79 #define LMI_FULLREP		0x00 /* full report  */
80 #define LMI_INTEGRITY		0x01 /* link integrity report */
81 #define LMI_SINGLE		0x02 /* single PVC report */
82 
83 #define LMI_STATUS_ENQUIRY      0x75
84 #define LMI_STATUS              0x7D /* reply */
85 
86 #define LMI_REPT_LEN               1 /* report type element length */
87 #define LMI_INTEG_LEN              2 /* link integrity element length */
88 
89 #define LMI_CCITT_CISCO_LENGTH	  13 /* LMI frame lengths */
90 #define LMI_ANSI_LENGTH		  14
91 
92 
93 struct fr_hdr {
94 #if defined(__LITTLE_ENDIAN_BITFIELD)
95 	unsigned ea1:	1;
96 	unsigned cr:	1;
97 	unsigned dlcih:	6;
98 
99 	unsigned ea2:	1;
100 	unsigned de:	1;
101 	unsigned becn:	1;
102 	unsigned fecn:	1;
103 	unsigned dlcil:	4;
104 #else
105 	unsigned dlcih:	6;
106 	unsigned cr:	1;
107 	unsigned ea1:	1;
108 
109 	unsigned dlcil:	4;
110 	unsigned fecn:	1;
111 	unsigned becn:	1;
112 	unsigned de:	1;
113 	unsigned ea2:	1;
114 #endif
115 } __packed;
116 
117 
118 struct pvc_device {
119 	struct net_device *frad;
120 	struct net_device *main;
121 	struct net_device *ether;	/* bridged Ethernet interface	*/
122 	struct pvc_device *next;	/* Sorted in ascending DLCI order */
123 	int dlci;
124 	int open_count;
125 
126 	struct {
127 		unsigned int new: 1;
128 		unsigned int active: 1;
129 		unsigned int exist: 1;
130 		unsigned int deleted: 1;
131 		unsigned int fecn: 1;
132 		unsigned int becn: 1;
133 		unsigned int bandwidth;	/* Cisco LMI reporting only */
134 	} state;
135 };
136 
137 struct frad_state {
138 	fr_proto settings;
139 	struct pvc_device *first_pvc;
140 	int dce_pvc_count;
141 
142 	struct timer_list timer;
143 	unsigned long last_poll;
144 	int reliable;
145 	int dce_changed;
146 	int request;
147 	int fullrep_sent;
148 	u32 last_errors; /* last errors bit list */
149 	u8 n391cnt;
150 	u8 txseq; /* TX sequence number */
151 	u8 rxseq; /* RX sequence number */
152 };
153 
154 
155 static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);
156 
157 
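/*
 * Q.922 address helpers: the 10-bit DLCI is split across the two address
 * octets - its upper six bits live in bits 7-2 of the first octet, the
 * lower four bits in bits 7-4 of the second.  The remaining bits carry
 * the C/R, FECN, BECN, DE and address-extension (EA) flags.
 * dlci_to_q922() leaves every flag clear except the final EA bit, so
 * e.g. DLCI 16 is encoded as the address bytes 0x04 0x01.
 */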
158 static inline u16 q922_to_dlci(u8 *hdr)
159 {
160 	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
161 }
162 
163 
164 static inline void dlci_to_q922(u8 *hdr, u16 dlci)
165 {
166 	hdr[0] = (dlci >> 2) & 0xFC;
167 	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
168 }
169 
170 
171 static inline struct frad_state *state(hdlc_device *hdlc)
172 {
173 	return (struct frad_state *)(hdlc->state);
174 }
175 
176 
177 static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
178 {
179 	struct pvc_device *pvc = state(hdlc)->first_pvc;
180 
181 	while (pvc) {
182 		if (pvc->dlci == dlci)
183 			return pvc;
184 		if (pvc->dlci > dlci)
185 			return NULL; /* the list is sorted */
186 		pvc = pvc->next;
187 	}
188 
189 	return NULL;
190 }
191 
192 
193 static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
194 {
195 	hdlc_device *hdlc = dev_to_hdlc(dev);
196 	struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;
197 
198 	while (*pvc_p) {
199 		if ((*pvc_p)->dlci == dlci)
200 			return *pvc_p;
201 		if ((*pvc_p)->dlci > dlci)
202 			break;	/* the list is sorted */
203 		pvc_p = &(*pvc_p)->next;
204 	}
205 
206 	pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC);
207 #ifdef DEBUG_PVC
208 	printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
209 #endif
210 	if (!pvc)
211 		return NULL;
212 
213 	pvc->dlci = dlci;
214 	pvc->frad = dev;
215 	pvc->next = *pvc_p;	/* Put it in the chain */
216 	*pvc_p = pvc;
217 	return pvc;
218 }
219 
220 
221 static inline int pvc_is_used(struct pvc_device *pvc)
222 {
223 	return pvc->main || pvc->ether;
224 }
225 
226 
227 static inline void pvc_carrier(int on, struct pvc_device *pvc)
228 {
229 	if (on) {
230 		if (pvc->main)
231 			if (!netif_carrier_ok(pvc->main))
232 				netif_carrier_on(pvc->main);
233 		if (pvc->ether)
234 			if (!netif_carrier_ok(pvc->ether))
235 				netif_carrier_on(pvc->ether);
236 	} else {
237 		if (pvc->main)
238 			if (netif_carrier_ok(pvc->main))
239 				netif_carrier_off(pvc->main);
240 		if (pvc->ether)
241 			if (netif_carrier_ok(pvc->ether))
242 				netif_carrier_off(pvc->ether);
243 	}
244 }
245 
246 
247 static inline void delete_unused_pvcs(hdlc_device *hdlc)
248 {
249 	struct pvc_device **pvc_p = &state(hdlc)->first_pvc;
250 
251 	while (*pvc_p) {
252 		if (!pvc_is_used(*pvc_p)) {
253 			struct pvc_device *pvc = *pvc_p;
254 #ifdef DEBUG_PVC
255 			printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
256 #endif
257 			*pvc_p = pvc->next;
258 			kfree(pvc);
259 			continue;
260 		}
261 		pvc_p = &(*pvc_p)->next;
262 	}
263 }
264 
265 
266 static inline struct net_device **get_dev_p(struct pvc_device *pvc,
267 					    int type)
268 {
269 	if (type == ARPHRD_ETHER)
270 		return &pvc->ether;
271 	else
272 		return &pvc->main;
273 }
274 
275 
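/*
 * Prepend the on-wire encapsulation (RFC 2427 style multiprotocol over
 * Frame Relay): a 4-byte header (Q.922 address, UI control field, NLPID)
 * for IP, IPv6 and the LMI protocols, or a 10-byte header with a SNAP
 * OUI/PID field for everything else - OUI 00-80-C2, PID 0x0007 for
 * bridged Ethernet without FCS, OUI 00-00-00 with the Ethertype as PID
 * for any other protocol.
 */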
276 static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
277 {
278 	u16 head_len;
279 	struct sk_buff *skb = *skb_p;
280 
281 	switch (skb->protocol) {
282 	case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
283 		head_len = 4;
284 		skb_push(skb, head_len);
285 		skb->data[3] = NLPID_CCITT_ANSI_LMI;
286 		break;
287 
288 	case cpu_to_be16(NLPID_CISCO_LMI):
289 		head_len = 4;
290 		skb_push(skb, head_len);
291 		skb->data[3] = NLPID_CISCO_LMI;
292 		break;
293 
294 	case cpu_to_be16(ETH_P_IP):
295 		head_len = 4;
296 		skb_push(skb, head_len);
297 		skb->data[3] = NLPID_IP;
298 		break;
299 
300 	case cpu_to_be16(ETH_P_IPV6):
301 		head_len = 4;
302 		skb_push(skb, head_len);
303 		skb->data[3] = NLPID_IPV6;
304 		break;
305 
306 	case cpu_to_be16(ETH_P_802_3):
307 		head_len = 10;
308 		if (skb_headroom(skb) < head_len) {
309 			struct sk_buff *skb2 = skb_realloc_headroom(skb,
310 								    head_len);
311 			if (!skb2)
312 				return -ENOBUFS;
313 			dev_kfree_skb(skb);
314 			skb = *skb_p = skb2;
315 		}
316 		skb_push(skb, head_len);
317 		skb->data[3] = FR_PAD;
318 		skb->data[4] = NLPID_SNAP;
319 		skb->data[5] = FR_PAD;
320 		skb->data[6] = 0x80;
321 		skb->data[7] = 0xC2;
322 		skb->data[8] = 0x00;
323 		skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
324 		break;
325 
326 	default:
327 		head_len = 10;
328 		skb_push(skb, head_len);
329 		skb->data[3] = FR_PAD;
330 		skb->data[4] = NLPID_SNAP;
331 		skb->data[5] = FR_PAD;
332 		skb->data[6] = FR_PAD;
333 		skb->data[7] = FR_PAD;
334 		*(__be16*)(skb->data + 8) = skb->protocol;
335 	}
336 
337 	dlci_to_q922(skb->data, dlci);
338 	skb->data[2] = FR_UI;
339 	return 0;
340 }
341 
342 
343 
344 static int pvc_open(struct net_device *dev)
345 {
346 	struct pvc_device *pvc = dev->ml_priv;
347 
348 	if ((pvc->frad->flags & IFF_UP) == 0)
349 		return -EIO;  /* Frad must be UP in order to activate PVC */
350 
351 	if (pvc->open_count++ == 0) {
352 		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
353 		if (state(hdlc)->settings.lmi == LMI_NONE)
354 			pvc->state.active = netif_carrier_ok(pvc->frad);
355 
356 		pvc_carrier(pvc->state.active, pvc);
357 		state(hdlc)->dce_changed = 1;
358 	}
359 	return 0;
360 }
361 
362 
363 
364 static int pvc_close(struct net_device *dev)
365 {
366 	struct pvc_device *pvc = dev->ml_priv;
367 
368 	if (--pvc->open_count == 0) {
369 		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
370 		if (state(hdlc)->settings.lmi == LMI_NONE)
371 			pvc->state.active = 0;
372 
373 		if (state(hdlc)->settings.dce) {
374 			state(hdlc)->dce_changed = 1;
375 			pvc->state.active = 0;
376 		}
377 	}
378 	return 0;
379 }
380 
381 
382 
383 static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
384 {
385 	struct pvc_device *pvc = dev->ml_priv;
386 	fr_proto_pvc_info info;
387 
388 	if (ifr->ifr_settings.type == IF_GET_PROTO) {
389 		if (dev->type == ARPHRD_ETHER)
390 			ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
391 		else
392 			ifr->ifr_settings.type = IF_PROTO_FR_PVC;
393 
394 		if (ifr->ifr_settings.size < sizeof(info)) {
395 			/* data size wanted */
396 			ifr->ifr_settings.size = sizeof(info);
397 			return -ENOBUFS;
398 		}
399 
400 		info.dlci = pvc->dlci;
401 		memcpy(info.master, pvc->frad->name, IFNAMSIZ);
402 		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
403 				 &info, sizeof(info)))
404 			return -EFAULT;
405 		return 0;
406 	}
407 
408 	return -EINVAL;
409 }
410 
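/*
 * Transmit on a PVC child device: frames on an inactive PVC are simply
 * dropped.  Bridged Ethernet frames are zero-padded to the minimum
 * Ethernet length, then fr_hard_header() prepends the Frame Relay
 * encapsulation and the frame is queued on the underlying FRAD device.
 */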
411 static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
412 {
413 	struct pvc_device *pvc = dev->ml_priv;
414 
415 	if (pvc->state.active) {
416 		if (dev->type == ARPHRD_ETHER) {
417 			int pad = ETH_ZLEN - skb->len;
418 			if (pad > 0) { /* Pad the frame with zeros */
419 				int len = skb->len;
420 				if (skb_tailroom(skb) < pad)
421 					if (pskb_expand_head(skb, 0, pad,
422 							     GFP_ATOMIC)) {
423 						dev->stats.tx_dropped++;
424 						dev_kfree_skb(skb);
425 						return NETDEV_TX_OK;
426 					}
427 				skb_put(skb, pad);
428 				memset(skb->data + len, 0, pad);
429 			}
430 			skb->protocol = cpu_to_be16(ETH_P_802_3);
431 		}
432 		if (!fr_hard_header(&skb, pvc->dlci)) {
433 			dev->stats.tx_bytes += skb->len;
434 			dev->stats.tx_packets++;
435 			if (pvc->state.fecn) /* TX Congestion counter */
436 				dev->stats.tx_compressed++;
437 			skb->dev = pvc->frad;
438 			dev_queue_xmit(skb);
439 			return NETDEV_TX_OK;
440 		}
441 	}
442 
443 	dev->stats.tx_dropped++;
444 	dev_kfree_skb(skb);
445 	return NETDEV_TX_OK;
446 }
447 
448 static inline void fr_log_dlci_active(struct pvc_device *pvc)
449 {
450 	netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
451 		    pvc->dlci,
452 		    pvc->main ? pvc->main->name : "",
453 		    pvc->main && pvc->ether ? " " : "",
454 		    pvc->ether ? pvc->ether->name : "",
455 		    pvc->state.new ? " new" : "",
456 		    !pvc->state.exist ? "deleted" :
457 		    pvc->state.active ? "active" : "inactive");
458 }
459 
460 
461 
462 static inline u8 fr_lmi_nextseq(u8 x)
463 {
464 	x++;
465 	return x ? x : 1;
466 }
467 
468 
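/*
 * Build and send an LMI frame on the management DLCI: call reference,
 * STATUS (DCE) or STATUS ENQUIRY (DTE), the locking shift for ANSI LMI,
 * a report type IE (full status or link integrity only) and a link
 * integrity IE carrying our TX and the last seen RX sequence numbers.
 * A DCE full report also appends one PVC status IE per PVC; while
 * walking the list it brings PVCs back into existence after an LMI
 * restart and activates PVCs that have been opened locally.
 */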
469 static void fr_lmi_send(struct net_device *dev, int fullrep)
470 {
471 	hdlc_device *hdlc = dev_to_hdlc(dev);
472 	struct sk_buff *skb;
473 	struct pvc_device *pvc = state(hdlc)->first_pvc;
474 	int lmi = state(hdlc)->settings.lmi;
475 	int dce = state(hdlc)->settings.dce;
476 	int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
477 	int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
478 	u8 *data;
479 	int i = 0;
480 
481 	if (dce && fullrep) {
482 		len += state(hdlc)->dce_pvc_count * (2 + stat_len);
483 		if (len > HDLC_MAX_MRU) {
484 			netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
485 			return;
486 		}
487 	}
488 
489 	skb = dev_alloc_skb(len);
490 	if (!skb) {
491 		netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
492 		return;
493 	}
494 	memset(skb->data, 0, len);
495 	skb_reserve(skb, 4);
496 	if (lmi == LMI_CISCO) {
497 		skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
498 		fr_hard_header(&skb, LMI_CISCO_DLCI);
499 	} else {
500 		skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
501 		fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
502 	}
503 	data = skb_tail_pointer(skb);
504 	data[i++] = LMI_CALLREF;
505 	data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
506 	if (lmi == LMI_ANSI)
507 		data[i++] = LMI_ANSI_LOCKSHIFT;
508 	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
509 		LMI_ANSI_CISCO_REPTYPE;
510 	data[i++] = LMI_REPT_LEN;
511 	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
512 	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
513 	data[i++] = LMI_INTEG_LEN;
514 	data[i++] = state(hdlc)->txseq =
515 		fr_lmi_nextseq(state(hdlc)->txseq);
516 	data[i++] = state(hdlc)->rxseq;
517 
518 	if (dce && fullrep) {
519 		while (pvc) {
520 			data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
521 				LMI_ANSI_CISCO_PVCSTAT;
522 			data[i++] = stat_len;
523 
524 			/* LMI start/restart */
525 			if (state(hdlc)->reliable && !pvc->state.exist) {
526 				pvc->state.exist = pvc->state.new = 1;
527 				fr_log_dlci_active(pvc);
528 			}
529 
530 			/* ifconfig PVC up */
531 			if (pvc->open_count && !pvc->state.active &&
532 			    pvc->state.exist && !pvc->state.new) {
533 				pvc_carrier(1, pvc);
534 				pvc->state.active = 1;
535 				fr_log_dlci_active(pvc);
536 			}
537 
538 			if (lmi == LMI_CISCO) {
539 				data[i] = pvc->dlci >> 8;
540 				data[i + 1] = pvc->dlci & 0xFF;
541 			} else {
542 				data[i] = (pvc->dlci >> 4) & 0x3F;
543 				data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
544 				data[i + 2] = 0x80;
545 			}
546 
547 			if (pvc->state.new)
548 				data[i + 2] |= 0x08;
549 			else if (pvc->state.active)
550 				data[i + 2] |= 0x02;
551 
552 			i += stat_len;
553 			pvc = pvc->next;
554 		}
555 	}
556 
557 	skb_put(skb, i);
558 	skb->priority = TC_PRIO_CONTROL;
559 	skb->dev = dev;
560 	skb_reset_network_header(skb);
561 
562 	dev_queue_xmit(skb);
563 }
564 
565 
566 
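/*
 * Propagate link (un)reliability to the PVCs: a newly reliable link
 * requests a full status report on the next poll (and, without LMI,
 * activates every PVC immediately); an unreliable link deactivates all
 * PVCs and drops their carrier.
 */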
567 static void fr_set_link_state(int reliable, struct net_device *dev)
568 {
569 	hdlc_device *hdlc = dev_to_hdlc(dev);
570 	struct pvc_device *pvc = state(hdlc)->first_pvc;
571 
572 	state(hdlc)->reliable = reliable;
573 	if (reliable) {
574 		netif_dormant_off(dev);
575 		state(hdlc)->n391cnt = 0; /* Request full status */
576 		state(hdlc)->dce_changed = 1;
577 
578 		if (state(hdlc)->settings.lmi == LMI_NONE) {
579 			while (pvc) {	/* Activate all PVCs */
580 				pvc_carrier(1, pvc);
581 				pvc->state.exist = pvc->state.active = 1;
582 				pvc->state.new = 0;
583 				pvc = pvc->next;
584 			}
585 		}
586 	} else {
587 		netif_dormant_on(dev);
588 		while (pvc) {		/* Deactivate all PVCs */
589 			pvc_carrier(0, pvc);
590 			pvc->state.exist = pvc->state.active = 0;
591 			pvc->state.new = 0;
592 			if (!state(hdlc)->settings.dce)
593 				pvc->state.bandwidth = 0;
594 			pvc = pvc->next;
595 		}
596 	}
597 }
598 
599 
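/*
 * Periodic LMI housekeeping.  DCE: the link is reliable as long as a
 * STATUS ENQUIRY arrived within the last T392 seconds.  DTE: record
 * whether the previous poll went unanswered, count the errors over the
 * last N393 events and declare the link unreliable once N392 of them
 * failed; then send the next STATUS ENQUIRY (a full status request
 * every N391 polls) and rearm the timer for T391 seconds.
 */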
600 static void fr_timer(unsigned long arg)
601 {
602 	struct net_device *dev = (struct net_device *)arg;
603 	hdlc_device *hdlc = dev_to_hdlc(dev);
604 	int i, cnt = 0, reliable;
605 	u32 list;
606 
607 	if (state(hdlc)->settings.dce) {
608 		reliable = state(hdlc)->request &&
609 			time_before(jiffies, state(hdlc)->last_poll +
610 				    state(hdlc)->settings.t392 * HZ);
611 		state(hdlc)->request = 0;
612 	} else {
613 		state(hdlc)->last_errors <<= 1; /* Shift the list */
614 		if (state(hdlc)->request) {
615 			if (state(hdlc)->reliable)
616 				netdev_info(dev, "No LMI status reply received\n");
617 			state(hdlc)->last_errors |= 1;
618 		}
619 
620 		list = state(hdlc)->last_errors;
621 		for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
622 			cnt += (list & 1);	/* errors count */
623 
624 		reliable = (cnt < state(hdlc)->settings.n392);
625 	}
626 
627 	if (state(hdlc)->reliable != reliable) {
628 		netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
629 		fr_set_link_state(reliable, dev);
630 	}
631 
632 	if (state(hdlc)->settings.dce)
633 		state(hdlc)->timer.expires = jiffies +
634 			state(hdlc)->settings.t392 * HZ;
635 	else {
636 		if (state(hdlc)->n391cnt)
637 			state(hdlc)->n391cnt--;
638 
639 		fr_lmi_send(dev, state(hdlc)->n391cnt == 0);
640 
641 		state(hdlc)->last_poll = jiffies;
642 		state(hdlc)->request = 1;
643 		state(hdlc)->timer.expires = jiffies +
644 			state(hdlc)->settings.t391 * HZ;
645 	}
646 
647 	add_timer(&state(hdlc)->timer);
648 }
649 
650 
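/*
 * Parse and validate a received LMI frame.  A DCE notes that a poll
 * arrived and answers with fr_lmi_send(), sending a full report when
 * the DTE asked for one or when the PVC configuration has changed.
 * A DTE treats a full status report as authoritative: every PVC listed
 * is created or updated, PVCs missing from the report lose their
 * carrier and are marked as no longer existing, and the next full
 * report is requested after N391 polls.
 */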
651 static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
652 {
653 	hdlc_device *hdlc = dev_to_hdlc(dev);
654 	struct pvc_device *pvc;
655 	u8 rxseq, txseq;
656 	int lmi = state(hdlc)->settings.lmi;
657 	int dce = state(hdlc)->settings.dce;
658 	int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;
659 
660 	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
661 			LMI_CCITT_CISCO_LENGTH)) {
662 		netdev_info(dev, "Short LMI frame\n");
663 		return 1;
664 	}
665 
666 	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
667 			     NLPID_CCITT_ANSI_LMI)) {
668 		netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
669 		return 1;
670 	}
671 
672 	if (skb->data[4] != LMI_CALLREF) {
673 		netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
674 			    skb->data[4]);
675 		return 1;
676 	}
677 
678 	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
679 		netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
680 			    skb->data[5]);
681 		return 1;
682 	}
683 
684 	if (lmi == LMI_ANSI) {
685 		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
686 			netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
687 				    skb->data[6]);
688 			return 1;
689 		}
690 		i = 7;
691 	} else
692 		i = 6;
693 
694 	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
695 			     LMI_ANSI_CISCO_REPTYPE)) {
696 		netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
697 			    skb->data[i]);
698 		return 1;
699 	}
700 
701 	if (skb->data[++i] != LMI_REPT_LEN) {
702 		netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
703 			    skb->data[i]);
704 		return 1;
705 	}
706 
707 	reptype = skb->data[++i];
708 	if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
709 		netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
710 			    reptype);
711 		return 1;
712 	}
713 
714 	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
715 			       LMI_ANSI_CISCO_ALIVE)) {
716 		netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
717 			    skb->data[i]);
718 		return 1;
719 	}
720 
721 	if (skb->data[++i] != LMI_INTEG_LEN) {
722 		netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
723 			    skb->data[i]);
724 		return 1;
725 	}
726 	i++;
727 
728 	state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
729 	rxseq = skb->data[i++];	/* Should confirm our sequence */
730 
731 	txseq = state(hdlc)->txseq;
732 
733 	if (dce)
734 		state(hdlc)->last_poll = jiffies;
735 
736 	error = 0;
737 	if (!state(hdlc)->reliable)
738 		error = 1;
739 
740 	if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
741 		state(hdlc)->n391cnt = 0;
742 		error = 1;
743 	}
744 
745 	if (dce) {
746 		if (state(hdlc)->fullrep_sent && !error) {
747 /* Stop sending full report - the last one has been confirmed by DTE */
748 			state(hdlc)->fullrep_sent = 0;
749 			pvc = state(hdlc)->first_pvc;
750 			while (pvc) {
751 				if (pvc->state.new) {
752 					pvc->state.new = 0;
753 
754 /* Tell DTE that new PVC is now active */
755 					state(hdlc)->dce_changed = 1;
756 				}
757 				pvc = pvc->next;
758 			}
759 		}
760 
761 		if (state(hdlc)->dce_changed) {
762 			reptype = LMI_FULLREP;
763 			state(hdlc)->fullrep_sent = 1;
764 			state(hdlc)->dce_changed = 0;
765 		}
766 
767 		state(hdlc)->request = 1; /* got request */
768 		fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
769 		return 0;
770 	}
771 
772 	/* DTE */
773 
774 	state(hdlc)->request = 0; /* got response, no request pending */
775 
776 	if (error)
777 		return 0;
778 
779 	if (reptype != LMI_FULLREP)
780 		return 0;
781 
782 	pvc = state(hdlc)->first_pvc;
783 
784 	while (pvc) {
785 		pvc->state.deleted = 1;
786 		pvc = pvc->next;
787 	}
788 
789 	no_ram = 0;
790 	while (skb->len >= i + 2 + stat_len) {
791 		u16 dlci;
792 		u32 bw;
793 		unsigned int active, new;
794 
795 		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
796 				       LMI_ANSI_CISCO_PVCSTAT)) {
797 			netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
798 				    skb->data[i]);
799 			return 1;
800 		}
801 
802 		if (skb->data[++i] != stat_len) {
803 			netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
804 				    skb->data[i]);
805 			return 1;
806 		}
807 		i++;
808 
809 		new = !!(skb->data[i + 2] & 0x08);
810 		active = !!(skb->data[i + 2] & 0x02);
811 		if (lmi == LMI_CISCO) {
812 			dlci = (skb->data[i] << 8) | skb->data[i + 1];
813 			bw = (skb->data[i + 3] << 16) |
814 				(skb->data[i + 4] << 8) |
815 				(skb->data[i + 5]);
816 		} else {
817 			dlci = ((skb->data[i] & 0x3F) << 4) |
818 				((skb->data[i + 1] & 0x78) >> 3);
819 			bw = 0;
820 		}
821 
822 		pvc = add_pvc(dev, dlci);
823 
824 		if (!pvc && !no_ram) {
825 			netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
826 			no_ram = 1;
827 		}
828 
829 		if (pvc) {
830 			pvc->state.exist = 1;
831 			pvc->state.deleted = 0;
832 			if (active != pvc->state.active ||
833 			    new != pvc->state.new ||
834 			    bw != pvc->state.bandwidth ||
835 			    !pvc->state.exist) {
836 				pvc->state.new = new;
837 				pvc->state.active = active;
838 				pvc->state.bandwidth = bw;
839 				pvc_carrier(active, pvc);
840 				fr_log_dlci_active(pvc);
841 			}
842 		}
843 
844 		i += stat_len;
845 	}
846 
847 	pvc = state(hdlc)->first_pvc;
848 
849 	while (pvc) {
850 		if (pvc->state.deleted && pvc->state.exist) {
851 			pvc_carrier(0, pvc);
852 			pvc->state.active = pvc->state.new = 0;
853 			pvc->state.exist = 0;
854 			pvc->state.bandwidth = 0;
855 			fr_log_dlci_active(pvc);
856 		}
857 		pvc = pvc->next;
858 	}
859 
860 	/* Next full report after N391 polls */
861 	state(hdlc)->n391cnt = state(hdlc)->settings.n391;
862 
863 	return 0;
864 }
865 
866 
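/*
 * Receive path for the FRAD device: validate the Q.922 address and UI
 * control field, hand frames on the LMI DLCI to fr_lmi_recv(), and for
 * everything else look up the PVC, track FECN/BECN changes, strip the
 * encapsulation (4-byte NLPID header for IP/IPv6, 10-byte SNAP header
 * otherwise) and deliver the frame to the routed or bridged Ethernet
 * child device via netif_rx().
 */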
867 static int fr_rx(struct sk_buff *skb)
868 {
869 	struct net_device *frad = skb->dev;
870 	hdlc_device *hdlc = dev_to_hdlc(frad);
871 	struct fr_hdr *fh = (struct fr_hdr *)skb->data;
872 	u8 *data = skb->data;
873 	u16 dlci;
874 	struct pvc_device *pvc;
875 	struct net_device *dev = NULL;
876 
877 	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
878 		goto rx_error;
879 
880 	dlci = q922_to_dlci(skb->data);
881 
882 	if ((dlci == LMI_CCITT_ANSI_DLCI &&
883 	     (state(hdlc)->settings.lmi == LMI_ANSI ||
884 	      state(hdlc)->settings.lmi == LMI_CCITT)) ||
885 	    (dlci == LMI_CISCO_DLCI &&
886 	     state(hdlc)->settings.lmi == LMI_CISCO)) {
887 		if (fr_lmi_recv(frad, skb))
888 			goto rx_error;
889 		dev_kfree_skb_any(skb);
890 		return NET_RX_SUCCESS;
891 	}
892 
893 	pvc = find_pvc(hdlc, dlci);
894 	if (!pvc) {
895 #ifdef DEBUG_PKT
896 		netdev_info(frad, "No PVC for received frame's DLCI %d\n",
897 			    dlci);
898 #endif
899 		dev_kfree_skb_any(skb);
900 		return NET_RX_DROP;
901 	}
902 
903 	if (pvc->state.fecn != fh->fecn) {
904 #ifdef DEBUG_ECN
905 		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
906 		       dlci, fh->fecn ? "N" : "FF");
907 #endif
908 		pvc->state.fecn ^= 1;
909 	}
910 
911 	if (pvc->state.becn != fh->becn) {
912 #ifdef DEBUG_ECN
913 		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
914 		       dlci, fh->becn ? "N" : "FF");
915 #endif
916 		pvc->state.becn ^= 1;
917 	}
918 
919 
920 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
921 		frad->stats.rx_dropped++;
922 		return NET_RX_DROP;
923 	}
924 
925 	if (data[3] == NLPID_IP) {
926 		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
927 		dev = pvc->main;
928 		skb->protocol = htons(ETH_P_IP);
929 
930 	} else if (data[3] == NLPID_IPV6) {
931 		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
932 		dev = pvc->main;
933 		skb->protocol = htons(ETH_P_IPV6);
934 
935 	} else if (skb->len > 10 && data[3] == FR_PAD &&
936 		   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
937 		u16 oui = ntohs(*(__be16*)(data + 6));
938 		u16 pid = ntohs(*(__be16*)(data + 8));
939 		skb_pull(skb, 10);
940 
941 		switch ((((u32)oui) << 16) | pid) {
942 		case ETH_P_ARP: /* routed frame with SNAP */
943 		case ETH_P_IPX:
944 		case ETH_P_IP:	/* a long variant */
945 		case ETH_P_IPV6:
946 			dev = pvc->main;
947 			skb->protocol = htons(pid);
948 			break;
949 
950 		case 0x80C20007: /* bridged Ethernet frame */
951 			if ((dev = pvc->ether) != NULL)
952 				skb->protocol = eth_type_trans(skb, dev);
953 			break;
954 
955 		default:
956 			netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
957 				    oui, pid);
958 			dev_kfree_skb_any(skb);
959 			return NET_RX_DROP;
960 		}
961 	} else {
962 		netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
963 			    data[3], skb->len);
964 		dev_kfree_skb_any(skb);
965 		return NET_RX_DROP;
966 	}
967 
968 	if (dev) {
969 		dev->stats.rx_packets++; /* PVC traffic */
970 		dev->stats.rx_bytes += skb->len;
971 		if (pvc->state.becn)
972 			dev->stats.rx_compressed++;
973 		skb->dev = dev;
974 		netif_rx(skb);
975 		return NET_RX_SUCCESS;
976 	} else {
977 		dev_kfree_skb_any(skb);
978 		return NET_RX_DROP;
979 	}
980 
981  rx_error:
982 	frad->stats.rx_errors++; /* Mark error */
983 	dev_kfree_skb_any(skb);
984 	return NET_RX_DROP;
985 }
986 
987 
988 
989 static void fr_start(struct net_device *dev)
990 {
991 	hdlc_device *hdlc = dev_to_hdlc(dev);
992 #ifdef DEBUG_LINK
993 	printk(KERN_DEBUG "fr_start\n");
994 #endif
995 	if (state(hdlc)->settings.lmi != LMI_NONE) {
996 		state(hdlc)->reliable = 0;
997 		state(hdlc)->dce_changed = 1;
998 		state(hdlc)->request = 0;
999 		state(hdlc)->fullrep_sent = 0;
1000 		state(hdlc)->last_errors = 0xFFFFFFFF;
1001 		state(hdlc)->n391cnt = 0;
1002 		state(hdlc)->txseq = state(hdlc)->rxseq = 0;
1003 
1004 		init_timer(&state(hdlc)->timer);
1005 		/* First poll after 1 s */
1006 		state(hdlc)->timer.expires = jiffies + HZ;
1007 		state(hdlc)->timer.function = fr_timer;
1008 		state(hdlc)->timer.data = (unsigned long)dev;
1009 		add_timer(&state(hdlc)->timer);
1010 	} else
1011 		fr_set_link_state(1, dev);
1012 }
1013 
1014 
1015 static void fr_stop(struct net_device *dev)
1016 {
1017 	hdlc_device *hdlc = dev_to_hdlc(dev);
1018 #ifdef DEBUG_LINK
1019 	printk(KERN_DEBUG "fr_stop\n");
1020 #endif
1021 	if (state(hdlc)->settings.lmi != LMI_NONE)
1022 		del_timer_sync(&state(hdlc)->timer);
1023 	fr_set_link_state(0, dev);
1024 }
1025 
1026 
1027 static void fr_close(struct net_device *dev)
1028 {
1029 	hdlc_device *hdlc = dev_to_hdlc(dev);
1030 	struct pvc_device *pvc = state(hdlc)->first_pvc;
1031 
1032 	while (pvc) {		/* Shutdown all PVCs for this FRAD */
1033 		if (pvc->main)
1034 			dev_close(pvc->main);
1035 		if (pvc->ether)
1036 			dev_close(pvc->ether);
1037 		pvc = pvc->next;
1038 	}
1039 }
1040 
1041 
1042 static void pvc_setup(struct net_device *dev)
1043 {
1044 	dev->type = ARPHRD_DLCI;
1045 	dev->flags = IFF_POINTOPOINT;
1046 	dev->hard_header_len = 10;
1047 	dev->addr_len = 2;
1048 	netif_keep_dst(dev);
1049 }
1050 
1051 static const struct net_device_ops pvc_ops = {
1052 	.ndo_open       = pvc_open,
1053 	.ndo_stop       = pvc_close,
1054 	.ndo_start_xmit = pvc_xmit,
1055 	.ndo_do_ioctl   = pvc_ioctl,
1056 };
1057 
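/*
 * Create the child device for a DLCI - "pvc%d" (ARPHRD_DLCI) for routed
 * traffic or "pvceth%d" for bridged Ethernet - register it and attach
 * it to the pvc_device.  When this makes the PVC used for the first
 * time, the DCE PVC count is bumped and the status is marked changed so
 * the next full report advertises it.
 */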
1058 static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1059 {
1060 	hdlc_device *hdlc = dev_to_hdlc(frad);
1061 	struct pvc_device *pvc;
1062 	struct net_device *dev;
1063 	int used;
1064 
1065 	if ((pvc = add_pvc(frad, dlci)) == NULL) {
1066 		netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
1067 		return -ENOBUFS;
1068 	}
1069 
1070 	if (*get_dev_p(pvc, type))
1071 		return -EEXIST;
1072 
1073 	used = pvc_is_used(pvc);
1074 
1075 	if (type == ARPHRD_ETHER)
1076 		dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
1077 				   ether_setup);
1078 	else
1079 		dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);
1080 
1081 	if (!dev) {
1082 		netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
1083 		delete_unused_pvcs(hdlc);
1084 		return -ENOBUFS;
1085 	}
1086 
1087 	if (type == ARPHRD_ETHER) {
1088 		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1089 		eth_hw_addr_random(dev);
1090 	} else {
1091 		*(__be16*)dev->dev_addr = htons(dlci);
1092 		dlci_to_q922(dev->broadcast, dlci);
1093 	}
1094 	dev->netdev_ops = &pvc_ops;
1095 	dev->mtu = HDLC_MAX_MTU;
1096 	dev->min_mtu = 68;
1097 	dev->max_mtu = HDLC_MAX_MTU;
1098 	dev->priv_flags |= IFF_NO_QUEUE;
1099 	dev->ml_priv = pvc;
1100 
1101 	if (register_netdevice(dev) != 0) {
1102 		free_netdev(dev);
1103 		delete_unused_pvcs(hdlc);
1104 		return -EIO;
1105 	}
1106 
1107 	dev->needs_free_netdev = true;
1108 	*get_dev_p(pvc, type) = dev;
1109 	if (!used) {
1110 		state(hdlc)->dce_changed = 1;
1111 		state(hdlc)->dce_pvc_count++;
1112 	}
1113 	return 0;
1114 }
1115 
1116 
1117 
1118 static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
1119 {
1120 	struct pvc_device *pvc;
1121 	struct net_device *dev;
1122 
1123 	if ((pvc = find_pvc(hdlc, dlci)) == NULL)
1124 		return -ENOENT;
1125 
1126 	if ((dev = *get_dev_p(pvc, type)) == NULL)
1127 		return -ENOENT;
1128 
1129 	if (dev->flags & IFF_UP)
1130 		return -EBUSY;		/* PVC in use */
1131 
1132 	unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
1133 	*get_dev_p(pvc, type) = NULL;
1134 
1135 	if (!pvc_is_used(pvc)) {
1136 		state(hdlc)->dce_pvc_count--;
1137 		state(hdlc)->dce_changed = 1;
1138 	}
1139 	delete_unused_pvcs(hdlc);
1140 	return 0;
1141 }
1142 
1143 
1144 
1145 static void fr_destroy(struct net_device *frad)
1146 {
1147 	hdlc_device *hdlc = dev_to_hdlc(frad);
1148 	struct pvc_device *pvc = state(hdlc)->first_pvc;
1149 	state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
1150 	state(hdlc)->dce_pvc_count = 0;
1151 	state(hdlc)->dce_changed = 1;
1152 
1153 	while (pvc) {
1154 		struct pvc_device *next = pvc->next;
1155 		/* destructors will free_netdev() main and ether */
1156 		if (pvc->main)
1157 			unregister_netdevice(pvc->main);
1158 
1159 		if (pvc->ether)
1160 			unregister_netdevice(pvc->ether);
1161 
1162 		kfree(pvc);
1163 		pvc = next;
1164 	}
1165 }
1166 
1167 
1168 static struct hdlc_proto proto = {
1169 	.close		= fr_close,
1170 	.start		= fr_start,
1171 	.stop		= fr_stop,
1172 	.detach		= fr_destroy,
1173 	.ioctl		= fr_ioctl,
1174 	.netif_rx	= fr_rx,
1175 	.module		= THIS_MODULE,
1176 };
1177 
1178 
1179 static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
1180 {
1181 	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
1182 	const size_t size = sizeof(fr_proto);
1183 	fr_proto new_settings;
1184 	hdlc_device *hdlc = dev_to_hdlc(dev);
1185 	fr_proto_pvc pvc;
1186 	int result;
1187 
1188 	switch (ifr->ifr_settings.type) {
1189 	case IF_GET_PROTO:
1190 		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
1191 			return -EINVAL;
1192 		ifr->ifr_settings.type = IF_PROTO_FR;
1193 		if (ifr->ifr_settings.size < size) {
1194 			ifr->ifr_settings.size = size; /* data size wanted */
1195 			return -ENOBUFS;
1196 		}
1197 		if (copy_to_user(fr_s, &state(hdlc)->settings, size))
1198 			return -EFAULT;
1199 		return 0;
1200 
1201 	case IF_PROTO_FR:
1202 		if (!capable(CAP_NET_ADMIN))
1203 			return -EPERM;
1204 
1205 		if (dev->flags & IFF_UP)
1206 			return -EBUSY;
1207 
1208 		if (copy_from_user(&new_settings, fr_s, size))
1209 			return -EFAULT;
1210 
1211 		if (new_settings.lmi == LMI_DEFAULT)
1212 			new_settings.lmi = LMI_ANSI;
1213 
1214 		if ((new_settings.lmi != LMI_NONE &&
1215 		     new_settings.lmi != LMI_ANSI &&
1216 		     new_settings.lmi != LMI_CCITT &&
1217 		     new_settings.lmi != LMI_CISCO) ||
1218 		    new_settings.t391 < 1 ||
1219 		    new_settings.t392 < 2 ||
1220 		    new_settings.n391 < 1 ||
1221 		    new_settings.n392 < 1 ||
1222 		    new_settings.n393 < new_settings.n392 ||
1223 		    new_settings.n393 > 32 ||
1224 		    (new_settings.dce != 0 &&
1225 		     new_settings.dce != 1))
1226 			return -EINVAL;
1227 
1228 		result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
1229 		if (result)
1230 			return result;
1231 
1232 		if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
1233 			result = attach_hdlc_protocol(dev, &proto,
1234 						      sizeof(struct frad_state));
1235 			if (result)
1236 				return result;
1237 			state(hdlc)->first_pvc = NULL;
1238 			state(hdlc)->dce_pvc_count = 0;
1239 		}
1240 		memcpy(&state(hdlc)->settings, &new_settings, size);
1241 		dev->type = ARPHRD_FRAD;
1242 		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
1243 		return 0;
1244 
1245 	case IF_PROTO_FR_ADD_PVC:
1246 	case IF_PROTO_FR_DEL_PVC:
1247 	case IF_PROTO_FR_ADD_ETH_PVC:
1248 	case IF_PROTO_FR_DEL_ETH_PVC:
1249 		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
1250 			return -EINVAL;
1251 
1252 		if (!capable(CAP_NET_ADMIN))
1253 			return -EPERM;
1254 
1255 		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
1256 				   sizeof(fr_proto_pvc)))
1257 			return -EFAULT;
1258 
1259 		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
1260 			return -EINVAL;	/* Only 10 bits, DLCI 0 reserved */
1261 
1262 		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
1263 		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
1264 			result = ARPHRD_ETHER; /* bridged Ethernet device */
1265 		else
1266 			result = ARPHRD_DLCI;
1267 
1268 		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
1269 		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
1270 			return fr_add_pvc(dev, pvc.dlci, result);
1271 		else
1272 			return fr_del_pvc(hdlc, pvc.dlci, result);
1273 	}
1274 
1275 	return -EINVAL;
1276 }
1277 
1278 
1279 static int __init mod_init(void)
1280 {
1281 	register_hdlc_protocol(&proto);
1282 	return 0;
1283 }
1284 
1285 
1286 static void __exit mod_exit(void)
1287 {
1288 	unregister_hdlc_protocol(&proto);
1289 }
1290 
1291 
1292 module_init(mod_init);
1293 module_exit(mod_exit);
1294 
1295 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
1296 MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
1297 MODULE_LICENSE("GPL v2");
1298