xref: /linux/drivers/isdn/mISDN/layer2.c (revision b8bb76713ec50df2f11efee386e16f93d51e1076)
1 /*
2  *
3  * Author	Karsten Keil <kkeil@novell.com>
4  *
5  * Copyright 2008  by Karsten Keil <kkeil@novell.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17 
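/*
 * mISDN layer 2: LAPD (ITU-T Q.921) and LAPB/X.75 data link procedures,
 * driven by a finite state machine.  States ST_L2_1..ST_L2_8 correspond to
 * the Q.921 states TEI-unassigned through timer-recovery; the events cover
 * received frame types, DL_/MDL_ primitives and the T200/T203 timers.
 */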
18 #include <linux/mISDNif.h>
19 #include "core.h"
20 #include "fsm.h"
21 #include "layer2.h"
22 
23 static u_int *debug;
24 
25 static
26 struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};
27 
28 static char *strL2State[] =
29 {
30 	"ST_L2_1",
31 	"ST_L2_2",
32 	"ST_L2_3",
33 	"ST_L2_4",
34 	"ST_L2_5",
35 	"ST_L2_6",
36 	"ST_L2_7",
37 	"ST_L2_8",
38 };
39 
40 enum {
41 	EV_L2_UI,
42 	EV_L2_SABME,
43 	EV_L2_DISC,
44 	EV_L2_DM,
45 	EV_L2_UA,
46 	EV_L2_FRMR,
47 	EV_L2_SUPER,
48 	EV_L2_I,
49 	EV_L2_DL_DATA,
50 	EV_L2_ACK_PULL,
51 	EV_L2_DL_UNITDATA,
52 	EV_L2_DL_ESTABLISH_REQ,
53 	EV_L2_DL_RELEASE_REQ,
54 	EV_L2_MDL_ASSIGN,
55 	EV_L2_MDL_REMOVE,
56 	EV_L2_MDL_ERROR,
57 	EV_L1_DEACTIVATE,
58 	EV_L2_T200,
59 	EV_L2_T203,
60 	EV_L2_SET_OWN_BUSY,
61 	EV_L2_CLEAR_OWN_BUSY,
62 	EV_L2_FRAME_ERROR,
63 };
64 
65 #define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)
66 
67 static char *strL2Event[] =
68 {
69 	"EV_L2_UI",
70 	"EV_L2_SABME",
71 	"EV_L2_DISC",
72 	"EV_L2_DM",
73 	"EV_L2_UA",
74 	"EV_L2_FRMR",
75 	"EV_L2_SUPER",
76 	"EV_L2_I",
77 	"EV_L2_DL_DATA",
78 	"EV_L2_ACK_PULL",
79 	"EV_L2_DL_UNITDATA",
80 	"EV_L2_DL_ESTABLISH_REQ",
81 	"EV_L2_DL_RELEASE_REQ",
82 	"EV_L2_MDL_ASSIGN",
83 	"EV_L2_MDL_REMOVE",
84 	"EV_L2_MDL_ERROR",
85 	"EV_L1_DEACTIVATE",
86 	"EV_L2_T200",
87 	"EV_L2_T203",
88 	"EV_L2_SET_OWN_BUSY",
89 	"EV_L2_CLEAR_OWN_BUSY",
90 	"EV_L2_FRAME_ERROR",
91 };
92 
93 static void
94 l2m_debug(struct FsmInst *fi, char *fmt, ...)
95 {
96 	struct layer2 *l2 = fi->userdata;
97 	va_list va;
98 
99 	if (!(*debug & DEBUG_L2_FSM))
100 		return;
101 	va_start(va, fmt);
102 	printk(KERN_DEBUG "l2 (tei %d): ", l2->tei);
103 	vprintk(fmt, va);
104 	printk("\n");
105 	va_end(va);
106 }
107 
108 inline u_int
109 l2headersize(struct layer2 *l2, int ui)
110 {
111 	return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
112 		(test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
113 }
114 
115 inline u_int
116 l2addrsize(struct layer2 *l2)
117 {
118 	return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
119 }
120 
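/*
 * Build a fresh request id: a wrapping 15-bit sequence number in the upper
 * 16 bits, the TEI in bits 8..15 and the SAPI in bits 0..7.
 */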
121 static u_int
122 l2_newid(struct layer2 *l2)
123 {
124 	u_int	id;
125 
126 	id = l2->next_id++;
127 	if (id == 0x7fff)
128 		l2->next_id = 1;
129 	id <<= 16;
130 	id |= l2->tei << 8;
131 	id |= l2->sapi;
132 	return id;
133 }
134 
135 static void
136 l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
137 {
138 	int	err;
139 
140 	if (!l2->up)
141 		return;
142 	mISDN_HEAD_PRIM(skb) = prim;
143 	mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
144 	err = l2->up->send(l2->up, skb);
145 	if (err) {
146 		printk(KERN_WARNING "%s: err=%d\n", __func__, err);
147 		dev_kfree_skb(skb);
148 	}
149 }
150 
151 static void
152 l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
153 {
154 	struct sk_buff	*skb;
155 	struct mISDNhead *hh;
156 	int		err;
157 
158 	if (!l2->up)
159 		return;
160 	skb = mI_alloc_skb(len, GFP_ATOMIC);
161 	if (!skb)
162 		return;
163 	hh = mISDN_HEAD_P(skb);
164 	hh->prim = prim;
165 	hh->id = (l2->ch.nr << 16) | l2->ch.addr;
166 	if (len)
167 		memcpy(skb_put(skb, len), arg, len);
168 	err = l2->up->send(l2->up, skb);
169 	if (err) {
170 		printk(KERN_WARNING "%s: err=%d\n", __func__, err);
171 		dev_kfree_skb(skb);
172 	}
173 }
174 
175 static int
176 l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
177 	int ret;
178 
179 	ret = l2->ch.recv(l2->ch.peer, skb);
180 	if (ret && (*debug & DEBUG_L2_RECV))
181 		printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret);
182 	return ret;
183 }
184 
185 static int
186 l2down_raw(struct layer2 *l2, struct sk_buff *skb)
187 {
188 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
189 
190 	if (hh->prim == PH_DATA_REQ) {
191 		if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
192 			skb_queue_tail(&l2->down_queue, skb);
193 			return 0;
194 		}
195 		l2->down_id = mISDN_HEAD_ID(skb);
196 	}
197 	return l2down_skb(l2, skb);
198 }
199 
200 static int
201 l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
202 {
203 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
204 
205 	hh->prim = prim;
206 	hh->id = id;
207 	return l2down_raw(l2, skb);
208 }
209 
210 static int
211 l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
212 {
213 	struct sk_buff	*skb;
214 	int		err;
215 	struct mISDNhead *hh;
216 
217 	skb = mI_alloc_skb(len, GFP_ATOMIC);
218 	if (!skb)
219 		return -ENOMEM;
220 	hh = mISDN_HEAD_P(skb);
221 	hh->prim = prim;
222 	hh->id = id;
223 	if (len)
224 		memcpy(skb_put(skb, len), arg, len);
225 	err = l2down_raw(l2, skb);
226 	if (err)
227 		dev_kfree_skb(skb);
228 	return err;
229 }
230 
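/*
 * PH_DATA_CNF from layer 1 confirms the frame identified by hh->id.  Push
 * the next queued frame from down_queue towards layer 1; once the queue is
 * empty, clear FLG_L1_NOTREADY and signal EV_L2_ACK_PULL so that waiting
 * I-frames get transmitted.
 */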
231 static int
232 ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
233 	struct sk_buff *nskb = skb;
234 	int ret = -EAGAIN;
235 
236 	if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
237 		if (hh->id == l2->down_id) {
238 			nskb = skb_dequeue(&l2->down_queue);
239 			if (nskb) {
240 				l2->down_id = mISDN_HEAD_ID(nskb);
241 				if (l2down_skb(l2, nskb)) {
242 					dev_kfree_skb(nskb);
243 					l2->down_id = MISDN_ID_NONE;
244 				}
245 			} else
246 				l2->down_id = MISDN_ID_NONE;
247 			if (ret) {
248 				dev_kfree_skb(skb);
249 				ret = 0;
250 			}
251 			if (l2->down_id == MISDN_ID_NONE) {
252 				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
253 				mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
254 			}
255 		}
256 	}
257 	if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
258 		nskb = skb_dequeue(&l2->down_queue);
259 		if (nskb) {
260 			l2->down_id = mISDN_HEAD_ID(nskb);
261 			if (l2down_skb(l2, nskb)) {
262 				dev_kfree_skb(nskb);
263 				l2->down_id = MISDN_ID_NONE;
264 				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
265 			}
266 		} else
267 			test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
268 	}
269 	return ret;
270 }
271 
272 static int
273 l2mgr(struct layer2 *l2, u_int prim, void *arg) {
274 	long c = (long)arg;
275 
276 	printk(KERN_WARNING
277 	    "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c);
278 	if (test_bit(FLG_LAPD, &l2->flag) &&
279 		!test_bit(FLG_FIXED_TEI, &l2->flag)) {
280 		switch (c) {
281 		case 'C':
282 		case 'D':
283 		case 'G':
284 		case 'H':
285 			l2_tei(l2, prim, (u_long)arg);
286 			break;
287 		}
288 	}
289 	return 0;
290 }
291 
292 static void
293 set_peer_busy(struct layer2 *l2) {
294 	test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
295 	if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
296 		test_and_set_bit(FLG_L2BLOCK, &l2->flag);
297 }
298 
299 static void
300 clear_peer_busy(struct layer2 *l2) {
301 	if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
302 		test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
303 }
304 
305 static void
306 InitWin(struct layer2 *l2)
307 {
308 	int i;
309 
310 	for (i = 0; i < MAX_WINDOW; i++)
311 		l2->windowar[i] = NULL;
312 }
313 
314 static int
315 freewin(struct layer2 *l2)
316 {
317 	int i, cnt = 0;
318 
319 	for (i = 0; i < MAX_WINDOW; i++) {
320 		if (l2->windowar[i]) {
321 			cnt++;
322 			dev_kfree_skb(l2->windowar[i]);
323 			l2->windowar[i] = NULL;
324 		}
325 	}
326 	return cnt;
327 }
328 
329 static void
330 ReleaseWin(struct layer2 *l2)
331 {
332 	int cnt = freewin(l2);
333 
334 	if (cnt)
335 		printk(KERN_WARNING
336 		    "isdnl2 freed %d skbuffs in release\n", cnt);
337 }
338 
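/*
 * True while fewer than l2->window I-frames are unacknowledged and the
 * peer has not reported receiver-busy.
 */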
339 inline unsigned int
340 cansend(struct layer2 *l2)
341 {
342 	unsigned int p1;
343 
344 	if (test_bit(FLG_MOD128, &l2->flag))
345 		p1 = (l2->vs - l2->va) % 128;
346 	else
347 		p1 = (l2->vs - l2->va) % 8;
348 	return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
349 }
350 
351 inline void
352 clear_exception(struct layer2 *l2)
353 {
354 	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
355 	test_and_clear_bit(FLG_REJEXC, &l2->flag);
356 	test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
357 	clear_peer_busy(l2);
358 }
359 
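/*
 * Write the address field into *header: two octets for LAPD (SAPI with C/R
 * bit, TEI with EA bit) or one octet for LAPB (address A or B).  Returns
 * the number of octets written.
 */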
360 static int
361 sethdraddr(struct layer2 *l2, u_char *header, int rsp)
362 {
363 	u_char *ptr = header;
364 	int crbit = rsp;
365 
366 	if (test_bit(FLG_LAPD, &l2->flag)) {
367 		if (test_bit(FLG_LAPD_NET, &l2->flag))
368 			crbit = !crbit;
369 		*ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
370 		*ptr++ = (l2->tei << 1) | 1;
371 		return 2;
372 	} else {
373 		if (test_bit(FLG_ORIG, &l2->flag))
374 			crbit = !crbit;
375 		if (crbit)
376 			*ptr++ = l2->addr.B;
377 		else
378 			*ptr++ = l2->addr.A;
379 		return 1;
380 	}
381 }
382 
383 static inline void
384 enqueue_super(struct layer2 *l2, struct sk_buff *skb)
385 {
386 	if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
387 		dev_kfree_skb(skb);
388 }
389 
390 static inline void
391 enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
392 {
393 	if (l2->tm)
394 		l2_tei(l2, MDL_STATUS_UI_IND, 0);
395 	if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
396 		dev_kfree_skb(skb);
397 }
398 
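/*
 * Predicates on the control field (the first octet after the address)
 * identifying the type of a received frame.
 */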
399 inline int
400 IsUI(u_char *data)
401 {
402 	return (data[0] & 0xef) == UI;
403 }
404 
405 inline int
406 IsUA(u_char *data)
407 {
408 	return (data[0] & 0xef) == UA;
409 }
410 
411 inline int
412 IsDM(u_char *data)
413 {
414 	return (data[0] & 0xef) == DM;
415 }
416 
417 inline int
418 IsDISC(u_char *data)
419 {
420 	return (data[0] & 0xef) == DISC;
421 }
422 
423 inline int
424 IsRR(u_char *data, struct layer2 *l2)
425 {
426 	if (test_bit(FLG_MOD128, &l2->flag))
427 		return data[0] == RR;
428 	else
429 		return (data[0] & 0xf) == 1;
430 }
431 
432 inline int
433 IsSFrame(u_char *data, struct layer2 *l2)
434 {
435 	register u_char d = *data;
436 
437 	if (!test_bit(FLG_MOD128, &l2->flag))
438 		d &= 0xf;
439 	return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
440 }
441 
442 inline int
443 IsSABME(u_char *data, struct layer2 *l2)
444 {
445 	u_char d = data[0] & ~0x10;
446 
447 	return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
448 }
449 
450 inline int
451 IsREJ(u_char *data, struct layer2 *l2)
452 {
453 	return test_bit(FLG_MOD128, &l2->flag) ?
454 		data[0] == REJ : (data[0] & 0xf) == REJ;
455 }
456 
457 inline int
458 IsFRMR(u_char *data)
459 {
460 	return (data[0] & 0xef) == FRMR;
461 }
462 
463 inline int
464 IsRNR(u_char *data, struct layer2 *l2)
465 {
466 	return test_bit(FLG_MOD128, &l2->flag) ?
467 	    data[0] == RNR : (data[0] & 0xf) == RNR;
468 }
469 
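/*
 * Per-type frame checks: return 0 if the frame is acceptable, otherwise a
 * one-character error code ('L' wrong command/response direction, 'N' bad
 * length, 'O' information field too long) that ends up in an MDL_ERROR
 * indication via l2mgr().
 */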
470 static int
471 iframe_error(struct layer2 *l2, struct sk_buff *skb)
472 {
473 	u_int	i;
474 	int	rsp = *skb->data & 0x2;
475 
476 	i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
477 	if (test_bit(FLG_ORIG, &l2->flag))
478 		rsp = !rsp;
479 	if (rsp)
480 		return 'L';
481 	if (skb->len < i)
482 		return 'N';
483 	if ((skb->len - i) > l2->maxlen)
484 		return 'O';
485 	return 0;
486 }
487 
488 static int
489 super_error(struct layer2 *l2, struct sk_buff *skb)
490 {
491 	if (skb->len != l2addrsize(l2) +
492 	    (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
493 		return 'N';
494 	return 0;
495 }
496 
497 static int
498 unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
499 {
500 	int rsp = (*skb->data & 0x2) >> 1;
501 	if (test_bit(FLG_ORIG, &l2->flag))
502 		rsp = !rsp;
503 	if (rsp != wantrsp)
504 		return 'L';
505 	if (skb->len != l2addrsize(l2) + 1)
506 		return 'N';
507 	return 0;
508 }
509 
510 static int
511 UI_error(struct layer2 *l2, struct sk_buff *skb)
512 {
513 	int rsp = *skb->data & 0x2;
514 	if (test_bit(FLG_ORIG, &l2->flag))
515 		rsp = !rsp;
516 	if (rsp)
517 		return 'L';
518 	if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
519 		return 'O';
520 	return 0;
521 }
522 
523 static int
524 FRMR_error(struct layer2 *l2, struct sk_buff *skb)
525 {
526 	u_int	headers = l2addrsize(l2) + 1;
527 	u_char	*datap = skb->data + headers;
528 	int	rsp = *skb->data & 0x2;
529 
530 	if (test_bit(FLG_ORIG, &l2->flag))
531 		rsp = !rsp;
532 	if (!rsp)
533 		return 'L';
534 	if (test_bit(FLG_MOD128, &l2->flag)) {
535 		if (skb->len < headers + 5)
536 			return 'N';
537 		else if (*debug & DEBUG_L2)
538 			l2m_debug(&l2->l2m,
539 			    "FRMR information %2x %2x %2x %2x %2x",
540 			    datap[0], datap[1], datap[2], datap[3], datap[4]);
541 	} else {
542 		if (skb->len < headers + 3)
543 			return 'N';
544 		else if (*debug & DEBUG_L2)
545 			l2m_debug(&l2->l2m,
546 			    "FRMR information %2x %2x %2x",
547 			    datap[0], datap[1], datap[2]);
548 	}
549 	return 0;
550 }
551 
552 static unsigned int
553 legalnr(struct layer2 *l2, unsigned int nr)
554 {
555 	if (test_bit(FLG_MOD128, &l2->flag))
556 		return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
557 	else
558 		return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
559 }
560 
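/*
 * Acknowledge up to sequence number nr: advance V(A) and release the
 * acknowledged frames held in the retransmit window via tmp_queue.
 */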
561 static void
562 setva(struct layer2 *l2, unsigned int nr)
563 {
564 	struct sk_buff	*skb;
565 
566 	while (l2->va != nr) {
567 		l2->va++;
568 		if (test_bit(FLG_MOD128, &l2->flag))
569 			l2->va %= 128;
570 		else
571 			l2->va %= 8;
572 		if (l2->windowar[l2->sow]) {
573 			skb_trim(l2->windowar[l2->sow], 0);
574 			skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
575 			l2->windowar[l2->sow] = NULL;
576 		}
577 		l2->sow = (l2->sow + 1) % l2->window;
578 	}
579 	skb = skb_dequeue(&l2->tmp_queue);
580 	while (skb) {
581 		dev_kfree_skb(skb);
582 		skb = skb_dequeue(&l2->tmp_queue);
583 	}
584 }
585 
586 static void
587 send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
588 {
589 	u_char tmp[MAX_L2HEADER_LEN];
590 	int i;
591 
592 	i = sethdraddr(l2, tmp, cr);
593 	tmp[i++] = cmd;
594 	if (skb)
595 		skb_trim(skb, 0);
596 	else {
597 		skb = mI_alloc_skb(i, GFP_ATOMIC);
598 		if (!skb) {
599 			printk(KERN_WARNING "%s: can't alloc skbuff\n",
600 				__func__);
601 			return;
602 		}
603 	}
604 	memcpy(skb_put(skb, i), tmp, i);
605 	enqueue_super(l2, skb);
606 }
607 
608 
609 inline u_char
610 get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
611 {
612 	return skb->data[l2addrsize(l2)] & 0x10;
613 }
614 
615 inline u_char
616 get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
617 {
618 	u_char PF;
619 
620 	PF = get_PollFlag(l2, skb);
621 	dev_kfree_skb(skb);
622 	return PF;
623 }
624 
625 inline void
626 start_t200(struct layer2 *l2, int i)
627 {
628 	mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
629 	test_and_set_bit(FLG_T200_RUN, &l2->flag);
630 }
631 
632 inline void
633 restart_t200(struct layer2 *l2, int i)
634 {
635 	mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
636 	test_and_set_bit(FLG_T200_RUN, &l2->flag);
637 }
638 
639 inline void
640 stop_t200(struct layer2 *l2, int i)
641 {
642 	if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
643 		mISDN_FsmDelTimer(&l2->t200, i);
644 }
645 
646 inline void
647 st5_dl_release_l2l3(struct layer2 *l2)
648 {
649 	int pr;
650 
651 	if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
652 		pr = DL_RELEASE_CNF;
653 	else
654 		pr = DL_RELEASE_IND;
655 	l2up_create(l2, pr, 0, NULL);
656 }
657 
658 inline void
659 lapb_dl_release_l2l3(struct layer2 *l2, int f)
660 {
661 	if (test_bit(FLG_LAPB, &l2->flag))
662 		l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
663 	l2up_create(l2, f, 0, NULL);
664 }
665 
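/*
 * (Re)start link establishment: send SABM/SABME with the P bit set, stop
 * T203, (re)start T200 and enter ST_L2_5 (awaiting establishment).
 */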
666 static void
667 establishlink(struct FsmInst *fi)
668 {
669 	struct layer2 *l2 = fi->userdata;
670 	u_char cmd;
671 
672 	clear_exception(l2);
673 	l2->rc = 0;
674 	cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
675 	send_uframe(l2, NULL, cmd, CMD);
676 	mISDN_FsmDelTimer(&l2->t203, 1);
677 	restart_t200(l2, 1);
678 	test_and_clear_bit(FLG_PEND_REL, &l2->flag);
679 	freewin(l2);
680 	mISDN_FsmChangeState(fi, ST_L2_5);
681 }
682 
683 static void
684 l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
685 {
686 	struct sk_buff *skb = arg;
687 	struct layer2 *l2 = fi->userdata;
688 
689 	if (get_PollFlagFree(l2, skb))
690 		l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
691 	else
692 		l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
693 
694 }
695 
696 static void
697 l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
698 {
699 	struct sk_buff *skb = arg;
700 	struct layer2 *l2 = fi->userdata;
701 
702 	if (get_PollFlagFree(l2, skb))
703 		l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
704 	else {
705 		l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
706 		establishlink(fi);
707 		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
708 	}
709 }
710 
711 static void
712 l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
713 {
714 	struct sk_buff *skb = arg;
715 	struct layer2 *l2 = fi->userdata;
716 
717 	if (get_PollFlagFree(l2, skb))
718 		l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
719 	else
720 		l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
721 	establishlink(fi);
722 	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
723 }
724 
725 static void
726 l2_go_st3(struct FsmInst *fi, int event, void *arg)
727 {
728 	dev_kfree_skb((struct sk_buff *)arg);
729 	mISDN_FsmChangeState(fi, ST_L2_3);
730 }
731 
732 static void
733 l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
734 {
735 	struct layer2	*l2 = fi->userdata;
736 
737 	mISDN_FsmChangeState(fi, ST_L2_3);
738 	dev_kfree_skb((struct sk_buff *)arg);
739 	l2_tei(l2, MDL_ASSIGN_IND, 0);
740 }
741 
742 static void
743 l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
744 {
745 	struct layer2 *l2 = fi->userdata;
746 	struct sk_buff *skb = arg;
747 
748 	skb_queue_tail(&l2->ui_queue, skb);
749 	mISDN_FsmChangeState(fi, ST_L2_2);
750 	l2_tei(l2, MDL_ASSIGN_IND, 0);
751 }
752 
753 static void
754 l2_queue_ui(struct FsmInst *fi, int event, void *arg)
755 {
756 	struct layer2 *l2 = fi->userdata;
757 	struct sk_buff *skb = arg;
758 
759 	skb_queue_tail(&l2->ui_queue, skb);
760 }
761 
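/* Flush ui_queue: prepend address and UI control octet, then send each frame. */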
762 static void
763 tx_ui(struct layer2 *l2)
764 {
765 	struct sk_buff *skb;
766 	u_char header[MAX_L2HEADER_LEN];
767 	int i;
768 
769 	i = sethdraddr(l2, header, CMD);
770 	if (test_bit(FLG_LAPD_NET, &l2->flag))
771 		header[1] = 0xff; /* tei 127 */
772 	header[i++] = UI;
773 	while ((skb = skb_dequeue(&l2->ui_queue))) {
774 		memcpy(skb_push(skb, i), header, i);
775 		enqueue_ui(l2, skb);
776 	}
777 }
778 
779 static void
780 l2_send_ui(struct FsmInst *fi, int event, void *arg)
781 {
782 	struct layer2 *l2 = fi->userdata;
783 	struct sk_buff *skb = arg;
784 
785 	skb_queue_tail(&l2->ui_queue, skb);
786 	tx_ui(l2);
787 }
788 
789 static void
790 l2_got_ui(struct FsmInst *fi, int event, void *arg)
791 {
792 	struct layer2 *l2 = fi->userdata;
793 	struct sk_buff *skb = arg;
794 
795 	skb_pull(skb, l2headersize(l2, 1));
796 /*
797  *		UI frames are also received in states 1-3 (e.g. broadcast TEI)
798  */
799 
800 	if (l2->tm)
801 		l2_tei(l2, MDL_STATUS_UI_IND, 0);
802 	l2up(l2, DL_UNITDATA_IND, skb);
803 }
804 
805 static void
806 l2_establish(struct FsmInst *fi, int event, void *arg)
807 {
808 	struct sk_buff *skb = arg;
809 	struct layer2 *l2 = fi->userdata;
810 
811 	establishlink(fi);
812 	test_and_set_bit(FLG_L3_INIT, &l2->flag);
813 	dev_kfree_skb(skb);
814 }
815 
816 static void
817 l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
818 {
819 	struct sk_buff *skb = arg;
820 	struct layer2 *l2 = fi->userdata;
821 
822 	skb_queue_purge(&l2->i_queue);
823 	test_and_set_bit(FLG_L3_INIT, &l2->flag);
824 	test_and_clear_bit(FLG_PEND_REL, &l2->flag);
825 	dev_kfree_skb(skb);
826 }
827 
828 static void
829 l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
830 {
831 	struct sk_buff *skb = arg;
832 	struct layer2 *l2 = fi->userdata;
833 
834 	skb_queue_purge(&l2->i_queue);
835 	establishlink(fi);
836 	test_and_set_bit(FLG_L3_INIT, &l2->flag);
837 	dev_kfree_skb(skb);
838 }
839 
840 static void
841 l2_release(struct FsmInst *fi, int event, void *arg)
842 {
843 	struct layer2 *l2 = fi->userdata;
844 	struct sk_buff *skb = arg;
845 
846 	skb_trim(skb, 0);
847 	l2up(l2, DL_RELEASE_CNF, skb);
848 }
849 
850 static void
851 l2_pend_rel(struct FsmInst *fi, int event, void *arg)
852 {
853 	struct sk_buff *skb = arg;
854 	struct layer2 *l2 = fi->userdata;
855 
856 	test_and_set_bit(FLG_PEND_REL, &l2->flag);
857 	dev_kfree_skb(skb);
858 }
859 
860 static void
861 l2_disconnect(struct FsmInst *fi, int event, void *arg)
862 {
863 	struct layer2 *l2 = fi->userdata;
864 	struct sk_buff *skb = arg;
865 
866 	skb_queue_purge(&l2->i_queue);
867 	freewin(l2);
868 	mISDN_FsmChangeState(fi, ST_L2_6);
869 	l2->rc = 0;
870 	send_uframe(l2, NULL, DISC | 0x10, CMD);
871 	mISDN_FsmDelTimer(&l2->t203, 1);
872 	restart_t200(l2, 2);
873 	if (skb)
874 		dev_kfree_skb(skb);
875 }
876 
877 static void
878 l2_start_multi(struct FsmInst *fi, int event, void *arg)
879 {
880 	struct layer2	*l2 = fi->userdata;
881 	struct sk_buff	*skb = arg;
882 
883 	l2->vs = 0;
884 	l2->va = 0;
885 	l2->vr = 0;
886 	l2->sow = 0;
887 	clear_exception(l2);
888 	send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
889 	mISDN_FsmChangeState(fi, ST_L2_7);
890 	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
891 	skb_trim(skb, 0);
892 	l2up(l2, DL_ESTABLISH_IND, skb);
893 	if (l2->tm)
894 		l2_tei(l2, MDL_STATUS_UP_IND, 0);
895 }
896 
897 static void
898 l2_send_UA(struct FsmInst *fi, int event, void *arg)
899 {
900 	struct layer2 *l2 = fi->userdata;
901 	struct sk_buff *skb = arg;
902 
903 	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
904 }
905 
906 static void
907 l2_send_DM(struct FsmInst *fi, int event, void *arg)
908 {
909 	struct layer2 *l2 = fi->userdata;
910 	struct sk_buff *skb = arg;
911 
912 	send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
913 }
914 
915 static void
916 l2_restart_multi(struct FsmInst *fi, int event, void *arg)
917 {
918 	struct layer2	*l2 = fi->userdata;
919 	struct sk_buff	*skb = arg;
920 	int		est = 0;
921 
922 	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
923 
924 	l2mgr(l2, MDL_ERROR_IND, (void *) 'F');
925 
926 	if (l2->vs != l2->va) {
927 		skb_queue_purge(&l2->i_queue);
928 		est = 1;
929 	}
930 
931 	clear_exception(l2);
932 	l2->vs = 0;
933 	l2->va = 0;
934 	l2->vr = 0;
935 	l2->sow = 0;
936 	mISDN_FsmChangeState(fi, ST_L2_7);
937 	stop_t200(l2, 3);
938 	mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
939 
940 	if (est)
941 		l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
942 /*		mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
943  *		    MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
944  *		    0, NULL, 0);
945  */
946 	if (skb_queue_len(&l2->i_queue) && cansend(l2))
947 		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
948 }
949 
950 static void
951 l2_stop_multi(struct FsmInst *fi, int event, void *arg)
952 {
953 	struct layer2	*l2 = fi->userdata;
954 	struct sk_buff	*skb = arg;
955 
956 	mISDN_FsmChangeState(fi, ST_L2_4);
957 	mISDN_FsmDelTimer(&l2->t203, 3);
958 	stop_t200(l2, 4);
959 
960 	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
961 	skb_queue_purge(&l2->i_queue);
962 	freewin(l2);
963 	lapb_dl_release_l2l3(l2, DL_RELEASE_IND);
964 	if (l2->tm)
965 		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
966 }
967 
968 static void
969 l2_connected(struct FsmInst *fi, int event, void *arg)
970 {
971 	struct layer2	*l2 = fi->userdata;
972 	struct sk_buff	*skb = arg;
973 	int pr = -1;
974 
975 	if (!get_PollFlag(l2, skb)) {
976 		l2_mdl_error_ua(fi, event, arg);
977 		return;
978 	}
979 	dev_kfree_skb(skb);
980 	if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
981 		l2_disconnect(fi, event, NULL);
982 	if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
983 		pr = DL_ESTABLISH_CNF;
984 	} else if (l2->vs != l2->va) {
985 		skb_queue_purge(&l2->i_queue);
986 		pr = DL_ESTABLISH_IND;
987 	}
988 	stop_t200(l2, 5);
989 	l2->vr = 0;
990 	l2->vs = 0;
991 	l2->va = 0;
992 	l2->sow = 0;
993 	mISDN_FsmChangeState(fi, ST_L2_7);
994 	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
995 	if (pr != -1)
996 		l2up_create(l2, pr, 0, NULL);
997 
998 	if (skb_queue_len(&l2->i_queue) && cansend(l2))
999 		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1000 
1001 	if (l2->tm)
1002 		l2_tei(l2, MDL_STATUS_UP_IND, 0);
1003 }
1004 
1005 static void
1006 l2_released(struct FsmInst *fi, int event, void *arg)
1007 {
1008 	struct layer2 *l2 = fi->userdata;
1009 	struct sk_buff *skb = arg;
1010 
1011 	if (!get_PollFlag(l2, skb)) {
1012 		l2_mdl_error_ua(fi, event, arg);
1013 		return;
1014 	}
1015 	dev_kfree_skb(skb);
1016 	stop_t200(l2, 6);
1017 	lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1018 	mISDN_FsmChangeState(fi, ST_L2_4);
1019 	if (l2->tm)
1020 		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1021 }
1022 
1023 static void
1024 l2_reestablish(struct FsmInst *fi, int event, void *arg)
1025 {
1026 	struct layer2 *l2 = fi->userdata;
1027 	struct sk_buff *skb = arg;
1028 
1029 	if (!get_PollFlagFree(l2, skb)) {
1030 		establishlink(fi);
1031 		test_and_set_bit(FLG_L3_INIT, &l2->flag);
1032 	}
1033 }
1034 
1035 static void
1036 l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
1037 {
1038 	struct layer2 *l2 = fi->userdata;
1039 	struct sk_buff *skb = arg;
1040 
1041 	if (get_PollFlagFree(l2, skb)) {
1042 		stop_t200(l2, 7);
1043 		if (!test_bit(FLG_L3_INIT, &l2->flag))
1044 			skb_queue_purge(&l2->i_queue);
1045 		if (test_bit(FLG_LAPB, &l2->flag))
1046 			l2down_create(l2, PH_DEACTIVATE_REQ,
1047 				l2_newid(l2), 0, NULL);
1048 		st5_dl_release_l2l3(l2);
1049 		mISDN_FsmChangeState(fi, ST_L2_4);
1050 		if (l2->tm)
1051 			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1052 	}
1053 }
1054 
1055 static void
1056 l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
1057 {
1058 	struct layer2 *l2 = fi->userdata;
1059 	struct sk_buff *skb = arg;
1060 
1061 	if (get_PollFlagFree(l2, skb)) {
1062 		stop_t200(l2, 8);
1063 		lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1064 		mISDN_FsmChangeState(fi, ST_L2_4);
1065 		if (l2->tm)
1066 			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1067 	}
1068 }
1069 
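/*
 * Send a supervisory frame (RR/RNR/REJ) as command or response, carrying
 * the current V(R) and the requested poll/final bit.
 */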
1070 static void
1071 enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
1072 {
1073 	struct sk_buff *skb;
1074 	u_char tmp[MAX_L2HEADER_LEN];
1075 	int i;
1076 
1077 	i = sethdraddr(l2, tmp, cr);
1078 	if (test_bit(FLG_MOD128, &l2->flag)) {
1079 		tmp[i++] = typ;
1080 		tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
1081 	} else
1082 		tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
1083 	skb = mI_alloc_skb(i, GFP_ATOMIC);
1084 	if (!skb) {
1085 		printk(KERN_WARNING
1086 		    "isdnl2 can't alloc skbuff for enquiry_cr\n");
1087 		return;
1088 	}
1089 	memcpy(skb_put(skb, i), tmp, i);
1090 	enqueue_super(l2, skb);
1091 }
1092 
1093 inline void
1094 enquiry_response(struct layer2 *l2)
1095 {
1096 	if (test_bit(FLG_OWN_BUSY, &l2->flag))
1097 		enquiry_cr(l2, RNR, RSP, 1);
1098 	else
1099 		enquiry_cr(l2, RR, RSP, 1);
1100 	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1101 }
1102 
1103 inline void
1104 transmit_enquiry(struct layer2 *l2)
1105 {
1106 	if (test_bit(FLG_OWN_BUSY, &l2->flag))
1107 		enquiry_cr(l2, RNR, CMD, 1);
1108 	else
1109 		enquiry_cr(l2, RR, CMD, 1);
1110 	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1111 	start_t200(l2, 9);
1112 }
1113 
1114 
1115 static void
1116 nrerrorrecovery(struct FsmInst *fi)
1117 {
1118 	struct layer2 *l2 = fi->userdata;
1119 
1120 	l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
1121 	establishlink(fi);
1122 	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1123 }
1124 
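/*
 * Wind V(S) back to nr and requeue the outstanding frames from the
 * retransmit window at the head of i_queue, then trigger EV_L2_ACK_PULL so
 * they are sent again.
 */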
1125 static void
1126 invoke_retransmission(struct layer2 *l2, unsigned int nr)
1127 {
1128 	u_int	p1;
1129 
1130 	if (l2->vs != nr) {
1131 		while (l2->vs != nr) {
1132 			(l2->vs)--;
1133 			if (test_bit(FLG_MOD128, &l2->flag)) {
1134 				l2->vs %= 128;
1135 				p1 = (l2->vs - l2->va) % 128;
1136 			} else {
1137 				l2->vs %= 8;
1138 				p1 = (l2->vs - l2->va) % 8;
1139 			}
1140 			p1 = (p1 + l2->sow) % l2->window;
1141 			if (l2->windowar[p1])
1142 				skb_queue_head(&l2->i_queue, l2->windowar[p1]);
1143 			else
1144 				printk(KERN_WARNING
1145 				    "%s: windowar[%d] is NULL\n",
1146 				    __func__, p1);
1147 			l2->windowar[p1] = NULL;
1148 		}
1149 		mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
1150 	}
1151 }
1152 
1153 static void
1154 l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
1155 {
1156 	struct layer2 *l2 = fi->userdata;
1157 	struct sk_buff *skb = arg;
1158 	int PollFlag, rsp, typ = RR;
1159 	unsigned int nr;
1160 
1161 	rsp = *skb->data & 0x2;
1162 	if (test_bit(FLG_ORIG, &l2->flag))
1163 		rsp = !rsp;
1164 
1165 	skb_pull(skb, l2addrsize(l2));
1166 	if (IsRNR(skb->data, l2)) {
1167 		set_peer_busy(l2);
1168 		typ = RNR;
1169 	} else
1170 		clear_peer_busy(l2);
1171 	if (IsREJ(skb->data, l2))
1172 		typ = REJ;
1173 
1174 	if (test_bit(FLG_MOD128, &l2->flag)) {
1175 		PollFlag = (skb->data[1] & 0x1) == 0x1;
1176 		nr = skb->data[1] >> 1;
1177 	} else {
1178 		PollFlag = (skb->data[0] & 0x10);
1179 		nr = (skb->data[0] >> 5) & 0x7;
1180 	}
1181 	dev_kfree_skb(skb);
1182 
1183 	if (PollFlag) {
1184 		if (rsp)
1185 			l2mgr(l2, MDL_ERROR_IND, (void *) 'A');
1186 		else
1187 			enquiry_response(l2);
1188 	}
1189 	if (legalnr(l2, nr)) {
1190 		if (typ == REJ) {
1191 			setva(l2, nr);
1192 			invoke_retransmission(l2, nr);
1193 			stop_t200(l2, 10);
1194 			if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
1195 					EV_L2_T203, NULL, 6))
1196 				l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
1197 		} else if ((nr == l2->vs) && (typ == RR)) {
1198 			setva(l2, nr);
1199 			stop_t200(l2, 11);
1200 			mISDN_FsmRestartTimer(&l2->t203, l2->T203,
1201 					EV_L2_T203, NULL, 7);
1202 		} else if ((l2->va != nr) || (typ == RNR)) {
1203 			setva(l2, nr);
1204 			if (typ != RR)
1205 				mISDN_FsmDelTimer(&l2->t203, 9);
1206 			restart_t200(l2, 12);
1207 		}
1208 		if (skb_queue_len(&l2->i_queue) && (typ == RR))
1209 			mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1210 	} else
1211 		nrerrorrecovery(fi);
1212 }
1213 
1214 static void
1215 l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
1216 {
1217 	struct layer2 *l2 = fi->userdata;
1218 	struct sk_buff *skb = arg;
1219 
1220 	if (!test_bit(FLG_L3_INIT, &l2->flag))
1221 		skb_queue_tail(&l2->i_queue, skb);
1222 	else
1223 		dev_kfree_skb(skb);
1224 }
1225 
1226 static void
1227 l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
1228 {
1229 	struct layer2 *l2 = fi->userdata;
1230 	struct sk_buff *skb = arg;
1231 
1232 	skb_queue_tail(&l2->i_queue, skb);
1233 	mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1234 }
1235 
1236 static void
1237 l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
1238 {
1239 	struct layer2 *l2 = fi->userdata;
1240 	struct sk_buff *skb = arg;
1241 
1242 	skb_queue_tail(&l2->i_queue, skb);
1243 }
1244 
1245 static void
1246 l2_got_iframe(struct FsmInst *fi, int event, void *arg)
1247 {
1248 	struct layer2	*l2 = fi->userdata;
1249 	struct sk_buff	*skb = arg;
1250 	int		PollFlag, i;
1251 	u_int		ns, nr;
1252 
1253 	i = l2addrsize(l2);
1254 	if (test_bit(FLG_MOD128, &l2->flag)) {
1255 		PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
1256 		ns = skb->data[i] >> 1;
1257 		nr = (skb->data[i + 1] >> 1) & 0x7f;
1258 	} else {
1259 		PollFlag = (skb->data[i] & 0x10);
1260 		ns = (skb->data[i] >> 1) & 0x7;
1261 		nr = (skb->data[i] >> 5) & 0x7;
1262 	}
1263 	if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
1264 		dev_kfree_skb(skb);
1265 		if (PollFlag)
1266 			enquiry_response(l2);
1267 	} else {
1268 		if (l2->vr == ns) {
1269 			l2->vr++;
1270 			if (test_bit(FLG_MOD128, &l2->flag))
1271 				l2->vr %= 128;
1272 			else
1273 				l2->vr %= 8;
1274 			test_and_clear_bit(FLG_REJEXC, &l2->flag);
1275 			if (PollFlag)
1276 				enquiry_response(l2);
1277 			else
1278 				test_and_set_bit(FLG_ACK_PEND, &l2->flag);
1279 			skb_pull(skb, l2headersize(l2, 0));
1280 			l2up(l2, DL_DATA_IND, skb);
1281 		} else {
1282 			/* n(s)!=v(r) */
1283 			dev_kfree_skb(skb);
1284 			if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
1285 				if (PollFlag)
1286 					enquiry_response(l2);
1287 			} else {
1288 				enquiry_cr(l2, REJ, RSP, PollFlag);
1289 				test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1290 			}
1291 		}
1292 	}
1293 	if (legalnr(l2, nr)) {
1294 		if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
1295 		    (fi->state == ST_L2_7)) {
1296 			if (nr == l2->vs) {
1297 				stop_t200(l2, 13);
1298 				mISDN_FsmRestartTimer(&l2->t203, l2->T203,
1299 						EV_L2_T203, NULL, 7);
1300 			} else if (nr != l2->va)
1301 				restart_t200(l2, 14);
1302 		}
1303 		setva(l2, nr);
1304 	} else {
1305 		nrerrorrecovery(fi);
1306 		return;
1307 	}
1308 	if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
1309 		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1310 	if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
1311 		enquiry_cr(l2, RR, RSP, 0);
1312 }
1313 
1314 static void
1315 l2_got_tei(struct FsmInst *fi, int event, void *arg)
1316 {
1317 	struct layer2	*l2 = fi->userdata;
1318 	u_int		info;
1319 
1320 	l2->tei = (signed char)(long)arg;
1321 	set_channel_address(&l2->ch, l2->sapi, l2->tei);
1322 	info = DL_INFO_L2_CONNECT;
1323 	l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
1324 	if (fi->state == ST_L2_3) {
1325 		establishlink(fi);
1326 		test_and_set_bit(FLG_L3_INIT, &l2->flag);
1327 	} else
1328 		mISDN_FsmChangeState(fi, ST_L2_4);
1329 	if (skb_queue_len(&l2->ui_queue))
1330 		tx_ui(l2);
1331 }
1332 
1333 static void
1334 l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
1335 {
1336 	struct layer2 *l2 = fi->userdata;
1337 
1338 	if (test_bit(FLG_LAPD, &l2->flag) &&
1339 		test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1340 		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1341 	} else if (l2->rc == l2->N200) {
1342 		mISDN_FsmChangeState(fi, ST_L2_4);
1343 		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1344 		skb_queue_purge(&l2->i_queue);
1345 		l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
1346 		if (test_bit(FLG_LAPB, &l2->flag))
1347 			l2down_create(l2, PH_DEACTIVATE_REQ,
1348 				l2_newid(l2), 0, NULL);
1349 		st5_dl_release_l2l3(l2);
1350 		if (l2->tm)
1351 			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1352 	} else {
1353 		l2->rc++;
1354 		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1355 		send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
1356 			SABME : SABM) | 0x10, CMD);
1357 	}
1358 }
1359 
1360 static void
1361 l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
1362 {
1363 	struct layer2 *l2 = fi->userdata;
1364 
1365 	if (test_bit(FLG_LAPD, &l2->flag) &&
1366 		test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1367 		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1368 	} else if (l2->rc == l2->N200) {
1369 		mISDN_FsmChangeState(fi, ST_L2_4);
1370 		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1371 		l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
1372 		lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1373 		if (l2->tm)
1374 			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1375 	} else {
1376 		l2->rc++;
1377 		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
1378 			    NULL, 9);
1379 		send_uframe(l2, NULL, DISC | 0x10, CMD);
1380 	}
1381 }
1382 
1383 static void
1384 l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
1385 {
1386 	struct layer2 *l2 = fi->userdata;
1387 
1388 	if (test_bit(FLG_LAPD, &l2->flag) &&
1389 		test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1390 		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1391 		return;
1392 	}
1393 	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1394 	l2->rc = 0;
1395 	mISDN_FsmChangeState(fi, ST_L2_8);
1396 	transmit_enquiry(l2);
1397 	l2->rc++;
1398 }
1399 
1400 static void
1401 l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
1402 {
1403 	struct layer2 *l2 = fi->userdata;
1404 
1405 	if (test_bit(FLG_LAPD, &l2->flag) &&
1406 		test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1407 		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1408 		return;
1409 	}
1410 	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1411 	if (l2->rc == l2->N200) {
1412 		l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
1413 		establishlink(fi);
1414 		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1415 	} else {
1416 		transmit_enquiry(l2);
1417 		l2->rc++;
1418 	}
1419 }
1420 
1421 static void
1422 l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
1423 {
1424 	struct layer2 *l2 = fi->userdata;
1425 
1426 	if (test_bit(FLG_LAPD, &l2->flag) &&
1427 		test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1428 		mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
1429 		return;
1430 	}
1431 	mISDN_FsmChangeState(fi, ST_L2_8);
1432 	transmit_enquiry(l2);
1433 	l2->rc = 0;
1434 }
1435 
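/*
 * Send the next I-frame from i_queue: keep the original skb in the
 * retransmit window, prepend the address and N(S)/N(R) control fields to a
 * clone, hand it to layer 1 and make sure T200 is running.
 */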
1436 static void
1437 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1438 {
1439 	struct layer2	*l2 = fi->userdata;
1440 	struct sk_buff	*skb, *nskb, *oskb;
1441 	u_char		header[MAX_L2HEADER_LEN];
1442 	u_int		i, p1;
1443 
1444 	if (!cansend(l2))
1445 		return;
1446 
1447 	skb = skb_dequeue(&l2->i_queue);
1448 	if (!skb)
1449 		return;
1450 
1451 	if (test_bit(FLG_MOD128, &l2->flag))
1452 		p1 = (l2->vs - l2->va) % 128;
1453 	else
1454 		p1 = (l2->vs - l2->va) % 8;
1455 	p1 = (p1 + l2->sow) % l2->window;
1456 	if (l2->windowar[p1]) {
1457 		printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
1458 		    p1);
1459 		dev_kfree_skb(l2->windowar[p1]);
1460 	}
1461 	l2->windowar[p1] = skb;
1462 	i = sethdraddr(l2, header, CMD);
1463 	if (test_bit(FLG_MOD128, &l2->flag)) {
1464 		header[i++] = l2->vs << 1;
1465 		header[i++] = l2->vr << 1;
1466 		l2->vs = (l2->vs + 1) % 128;
1467 	} else {
1468 		header[i++] = (l2->vr << 5) | (l2->vs << 1);
1469 		l2->vs = (l2->vs + 1) % 8;
1470 	}
1471 
1472 	nskb = skb_clone(skb, GFP_ATOMIC);
1473 	p1 = skb_headroom(nskb);
1474 	if (p1 >= i)
1475 		memcpy(skb_push(nskb, i), header, i);
1476 	else {
1477 		printk(KERN_WARNING
1478 		    "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
1479 		oskb = nskb;
1480 		nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
1481 		if (!nskb) {
1482 			dev_kfree_skb(oskb);
1483 			printk(KERN_WARNING "%s: no skb mem\n", __func__);
1484 			return;
1485 		}
1486 		memcpy(skb_put(nskb, i), header, i);
1487 		memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
1488 		dev_kfree_skb(oskb);
1489 	}
1490 	l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
1491 	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1492 	if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
1493 		mISDN_FsmDelTimer(&l2->t203, 13);
1494 		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
1495 	}
1496 }
1497 
1498 static void
1499 l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
1500 {
1501 	struct layer2 *l2 = fi->userdata;
1502 	struct sk_buff *skb = arg;
1503 	int PollFlag, rsp, rnr = 0;
1504 	unsigned int nr;
1505 
1506 	rsp = *skb->data & 0x2;
1507 	if (test_bit(FLG_ORIG, &l2->flag))
1508 		rsp = !rsp;
1509 
1510 	skb_pull(skb, l2addrsize(l2));
1511 
1512 	if (IsRNR(skb->data, l2)) {
1513 		set_peer_busy(l2);
1514 		rnr = 1;
1515 	} else
1516 		clear_peer_busy(l2);
1517 
1518 	if (test_bit(FLG_MOD128, &l2->flag)) {
1519 		PollFlag = (skb->data[1] & 0x1) == 0x1;
1520 		nr = skb->data[1] >> 1;
1521 	} else {
1522 		PollFlag = (skb->data[0] & 0x10);
1523 		nr = (skb->data[0] >> 5) & 0x7;
1524 	}
1525 	dev_kfree_skb(skb);
1526 	if (rsp && PollFlag) {
1527 		if (legalnr(l2, nr)) {
1528 			if (rnr) {
1529 				restart_t200(l2, 15);
1530 			} else {
1531 				stop_t200(l2, 16);
1532 				mISDN_FsmAddTimer(&l2->t203, l2->T203,
1533 					    EV_L2_T203, NULL, 5);
1534 				setva(l2, nr);
1535 			}
1536 			invoke_retransmission(l2, nr);
1537 			mISDN_FsmChangeState(fi, ST_L2_7);
1538 			if (skb_queue_len(&l2->i_queue) && cansend(l2))
1539 				mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1540 		} else
1541 			nrerrorrecovery(fi);
1542 	} else {
1543 		if (!rsp && PollFlag)
1544 			enquiry_response(l2);
1545 		if (legalnr(l2, nr))
1546 			setva(l2, nr);
1547 		else
1548 			nrerrorrecovery(fi);
1549 	}
1550 }
1551 
1552 static void
1553 l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
1554 {
1555 	struct layer2 *l2 = fi->userdata;
1556 	struct sk_buff *skb = arg;
1557 
1558 	skb_pull(skb, l2addrsize(l2) + 1);
1559 
1560 	if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
1561 	    (IsUA(skb->data) && (fi->state == ST_L2_7))) {
1562 		l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
1563 		establishlink(fi);
1564 		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1565 	}
1566 	dev_kfree_skb(skb);
1567 }
1568 
1569 static void
1570 l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
1571 {
1572 	struct layer2 *l2 = fi->userdata;
1573 
1574 	skb_queue_purge(&l2->ui_queue);
1575 	l2->tei = GROUP_TEI;
1576 	mISDN_FsmChangeState(fi, ST_L2_1);
1577 }
1578 
1579 static void
1580 l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
1581 {
1582 	struct layer2 *l2 = fi->userdata;
1583 
1584 	skb_queue_purge(&l2->ui_queue);
1585 	l2->tei = GROUP_TEI;
1586 	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
1587 	mISDN_FsmChangeState(fi, ST_L2_1);
1588 }
1589 
1590 static void
1591 l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
1592 {
1593 	struct layer2 *l2 = fi->userdata;
1594 
1595 	skb_queue_purge(&l2->i_queue);
1596 	skb_queue_purge(&l2->ui_queue);
1597 	freewin(l2);
1598 	l2->tei = GROUP_TEI;
1599 	stop_t200(l2, 17);
1600 	st5_dl_release_l2l3(l2);
1601 	mISDN_FsmChangeState(fi, ST_L2_1);
1602 }
1603 
1604 static void
1605 l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
1606 {
1607 	struct layer2 *l2 = fi->userdata;
1608 
1609 	skb_queue_purge(&l2->ui_queue);
1610 	l2->tei = GROUP_TEI;
1611 	stop_t200(l2, 18);
1612 	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
1613 	mISDN_FsmChangeState(fi, ST_L2_1);
1614 }
1615 
1616 static void
1617 l2_tei_remove(struct FsmInst *fi, int event, void *arg)
1618 {
1619 	struct layer2 *l2 = fi->userdata;
1620 
1621 	skb_queue_purge(&l2->i_queue);
1622 	skb_queue_purge(&l2->ui_queue);
1623 	freewin(l2);
1624 	l2->tei = GROUP_TEI;
1625 	stop_t200(l2, 17);
1626 	mISDN_FsmDelTimer(&l2->t203, 19);
1627 	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
1628 /*	mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
1629  *		MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
1630  *		0, NULL, 0);
1631  */
1632 	mISDN_FsmChangeState(fi, ST_L2_1);
1633 }
1634 
1635 static void
1636 l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
1637 {
1638 	struct layer2 *l2 = fi->userdata;
1639 	struct sk_buff *skb = arg;
1640 
1641 	skb_queue_purge(&l2->i_queue);
1642 	skb_queue_purge(&l2->ui_queue);
1643 	if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
1644 		l2up(l2, DL_RELEASE_IND, skb);
1645 	else
1646 		dev_kfree_skb(skb);
1647 }
1648 
1649 static void
1650 l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
1651 {
1652 	struct layer2 *l2 = fi->userdata;
1653 	struct sk_buff *skb = arg;
1654 
1655 	skb_queue_purge(&l2->i_queue);
1656 	skb_queue_purge(&l2->ui_queue);
1657 	freewin(l2);
1658 	stop_t200(l2, 19);
1659 	st5_dl_release_l2l3(l2);
1660 	mISDN_FsmChangeState(fi, ST_L2_4);
1661 	if (l2->tm)
1662 		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1663 	dev_kfree_skb(skb);
1664 }
1665 
1666 static void
1667 l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
1668 {
1669 	struct layer2 *l2 = fi->userdata;
1670 	struct sk_buff *skb = arg;
1671 
1672 	skb_queue_purge(&l2->ui_queue);
1673 	stop_t200(l2, 20);
1674 	l2up(l2, DL_RELEASE_CNF, skb);
1675 	mISDN_FsmChangeState(fi, ST_L2_4);
1676 	if (l2->tm)
1677 		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1678 }
1679 
1680 static void
1681 l2_persistent_da(struct FsmInst *fi, int event, void *arg)
1682 {
1683 	struct layer2 *l2 = fi->userdata;
1684 	struct sk_buff *skb = arg;
1685 
1686 	skb_queue_purge(&l2->i_queue);
1687 	skb_queue_purge(&l2->ui_queue);
1688 	freewin(l2);
1689 	stop_t200(l2, 19);
1690 	mISDN_FsmDelTimer(&l2->t203, 19);
1691 	l2up(l2, DL_RELEASE_IND, skb);
1692 	mISDN_FsmChangeState(fi, ST_L2_4);
1693 	if (l2->tm)
1694 		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1695 }
1696 
1697 static void
1698 l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
1699 {
1700 	struct layer2 *l2 = fi->userdata;
1701 	struct sk_buff *skb = arg;
1702 
1703 	if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
1704 		enquiry_cr(l2, RNR, RSP, 0);
1705 		test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1706 	}
1707 	if (skb)
1708 		dev_kfree_skb(skb);
1709 }
1710 
1711 static void
1712 l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
1713 {
1714 	struct layer2 *l2 = fi->userdata;
1715 	struct sk_buff *skb = arg;
1716 
1717 	if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
1718 		enquiry_cr(l2, RR, RSP, 0);
1719 		test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1720 	}
1721 	if (skb)
1722 		dev_kfree_skb(skb);
1723 }
1724 
1725 static void
1726 l2_frame_error(struct FsmInst *fi, int event, void *arg)
1727 {
1728 	struct layer2 *l2 = fi->userdata;
1729 
1730 	l2mgr(l2, MDL_ERROR_IND, arg);
1731 }
1732 
1733 static void
1734 l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
1735 {
1736 	struct layer2 *l2 = fi->userdata;
1737 
1738 	l2mgr(l2, MDL_ERROR_IND, arg);
1739 	establishlink(fi);
1740 	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1741 }
1742 
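/* State/event dispatch table for the layer 2 state machine. */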
1743 static struct FsmNode L2FnList[] =
1744 {
1745 	{ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
1746 	{ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
1747 	{ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
1748 	{ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
1749 	{ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
1750 	{ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
1751 	{ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
1752 	{ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
1753 	{ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
1754 	{ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
1755 	{ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
1756 	{ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
1757 	{ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
1758 	{ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
1759 	{ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
1760 	{ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
1761 	{ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
1762 	{ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
1763 	{ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
1764 	{ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
1765 	{ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
1766 	{ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
1767 	{ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
1768 	{ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
1769 	{ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
1770 	{ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
1771 	{ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
1772 	{ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
1773 	{ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
1774 	{ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
1775 	{ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
1776 	{ST_L2_4, EV_L2_SABME, l2_start_multi},
1777 	{ST_L2_5, EV_L2_SABME, l2_send_UA},
1778 	{ST_L2_6, EV_L2_SABME, l2_send_DM},
1779 	{ST_L2_7, EV_L2_SABME, l2_restart_multi},
1780 	{ST_L2_8, EV_L2_SABME, l2_restart_multi},
1781 	{ST_L2_4, EV_L2_DISC, l2_send_DM},
1782 	{ST_L2_5, EV_L2_DISC, l2_send_DM},
1783 	{ST_L2_6, EV_L2_DISC, l2_send_UA},
1784 	{ST_L2_7, EV_L2_DISC, l2_stop_multi},
1785 	{ST_L2_8, EV_L2_DISC, l2_stop_multi},
1786 	{ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
1787 	{ST_L2_5, EV_L2_UA, l2_connected},
1788 	{ST_L2_6, EV_L2_UA, l2_released},
1789 	{ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
1790 	{ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
1791 	{ST_L2_4, EV_L2_DM, l2_reestablish},
1792 	{ST_L2_5, EV_L2_DM, l2_st5_dm_release},
1793 	{ST_L2_6, EV_L2_DM, l2_st6_dm_release},
1794 	{ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
1795 	{ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
1796 	{ST_L2_1, EV_L2_UI, l2_got_ui},
1797 	{ST_L2_2, EV_L2_UI, l2_got_ui},
1798 	{ST_L2_3, EV_L2_UI, l2_got_ui},
1799 	{ST_L2_4, EV_L2_UI, l2_got_ui},
1800 	{ST_L2_5, EV_L2_UI, l2_got_ui},
1801 	{ST_L2_6, EV_L2_UI, l2_got_ui},
1802 	{ST_L2_7, EV_L2_UI, l2_got_ui},
1803 	{ST_L2_8, EV_L2_UI, l2_got_ui},
1804 	{ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
1805 	{ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
1806 	{ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
1807 	{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
1808 	{ST_L2_7, EV_L2_I, l2_got_iframe},
1809 	{ST_L2_8, EV_L2_I, l2_got_iframe},
1810 	{ST_L2_5, EV_L2_T200, l2_st5_tout_200},
1811 	{ST_L2_6, EV_L2_T200, l2_st6_tout_200},
1812 	{ST_L2_7, EV_L2_T200, l2_st7_tout_200},
1813 	{ST_L2_8, EV_L2_T200, l2_st8_tout_200},
1814 	{ST_L2_7, EV_L2_T203, l2_st7_tout_203},
1815 	{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
1816 	{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
1817 	{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
1818 	{ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
1819 	{ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
1820 	{ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
1821 	{ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
1822 	{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
1823 	{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
1824 	{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
1825 	{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
1826 	{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
1827 	{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
1828 	{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
1829 	{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
1830 	{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
1831 	{ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
1832 	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
1833 };
1834 
1835 #define L2_FN_COUNT (sizeof(L2FnList)/sizeof(struct FsmNode))
1836 
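/*
 * Parse a received frame: verify the LAPD address (SAPI/TEI must match),
 * classify it as I, S, UI, SABM(E), UA, DISC, DM or FRMR, check its length
 * and command/response direction, and feed the matching event to the FSM.
 */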
1837 static int
1838 ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
1839 {
1840 	u_char	*datap = skb->data;
1841 	int	ret = -EINVAL;
1842 	int	psapi, ptei;
1843 	u_int	l;
1844 	int	c = 0;
1845 
1846 	l = l2addrsize(l2);
1847 	if (skb->len <= l) {
1848 		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
1849 		return ret;
1850 	}
1851 	if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
1852 		psapi = *datap++;
1853 		ptei = *datap++;
1854 		if ((psapi & 1) || !(ptei & 1)) {
1855 			printk(KERN_WARNING
1856 			    "l2 D-channel frame wrong EA0/EA1\n");
1857 			return ret;
1858 		}
1859 		psapi >>= 2;
1860 		ptei >>= 1;
1861 		if (psapi != l2->sapi) {
1862 			/* not our business
1863 			 * printk(KERN_DEBUG "%s: sapi %d/%d sapi mismatch\n",
1864 			 *  __func__,
1865 			 *	psapi, l2->sapi);
1866 			 */
1867 			dev_kfree_skb(skb);
1868 			return 0;
1869 		}
1870 		if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
1871 			/* not our business
1872 			 * printk(KERN_DEBUG "%s: tei %d/%d sapi %d mismatch\n",
1873 			 *  __func__,
1874 			 *	ptei, l2->tei, psapi);
1875 			 */
1876 			dev_kfree_skb(skb);
1877 			return 0;
1878 		}
1879 	} else
1880 		datap += l;
1881 	if (!(*datap & 1)) {	/* I-Frame */
1882 		c = iframe_error(l2, skb);
1883 		if (!c)
1884 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
1885 	} else if (IsSFrame(datap, l2)) {	/* S-Frame */
1886 		c = super_error(l2, skb);
1887 		if (!c)
1888 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
1889 	} else if (IsUI(datap)) {
1890 		c = UI_error(l2, skb);
1891 		if (!c)
1892 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
1893 	} else if (IsSABME(datap, l2)) {
1894 		c = unnum_error(l2, skb, CMD);
1895 		if (!c)
1896 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
1897 	} else if (IsUA(datap)) {
1898 		c = unnum_error(l2, skb, RSP);
1899 		if (!c)
1900 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
1901 	} else if (IsDISC(datap)) {
1902 		c = unnum_error(l2, skb, CMD);
1903 		if (!c)
1904 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
1905 	} else if (IsDM(datap)) {
1906 		c = unnum_error(l2, skb, RSP);
1907 		if (!c)
1908 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
1909 	} else if (IsFRMR(datap)) {
1910 		c = FRMR_error(l2, skb);
1911 		if (!c)
1912 			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
1913 	} else
1914 		c = 'L';
1915 	if (c) {
1916 		printk(KERN_WARNING "l2 D-channel frame error %c\n", c);
1917 		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
1918 	}
1919 	return ret;
1920 }
1921 
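/*
 * mISDNchannel send hook: translate PH_* indications from layer 1 and DL_*
 * requests from layer 3 into layer 2 FSM events.
 */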
1922 static int
1923 l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
1924 {
1925 	struct layer2		*l2 = container_of(ch, struct layer2, ch);
1926 	struct mISDNhead	*hh =  mISDN_HEAD_P(skb);
1927 	int 			ret = -EINVAL;
1928 
1929 	if (*debug & DEBUG_L2_RECV)
1930 		printk(KERN_DEBUG "%s: prim(%x) id(%x) tei(%d)\n",
1931 		    __func__, hh->prim, hh->id, l2->tei);
1932 	switch (hh->prim) {
1933 	case PH_DATA_IND:
1934 		ret = ph_data_indication(l2, hh, skb);
1935 		break;
1936 	case PH_DATA_CNF:
1937 		ret = ph_data_confirm(l2, hh, skb);
1938 		break;
1939 	case PH_ACTIVATE_IND:
1940 		test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
1941 		l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
1942 		if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
1943 			ret = mISDN_FsmEvent(&l2->l2m,
1944 				EV_L2_DL_ESTABLISH_REQ, skb);
1945 		break;
1946 	case PH_DEACTIVATE_IND:
1947 		test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
1948 		l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
1949 		ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
1950 		break;
1951 	case MPH_INFORMATION_IND:
1952 		if (!l2->up)
1953 			break;
1954 		ret = l2->up->send(l2->up, skb);
1955 		break;
1956 	case DL_DATA_REQ:
1957 		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
1958 		break;
1959 	case DL_UNITDATA_REQ:
1960 		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
1961 		break;
1962 	case DL_ESTABLISH_REQ:
1963 		if (test_bit(FLG_LAPB, &l2->flag))
1964 			test_and_set_bit(FLG_ORIG, &l2->flag);
1965 		if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
1966 			if (test_bit(FLG_LAPD, &l2->flag) ||
1967 				test_bit(FLG_ORIG, &l2->flag))
1968 				ret = mISDN_FsmEvent(&l2->l2m,
1969 					EV_L2_DL_ESTABLISH_REQ, skb);
1970 		} else {
1971 			if (test_bit(FLG_LAPD, &l2->flag) ||
1972 				test_bit(FLG_ORIG, &l2->flag)) {
1973 				test_and_set_bit(FLG_ESTAB_PEND,
1974 					&l2->flag);
1975 			}
1976 			ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
1977 			    skb);
1978 		}
1979 		break;
1980 	case DL_RELEASE_REQ:
1981 		if (test_bit(FLG_LAPB, &l2->flag))
1982 			l2down_create(l2, PH_DEACTIVATE_REQ,
1983 				l2_newid(l2), 0, NULL);
1984 		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
1985 		    skb);
1986 		break;
1987 	default:
1988 		if (*debug & DEBUG_L2)
1989 			l2m_debug(&l2->l2m, "l2 unknown pr %04x",
1990 			    hh->prim);
1991 	}
1992 	if (ret) {
1993 		dev_kfree_skb(skb);
1994 		ret = 0;
1995 	}
1996 	return ret;
1997 }
1998 
1999 int
2000 tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
2001 {
2002 	int		ret = -EINVAL;
2003 
2004 	if (*debug & DEBUG_L2_TEI)
2005 		printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
2006 	switch (cmd) {
2007 	case (MDL_ASSIGN_REQ):
2008 		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
2009 		break;
2010 	case (MDL_REMOVE_REQ):
2011 		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
2012 		break;
2013 	case (MDL_ERROR_IND):
2014 		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
2015 		break;
2016 	case (MDL_ERROR_RSP):
2017 		/* ETS 300-125 5.3.2.1 Test: TC13010 */
2018 		printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n");
2019 		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
2020 		break;
2021 	}
2022 	return ret;
2023 }
2024 
2025 static void
2026 release_l2(struct layer2 *l2)
2027 {
2028 	mISDN_FsmDelTimer(&l2->t200, 21);
2029 	mISDN_FsmDelTimer(&l2->t203, 16);
2030 	skb_queue_purge(&l2->i_queue);
2031 	skb_queue_purge(&l2->ui_queue);
2032 	skb_queue_purge(&l2->down_queue);
2033 	ReleaseWin(l2);
2034 	if (test_bit(FLG_LAPD, &l2->flag)) {
2035 		TEIrelease(l2);
2036 		if (l2->ch.st)
2037 			l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
2038 			    CLOSE_CHANNEL, NULL);
2039 	}
2040 	kfree(l2);
2041 }
2042 
2043 static int
2044 l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
2045 {
2046 	struct layer2		*l2 = container_of(ch, struct layer2, ch);
2047 	u_int			info;
2048 
2049 	if (*debug & DEBUG_L2_CTRL)
2050 		printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
2051 
2052 	switch (cmd) {
2053 	case OPEN_CHANNEL:
2054 		if (test_bit(FLG_LAPD, &l2->flag)) {
2055 			set_channel_address(&l2->ch, l2->sapi, l2->tei);
2056 			info = DL_INFO_L2_CONNECT;
2057 			l2up_create(l2, DL_INFORMATION_IND,
2058 			    sizeof(info), &info);
2059 		}
2060 		break;
2061 	case CLOSE_CHANNEL:
2062 		if (l2->ch.peer)
2063 			l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
2064 		release_l2(l2);
2065 		break;
2066 	}
2067 	return 0;
2068 }
2069 
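/*
 * Allocate and set up a layer 2 instance for LAPD (TE or NT side) or
 * X.75/LAPB, configure window size, T200, N200 and T203, and for LAPD open
 * the matching D-channel below.
 */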
2070 struct layer2 *
2071 create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, u_long arg)
2072 {
2073 	struct layer2		*l2;
2074 	struct channel_req	rq;
2075 
2076 	l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
2077 	if (!l2) {
2078 		printk(KERN_ERR "kzalloc layer2 failed\n");
2079 		return NULL;
2080 	}
2081 	l2->next_id = 1;
2082 	l2->down_id = MISDN_ID_NONE;
2083 	l2->up = ch;
2084 	l2->ch.st = ch->st;
2085 	l2->ch.send = l2_send;
2086 	l2->ch.ctrl = l2_ctrl;
2087 	switch (protocol) {
2088 	case ISDN_P_LAPD_NT:
2089 		test_and_set_bit(FLG_LAPD, &l2->flag);
2090 		test_and_set_bit(FLG_LAPD_NET, &l2->flag);
2091 		test_and_set_bit(FLG_MOD128, &l2->flag);
2092 		l2->sapi = 0;
2093 		l2->maxlen = MAX_DFRAME_LEN;
2094 		if (test_bit(OPTION_L2_PMX, &options))
2095 			l2->window = 7;
2096 		else
2097 			l2->window = 1;
2098 		if (test_bit(OPTION_L2_PTP, &options))
2099 			test_and_set_bit(FLG_PTP, &l2->flag);
2100 		if (test_bit(OPTION_L2_FIXEDTEI, &options))
2101 			test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
2102 		l2->tei = (u_int)arg;
2103 		l2->T200 = 1000;
2104 		l2->N200 = 3;
2105 		l2->T203 = 10000;
2106 		if (test_bit(OPTION_L2_PMX, &options))
2107 			rq.protocol = ISDN_P_NT_E1;
2108 		else
2109 			rq.protocol = ISDN_P_NT_S0;
2110 		rq.adr.channel = 0;
2111 		l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
2112 		break;
2113 	case ISDN_P_LAPD_TE:
2114 		test_and_set_bit(FLG_LAPD, &l2->flag);
2115 		test_and_set_bit(FLG_MOD128, &l2->flag);
2116 		test_and_set_bit(FLG_ORIG, &l2->flag);
2117 		l2->sapi = 0;
2118 		l2->maxlen = MAX_DFRAME_LEN;
2119 		if (test_bit(OPTION_L2_PMX, &options))
2120 			l2->window = 7;
2121 		else
2122 			l2->window = 1;
2123 		if (test_bit(OPTION_L2_PTP, &options))
2124 			test_and_set_bit(FLG_PTP, &l2->flag);
2125 		if (test_bit(OPTION_L2_FIXEDTEI, &options))
2126 			test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
2127 		l2->tei = (u_int)arg;
2128 		l2->T200 = 1000;
2129 		l2->N200 = 3;
2130 		l2->T203 = 10000;
2131 		if (test_bit(OPTION_L2_PMX, &options))
2132 			rq.protocol = ISDN_P_TE_E1;
2133 		else
2134 			rq.protocol = ISDN_P_TE_S0;
2135 		rq.adr.channel = 0;
2136 		l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
2137 		break;
2138 	case ISDN_P_B_X75SLP:
2139 		test_and_set_bit(FLG_LAPB, &l2->flag);
2140 		l2->window = 7;
2141 		l2->maxlen = MAX_DATA_SIZE;
2142 		l2->T200 = 1000;
2143 		l2->N200 = 4;
2144 		l2->T203 = 5000;
2145 		l2->addr.A = 3;
2146 		l2->addr.B = 1;
2147 		break;
2148 	default:
2149 		printk(KERN_ERR "layer2 create failed prt %x\n",
2150 			protocol);
2151 		kfree(l2);
2152 		return NULL;
2153 	}
2154 	skb_queue_head_init(&l2->i_queue);
2155 	skb_queue_head_init(&l2->ui_queue);
2156 	skb_queue_head_init(&l2->down_queue);
2157 	skb_queue_head_init(&l2->tmp_queue);
2158 	InitWin(l2);
2159 	l2->l2m.fsm = &l2fsm;
2160 	if (test_bit(FLG_LAPB, &l2->flag) ||
2161 		test_bit(FLG_PTP, &l2->flag) ||
2162 		test_bit(FLG_LAPD_NET, &l2->flag))
2163 		l2->l2m.state = ST_L2_4;
2164 	else
2165 		l2->l2m.state = ST_L2_1;
2166 	l2->l2m.debug = *debug;
2167 	l2->l2m.userdata = l2;
2168 	l2->l2m.userint = 0;
2169 	l2->l2m.printdebug = l2m_debug;
2170 
2171 	mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
2172 	mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
2173 	return l2;
2174 }
2175 
2176 static int
2177 x75create(struct channel_req *crq)
2178 {
2179 	struct layer2	*l2;
2180 
2181 	if (crq->protocol != ISDN_P_B_X75SLP)
2182 		return -EPROTONOSUPPORT;
2183 	l2 = create_l2(crq->ch, crq->protocol, 0, 0);
2184 	if (!l2)
2185 		return -ENOMEM;
2186 	crq->ch = &l2->ch;
2187 	crq->protocol = ISDN_P_B_HDLC;
2188 	return 0;
2189 }
2190 
2191 static struct Bprotocol X75SLP = {
2192 	.Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
2193 	.name = "X75SLP",
2194 	.create = x75create
2195 };
2196 
2197 int
2198 Isdnl2_Init(u_int *deb)
2199 {
2200 	debug = deb;
2201 	mISDN_register_Bprotocol(&X75SLP);
2202 	l2fsm.state_count = L2_STATE_COUNT;
2203 	l2fsm.event_count = L2_EVENT_COUNT;
2204 	l2fsm.strEvent = strL2Event;
2205 	l2fsm.strState = strL2State;
2206 	mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
2207 	TEIInit(deb);
2208 	return 0;
2209 }
2210 
2211 void
2212 Isdnl2_cleanup(void)
2213 {
2214 	mISDN_unregister_Bprotocol(&X75SLP);
2215 	TEIFree();
2216 	mISDN_FsmFree(&l2fsm);
2217 }
2218 
2219