// SPDX-License-Identifier: GPL-2.0-only
/*
 * NETJet mISDN driver
 *
 * Author       Karsten Keil <keil@isdn4linux.de>
 *
 * Copyright 2009  by Karsten Keil <keil@isdn4linux.de>
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>
#include "ipac.h"
#include "iohelper.h"
#include "netjet.h"
#include "isdnhdlc.h"

#define NETJET_REV	"2.0"

enum nj_types {
	NETJET_S_TJ300,
	NETJET_S_TJ320,
	ENTERNOW__TJ320,
};

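/*
 * Bookkeeping for one Tiger DMA ring (one for send, one for receive):
 * 'start' is the kernel virtual base and 'size' the ring length in 32-bit
 * words, 'idx' is the driver's current word index, while dmastart/dmairq/
 * dmaend mirror the bus addresses programmed into the START/IRQ/END
 * registers and dmacur caches the last value read back from the current
 * address register.
 */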
struct tiger_dma {
	size_t		size;
	u32		*start;
	int		idx;
	u32		dmastart;
	u32		dmairq;
	u32		dmaend;
	u32		dmacur;
};

struct tiger_hw;

struct tiger_ch {
	struct bchannel		bch;
	struct tiger_hw		*nj;
	int			idx;
	int			free;
	int			lastrx;
	u16			rxstate;
	u16			txstate;
	struct isdnhdlc_vars	hsend;
	struct isdnhdlc_vars	hrecv;
	u8			*hsbuf;
	u8			*hrbuf;
};

#define TX_INIT		0x0001
#define TX_IDLE		0x0002
#define TX_RUN		0x0004
#define TX_UNDERRUN	0x0100
#define RX_OVERRUN	0x0100

#define LOG_SIZE	64

struct tiger_hw {
	struct list_head	list;
	struct pci_dev		*pdev;
	char			name[MISDN_MAX_IDLEN];
	enum nj_types		typ;
	int			irq;
	u32			irqcnt;
	u32			base;
	size_t			base_s;
	dma_addr_t		dma;
	void			*dma_p;
	spinlock_t		lock;	/* lock HW */
	struct isac_hw		isac;
	struct tiger_dma	send;
	struct tiger_dma	recv;
	struct tiger_ch		bc[2];
	u8			ctrlreg;
	u8			dmactrl;
	u8			auxd;
	u8			last_is0;
	u8			irqmask0;
	char			log[LOG_SIZE];
};

static LIST_HEAD(Cards);
static DEFINE_RWLOCK(card_lock); /* protect Cards */
static u32 debug;
static int nj_cnt;

static void
_set_debug(struct tiger_hw *card)
{
	card->isac.dch.debug = debug;
	card->bc[0].bch.debug = debug;
	card->bc[1].bch.debug = debug;
}

static int
set_debug(const char *val, const struct kernel_param *kp)
{
	int ret;
	struct tiger_hw *card;

	ret = param_set_uint(val, kp);
	if (!ret) {
		read_lock(&card_lock);
		list_for_each_entry(card, &Cards, list)
			_set_debug(card);
		read_unlock(&card_lock);
	}
	return ret;
}

MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(NETJET_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Netjet debug mask");
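/*
 * Note: with module_param_call() and mode S_IRUGO | S_IWUSR the debug mask
 * should also be readable and writable at runtime (typically via
 * /sys/module/netjet/parameters/debug, assuming the module is built under
 * the name "netjet"); set_debug() then pushes the new mask to every
 * registered card.
 */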

static void
nj_disable_hwirq(struct tiger_hw *card)
{
	outb(0, card->base + NJ_IRQMASK0);
	outb(0, card->base + NJ_IRQMASK1);
}


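/*
 * ISAC register access is indirect: bits 5..4 of the register offset are
 * written to the AUXDATA page bits (the low two bits of card->auxd) and the
 * low nibble selects one of the I/O addresses spaced 4 bytes apart starting
 * at NJ_ISAC_OFF.
 */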
static u8
ReadISAC_nj(void *p, u8 offset)
{
	struct tiger_hw *card = p;
	u8 ret;

	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	ret = inb(card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
	return ret;
}

static void
WriteISAC_nj(void *p, u8 offset, u8 value)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outb(value, card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
}

static void
ReadFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	insb(card->base + NJ_ISAC_OFF, data, size);
}

static void
WriteFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outsb(card->base + NJ_ISAC_OFF, data, size);
}

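/*
 * Layout of the DMA rings (as used by the masking below): every 32-bit word
 * carries one byte per B-channel, B1 in bits 7..0 and B2 in bits 15..8.
 * fill_mem() overwrites 'cnt' words of this channel's byte lane in the send
 * ring with a constant fill pattern (0xff is used for idle).
 */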
static void
fill_mem(struct tiger_ch *bc, u32 idx, u32 cnt, u32 fill)
{
	struct tiger_hw *card = bc->bch.hw;
	u32 mask = 0xff, val;

	pr_debug("%s: B%1d fill %02x len %d idx %d/%d\n", card->name,
		 bc->bch.nr, fill, cnt, idx, card->send.idx);
	if (bc->bch.nr & 2) {
		fill  <<= 8;
		mask <<= 8;
	}
	mask ^= 0xffffffff;
	while (cnt--) {
		val = card->send.start[idx];
		val &= mask;
		val |= fill;
		card->send.start[idx++] = val;
		if (idx >= card->send.size)
			idx = 0;
	}
}

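/*
 * Switch one B-channel between ISDN_P_NONE (unused), transparent
 * (ISDN_P_B_RAW) and HDLC (ISDN_P_B_HDLC) operation. The Tiger DMA engine
 * and its interrupts are enabled when the first channel leaves ISDN_P_NONE
 * and are only switched off again once both channels are back to
 * ISDN_P_NONE.
 */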
static int
mode_tiger(struct tiger_ch *bc, u32 protocol)
{
	struct tiger_hw *card = bc->bch.hw;

	pr_debug("%s: B%1d protocol %x-->%x\n", card->name,
		 bc->bch.nr, bc->bch.state, protocol);
	switch (protocol) {
	case ISDN_P_NONE:
		if (bc->bch.state == ISDN_P_NONE)
			break;
		fill_mem(bc, 0, card->send.size, 0xff);
		bc->bch.state = protocol;
		/* only stop DMA and interrupts if both channels are in ISDN_P_NONE */
		if ((card->bc[0].bch.state == ISDN_P_NONE) &&
		    (card->bc[1].bch.state == ISDN_P_NONE)) {
			card->dmactrl = 0;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0, card->base + NJ_IRQMASK0);
		}
		test_and_clear_bit(FLG_HDLC, &bc->bch.Flags);
		test_and_clear_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->txstate = 0;
		bc->rxstate = 0;
		bc->lastrx = -1;
		break;
	case ISDN_P_B_RAW:
		test_and_set_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size / 2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		bc->lastrx = -1;
		if (!card->dmactrl) {
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	case ISDN_P_B_HDLC:
		test_and_set_bit(FLG_HDLC, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size / 2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		isdnhdlc_rcv_init(&bc->hrecv, 0);
		isdnhdlc_out_init(&bc->hsend, 0);
		bc->lastrx = -1;
		if (!card->dmactrl) {
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	default:
		pr_info("%s: %s protocol %x not handled\n", card->name,
			__func__, protocol);
		return -ENOPROTOOPT;
	}
	card->send.dmacur = inl(card->base + NJ_DMA_READ_ADR);
	card->recv.dmacur = inl(card->base + NJ_DMA_WRITE_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	pr_debug("%s: %s ctrl %x irq  %02x/%02x idx %d/%d\n",
		 card->name, __func__,
		 inb(card->base + NJ_DMACTRL),
		 inb(card->base + NJ_IRQMASK0),
		 inb(card->base + NJ_IRQSTAT0),
		 card->send.idx,
		 card->recv.idx);
	return 0;
}

static void
nj_reset(struct tiger_hw *card)
{
	outb(0xff, card->base + NJ_CTRL); /* Reset On */
	mdelay(1);

	/* now edge triggered for TJ320 GE 13/07/00 */
	/* see comment in IRQ function */
	if (card->typ == NETJET_S_TJ320) /* TJ320 */
		card->ctrlreg = 0x40;  /* Reset Off and status read clear */
	else
		card->ctrlreg = 0x00;  /* Reset Off and status read clear */
	outb(card->ctrlreg, card->base + NJ_CTRL);
	mdelay(10);

	/* configure AUX pins (all output except ISAC IRQ pin) */
	card->auxd = 0;
	card->dmactrl = 0;
	outb(~NJ_ISACIRQ, card->base + NJ_AUXCTRL);
	outb(NJ_ISACIRQ,  card->base + NJ_IRQMASK1);
	outb(card->auxd, card->base + NJ_AUXDATA);
}

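/*
 * Set up the shared DMA area and program the Tiger address registers.
 * The first half of the area is the send ring (READ DMA: the card reads
 * from host memory), the second half the receive ring (WRITE DMA). For each
 * ring the IRQ address points at the last word of the first half and the
 * END address at the last word of the ring, so the card can signal the host
 * at the half-way point and at the wrap.
 */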
static int
inittiger(struct tiger_hw *card)
{
	int i;

	card->dma_p = dma_alloc_coherent(&card->pdev->dev, NJ_DMA_SIZE,
					 &card->dma, GFP_ATOMIC);
	if (!card->dma_p) {
		pr_info("%s: No DMA memory\n", card->name);
		return -ENOMEM;
	}
	if ((u64)card->dma > 0xffffffff) {
		pr_info("%s: DMA outside 32 bit\n", card->name);
		return -ENOMEM;
	}
	for (i = 0; i < 2; i++) {
		card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC);
		if (!card->bc[i].hsbuf) {
			pr_info("%s: no B%d send buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
		card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC);
		if (!card->bc[i].hrbuf) {
			pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
	}
	memset(card->dma_p, 0xff, NJ_DMA_SIZE);

	card->send.start = card->dma_p;
	card->send.dmastart = (u32)card->dma;
	card->send.dmaend = card->send.dmastart +
		(4 * (NJ_DMA_TXSIZE - 1));
	card->send.dmairq = card->send.dmastart +
		(4 * ((NJ_DMA_TXSIZE / 2) - 1));
	card->send.size = NJ_DMA_TXSIZE;

	if (debug & DEBUG_HW)
		pr_notice("%s: send buffer phy %#x - %#x - %#x  virt %p"
			  " size %zu u32\n", card->name,
			  card->send.dmastart, card->send.dmairq,
			  card->send.dmaend, card->send.start, card->send.size);

	outl(card->send.dmastart, card->base + NJ_DMA_READ_START);
	outl(card->send.dmairq, card->base + NJ_DMA_READ_IRQ);
	outl(card->send.dmaend, card->base + NJ_DMA_READ_END);

	card->recv.start = card->dma_p + (NJ_DMA_SIZE / 2);
	card->recv.dmastart = (u32)card->dma  + (NJ_DMA_SIZE / 2);
	card->recv.dmaend = card->recv.dmastart +
		(4 * (NJ_DMA_RXSIZE - 1));
	card->recv.dmairq = card->recv.dmastart +
		(4 * ((NJ_DMA_RXSIZE / 2) - 1));
	card->recv.size = NJ_DMA_RXSIZE;

	if (debug & DEBUG_HW)
		pr_notice("%s: recv buffer phy %#x - %#x - %#x  virt %p"
			  " size %zu u32\n", card->name,
			  card->recv.dmastart, card->recv.dmairq,
			  card->recv.dmaend, card->recv.start, card->recv.size);

	outl(card->recv.dmastart, card->base + NJ_DMA_WRITE_START);
	outl(card->recv.dmairq, card->base + NJ_DMA_WRITE_IRQ);
	outl(card->recv.dmaend, card->base + NJ_DMA_WRITE_END);
	return 0;
}

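/*
 * Copy one half-ring of received words for this B-channel out of the
 * receive (WRITE DMA) ring, starting at word index 'idx'. Transparent
 * channels hand the bytes straight to mISDN; HDLC channels run them through
 * the software isdnhdlc decoder and deliver complete frames. Seeing the
 * same start index twice in a row means a half-buffer was missed and is
 * flagged as RX_OVERRUN.
 */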
static void
read_dma(struct tiger_ch *bc, u32 idx, int cnt)
{
	struct tiger_hw *card = bc->bch.hw;
	int i, stat;
	u32 val;
	u8 *p, *pn;

	if (bc->lastrx == idx) {
		bc->rxstate |= RX_OVERRUN;
		pr_info("%s: B%1d overrun at idx %d\n", card->name,
			bc->bch.nr, idx);
	}
	bc->lastrx = idx;
	if (test_bit(FLG_RX_OFF, &bc->bch.Flags)) {
		bc->bch.dropcnt += cnt;
		return;
	}
	stat = bchannel_get_rxbuf(&bc->bch, cnt);
	/* only transparent mode uses the count here, HDLC overrun is detected later */
	if (stat == -ENOMEM) {
		pr_warn("%s.B%d: No memory for %d bytes\n",
			card->name, bc->bch.nr, cnt);
		return;
	}
	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
		p = skb_put(bc->bch.rx_skb, cnt);
	else
		p = bc->hrbuf;

	for (i = 0; i < cnt; i++) {
		val = card->recv.start[idx++];
		if (bc->bch.nr & 2)
			val >>= 8;
		if (idx >= card->recv.size)
			idx = 0;
		p[i] = val & 0xff;
	}

	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
		recv_Bchannel(&bc->bch, 0, false);
		return;
	}

	pn = bc->hrbuf;
	while (cnt > 0) {
		stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i,
				       bc->bch.rx_skb->data, bc->bch.maxlen);
		if (stat > 0) { /* valid frame received */
			p = skb_put(bc->bch.rx_skb, stat);
			if (debug & DEBUG_HW_BFIFO) {
				snprintf(card->log, LOG_SIZE,
					 "B%1d-recv %s %d ", bc->bch.nr,
					 card->name, stat);
				print_hex_dump_bytes(card->log,
						     DUMP_PREFIX_OFFSET, p,
						     stat);
			}
			recv_Bchannel(&bc->bch, 0, false);
			stat = bchannel_get_rxbuf(&bc->bch, bc->bch.maxlen);
			if (stat < 0) {
				pr_warn("%s.B%d: No memory for %d bytes\n",
					card->name, bc->bch.nr, cnt);
				return;
			}
		} else if (stat == -HDLC_CRC_ERROR) {
			pr_info("%s: B%1d receive frame CRC error\n",
				card->name, bc->bch.nr);
		} else if (stat == -HDLC_FRAMING_ERROR) {
			pr_info("%s: B%1d receive framing error\n",
				card->name, bc->bch.nr);
		} else if (stat == -HDLC_LENGTH_ERROR) {
			pr_info("%s: B%1d receive frame too long (> %d)\n",
				card->name, bc->bch.nr, bc->bch.maxlen);
		}
		pn += i;
		cnt -= i;
	}
}

static void
recv_tiger(struct tiger_hw *card, u8 irq_stat)
{
	u32 idx;
	int cnt = card->recv.size / 2;

	/* Note receive is via the WRITE DMA channel */
	card->last_is0 &= ~NJ_IRQM0_WR_MASK;
	card->last_is0 |= (irq_stat & NJ_IRQM0_WR_MASK);

	if (irq_stat & NJ_IRQM0_WR_END)
		idx = cnt - 1;
	else
		idx = card->recv.size - 1;

	if (test_bit(FLG_ACTIVE, &card->bc[0].bch.Flags))
		read_dma(&card->bc[0], idx, cnt);
	if (test_bit(FLG_ACTIVE, &card->bc[1].bch.Flags))
		read_dma(&card->bc[1], idx, cnt);
}

/* sync with current DMA address at start or after exception */
static void
resync(struct tiger_ch *bc, struct tiger_hw *card)
{
	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (bc->free > card->send.size / 2)
		bc->free = card->send.size / 2;
	/* currently we simply sync to the next completely free area;
	 * this has the advantage that we always have the maximum time to
	 * handle the TX irq
	 */
	if (card->send.idx < ((card->send.size / 2) - 1))
		bc->idx = (card->recv.size / 2) - 1;
	else
		bc->idx = card->recv.size - 1;
	bc->txstate = TX_RUN;
	pr_debug("%s: %s B%1d free %d idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, bc->free, bc->idx, card->send.idx);
}

static int bc_next_frame(struct tiger_ch *);

static void
fill_hdlc_flag(struct tiger_ch *bc)
{
	struct tiger_hw *card = bc->bch.hw;
	int count, i;
	u32 m, v;
	u8  *p;

	if (bc->free == 0)
		return;
	pr_debug("%s: %s B%1d %d state %x idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, bc->free, bc->txstate,
		 bc->idx, card->send.idx);
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
		resync(bc, card);
	count = isdnhdlc_encode(&bc->hsend, NULL, 0, &i,
				bc->hsbuf, bc->free);
	pr_debug("%s: B%1d hdlc encoded %d flags\n", card->name,
		 bc->bch.nr, count);
	bc->free -= count;
	p = bc->hsbuf;
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	for (i = 0; i < count; i++) {
		if (bc->idx >= card->send.size)
			bc->idx = 0;
		v = card->send.start[bc->idx];
		v &= m;
		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
		card->send.start[bc->idx++] = v;
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
	}
}

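/*
 * Push pending TX payload into the send ring. Transparent data is copied
 * byte-for-byte into this channel's byte lane; with FLG_HDLC set the data
 * is first run through the software isdnhdlc encoder. If FLG_TX_EMPTY is
 * set and no skb is queued, the channel's fill byte is repeated instead.
 */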
static void
fill_dma(struct tiger_ch *bc)
{
	struct tiger_hw *card = bc->bch.hw;
	int count, i, fillempty = 0;
	u32 m, v, n = 0;
	u8  *p;

	if (bc->free == 0)
		return;
	if (!bc->bch.tx_skb) {
		if (!test_bit(FLG_TX_EMPTY, &bc->bch.Flags))
			return;
		fillempty = 1;
		count = card->send.size >> 1;
		p = bc->bch.fill;
	} else {
		count = bc->bch.tx_skb->len - bc->bch.tx_idx;
		if (count <= 0)
			return;
		pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n",
			 card->name, __func__, bc->bch.nr, count, bc->free,
			 bc->bch.tx_idx, bc->bch.tx_skb->len, bc->txstate,
			 bc->idx, card->send.idx);
		p = bc->bch.tx_skb->data + bc->bch.tx_idx;
	}
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
		resync(bc, card);
	if (test_bit(FLG_HDLC, &bc->bch.Flags) && !fillempty) {
		count = isdnhdlc_encode(&bc->hsend, p, count, &i,
					bc->hsbuf, bc->free);
		pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name,
			 bc->bch.nr, i, count);
		bc->bch.tx_idx += i;
		bc->free -= count;
		p = bc->hsbuf;
	} else {
		if (count > bc->free)
			count = bc->free;
		if (!fillempty)
			bc->bch.tx_idx += count;
		bc->free -= count;
	}
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	if (fillempty) {
		n = p[0];
		if (!(bc->bch.nr & 1))
			n <<= 8;
		for (i = 0; i < count; i++) {
			if (bc->idx >= card->send.size)
				bc->idx = 0;
			v = card->send.start[bc->idx];
			v &= m;
			v |= n;
			card->send.start[bc->idx++] = v;
		}
	} else {
		for (i = 0; i < count; i++) {
			if (bc->idx >= card->send.size)
				bc->idx = 0;
			v = card->send.start[bc->idx];
			v &= m;
			n = p[i];
			v |= (bc->bch.nr & 1) ? n : n << 8;
			card->send.start[bc->idx++] = v;
		}
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
	}
	if (bc->free)
		bc_next_frame(bc);
}


static int
bc_next_frame(struct tiger_ch *bc)
{
	int ret = 1;

	if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) {
		fill_dma(bc);
	} else {
		dev_kfree_skb(bc->bch.tx_skb);
		if (get_next_bframe(&bc->bch)) {
			fill_dma(bc);
			test_and_clear_bit(FLG_TX_EMPTY, &bc->bch.Flags);
		} else if (test_bit(FLG_TX_EMPTY, &bc->bch.Flags)) {
			fill_dma(bc);
		} else if (test_bit(FLG_FILLEMPTY, &bc->bch.Flags)) {
			test_and_set_bit(FLG_TX_EMPTY, &bc->bch.Flags);
			ret = 0;
		} else {
			ret = 0;
		}
	}
	return ret;
}

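/*
 * TX half-buffer interrupt for one B-channel: another half ring has been
 * sent out, so that much space is free again. If the complete ring turns
 * out to be free while transmission was supposed to be running, the channel
 * underran. With nothing left to send, HDLC channels are padded with idle
 * flags and transparent channels with 0xff.
 */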
static void
send_tiger_bc(struct tiger_hw *card, struct tiger_ch *bc)
{
	int ret;

	bc->free += card->send.size / 2;
	if (bc->free >= card->send.size) {
		if (!(bc->txstate & (TX_UNDERRUN | TX_INIT))) {
			pr_info("%s: B%1d TX underrun state %x\n", card->name,
				bc->bch.nr, bc->txstate);
			bc->txstate |= TX_UNDERRUN;
		}
		bc->free = card->send.size;
	}
	ret = bc_next_frame(bc);
	if (!ret) {
		if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
			fill_hdlc_flag(bc);
			return;
		}
		pr_debug("%s: B%1d TX no data free %d idx %d/%d\n", card->name,
			 bc->bch.nr, bc->free, bc->idx, card->send.idx);
		if (!(bc->txstate & (TX_IDLE | TX_INIT))) {
			fill_mem(bc, bc->idx, bc->free, 0xff);
			if (bc->free == card->send.size)
				bc->txstate |= TX_IDLE;
		}
	}
}

static void
send_tiger(struct tiger_hw *card, u8 irq_stat)
{
	int i;

	/* Note send is via the READ DMA channel */
	if ((irq_stat & card->last_is0) & NJ_IRQM0_RD_MASK) {
		pr_info("%s: tiger warn write double dma %x/%x\n",
			card->name, irq_stat, card->last_is0);
		return;
	} else {
		card->last_is0 &= ~NJ_IRQM0_RD_MASK;
		card->last_is0 |= (irq_stat & NJ_IRQM0_RD_MASK);
	}
	for (i = 0; i < 2; i++) {
		if (test_bit(FLG_ACTIVE, &card->bc[i].bch.Flags))
			send_tiger_bc(card, &card->bc[i]);
	}
}

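/*
 * Interrupt handler. The IRQ line may be shared, so the handler bails out
 * with IRQ_NONE if neither the ISAC nor the Tiger status bits are set.
 * IRQSTAT0 is cleared by writing the value back; the current READ/WRITE DMA
 * addresses are then compared against the half-way marks, and any
 * half-buffer change since the last interrupt kicks the send or receive
 * path.
 */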
static irqreturn_t
nj_irq(int intno, void *dev_id)
{
	struct tiger_hw *card = dev_id;
	u8 val, s1val, s0val;

	spin_lock(&card->lock);
	s0val = inb(card->base | NJ_IRQSTAT0);
	s1val = inb(card->base | NJ_IRQSTAT1);
	if ((s1val & NJ_ISACIRQ) && (s0val == 0)) {
		/* shared IRQ */
		spin_unlock(&card->lock);
		return IRQ_NONE;
	}
	pr_debug("%s: IRQSTAT0 %02x IRQSTAT1 %02x\n", card->name, s0val, s1val);
	card->irqcnt++;
	if (!(s1val & NJ_ISACIRQ)) {
		val = ReadISAC_nj(card, ISAC_ISTA);
		if (val)
			mISDNisac_irq(&card->isac, val);
	}

	if (s0val)
		/* write to clear */
		outb(s0val, card->base | NJ_IRQSTAT0);
	else
		goto end;
	s1val = s0val;
	/* set bits in s0val to indicate which page is free */
	card->recv.dmacur = inl(card->base | NJ_DMA_WRITE_ADR);
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	if (card->recv.dmacur < card->recv.dmairq)
		s0val = 0x08;	/* the 2nd write area is free */
	else
		s0val = 0x04;	/* the 1st write area is free */

	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (card->send.dmacur < card->send.dmairq)
		s0val |= 0x02;	/* the 2nd read area is free */
	else
		s0val |= 0x01;	/* the 1st read area is free */

	pr_debug("%s: DMA Status %02x/%02x/%02x %d/%d\n", card->name,
		 s1val, s0val, card->last_is0,
		 card->recv.idx, card->send.idx);
	/* test if we have a DMA interrupt */
	if (s0val != card->last_is0) {
		if ((s0val & NJ_IRQM0_RD_MASK) !=
		    (card->last_is0 & NJ_IRQM0_RD_MASK))
			/* the READ DMA (TX) half-buffer changed */
			send_tiger(card, s0val);
		if ((s0val & NJ_IRQM0_WR_MASK) !=
		    (card->last_is0 & NJ_IRQM0_WR_MASK))
			/* the WRITE DMA (RX) half-buffer changed */
			recv_tiger(card, s0val);
	}
end:
	spin_unlock(&card->lock);
	return IRQ_HANDLED;
}

static int
nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
	int ret = -EINVAL;
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	unsigned long flags;

	switch (hh->prim) {
	case PH_DATA_REQ:
		spin_lock_irqsave(&card->lock, flags);
		ret = bchannel_senddata(bch, skb);
		if (ret > 0) { /* direct TX */
			fill_dma(bc);
			ret = 0;
		}
		spin_unlock_irqrestore(&card->lock, flags);
		return ret;
	case PH_ACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
			ret = mode_tiger(bc, ch->protocol);
		else
			ret = 0;
		spin_unlock_irqrestore(&card->lock, flags);
		if (!ret)
			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
				    NULL, GFP_KERNEL);
		break;
	case PH_DEACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		mISDN_clear_bchannel(bch);
		mode_tiger(bc, ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
			    NULL, GFP_KERNEL);
		ret = 0;
		break;
	}
	if (!ret)
		dev_kfree_skb(skb);
	return ret;
}

static int
channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
{
	return mISDN_ctrl_bchannel(&bc->bch, cq);
}

static int
nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card  = bch->hw;
	int ret = -EINVAL;
	u_long flags;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case CLOSE_CHANNEL:
		test_and_clear_bit(FLG_OPEN, &bch->Flags);
		cancel_work_sync(&bch->workq);
		spin_lock_irqsave(&card->lock, flags);
		mISDN_clear_bchannel(bch);
		mode_tiger(bc, ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		ch->protocol = ISDN_P_NONE;
		ch->peer = NULL;
		module_put(THIS_MODULE);
		ret = 0;
		break;
	case CONTROL_CHANNEL:
		ret = channel_bctrl(bc, arg);
		break;
	default:
		pr_info("%s: %s unknown prim(%x)\n", card->name, __func__, cmd);
	}
	return ret;
}

static int
channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
{
	int	ret = 0;

	switch (cq->op) {
	case MISDN_CTRL_GETOP:
		cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
		break;
	case MISDN_CTRL_LOOP:
		/* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
		if (cq->channel < 0 || cq->channel > 3) {
			ret = -EINVAL;
			break;
		}
		ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
		break;
	case MISDN_CTRL_L1_TIMER3:
		ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq->p1);
		break;
	default:
		pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int
open_bchannel(struct tiger_hw *card, struct channel_req *rq)
{
	struct bchannel *bch;

	if (rq->adr.channel == 0 || rq->adr.channel > 2)
		return -EINVAL;
	if (rq->protocol == ISDN_P_NONE)
		return -EINVAL;
	bch = &card->bc[rq->adr.channel - 1].bch;
	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
		return -EBUSY; /* b-channel can be only open once */
	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
	bch->ch.protocol = rq->protocol;
	rq->ch = &bch->ch;
	return 0;
}

/*
 * device control function
 */
static int
nj_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct mISDNdevice	*dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel		*dch = container_of(dev, struct dchannel, dev);
	struct tiger_hw	*card = dch->hw;
	struct channel_req	*rq;
	int			err = 0;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case OPEN_CHANNEL:
		rq = arg;
		if (rq->protocol == ISDN_P_TE_S0)
			err = card->isac.open(&card->isac, rq);
		else
			err = open_bchannel(card, rq);
		if (err)
			break;
		if (!try_module_get(THIS_MODULE))
			pr_info("%s: cannot get module\n", card->name);
		break;
	case CLOSE_CHANNEL:
		pr_debug("%s: dev(%d) close from %p\n", card->name, dch->dev.id,
			 __builtin_return_address(0));
		module_put(THIS_MODULE);
		break;
	case CONTROL_CHANNEL:
		err = channel_ctrl(card, arg);
		break;
	default:
		pr_debug("%s: %s unknown command %x\n",
			 card->name, __func__, cmd);
		return -EINVAL;
	}
	return err;
}

static int
nj_init_card(struct tiger_hw *card)
{
	u_long flags;
	int ret;

	spin_lock_irqsave(&card->lock, flags);
	nj_disable_hwirq(card);
	spin_unlock_irqrestore(&card->lock, flags);

	card->irq = card->pdev->irq;
	if (request_irq(card->irq, nj_irq, IRQF_SHARED, card->name, card)) {
		pr_info("%s: couldn't get interrupt %d\n",
			card->name, card->irq);
		card->irq = -1;
		return -EIO;
	}

	spin_lock_irqsave(&card->lock, flags);
	nj_reset(card);
	ret = card->isac.init(&card->isac);
	if (ret)
		goto error;
	ret = inittiger(card);
	if (ret)
		goto error;
	mode_tiger(&card->bc[0], ISDN_P_NONE);
	mode_tiger(&card->bc[1], ISDN_P_NONE);
error:
	spin_unlock_irqrestore(&card->lock, flags);
	return ret;
}


static void
nj_release(struct tiger_hw *card)
{
	u_long flags;
	int i;

	if (card->base_s) {
		spin_lock_irqsave(&card->lock, flags);
		nj_disable_hwirq(card);
		mode_tiger(&card->bc[0], ISDN_P_NONE);
		mode_tiger(&card->bc[1], ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		card->isac.release(&card->isac);
		release_region(card->base, card->base_s);
		card->base_s = 0;
	}
	if (card->irq > 0)
		free_irq(card->irq, card);
	if (device_is_registered(&card->isac.dch.dev.dev))
		mISDN_unregister_device(&card->isac.dch.dev);

	for (i = 0; i < 2; i++) {
		mISDN_freebchannel(&card->bc[i].bch);
		kfree(card->bc[i].hsbuf);
		kfree(card->bc[i].hrbuf);
	}
	if (card->dma_p)
		dma_free_coherent(&card->pdev->dev, NJ_DMA_SIZE, card->dma_p,
				  card->dma);
	write_lock_irqsave(&card_lock, flags);
	list_del(&card->list);
	write_unlock_irqrestore(&card_lock, flags);
	pci_disable_device(card->pdev);
	pci_set_drvdata(card->pdev, NULL);
	kfree(card);
}


static int
nj_setup(struct tiger_hw *card)
{
	card->base = pci_resource_start(card->pdev, 0);
	card->base_s = pci_resource_len(card->pdev, 0);
	if (!request_region(card->base, card->base_s, card->name)) {
		pr_info("%s: NETjet config port %#x-%#x already in use\n",
			card->name, card->base,
			(u32)(card->base + card->base_s - 1));
		card->base_s = 0;
		return -EIO;
	}
	ASSIGN_FUNC(nj, ISAC, card->isac);
	return 0;
}


static int
setup_instance(struct tiger_hw *card)
{
	int i, err;
	u_long flags;

	snprintf(card->name, MISDN_MAX_IDLEN - 1, "netjet.%d", nj_cnt + 1);
	write_lock_irqsave(&card_lock, flags);
	list_add_tail(&card->list, &Cards);
	write_unlock_irqrestore(&card_lock, flags);

	_set_debug(card);
	card->isac.name = card->name;
	spin_lock_init(&card->lock);
	card->isac.hwlock = &card->lock;
	mISDNisac_init(&card->isac, card);

	card->isac.dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
	card->isac.dch.dev.D.ctrl = nj_dctrl;
	for (i = 0; i < 2; i++) {
		card->bc[i].bch.nr = i + 1;
		set_channelmap(i + 1, card->isac.dch.dev.channelmap);
		mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM,
				   NJ_DMA_RXSIZE >> 1);
		card->bc[i].bch.hw = card;
		card->bc[i].bch.ch.send = nj_l2l1B;
		card->bc[i].bch.ch.ctrl = nj_bctrl;
		card->bc[i].bch.ch.nr = i + 1;
		list_add(&card->bc[i].bch.ch.list,
			 &card->isac.dch.dev.bchannels);
	}
	err = nj_setup(card);
	if (err)
		goto error;
	err = mISDN_register_device(&card->isac.dch.dev, &card->pdev->dev,
				    card->name);
	if (err)
		goto error;
	err = nj_init_card(card);
	if (!err)  {
		nj_cnt++;
		pr_notice("Netjet %d cards installed\n", nj_cnt);
		return 0;
	}
error:
	nj_release(card);
	return err;
}

static int
nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = -ENOMEM;
	u32 cfg;
	struct tiger_hw *card;

	if (pdev->subsystem_vendor == 0x8086 &&
	    pdev->subsystem_device == 0x0003) {
		pr_notice("Netjet: Digium X100P/X101P not handled\n");
		return -ENODEV;
	}

	if (pdev->subsystem_vendor == 0x55 &&
	    pdev->subsystem_device == 0x02) {
		pr_notice("Netjet: Enter!Now not handled yet\n");
		return -ENODEV;
	}

	if (pdev->subsystem_vendor == 0xb100 &&
	    pdev->subsystem_device == 0x0003) {
		pr_notice("Netjet: Digium TDM400P not handled yet\n");
		return -ENODEV;
	}

	card = kzalloc(sizeof(struct tiger_hw), GFP_KERNEL);
	if (!card) {
		pr_info("No kmem for Netjet\n");
		return err;
	}

	card->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		kfree(card);
		return err;
	}

	printk(KERN_INFO "nj_probe(mISDN): found adapter at %s\n",
	       pci_name(pdev));

	pci_set_master(pdev);

	/* The TJ300 and TJ320 must be told apart because their IRQ handling
	 * differs. Unfortunately both chips use the same device ID, but the
	 * TJ320 has bit 20 set in the status/command PCI cfg dword at 0x04
	 */
	pci_read_config_dword(pdev, 0x04, &cfg);
	if (cfg & 0x00100000)
		card->typ = NETJET_S_TJ320;
	else
		card->typ = NETJET_S_TJ300;

	card->base = pci_resource_start(pdev, 0);
	pci_set_drvdata(pdev, card);
	err = setup_instance(card);
	if (err)
		pci_set_drvdata(pdev, NULL);

	return err;
}


static void nj_remove(struct pci_dev *pdev)
{
	struct tiger_hw *card = pci_get_drvdata(pdev);

	if (card)
		nj_release(card);
	else
		pr_info("%s drvdata already removed\n", __func__);
}

/* We cannot select cards with PCI_SUB... IDs, since there are cards with
 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject the known
 * other cards which do not work with this driver - see the probe function */
static const struct pci_device_id nj_pci_ids[] = {
	{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ }
};
MODULE_DEVICE_TABLE(pci, nj_pci_ids);

static struct pci_driver nj_driver = {
	.name = "netjet",
	.probe = nj_probe,
	.remove = nj_remove,
	.id_table = nj_pci_ids,
};

static int __init nj_init(void)
{
	int err;

	pr_notice("Netjet PCI driver Rev. %s\n", NETJET_REV);
	err = pci_register_driver(&nj_driver);
	return err;
}

static void __exit nj_cleanup(void)
{
	pci_unregister_driver(&nj_driver);
}

module_init(nj_init);
module_exit(nj_cleanup);