xref: /linux/drivers/isdn/hardware/mISDN/netjet.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * NETJet mISDN driver
4  *
5  * Author       Karsten Keil <keil@isdn4linux.de>
6  *
7  * Copyright 2009  by Karsten Keil <keil@isdn4linux.de>
8  */
9 
10 #include <linux/interrupt.h>
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 #include <linux/delay.h>
14 #include <linux/mISDNhw.h>
15 #include <linux/slab.h>
16 #include "ipac.h"
17 #include "iohelper.h"
18 #include "netjet.h"
19 #include "isdnhdlc.h"
20 
21 #define NETJET_REV	"2.0"
22 
23 enum nj_types {
24 	NETJET_S_TJ300,
25 	NETJET_S_TJ320,
26 	ENTERNOW__TJ320,
27 };
28 
29 struct tiger_dma {
30 	size_t		size;
31 	u32		*start;
32 	int		idx;
33 	u32		dmastart;
34 	u32		dmairq;
35 	u32		dmaend;
36 	u32		dmacur;
37 };
38 
39 struct tiger_hw;
40 
41 struct tiger_ch {
42 	struct bchannel		bch;
43 	struct tiger_hw		*nj;
44 	int			idx;
45 	int			free;
46 	int			lastrx;
47 	u16			rxstate;
48 	u16			txstate;
49 	struct isdnhdlc_vars	hsend;
50 	struct isdnhdlc_vars	hrecv;
51 	u8			*hsbuf;
52 	u8			*hrbuf;
53 };
54 
55 #define TX_INIT		0x0001
56 #define TX_IDLE		0x0002
57 #define TX_RUN		0x0004
58 #define TX_UNDERRUN	0x0100
59 #define RX_OVERRUN	0x0100
60 
61 #define LOG_SIZE	64
62 
63 struct tiger_hw {
64 	struct list_head	list;
65 	struct pci_dev		*pdev;
66 	char			name[MISDN_MAX_IDLEN];
67 	enum nj_types		typ;
68 	int			irq;
69 	u32			irqcnt;
70 	u32			base;
71 	size_t			base_s;
72 	dma_addr_t		dma;
73 	void			*dma_p;
74 	spinlock_t		lock;	/* lock HW */
75 	struct isac_hw		isac;
76 	struct tiger_dma	send;
77 	struct tiger_dma	recv;
78 	struct tiger_ch		bc[2];
79 	u8			ctrlreg;
80 	u8			dmactrl;
81 	u8			auxd;
82 	u8			last_is0;
83 	u8			irqmask0;
84 	char			log[LOG_SIZE];
85 };
86 
87 static LIST_HEAD(Cards);
88 static DEFINE_RWLOCK(card_lock); /* protect Cards */
89 static u32 debug;
90 static int nj_cnt;
91 
92 static void
93 _set_debug(struct tiger_hw *card)
94 {
95 	card->isac.dch.debug = debug;
96 	card->bc[0].bch.debug = debug;
97 	card->bc[1].bch.debug = debug;
98 }
99 
100 static int
101 set_debug(const char *val, const struct kernel_param *kp)
102 {
103 	int ret;
104 	struct tiger_hw *card;
105 
106 	ret = param_set_uint(val, kp);
107 	if (!ret) {
108 		read_lock(&card_lock);
109 		list_for_each_entry(card, &Cards, list)
110 			_set_debug(card);
111 		read_unlock(&card_lock);
112 	}
113 	return ret;
114 }
115 
116 MODULE_AUTHOR("Karsten Keil");
117 MODULE_DESCRIPTION("mISDN driver for NETJet cards");
118 MODULE_LICENSE("GPL v2");
119 MODULE_VERSION(NETJET_REV);
120 module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
121 MODULE_PARM_DESC(debug, "Netjet debug mask");
122 
123 static void
124 nj_disable_hwirq(struct tiger_hw *card)
125 {
126 	outb(0, card->base + NJ_IRQMASK0);
127 	outb(0, card->base + NJ_IRQMASK1);
128 }
129 
130 
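/*
 * Indirect ISAC register access (as implemented below): the two low bits
 * of NJ_AUXDATA select bits 4-5 of the register address, the low nibble
 * is mapped into the I/O window at NJ_ISAC_OFF with a 4-byte stride.
 */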
131 static u8
132 ReadISAC_nj(void *p, u8 offset)
133 {
134 	struct tiger_hw *card = p;
135 	u8 ret;
136 
137 	card->auxd &= 0xfc;
138 	card->auxd |= (offset >> 4) & 3;
139 	outb(card->auxd, card->base + NJ_AUXDATA);
140 	ret = inb(card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
141 	return ret;
142 }
143 
144 static void
145 WriteISAC_nj(void *p, u8 offset, u8 value)
146 {
147 	struct tiger_hw *card = p;
148 
149 	card->auxd &= 0xfc;
150 	card->auxd |= (offset >> 4) & 3;
151 	outb(card->auxd, card->base + NJ_AUXDATA);
152 	outb(value, card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
153 }
154 
155 static void
156 ReadFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
157 {
158 	struct tiger_hw *card = p;
159 
160 	card->auxd &= 0xfc;
161 	outb(card->auxd, card->base + NJ_AUXDATA);
162 	insb(card->base + NJ_ISAC_OFF, data, size);
163 }
164 
165 static void
166 WriteFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
167 {
168 	struct tiger_hw *card = p;
169 
170 	card->auxd &= 0xfc;
171 	outb(card->auxd, card->base + NJ_AUXDATA);
172 	outsb(card->base + NJ_ISAC_OFF, data, size);
173 }
174 
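/*
 * Both B-channels share one ring of 32-bit words: B1 occupies the low
 * byte of each word, B2 occupies bits 8-15. fill_mem() overwrites only
 * this channel's byte lane with the given fill pattern, wrapping at the
 * end of the send area.
 */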
175 static void
176 fill_mem(struct tiger_ch *bc, u32 idx, u32 cnt, u32 fill)
177 {
178 	struct tiger_hw *card = bc->bch.hw;
179 	u32 mask = 0xff, val;
180 
181 	pr_debug("%s: B%1d fill %02x len %d idx %d/%d\n", card->name,
182 		 bc->bch.nr, fill, cnt, idx, card->send.idx);
183 	if (bc->bch.nr & 2) {
184 		fill  <<= 8;
185 		mask <<= 8;
186 	}
187 	mask ^= 0xffffffff;
188 	while (cnt--) {
189 		val = card->send.start[idx];
190 		val &= mask;
191 		val |= fill;
192 		card->send.start[idx++] = val;
193 		if (idx >= card->send.size)
194 			idx = 0;
195 	}
196 }
197 
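/*
 * Switch a B-channel between ISDN_P_NONE, transparent and HDLC mode.
 * The DMA controller and the IRQMASK0 bits are enabled when the first
 * channel is activated and disabled again once both channels are back
 * to ISDN_P_NONE; afterwards the software indexes are resynced to the
 * current hardware DMA pointers.
 */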
198 static int
199 mode_tiger(struct tiger_ch *bc, u32 protocol)
200 {
201 	struct tiger_hw *card = bc->bch.hw;
202 
203 	pr_debug("%s: B%1d protocol %x-->%x\n", card->name,
204 		 bc->bch.nr, bc->bch.state, protocol);
205 	switch (protocol) {
206 	case ISDN_P_NONE:
207 		if (bc->bch.state == ISDN_P_NONE)
208 			break;
209 		fill_mem(bc, 0, card->send.size, 0xff);
210 		bc->bch.state = protocol;
211 		/* only stop dma and interrupts if both channels are ISDN_P_NONE */
212 		if ((card->bc[0].bch.state == ISDN_P_NONE) &&
213 		    (card->bc[1].bch.state == ISDN_P_NONE)) {
214 			card->dmactrl = 0;
215 			outb(card->dmactrl, card->base + NJ_DMACTRL);
216 			outb(0, card->base + NJ_IRQMASK0);
217 		}
218 		test_and_clear_bit(FLG_HDLC, &bc->bch.Flags);
219 		test_and_clear_bit(FLG_TRANSPARENT, &bc->bch.Flags);
220 		bc->txstate = 0;
221 		bc->rxstate = 0;
222 		bc->lastrx = -1;
223 		break;
224 	case ISDN_P_B_RAW:
225 		test_and_set_bit(FLG_TRANSPARENT, &bc->bch.Flags);
226 		bc->bch.state = protocol;
227 		bc->idx = 0;
228 		bc->free = card->send.size / 2;
229 		bc->rxstate = 0;
230 		bc->txstate = TX_INIT | TX_IDLE;
231 		bc->lastrx = -1;
232 		if (!card->dmactrl) {
233 			card->dmactrl = 1;
234 			outb(card->dmactrl, card->base + NJ_DMACTRL);
235 			outb(0x0f, card->base + NJ_IRQMASK0);
236 		}
237 		break;
238 	case ISDN_P_B_HDLC:
239 		test_and_set_bit(FLG_HDLC, &bc->bch.Flags);
240 		bc->bch.state = protocol;
241 		bc->idx = 0;
242 		bc->free = card->send.size / 2;
243 		bc->rxstate = 0;
244 		bc->txstate = TX_INIT | TX_IDLE;
245 		isdnhdlc_rcv_init(&bc->hrecv, 0);
246 		isdnhdlc_out_init(&bc->hsend, 0);
247 		bc->lastrx = -1;
248 		if (!card->dmactrl) {
249 			card->dmactrl = 1;
250 			outb(card->dmactrl, card->base + NJ_DMACTRL);
251 			outb(0x0f, card->base + NJ_IRQMASK0);
252 		}
253 		break;
254 	default:
255 		pr_info("%s: %s protocol %x not handled\n", card->name,
256 			__func__, protocol);
257 		return -ENOPROTOOPT;
258 	}
259 	card->send.dmacur = inl(card->base + NJ_DMA_READ_ADR);
260 	card->recv.dmacur = inl(card->base + NJ_DMA_WRITE_ADR);
261 	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
262 	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
263 	pr_debug("%s: %s ctrl %x irq  %02x/%02x idx %d/%d\n",
264 		 card->name, __func__,
265 		 inb(card->base + NJ_DMACTRL),
266 		 inb(card->base + NJ_IRQMASK0),
267 		 inb(card->base + NJ_IRQSTAT0),
268 		 card->send.idx,
269 		 card->recv.idx);
270 	return 0;
271 }
272 
273 static void
274 nj_reset(struct tiger_hw *card)
275 {
276 	outb(0xff, card->base + NJ_CTRL); /* Reset On */
277 	mdelay(1);
278 
279 	/* now edge triggered for TJ320 GE 13/07/00 */
280 	/* see comment in IRQ function */
281 	if (card->typ == NETJET_S_TJ320) /* TJ320 */
282 		card->ctrlreg = 0x40;  /* Reset Off and status read clear */
283 	else
284 		card->ctrlreg = 0x00;  /* Reset Off and status read clear */
285 	outb(card->ctrlreg, card->base + NJ_CTRL);
286 	mdelay(10);
287 
288 	/* configure AUX pins (all output except ISAC IRQ pin) */
289 	card->auxd = 0;
290 	card->dmactrl = 0;
291 	outb(~NJ_ISACIRQ, card->base + NJ_AUXCTRL);
292 	outb(NJ_ISACIRQ,  card->base + NJ_IRQMASK1);
293 	outb(card->auxd, card->base + NJ_AUXDATA);
294 }
295 
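/*
 * Allocate and program the DMA rings: one coherent buffer of NJ_DMA_SIZE
 * bytes, the lower half used as the send (READ DMA) ring of NJ_DMA_TXSIZE
 * 32-bit words, the upper half as the receive (WRITE DMA) ring of
 * NJ_DMA_RXSIZE words. The IRQ address of each ring points to its middle,
 * so the controller signals every completed half buffer.
 */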
296 static int
297 inittiger(struct tiger_hw *card)
298 {
299 	int i;
300 
301 	card->dma_p = dma_alloc_coherent(&card->pdev->dev, NJ_DMA_SIZE,
302 					 &card->dma, GFP_ATOMIC);
303 	if (!card->dma_p) {
304 		pr_info("%s: No DMA memory\n", card->name);
305 		return -ENOMEM;
306 	}
307 	if ((u64)card->dma > 0xffffffff) {
308 		pr_info("%s: DMA outside 32 bit\n", card->name);
309 		return -ENOMEM;
310 	}
311 	for (i = 0; i < 2; i++) {
312 		card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC);
313 		if (!card->bc[i].hsbuf) {
314 			pr_info("%s: no B%d send buffer\n", card->name, i + 1);
315 			return -ENOMEM;
316 		}
317 		card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC);
318 		if (!card->bc[i].hrbuf) {
319 			pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
320 			return -ENOMEM;
321 		}
322 	}
323 	memset(card->dma_p, 0xff, NJ_DMA_SIZE);
324 
325 	card->send.start = card->dma_p;
326 	card->send.dmastart = (u32)card->dma;
327 	card->send.dmaend = card->send.dmastart +
328 		(4 * (NJ_DMA_TXSIZE - 1));
329 	card->send.dmairq = card->send.dmastart +
330 		(4 * ((NJ_DMA_TXSIZE / 2) - 1));
331 	card->send.size = NJ_DMA_TXSIZE;
332 
333 	if (debug & DEBUG_HW)
334 		pr_notice("%s: send buffer phy %#x - %#x - %#x  virt %p"
335 			  " size %zu u32\n", card->name,
336 			  card->send.dmastart, card->send.dmairq,
337 			  card->send.dmaend, card->send.start, card->send.size);
338 
339 	outl(card->send.dmastart, card->base + NJ_DMA_READ_START);
340 	outl(card->send.dmairq, card->base + NJ_DMA_READ_IRQ);
341 	outl(card->send.dmaend, card->base + NJ_DMA_READ_END);
342 
343 	card->recv.start = card->dma_p + (NJ_DMA_SIZE / 2);
344 	card->recv.dmastart = (u32)card->dma  + (NJ_DMA_SIZE / 2);
345 	card->recv.dmaend = card->recv.dmastart +
346 		(4 * (NJ_DMA_RXSIZE - 1));
347 	card->recv.dmairq = card->recv.dmastart +
348 		(4 * ((NJ_DMA_RXSIZE / 2) - 1));
349 	card->recv.size = NJ_DMA_RXSIZE;
350 
351 	if (debug & DEBUG_HW)
352 		pr_notice("%s: recv buffer phy %#x - %#x - %#x  virt %p"
353 			  " size %zu u32\n", card->name,
354 			  card->recv.dmastart, card->recv.dmairq,
355 			  card->recv.dmaend, card->recv.start, card->recv.size);
356 
357 	outl(card->recv.dmastart, card->base + NJ_DMA_WRITE_START);
358 	outl(card->recv.dmairq, card->base + NJ_DMA_WRITE_IRQ);
359 	outl(card->recv.dmaend, card->base + NJ_DMA_WRITE_END);
360 	return 0;
361 }
362 
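/*
 * Copy cnt samples for this channel from the receive ring, starting at
 * idx and extracting the channel's byte from each 32-bit word. In
 * transparent mode the data is delivered directly; in HDLC mode it is
 * fed through isdnhdlc_decode() and complete frames are passed up.
 */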
363 static void
364 read_dma(struct tiger_ch *bc, u32 idx, int cnt)
365 {
366 	struct tiger_hw *card = bc->bch.hw;
367 	int i, stat;
368 	u32 val;
369 	u8 *p, *pn;
370 
371 	if (bc->lastrx == idx) {
372 		bc->rxstate |= RX_OVERRUN;
373 		pr_info("%s: B%1d overrun at idx %d\n", card->name,
374 			bc->bch.nr, idx);
375 	}
376 	bc->lastrx = idx;
377 	if (test_bit(FLG_RX_OFF, &bc->bch.Flags)) {
378 		bc->bch.dropcnt += cnt;
379 		return;
380 	}
381 	stat = bchannel_get_rxbuf(&bc->bch, cnt);
382 	/* only transparent mode uses the count here, HDLC overrun is detected later */
383 	if (stat == -ENOMEM) {
384 		pr_warn("%s.B%d: No memory for %d bytes\n",
385 			card->name, bc->bch.nr, cnt);
386 		return;
387 	}
388 	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
389 		p = skb_put(bc->bch.rx_skb, cnt);
390 	else
391 		p = bc->hrbuf;
392 
393 	for (i = 0; i < cnt; i++) {
394 		val = card->recv.start[idx++];
395 		if (bc->bch.nr & 2)
396 			val >>= 8;
397 		if (idx >= card->recv.size)
398 			idx = 0;
399 		p[i] = val & 0xff;
400 	}
401 
402 	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
403 		recv_Bchannel(&bc->bch, 0, false);
404 		return;
405 	}
406 
407 	pn = bc->hrbuf;
408 	while (cnt > 0) {
409 		stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i,
410 				       bc->bch.rx_skb->data, bc->bch.maxlen);
411 		if (stat > 0) { /* valid frame received */
412 			p = skb_put(bc->bch.rx_skb, stat);
413 			if (debug & DEBUG_HW_BFIFO) {
414 				snprintf(card->log, LOG_SIZE,
415 					 "B%1d-recv %s %d ", bc->bch.nr,
416 					 card->name, stat);
417 				print_hex_dump_bytes(card->log,
418 						     DUMP_PREFIX_OFFSET, p,
419 						     stat);
420 			}
421 			recv_Bchannel(&bc->bch, 0, false);
422 			stat = bchannel_get_rxbuf(&bc->bch, bc->bch.maxlen);
423 			if (stat < 0) {
424 				pr_warn("%s.B%d: No memory for %d bytes\n",
425 					card->name, bc->bch.nr, cnt);
426 				return;
427 			}
428 		} else if (stat == -HDLC_CRC_ERROR) {
429 			pr_info("%s: B%1d receive frame CRC error\n",
430 				card->name, bc->bch.nr);
431 		} else if (stat == -HDLC_FRAMING_ERROR) {
432 			pr_info("%s: B%1d receive framing error\n",
433 				card->name, bc->bch.nr);
434 		} else if (stat == -HDLC_LENGTH_ERROR) {
435 			pr_info("%s: B%1d receive frame too long (> %d)\n",
436 				card->name, bc->bch.nr, bc->bch.maxlen);
437 		}
438 		pn += i;
439 		cnt -= i;
440 	}
441 }
442 
443 static void
444 recv_tiger(struct tiger_hw *card, u8 irq_stat)
445 {
446 	u32 idx;
447 	int cnt = card->recv.size / 2;
448 
449 	/* Note receive is via the WRITE DMA channel */
450 	card->last_is0 &= ~NJ_IRQM0_WR_MASK;
451 	card->last_is0 |= (irq_stat & NJ_IRQM0_WR_MASK);
452 
453 	if (irq_stat & NJ_IRQM0_WR_END)
454 		idx = cnt - 1;
455 	else
456 		idx = card->recv.size - 1;
457 
458 	if (test_bit(FLG_ACTIVE, &card->bc[0].bch.Flags))
459 		read_dma(&card->bc[0], idx, cnt);
460 	if (test_bit(FLG_ACTIVE, &card->bc[1].bch.Flags))
461 		read_dma(&card->bc[1], idx, cnt);
462 }
463 
464 /* sync with current DMA address at start or after exception */
465 static void
466 resync(struct tiger_ch *bc, struct tiger_hw *card)
467 {
468 	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
469 	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
470 	if (bc->free > card->send.size / 2)
471 		bc->free = card->send.size / 2;
472 	/* currently we simply sync to the next complete free area
473 	 * this has the advantage that we always have maximum time to
474 	 * handle TX irq
475 	 */
476 	if (card->send.idx < ((card->send.size / 2) - 1))
477 		bc->idx = (card->recv.size / 2) - 1;
478 	else
479 		bc->idx = card->recv.size - 1;
480 	bc->txstate = TX_RUN;
481 	pr_debug("%s: %s B%1d free %d idx %d/%d\n", card->name,
482 		 __func__, bc->bch.nr, bc->free, bc->idx, card->send.idx);
483 }
484 
485 static int bc_next_frame(struct tiger_ch *);
486 
487 static void
488 fill_hdlc_flag(struct tiger_ch *bc)
489 {
490 	struct tiger_hw *card = bc->bch.hw;
491 	int count, i;
492 	u32 m, v;
493 	u8  *p;
494 
495 	if (bc->free == 0)
496 		return;
497 	pr_debug("%s: %s B%1d %d state %x idx %d/%d\n", card->name,
498 		 __func__, bc->bch.nr, bc->free, bc->txstate,
499 		 bc->idx, card->send.idx);
500 	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
501 		resync(bc, card);
502 	count = isdnhdlc_encode(&bc->hsend, NULL, 0, &i,
503 				bc->hsbuf, bc->free);
504 	pr_debug("%s: B%1d hdlc encoded %d flags\n", card->name,
505 		 bc->bch.nr, count);
506 	bc->free -= count;
507 	p = bc->hsbuf;
508 	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
509 	for (i = 0; i < count; i++) {
510 		if (bc->idx >= card->send.size)
511 			bc->idx = 0;
512 		v = card->send.start[bc->idx];
513 		v &= m;
514 		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
515 		card->send.start[bc->idx++] = v;
516 	}
517 	if (debug & DEBUG_HW_BFIFO) {
518 		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
519 			 bc->bch.nr, card->name, count);
520 		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
521 	}
522 }
523 
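/*
 * Fill this channel's byte lane of the send ring from the current
 * tx_skb (or with the fill pattern while FLG_TX_EMPTY is set). With
 * FLG_HDLC the data is first run through isdnhdlc_encode(). After idle,
 * init or underrun the index is resynced to the hardware DMA pointer.
 */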
524 static void
525 fill_dma(struct tiger_ch *bc)
526 {
527 	struct tiger_hw *card = bc->bch.hw;
528 	int count, i, fillempty = 0;
529 	u32 m, v, n = 0;
530 	u8  *p;
531 
532 	if (bc->free == 0)
533 		return;
534 	if (!bc->bch.tx_skb) {
535 		if (!test_bit(FLG_TX_EMPTY, &bc->bch.Flags))
536 			return;
537 		fillempty = 1;
538 		count = card->send.size >> 1;
539 		p = bc->bch.fill;
540 	} else {
541 		count = bc->bch.tx_skb->len - bc->bch.tx_idx;
542 		if (count <= 0)
543 			return;
544 		pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n",
545 			 card->name, __func__, bc->bch.nr, count, bc->free,
546 			 bc->bch.tx_idx, bc->bch.tx_skb->len, bc->txstate,
547 			 bc->idx, card->send.idx);
548 		p = bc->bch.tx_skb->data + bc->bch.tx_idx;
549 	}
550 	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
551 		resync(bc, card);
552 	if (test_bit(FLG_HDLC, &bc->bch.Flags) && !fillempty) {
553 		count = isdnhdlc_encode(&bc->hsend, p, count, &i,
554 					bc->hsbuf, bc->free);
555 		pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name,
556 			 bc->bch.nr, i, count);
557 		bc->bch.tx_idx += i;
558 		bc->free -= count;
559 		p = bc->hsbuf;
560 	} else {
561 		if (count > bc->free)
562 			count = bc->free;
563 		if (!fillempty)
564 			bc->bch.tx_idx += count;
565 		bc->free -= count;
566 	}
567 	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
568 	if (fillempty) {
569 		n = p[0];
570 		if (!(bc->bch.nr & 1))
571 			n <<= 8;
572 		for (i = 0; i < count; i++) {
573 			if (bc->idx >= card->send.size)
574 				bc->idx = 0;
575 			v = card->send.start[bc->idx];
576 			v &= m;
577 			v |= n;
578 			card->send.start[bc->idx++] = v;
579 		}
580 	} else {
581 		for (i = 0; i < count; i++) {
582 			if (bc->idx >= card->send.size)
583 				bc->idx = 0;
584 			v = card->send.start[bc->idx];
585 			v &= m;
586 			n = p[i];
587 			v |= (bc->bch.nr & 1) ? n : n << 8;
588 			card->send.start[bc->idx++] = v;
589 		}
590 	}
591 	if (debug & DEBUG_HW_BFIFO) {
592 		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
593 			 bc->bch.nr, card->name, count);
594 		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
595 	}
596 	if (bc->free)
597 		bc_next_frame(bc);
598 }
599 
600 
601 static int
602 bc_next_frame(struct tiger_ch *bc)
603 {
604 	int ret = 1;
605 
606 	if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) {
607 		fill_dma(bc);
608 	} else {
609 		dev_kfree_skb(bc->bch.tx_skb);
610 		if (get_next_bframe(&bc->bch)) {
611 			fill_dma(bc);
612 			test_and_clear_bit(FLG_TX_EMPTY, &bc->bch.Flags);
613 		} else if (test_bit(FLG_TX_EMPTY, &bc->bch.Flags)) {
614 			fill_dma(bc);
615 		} else if (test_bit(FLG_FILLEMPTY, &bc->bch.Flags)) {
616 			test_and_set_bit(FLG_TX_EMPTY, &bc->bch.Flags);
617 			ret = 0;
618 		} else {
619 			ret = 0;
620 		}
621 	}
622 	return ret;
623 }
624 
625 static void
626 send_tiger_bc(struct tiger_hw *card, struct tiger_ch *bc)
627 {
628 	int ret;
629 
630 	bc->free += card->send.size / 2;
631 	if (bc->free >= card->send.size) {
632 		if (!(bc->txstate & (TX_UNDERRUN | TX_INIT))) {
633 			pr_info("%s: B%1d TX underrun state %x\n", card->name,
634 				bc->bch.nr, bc->txstate);
635 			bc->txstate |= TX_UNDERRUN;
636 		}
637 		bc->free = card->send.size;
638 	}
639 	ret = bc_next_frame(bc);
640 	if (!ret) {
641 		if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
642 			fill_hdlc_flag(bc);
643 			return;
644 		}
645 		pr_debug("%s: B%1d TX no data free %d idx %d/%d\n", card->name,
646 			 bc->bch.nr, bc->free, bc->idx, card->send.idx);
647 		if (!(bc->txstate & (TX_IDLE | TX_INIT))) {
648 			fill_mem(bc, bc->idx, bc->free, 0xff);
649 			if (bc->free == card->send.size)
650 				bc->txstate |= TX_IDLE;
651 		}
652 	}
653 }
654 
655 static void
656 send_tiger(struct tiger_hw *card, u8 irq_stat)
657 {
658 	int i;
659 
660 	/* Note send is via the READ DMA channel */
661 	if ((irq_stat & card->last_is0) & NJ_IRQM0_RD_MASK) {
662 		pr_info("%s: tiger warn write double dma %x/%x\n",
663 			card->name, irq_stat, card->last_is0);
664 		return;
665 	} else {
666 		card->last_is0 &= ~NJ_IRQM0_RD_MASK;
667 		card->last_is0 |= (irq_stat & NJ_IRQM0_RD_MASK);
668 	}
669 	for (i = 0; i < 2; i++) {
670 		if (test_bit(FLG_ACTIVE, &card->bc[i].bch.Flags))
671 			send_tiger_bc(card, &card->bc[i]);
672 	}
673 }
674 
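/*
 * Interrupt handler: ISAC (D-channel) interrupts are reported via
 * NJ_IRQSTAT1/ISAC_ISTA, Tiger DMA events via NJ_IRQSTAT0. The current
 * DMA read/write addresses are translated into "which half is free"
 * bits; when they differ from last_is0, send_tiger()/recv_tiger() are
 * called for the corresponding direction.
 */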
675 static irqreturn_t
676 nj_irq(int intno, void *dev_id)
677 {
678 	struct tiger_hw *card = dev_id;
679 	u8 val, s1val, s0val;
680 
681 	spin_lock(&card->lock);
682 	s0val = inb(card->base | NJ_IRQSTAT0);
683 	s1val = inb(card->base | NJ_IRQSTAT1);
684 	if ((s1val & NJ_ISACIRQ) && (s0val == 0)) {
685 		/* shared IRQ */
686 		spin_unlock(&card->lock);
687 		return IRQ_NONE;
688 	}
689 	pr_debug("%s: IRQSTAT0 %02x IRQSTAT1 %02x\n", card->name, s0val, s1val);
690 	card->irqcnt++;
691 	if (!(s1val & NJ_ISACIRQ)) {
692 		val = ReadISAC_nj(card, ISAC_ISTA);
693 		if (val)
694 			mISDNisac_irq(&card->isac, val);
695 	}
696 
697 	if (s0val)
698 		/* write to clear */
699 		outb(s0val, card->base | NJ_IRQSTAT0);
700 	else
701 		goto end;
702 	s1val = s0val;
703 	/* set bits in s0val to indicate which page is free */
704 	card->recv.dmacur = inl(card->base | NJ_DMA_WRITE_ADR);
705 	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
706 	if (card->recv.dmacur < card->recv.dmairq)
707 		s0val = 0x08;	/* the 2nd write area is free */
708 	else
709 		s0val = 0x04;	/* the 1st write area is free */
710 
711 	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
712 	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
713 	if (card->send.dmacur < card->send.dmairq)
714 		s0val |= 0x02;	/* the 2nd read area is free */
715 	else
716 		s0val |= 0x01;	/* the 1st read area is free */
717 
718 	pr_debug("%s: DMA Status %02x/%02x/%02x %d/%d\n", card->name,
719 		 s1val, s0val, card->last_is0,
720 		 card->recv.idx, card->send.idx);
721 	/* test if we have a DMA interrupt */
722 	if (s0val != card->last_is0) {
723 		if ((s0val & NJ_IRQM0_RD_MASK) !=
724 		    (card->last_is0 & NJ_IRQM0_RD_MASK))
725 			/* got a read dma int */
726 			send_tiger(card, s0val);
727 		if ((s0val & NJ_IRQM0_WR_MASK) !=
728 		    (card->last_is0 & NJ_IRQM0_WR_MASK))
729 			/* got a write dma int */
730 			recv_tiger(card, s0val);
731 	}
732 end:
733 	spin_unlock(&card->lock);
734 	return IRQ_HANDLED;
735 }
736 
737 static int
738 nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
739 {
740 	int ret = -EINVAL;
741 	struct bchannel *bch = container_of(ch, struct bchannel, ch);
742 	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
743 	struct tiger_hw *card = bch->hw;
744 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
745 	unsigned long flags;
746 
747 	switch (hh->prim) {
748 	case PH_DATA_REQ:
749 		spin_lock_irqsave(&card->lock, flags);
750 		ret = bchannel_senddata(bch, skb);
751 		if (ret > 0) { /* direct TX */
752 			fill_dma(bc);
753 			ret = 0;
754 		}
755 		spin_unlock_irqrestore(&card->lock, flags);
756 		return ret;
757 	case PH_ACTIVATE_REQ:
758 		spin_lock_irqsave(&card->lock, flags);
759 		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
760 			ret = mode_tiger(bc, ch->protocol);
761 		else
762 			ret = 0;
763 		spin_unlock_irqrestore(&card->lock, flags);
764 		if (!ret)
765 			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
766 				    NULL, GFP_KERNEL);
767 		break;
768 	case PH_DEACTIVATE_REQ:
769 		spin_lock_irqsave(&card->lock, flags);
770 		mISDN_clear_bchannel(bch);
771 		mode_tiger(bc, ISDN_P_NONE);
772 		spin_unlock_irqrestore(&card->lock, flags);
773 		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
774 			    NULL, GFP_KERNEL);
775 		ret = 0;
776 		break;
777 	}
778 	if (!ret)
779 		dev_kfree_skb(skb);
780 	return ret;
781 }
782 
783 static int
784 channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
785 {
786 	return mISDN_ctrl_bchannel(&bc->bch, cq);
787 }
788 
789 static int
790 nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
791 {
792 	struct bchannel *bch = container_of(ch, struct bchannel, ch);
793 	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
794 	struct tiger_hw *card  = bch->hw;
795 	int ret = -EINVAL;
796 	u_long flags;
797 
798 	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
799 	switch (cmd) {
800 	case CLOSE_CHANNEL:
801 		test_and_clear_bit(FLG_OPEN, &bch->Flags);
802 		cancel_work_sync(&bch->workq);
803 		spin_lock_irqsave(&card->lock, flags);
804 		mISDN_clear_bchannel(bch);
805 		mode_tiger(bc, ISDN_P_NONE);
806 		spin_unlock_irqrestore(&card->lock, flags);
807 		ch->protocol = ISDN_P_NONE;
808 		ch->peer = NULL;
809 		module_put(THIS_MODULE);
810 		ret = 0;
811 		break;
812 	case CONTROL_CHANNEL:
813 		ret = channel_bctrl(bc, arg);
814 		break;
815 	default:
816 		pr_info("%s: %s unknown prim(%x)\n", card->name, __func__, cmd);
817 	}
818 	return ret;
819 }
820 
821 static int
822 channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
823 {
824 	int	ret = 0;
825 
826 	switch (cq->op) {
827 	case MISDN_CTRL_GETOP:
828 		cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
829 		break;
830 	case MISDN_CTRL_LOOP:
831 		/* cq->channel: 0 disable, 1 B1 loop, 2 B2 loop, 3 both */
832 		if (cq->channel < 0 || cq->channel > 3) {
833 			ret = -EINVAL;
834 			break;
835 		}
836 		ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
837 		break;
838 	case MISDN_CTRL_L1_TIMER3:
839 		ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq->p1);
840 		break;
841 	default:
842 		pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
843 		ret = -EINVAL;
844 		break;
845 	}
846 	return ret;
847 }
848 
849 static int
850 open_bchannel(struct tiger_hw *card, struct channel_req *rq)
851 {
852 	struct bchannel *bch;
853 
854 	if (rq->adr.channel == 0 || rq->adr.channel > 2)
855 		return -EINVAL;
856 	if (rq->protocol == ISDN_P_NONE)
857 		return -EINVAL;
858 	bch = &card->bc[rq->adr.channel - 1].bch;
859 	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
860 		return -EBUSY; /* b-channel can only be opened once */
861 	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
862 	bch->ch.protocol = rq->protocol;
863 	rq->ch = &bch->ch;
864 	return 0;
865 }
866 
867 /*
868  * device control function
869  */
870 static int
871 nj_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
872 {
873 	struct mISDNdevice	*dev = container_of(ch, struct mISDNdevice, D);
874 	struct dchannel		*dch = container_of(dev, struct dchannel, dev);
875 	struct tiger_hw	*card = dch->hw;
876 	struct channel_req	*rq;
877 	int			err = 0;
878 
879 	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
880 	switch (cmd) {
881 	case OPEN_CHANNEL:
882 		rq = arg;
883 		if (rq->protocol == ISDN_P_TE_S0)
884 			err = card->isac.open(&card->isac, rq);
885 		else
886 			err = open_bchannel(card, rq);
887 		if (err)
888 			break;
889 		if (!try_module_get(THIS_MODULE))
890 			pr_info("%s: cannot get module\n", card->name);
891 		break;
892 	case CLOSE_CHANNEL:
893 		pr_debug("%s: dev(%d) close from %p\n", card->name, dch->dev.id,
894 			 __builtin_return_address(0));
895 		module_put(THIS_MODULE);
896 		break;
897 	case CONTROL_CHANNEL:
898 		err = channel_ctrl(card, arg);
899 		break;
900 	default:
901 		pr_debug("%s: %s unknown command %x\n",
902 			 card->name, __func__, cmd);
903 		return -EINVAL;
904 	}
905 	return err;
906 }
907 
908 static int
909 nj_init_card(struct tiger_hw *card)
910 {
911 	u_long flags;
912 	int ret;
913 
914 	spin_lock_irqsave(&card->lock, flags);
915 	nj_disable_hwirq(card);
916 	spin_unlock_irqrestore(&card->lock, flags);
917 
918 	card->irq = card->pdev->irq;
919 	if (request_irq(card->irq, nj_irq, IRQF_SHARED, card->name, card)) {
920 		pr_info("%s: couldn't get interrupt %d\n",
921 			card->name, card->irq);
922 		card->irq = -1;
923 		return -EIO;
924 	}
925 
926 	spin_lock_irqsave(&card->lock, flags);
927 	nj_reset(card);
928 	ret = card->isac.init(&card->isac);
929 	if (ret)
930 		goto error;
931 	ret = inittiger(card);
932 	if (ret)
933 		goto error;
934 	mode_tiger(&card->bc[0], ISDN_P_NONE);
935 	mode_tiger(&card->bc[1], ISDN_P_NONE);
936 error:
937 	spin_unlock_irqrestore(&card->lock, flags);
938 	return ret;
939 }
940 
941 
942 static void
943 nj_release(struct tiger_hw *card)
944 {
945 	u_long flags;
946 	int i;
947 
948 	if (card->base_s) {
949 		spin_lock_irqsave(&card->lock, flags);
950 		nj_disable_hwirq(card);
951 		mode_tiger(&card->bc[0], ISDN_P_NONE);
952 		mode_tiger(&card->bc[1], ISDN_P_NONE);
953 		spin_unlock_irqrestore(&card->lock, flags);
954 		card->isac.release(&card->isac);
955 		release_region(card->base, card->base_s);
956 		card->base_s = 0;
957 	}
958 	if (card->irq > 0)
959 		free_irq(card->irq, card);
960 	if (device_is_registered(&card->isac.dch.dev.dev))
961 		mISDN_unregister_device(&card->isac.dch.dev);
962 
963 	for (i = 0; i < 2; i++) {
964 		mISDN_freebchannel(&card->bc[i].bch);
965 		kfree(card->bc[i].hsbuf);
966 		kfree(card->bc[i].hrbuf);
967 	}
968 	if (card->dma_p)
969 		dma_free_coherent(&card->pdev->dev, NJ_DMA_SIZE, card->dma_p,
970 				  card->dma);
971 	write_lock_irqsave(&card_lock, flags);
972 	list_del(&card->list);
973 	write_unlock_irqrestore(&card_lock, flags);
974 	pci_disable_device(card->pdev);
975 	pci_set_drvdata(card->pdev, NULL);
976 	kfree(card);
977 }
978 
979 
980 static int
981 nj_setup(struct tiger_hw *card)
982 {
983 	card->base = pci_resource_start(card->pdev, 0);
984 	card->base_s = pci_resource_len(card->pdev, 0);
985 	if (!request_region(card->base, card->base_s, card->name)) {
986 		pr_info("%s: NETjet config port %#x-%#x already in use\n",
987 			card->name, card->base,
988 			(u32)(card->base + card->base_s - 1));
989 		card->base_s = 0;
990 		return -EIO;
991 	}
992 	ASSIGN_FUNC(nj, ISAC, card->isac);
993 	return 0;
994 }
995 
996 
997 static int
998 setup_instance(struct tiger_hw *card)
999 {
1000 	int i, err;
1001 	u_long flags;
1002 
1003 	snprintf(card->name, MISDN_MAX_IDLEN - 1, "netjet.%d", nj_cnt + 1);
1004 	write_lock_irqsave(&card_lock, flags);
1005 	list_add_tail(&card->list, &Cards);
1006 	write_unlock_irqrestore(&card_lock, flags);
1007 
1008 	_set_debug(card);
1009 	card->isac.name = card->name;
1010 	spin_lock_init(&card->lock);
1011 	card->isac.hwlock = &card->lock;
1012 	mISDNisac_init(&card->isac, card);
1013 
1014 	card->isac.dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
1015 		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
1016 	card->isac.dch.dev.D.ctrl = nj_dctrl;
1017 	for (i = 0; i < 2; i++) {
1018 		card->bc[i].bch.nr = i + 1;
1019 		set_channelmap(i + 1, card->isac.dch.dev.channelmap);
1020 		mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM,
1021 				   NJ_DMA_RXSIZE >> 1);
1022 		card->bc[i].bch.hw = card;
1023 		card->bc[i].bch.ch.send = nj_l2l1B;
1024 		card->bc[i].bch.ch.ctrl = nj_bctrl;
1025 		card->bc[i].bch.ch.nr = i + 1;
1026 		list_add(&card->bc[i].bch.ch.list,
1027 			 &card->isac.dch.dev.bchannels);
1028 		card->bc[i].bch.hw = card;
1029 	}
1030 	err = nj_setup(card);
1031 	if (err)
1032 		goto error;
1033 	err = mISDN_register_device(&card->isac.dch.dev, &card->pdev->dev,
1034 				    card->name);
1035 	if (err)
1036 		goto error;
1037 	err = nj_init_card(card);
1038 	if (!err)  {
1039 		nj_cnt++;
1040 		pr_notice("Netjet %d cards installed\n", nj_cnt);
1041 		return 0;
1042 	}
1043 error:
1044 	nj_release(card);
1045 	return err;
1046 }
1047 
1048 static int
1049 nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1050 {
1051 	int err = -ENOMEM;
1052 	u32 cfg;
1053 	struct tiger_hw *card;
1054 
1055 	if (pdev->subsystem_vendor == 0x8086 &&
1056 	    pdev->subsystem_device == 0x0003) {
1057 		pr_notice("Netjet: Digium X100P/X101P not handled\n");
1058 		return -ENODEV;
1059 	}
1060 
1061 	if (pdev->subsystem_vendor == 0x55 &&
1062 	    pdev->subsystem_device == 0x02) {
1063 		pr_notice("Netjet: Enter!Now not handled yet\n");
1064 		return -ENODEV;
1065 	}
1066 
1067 	if (pdev->subsystem_vendor == 0xb100 &&
1068 	    pdev->subsystem_device == 0x0003) {
1069 		pr_notice("Netjet: Digium TDM400P not handled yet\n");
1070 		return -ENODEV;
1071 	}
1072 
1073 	card = kzalloc(sizeof(struct tiger_hw), GFP_KERNEL);
1074 	if (!card) {
1075 		pr_info("No kmem for Netjet\n");
1076 		return err;
1077 	}
1078 
1079 	card->pdev = pdev;
1080 
1081 	err = pci_enable_device(pdev);
1082 	if (err) {
1083 		kfree(card);
1084 		return err;
1085 	}
1086 
1087 	printk(KERN_INFO "nj_probe(mISDN): found adapter at %s\n",
1088 	       pci_name(pdev));
1089 
1090 	pci_set_master(pdev);
1091 
1092 	/* the TJ300 and TJ320 must be distinguished, their IRQ handling differs;
1093 	 * unfortunately the chips use the same device ID, but the TJ320 has
1094 	 * bit 20 set in the PCI status config register
1095 	 */
1096 	pci_read_config_dword(pdev, 0x04, &cfg);
1097 	if (cfg & 0x00100000)
1098 		card->typ = NETJET_S_TJ320;
1099 	else
1100 		card->typ = NETJET_S_TJ300;
1101 
1102 	card->base = pci_resource_start(pdev, 0);
1103 	pci_set_drvdata(pdev, card);
1104 	err = setup_instance(card);
1105 	if (err)
1106 		pci_set_drvdata(pdev, NULL);
1107 
1108 	return err;
1109 }
1110 
1111 
1112 static void nj_remove(struct pci_dev *pdev)
1113 {
1114 	struct tiger_hw *card = pci_get_drvdata(pdev);
1115 
1116 	if (card)
1117 		nj_release(card);
1118 	else
1119 		pr_info("%s drvdata already removed\n", __func__);
1120 }
1121 
1122 /* We cannot select cards with PCI_SUB... IDs, since there are cards with
1123  * SUB IDs set to PCI_ANY_ID, so we need to match all and reject the
1124  * known other cards which do not work with this driver - see probe function */
1125 static const struct pci_device_id nj_pci_ids[] = {
1126 	{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
1127 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1128 	{ }
1129 };
1130 MODULE_DEVICE_TABLE(pci, nj_pci_ids);
1131 
1132 static struct pci_driver nj_driver = {
1133 	.name = "netjet",
1134 	.probe = nj_probe,
1135 	.remove = nj_remove,
1136 	.id_table = nj_pci_ids,
1137 };
1138 
1139 static int __init nj_init(void)
1140 {
1141 	int err;
1142 
1143 	pr_notice("Netjet PCI driver Rev. %s\n", NETJET_REV);
1144 	err = pci_register_driver(&nj_driver);
1145 	return err;
1146 }
1147 
1148 static void __exit nj_cleanup(void)
1149 {
1150 	pci_unregister_driver(&nj_driver);
1151 }
1152 
1153 module_init(nj_init);
1154 module_exit(nj_cleanup);
1155