xref: /linux/drivers/isdn/hardware/mISDN/hfcpci.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  * hfcpci.c     low level driver for CCD's hfc-pci based cards
5  *
6  * Author     Werner Cornelius (werner@isdn4linux.de)
7  *            based on existing driver for CCD hfc ISA cards
8  *            type approval valid for HFC-S PCI A based card
9  *
10  * Copyright 1999  by Werner Cornelius (werner@isdn-development.de)
11  * Copyright 2008  by Karsten Keil <kkeil@novell.com>
12  *
13  * Module options:
14  *
15  * debug:
 16  *	NOTE: only one debug value must be given for all cards
17  *	See hfc_pci.h for debug flags.
18  *
19  * poll:
20  *	NOTE: only one poll value must be given for all cards
 21  *	Gives the number of samples processed per fifo pass.
 22  *	By default 128 is used. Decrease to reduce delay, increase to
 23  *	reduce cpu load. If unsure, don't mess with it!
 24  *	A value of 128 will use the controller's interrupt. Other values will
 25  *	use the kernel timer, because the controller does not allow values
 26  *	lower than 128.
 27  *	Also note that the usable values depend on the kernel timer frequency.
 28  *	If the kernel uses a frequency of 1000 Hz, steps of 8 samples are possible.
29  *	If the kernel uses 100 Hz, steps of 80 samples are possible.
30  *	If the kernel uses 300 Hz, steps of about 26 samples are possible.
31  */
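/*
 * Editor's note (illustrative, not part of the original source): B/D channel
 * data is clocked at 8000 samples per second, so the default poll = 128
 * corresponds to roughly 128 / 8000 s = 16 ms per fifo pass.  When a
 * non-default poll value forces use of the kernel timer, the effective step
 * is one timer tick worth of samples, e.g. at HZ = 1000 one tick covers
 * 8000 / 1000 = 8 samples, which is where the "steps of 8 samples" figure
 * above comes from.
 */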
32 
33 #include <linux/interrupt.h>
34 #include <linux/module.h>
35 #include <linux/pci.h>
36 #include <linux/delay.h>
37 #include <linux/mISDNhw.h>
38 #include <linux/slab.h>
39 
40 #include "hfc_pci.h"
41 
42 static const char *hfcpci_revision = "2.0";
43 
44 static int HFC_cnt;
45 static uint debug;
46 static uint poll, tics;
47 static struct timer_list hfc_tl;
48 static unsigned long hfc_jiffies;
49 
50 MODULE_AUTHOR("Karsten Keil");
51 MODULE_DESCRIPTION("mISDN driver for CCD's hfc-pci based cards");
52 MODULE_LICENSE("GPL");
53 module_param(debug, uint, S_IRUGO | S_IWUSR);
54 module_param(poll, uint, S_IRUGO | S_IWUSR);
55 
56 enum {
57 	HFC_CCD_2BD0,
58 	HFC_CCD_B000,
59 	HFC_CCD_B006,
60 	HFC_CCD_B007,
61 	HFC_CCD_B008,
62 	HFC_CCD_B009,
63 	HFC_CCD_B00A,
64 	HFC_CCD_B00B,
65 	HFC_CCD_B00C,
66 	HFC_CCD_B100,
67 	HFC_CCD_B700,
68 	HFC_CCD_B701,
69 	HFC_ASUS_0675,
70 	HFC_BERKOM_A1T,
71 	HFC_BERKOM_TCONCEPT,
72 	HFC_ANIGMA_MC145575,
73 	HFC_ZOLTRIX_2BD0,
74 	HFC_DIGI_DF_M_IOM2_E,
75 	HFC_DIGI_DF_M_E,
76 	HFC_DIGI_DF_M_IOM2_A,
77 	HFC_DIGI_DF_M_A,
78 	HFC_ABOCOM_2BD1,
79 	HFC_SITECOM_DC105V2,
80 };
81 
82 struct hfcPCI_hw {
83 	unsigned char		cirm;
84 	unsigned char		ctmt;
85 	unsigned char		clkdel;
86 	unsigned char		states;
87 	unsigned char		conn;
88 	unsigned char		mst_m;
89 	unsigned char		int_m1;
90 	unsigned char		int_m2;
91 	unsigned char		sctrl;
92 	unsigned char		sctrl_r;
93 	unsigned char		sctrl_e;
94 	unsigned char		trm;
95 	unsigned char		fifo_en;
96 	unsigned char		bswapped;
97 	unsigned char		protocol;
98 	int			nt_timer;
99 	unsigned char __iomem	*pci_io; /* start of PCI IO memory */
100 	dma_addr_t		dmahandle;
101 	void			*fifos; /* FIFO memory */
102 	int			last_bfifo_cnt[2];
103 	/* marker saving last b-fifo frame count */
104 	struct timer_list	timer;
105 };
106 
107 #define	HFC_CFG_MASTER		1
108 #define HFC_CFG_SLAVE		2
109 #define	HFC_CFG_PCM		3
110 #define HFC_CFG_2HFC		4
111 #define HFC_CFG_SLAVEHFC	5
112 #define HFC_CFG_NEG_F0		6
113 #define HFC_CFG_SW_DD_DU	7
114 
115 #define FLG_HFC_TIMER_T1	16
116 #define FLG_HFC_TIMER_T3	17
117 
118 #define NT_T1_COUNT	1120	/* number of 3.125ms interrupts (3.5s) */
119 #define NT_T3_COUNT	31	/* number of 3.125ms interrupts (97 ms) */
120 #define CLKDEL_TE	0x0e	/* CLKDEL in TE mode */
121 #define CLKDEL_NT	0x6c	/* CLKDEL in NT mode */
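/*
 * Editor's note: the timer counts above assume the 3.125 ms auto-timer
 * interval, i.e. NT_T1_COUNT * 3.125 ms = 1120 * 3.125 ms = 3.5 s and
 * NT_T3_COUNT * 3.125 ms = 31 * 3.125 ms ~= 96.9 ms (~97 ms), matching the
 * comments on the defines.
 */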
122 
123 
124 struct hfc_pci {
125 	u_char			subtype;
126 	u_char			chanlimit;
127 	u_char			initdone;
128 	u_long			cfg;
129 	u_int			irq;
130 	u_int			irqcnt;
131 	struct pci_dev		*pdev;
132 	struct hfcPCI_hw	hw;
133 	spinlock_t		lock;	/* card lock */
134 	struct dchannel		dch;
135 	struct bchannel		bch[2];
136 };
137 
138 /* Interface functions */
139 static void
140 enable_hwirq(struct hfc_pci *hc)
141 {
142 	hc->hw.int_m2 |= HFCPCI_IRQ_ENABLE;
143 	Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
144 }
145 
146 static void
147 disable_hwirq(struct hfc_pci *hc)
148 {
149 	hc->hw.int_m2 &= ~((u_char)HFCPCI_IRQ_ENABLE);
150 	Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
151 }
152 
153 /*
154  * free hardware resources used by driver
155  */
156 static void
157 release_io_hfcpci(struct hfc_pci *hc)
158 {
159 	/* disable memory mapped ports + busmaster */
160 	pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
161 	del_timer(&hc->hw.timer);
162 	dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
163 			  hc->hw.dmahandle);
164 	iounmap(hc->hw.pci_io);
165 }
166 
167 /*
168  * set mode (NT or TE)
169  */
170 static void
171 hfcpci_setmode(struct hfc_pci *hc)
172 {
173 	if (hc->hw.protocol == ISDN_P_NT_S0) {
174 		hc->hw.clkdel = CLKDEL_NT;	/* ST-Bit delay for NT-Mode */
175 		hc->hw.sctrl |= SCTRL_MODE_NT;	/* NT-MODE */
176 		hc->hw.states = 1;		/* G1 */
177 	} else {
178 		hc->hw.clkdel = CLKDEL_TE;	/* ST-Bit delay for TE-Mode */
179 		hc->hw.sctrl &= ~SCTRL_MODE_NT;	/* TE-MODE */
180 		hc->hw.states = 2;		/* F2 */
181 	}
182 	Write_hfc(hc, HFCPCI_CLKDEL, hc->hw.clkdel);
183 	Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | hc->hw.states);
184 	udelay(10);
185 	Write_hfc(hc, HFCPCI_STATES, hc->hw.states | 0x40); /* Deactivate */
186 	Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
187 }
188 
189 /*
190  * function called to reset the HFC PCI chip. A complete software reset of chip
191  * and fifos is done.
192  */
193 static void
194 reset_hfcpci(struct hfc_pci *hc)
195 {
196 	u_char	val;
197 	int	cnt = 0;
198 
199 	printk(KERN_DEBUG "reset_hfcpci: entered\n");
200 	val = Read_hfc(hc, HFCPCI_CHIP_ID);
201 	printk(KERN_INFO "HFC_PCI: resetting HFC ChipId(%x)\n", val);
202 	/* enable memory mapped ports, disable busmaster */
203 	pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
204 	disable_hwirq(hc);
205 	/* enable memory ports + busmaster */
206 	pci_write_config_word(hc->pdev, PCI_COMMAND,
207 			      PCI_ENA_MEMIO + PCI_ENA_MASTER);
208 	val = Read_hfc(hc, HFCPCI_STATUS);
209 	printk(KERN_DEBUG "HFC-PCI status(%x) before reset\n", val);
210 	hc->hw.cirm = HFCPCI_RESET;	/* Reset On */
211 	Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
212 	set_current_state(TASK_UNINTERRUPTIBLE);
213 	mdelay(10);			/* Timeout 10ms */
214 	hc->hw.cirm = 0;		/* Reset Off */
215 	Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
216 	val = Read_hfc(hc, HFCPCI_STATUS);
217 	printk(KERN_DEBUG "HFC-PCI status(%x) after reset\n", val);
218 	while (cnt < 50000) { /* max 50000 us */
219 		udelay(5);
220 		cnt += 5;
221 		val = Read_hfc(hc, HFCPCI_STATUS);
222 		if (!(val & 2))
223 			break;
224 	}
225 	printk(KERN_DEBUG "HFC-PCI status(%x) after %dus\n", val, cnt);
226 
227 	hc->hw.fifo_en = 0x30;	/* only D fifos enabled */
228 
229 	hc->hw.bswapped = 0;	/* no exchange */
230 	hc->hw.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
 231 	hc->hw.trm = HFCPCI_BTRANS_THRESMASK; /* no echo connect, threshold */
 232 	hc->hw.sctrl = 0x40;	/* set tx_lo mode, error in datasheet! */
233 	hc->hw.sctrl_r = 0;
234 	hc->hw.sctrl_e = HFCPCI_AUTO_AWAKE;	/* S/T Auto awake */
235 	hc->hw.mst_m = 0;
236 	if (test_bit(HFC_CFG_MASTER, &hc->cfg))
237 		hc->hw.mst_m |= HFCPCI_MASTER;	/* HFC Master Mode */
238 	if (test_bit(HFC_CFG_NEG_F0, &hc->cfg))
239 		hc->hw.mst_m |= HFCPCI_F0_NEGATIV;
240 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
241 	Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
242 	Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
243 	Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
244 
245 	hc->hw.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
246 		HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
247 	Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
248 
249 	/* Clear already pending ints */
250 	val = Read_hfc(hc, HFCPCI_INT_S1);
251 
252 	/* set NT/TE mode */
253 	hfcpci_setmode(hc);
254 
255 	Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
256 	Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
257 
258 	/*
259 	 * Init GCI/IOM2 in master mode
260 	 * Slots 0 and 1 are set for B-chan 1 and 2
261 	 * D- and monitor/CI channel are not enabled
262 	 * STIO1 is used as output for data, B1+B2 from ST->IOM+HFC
263 	 * STIO2 is used as data input, B1+B2 from IOM->ST
264 	 * ST B-channel send disabled -> continuous 1s
265 	 * The IOM slots are always enabled
266 	 */
267 	if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
268 		/* set data flow directions: connect B1,B2: HFC to/from PCM */
269 		hc->hw.conn = 0x09;
270 	} else {
271 		hc->hw.conn = 0x36;	/* set data flow directions */
272 		if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
273 			Write_hfc(hc, HFCPCI_B1_SSL, 0xC0);
274 			Write_hfc(hc, HFCPCI_B2_SSL, 0xC1);
275 			Write_hfc(hc, HFCPCI_B1_RSL, 0xC0);
276 			Write_hfc(hc, HFCPCI_B2_RSL, 0xC1);
277 		} else {
278 			Write_hfc(hc, HFCPCI_B1_SSL, 0x80);
279 			Write_hfc(hc, HFCPCI_B2_SSL, 0x81);
280 			Write_hfc(hc, HFCPCI_B1_RSL, 0x80);
281 			Write_hfc(hc, HFCPCI_B2_RSL, 0x81);
282 		}
283 	}
284 	Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
285 	val = Read_hfc(hc, HFCPCI_INT_S2);
286 }
287 
288 /*
289  * Timer function called when kernel timer expires
290  */
291 static void
292 hfcpci_Timer(struct timer_list *t)
293 {
294 	struct hfc_pci *hc = from_timer(hc, t, hw.timer);
295 	hc->hw.timer.expires = jiffies + 75;
296 	/* WD RESET */
297 /*
298  *	WriteReg(hc, HFCD_DATA, HFCD_CTMT, hc->hw.ctmt | 0x80);
299  *	add_timer(&hc->hw.timer);
300  */
301 }
302 
303 
304 /*
 305  * select the B-channel entry that matches 'channel' and is active
306  */
307 static struct bchannel *
308 Sel_BCS(struct hfc_pci *hc, int channel)
309 {
310 	if (test_bit(FLG_ACTIVE, &hc->bch[0].Flags) &&
311 	    (hc->bch[0].nr & channel))
312 		return &hc->bch[0];
313 	else if (test_bit(FLG_ACTIVE, &hc->bch[1].Flags) &&
314 		 (hc->bch[1].nr & channel))
315 		return &hc->bch[1];
316 	else
317 		return NULL;
318 }
319 
320 /*
321  * clear the desired B-channel rx fifo
322  */
323 static void
324 hfcpci_clear_fifo_rx(struct hfc_pci *hc, int fifo)
325 {
326 	u_char		fifo_state;
327 	struct bzfifo	*bzr;
328 
329 	if (fifo) {
330 		bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
331 		fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2RX;
332 	} else {
333 		bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
334 		fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1RX;
335 	}
336 	if (fifo_state)
337 		hc->hw.fifo_en ^= fifo_state;
338 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
339 	hc->hw.last_bfifo_cnt[fifo] = 0;
340 	bzr->f1 = MAX_B_FRAMES;
341 	bzr->f2 = bzr->f1;	/* init F pointers to remain constant */
342 	bzr->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
343 	bzr->za[MAX_B_FRAMES].z2 = cpu_to_le16(
344 		le16_to_cpu(bzr->za[MAX_B_FRAMES].z1));
345 	if (fifo_state)
346 		hc->hw.fifo_en |= fifo_state;
347 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
348 }
349 
350 /*
351  * clear the desired B-channel tx fifo
352  */
353 static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo)
354 {
355 	u_char		fifo_state;
356 	struct bzfifo	*bzt;
357 
358 	if (fifo) {
359 		bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
360 		fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2TX;
361 	} else {
362 		bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
363 		fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1TX;
364 	}
365 	if (fifo_state)
366 		hc->hw.fifo_en ^= fifo_state;
367 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
368 	if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
369 		printk(KERN_DEBUG "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) "
370 		       "z1(%x) z2(%x) state(%x)\n",
371 		       fifo, bzt->f1, bzt->f2,
372 		       le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
373 		       le16_to_cpu(bzt->za[MAX_B_FRAMES].z2),
374 		       fifo_state);
375 	bzt->f2 = MAX_B_FRAMES;
376 	bzt->f1 = bzt->f2;	/* init F pointers to remain constant */
377 	bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
378 	bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 2);
379 	if (fifo_state)
380 		hc->hw.fifo_en |= fifo_state;
381 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
382 	if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
383 		printk(KERN_DEBUG
384 		       "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) z1(%x) z2(%x)\n",
385 		       fifo, bzt->f1, bzt->f2,
386 		       le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
387 		       le16_to_cpu(bzt->za[MAX_B_FRAMES].z2));
388 }
389 
390 /*
391  * read a complete B-frame out of the buffer
392  */
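/*
 * Editor's note, summarising the fifo layout as used by the code below
 * (derived from this driver, lightly hedged): each B fifo has frame
 * counters F1/F2 and per-frame byte pointers Z1/Z2.  On receive the chip
 * advances F1/Z1 as frames arrive while the driver consumes at F2/Z2.
 * Byte offsets carry a B_SUB_VAL bias, so bdata[z - B_SUB_VAL] addresses
 * the data area, and a copy that crosses the end of the ring is done in
 * two memcpy() steps.  The last 3 bytes of an HDLC frame (CRC + status)
 * are stripped; a non-zero status byte marks the frame as bad.
 */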
393 static void
394 hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
395 		   u_char *bdata, int count)
396 {
397 	u_char		*ptr, *ptr1, new_f2;
398 	int		maxlen, new_z2;
399 	struct zt	*zp;
400 
401 	if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
402 		printk(KERN_DEBUG "hfcpci_empty_fifo\n");
403 	zp = &bz->za[bz->f2];	/* point to Z-Regs */
404 	new_z2 = le16_to_cpu(zp->z2) + count;	/* new position in fifo */
405 	if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
406 		new_z2 -= B_FIFO_SIZE;	/* buffer wrap */
407 	new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
408 	if ((count > MAX_DATA_SIZE + 3) || (count < 4) ||
409 	    (*(bdata + (le16_to_cpu(zp->z1) - B_SUB_VAL)))) {
410 		if (bch->debug & DEBUG_HW)
411 			printk(KERN_DEBUG "hfcpci_empty_fifo: incoming packet "
412 			       "invalid length %d or crc\n", count);
413 #ifdef ERROR_STATISTIC
414 		bch->err_inv++;
415 #endif
416 		bz->za[new_f2].z2 = cpu_to_le16(new_z2);
417 		bz->f2 = new_f2;	/* next buffer */
418 	} else {
419 		bch->rx_skb = mI_alloc_skb(count - 3, GFP_ATOMIC);
420 		if (!bch->rx_skb) {
421 			printk(KERN_WARNING "HFCPCI: receive out of memory\n");
422 			return;
423 		}
424 		count -= 3;
425 		ptr = skb_put(bch->rx_skb, count);
426 
427 		if (le16_to_cpu(zp->z2) + count <= B_FIFO_SIZE + B_SUB_VAL)
428 			maxlen = count;		/* complete transfer */
429 		else
430 			maxlen = B_FIFO_SIZE + B_SUB_VAL -
431 				le16_to_cpu(zp->z2);	/* maximum */
432 
433 		ptr1 = bdata + (le16_to_cpu(zp->z2) - B_SUB_VAL);
434 		/* start of data */
435 		memcpy(ptr, ptr1, maxlen);	/* copy data */
436 		count -= maxlen;
437 
438 		if (count) {	/* rest remaining */
439 			ptr += maxlen;
440 			ptr1 = bdata;	/* start of buffer */
441 			memcpy(ptr, ptr1, count);	/* rest */
442 		}
443 		bz->za[new_f2].z2 = cpu_to_le16(new_z2);
444 		bz->f2 = new_f2;	/* next buffer */
445 		recv_Bchannel(bch, MISDN_ID_ANY, false);
446 	}
447 }
448 
449 /*
450  * D-channel receive procedure
451  */
452 static int
453 receive_dmsg(struct hfc_pci *hc)
454 {
455 	struct dchannel	*dch = &hc->dch;
456 	int		maxlen;
457 	int		rcnt, total;
458 	int		count = 5;
459 	u_char		*ptr, *ptr1;
460 	struct dfifo	*df;
461 	struct zt	*zp;
462 
463 	df = &((union fifo_area *)(hc->hw.fifos))->d_chan.d_rx;
464 	while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
465 		zp = &df->za[df->f2 & D_FREG_MASK];
466 		rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
467 		if (rcnt < 0)
468 			rcnt += D_FIFO_SIZE;
469 		rcnt++;
470 		if (dch->debug & DEBUG_HW_DCHANNEL)
471 			printk(KERN_DEBUG
472 			       "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)\n",
473 			       df->f1, df->f2,
474 			       le16_to_cpu(zp->z1),
475 			       le16_to_cpu(zp->z2),
476 			       rcnt);
477 
478 		if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
479 		    (df->data[le16_to_cpu(zp->z1)])) {
480 			if (dch->debug & DEBUG_HW)
481 				printk(KERN_DEBUG
482 				       "empty_fifo hfcpci packet inv. len "
483 				       "%d or crc %d\n",
484 				       rcnt,
485 				       df->data[le16_to_cpu(zp->z1)]);
486 #ifdef ERROR_STATISTIC
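			/*
			 * Editor's note: 'cs' is not declared anywhere in this
			 * driver, so this ERROR_STATISTIC branch looks like a
			 * leftover and would not compile if the macro were set.
			 */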
487 			cs->err_rx++;
488 #endif
489 			df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
490 				(MAX_D_FRAMES + 1);	/* next buffer */
491 			df->za[df->f2 & D_FREG_MASK].z2 =
492 				cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) &
493 					    (D_FIFO_SIZE - 1));
494 		} else {
495 			dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
496 			if (!dch->rx_skb) {
497 				printk(KERN_WARNING
498 				       "HFC-PCI: D receive out of memory\n");
499 				break;
500 			}
501 			total = rcnt;
502 			rcnt -= 3;
503 			ptr = skb_put(dch->rx_skb, rcnt);
504 
505 			if (le16_to_cpu(zp->z2) + rcnt <= D_FIFO_SIZE)
506 				maxlen = rcnt;	/* complete transfer */
507 			else
508 				maxlen = D_FIFO_SIZE - le16_to_cpu(zp->z2);
509 			/* maximum */
510 
511 			ptr1 = df->data + le16_to_cpu(zp->z2);
512 			/* start of data */
513 			memcpy(ptr, ptr1, maxlen);	/* copy data */
514 			rcnt -= maxlen;
515 
516 			if (rcnt) {	/* rest remaining */
517 				ptr += maxlen;
518 				ptr1 = df->data;	/* start of buffer */
519 				memcpy(ptr, ptr1, rcnt);	/* rest */
520 			}
521 			df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
522 				(MAX_D_FRAMES + 1);	/* next buffer */
523 			df->za[df->f2 & D_FREG_MASK].z2 = cpu_to_le16((
524 									      le16_to_cpu(zp->z2) + total) & (D_FIFO_SIZE - 1));
525 			recv_Dchannel(dch);
526 		}
527 	}
528 	return 1;
529 }
530 
531 /*
 532  * check for transparent receive data and read at most one 'poll' worth, if available
533  */
534 static void
535 hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz,
536 			struct bzfifo *txbz, u_char *bdata)
537 {
538 	__le16	*z1r, *z2r, *z1t, *z2t;
539 	int	new_z2, fcnt_rx, fcnt_tx, maxlen;
540 	u_char	*ptr, *ptr1;
541 
542 	z1r = &rxbz->za[MAX_B_FRAMES].z1;	/* pointer to z reg */
543 	z2r = z1r + 1;
544 	z1t = &txbz->za[MAX_B_FRAMES].z1;
545 	z2t = z1t + 1;
546 
547 	fcnt_rx = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
548 	if (!fcnt_rx)
549 		return;	/* no data avail */
550 
551 	if (fcnt_rx <= 0)
552 		fcnt_rx += B_FIFO_SIZE;	/* bytes actually buffered */
553 	new_z2 = le16_to_cpu(*z2r) + fcnt_rx;	/* new position in fifo */
554 	if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
555 		new_z2 -= B_FIFO_SIZE;	/* buffer wrap */
556 
557 	fcnt_tx = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
558 	if (fcnt_tx <= 0)
559 		fcnt_tx += B_FIFO_SIZE;
560 	/* fcnt_tx contains available bytes in tx-fifo */
561 	fcnt_tx = B_FIFO_SIZE - fcnt_tx;
562 	/* remaining bytes to send (bytes in tx-fifo) */
563 
564 	if (test_bit(FLG_RX_OFF, &bch->Flags)) {
565 		bch->dropcnt += fcnt_rx;
566 		*z2r = cpu_to_le16(new_z2);
567 		return;
568 	}
569 	maxlen = bchannel_get_rxbuf(bch, fcnt_rx);
570 	if (maxlen < 0) {
571 		pr_warn("B%d: No bufferspace for %d bytes\n", bch->nr, fcnt_rx);
572 	} else {
573 		ptr = skb_put(bch->rx_skb, fcnt_rx);
574 		if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL)
575 			maxlen = fcnt_rx;	/* complete transfer */
576 		else
577 			maxlen = B_FIFO_SIZE + B_SUB_VAL - le16_to_cpu(*z2r);
578 		/* maximum */
579 
580 		ptr1 = bdata + (le16_to_cpu(*z2r) - B_SUB_VAL);
581 		/* start of data */
582 		memcpy(ptr, ptr1, maxlen);	/* copy data */
583 		fcnt_rx -= maxlen;
584 
585 		if (fcnt_rx) {	/* rest remaining */
586 			ptr += maxlen;
587 			ptr1 = bdata;	/* start of buffer */
588 			memcpy(ptr, ptr1, fcnt_rx);	/* rest */
589 		}
590 		recv_Bchannel(bch, fcnt_tx, false); /* bch, id, !force */
591 	}
592 	*z2r = cpu_to_le16(new_z2);		/* new position */
593 }
594 
595 /*
596  * B-channel main receive routine
597  */
598 static void
599 main_rec_hfcpci(struct bchannel *bch)
600 {
601 	struct hfc_pci	*hc = bch->hw;
602 	int		rcnt, real_fifo;
603 	int		receive = 0, count = 5;
604 	struct bzfifo	*txbz, *rxbz;
605 	u_char		*bdata;
606 	struct zt	*zp;
607 
608 	if ((bch->nr & 2) && (!hc->hw.bswapped)) {
609 		rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
610 		txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
611 		bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2;
612 		real_fifo = 1;
613 	} else {
614 		rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
615 		txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
616 		bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b1;
617 		real_fifo = 0;
618 	}
619 Begin:
620 	count--;
621 	if (rxbz->f1 != rxbz->f2) {
622 		if (bch->debug & DEBUG_HW_BCHANNEL)
623 			printk(KERN_DEBUG "hfcpci rec ch(%x) f1(%d) f2(%d)\n",
624 			       bch->nr, rxbz->f1, rxbz->f2);
625 		zp = &rxbz->za[rxbz->f2];
626 
627 		rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
628 		if (rcnt < 0)
629 			rcnt += B_FIFO_SIZE;
630 		rcnt++;
631 		if (bch->debug & DEBUG_HW_BCHANNEL)
632 			printk(KERN_DEBUG
633 			       "hfcpci rec ch(%x) z1(%x) z2(%x) cnt(%d)\n",
634 			       bch->nr, le16_to_cpu(zp->z1),
635 			       le16_to_cpu(zp->z2), rcnt);
636 		hfcpci_empty_bfifo(bch, rxbz, bdata, rcnt);
637 		rcnt = rxbz->f1 - rxbz->f2;
638 		if (rcnt < 0)
639 			rcnt += MAX_B_FRAMES + 1;
640 		if (hc->hw.last_bfifo_cnt[real_fifo] > rcnt + 1) {
641 			rcnt = 0;
642 			hfcpci_clear_fifo_rx(hc, real_fifo);
643 		}
644 		hc->hw.last_bfifo_cnt[real_fifo] = rcnt;
645 		if (rcnt > 1)
646 			receive = 1;
647 		else
648 			receive = 0;
649 	} else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
650 		hfcpci_empty_fifo_trans(bch, rxbz, txbz, bdata);
651 		return;
652 	} else
653 		receive = 0;
654 	if (count && receive)
655 		goto Begin;
656 
657 }
658 
659 /*
660  * D-channel send routine
661  */
662 static void
663 hfcpci_fill_dfifo(struct hfc_pci *hc)
664 {
665 	struct dchannel	*dch = &hc->dch;
666 	int		fcnt;
667 	int		count, new_z1, maxlen;
668 	struct dfifo	*df;
669 	u_char		*src, *dst, new_f1;
670 
671 	if ((dch->debug & DEBUG_HW_DCHANNEL) && !(dch->debug & DEBUG_HW_DFIFO))
672 		printk(KERN_DEBUG "%s\n", __func__);
673 
674 	if (!dch->tx_skb)
675 		return;
676 	count = dch->tx_skb->len - dch->tx_idx;
677 	if (count <= 0)
678 		return;
679 	df = &((union fifo_area *) (hc->hw.fifos))->d_chan.d_tx;
680 
681 	if (dch->debug & DEBUG_HW_DFIFO)
682 		printk(KERN_DEBUG "%s:f1(%d) f2(%d) z1(f1)(%x)\n", __func__,
683 		       df->f1, df->f2,
684 		       le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1));
685 	fcnt = df->f1 - df->f2;	/* frame count actually buffered */
686 	if (fcnt < 0)
687 		fcnt += (MAX_D_FRAMES + 1);	/* if wrap around */
688 	if (fcnt > (MAX_D_FRAMES - 1)) {
689 		if (dch->debug & DEBUG_HW_DCHANNEL)
690 			printk(KERN_DEBUG
691 			       "hfcpci_fill_Dfifo more than 14 frames\n");
692 #ifdef ERROR_STATISTIC
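		/*
		 * Editor's note: as in receive_dmsg(), 'cs' is undefined here;
		 * dead code unless ERROR_STATISTIC is ever enabled.
		 */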
693 		cs->err_tx++;
694 #endif
695 		return;
696 	}
697 	/* now determine free bytes in FIFO buffer */
698 	maxlen = le16_to_cpu(df->za[df->f2 & D_FREG_MASK].z2) -
699 		le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) - 1;
700 	if (maxlen <= 0)
701 		maxlen += D_FIFO_SIZE;	/* maxlen now contains available bytes */
702 
703 	if (dch->debug & DEBUG_HW_DCHANNEL)
704 		printk(KERN_DEBUG "hfcpci_fill_Dfifo count(%d/%d)\n",
705 		       count, maxlen);
706 	if (count > maxlen) {
707 		if (dch->debug & DEBUG_HW_DCHANNEL)
708 			printk(KERN_DEBUG "hfcpci_fill_Dfifo no fifo mem\n");
709 		return;
710 	}
711 	new_z1 = (le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) + count) &
712 		(D_FIFO_SIZE - 1);
713 	new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
714 	src = dch->tx_skb->data + dch->tx_idx;	/* source pointer */
715 	dst = df->data + le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
716 	maxlen = D_FIFO_SIZE - le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
717 	/* end fifo */
718 	if (maxlen > count)
719 		maxlen = count;	/* limit size */
720 	memcpy(dst, src, maxlen);	/* first copy */
721 
722 	count -= maxlen;	/* remaining bytes */
723 	if (count) {
724 		dst = df->data;	/* start of buffer */
725 		src += maxlen;	/* new position */
726 		memcpy(dst, src, count);
727 	}
728 	df->za[new_f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
729 	/* for next buffer */
730 	df->za[df->f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
731 	/* new pos actual buffer */
732 	df->f1 = new_f1;	/* next frame */
733 	dch->tx_idx = dch->tx_skb->len;
734 }
735 
736 /*
737  * B-channel send routine
738  */
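/*
 * Editor's note on the transparent-mode path below (a summary, not part of
 * the original source): with FLG_TRANSPARENT set the fifo is only topped up
 * to at most 2 * poll bytes ahead of the chip ("maximum fill shall be
 * poll*2"), and with FLG_FILLEMPTY set and no skb queued, HFCPCI_FILLEMPTY
 * bytes of the configured fill pattern (bch->fill[0]) are written instead
 * of real data.
 */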
739 static void
740 hfcpci_fill_fifo(struct bchannel *bch)
741 {
742 	struct hfc_pci	*hc = bch->hw;
743 	int		maxlen, fcnt;
744 	int		count, new_z1;
745 	struct bzfifo	*bz;
746 	u_char		*bdata;
747 	u_char		new_f1, *src, *dst;
748 	__le16 *z1t, *z2t;
749 
750 	if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
751 		printk(KERN_DEBUG "%s\n", __func__);
752 	if ((!bch->tx_skb) || bch->tx_skb->len == 0) {
753 		if (!test_bit(FLG_FILLEMPTY, &bch->Flags) &&
754 		    !test_bit(FLG_TRANSPARENT, &bch->Flags))
755 			return;
756 		count = HFCPCI_FILLEMPTY;
757 	} else {
758 		count = bch->tx_skb->len - bch->tx_idx;
759 	}
760 	if ((bch->nr & 2) && (!hc->hw.bswapped)) {
761 		bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
762 		bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2;
763 	} else {
764 		bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
765 		bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b1;
766 	}
767 
768 	if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
769 		z1t = &bz->za[MAX_B_FRAMES].z1;
770 		z2t = z1t + 1;
771 		if (bch->debug & DEBUG_HW_BCHANNEL)
772 			printk(KERN_DEBUG "hfcpci_fill_fifo_trans ch(%x) "
773 			       "cnt(%d) z1(%x) z2(%x)\n", bch->nr, count,
774 			       le16_to_cpu(*z1t), le16_to_cpu(*z2t));
775 		fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
776 		if (fcnt <= 0)
777 			fcnt += B_FIFO_SIZE;
778 		if (test_bit(FLG_FILLEMPTY, &bch->Flags)) {
779 			/* fcnt contains available bytes in fifo */
780 			if (count > fcnt)
781 				count = fcnt;
782 			new_z1 = le16_to_cpu(*z1t) + count;
783 			/* new buffer Position */
784 			if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
785 				new_z1 -= B_FIFO_SIZE;	/* buffer wrap */
786 			dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
787 			maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
788 			/* end of fifo */
789 			if (bch->debug & DEBUG_HW_BFIFO)
790 				printk(KERN_DEBUG "hfcpci_FFt fillempty "
791 				       "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n",
792 				       fcnt, maxlen, new_z1, dst);
793 			if (maxlen > count)
794 				maxlen = count;		/* limit size */
795 			memset(dst, bch->fill[0], maxlen); /* first copy */
796 			count -= maxlen;		/* remaining bytes */
797 			if (count) {
798 				dst = bdata;		/* start of buffer */
799 				memset(dst, bch->fill[0], count);
800 			}
801 			*z1t = cpu_to_le16(new_z1);	/* now send data */
802 			return;
803 		}
804 		/* fcnt contains available bytes in fifo */
805 		fcnt = B_FIFO_SIZE - fcnt;
806 		/* remaining bytes to send (bytes in fifo) */
807 
808 	next_t_frame:
809 		count = bch->tx_skb->len - bch->tx_idx;
810 		/* maximum fill shall be poll*2 */
811 		if (count > (poll << 1) - fcnt)
812 			count = (poll << 1) - fcnt;
813 		if (count <= 0)
814 			return;
815 		/* data is suitable for fifo */
816 		new_z1 = le16_to_cpu(*z1t) + count;
817 		/* new buffer Position */
818 		if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
819 			new_z1 -= B_FIFO_SIZE;	/* buffer wrap */
820 		src = bch->tx_skb->data + bch->tx_idx;
821 		/* source pointer */
822 		dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
823 		maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
824 		/* end of fifo */
825 		if (bch->debug & DEBUG_HW_BFIFO)
826 			printk(KERN_DEBUG "hfcpci_FFt fcnt(%d) "
827 			       "maxl(%d) nz1(%x) dst(%p)\n",
828 			       fcnt, maxlen, new_z1, dst);
829 		fcnt += count;
830 		bch->tx_idx += count;
831 		if (maxlen > count)
832 			maxlen = count;		/* limit size */
833 		memcpy(dst, src, maxlen);	/* first copy */
834 		count -= maxlen;	/* remaining bytes */
835 		if (count) {
836 			dst = bdata;	/* start of buffer */
837 			src += maxlen;	/* new position */
838 			memcpy(dst, src, count);
839 		}
840 		*z1t = cpu_to_le16(new_z1);	/* now send data */
841 		if (bch->tx_idx < bch->tx_skb->len)
842 			return;
843 		dev_kfree_skb_any(bch->tx_skb);
844 		if (get_next_bframe(bch))
845 			goto next_t_frame;
846 		return;
847 	}
848 	if (bch->debug & DEBUG_HW_BCHANNEL)
849 		printk(KERN_DEBUG
850 		       "%s: ch(%x) f1(%d) f2(%d) z1(f1)(%x)\n",
851 		       __func__, bch->nr, bz->f1, bz->f2,
852 		       bz->za[bz->f1].z1);
853 	fcnt = bz->f1 - bz->f2;	/* frame count actually buffered */
854 	if (fcnt < 0)
855 		fcnt += (MAX_B_FRAMES + 1);	/* if wrap around */
856 	if (fcnt > (MAX_B_FRAMES - 1)) {
857 		if (bch->debug & DEBUG_HW_BCHANNEL)
858 			printk(KERN_DEBUG
859 			       "hfcpci_fill_Bfifo more than 14 frames\n");
860 		return;
861 	}
862 	/* now determine free bytes in FIFO buffer */
863 	maxlen = le16_to_cpu(bz->za[bz->f2].z2) -
864 		le16_to_cpu(bz->za[bz->f1].z1) - 1;
865 	if (maxlen <= 0)
866 		maxlen += B_FIFO_SIZE;	/* maxlen now contains available bytes */
867 
868 	if (bch->debug & DEBUG_HW_BCHANNEL)
869 		printk(KERN_DEBUG "hfcpci_fill_fifo ch(%x) count(%d/%d)\n",
870 		       bch->nr, count, maxlen);
871 
872 	if (maxlen < count) {
873 		if (bch->debug & DEBUG_HW_BCHANNEL)
874 			printk(KERN_DEBUG "hfcpci_fill_fifo no fifo mem\n");
875 		return;
876 	}
877 	new_z1 = le16_to_cpu(bz->za[bz->f1].z1) + count;
878 	/* new buffer Position */
879 	if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
880 		new_z1 -= B_FIFO_SIZE;	/* buffer wrap */
881 
882 	new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
883 	src = bch->tx_skb->data + bch->tx_idx;	/* source pointer */
884 	dst = bdata + (le16_to_cpu(bz->za[bz->f1].z1) - B_SUB_VAL);
885 	maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(bz->za[bz->f1].z1);
886 	/* end fifo */
887 	if (maxlen > count)
888 		maxlen = count;	/* limit size */
889 	memcpy(dst, src, maxlen);	/* first copy */
890 
891 	count -= maxlen;	/* remaining bytes */
892 	if (count) {
893 		dst = bdata;	/* start of buffer */
894 		src += maxlen;	/* new position */
895 		memcpy(dst, src, count);
896 	}
897 	bz->za[new_f1].z1 = cpu_to_le16(new_z1);	/* for next buffer */
898 	bz->f1 = new_f1;	/* next frame */
899 	dev_kfree_skb_any(bch->tx_skb);
900 	get_next_bframe(bch);
901 }
902 
903 
904 
905 /*
906  * handle L1 state changes TE
907  */
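/*
 * Editor's note: the numeric dch->state values handled below appear to
 * correspond to the ITU-T I.430 TE-side F-states (e.g. 3 = F3 deactivated,
 * 6 = F6 synchronized, 7 = F7 activated, 5/8 = no usable signal), which is
 * why they map to the layer-1 events used here.
 */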
908 
909 static void
910 ph_state_te(struct dchannel *dch)
911 {
912 	if (dch->debug)
913 		printk(KERN_DEBUG "%s: TE newstate %x\n",
914 		       __func__, dch->state);
915 	switch (dch->state) {
916 	case 0:
917 		l1_event(dch->l1, HW_RESET_IND);
918 		break;
919 	case 3:
920 		l1_event(dch->l1, HW_DEACT_IND);
921 		break;
922 	case 5:
923 	case 8:
924 		l1_event(dch->l1, ANYSIGNAL);
925 		break;
926 	case 6:
927 		l1_event(dch->l1, INFO2);
928 		break;
929 	case 7:
930 		l1_event(dch->l1, INFO4_P8);
931 		break;
932 	}
933 }
934 
935 /*
936  * handle L1 state changes NT
937  */
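/*
 * Editor's note: in NT mode the state numbers below appear to follow the
 * I.430 G-states (1 = G1 deactivated, 2 = G2 pending activation,
 * 3 = G3 active, 4 = G4 pending deactivation); see also the explicit
 * "allow G2 -> G3 transition" write in ph_state_nt().
 */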
938 
939 static void
940 handle_nt_timer3(struct dchannel *dch) {
941 	struct hfc_pci	*hc = dch->hw;
942 
943 	test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
944 	hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
945 	Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
946 	hc->hw.nt_timer = 0;
947 	test_and_set_bit(FLG_ACTIVE, &dch->Flags);
948 	if (test_bit(HFC_CFG_MASTER, &hc->cfg))
949 		hc->hw.mst_m |= HFCPCI_MASTER;
950 	Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
951 	_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
952 		    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
953 }
954 
955 static void
956 ph_state_nt(struct dchannel *dch)
957 {
958 	struct hfc_pci	*hc = dch->hw;
959 
960 	if (dch->debug)
961 		printk(KERN_DEBUG "%s: NT newstate %x\n",
962 		       __func__, dch->state);
963 	switch (dch->state) {
964 	case 2:
965 		if (hc->hw.nt_timer < 0) {
966 			hc->hw.nt_timer = 0;
967 			test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
968 			test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
969 			hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
970 			Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
971 			/* Clear already pending ints */
972 			(void) Read_hfc(hc, HFCPCI_INT_S1);
973 			Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
974 			udelay(10);
975 			Write_hfc(hc, HFCPCI_STATES, 4);
976 			dch->state = 4;
977 		} else if (hc->hw.nt_timer == 0) {
978 			hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
979 			Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
980 			hc->hw.nt_timer = NT_T1_COUNT;
981 			hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
982 			hc->hw.ctmt |= HFCPCI_TIM3_125;
983 			Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
984 				  HFCPCI_CLTIMER);
985 			test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
986 			test_and_set_bit(FLG_HFC_TIMER_T1, &dch->Flags);
987 			/* allow G2 -> G3 transition */
988 			Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
989 		} else {
990 			Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
991 		}
992 		break;
993 	case 1:
994 		hc->hw.nt_timer = 0;
995 		test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
996 		test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
997 		hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
998 		Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
999 		test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
1000 		hc->hw.mst_m &= ~HFCPCI_MASTER;
1001 		Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1002 		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1003 		_queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
1004 			    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1005 		break;
1006 	case 4:
1007 		hc->hw.nt_timer = 0;
1008 		test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
1009 		test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1010 		hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1011 		Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1012 		break;
1013 	case 3:
1014 		if (!test_and_set_bit(FLG_HFC_TIMER_T3, &dch->Flags)) {
1015 			if (!test_and_clear_bit(FLG_L2_ACTIVATED,
1016 						&dch->Flags)) {
1017 				handle_nt_timer3(dch);
1018 				break;
1019 			}
1020 			test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1021 			hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
1022 			Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1023 			hc->hw.nt_timer = NT_T3_COUNT;
1024 			hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
1025 			hc->hw.ctmt |= HFCPCI_TIM3_125;
1026 			Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
1027 				  HFCPCI_CLTIMER);
1028 		}
1029 		break;
1030 	}
1031 }
1032 
1033 static void
1034 ph_state(struct dchannel *dch)
1035 {
1036 	struct hfc_pci	*hc = dch->hw;
1037 
1038 	if (hc->hw.protocol == ISDN_P_NT_S0) {
1039 		if (test_bit(FLG_HFC_TIMER_T3, &dch->Flags) &&
1040 		    hc->hw.nt_timer < 0)
1041 			handle_nt_timer3(dch);
1042 		else
1043 			ph_state_nt(dch);
1044 	} else
1045 		ph_state_te(dch);
1046 }
1047 
1048 /*
1049  * Layer 1 callback function
1050  */
1051 static int
1052 hfc_l1callback(struct dchannel *dch, u_int cmd)
1053 {
1054 	struct hfc_pci		*hc = dch->hw;
1055 
1056 	switch (cmd) {
1057 	case INFO3_P8:
1058 	case INFO3_P10:
1059 		if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1060 			hc->hw.mst_m |= HFCPCI_MASTER;
1061 		Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1062 		break;
1063 	case HW_RESET_REQ:
1064 		Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3);
1065 		/* HFC ST 3 */
1066 		udelay(6);
1067 		Write_hfc(hc, HFCPCI_STATES, 3);	/* HFC ST 2 */
1068 		if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1069 			hc->hw.mst_m |= HFCPCI_MASTER;
1070 		Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1071 		Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1072 			  HFCPCI_DO_ACTION);
1073 		l1_event(dch->l1, HW_POWERUP_IND);
1074 		break;
1075 	case HW_DEACT_REQ:
1076 		hc->hw.mst_m &= ~HFCPCI_MASTER;
1077 		Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1078 		skb_queue_purge(&dch->squeue);
1079 		if (dch->tx_skb) {
1080 			dev_kfree_skb(dch->tx_skb);
1081 			dch->tx_skb = NULL;
1082 		}
1083 		dch->tx_idx = 0;
1084 		if (dch->rx_skb) {
1085 			dev_kfree_skb(dch->rx_skb);
1086 			dch->rx_skb = NULL;
1087 		}
1088 		test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1089 		if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1090 			del_timer(&dch->timer);
1091 		break;
1092 	case HW_POWERUP_REQ:
1093 		Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION);
1094 		break;
1095 	case PH_ACTIVATE_IND:
1096 		test_and_set_bit(FLG_ACTIVE, &dch->Flags);
1097 		_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1098 			    GFP_ATOMIC);
1099 		break;
1100 	case PH_DEACTIVATE_IND:
1101 		test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
1102 		_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1103 			    GFP_ATOMIC);
1104 		break;
1105 	default:
1106 		if (dch->debug & DEBUG_HW)
1107 			printk(KERN_DEBUG "%s: unknown command %x\n",
1108 			       __func__, cmd);
1109 		return -1;
1110 	}
1111 	return 0;
1112 }
1113 
1114 /*
1115  * Interrupt handler
1116  */
1117 static inline void
1118 tx_birq(struct bchannel *bch)
1119 {
1120 	if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
1121 		hfcpci_fill_fifo(bch);
1122 	else {
1123 		dev_kfree_skb_any(bch->tx_skb);
1124 		if (get_next_bframe(bch))
1125 			hfcpci_fill_fifo(bch);
1126 	}
1127 }
1128 
1129 static inline void
1130 tx_dirq(struct dchannel *dch)
1131 {
1132 	if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len)
1133 		hfcpci_fill_dfifo(dch->hw);
1134 	else {
1135 		dev_kfree_skb(dch->tx_skb);
1136 		if (get_next_dframe(dch))
1137 			hfcpci_fill_dfifo(dch->hw);
1138 	}
1139 }
1140 
1141 static irqreturn_t
1142 hfcpci_int(int intno, void *dev_id)
1143 {
1144 	struct hfc_pci	*hc = dev_id;
1145 	u_char		exval;
1146 	struct bchannel	*bch;
1147 	u_char		val, stat;
1148 
1149 	spin_lock(&hc->lock);
1150 	if (!(hc->hw.int_m2 & 0x08)) {
1151 		spin_unlock(&hc->lock);
1152 		return IRQ_NONE; /* not initialised */
1153 	}
1154 	stat = Read_hfc(hc, HFCPCI_STATUS);
1155 	if (HFCPCI_ANYINT & stat) {
1156 		val = Read_hfc(hc, HFCPCI_INT_S1);
1157 		if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1158 			printk(KERN_DEBUG
1159 			       "HFC-PCI: stat(%02x) s1(%02x)\n", stat, val);
1160 	} else {
1161 		/* shared */
1162 		spin_unlock(&hc->lock);
1163 		return IRQ_NONE;
1164 	}
1165 	hc->irqcnt++;
1166 
1167 	if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1168 		printk(KERN_DEBUG "HFC-PCI irq %x\n", val);
1169 	val &= hc->hw.int_m1;
1170 	if (val & 0x40) {	/* state machine irq */
1171 		exval = Read_hfc(hc, HFCPCI_STATES) & 0xf;
1172 		if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1173 			printk(KERN_DEBUG "ph_state chg %d->%d\n",
1174 			       hc->dch.state, exval);
1175 		hc->dch.state = exval;
1176 		schedule_event(&hc->dch, FLG_PHCHANGE);
1177 		val &= ~0x40;
1178 	}
1179 	if (val & 0x80) {	/* timer irq */
1180 		if (hc->hw.protocol == ISDN_P_NT_S0) {
1181 			if ((--hc->hw.nt_timer) < 0)
1182 				schedule_event(&hc->dch, FLG_PHCHANGE);
1183 		}
1184 		val &= ~0x80;
1185 		Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER);
1186 	}
1187 	if (val & 0x08) {	/* B1 rx */
1188 		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1189 		if (bch)
1190 			main_rec_hfcpci(bch);
1191 		else if (hc->dch.debug)
1192 			printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n");
1193 	}
1194 	if (val & 0x10) {	/* B2 rx */
1195 		bch = Sel_BCS(hc, 2);
1196 		if (bch)
1197 			main_rec_hfcpci(bch);
1198 		else if (hc->dch.debug)
1199 			printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n");
1200 	}
1201 	if (val & 0x01) {	/* B1 tx */
1202 		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1203 		if (bch)
1204 			tx_birq(bch);
1205 		else if (hc->dch.debug)
1206 			printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n");
1207 	}
1208 	if (val & 0x02) {	/* B2 tx */
1209 		bch = Sel_BCS(hc, 2);
1210 		if (bch)
1211 			tx_birq(bch);
1212 		else if (hc->dch.debug)
1213 			printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n");
1214 	}
1215 	if (val & 0x20)		/* D rx */
1216 		receive_dmsg(hc);
1217 	if (val & 0x04) {	/* D tx */
1218 		if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags))
1219 			del_timer(&hc->dch.timer);
1220 		tx_dirq(&hc->dch);
1221 	}
1222 	spin_unlock(&hc->lock);
1223 	return IRQ_HANDLED;
1224 }
1225 
1226 /*
1227  * timer callback for D-chan busy resolution. Currently a no-op.
1228  */
1229 static void
1230 hfcpci_dbusy_timer(struct timer_list *t)
1231 {
1232 }
1233 
1234 /*
1235  * activate/deactivate hardware for selected channels and mode
1236  */
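/*
 * Editor's note: as the code below shows, the 'bc' argument is a packed
 * channel id: bits 0-7 carry the B-channel number, bits 8-15 the PCM
 * receive slot, bits 16-23 the PCM transmit slot and bits 24-31 a non-zero
 * marker selecting PCM slot use (only meaningful with HFC_CFG_PCM set).
 */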
1237 static int
1238 mode_hfcpci(struct bchannel *bch, int bc, int protocol)
1239 {
1240 	struct hfc_pci	*hc = bch->hw;
1241 	int		fifo2;
1242 	u_char		rx_slot = 0, tx_slot = 0, pcm_mode;
1243 
1244 	if (bch->debug & DEBUG_HW_BCHANNEL)
1245 		printk(KERN_DEBUG
1246 		       "HFCPCI bchannel protocol %x-->%x ch %x-->%x\n",
1247 		       bch->state, protocol, bch->nr, bc);
1248 
1249 	fifo2 = bc;
1250 	pcm_mode = (bc >> 24) & 0xff;
1251 	if (pcm_mode) { /* PCM SLOT USE */
1252 		if (!test_bit(HFC_CFG_PCM, &hc->cfg))
1253 			printk(KERN_WARNING
1254 			       "%s: pcm channel id without HFC_CFG_PCM\n",
1255 			       __func__);
1256 		rx_slot = (bc >> 8) & 0xff;
1257 		tx_slot = (bc >> 16) & 0xff;
1258 		bc = bc & 0xff;
1259 	} else if (test_bit(HFC_CFG_PCM, &hc->cfg) && (protocol > ISDN_P_NONE))
1260 		printk(KERN_WARNING "%s: no pcm channel id but HFC_CFG_PCM\n",
1261 		       __func__);
1262 	if (hc->chanlimit > 1) {
1263 		hc->hw.bswapped = 0;	/* B1 and B2 normal mode */
1264 		hc->hw.sctrl_e &= ~0x80;
1265 	} else {
1266 		if (bc & 2) {
1267 			if (protocol != ISDN_P_NONE) {
1268 				hc->hw.bswapped = 1; /* B1 and B2 exchanged */
1269 				hc->hw.sctrl_e |= 0x80;
1270 			} else {
1271 				hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1272 				hc->hw.sctrl_e &= ~0x80;
1273 			}
1274 			fifo2 = 1;
1275 		} else {
1276 			hc->hw.bswapped = 0;	/* B1 and B2 normal mode */
1277 			hc->hw.sctrl_e &= ~0x80;
1278 		}
1279 	}
1280 	switch (protocol) {
1281 	case (-1): /* used for init */
1282 		bch->state = -1;
1283 		bch->nr = bc;
1284 		fallthrough;
1285 	case (ISDN_P_NONE):
1286 		if (bch->state == ISDN_P_NONE)
1287 			return 0;
1288 		if (bc & 2) {
1289 			hc->hw.sctrl &= ~SCTRL_B2_ENA;
1290 			hc->hw.sctrl_r &= ~SCTRL_B2_ENA;
1291 		} else {
1292 			hc->hw.sctrl &= ~SCTRL_B1_ENA;
1293 			hc->hw.sctrl_r &= ~SCTRL_B1_ENA;
1294 		}
1295 		if (fifo2 & 2) {
1296 			hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
1297 			hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS |
1298 					   HFCPCI_INTS_B2REC);
1299 		} else {
1300 			hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
1301 			hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS |
1302 					   HFCPCI_INTS_B1REC);
1303 		}
1304 #ifdef REVERSE_BITORDER
1305 		if (bch->nr & 2)
1306 			hc->hw.cirm &= 0x7f;
1307 		else
1308 			hc->hw.cirm &= 0xbf;
1309 #endif
1310 		bch->state = ISDN_P_NONE;
1311 		bch->nr = bc;
1312 		test_and_clear_bit(FLG_HDLC, &bch->Flags);
1313 		test_and_clear_bit(FLG_TRANSPARENT, &bch->Flags);
1314 		break;
1315 	case (ISDN_P_B_RAW):
1316 		bch->state = protocol;
1317 		bch->nr = bc;
1318 		hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
1319 		hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
1320 		if (bc & 2) {
1321 			hc->hw.sctrl |= SCTRL_B2_ENA;
1322 			hc->hw.sctrl_r |= SCTRL_B2_ENA;
1323 #ifdef REVERSE_BITORDER
1324 			hc->hw.cirm |= 0x80;
1325 #endif
1326 		} else {
1327 			hc->hw.sctrl |= SCTRL_B1_ENA;
1328 			hc->hw.sctrl_r |= SCTRL_B1_ENA;
1329 #ifdef REVERSE_BITORDER
1330 			hc->hw.cirm |= 0x40;
1331 #endif
1332 		}
1333 		if (fifo2 & 2) {
1334 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1335 			if (!tics)
1336 				hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
1337 						  HFCPCI_INTS_B2REC);
1338 			hc->hw.ctmt |= 2;
1339 			hc->hw.conn &= ~0x18;
1340 		} else {
1341 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1342 			if (!tics)
1343 				hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
1344 						  HFCPCI_INTS_B1REC);
1345 			hc->hw.ctmt |= 1;
1346 			hc->hw.conn &= ~0x03;
1347 		}
1348 		test_and_set_bit(FLG_TRANSPARENT, &bch->Flags);
1349 		break;
1350 	case (ISDN_P_B_HDLC):
1351 		bch->state = protocol;
1352 		bch->nr = bc;
1353 		hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
1354 		hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
1355 		if (bc & 2) {
1356 			hc->hw.sctrl |= SCTRL_B2_ENA;
1357 			hc->hw.sctrl_r |= SCTRL_B2_ENA;
1358 		} else {
1359 			hc->hw.sctrl |= SCTRL_B1_ENA;
1360 			hc->hw.sctrl_r |= SCTRL_B1_ENA;
1361 		}
1362 		if (fifo2 & 2) {
1363 			hc->hw.last_bfifo_cnt[1] = 0;
1364 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1365 			hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
1366 					  HFCPCI_INTS_B2REC);
1367 			hc->hw.ctmt &= ~2;
1368 			hc->hw.conn &= ~0x18;
1369 		} else {
1370 			hc->hw.last_bfifo_cnt[0] = 0;
1371 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1372 			hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
1373 					  HFCPCI_INTS_B1REC);
1374 			hc->hw.ctmt &= ~1;
1375 			hc->hw.conn &= ~0x03;
1376 		}
1377 		test_and_set_bit(FLG_HDLC, &bch->Flags);
1378 		break;
1379 	default:
1380 		printk(KERN_DEBUG "prot not known %x\n", protocol);
1381 		return -ENOPROTOOPT;
1382 	}
1383 	if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
1384 		if ((protocol == ISDN_P_NONE) ||
1385 		    (protocol == -1)) {	/* init case */
1386 			rx_slot = 0;
1387 			tx_slot = 0;
1388 		} else {
1389 			if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
1390 				rx_slot |= 0xC0;
1391 				tx_slot |= 0xC0;
1392 			} else {
1393 				rx_slot |= 0x80;
1394 				tx_slot |= 0x80;
1395 			}
1396 		}
1397 		if (bc & 2) {
1398 			hc->hw.conn &= 0xc7;
1399 			hc->hw.conn |= 0x08;
1400 			printk(KERN_DEBUG "%s: Write_hfc: B2_SSL 0x%x\n",
1401 			       __func__, tx_slot);
1402 			printk(KERN_DEBUG "%s: Write_hfc: B2_RSL 0x%x\n",
1403 			       __func__, rx_slot);
1404 			Write_hfc(hc, HFCPCI_B2_SSL, tx_slot);
1405 			Write_hfc(hc, HFCPCI_B2_RSL, rx_slot);
1406 		} else {
1407 			hc->hw.conn &= 0xf8;
1408 			hc->hw.conn |= 0x01;
1409 			printk(KERN_DEBUG "%s: Write_hfc: B1_SSL 0x%x\n",
1410 			       __func__, tx_slot);
1411 			printk(KERN_DEBUG "%s: Write_hfc: B1_RSL 0x%x\n",
1412 			       __func__, rx_slot);
1413 			Write_hfc(hc, HFCPCI_B1_SSL, tx_slot);
1414 			Write_hfc(hc, HFCPCI_B1_RSL, rx_slot);
1415 		}
1416 	}
1417 	Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
1418 	Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1419 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1420 	Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
1421 	Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1422 	Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1423 	Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1424 #ifdef REVERSE_BITORDER
1425 	Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1426 #endif
1427 	return 0;
1428 }
1429 
1430 static int
1431 set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
1432 {
1433 	struct hfc_pci	*hc = bch->hw;
1434 
1435 	if (bch->debug & DEBUG_HW_BCHANNEL)
1436 		printk(KERN_DEBUG
1437 		       "HFCPCI bchannel test rx protocol %x-->%x ch %x-->%x\n",
1438 		       bch->state, protocol, bch->nr, chan);
1439 	if (bch->nr != chan) {
1440 		printk(KERN_DEBUG
1441 		       "HFCPCI rxtest wrong channel parameter %x/%x\n",
1442 		       bch->nr, chan);
1443 		return -EINVAL;
1444 	}
1445 	switch (protocol) {
1446 	case (ISDN_P_B_RAW):
1447 		bch->state = protocol;
1448 		hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
1449 		if (chan & 2) {
1450 			hc->hw.sctrl_r |= SCTRL_B2_ENA;
1451 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1452 			if (!tics)
1453 				hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1454 			hc->hw.ctmt |= 2;
1455 			hc->hw.conn &= ~0x18;
1456 #ifdef REVERSE_BITORDER
1457 			hc->hw.cirm |= 0x80;
1458 #endif
1459 		} else {
1460 			hc->hw.sctrl_r |= SCTRL_B1_ENA;
1461 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1462 			if (!tics)
1463 				hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1464 			hc->hw.ctmt |= 1;
1465 			hc->hw.conn &= ~0x03;
1466 #ifdef REVERSE_BITORDER
1467 			hc->hw.cirm |= 0x40;
1468 #endif
1469 		}
1470 		break;
1471 	case (ISDN_P_B_HDLC):
1472 		bch->state = protocol;
1473 		hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
1474 		if (chan & 2) {
1475 			hc->hw.sctrl_r |= SCTRL_B2_ENA;
1476 			hc->hw.last_bfifo_cnt[1] = 0;
1477 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1478 			hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1479 			hc->hw.ctmt &= ~2;
1480 			hc->hw.conn &= ~0x18;
1481 		} else {
1482 			hc->hw.sctrl_r |= SCTRL_B1_ENA;
1483 			hc->hw.last_bfifo_cnt[0] = 0;
1484 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1485 			hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1486 			hc->hw.ctmt &= ~1;
1487 			hc->hw.conn &= ~0x03;
1488 		}
1489 		break;
1490 	default:
1491 		printk(KERN_DEBUG "prot not known %x\n", protocol);
1492 		return -ENOPROTOOPT;
1493 	}
1494 	Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1495 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1496 	Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1497 	Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1498 	Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1499 #ifdef REVERSE_BITORDER
1500 	Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1501 #endif
1502 	return 0;
1503 }
1504 
1505 static void
1506 deactivate_bchannel(struct bchannel *bch)
1507 {
1508 	struct hfc_pci	*hc = bch->hw;
1509 	u_long		flags;
1510 
1511 	spin_lock_irqsave(&hc->lock, flags);
1512 	mISDN_clear_bchannel(bch);
1513 	mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1514 	spin_unlock_irqrestore(&hc->lock, flags);
1515 }
1516 
1517 /*
1518  * Layer 1 B-channel hardware access
1519  */
1520 static int
1521 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
1522 {
1523 	return mISDN_ctrl_bchannel(bch, cq);
1524 }
1525 static int
1526 hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1527 {
1528 	struct bchannel	*bch = container_of(ch, struct bchannel, ch);
1529 	struct hfc_pci	*hc = bch->hw;
1530 	int		ret = -EINVAL;
1531 	u_long		flags;
1532 
1533 	if (bch->debug & DEBUG_HW)
1534 		printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);
1535 	switch (cmd) {
1536 	case HW_TESTRX_RAW:
1537 		spin_lock_irqsave(&hc->lock, flags);
1538 		ret = set_hfcpci_rxtest(bch, ISDN_P_B_RAW, (int)(long)arg);
1539 		spin_unlock_irqrestore(&hc->lock, flags);
1540 		break;
1541 	case HW_TESTRX_HDLC:
1542 		spin_lock_irqsave(&hc->lock, flags);
1543 		ret = set_hfcpci_rxtest(bch, ISDN_P_B_HDLC, (int)(long)arg);
1544 		spin_unlock_irqrestore(&hc->lock, flags);
1545 		break;
1546 	case HW_TESTRX_OFF:
1547 		spin_lock_irqsave(&hc->lock, flags);
1548 		mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1549 		spin_unlock_irqrestore(&hc->lock, flags);
1550 		ret = 0;
1551 		break;
1552 	case CLOSE_CHANNEL:
1553 		test_and_clear_bit(FLG_OPEN, &bch->Flags);
1554 		deactivate_bchannel(bch);
1555 		ch->protocol = ISDN_P_NONE;
1556 		ch->peer = NULL;
1557 		module_put(THIS_MODULE);
1558 		ret = 0;
1559 		break;
1560 	case CONTROL_CHANNEL:
1561 		ret = channel_bctrl(bch, arg);
1562 		break;
1563 	default:
1564 		printk(KERN_WARNING "%s: unknown prim(%x)\n",
1565 		       __func__, cmd);
1566 	}
1567 	return ret;
1568 }
1569 
1570 /*
1571  * Layer2 -> Layer 1 Dchannel data
1572  */
1573 static int
1574 hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
1575 {
1576 	struct mISDNdevice	*dev = container_of(ch, struct mISDNdevice, D);
1577 	struct dchannel		*dch = container_of(dev, struct dchannel, dev);
1578 	struct hfc_pci		*hc = dch->hw;
1579 	int			ret = -EINVAL;
1580 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
1581 	unsigned int		id;
1582 	u_long			flags;
1583 
1584 	switch (hh->prim) {
1585 	case PH_DATA_REQ:
1586 		spin_lock_irqsave(&hc->lock, flags);
1587 		ret = dchannel_senddata(dch, skb);
1588 		if (ret > 0) { /* direct TX */
1589 			id = hh->id; /* skb can be freed */
1590 			hfcpci_fill_dfifo(dch->hw);
1591 			ret = 0;
1592 			spin_unlock_irqrestore(&hc->lock, flags);
1593 			queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1594 		} else
1595 			spin_unlock_irqrestore(&hc->lock, flags);
1596 		return ret;
1597 	case PH_ACTIVATE_REQ:
1598 		spin_lock_irqsave(&hc->lock, flags);
1599 		if (hc->hw.protocol == ISDN_P_NT_S0) {
1600 			ret = 0;
1601 			if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1602 				hc->hw.mst_m |= HFCPCI_MASTER;
1603 			Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1604 			if (test_bit(FLG_ACTIVE, &dch->Flags)) {
1605 				spin_unlock_irqrestore(&hc->lock, flags);
1606 				_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
1607 					    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1608 				break;
1609 			}
1610 			test_and_set_bit(FLG_L2_ACTIVATED, &dch->Flags);
1611 			Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1612 				  HFCPCI_DO_ACTION | 1);
1613 		} else
1614 			ret = l1_event(dch->l1, hh->prim);
1615 		spin_unlock_irqrestore(&hc->lock, flags);
1616 		break;
1617 	case PH_DEACTIVATE_REQ:
1618 		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1619 		spin_lock_irqsave(&hc->lock, flags);
1620 		if (hc->hw.protocol == ISDN_P_NT_S0) {
1621 			struct sk_buff_head free_queue;
1622 
1623 			__skb_queue_head_init(&free_queue);
1624 			/* prepare deactivation */
1625 			Write_hfc(hc, HFCPCI_STATES, 0x40);
1626 			skb_queue_splice_init(&dch->squeue, &free_queue);
1627 			if (dch->tx_skb) {
1628 				__skb_queue_tail(&free_queue, dch->tx_skb);
1629 				dch->tx_skb = NULL;
1630 			}
1631 			dch->tx_idx = 0;
1632 			if (dch->rx_skb) {
1633 				__skb_queue_tail(&free_queue, dch->rx_skb);
1634 				dch->rx_skb = NULL;
1635 			}
1636 			test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1637 			if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1638 				del_timer(&dch->timer);
1639 #ifdef FIXME
1640 			if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
1641 				dchannel_sched_event(&hc->dch, D_CLEARBUSY);
1642 #endif
1643 			hc->hw.mst_m &= ~HFCPCI_MASTER;
1644 			Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1645 			ret = 0;
1646 			spin_unlock_irqrestore(&hc->lock, flags);
1647 			__skb_queue_purge(&free_queue);
1648 		} else {
1649 			ret = l1_event(dch->l1, hh->prim);
1650 			spin_unlock_irqrestore(&hc->lock, flags);
1651 		}
1652 		break;
1653 	}
1654 	if (!ret)
1655 		dev_kfree_skb(skb);
1656 	return ret;
1657 }
1658 
1659 /*
1660  * Layer2 -> Layer 1 Bchannel data
1661  */
1662 static int
1663 hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
1664 {
1665 	struct bchannel		*bch = container_of(ch, struct bchannel, ch);
1666 	struct hfc_pci		*hc = bch->hw;
1667 	int			ret = -EINVAL;
1668 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
1669 	unsigned long		flags;
1670 
1671 	switch (hh->prim) {
1672 	case PH_DATA_REQ:
1673 		spin_lock_irqsave(&hc->lock, flags);
1674 		ret = bchannel_senddata(bch, skb);
1675 		if (ret > 0) { /* direct TX */
1676 			hfcpci_fill_fifo(bch);
1677 			ret = 0;
1678 		}
1679 		spin_unlock_irqrestore(&hc->lock, flags);
1680 		return ret;
1681 	case PH_ACTIVATE_REQ:
1682 		spin_lock_irqsave(&hc->lock, flags);
1683 		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
1684 			ret = mode_hfcpci(bch, bch->nr, ch->protocol);
1685 		else
1686 			ret = 0;
1687 		spin_unlock_irqrestore(&hc->lock, flags);
1688 		if (!ret)
1689 			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
1690 				    NULL, GFP_KERNEL);
1691 		break;
1692 	case PH_DEACTIVATE_REQ:
1693 		deactivate_bchannel(bch);
1694 		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
1695 			    NULL, GFP_KERNEL);
1696 		ret = 0;
1697 		break;
1698 	}
1699 	if (!ret)
1700 		dev_kfree_skb(skb);
1701 	return ret;
1702 }
1703 
1704 /*
1705  * called for card init message
1706  */
1707 
1708 static void
1709 inithfcpci(struct hfc_pci *hc)
1710 {
1711 	printk(KERN_DEBUG "inithfcpci: entered\n");
1712 	timer_setup(&hc->dch.timer, hfcpci_dbusy_timer, 0);
1713 	hc->chanlimit = 2;
1714 	mode_hfcpci(&hc->bch[0], 1, -1);
1715 	mode_hfcpci(&hc->bch[1], 2, -1);
1716 }
1717 
1718 
1719 static int
1720 init_card(struct hfc_pci *hc)
1721 {
1722 	int	cnt = 3;
1723 	u_long	flags;
1724 
1725 	printk(KERN_DEBUG "init_card: entered\n");
1726 
1727 
1728 	spin_lock_irqsave(&hc->lock, flags);
1729 	disable_hwirq(hc);
1730 	spin_unlock_irqrestore(&hc->lock, flags);
1731 	if (request_irq(hc->irq, hfcpci_int, IRQF_SHARED, "HFC PCI", hc)) {
1732 		printk(KERN_WARNING
1733 		       "mISDN: couldn't get interrupt %d\n", hc->irq);
1734 		return -EIO;
1735 	}
1736 	spin_lock_irqsave(&hc->lock, flags);
1737 	reset_hfcpci(hc);
1738 	while (cnt) {
1739 		inithfcpci(hc);
1740 		/*
1741 		 * Finally enable IRQ output; this is only allowed once an
1742 		 * IRQ handler has been installed for this HFC, so do not
1743 		 * do it earlier.
1744 		 */
1745 		enable_hwirq(hc);
1746 		spin_unlock_irqrestore(&hc->lock, flags);
1747 		/* Timeout 80ms */
1748 		set_current_state(TASK_UNINTERRUPTIBLE);
1749 		schedule_timeout((80 * HZ) / 1000);
1750 		printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
1751 		       hc->irq, hc->irqcnt);
1752 		/* now switch timer interrupt off */
1753 		spin_lock_irqsave(&hc->lock, flags);
1754 		hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1755 		Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1756 		/* reinit mode reg */
1757 		Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1758 		if (!hc->irqcnt) {
1759 			printk(KERN_WARNING
1760 			       "HFC PCI: IRQ(%d) getting no interrupts "
1761 			       "during init %d\n", hc->irq, 4 - cnt);
1762 			if (cnt == 1)
1763 				break;
1764 			else {
1765 				reset_hfcpci(hc);
1766 				cnt--;
1767 			}
1768 		} else {
1769 			spin_unlock_irqrestore(&hc->lock, flags);
1770 			hc->initdone = 1;
1771 			return 0;
1772 		}
1773 	}
1774 	disable_hwirq(hc);
1775 	spin_unlock_irqrestore(&hc->lock, flags);
1776 	free_irq(hc->irq, hc);
1777 	return -EIO;
1778 }
1779 
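/*
 * channel_ctrl - handle MISDN_CTRL requests for the D-channel device:
 * loopback on B1/B2 via the PCM slot registers (MISDN_CTRL_LOOP), a
 * B1<->B2 cross-connect (MISDN_CTRL_CONNECT/DISCONNECT) and setting the
 * layer-1 T3 timer value (MISDN_CTRL_L1_TIMER3).
 */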
1780 static int
1781 channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
1782 {
1783 	int	ret = 0;
1784 	u_char	slot;
1785 
1786 	switch (cq->op) {
1787 	case MISDN_CTRL_GETOP:
1788 		cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
1789 			 MISDN_CTRL_DISCONNECT | MISDN_CTRL_L1_TIMER3;
1790 		break;
1791 	case MISDN_CTRL_LOOP:
1792 		/* channel 0 disables the loop; bit 0 selects B1, bit 1 B2 */
1793 		if (cq->channel < 0 || cq->channel > 2) {
1794 			ret = -EINVAL;
1795 			break;
1796 		}
1797 		if (cq->channel & 1) {
1798 			if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1799 				slot = 0xC0;
1800 			else
1801 				slot = 0x80;
1802 			printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1803 			       __func__, slot);
1804 			Write_hfc(hc, HFCPCI_B1_SSL, slot);
1805 			Write_hfc(hc, HFCPCI_B1_RSL, slot);
1806 			hc->hw.conn = (hc->hw.conn & ~7) | 6;
1807 			Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1808 		}
1809 		if (cq->channel & 2) {
1810 			if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1811 				slot = 0xC1;
1812 			else
1813 				slot = 0x81;
1814 			printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1815 			       __func__, slot);
1816 			Write_hfc(hc, HFCPCI_B2_SSL, slot);
1817 			Write_hfc(hc, HFCPCI_B2_RSL, slot);
1818 			hc->hw.conn = (hc->hw.conn & ~0x38) | 0x30;
1819 			Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1820 		}
1821 		if (cq->channel & 3)
1822 			hc->hw.trm |= 0x80;	/* enable IOM-loop */
1823 		else {
1824 			hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1825 			Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1826 			hc->hw.trm &= 0x7f;	/* disable IOM-loop */
1827 		}
1828 		Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1829 		break;
1830 	case MISDN_CTRL_CONNECT:
1831 		if (cq->channel == cq->p1) {
1832 			ret = -EINVAL;
1833 			break;
1834 		}
1835 		if (cq->channel < 1 || cq->channel > 2 ||
1836 		    cq->p1 < 1 || cq->p1 > 2) {
1837 			ret = -EINVAL;
1838 			break;
1839 		}
1840 		if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1841 			slot = 0xC0;
1842 		else
1843 			slot = 0x80;
1844 		printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1845 		       __func__, slot);
1846 		Write_hfc(hc, HFCPCI_B1_SSL, slot);
1847 		Write_hfc(hc, HFCPCI_B2_RSL, slot);
1848 		if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1849 			slot = 0xC1;
1850 		else
1851 			slot = 0x81;
1852 		printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1853 		       __func__, slot);
1854 		Write_hfc(hc, HFCPCI_B2_SSL, slot);
1855 		Write_hfc(hc, HFCPCI_B1_RSL, slot);
1856 		hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x36;
1857 		Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1858 		hc->hw.trm |= 0x80;
1859 		Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1860 		break;
1861 	case MISDN_CTRL_DISCONNECT:
1862 		hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1863 		Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1864 		hc->hw.trm &= 0x7f;	/* disable IOM-loop */
1865 		break;
1866 	case MISDN_CTRL_L1_TIMER3:
1867 		ret = l1_event(hc->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
1868 		break;
1869 	default:
1870 		printk(KERN_WARNING "%s: unknown Op %x\n",
1871 		       __func__, cq->op);
1872 		ret = -EINVAL;
1873 		break;
1874 	}
1875 	return ret;
1876 }
1877 
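/*
 * open_dchannel - the first open initializes the hardware for the requested
 * TE/NT protocol; later opens may switch the protocol. If layer 1 is already
 * activated, an activate indication is queued immediately.
 */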
1878 static int
1879 open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch,
1880 	      struct channel_req *rq)
1881 {
1882 	int err = 0;
1883 
1884 	if (debug & DEBUG_HW_OPEN)
1885 		printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
1886 		       hc->dch.dev.id, __builtin_return_address(0));
1887 	if (rq->protocol == ISDN_P_NONE)
1888 		return -EINVAL;
1889 	if (rq->adr.channel == 1) {
1890 		/* TODO: E-Channel */
1891 		return -EINVAL;
1892 	}
1893 	if (!hc->initdone) {
1894 		if (rq->protocol == ISDN_P_TE_S0) {
1895 			err = create_l1(&hc->dch, hfc_l1callback);
1896 			if (err)
1897 				return err;
1898 		}
1899 		hc->hw.protocol = rq->protocol;
1900 		ch->protocol = rq->protocol;
1901 		err = init_card(hc);
1902 		if (err)
1903 			return err;
1904 	} else {
1905 		if (rq->protocol != ch->protocol) {
1906 			if (hc->hw.protocol == ISDN_P_TE_S0)
1907 				l1_event(hc->dch.l1, CLOSE_CHANNEL);
1908 			if (rq->protocol == ISDN_P_TE_S0) {
1909 				err = create_l1(&hc->dch, hfc_l1callback);
1910 				if (err)
1911 					return err;
1912 			}
1913 			hc->hw.protocol = rq->protocol;
1914 			ch->protocol = rq->protocol;
1915 			hfcpci_setmode(hc);
1916 		}
1917 	}
1918 
1919 	if (((ch->protocol == ISDN_P_NT_S0) && (hc->dch.state == 3)) ||
1920 	    ((ch->protocol == ISDN_P_TE_S0) && (hc->dch.state == 7))) {
1921 		_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
1922 			    0, NULL, GFP_KERNEL);
1923 	}
1924 	rq->ch = ch;
1925 	if (!try_module_get(THIS_MODULE))
1926 		printk(KERN_WARNING "%s: cannot get module\n", __func__);
1927 	return 0;
1928 }
1929 
1930 static int
1931 open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
1932 {
1933 	struct bchannel		*bch;
1934 
1935 	if (rq->adr.channel == 0 || rq->adr.channel > 2)
1936 		return -EINVAL;
1937 	if (rq->protocol == ISDN_P_NONE)
1938 		return -EINVAL;
1939 	bch = &hc->bch[rq->adr.channel - 1];
1940 	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
1941 		return -EBUSY; /* a B-channel can only be opened once */
1942 	bch->ch.protocol = rq->protocol;
1943 	rq->ch = &bch->ch; /* TODO: E-channel */
1944 	if (!try_module_get(THIS_MODULE))
1945 		printk(KERN_WARNING "%s: cannot get module\n", __func__);
1946 	return 0;
1947 }
1948 
1949 /*
1950  * device control function
1951  */
1952 static int
1953 hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1954 {
1955 	struct mISDNdevice	*dev = container_of(ch, struct mISDNdevice, D);
1956 	struct dchannel		*dch = container_of(dev, struct dchannel, dev);
1957 	struct hfc_pci		*hc = dch->hw;
1958 	struct channel_req	*rq;
1959 	int			err = 0;
1960 
1961 	if (dch->debug & DEBUG_HW)
1962 		printk(KERN_DEBUG "%s: cmd:%x %p\n",
1963 		       __func__, cmd, arg);
1964 	switch (cmd) {
1965 	case OPEN_CHANNEL:
1966 		rq = arg;
1967 		if ((rq->protocol == ISDN_P_TE_S0) ||
1968 		    (rq->protocol == ISDN_P_NT_S0))
1969 			err = open_dchannel(hc, ch, rq);
1970 		else
1971 			err = open_bchannel(hc, rq);
1972 		break;
1973 	case CLOSE_CHANNEL:
1974 		if (debug & DEBUG_HW_OPEN)
1975 			printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
1976 			       __func__, hc->dch.dev.id,
1977 			       __builtin_return_address(0));
1978 		module_put(THIS_MODULE);
1979 		break;
1980 	case CONTROL_CHANNEL:
1981 		err = channel_ctrl(hc, arg);
1982 		break;
1983 	default:
1984 		if (dch->debug & DEBUG_HW)
1985 			printk(KERN_DEBUG "%s: unknown command %x\n",
1986 			       __func__, cmd);
1987 		return -EINVAL;
1988 	}
1989 	return err;
1990 }
1991 
1992 static int
1993 setup_hw(struct hfc_pci *hc)
1994 {
1995 	void	*buffer;
1996 
1997 	printk(KERN_INFO "mISDN: HFC-PCI driver %s\n", hfcpci_revision);
1998 	hc->hw.cirm = 0;
1999 	hc->dch.state = 0;
2000 	pci_set_master(hc->pdev);
2001 	if (!hc->irq) {
2002 		printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
2003 		return -EINVAL;
2004 	}
2005 	hc->hw.pci_io =
2006 		(char __iomem *)(unsigned long)hc->pdev->resource[1].start;
2007 
2008 	if (!hc->hw.pci_io) {
2009 		printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
2010 		return -ENOMEM;
2011 	}
2012 	/* Allocate memory for the FIFOs */
2013 	/* the memory needs to be on a 32k boundary within the first 4G */
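	/*
	 * The DMA mask 0xFFFF8000 leaves the low 15 address bits clear, so
	 * any coherent buffer handed back below must be 32 KiB aligned and
	 * lie below 4 GiB, as the controller requires.
	 */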
2014 	if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) {
2015 		printk(KERN_WARNING
2016 		       "HFC-PCI: No usable DMA configuration!\n");
2017 		return -EIO;
2018 	}
2019 	buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle,
2020 				    GFP_KERNEL);
2021 	/* We silently assume the address is okay if nonzero */
2022 	if (!buffer) {
2023 		printk(KERN_WARNING
2024 		       "HFC-PCI: Error allocating memory for FIFO!\n");
2025 		return -ENOMEM;
2026 	}
2027 	hc->hw.fifos = buffer;
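	/* program the FIFO base address into the controller (config reg 0x80) */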
2028 	pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
2029 	hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
2030 	if (unlikely(!hc->hw.pci_io)) {
2031 		printk(KERN_WARNING
2032 		       "HFC-PCI: Error in ioremap for PCI!\n");
2033 		dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
2034 				  hc->hw.dmahandle);
2035 		return -ENOMEM;
2036 	}
2037 
2038 	printk(KERN_INFO
2039 	       "HFC-PCI: defined at mem %#lx fifo %p(%pad) IRQ %d HZ %d\n",
2040 	       (u_long) hc->hw.pci_io, hc->hw.fifos,
2041 	       &hc->hw.dmahandle, hc->irq, HZ);
2042 
2043 	/* enable memory mapped ports, disable busmaster */
2044 	pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
2045 	hc->hw.int_m2 = 0;
2046 	disable_hwirq(hc);
2047 	hc->hw.int_m1 = 0;
2048 	Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
2049 	/* At this point the needed PCI config is done */
2050 	/* fifos are still not enabled */
2051 	timer_setup(&hc->hw.timer, hfcpci_Timer, 0);
2052 	/* default PCM master */
2053 	test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
2054 	return 0;
2055 }
2056 
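/*
 * release_card - tear down in reverse order of setup: disable IRQ output,
 * switch the B-channels off, close layer 1 in TE mode, free the IRQ and
 * only then release the I/O mapping and FIFO memory.
 */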
2057 static void
2058 release_card(struct hfc_pci *hc) {
2059 	u_long	flags;
2060 
2061 	spin_lock_irqsave(&hc->lock, flags);
2062 	hc->hw.int_m2 = 0; /* interrupt output off ! */
2063 	disable_hwirq(hc);
2064 	mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE);
2065 	mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE);
2066 	if (hc->dch.timer.function != NULL) {
2067 		del_timer(&hc->dch.timer);
2068 		hc->dch.timer.function = NULL;
2069 	}
2070 	spin_unlock_irqrestore(&hc->lock, flags);
2071 	if (hc->hw.protocol == ISDN_P_TE_S0)
2072 		l1_event(hc->dch.l1, CLOSE_CHANNEL);
2073 	if (hc->initdone)
2074 		free_irq(hc->irq, hc);
2075 	release_io_hfcpci(hc); /* must release after free_irq! */
2076 	mISDN_unregister_device(&hc->dch.dev);
2077 	mISDN_freebchannel(&hc->bch[1]);
2078 	mISDN_freebchannel(&hc->bch[0]);
2079 	mISDN_freedchannel(&hc->dch);
2080 	pci_set_drvdata(hc->pdev, NULL);
2081 	kfree(hc);
2082 }
2083 
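/*
 * setup_card - initialize the D-channel and both B-channels, hook up the
 * mISDN send/ctrl callbacks, set up the hardware and register the device
 * as "hfc-pci.<n>".
 */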
2084 static int
2085 setup_card(struct hfc_pci *card)
2086 {
2087 	int		err = -EINVAL;
2088 	u_int		i;
2089 	char		name[MISDN_MAX_IDLEN];
2090 
2091 	card->dch.debug = debug;
2092 	spin_lock_init(&card->lock);
2093 	mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state);
2094 	card->dch.hw = card;
2095 	card->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
2096 	card->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
2097 		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
2098 	card->dch.dev.D.send = hfcpci_l2l1D;
2099 	card->dch.dev.D.ctrl = hfc_dctrl;
2100 	card->dch.dev.nrbchan = 2;
2101 	for (i = 0; i < 2; i++) {
2102 		card->bch[i].nr = i + 1;
2103 		set_channelmap(i + 1, card->dch.dev.channelmap);
2104 		card->bch[i].debug = debug;
2105 		mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM, poll >> 1);
2106 		card->bch[i].hw = card;
2107 		card->bch[i].ch.send = hfcpci_l2l1B;
2108 		card->bch[i].ch.ctrl = hfc_bctrl;
2109 		card->bch[i].ch.nr = i + 1;
2110 		list_add(&card->bch[i].ch.list, &card->dch.dev.bchannels);
2111 	}
2112 	err = setup_hw(card);
2113 	if (err)
2114 		goto error;
2115 	snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1);
2116 	err = mISDN_register_device(&card->dch.dev, &card->pdev->dev, name);
2117 	if (err)
2118 		goto error;
2119 	HFC_cnt++;
2120 	printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt);
2121 	return 0;
2122 error:
2123 	mISDN_freebchannel(&card->bch[1]);
2124 	mISDN_freebchannel(&card->bch[0]);
2125 	mISDN_freedchannel(&card->dch);
2126 	kfree(card);
2127 	return err;
2128 }
2129 
2130 /* private data in the PCI devices list */
2131 struct _hfc_map {
2132 	u_int	subtype;
2133 	u_int	flag;
2134 	char	*name;
2135 };
2136 
2137 static const struct _hfc_map hfc_map[] =
2138 {
2139 	{HFC_CCD_2BD0, 0, "CCD/Billion/Asuscom 2BD0"},
2140 	{HFC_CCD_B000, 0, "Billion B000"},
2141 	{HFC_CCD_B006, 0, "Billion B006"},
2142 	{HFC_CCD_B007, 0, "Billion B007"},
2143 	{HFC_CCD_B008, 0, "Billion B008"},
2144 	{HFC_CCD_B009, 0, "Billion B009"},
2145 	{HFC_CCD_B00A, 0, "Billion B00A"},
2146 	{HFC_CCD_B00B, 0, "Billion B00B"},
2147 	{HFC_CCD_B00C, 0, "Billion B00C"},
2148 	{HFC_CCD_B100, 0, "Seyeon B100"},
2149 	{HFC_CCD_B700, 0, "Primux II S0 B700"},
2150 	{HFC_CCD_B701, 0, "Primux II S0 NT B701"},
2151 	{HFC_ABOCOM_2BD1, 0, "Abocom/Magitek 2BD1"},
2152 	{HFC_ASUS_0675, 0, "Asuscom/Askey 675"},
2153 	{HFC_BERKOM_TCONCEPT, 0, "German telekom T-Concept"},
2154 	{HFC_BERKOM_A1T, 0, "German telekom A1T"},
2155 	{HFC_ANIGMA_MC145575, 0, "Motorola MC145575"},
2156 	{HFC_ZOLTRIX_2BD0, 0, "Zoltrix 2BD0"},
2157 	{HFC_DIGI_DF_M_IOM2_E, 0,
2158 	 "Digi International DataFire Micro V IOM2 (Europe)"},
2159 	{HFC_DIGI_DF_M_E, 0,
2160 	 "Digi International DataFire Micro V (Europe)"},
2161 	{HFC_DIGI_DF_M_IOM2_A, 0,
2162 	 "Digi International DataFire Micro V IOM2 (North America)"},
2163 	{HFC_DIGI_DF_M_A, 0,
2164 	 "Digi International DataFire Micro V (North America)"},
2165 	{HFC_SITECOM_DC105V2, 0, "Sitecom Connectivity DC-105 ISDN TA"},
2166 	{},
2167 };
2168 
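/*
 * PCI device table; driver_data of each entry points at the matching
 * hfc_map entry so that hfc_probe() can recover the subtype and name.
 */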
2169 static const struct pci_device_id hfc_ids[] =
2170 {
2171 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0),
2172 	  (unsigned long) &hfc_map[0] },
2173 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B000),
2174 	  (unsigned long) &hfc_map[1] },
2175 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B006),
2176 	  (unsigned long) &hfc_map[2] },
2177 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B007),
2178 	  (unsigned long) &hfc_map[3] },
2179 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B008),
2180 	  (unsigned long) &hfc_map[4] },
2181 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B009),
2182 	  (unsigned long) &hfc_map[5] },
2183 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00A),
2184 	  (unsigned long) &hfc_map[6] },
2185 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00B),
2186 	  (unsigned long) &hfc_map[7] },
2187 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00C),
2188 	  (unsigned long) &hfc_map[8] },
2189 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B100),
2190 	  (unsigned long) &hfc_map[9] },
2191 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B700),
2192 	  (unsigned long) &hfc_map[10] },
2193 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B701),
2194 	  (unsigned long) &hfc_map[11] },
2195 	{ PCI_VDEVICE(ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1),
2196 	  (unsigned long) &hfc_map[12] },
2197 	{ PCI_VDEVICE(ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675),
2198 	  (unsigned long) &hfc_map[13] },
2199 	{ PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT),
2200 	  (unsigned long) &hfc_map[14] },
2201 	{ PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_A1T),
2202 	  (unsigned long) &hfc_map[15] },
2203 	{ PCI_VDEVICE(ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575),
2204 	  (unsigned long) &hfc_map[16] },
2205 	{ PCI_VDEVICE(ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0),
2206 	  (unsigned long) &hfc_map[17] },
2207 	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E),
2208 	  (unsigned long) &hfc_map[18] },
2209 	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_E),
2210 	  (unsigned long) &hfc_map[19] },
2211 	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A),
2212 	  (unsigned long) &hfc_map[20] },
2213 	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_A),
2214 	  (unsigned long) &hfc_map[21] },
2215 	{ PCI_VDEVICE(SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2),
2216 	  (unsigned long) &hfc_map[22] },
2217 	{},
2218 };
2219 
2220 static int
2221 hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2222 {
2223 	int		err = -ENOMEM;
2224 	struct hfc_pci	*card;
2225 	struct _hfc_map	*m = (struct _hfc_map *)ent->driver_data;
2226 
2227 	card = kzalloc(sizeof(struct hfc_pci), GFP_KERNEL);
2228 	if (!card) {
2229 		printk(KERN_ERR "No kmem for HFC card\n");
2230 		return err;
2231 	}
2232 	card->pdev = pdev;
2233 	card->subtype = m->subtype;
2234 	err = pci_enable_device(pdev);
2235 	if (err) {
2236 		kfree(card);
2237 		return err;
2238 	}
2239 
2240 	printk(KERN_INFO "mISDN_hfcpci: found adapter %s at %s\n",
2241 	       m->name, pci_name(pdev));
2242 
2243 	card->irq = pdev->irq;
2244 	pci_set_drvdata(pdev, card);
2245 	err = setup_card(card);
2246 	if (err)
2247 		pci_set_drvdata(pdev, NULL);
2248 	return err;
2249 }
2250 
2251 static void
2252 hfc_remove_pci(struct pci_dev *pdev)
2253 {
2254 	struct hfc_pci	*card = pci_get_drvdata(pdev);
2255 
2256 	if (card)
2257 		release_card(card);
2258 	else
2259 		if (debug)
2260 			printk(KERN_DEBUG "%s: drvdata already removed\n",
2261 			       __func__);
2262 }
2263 
2264 
2265 static struct pci_driver hfc_driver = {
2266 	.name = "hfcpci",
2267 	.probe = hfc_probe,
2268 	.remove = hfc_remove_pci,
2269 	.id_table = hfc_ids,
2270 };
2271 
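/*
 * Polling path used when a non-default poll value is given: a kernel timer
 * periodically runs _hfcpci_softirq() for every registered card whose IRQ
 * output is enabled, to service the transparent (ISDN_P_B_RAW) B-channel
 * FIFOs.
 */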
2272 static int
2273 _hfcpci_softirq(struct device *dev, void *unused)
2274 {
2275 	struct hfc_pci  *hc = dev_get_drvdata(dev);
2276 	struct bchannel *bch;
2277 	if (hc == NULL)
2278 		return 0;
2279 
2280 	if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
2281 		spin_lock_irq(&hc->lock);
2282 		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
2283 		if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
2284 			main_rec_hfcpci(bch);
2285 			tx_birq(bch);
2286 		}
2287 		bch = Sel_BCS(hc, hc->hw.bswapped ? 1 : 2);
2288 		if (bch && bch->state == ISDN_P_B_RAW) { /* B2 rx&tx */
2289 			main_rec_hfcpci(bch);
2290 			tx_birq(bch);
2291 		}
2292 		spin_unlock_irq(&hc->lock);
2293 	}
2294 	return 0;
2295 }
2296 
2297 static void
2298 hfcpci_softirq(struct timer_list *unused)
2299 {
2300 	WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, NULL,
2301 				      _hfcpci_softirq) != 0);
2302 
2303 	/* if the next nominal expiry is already past, resync to jiffies + 1 */
2304 	if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
2305 		hfc_jiffies = jiffies + 1;
2306 	else
2307 		hfc_jiffies += tics;
2308 	hfc_tl.expires = hfc_jiffies;
2309 	add_timer(&hfc_tl);
2310 }
2311 
2312 static int __init
2313 HFC_init(void)
2314 {
2315 	int		err;
2316 
2317 	if (!poll)
2318 		poll = HFCPCI_BTRANS_THRESHOLD;
2319 
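	/*
	 * Round a non-default poll value to a whole number of timer ticks:
	 * tics = poll * HZ / 8000 and poll = tics * 8000 / HZ. For example,
	 * with HZ = 1000 a requested poll of 60 gives tics = 7 and an
	 * effective poll of 56 samples.
	 */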
2320 	if (poll != HFCPCI_BTRANS_THRESHOLD) {
2321 		tics = (poll * HZ) / 8000;
2322 		if (tics < 1)
2323 			tics = 1;
2324 		poll = (tics * 8000) / HZ;
2325 		if (poll > 256 || poll < 8) {
2326 			printk(KERN_ERR "%s: Wrong poll value %d not in range "
2327 			       "of 8..256.\n", __func__, poll);
2328 			err = -EINVAL;
2329 			return err;
2330 		}
2331 	}
2332 	if (poll != HFCPCI_BTRANS_THRESHOLD) {
2333 		printk(KERN_INFO "%s: Using alternative poll value of %d\n",
2334 		       __func__, poll);
2335 		timer_setup(&hfc_tl, hfcpci_softirq, 0);
2336 		hfc_tl.expires = jiffies + tics;
2337 		hfc_jiffies = hfc_tl.expires;
2338 		add_timer(&hfc_tl);
2339 	} else
2340 		tics = 0; /* indicate the use of controller's timer */
2341 
2342 	err = pci_register_driver(&hfc_driver);
2343 	if (err) {
2344 		if (timer_pending(&hfc_tl))
2345 			del_timer(&hfc_tl);
2346 	}
2347 
2348 	return err;
2349 }
2350 
2351 static void __exit
2352 HFC_cleanup(void)
2353 {
2354 	del_timer_sync(&hfc_tl);
2355 
2356 	pci_unregister_driver(&hfc_driver);
2357 }
2358 
2359 module_init(HFC_init);
2360 module_exit(HFC_cleanup);
2361 
2362 MODULE_DEVICE_TABLE(pci, hfc_ids);
2363