xref: /linux/drivers/isdn/hardware/mISDN/hfcpci.c (revision 9c736ace0666efe68efd53fcdfa2c6653c3e0e72)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  * hfcpci.c     low level driver for CCD's hfc-pci based cards
5  *
6  * Author     Werner Cornelius (werner@isdn4linux.de)
7  *            based on existing driver for CCD hfc ISA cards
8  *            type approval valid for HFC-S PCI A based card
9  *
10  * Copyright 1999  by Werner Cornelius (werner@isdn-development.de)
11  * Copyright 2008  by Karsten Keil <kkeil@novell.com>
12  *
13  * Module options:
14  *
15  * debug:
16  *	NOTE: only one debug value must be given for all cards
17  *	See hfc_pci.h for debug flags.
18  *
19  * poll:
20  *	NOTE: only one poll value must be given for all cards
21  *	Give the number of samples for each fifo process.
22  *	By default 128 is used. Decrease to reduce delay, increase to
23  *	reduce cpu load. If unsure, don't mess with it!
24  *	A value of 128 will use the controller's interrupt. Other values will
25  *	use a kernel timer, because the controller will not allow values
26  *	lower than 128.
27  *	Also note that the value depends on the kernel timer frequency.
28  *	If the kernel uses a frequency of 1000 Hz, steps of 8 samples are possible.
29  *	If the kernel uses 100 Hz, steps of 80 samples are possible.
30  *	If the kernel uses 300 Hz, steps of about 26 samples are possible.
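 *
 *	(Worked example, derived from the 8 kHz S/T sample rate: one kernel
 *	tick covers 8000/HZ samples -- 8 at 1000 Hz, 32 at 250 Hz, 80 at
 *	100 Hz -- so timer-driven poll values are only meaningful in steps
 *	of that size.)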
31  */
32 
33 #include <linux/interrupt.h>
34 #include <linux/module.h>
35 #include <linux/pci.h>
36 #include <linux/delay.h>
37 #include <linux/mISDNhw.h>
38 #include <linux/slab.h>
39 
40 #include "hfc_pci.h"
41 
42 static void hfcpci_softirq(struct timer_list *unused);
43 static const char *hfcpci_revision = "2.0";
44 
45 static int HFC_cnt;
46 static uint debug;
47 static uint poll, tics;
48 static DEFINE_TIMER(hfc_tl, hfcpci_softirq);
49 static unsigned long hfc_jiffies;
50 
51 MODULE_AUTHOR("Karsten Keil");
52 MODULE_DESCRIPTION("mISDN driver for CCD's hfc-pci based cards");
53 MODULE_LICENSE("GPL");
54 module_param(debug, uint, S_IRUGO | S_IWUSR);
55 module_param(poll, uint, S_IRUGO | S_IWUSR);
56 
57 enum {
58 	HFC_CCD_2BD0,
59 	HFC_CCD_B000,
60 	HFC_CCD_B006,
61 	HFC_CCD_B007,
62 	HFC_CCD_B008,
63 	HFC_CCD_B009,
64 	HFC_CCD_B00A,
65 	HFC_CCD_B00B,
66 	HFC_CCD_B00C,
67 	HFC_CCD_B100,
68 	HFC_CCD_B700,
69 	HFC_CCD_B701,
70 	HFC_ASUS_0675,
71 	HFC_BERKOM_A1T,
72 	HFC_BERKOM_TCONCEPT,
73 	HFC_ANIGMA_MC145575,
74 	HFC_ZOLTRIX_2BD0,
75 	HFC_DIGI_DF_M_IOM2_E,
76 	HFC_DIGI_DF_M_E,
77 	HFC_DIGI_DF_M_IOM2_A,
78 	HFC_DIGI_DF_M_A,
79 	HFC_ABOCOM_2BD1,
80 	HFC_SITECOM_DC105V2,
81 };
82 
83 struct hfcPCI_hw {
84 	unsigned char		cirm;
85 	unsigned char		ctmt;
86 	unsigned char		clkdel;
87 	unsigned char		states;
88 	unsigned char		conn;
89 	unsigned char		mst_m;
90 	unsigned char		int_m1;
91 	unsigned char		int_m2;
92 	unsigned char		sctrl;
93 	unsigned char		sctrl_r;
94 	unsigned char		sctrl_e;
95 	unsigned char		trm;
96 	unsigned char		fifo_en;
97 	unsigned char		bswapped;
98 	unsigned char		protocol;
99 	int			nt_timer;
100 	unsigned char __iomem	*pci_io; /* start of PCI IO memory */
101 	dma_addr_t		dmahandle;
102 	void			*fifos; /* FIFO memory */
103 	int			last_bfifo_cnt[2];
104 	/* marker saving last b-fifo frame count */
105 	struct timer_list	timer;
106 };
107 
108 #define	HFC_CFG_MASTER		1
109 #define HFC_CFG_SLAVE		2
110 #define	HFC_CFG_PCM		3
111 #define HFC_CFG_2HFC		4
112 #define HFC_CFG_SLAVEHFC	5
113 #define HFC_CFG_NEG_F0		6
114 #define HFC_CFG_SW_DD_DU	7
115 
116 #define FLG_HFC_TIMER_T1	16
117 #define FLG_HFC_TIMER_T3	17
118 
119 #define NT_T1_COUNT	1120	/* number of 3.125ms interrupts (3.5s) */
120 #define NT_T3_COUNT	31	/* number of 3.125ms interrupts (97 ms) */
121 #define CLKDEL_TE	0x0e	/* CLKDEL in TE mode */
122 #define CLKDEL_NT	0x6c	/* CLKDEL in NT mode */
123 
124 
125 struct hfc_pci {
126 	u_char			subtype;
127 	u_char			chanlimit;
128 	u_char			initdone;
129 	u_long			cfg;
130 	u_int			irq;
131 	u_int			irqcnt;
132 	struct pci_dev		*pdev;
133 	struct hfcPCI_hw	hw;
134 	spinlock_t		lock;	/* card lock */
135 	struct dchannel		dch;
136 	struct bchannel		bch[2];
137 };
138 
139 /* Interface functions */
140 static void
141 enable_hwirq(struct hfc_pci *hc)
142 {
143 	hc->hw.int_m2 |= HFCPCI_IRQ_ENABLE;
144 	Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
145 }
146 
147 static void
148 disable_hwirq(struct hfc_pci *hc)
149 {
150 	hc->hw.int_m2 &= ~((u_char)HFCPCI_IRQ_ENABLE);
151 	Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
152 }
153 
154 /*
155  * free hardware resources used by driver
156  */
157 static void
158 release_io_hfcpci(struct hfc_pci *hc)
159 {
160 	/* disable memory mapped ports + busmaster */
161 	pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
162 	timer_delete(&hc->hw.timer);
163 	dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
164 			  hc->hw.dmahandle);
165 	iounmap(hc->hw.pci_io);
166 }
167 
168 /*
169  * set mode (NT or TE)
170  */
171 static void
172 hfcpci_setmode(struct hfc_pci *hc)
173 {
174 	if (hc->hw.protocol == ISDN_P_NT_S0) {
175 		hc->hw.clkdel = CLKDEL_NT;	/* ST-Bit delay for NT-Mode */
176 		hc->hw.sctrl |= SCTRL_MODE_NT;	/* NT-MODE */
177 		hc->hw.states = 1;		/* G1 */
178 	} else {
179 		hc->hw.clkdel = CLKDEL_TE;	/* ST-Bit delay for TE-Mode */
180 		hc->hw.sctrl &= ~SCTRL_MODE_NT;	/* TE-MODE */
181 		hc->hw.states = 2;		/* F2 */
182 	}
183 	Write_hfc(hc, HFCPCI_CLKDEL, hc->hw.clkdel);
184 	Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | hc->hw.states);
185 	udelay(10);
186 	Write_hfc(hc, HFCPCI_STATES, hc->hw.states | 0x40); /* Deactivate */
187 	Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
188 }
189 
190 /*
191  * function called to reset the HFC PCI chip. A complete software reset of chip
192  * and fifos is done.
193  */
194 static void
195 reset_hfcpci(struct hfc_pci *hc)
196 {
197 	u_char	val;
198 	int	cnt = 0;
199 
200 	printk(KERN_DEBUG "reset_hfcpci: entered\n");
201 	val = Read_hfc(hc, HFCPCI_CHIP_ID);
202 	printk(KERN_INFO "HFC_PCI: resetting HFC ChipId(%x)\n", val);
203 	/* enable memory mapped ports, disable busmaster */
204 	pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
205 	disable_hwirq(hc);
206 	/* enable memory ports + busmaster */
207 	pci_write_config_word(hc->pdev, PCI_COMMAND,
208 			      PCI_ENA_MEMIO + PCI_ENA_MASTER);
209 	val = Read_hfc(hc, HFCPCI_STATUS);
210 	printk(KERN_DEBUG "HFC-PCI status(%x) before reset\n", val);
211 	hc->hw.cirm = HFCPCI_RESET;	/* Reset On */
212 	Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
213 	set_current_state(TASK_UNINTERRUPTIBLE);
214 	mdelay(10);			/* Timeout 10ms */
215 	hc->hw.cirm = 0;		/* Reset Off */
216 	Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
217 	val = Read_hfc(hc, HFCPCI_STATUS);
218 	printk(KERN_DEBUG "HFC-PCI status(%x) after reset\n", val);
219 	while (cnt < 50000) { /* max 50000 us */
220 		udelay(5);
221 		cnt += 5;
222 		val = Read_hfc(hc, HFCPCI_STATUS);
223 		if (!(val & 2))
224 			break;
225 	}
226 	printk(KERN_DEBUG "HFC-PCI status(%x) after %dus\n", val, cnt);
227 
228 	hc->hw.fifo_en = 0x30;	/* only D fifos enabled */
229 
230 	hc->hw.bswapped = 0;	/* no exchange */
231 	hc->hw.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
232 	hc->hw.trm = HFCPCI_BTRANS_THRESMASK; /* no echo connect, threshold */
233 	hc->hw.sctrl = 0x40;	/* set tx_lo mode, error in datasheet ! */
234 	hc->hw.sctrl_r = 0;
235 	hc->hw.sctrl_e = HFCPCI_AUTO_AWAKE;	/* S/T Auto awake */
236 	hc->hw.mst_m = 0;
237 	if (test_bit(HFC_CFG_MASTER, &hc->cfg))
238 		hc->hw.mst_m |= HFCPCI_MASTER;	/* HFC Master Mode */
239 	if (test_bit(HFC_CFG_NEG_F0, &hc->cfg))
240 		hc->hw.mst_m |= HFCPCI_F0_NEGATIV;
241 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
242 	Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
243 	Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
244 	Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
245 
246 	hc->hw.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
247 		HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
248 	Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
249 
250 	/* Clear already pending ints */
251 	val = Read_hfc(hc, HFCPCI_INT_S1);
252 
253 	/* set NT/TE mode */
254 	hfcpci_setmode(hc);
255 
256 	Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
257 	Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
258 
259 	/*
260 	 * Init GCI/IOM2 in master mode
261 	 * Slots 0 and 1 are set for B-chan 1 and 2
262 	 * D- and monitor/CI channel are not enabled
263 	 * STIO1 is used as output for data, B1+B2 from ST->IOM+HFC
264 	 * STIO2 is used as data input, B1+B2 from IOM->ST
265 	 * ST B-channel send disabled -> continuous 1s
266 	 * The IOM slots are always enabled
267 	 */
268 	if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
269 		/* set data flow directions: connect B1,B2: HFC to/from PCM */
270 		hc->hw.conn = 0x09;
271 	} else {
272 		hc->hw.conn = 0x36;	/* set data flow directions */
273 		if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
274 			Write_hfc(hc, HFCPCI_B1_SSL, 0xC0);
275 			Write_hfc(hc, HFCPCI_B2_SSL, 0xC1);
276 			Write_hfc(hc, HFCPCI_B1_RSL, 0xC0);
277 			Write_hfc(hc, HFCPCI_B2_RSL, 0xC1);
278 		} else {
279 			Write_hfc(hc, HFCPCI_B1_SSL, 0x80);
280 			Write_hfc(hc, HFCPCI_B2_SSL, 0x81);
281 			Write_hfc(hc, HFCPCI_B1_RSL, 0x80);
282 			Write_hfc(hc, HFCPCI_B2_RSL, 0x81);
283 		}
284 	}
285 	Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
286 	val = Read_hfc(hc, HFCPCI_INT_S2);
287 }
288 
289 /*
290  * Timer function called when kernel timer expires
291  */
292 static void
293 hfcpci_Timer(struct timer_list *t)
294 {
295 	struct hfc_pci *hc = timer_container_of(hc, t, hw.timer);
296 	hc->hw.timer.expires = jiffies + 75;
297 	/* WD RESET */
298 /*
299  *	WriteReg(hc, HFCD_DATA, HFCD_CTMT, hc->hw.ctmt | 0x80);
300  *	add_timer(&hc->hw.timer);
301  */
302 }
303 
304 
305 /*
306  * select the b-channel entry that matches the requested channel and is active
307  */
308 static struct bchannel *
309 Sel_BCS(struct hfc_pci *hc, int channel)
310 {
311 	if (test_bit(FLG_ACTIVE, &hc->bch[0].Flags) &&
312 	    (hc->bch[0].nr & channel))
313 		return &hc->bch[0];
314 	else if (test_bit(FLG_ACTIVE, &hc->bch[1].Flags) &&
315 		 (hc->bch[1].nr & channel))
316 		return &hc->bch[1];
317 	else
318 		return NULL;
319 }
320 
321 /*
322  * clear the desired B-channel rx fifo
323  */
324 static void
325 hfcpci_clear_fifo_rx(struct hfc_pci *hc, int fifo)
326 {
327 	u_char		fifo_state;
328 	struct bzfifo	*bzr;
329 
330 	if (fifo) {
331 		bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
332 		fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2RX;
333 	} else {
334 		bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
335 		fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1RX;
336 	}
337 	if (fifo_state)
338 		hc->hw.fifo_en ^= fifo_state;
339 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
340 	hc->hw.last_bfifo_cnt[fifo] = 0;
341 	bzr->f1 = MAX_B_FRAMES;
342 	bzr->f2 = bzr->f1;	/* init F pointers to remain constant */
343 	bzr->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
344 	bzr->za[MAX_B_FRAMES].z2 = cpu_to_le16(
345 		le16_to_cpu(bzr->za[MAX_B_FRAMES].z1));
346 	if (fifo_state)
347 		hc->hw.fifo_en |= fifo_state;
348 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
349 }
350 
351 /*
352  * clear the desired B-channel tx fifo
353  */
354 static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo)
355 {
356 	u_char		fifo_state;
357 	struct bzfifo	*bzt;
358 
359 	if (fifo) {
360 		bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
361 		fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2TX;
362 	} else {
363 		bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
364 		fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1TX;
365 	}
366 	if (fifo_state)
367 		hc->hw.fifo_en ^= fifo_state;
368 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
369 	if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
370 		printk(KERN_DEBUG "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) "
371 		       "z1(%x) z2(%x) state(%x)\n",
372 		       fifo, bzt->f1, bzt->f2,
373 		       le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
374 		       le16_to_cpu(bzt->za[MAX_B_FRAMES].z2),
375 		       fifo_state);
376 	bzt->f2 = MAX_B_FRAMES;
377 	bzt->f1 = bzt->f2;	/* init F pointers to remain constant */
378 	bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
379 	bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 2);
380 	if (fifo_state)
381 		hc->hw.fifo_en |= fifo_state;
382 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
383 	if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
384 		printk(KERN_DEBUG
385 		       "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) z1(%x) z2(%x)\n",
386 		       fifo, bzt->f1, bzt->f2,
387 		       le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
388 		       le16_to_cpu(bzt->za[MAX_B_FRAMES].z2));
389 }
390 
391 /*
392  * read a complete B-frame out of the buffer
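 *
 * Orientation note on the FIFO bookkeeping used below: f1/f2 are frame
 * counters (the controller advances f1 when it has stored a complete frame,
 * the driver advances f2 after emptying one), while the za[] z1/z2 entries
 * hold the end/start byte offsets of that frame inside the circular B-FIFO,
 * which wraps at B_FIFO_SIZE and is biased by B_SUB_VAL.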
393  */
394 static void
395 hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
396 		   u_char *bdata, int count)
397 {
398 	u_char		*ptr, *ptr1, new_f2;
399 	int		maxlen, new_z2;
400 	struct zt	*zp;
401 
402 	if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
403 		printk(KERN_DEBUG "hfcpci_empty_fifo\n");
404 	zp = &bz->za[bz->f2];	/* point to Z-Regs */
405 	new_z2 = le16_to_cpu(zp->z2) + count;	/* new position in fifo */
406 	if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
407 		new_z2 -= B_FIFO_SIZE;	/* buffer wrap */
408 	new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
409 	if ((count > MAX_DATA_SIZE + 3) || (count < 4) ||
410 	    (*(bdata + (le16_to_cpu(zp->z1) - B_SUB_VAL)))) {
411 		if (bch->debug & DEBUG_HW)
412 			printk(KERN_DEBUG "hfcpci_empty_fifo: incoming packet "
413 			       "invalid length %d or crc\n", count);
414 #ifdef ERROR_STATISTIC
415 		bch->err_inv++;
416 #endif
417 		bz->za[new_f2].z2 = cpu_to_le16(new_z2);
418 		bz->f2 = new_f2;	/* next buffer */
419 	} else {
420 		bch->rx_skb = mI_alloc_skb(count - 3, GFP_ATOMIC);
421 		if (!bch->rx_skb) {
422 			printk(KERN_WARNING "HFCPCI: receive out of memory\n");
423 			return;
424 		}
425 		count -= 3;
426 		ptr = skb_put(bch->rx_skb, count);
427 
428 		if (le16_to_cpu(zp->z2) + count <= B_FIFO_SIZE + B_SUB_VAL)
429 			maxlen = count;		/* complete transfer */
430 		else
431 			maxlen = B_FIFO_SIZE + B_SUB_VAL -
432 				le16_to_cpu(zp->z2);	/* maximum */
433 
434 		ptr1 = bdata + (le16_to_cpu(zp->z2) - B_SUB_VAL);
435 		/* start of data */
436 		memcpy(ptr, ptr1, maxlen);	/* copy data */
437 		count -= maxlen;
438 
439 		if (count) {	/* rest remaining */
440 			ptr += maxlen;
441 			ptr1 = bdata;	/* start of buffer */
442 			memcpy(ptr, ptr1, count);	/* rest */
443 		}
444 		bz->za[new_f2].z2 = cpu_to_le16(new_z2);
445 		bz->f2 = new_f2;	/* next buffer */
446 		recv_Bchannel(bch, MISDN_ID_ANY, false);
447 	}
448 }
449 
450 /*
451  * D-channel receive procedure
452  */
453 static int
454 receive_dmsg(struct hfc_pci *hc)
455 {
456 	struct dchannel	*dch = &hc->dch;
457 	int		maxlen;
458 	int		rcnt, total;
459 	int		count = 5;
460 	u_char		*ptr, *ptr1;
461 	struct dfifo	*df;
462 	struct zt	*zp;
463 
464 	df = &((union fifo_area *)(hc->hw.fifos))->d_chan.d_rx;
465 	while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
466 		zp = &df->za[df->f2 & D_FREG_MASK];
467 		rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
468 		if (rcnt < 0)
469 			rcnt += D_FIFO_SIZE;
470 		rcnt++;
471 		if (dch->debug & DEBUG_HW_DCHANNEL)
472 			printk(KERN_DEBUG
473 			       "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)\n",
474 			       df->f1, df->f2,
475 			       le16_to_cpu(zp->z1),
476 			       le16_to_cpu(zp->z2),
477 			       rcnt);
478 
479 		if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
480 		    (df->data[le16_to_cpu(zp->z1)])) {
481 			if (dch->debug & DEBUG_HW)
482 				printk(KERN_DEBUG
483 				       "empty_fifo hfcpci packet inv. len "
484 				       "%d or crc %d\n",
485 				       rcnt,
486 				       df->data[le16_to_cpu(zp->z1)]);
487 #ifdef ERROR_STATISTIC
488 			dch->err_rx++;
489 #endif
490 			df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
491 				(MAX_D_FRAMES + 1);	/* next buffer */
492 			df->za[df->f2 & D_FREG_MASK].z2 =
493 				cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) &
494 					    (D_FIFO_SIZE - 1));
495 		} else {
496 			dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
497 			if (!dch->rx_skb) {
498 				printk(KERN_WARNING
499 				       "HFC-PCI: D receive out of memory\n");
500 				break;
501 			}
502 			total = rcnt;
503 			rcnt -= 3;
504 			ptr = skb_put(dch->rx_skb, rcnt);
505 
506 			if (le16_to_cpu(zp->z2) + rcnt <= D_FIFO_SIZE)
507 				maxlen = rcnt;	/* complete transfer */
508 			else
509 				maxlen = D_FIFO_SIZE - le16_to_cpu(zp->z2);
510 			/* maximum */
511 
512 			ptr1 = df->data + le16_to_cpu(zp->z2);
513 			/* start of data */
514 			memcpy(ptr, ptr1, maxlen);	/* copy data */
515 			rcnt -= maxlen;
516 
517 			if (rcnt) {	/* rest remaining */
518 				ptr += maxlen;
519 				ptr1 = df->data;	/* start of buffer */
520 				memcpy(ptr, ptr1, rcnt);	/* rest */
521 			}
522 			df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
523 				(MAX_D_FRAMES + 1);	/* next buffer */
524 			df->za[df->f2 & D_FREG_MASK].z2 = cpu_to_le16((
525 									      le16_to_cpu(zp->z2) + total) & (D_FIFO_SIZE - 1));
526 			recv_Dchannel(dch);
527 		}
528 	}
529 	return 1;
530 }
531 
532 /*
533  * check for transparent receive data and read max one 'poll' size if avail
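 *
 * In transparent mode only the z1/z2 byte pointers of the za[MAX_B_FRAMES]
 * entry are used; the F counters were parked equal by
 * hfcpci_clear_fifo_rx/tx ("init F pointers to remain constant"), so the
 * FIFO is treated here as a plain ring buffer of samples.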
534  */
535 static void
536 hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz,
537 			struct bzfifo *txbz, u_char *bdata)
538 {
539 	__le16	*z1r, *z2r, *z1t, *z2t;
540 	int	new_z2, fcnt_rx, fcnt_tx, maxlen;
541 	u_char	*ptr, *ptr1;
542 
543 	z1r = &rxbz->za[MAX_B_FRAMES].z1;	/* pointer to z reg */
544 	z2r = z1r + 1;
545 	z1t = &txbz->za[MAX_B_FRAMES].z1;
546 	z2t = z1t + 1;
547 
548 	fcnt_rx = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
549 	if (!fcnt_rx)
550 		return;	/* no data avail */
551 
552 	if (fcnt_rx <= 0)
553 		fcnt_rx += B_FIFO_SIZE;	/* bytes actually buffered */
554 	new_z2 = le16_to_cpu(*z2r) + fcnt_rx;	/* new position in fifo */
555 	if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
556 		new_z2 -= B_FIFO_SIZE;	/* buffer wrap */
557 
558 	fcnt_tx = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
559 	if (fcnt_tx <= 0)
560 		fcnt_tx += B_FIFO_SIZE;
561 	/* fcnt_tx contains available bytes in tx-fifo */
562 	fcnt_tx = B_FIFO_SIZE - fcnt_tx;
563 	/* remaining bytes to send (bytes in tx-fifo) */
564 
565 	if (test_bit(FLG_RX_OFF, &bch->Flags)) {
566 		bch->dropcnt += fcnt_rx;
567 		*z2r = cpu_to_le16(new_z2);
568 		return;
569 	}
570 	maxlen = bchannel_get_rxbuf(bch, fcnt_rx);
571 	if (maxlen < 0) {
572 		pr_warn("B%d: No bufferspace for %d bytes\n", bch->nr, fcnt_rx);
573 	} else {
574 		ptr = skb_put(bch->rx_skb, fcnt_rx);
575 		if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL)
576 			maxlen = fcnt_rx;	/* complete transfer */
577 		else
578 			maxlen = B_FIFO_SIZE + B_SUB_VAL - le16_to_cpu(*z2r);
579 		/* maximum */
580 
581 		ptr1 = bdata + (le16_to_cpu(*z2r) - B_SUB_VAL);
582 		/* start of data */
583 		memcpy(ptr, ptr1, maxlen);	/* copy data */
584 		fcnt_rx -= maxlen;
585 
586 		if (fcnt_rx) {	/* rest remaining */
587 			ptr += maxlen;
588 			ptr1 = bdata;	/* start of buffer */
589 			memcpy(ptr, ptr1, fcnt_rx);	/* rest */
590 		}
591 		recv_Bchannel(bch, fcnt_tx, false); /* bch, id, !force */
592 	}
593 	*z2r = cpu_to_le16(new_z2);		/* new position */
594 }
595 
596 /*
597  * B-channel main receive routine
598  */
599 static void
600 main_rec_hfcpci(struct bchannel *bch)
601 {
602 	struct hfc_pci	*hc = bch->hw;
603 	int		rcnt, real_fifo;
604 	int		receive = 0, count = 5;
605 	struct bzfifo	*txbz, *rxbz;
606 	u_char		*bdata;
607 	struct zt	*zp;
608 
609 	if ((bch->nr & 2) && (!hc->hw.bswapped)) {
610 		rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
611 		txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
612 		bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2;
613 		real_fifo = 1;
614 	} else {
615 		rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
616 		txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
617 		bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b1;
618 		real_fifo = 0;
619 	}
620 Begin:
621 	count--;
622 	if (rxbz->f1 != rxbz->f2) {
623 		if (bch->debug & DEBUG_HW_BCHANNEL)
624 			printk(KERN_DEBUG "hfcpci rec ch(%x) f1(%d) f2(%d)\n",
625 			       bch->nr, rxbz->f1, rxbz->f2);
626 		zp = &rxbz->za[rxbz->f2];
627 
628 		rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
629 		if (rcnt < 0)
630 			rcnt += B_FIFO_SIZE;
631 		rcnt++;
632 		if (bch->debug & DEBUG_HW_BCHANNEL)
633 			printk(KERN_DEBUG
634 			       "hfcpci rec ch(%x) z1(%x) z2(%x) cnt(%d)\n",
635 			       bch->nr, le16_to_cpu(zp->z1),
636 			       le16_to_cpu(zp->z2), rcnt);
637 		hfcpci_empty_bfifo(bch, rxbz, bdata, rcnt);
638 		rcnt = rxbz->f1 - rxbz->f2;
639 		if (rcnt < 0)
640 			rcnt += MAX_B_FRAMES + 1;
641 		if (hc->hw.last_bfifo_cnt[real_fifo] > rcnt + 1) {
642 			rcnt = 0;
643 			hfcpci_clear_fifo_rx(hc, real_fifo);
644 		}
645 		hc->hw.last_bfifo_cnt[real_fifo] = rcnt;
646 		if (rcnt > 1)
647 			receive = 1;
648 		else
649 			receive = 0;
650 	} else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
651 		hfcpci_empty_fifo_trans(bch, rxbz, txbz, bdata);
652 		return;
653 	} else
654 		receive = 0;
655 	if (count && receive)
656 		goto Begin;
657 
658 }
659 
660 /*
661  * D-channel send routine
662  */
663 static void
664 hfcpci_fill_dfifo(struct hfc_pci *hc)
665 {
666 	struct dchannel	*dch = &hc->dch;
667 	int		fcnt;
668 	int		count, new_z1, maxlen;
669 	struct dfifo	*df;
670 	u_char		*src, *dst, new_f1;
671 
672 	if ((dch->debug & DEBUG_HW_DCHANNEL) && !(dch->debug & DEBUG_HW_DFIFO))
673 		printk(KERN_DEBUG "%s\n", __func__);
674 
675 	if (!dch->tx_skb)
676 		return;
677 	count = dch->tx_skb->len - dch->tx_idx;
678 	if (count <= 0)
679 		return;
680 	df = &((union fifo_area *) (hc->hw.fifos))->d_chan.d_tx;
681 
682 	if (dch->debug & DEBUG_HW_DFIFO)
683 		printk(KERN_DEBUG "%s:f1(%d) f2(%d) z1(f1)(%x)\n", __func__,
684 		       df->f1, df->f2,
685 		       le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1));
686 	fcnt = df->f1 - df->f2;	/* frame count actually buffered */
687 	if (fcnt < 0)
688 		fcnt += (MAX_D_FRAMES + 1);	/* if wrap around */
689 	if (fcnt > (MAX_D_FRAMES - 1)) {
690 		if (dch->debug & DEBUG_HW_DCHANNEL)
691 			printk(KERN_DEBUG
692 			       "hfcpci_fill_Dfifo more than 14 frames\n");
693 #ifdef ERROR_STATISTIC
694 		dch->err_tx++;
695 #endif
696 		return;
697 	}
698 	/* now determine free bytes in FIFO buffer */
699 	maxlen = le16_to_cpu(df->za[df->f2 & D_FREG_MASK].z2) -
700 		le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) - 1;
701 	if (maxlen <= 0)
702 		maxlen += D_FIFO_SIZE;	/* count now contains available bytes */
703 
704 	if (dch->debug & DEBUG_HW_DCHANNEL)
705 		printk(KERN_DEBUG "hfcpci_fill_Dfifo count(%d/%d)\n",
706 		       count, maxlen);
707 	if (count > maxlen) {
708 		if (dch->debug & DEBUG_HW_DCHANNEL)
709 			printk(KERN_DEBUG "hfcpci_fill_Dfifo no fifo mem\n");
710 		return;
711 	}
712 	new_z1 = (le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) + count) &
713 		(D_FIFO_SIZE - 1);
714 	new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
715 	src = dch->tx_skb->data + dch->tx_idx;	/* source pointer */
716 	dst = df->data + le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
717 	maxlen = D_FIFO_SIZE - le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
718 	/* end fifo */
719 	if (maxlen > count)
720 		maxlen = count;	/* limit size */
721 	memcpy(dst, src, maxlen);	/* first copy */
722 
723 	count -= maxlen;	/* remaining bytes */
724 	if (count) {
725 		dst = df->data;	/* start of buffer */
726 		src += maxlen;	/* new position */
727 		memcpy(dst, src, count);
728 	}
729 	df->za[new_f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
730 	/* for next buffer */
731 	df->za[df->f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
732 	/* new pos actual buffer */
733 	df->f1 = new_f1;	/* next frame */
734 	dch->tx_idx = dch->tx_skb->len;
735 }
736 
737 /*
738  * B-channel send routine
739  */
740 static void
741 hfcpci_fill_fifo(struct bchannel *bch)
742 {
743 	struct hfc_pci	*hc = bch->hw;
744 	int		maxlen, fcnt;
745 	int		count, new_z1;
746 	struct bzfifo	*bz;
747 	u_char		*bdata;
748 	u_char		new_f1, *src, *dst;
749 	__le16 *z1t, *z2t;
750 
751 	if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
752 		printk(KERN_DEBUG "%s\n", __func__);
753 	if ((!bch->tx_skb) || bch->tx_skb->len == 0) {
754 		if (!test_bit(FLG_FILLEMPTY, &bch->Flags) &&
755 		    !test_bit(FLG_TRANSPARENT, &bch->Flags))
756 			return;
757 		count = HFCPCI_FILLEMPTY;
758 	} else {
759 		count = bch->tx_skb->len - bch->tx_idx;
760 	}
761 	if ((bch->nr & 2) && (!hc->hw.bswapped)) {
762 		bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
763 		bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2;
764 	} else {
765 		bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
766 		bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b1;
767 	}
768 
769 	if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
770 		z1t = &bz->za[MAX_B_FRAMES].z1;
771 		z2t = z1t + 1;
772 		if (bch->debug & DEBUG_HW_BCHANNEL)
773 			printk(KERN_DEBUG "hfcpci_fill_fifo_trans ch(%x) "
774 			       "cnt(%d) z1(%x) z2(%x)\n", bch->nr, count,
775 			       le16_to_cpu(*z1t), le16_to_cpu(*z2t));
776 		fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
777 		if (fcnt <= 0)
778 			fcnt += B_FIFO_SIZE;
779 		if (test_bit(FLG_FILLEMPTY, &bch->Flags)) {
780 			/* fcnt contains available bytes in fifo */
781 			if (count > fcnt)
782 				count = fcnt;
783 			new_z1 = le16_to_cpu(*z1t) + count;
784 			/* new buffer Position */
785 			if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
786 				new_z1 -= B_FIFO_SIZE;	/* buffer wrap */
787 			dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
788 			maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
789 			/* end of fifo */
790 			if (bch->debug & DEBUG_HW_BFIFO)
791 				printk(KERN_DEBUG "hfcpci_FFt fillempty "
792 				       "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n",
793 				       fcnt, maxlen, new_z1, dst);
794 			if (maxlen > count)
795 				maxlen = count;		/* limit size */
796 			memset(dst, bch->fill[0], maxlen); /* first copy */
797 			count -= maxlen;		/* remaining bytes */
798 			if (count) {
799 				dst = bdata;		/* start of buffer */
800 				memset(dst, bch->fill[0], count);
801 			}
802 			*z1t = cpu_to_le16(new_z1);	/* now send data */
803 			return;
804 		}
805 		/* fcnt contains available bytes in fifo */
806 		fcnt = B_FIFO_SIZE - fcnt;
807 		/* remaining bytes to send (bytes in fifo) */
808 
809 	next_t_frame:
810 		count = bch->tx_skb->len - bch->tx_idx;
811 		/* maximum fill shall be poll*2 */
812 		if (count > (poll << 1) - fcnt)
813 			count = (poll << 1) - fcnt;
814 		if (count <= 0)
815 			return;
816 		/* data is suitable for fifo */
817 		new_z1 = le16_to_cpu(*z1t) + count;
818 		/* new buffer Position */
819 		if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
820 			new_z1 -= B_FIFO_SIZE;	/* buffer wrap */
821 		src = bch->tx_skb->data + bch->tx_idx;
822 		/* source pointer */
823 		dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
824 		maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
825 		/* end of fifo */
826 		if (bch->debug & DEBUG_HW_BFIFO)
827 			printk(KERN_DEBUG "hfcpci_FFt fcnt(%d) "
828 			       "maxl(%d) nz1(%x) dst(%p)\n",
829 			       fcnt, maxlen, new_z1, dst);
830 		fcnt += count;
831 		bch->tx_idx += count;
832 		if (maxlen > count)
833 			maxlen = count;		/* limit size */
834 		memcpy(dst, src, maxlen);	/* first copy */
835 		count -= maxlen;	/* remaining bytes */
836 		if (count) {
837 			dst = bdata;	/* start of buffer */
838 			src += maxlen;	/* new position */
839 			memcpy(dst, src, count);
840 		}
841 		*z1t = cpu_to_le16(new_z1);	/* now send data */
842 		if (bch->tx_idx < bch->tx_skb->len)
843 			return;
844 		dev_kfree_skb_any(bch->tx_skb);
845 		if (get_next_bframe(bch))
846 			goto next_t_frame;
847 		return;
848 	}
849 	if (bch->debug & DEBUG_HW_BCHANNEL)
850 		printk(KERN_DEBUG
851 		       "%s: ch(%x) f1(%d) f2(%d) z1(f1)(%x)\n",
852 		       __func__, bch->nr, bz->f1, bz->f2,
853 		       bz->za[bz->f1].z1);
854 	fcnt = bz->f1 - bz->f2;	/* frame count actually buffered */
855 	if (fcnt < 0)
856 		fcnt += (MAX_B_FRAMES + 1);	/* if wrap around */
857 	if (fcnt > (MAX_B_FRAMES - 1)) {
858 		if (bch->debug & DEBUG_HW_BCHANNEL)
859 			printk(KERN_DEBUG
860 			       "hfcpci_fill_Bfifo more than 14 frames\n");
861 		return;
862 	}
863 	/* now determine free bytes in FIFO buffer */
864 	maxlen = le16_to_cpu(bz->za[bz->f2].z2) -
865 		le16_to_cpu(bz->za[bz->f1].z1) - 1;
866 	if (maxlen <= 0)
867 		maxlen += B_FIFO_SIZE;	/* count now contains available bytes */
868 
869 	if (bch->debug & DEBUG_HW_BCHANNEL)
870 		printk(KERN_DEBUG "hfcpci_fill_fifo ch(%x) count(%d/%d)\n",
871 		       bch->nr, count, maxlen);
872 
873 	if (maxlen < count) {
874 		if (bch->debug & DEBUG_HW_BCHANNEL)
875 			printk(KERN_DEBUG "hfcpci_fill_fifo no fifo mem\n");
876 		return;
877 	}
878 	new_z1 = le16_to_cpu(bz->za[bz->f1].z1) + count;
879 	/* new buffer Position */
880 	if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
881 		new_z1 -= B_FIFO_SIZE;	/* buffer wrap */
882 
883 	new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
884 	src = bch->tx_skb->data + bch->tx_idx;	/* source pointer */
885 	dst = bdata + (le16_to_cpu(bz->za[bz->f1].z1) - B_SUB_VAL);
886 	maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(bz->za[bz->f1].z1);
887 	/* end fifo */
888 	if (maxlen > count)
889 		maxlen = count;	/* limit size */
890 	memcpy(dst, src, maxlen);	/* first copy */
891 
892 	count -= maxlen;	/* remaining bytes */
893 	if (count) {
894 		dst = bdata;	/* start of buffer */
895 		src += maxlen;	/* new position */
896 		memcpy(dst, src, count);
897 	}
898 	bz->za[new_f1].z1 = cpu_to_le16(new_z1);	/* for next buffer */
899 	bz->f1 = new_f1;	/* next frame */
900 	dev_kfree_skb_any(bch->tx_skb);
901 	get_next_bframe(bch);
902 }
903 
904 
905 
906 /*
907  * handle L1 state changes TE
908  */
909 
910 static void
911 ph_state_te(struct dchannel *dch)
912 {
913 	if (dch->debug)
914 		printk(KERN_DEBUG "%s: TE newstate %x\n",
915 		       __func__, dch->state);
916 	switch (dch->state) {
917 	case 0:
918 		l1_event(dch->l1, HW_RESET_IND);
919 		break;
920 	case 3:
921 		l1_event(dch->l1, HW_DEACT_IND);
922 		break;
923 	case 5:
924 	case 8:
925 		l1_event(dch->l1, ANYSIGNAL);
926 		break;
927 	case 6:
928 		l1_event(dch->l1, INFO2);
929 		break;
930 	case 7:
931 		l1_event(dch->l1, INFO4_P8);
932 		break;
933 	}
934 }
935 
936 /*
937  * handle L1 state changes NT
938  */
939 
940 static void
941 handle_nt_timer3(struct dchannel *dch) {
942 	struct hfc_pci	*hc = dch->hw;
943 
944 	test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
945 	hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
946 	Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
947 	hc->hw.nt_timer = 0;
948 	test_and_set_bit(FLG_ACTIVE, &dch->Flags);
949 	if (test_bit(HFC_CFG_MASTER, &hc->cfg))
950 		hc->hw.mst_m |= HFCPCI_MASTER;
951 	Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
952 	_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
953 		    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
954 }
955 
956 static void
957 ph_state_nt(struct dchannel *dch)
958 {
959 	struct hfc_pci	*hc = dch->hw;
960 
961 	if (dch->debug)
962 		printk(KERN_DEBUG "%s: NT newstate %x\n",
963 		       __func__, dch->state);
964 	switch (dch->state) {
965 	case 2:
966 		if (hc->hw.nt_timer < 0) {
967 			hc->hw.nt_timer = 0;
968 			test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
969 			test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
970 			hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
971 			Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
972 			/* Clear already pending ints */
973 			(void) Read_hfc(hc, HFCPCI_INT_S1);
974 			Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
975 			udelay(10);
976 			Write_hfc(hc, HFCPCI_STATES, 4);
977 			dch->state = 4;
978 		} else if (hc->hw.nt_timer == 0) {
979 			hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
980 			Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
981 			hc->hw.nt_timer = NT_T1_COUNT;
982 			hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
983 			hc->hw.ctmt |= HFCPCI_TIM3_125;
984 			Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
985 				  HFCPCI_CLTIMER);
986 			test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
987 			test_and_set_bit(FLG_HFC_TIMER_T1, &dch->Flags);
988 			/* allow G2 -> G3 transition */
989 			Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
990 		} else {
991 			Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
992 		}
993 		break;
994 	case 1:
995 		hc->hw.nt_timer = 0;
996 		test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
997 		test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
998 		hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
999 		Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1000 		test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
1001 		hc->hw.mst_m &= ~HFCPCI_MASTER;
1002 		Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1003 		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1004 		_queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
1005 			    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1006 		break;
1007 	case 4:
1008 		hc->hw.nt_timer = 0;
1009 		test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
1010 		test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1011 		hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1012 		Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1013 		break;
1014 	case 3:
1015 		if (!test_and_set_bit(FLG_HFC_TIMER_T3, &dch->Flags)) {
1016 			if (!test_and_clear_bit(FLG_L2_ACTIVATED,
1017 						&dch->Flags)) {
1018 				handle_nt_timer3(dch);
1019 				break;
1020 			}
1021 			test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1022 			hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
1023 			Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1024 			hc->hw.nt_timer = NT_T3_COUNT;
1025 			hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
1026 			hc->hw.ctmt |= HFCPCI_TIM3_125;
1027 			Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
1028 				  HFCPCI_CLTIMER);
1029 		}
1030 		break;
1031 	}
1032 }
1033 
1034 static void
1035 ph_state(struct dchannel *dch)
1036 {
1037 	struct hfc_pci	*hc = dch->hw;
1038 
1039 	if (hc->hw.protocol == ISDN_P_NT_S0) {
1040 		if (test_bit(FLG_HFC_TIMER_T3, &dch->Flags) &&
1041 		    hc->hw.nt_timer < 0)
1042 			handle_nt_timer3(dch);
1043 		else
1044 			ph_state_nt(dch);
1045 	} else
1046 		ph_state_te(dch);
1047 }
1048 
1049 /*
1050  * Layer 1 callback function
1051  */
1052 static int
1053 hfc_l1callback(struct dchannel *dch, u_int cmd)
1054 {
1055 	struct hfc_pci		*hc = dch->hw;
1056 
1057 	switch (cmd) {
1058 	case INFO3_P8:
1059 	case INFO3_P10:
1060 		if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1061 			hc->hw.mst_m |= HFCPCI_MASTER;
1062 		Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1063 		break;
1064 	case HW_RESET_REQ:
1065 		Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3);
1066 		/* HFC ST 3 */
1067 		udelay(6);
1068 		Write_hfc(hc, HFCPCI_STATES, 3);	/* HFC ST 3 */
1069 		if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1070 			hc->hw.mst_m |= HFCPCI_MASTER;
1071 		Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1072 		Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1073 			  HFCPCI_DO_ACTION);
1074 		l1_event(dch->l1, HW_POWERUP_IND);
1075 		break;
1076 	case HW_DEACT_REQ:
1077 		hc->hw.mst_m &= ~HFCPCI_MASTER;
1078 		Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1079 		skb_queue_purge(&dch->squeue);
1080 		if (dch->tx_skb) {
1081 			dev_kfree_skb(dch->tx_skb);
1082 			dch->tx_skb = NULL;
1083 		}
1084 		dch->tx_idx = 0;
1085 		if (dch->rx_skb) {
1086 			dev_kfree_skb(dch->rx_skb);
1087 			dch->rx_skb = NULL;
1088 		}
1089 		test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1090 		if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1091 			timer_delete(&dch->timer);
1092 		break;
1093 	case HW_POWERUP_REQ:
1094 		Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION);
1095 		break;
1096 	case PH_ACTIVATE_IND:
1097 		test_and_set_bit(FLG_ACTIVE, &dch->Flags);
1098 		_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1099 			    GFP_ATOMIC);
1100 		break;
1101 	case PH_DEACTIVATE_IND:
1102 		test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
1103 		_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1104 			    GFP_ATOMIC);
1105 		break;
1106 	default:
1107 		if (dch->debug & DEBUG_HW)
1108 			printk(KERN_DEBUG "%s: unknown command %x\n",
1109 			       __func__, cmd);
1110 		return -1;
1111 	}
1112 	return 0;
1113 }
1114 
1115 /*
1116  * Interrupt handler
1117  */
1118 static inline void
1119 tx_birq(struct bchannel *bch)
1120 {
1121 	if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
1122 		hfcpci_fill_fifo(bch);
1123 	else {
1124 		dev_kfree_skb_any(bch->tx_skb);
1125 		if (get_next_bframe(bch))
1126 			hfcpci_fill_fifo(bch);
1127 	}
1128 }
1129 
1130 static inline void
1131 tx_dirq(struct dchannel *dch)
1132 {
1133 	if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len)
1134 		hfcpci_fill_dfifo(dch->hw);
1135 	else {
1136 		dev_kfree_skb(dch->tx_skb);
1137 		if (get_next_dframe(dch))
1138 			hfcpci_fill_dfifo(dch->hw);
1139 	}
1140 }
1141 
1142 static irqreturn_t
1143 hfcpci_int(int intno, void *dev_id)
1144 {
1145 	struct hfc_pci	*hc = dev_id;
1146 	u_char		exval;
1147 	struct bchannel	*bch;
1148 	u_char		val, stat;
1149 
1150 	spin_lock(&hc->lock);
1151 	if (!(hc->hw.int_m2 & 0x08)) {
1152 		spin_unlock(&hc->lock);
1153 		return IRQ_NONE; /* not initialised */
1154 	}
1155 	stat = Read_hfc(hc, HFCPCI_STATUS);
1156 	if (HFCPCI_ANYINT & stat) {
1157 		val = Read_hfc(hc, HFCPCI_INT_S1);
1158 		if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1159 			printk(KERN_DEBUG
1160 			       "HFC-PCI: stat(%02x) s1(%02x)\n", stat, val);
1161 	} else {
1162 		/* shared */
1163 		spin_unlock(&hc->lock);
1164 		return IRQ_NONE;
1165 	}
1166 	hc->irqcnt++;
1167 
1168 	if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1169 		printk(KERN_DEBUG "HFC-PCI irq %x\n", val);
1170 	val &= hc->hw.int_m1;
1171 	if (val & 0x40) {	/* state machine irq */
1172 		exval = Read_hfc(hc, HFCPCI_STATES) & 0xf;
1173 		if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1174 			printk(KERN_DEBUG "ph_state chg %d->%d\n",
1175 			       hc->dch.state, exval);
1176 		hc->dch.state = exval;
1177 		schedule_event(&hc->dch, FLG_PHCHANGE);
1178 		val &= ~0x40;
1179 	}
1180 	if (val & 0x80) {	/* timer irq */
1181 		if (hc->hw.protocol == ISDN_P_NT_S0) {
1182 			if ((--hc->hw.nt_timer) < 0)
1183 				schedule_event(&hc->dch, FLG_PHCHANGE);
1184 		}
1185 		val &= ~0x80;
1186 		Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER);
1187 	}
1188 	if (val & 0x08) {	/* B1 rx */
1189 		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1190 		if (bch)
1191 			main_rec_hfcpci(bch);
1192 		else if (hc->dch.debug)
1193 			printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n");
1194 	}
1195 	if (val & 0x10) {	/* B2 rx */
1196 		bch = Sel_BCS(hc, 2);
1197 		if (bch)
1198 			main_rec_hfcpci(bch);
1199 		else if (hc->dch.debug)
1200 			printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n");
1201 	}
1202 	if (val & 0x01) {	/* B1 tx */
1203 		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1204 		if (bch)
1205 			tx_birq(bch);
1206 		else if (hc->dch.debug)
1207 			printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n");
1208 	}
1209 	if (val & 0x02) {	/* B2 tx */
1210 		bch = Sel_BCS(hc, 2);
1211 		if (bch)
1212 			tx_birq(bch);
1213 		else if (hc->dch.debug)
1214 			printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n");
1215 	}
1216 	if (val & 0x20)		/* D rx */
1217 		receive_dmsg(hc);
1218 	if (val & 0x04) {	/* D tx */
1219 		if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags))
1220 			timer_delete(&hc->dch.timer);
1221 		tx_dirq(&hc->dch);
1222 	}
1223 	spin_unlock(&hc->lock);
1224 	return IRQ_HANDLED;
1225 }
1226 
1227 /*
1228  * timer callback for D-chan busy resolution. Currently no function
1229  */
1230 static void
1231 hfcpci_dbusy_timer(struct timer_list *t)
1232 {
1233 }
1234 
1235 /*
1236  * activate/deactivate hardware for selected channels and mode
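 *
 * The bc argument packs the B-channel select in its low byte; when a PCM
 * slot assignment is requested, bits 8-15 carry the rx slot, bits 16-23 the
 * tx slot and bits 24-31 a non-zero PCM mode marker (see the decoding at
 * the top of the function).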
1237  */
1238 static int
1239 mode_hfcpci(struct bchannel *bch, int bc, int protocol)
1240 {
1241 	struct hfc_pci	*hc = bch->hw;
1242 	int		fifo2;
1243 	u_char		rx_slot = 0, tx_slot = 0, pcm_mode;
1244 
1245 	if (bch->debug & DEBUG_HW_BCHANNEL)
1246 		printk(KERN_DEBUG
1247 		       "HFCPCI bchannel protocol %x-->%x ch %x-->%x\n",
1248 		       bch->state, protocol, bch->nr, bc);
1249 
1250 	fifo2 = bc;
1251 	pcm_mode = (bc >> 24) & 0xff;
1252 	if (pcm_mode) { /* PCM SLOT USE */
1253 		if (!test_bit(HFC_CFG_PCM, &hc->cfg))
1254 			printk(KERN_WARNING
1255 			       "%s: pcm channel id without HFC_CFG_PCM\n",
1256 			       __func__);
1257 		rx_slot = (bc >> 8) & 0xff;
1258 		tx_slot = (bc >> 16) & 0xff;
1259 		bc = bc & 0xff;
1260 	} else if (test_bit(HFC_CFG_PCM, &hc->cfg) && (protocol > ISDN_P_NONE))
1261 		printk(KERN_WARNING "%s: no pcm channel id but HFC_CFG_PCM\n",
1262 		       __func__);
1263 	if (hc->chanlimit > 1) {
1264 		hc->hw.bswapped = 0;	/* B1 and B2 normal mode */
1265 		hc->hw.sctrl_e &= ~0x80;
1266 	} else {
1267 		if (bc & 2) {
1268 			if (protocol != ISDN_P_NONE) {
1269 				hc->hw.bswapped = 1; /* B1 and B2 exchanged */
1270 				hc->hw.sctrl_e |= 0x80;
1271 			} else {
1272 				hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1273 				hc->hw.sctrl_e &= ~0x80;
1274 			}
1275 			fifo2 = 1;
1276 		} else {
1277 			hc->hw.bswapped = 0;	/* B1 and B2 normal mode */
1278 			hc->hw.sctrl_e &= ~0x80;
1279 		}
1280 	}
1281 	switch (protocol) {
1282 	case (-1): /* used for init */
1283 		bch->state = -1;
1284 		bch->nr = bc;
1285 		fallthrough;
1286 	case (ISDN_P_NONE):
1287 		if (bch->state == ISDN_P_NONE)
1288 			return 0;
1289 		if (bc & 2) {
1290 			hc->hw.sctrl &= ~SCTRL_B2_ENA;
1291 			hc->hw.sctrl_r &= ~SCTRL_B2_ENA;
1292 		} else {
1293 			hc->hw.sctrl &= ~SCTRL_B1_ENA;
1294 			hc->hw.sctrl_r &= ~SCTRL_B1_ENA;
1295 		}
1296 		if (fifo2 & 2) {
1297 			hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
1298 			hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS |
1299 					   HFCPCI_INTS_B2REC);
1300 		} else {
1301 			hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
1302 			hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS |
1303 					   HFCPCI_INTS_B1REC);
1304 		}
1305 #ifdef REVERSE_BITORDER
1306 		if (bch->nr & 2)
1307 			hc->hw.cirm &= 0x7f;
1308 		else
1309 			hc->hw.cirm &= 0xbf;
1310 #endif
1311 		bch->state = ISDN_P_NONE;
1312 		bch->nr = bc;
1313 		test_and_clear_bit(FLG_HDLC, &bch->Flags);
1314 		test_and_clear_bit(FLG_TRANSPARENT, &bch->Flags);
1315 		break;
1316 	case (ISDN_P_B_RAW):
1317 		bch->state = protocol;
1318 		bch->nr = bc;
1319 		hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
1320 		hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
1321 		if (bc & 2) {
1322 			hc->hw.sctrl |= SCTRL_B2_ENA;
1323 			hc->hw.sctrl_r |= SCTRL_B2_ENA;
1324 #ifdef REVERSE_BITORDER
1325 			hc->hw.cirm |= 0x80;
1326 #endif
1327 		} else {
1328 			hc->hw.sctrl |= SCTRL_B1_ENA;
1329 			hc->hw.sctrl_r |= SCTRL_B1_ENA;
1330 #ifdef REVERSE_BITORDER
1331 			hc->hw.cirm |= 0x40;
1332 #endif
1333 		}
1334 		if (fifo2 & 2) {
1335 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1336 			if (!tics)
1337 				hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
1338 						  HFCPCI_INTS_B2REC);
1339 			hc->hw.ctmt |= 2;
1340 			hc->hw.conn &= ~0x18;
1341 		} else {
1342 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1343 			if (!tics)
1344 				hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
1345 						  HFCPCI_INTS_B1REC);
1346 			hc->hw.ctmt |= 1;
1347 			hc->hw.conn &= ~0x03;
1348 		}
1349 		test_and_set_bit(FLG_TRANSPARENT, &bch->Flags);
1350 		break;
1351 	case (ISDN_P_B_HDLC):
1352 		bch->state = protocol;
1353 		bch->nr = bc;
1354 		hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
1355 		hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
1356 		if (bc & 2) {
1357 			hc->hw.sctrl |= SCTRL_B2_ENA;
1358 			hc->hw.sctrl_r |= SCTRL_B2_ENA;
1359 		} else {
1360 			hc->hw.sctrl |= SCTRL_B1_ENA;
1361 			hc->hw.sctrl_r |= SCTRL_B1_ENA;
1362 		}
1363 		if (fifo2 & 2) {
1364 			hc->hw.last_bfifo_cnt[1] = 0;
1365 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1366 			hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
1367 					  HFCPCI_INTS_B2REC);
1368 			hc->hw.ctmt &= ~2;
1369 			hc->hw.conn &= ~0x18;
1370 		} else {
1371 			hc->hw.last_bfifo_cnt[0] = 0;
1372 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1373 			hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
1374 					  HFCPCI_INTS_B1REC);
1375 			hc->hw.ctmt &= ~1;
1376 			hc->hw.conn &= ~0x03;
1377 		}
1378 		test_and_set_bit(FLG_HDLC, &bch->Flags);
1379 		break;
1380 	default:
1381 		printk(KERN_DEBUG "prot not known %x\n", protocol);
1382 		return -ENOPROTOOPT;
1383 	}
1384 	if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
1385 		if ((protocol == ISDN_P_NONE) ||
1386 		    (protocol == -1)) {	/* init case */
1387 			rx_slot = 0;
1388 			tx_slot = 0;
1389 		} else {
1390 			if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
1391 				rx_slot |= 0xC0;
1392 				tx_slot |= 0xC0;
1393 			} else {
1394 				rx_slot |= 0x80;
1395 				tx_slot |= 0x80;
1396 			}
1397 		}
1398 		if (bc & 2) {
1399 			hc->hw.conn &= 0xc7;
1400 			hc->hw.conn |= 0x08;
1401 			printk(KERN_DEBUG "%s: Write_hfc: B2_SSL 0x%x\n",
1402 			       __func__, tx_slot);
1403 			printk(KERN_DEBUG "%s: Write_hfc: B2_RSL 0x%x\n",
1404 			       __func__, rx_slot);
1405 			Write_hfc(hc, HFCPCI_B2_SSL, tx_slot);
1406 			Write_hfc(hc, HFCPCI_B2_RSL, rx_slot);
1407 		} else {
1408 			hc->hw.conn &= 0xf8;
1409 			hc->hw.conn |= 0x01;
1410 			printk(KERN_DEBUG "%s: Write_hfc: B1_SSL 0x%x\n",
1411 			       __func__, tx_slot);
1412 			printk(KERN_DEBUG "%s: Write_hfc: B1_RSL 0x%x\n",
1413 			       __func__, rx_slot);
1414 			Write_hfc(hc, HFCPCI_B1_SSL, tx_slot);
1415 			Write_hfc(hc, HFCPCI_B1_RSL, rx_slot);
1416 		}
1417 	}
1418 	Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
1419 	Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1420 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1421 	Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
1422 	Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1423 	Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1424 	Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1425 #ifdef REVERSE_BITORDER
1426 	Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1427 #endif
1428 	return 0;
1429 }
1430 
1431 static int
1432 set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
1433 {
1434 	struct hfc_pci	*hc = bch->hw;
1435 
1436 	if (bch->debug & DEBUG_HW_BCHANNEL)
1437 		printk(KERN_DEBUG
1438 		       "HFCPCI bchannel test rx protocol %x-->%x ch %x-->%x\n",
1439 		       bch->state, protocol, bch->nr, chan);
1440 	if (bch->nr != chan) {
1441 		printk(KERN_DEBUG
1442 		       "HFCPCI rxtest wrong channel parameter %x/%x\n",
1443 		       bch->nr, chan);
1444 		return -EINVAL;
1445 	}
1446 	switch (protocol) {
1447 	case (ISDN_P_B_RAW):
1448 		bch->state = protocol;
1449 		hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
1450 		if (chan & 2) {
1451 			hc->hw.sctrl_r |= SCTRL_B2_ENA;
1452 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1453 			if (!tics)
1454 				hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1455 			hc->hw.ctmt |= 2;
1456 			hc->hw.conn &= ~0x18;
1457 #ifdef REVERSE_BITORDER
1458 			hc->hw.cirm |= 0x80;
1459 #endif
1460 		} else {
1461 			hc->hw.sctrl_r |= SCTRL_B1_ENA;
1462 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1463 			if (!tics)
1464 				hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1465 			hc->hw.ctmt |= 1;
1466 			hc->hw.conn &= ~0x03;
1467 #ifdef REVERSE_BITORDER
1468 			hc->hw.cirm |= 0x40;
1469 #endif
1470 		}
1471 		break;
1472 	case (ISDN_P_B_HDLC):
1473 		bch->state = protocol;
1474 		hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
1475 		if (chan & 2) {
1476 			hc->hw.sctrl_r |= SCTRL_B2_ENA;
1477 			hc->hw.last_bfifo_cnt[1] = 0;
1478 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1479 			hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1480 			hc->hw.ctmt &= ~2;
1481 			hc->hw.conn &= ~0x18;
1482 		} else {
1483 			hc->hw.sctrl_r |= SCTRL_B1_ENA;
1484 			hc->hw.last_bfifo_cnt[0] = 0;
1485 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1486 			hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1487 			hc->hw.ctmt &= ~1;
1488 			hc->hw.conn &= ~0x03;
1489 		}
1490 		break;
1491 	default:
1492 		printk(KERN_DEBUG "prot not known %x\n", protocol);
1493 		return -ENOPROTOOPT;
1494 	}
1495 	Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1496 	Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1497 	Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1498 	Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1499 	Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1500 #ifdef REVERSE_BITORDER
1501 	Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1502 #endif
1503 	return 0;
1504 }
1505 
1506 static void
1507 deactivate_bchannel(struct bchannel *bch)
1508 {
1509 	struct hfc_pci	*hc = bch->hw;
1510 	u_long		flags;
1511 
1512 	spin_lock_irqsave(&hc->lock, flags);
1513 	mISDN_clear_bchannel(bch);
1514 	mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1515 	spin_unlock_irqrestore(&hc->lock, flags);
1516 }
1517 
1518 /*
1519  * Layer 1 B-channel hardware access
1520  */
1521 static int
1522 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
1523 {
1524 	return mISDN_ctrl_bchannel(bch, cq);
1525 }
1526 static int
1527 hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1528 {
1529 	struct bchannel	*bch = container_of(ch, struct bchannel, ch);
1530 	struct hfc_pci	*hc = bch->hw;
1531 	int		ret = -EINVAL;
1532 	u_long		flags;
1533 
1534 	if (bch->debug & DEBUG_HW)
1535 		printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);
1536 	switch (cmd) {
1537 	case HW_TESTRX_RAW:
1538 		spin_lock_irqsave(&hc->lock, flags);
1539 		ret = set_hfcpci_rxtest(bch, ISDN_P_B_RAW, (int)(long)arg);
1540 		spin_unlock_irqrestore(&hc->lock, flags);
1541 		break;
1542 	case HW_TESTRX_HDLC:
1543 		spin_lock_irqsave(&hc->lock, flags);
1544 		ret = set_hfcpci_rxtest(bch, ISDN_P_B_HDLC, (int)(long)arg);
1545 		spin_unlock_irqrestore(&hc->lock, flags);
1546 		break;
1547 	case HW_TESTRX_OFF:
1548 		spin_lock_irqsave(&hc->lock, flags);
1549 		mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1550 		spin_unlock_irqrestore(&hc->lock, flags);
1551 		ret = 0;
1552 		break;
1553 	case CLOSE_CHANNEL:
1554 		test_and_clear_bit(FLG_OPEN, &bch->Flags);
1555 		deactivate_bchannel(bch);
1556 		ch->protocol = ISDN_P_NONE;
1557 		ch->peer = NULL;
1558 		module_put(THIS_MODULE);
1559 		ret = 0;
1560 		break;
1561 	case CONTROL_CHANNEL:
1562 		ret = channel_bctrl(bch, arg);
1563 		break;
1564 	default:
1565 		printk(KERN_WARNING "%s: unknown prim(%x)\n",
1566 		       __func__, cmd);
1567 	}
1568 	return ret;
1569 }
1570 
1571 /*
1572  * Layer2 -> Layer 1 Dchannel data
1573  */
1574 static int
1575 hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
1576 {
1577 	struct mISDNdevice	*dev = container_of(ch, struct mISDNdevice, D);
1578 	struct dchannel		*dch = container_of(dev, struct dchannel, dev);
1579 	struct hfc_pci		*hc = dch->hw;
1580 	int			ret = -EINVAL;
1581 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
1582 	unsigned int		id;
1583 	u_long			flags;
1584 
1585 	switch (hh->prim) {
1586 	case PH_DATA_REQ:
1587 		spin_lock_irqsave(&hc->lock, flags);
1588 		ret = dchannel_senddata(dch, skb);
1589 		if (ret > 0) { /* direct TX */
1590 			id = hh->id; /* skb can be freed */
1591 			hfcpci_fill_dfifo(dch->hw);
1592 			ret = 0;
1593 			spin_unlock_irqrestore(&hc->lock, flags);
1594 			queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1595 		} else
1596 			spin_unlock_irqrestore(&hc->lock, flags);
1597 		return ret;
1598 	case PH_ACTIVATE_REQ:
1599 		spin_lock_irqsave(&hc->lock, flags);
1600 		if (hc->hw.protocol == ISDN_P_NT_S0) {
1601 			ret = 0;
1602 			if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1603 				hc->hw.mst_m |= HFCPCI_MASTER;
1604 			Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1605 			if (test_bit(FLG_ACTIVE, &dch->Flags)) {
1606 				spin_unlock_irqrestore(&hc->lock, flags);
1607 				_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
1608 					    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1609 				break;
1610 			}
1611 			test_and_set_bit(FLG_L2_ACTIVATED, &dch->Flags);
1612 			Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1613 				  HFCPCI_DO_ACTION | 1);
1614 		} else
1615 			ret = l1_event(dch->l1, hh->prim);
1616 		spin_unlock_irqrestore(&hc->lock, flags);
1617 		break;
1618 	case PH_DEACTIVATE_REQ:
1619 		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1620 		spin_lock_irqsave(&hc->lock, flags);
1621 		if (hc->hw.protocol == ISDN_P_NT_S0) {
1622 			struct sk_buff_head free_queue;
1623 
1624 			__skb_queue_head_init(&free_queue);
1625 			/* prepare deactivation */
1626 			Write_hfc(hc, HFCPCI_STATES, 0x40);
1627 			skb_queue_splice_init(&dch->squeue, &free_queue);
1628 			if (dch->tx_skb) {
1629 				__skb_queue_tail(&free_queue, dch->tx_skb);
1630 				dch->tx_skb = NULL;
1631 			}
1632 			dch->tx_idx = 0;
1633 			if (dch->rx_skb) {
1634 				__skb_queue_tail(&free_queue, dch->rx_skb);
1635 				dch->rx_skb = NULL;
1636 			}
1637 			test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1638 			if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1639 				timer_delete(&dch->timer);
1640 #ifdef FIXME
1641 			if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
1642 				dchannel_sched_event(&hc->dch, D_CLEARBUSY);
1643 #endif
1644 			hc->hw.mst_m &= ~HFCPCI_MASTER;
1645 			Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1646 			ret = 0;
1647 			spin_unlock_irqrestore(&hc->lock, flags);
1648 			__skb_queue_purge(&free_queue);
1649 		} else {
1650 			ret = l1_event(dch->l1, hh->prim);
1651 			spin_unlock_irqrestore(&hc->lock, flags);
1652 		}
1653 		break;
1654 	}
1655 	if (!ret)
1656 		dev_kfree_skb(skb);
1657 	return ret;
1658 }
1659 
1660 /*
1661  * Layer2 -> Layer 1 Bchannel data
1662  */
1663 static int
1664 hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
1665 {
1666 	struct bchannel		*bch = container_of(ch, struct bchannel, ch);
1667 	struct hfc_pci		*hc = bch->hw;
1668 	int			ret = -EINVAL;
1669 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
1670 	unsigned long		flags;
1671 
1672 	switch (hh->prim) {
1673 	case PH_DATA_REQ:
1674 		spin_lock_irqsave(&hc->lock, flags);
1675 		ret = bchannel_senddata(bch, skb);
1676 		if (ret > 0) { /* direct TX */
1677 			hfcpci_fill_fifo(bch);
1678 			ret = 0;
1679 		}
1680 		spin_unlock_irqrestore(&hc->lock, flags);
1681 		return ret;
1682 	case PH_ACTIVATE_REQ:
1683 		spin_lock_irqsave(&hc->lock, flags);
1684 		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
1685 			ret = mode_hfcpci(bch, bch->nr, ch->protocol);
1686 		else
1687 			ret = 0;
1688 		spin_unlock_irqrestore(&hc->lock, flags);
1689 		if (!ret)
1690 			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
1691 				    NULL, GFP_KERNEL);
1692 		break;
1693 	case PH_DEACTIVATE_REQ:
1694 		deactivate_bchannel(bch);
1695 		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
1696 			    NULL, GFP_KERNEL);
1697 		ret = 0;
1698 		break;
1699 	}
1700 	if (!ret)
1701 		dev_kfree_skb(skb);
1702 	return ret;
1703 }
1704 
1705 /*
1706  * called for card init message
1707  */
1708 
1709 static void
1710 inithfcpci(struct hfc_pci *hc)
1711 {
1712 	printk(KERN_DEBUG "inithfcpci: entered\n");
1713 	timer_setup(&hc->dch.timer, hfcpci_dbusy_timer, 0);
1714 	hc->chanlimit = 2;
1715 	mode_hfcpci(&hc->bch[0], 1, -1);
1716 	mode_hfcpci(&hc->bch[1], 2, -1);
1717 }
1718 
1719 
1720 static int
1721 init_card(struct hfc_pci *hc)
1722 {
1723 	int	cnt = 3;
1724 	u_long	flags;
1725 
1726 	printk(KERN_DEBUG "init_card: entered\n");
1727 
1728 
1729 	spin_lock_irqsave(&hc->lock, flags);
1730 	disable_hwirq(hc);
1731 	spin_unlock_irqrestore(&hc->lock, flags);
1732 	if (request_irq(hc->irq, hfcpci_int, IRQF_SHARED, "HFC PCI", hc)) {
1733 		printk(KERN_WARNING
1734 		       "mISDN: couldn't get interrupt %d\n", hc->irq);
1735 		return -EIO;
1736 	}
1737 	spin_lock_irqsave(&hc->lock, flags);
1738 	reset_hfcpci(hc);
1739 	while (cnt) {
1740 		inithfcpci(hc);
1741 		/*
1742 		 * Finally enable IRQ output; this is only allowed once an
1743 		 * IRQ handler has been registered for this HFC, so don't
1744 		 * do it earlier
1745 		 */
1746 		enable_hwirq(hc);
1747 		spin_unlock_irqrestore(&hc->lock, flags);
1748 		/* Timeout 80ms */
1749 		set_current_state(TASK_UNINTERRUPTIBLE);
1750 		schedule_timeout((80 * HZ) / 1000);
1751 		printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
1752 		       hc->irq, hc->irqcnt);
1753 		/* now switch timer interrupt off */
1754 		spin_lock_irqsave(&hc->lock, flags);
1755 		hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1756 		Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1757 		/* reinit mode reg */
1758 		Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1759 		if (!hc->irqcnt) {
1760 			printk(KERN_WARNING
1761 			       "HFC PCI: IRQ(%d) getting no interrupts "
1762 			       "during init %d\n", hc->irq, 4 - cnt);
1763 			if (cnt == 1)
1764 				break;
1765 			else {
1766 				reset_hfcpci(hc);
1767 				cnt--;
1768 			}
1769 		} else {
1770 			spin_unlock_irqrestore(&hc->lock, flags);
1771 			hc->initdone = 1;
1772 			return 0;
1773 		}
1774 	}
1775 	disable_hwirq(hc);
1776 	spin_unlock_irqrestore(&hc->lock, flags);
1777 	free_irq(hc->irq, hc);
1778 	return -EIO;
1779 }
1780 
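/*
 * handle CONTROL_CHANNEL requests: B-channel test loops,
 * B-channel cross-connect/disconnect and the layer 1 timer 3 value
 */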
1781 static int
1782 channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
1783 {
1784 	int	ret = 0;
1785 	u_char	slot;
1786 
1787 	switch (cq->op) {
1788 	case MISDN_CTRL_GETOP:
1789 		cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
1790 			 MISDN_CTRL_DISCONNECT | MISDN_CTRL_L1_TIMER3;
1791 		break;
1792 	case MISDN_CTRL_LOOP:
1793 		/* a channel value of 0 disables the loop */
1794 		if (cq->channel < 0 || cq->channel > 2) {
1795 			ret = -EINVAL;
1796 			break;
1797 		}
1798 		if (cq->channel & 1) {
1799 			if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1800 				slot = 0xC0;
1801 			else
1802 				slot = 0x80;
1803 			printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1804 			       __func__, slot);
1805 			Write_hfc(hc, HFCPCI_B1_SSL, slot);
1806 			Write_hfc(hc, HFCPCI_B1_RSL, slot);
1807 			hc->hw.conn = (hc->hw.conn & ~7) | 6;
1808 			Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1809 		}
1810 		if (cq->channel & 2) {
1811 			if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1812 				slot = 0xC1;
1813 			else
1814 				slot = 0x81;
1815 			printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1816 			       __func__, slot);
1817 			Write_hfc(hc, HFCPCI_B2_SSL, slot);
1818 			Write_hfc(hc, HFCPCI_B2_RSL, slot);
1819 			hc->hw.conn = (hc->hw.conn & ~0x38) | 0x30;
1820 			Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1821 		}
1822 		if (cq->channel & 3)
1823 			hc->hw.trm |= 0x80;	/* enable IOM-loop */
1824 		else {
1825 			hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1826 			Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1827 			hc->hw.trm &= 0x7f;	/* disable IOM-loop */
1828 		}
1829 		Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1830 		break;
1831 	case MISDN_CTRL_CONNECT:
1832 		if (cq->channel == cq->p1) {
1833 			ret = -EINVAL;
1834 			break;
1835 		}
1836 		if (cq->channel < 1 || cq->channel > 2 ||
1837 		    cq->p1 < 1 || cq->p1 > 2) {
1838 			ret = -EINVAL;
1839 			break;
1840 		}
1841 		if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1842 			slot = 0xC0;
1843 		else
1844 			slot = 0x80;
1845 		printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1846 		       __func__, slot);
1847 		Write_hfc(hc, HFCPCI_B1_SSL, slot);
1848 		Write_hfc(hc, HFCPCI_B2_RSL, slot);
1849 		if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1850 			slot = 0xC1;
1851 		else
1852 			slot = 0x81;
1853 		printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1854 		       __func__, slot);
1855 		Write_hfc(hc, HFCPCI_B2_SSL, slot);
1856 		Write_hfc(hc, HFCPCI_B1_RSL, slot);
1857 		hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x36;
1858 		Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1859 		hc->hw.trm |= 0x80;
1860 		Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1861 		break;
1862 	case MISDN_CTRL_DISCONNECT:
1863 		hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1864 		Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1865 		hc->hw.trm &= 0x7f;	/* disable IOM-loop */
1866 		break;
1867 	case MISDN_CTRL_L1_TIMER3:
1868 		ret = l1_event(hc->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
1869 		break;
1870 	default:
1871 		printk(KERN_WARNING "%s: unknown Op %x\n",
1872 		       __func__, cq->op);
1873 		ret = -EINVAL;
1874 		break;
1875 	}
1876 	return ret;
1877 }
1878 
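/*
 * open the D-channel; the hardware is initialized on first use and an
 * already activated layer 1 is reported to the new user
 */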
1879 static int
1880 open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch,
1881 	      struct channel_req *rq)
1882 {
1883 	int err = 0;
1884 
1885 	if (debug & DEBUG_HW_OPEN)
1886 		printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
1887 		       hc->dch.dev.id, __builtin_return_address(0));
1888 	if (rq->protocol == ISDN_P_NONE)
1889 		return -EINVAL;
1890 	if (rq->adr.channel == 1) {
1891 		/* TODO: E-Channel */
1892 		return -EINVAL;
1893 	}
1894 	if (!hc->initdone) {
1895 		if (rq->protocol == ISDN_P_TE_S0) {
1896 			err = create_l1(&hc->dch, hfc_l1callback);
1897 			if (err)
1898 				return err;
1899 		}
1900 		hc->hw.protocol = rq->protocol;
1901 		ch->protocol = rq->protocol;
1902 		err = init_card(hc);
1903 		if (err)
1904 			return err;
1905 	} else {
1906 		if (rq->protocol != ch->protocol) {
1907 			if (hc->hw.protocol == ISDN_P_TE_S0)
1908 				l1_event(hc->dch.l1, CLOSE_CHANNEL);
1909 			if (rq->protocol == ISDN_P_TE_S0) {
1910 				err = create_l1(&hc->dch, hfc_l1callback);
1911 				if (err)
1912 					return err;
1913 			}
1914 			hc->hw.protocol = rq->protocol;
1915 			ch->protocol = rq->protocol;
1916 			hfcpci_setmode(hc);
1917 		}
1918 	}
1919 
1920 	if (((ch->protocol == ISDN_P_NT_S0) && (hc->dch.state == 3)) ||
1921 	    ((ch->protocol == ISDN_P_TE_S0) && (hc->dch.state == 7))) {
1922 		_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
1923 			    0, NULL, GFP_KERNEL);
1924 	}
1925 	rq->ch = ch;
1926 	if (!try_module_get(THIS_MODULE))
1927 		printk(KERN_WARNING "%s: cannot get module\n", __func__);
1928 	return 0;
1929 }
1930 
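/*
 * open one of the two B-channels; each channel can only be opened once
 */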
1931 static int
1932 open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
1933 {
1934 	struct bchannel		*bch;
1935 
1936 	if (rq->adr.channel == 0 || rq->adr.channel > 2)
1937 		return -EINVAL;
1938 	if (rq->protocol == ISDN_P_NONE)
1939 		return -EINVAL;
1940 	bch = &hc->bch[rq->adr.channel - 1];
1941 	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
1942 		return -EBUSY; /* b-channel can be only open once */
1943 	bch->ch.protocol = rq->protocol;
1944 	rq->ch = &bch->ch; /* TODO: E-channel */
1945 	if (!try_module_get(THIS_MODULE))
1946 		printk(KERN_WARNING "%s: cannot get module\n", __func__);
1947 	return 0;
1948 }
1949 
1950 /*
1951  * device control function
1952  */
1953 static int
1954 hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1955 {
1956 	struct mISDNdevice	*dev = container_of(ch, struct mISDNdevice, D);
1957 	struct dchannel		*dch = container_of(dev, struct dchannel, dev);
1958 	struct hfc_pci		*hc = dch->hw;
1959 	struct channel_req	*rq;
1960 	int			err = 0;
1961 
1962 	if (dch->debug & DEBUG_HW)
1963 		printk(KERN_DEBUG "%s: cmd:%x %p\n",
1964 		       __func__, cmd, arg);
1965 	switch (cmd) {
1966 	case OPEN_CHANNEL:
1967 		rq = arg;
1968 		if ((rq->protocol == ISDN_P_TE_S0) ||
1969 		    (rq->protocol == ISDN_P_NT_S0))
1970 			err = open_dchannel(hc, ch, rq);
1971 		else
1972 			err = open_bchannel(hc, rq);
1973 		break;
1974 	case CLOSE_CHANNEL:
1975 		if (debug & DEBUG_HW_OPEN)
1976 			printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
1977 			       __func__, hc->dch.dev.id,
1978 			       __builtin_return_address(0));
1979 		module_put(THIS_MODULE);
1980 		break;
1981 	case CONTROL_CHANNEL:
1982 		err = channel_ctrl(hc, arg);
1983 		break;
1984 	default:
1985 		if (dch->debug & DEBUG_HW)
1986 			printk(KERN_DEBUG "%s: unknown command %x\n",
1987 			       __func__, cmd);
1988 		return -EINVAL;
1989 	}
1990 	return err;
1991 }
1992 
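/*
 * map the controller's register window and allocate the 32k FIFO
 * memory (32k aligned, below 4GB)
 */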
1993 static int
1994 setup_hw(struct hfc_pci *hc)
1995 {
1996 	void	*buffer;
1997 
1998 	printk(KERN_INFO "mISDN: HFC-PCI driver %s\n", hfcpci_revision);
1999 	hc->hw.cirm = 0;
2000 	hc->dch.state = 0;
2001 	pci_set_master(hc->pdev);
2002 	if (!hc->irq) {
2003 		printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
2004 		return -EINVAL;
2005 	}
2006 	hc->hw.pci_io =
2007 		(char __iomem *)(unsigned long)hc->pdev->resource[1].start;
2008 
2009 	if (!hc->hw.pci_io) {
2010 		printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
2011 		return -ENOMEM;
2012 	}
2013 	/* Allocate memory for FIFOS */
2014 	/* the memory needs to be on a 32k boundary within the first 4G */
2015 	if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) {
2016 		printk(KERN_WARNING
2017 		       "HFC-PCI: No usable DMA configuration!\n");
2018 		return -EIO;
2019 	}
2020 	buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle,
2021 				    GFP_KERNEL);
2022 	/* We silently assume the address is okay if nonzero */
2023 	if (!buffer) {
2024 		printk(KERN_WARNING
2025 		       "HFC-PCI: Error allocating memory for FIFO!\n");
2026 		return -ENOMEM;
2027 	}
2028 	hc->hw.fifos = buffer;
2029 	pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
2030 	hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
2031 	if (unlikely(!hc->hw.pci_io)) {
2032 		printk(KERN_WARNING
2033 		       "HFC-PCI: Error in ioremap for PCI!\n");
2034 		dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
2035 				  hc->hw.dmahandle);
2036 		return -ENOMEM;
2037 	}
2038 
2039 	printk(KERN_INFO
2040 	       "HFC-PCI: defined at mem %#lx fifo %p(%pad) IRQ %d HZ %d\n",
2041 	       (u_long) hc->hw.pci_io, hc->hw.fifos,
2042 	       &hc->hw.dmahandle, hc->irq, HZ);
2043 
2044 	/* enable memory mapped ports, disable busmaster */
2045 	pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
2046 	hc->hw.int_m2 = 0;
2047 	disable_hwirq(hc);
2048 	hc->hw.int_m1 = 0;
2049 	Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
2050 	/* At this point the needed PCI config is done */
2051 	/* fifos are still not enabled */
2052 	timer_setup(&hc->hw.timer, hfcpci_Timer, 0);
2053 	/* default PCM master */
2054 	test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
2055 	return 0;
2056 }
2057 
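/*
 * shut the card down and release IRQ, I/O mapping and channel resources
 */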
2058 static void
2059 release_card(struct hfc_pci *hc) {
2060 	u_long	flags;
2061 
2062 	spin_lock_irqsave(&hc->lock, flags);
2063 	hc->hw.int_m2 = 0; /* interrupt output off ! */
2064 	disable_hwirq(hc);
2065 	mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE);
2066 	mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE);
2067 	if (hc->dch.timer.function != NULL) {
2068 		timer_delete(&hc->dch.timer);
2069 		hc->dch.timer.function = NULL;
2070 	}
2071 	spin_unlock_irqrestore(&hc->lock, flags);
2072 	if (hc->hw.protocol == ISDN_P_TE_S0)
2073 		l1_event(hc->dch.l1, CLOSE_CHANNEL);
2074 	if (hc->initdone)
2075 		free_irq(hc->irq, hc);
2076 	release_io_hfcpci(hc); /* must release after free_irq! */
2077 	mISDN_unregister_device(&hc->dch.dev);
2078 	mISDN_freebchannel(&hc->bch[1]);
2079 	mISDN_freebchannel(&hc->bch[0]);
2080 	mISDN_freedchannel(&hc->dch);
2081 	pci_set_drvdata(hc->pdev, NULL);
2082 	kfree(hc);
2083 }
2084 
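/*
 * initialize the D- and B-channel structures, set up the hardware and
 * register the device with the mISDN core
 */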
2085 static int
2086 setup_card(struct hfc_pci *card)
2087 {
2088 	int		err = -EINVAL;
2089 	u_int		i;
2090 	char		name[MISDN_MAX_IDLEN];
2091 
2092 	card->dch.debug = debug;
2093 	spin_lock_init(&card->lock);
2094 	mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state);
2095 	card->dch.hw = card;
2096 	card->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
2097 	card->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
2098 		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
2099 	card->dch.dev.D.send = hfcpci_l2l1D;
2100 	card->dch.dev.D.ctrl = hfc_dctrl;
2101 	card->dch.dev.nrbchan = 2;
2102 	for (i = 0; i < 2; i++) {
2103 		card->bch[i].nr = i + 1;
2104 		set_channelmap(i + 1, card->dch.dev.channelmap);
2105 		card->bch[i].debug = debug;
2106 		mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM, poll >> 1);
2107 		card->bch[i].hw = card;
2108 		card->bch[i].ch.send = hfcpci_l2l1B;
2109 		card->bch[i].ch.ctrl = hfc_bctrl;
2110 		card->bch[i].ch.nr = i + 1;
2111 		list_add(&card->bch[i].ch.list, &card->dch.dev.bchannels);
2112 	}
2113 	err = setup_hw(card);
2114 	if (err)
2115 		goto error;
2116 	snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1);
2117 	err = mISDN_register_device(&card->dch.dev, &card->pdev->dev, name);
2118 	if (err)
2119 		goto error;
2120 	HFC_cnt++;
2121 	printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt);
2122 	return 0;
2123 error:
2124 	mISDN_freebchannel(&card->bch[1]);
2125 	mISDN_freebchannel(&card->bch[0]);
2126 	mISDN_freedchannel(&card->dch);
2127 	kfree(card);
2128 	return err;
2129 }
2130 
2131 /* private data in the PCI devices list */
2132 struct _hfc_map {
2133 	u_int	subtype;
2134 	u_int	flag;
2135 	char	*name;
2136 };
2137 
2138 static const struct _hfc_map hfc_map[] =
2139 {
2140 	{HFC_CCD_2BD0, 0, "CCD/Billion/Asuscom 2BD0"},
2141 	{HFC_CCD_B000, 0, "Billion B000"},
2142 	{HFC_CCD_B006, 0, "Billion B006"},
2143 	{HFC_CCD_B007, 0, "Billion B007"},
2144 	{HFC_CCD_B008, 0, "Billion B008"},
2145 	{HFC_CCD_B009, 0, "Billion B009"},
2146 	{HFC_CCD_B00A, 0, "Billion B00A"},
2147 	{HFC_CCD_B00B, 0, "Billion B00B"},
2148 	{HFC_CCD_B00C, 0, "Billion B00C"},
2149 	{HFC_CCD_B100, 0, "Seyeon B100"},
2150 	{HFC_CCD_B700, 0, "Primux II S0 B700"},
2151 	{HFC_CCD_B701, 0, "Primux II S0 NT B701"},
2152 	{HFC_ABOCOM_2BD1, 0, "Abocom/Magitek 2BD1"},
2153 	{HFC_ASUS_0675, 0, "Asuscom/Askey 675"},
2154 	{HFC_BERKOM_TCONCEPT, 0, "German telekom T-Concept"},
2155 	{HFC_BERKOM_A1T, 0, "German telekom A1T"},
2156 	{HFC_ANIGMA_MC145575, 0, "Motorola MC145575"},
2157 	{HFC_ZOLTRIX_2BD0, 0, "Zoltrix 2BD0"},
2158 	{HFC_DIGI_DF_M_IOM2_E, 0,
2159 	 "Digi International DataFire Micro V IOM2 (Europe)"},
2160 	{HFC_DIGI_DF_M_E, 0,
2161 	 "Digi International DataFire Micro V (Europe)"},
2162 	{HFC_DIGI_DF_M_IOM2_A, 0,
2163 	 "Digi International DataFire Micro V IOM2 (North America)"},
2164 	{HFC_DIGI_DF_M_A, 0,
2165 	 "Digi International DataFire Micro V (North America)"},
2166 	{HFC_SITECOM_DC105V2, 0, "Sitecom Connectivity DC-105 ISDN TA"},
2167 	{},
2168 };
2169 
2170 static const struct pci_device_id hfc_ids[] =
2171 {
2172 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0),
2173 	  (unsigned long) &hfc_map[0] },
2174 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B000),
2175 	  (unsigned long) &hfc_map[1] },
2176 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B006),
2177 	  (unsigned long) &hfc_map[2] },
2178 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B007),
2179 	  (unsigned long) &hfc_map[3] },
2180 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B008),
2181 	  (unsigned long) &hfc_map[4] },
2182 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B009),
2183 	  (unsigned long) &hfc_map[5] },
2184 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00A),
2185 	  (unsigned long) &hfc_map[6] },
2186 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00B),
2187 	  (unsigned long) &hfc_map[7] },
2188 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00C),
2189 	  (unsigned long) &hfc_map[8] },
2190 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B100),
2191 	  (unsigned long) &hfc_map[9] },
2192 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B700),
2193 	  (unsigned long) &hfc_map[10] },
2194 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B701),
2195 	  (unsigned long) &hfc_map[11] },
2196 	{ PCI_VDEVICE(ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1),
2197 	  (unsigned long) &hfc_map[12] },
2198 	{ PCI_VDEVICE(ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675),
2199 	  (unsigned long) &hfc_map[13] },
2200 	{ PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT),
2201 	  (unsigned long) &hfc_map[14] },
2202 	{ PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_A1T),
2203 	  (unsigned long) &hfc_map[15] },
2204 	{ PCI_VDEVICE(ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575),
2205 	  (unsigned long) &hfc_map[16] },
2206 	{ PCI_VDEVICE(ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0),
2207 	  (unsigned long) &hfc_map[17] },
2208 	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E),
2209 	  (unsigned long) &hfc_map[18] },
2210 	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_E),
2211 	  (unsigned long) &hfc_map[19] },
2212 	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A),
2213 	  (unsigned long) &hfc_map[20] },
2214 	{ PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_A),
2215 	  (unsigned long) &hfc_map[21] },
2216 	{ PCI_VDEVICE(SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2),
2217 	  (unsigned long) &hfc_map[22] },
2218 	{},
2219 };
2220 
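/*
 * PCI probe: allocate a card structure, enable the device and set it up
 */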
2221 static int
2222 hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2223 {
2224 	int		err = -ENOMEM;
2225 	struct hfc_pci	*card;
2226 	struct _hfc_map	*m = (struct _hfc_map *)ent->driver_data;
2227 
2228 	card = kzalloc(sizeof(struct hfc_pci), GFP_KERNEL);
2229 	if (!card) {
2230 		printk(KERN_ERR "No kmem for HFC card\n");
2231 		return err;
2232 	}
2233 	card->pdev = pdev;
2234 	card->subtype = m->subtype;
2235 	err = pci_enable_device(pdev);
2236 	if (err) {
2237 		kfree(card);
2238 		return err;
2239 	}
2240 
2241 	printk(KERN_INFO "mISDN_hfcpci: found adapter %s at %s\n",
2242 	       m->name, pci_name(pdev));
2243 
2244 	card->irq = pdev->irq;
2245 	pci_set_drvdata(pdev, card);
2246 	err = setup_card(card);
2247 	if (err)
2248 		pci_set_drvdata(pdev, NULL);
2249 	return err;
2250 }
2251 
2252 static void
2253 hfc_remove_pci(struct pci_dev *pdev)
2254 {
2255 	struct hfc_pci	*card = pci_get_drvdata(pdev);
2256 
2257 	if (card)
2258 		release_card(card);
2259 	else
2260 		if (debug)
2261 			printk(KERN_DEBUG "%s: drvdata already removed\n",
2262 			       __func__);
2263 }
2264 
2265 
2266 static struct pci_driver hfc_driver = {
2267 	.name = "hfcpci",
2268 	.probe = hfc_probe,
2269 	.remove = hfc_remove_pci,
2270 	.id_table = hfc_ids,
2271 };
2272 
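/*
 * poll the transparent (RAW) B-channels of one card; called from the
 * global poll timer
 */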
2273 static int
2274 _hfcpci_softirq(struct device *dev, void *unused)
2275 {
2276 	struct hfc_pci  *hc = dev_get_drvdata(dev);
2277 	struct bchannel *bch;
2278 	if (hc == NULL)
2279 		return 0;
2280 
2281 	if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
2282 		spin_lock_irq(&hc->lock);
2283 		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
2284 		if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
2285 			main_rec_hfcpci(bch);
2286 			tx_birq(bch);
2287 		}
2288 		bch = Sel_BCS(hc, hc->hw.bswapped ? 1 : 2);
2289 		if (bch && bch->state == ISDN_P_B_RAW) { /* B2 rx&tx */
2290 			main_rec_hfcpci(bch);
2291 			tx_birq(bch);
2292 		}
2293 		spin_unlock_irq(&hc->lock);
2294 	}
2295 	return 0;
2296 }
2297 
2298 static void
2299 hfcpci_softirq(struct timer_list *unused)
2300 {
2301 	WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, NULL,
2302 				      _hfcpci_softirq) != 0);
2303 
2304 	/* if next event would be in the past ... */
2305 	if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
2306 		hfc_jiffies = jiffies + 1;
2307 	else
2308 		hfc_jiffies += tics;
2309 	mod_timer(&hfc_tl, hfc_jiffies);
2310 }
2311 
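/*
 * module init: validate and round the poll parameter, start the poll
 * timer when a non-default value is used and register the PCI driver
 */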
2312 static int __init
2313 HFC_init(void)
2314 {
2315 	int		err;
2316 
2317 	if (!poll)
2318 		poll = HFCPCI_BTRANS_THRESHOLD;
2319 
2320 	if (poll != HFCPCI_BTRANS_THRESHOLD) {
2321 		tics = (poll * HZ) / 8000;
2322 		if (tics < 1)
2323 			tics = 1;
2324 		poll = (tics * 8000) / HZ;
2325 		if (poll > 256 || poll < 8) {
2326 			printk(KERN_ERR "%s: Wrong poll value %d not in range "
2327 			       "of 8..256.\n", __func__, poll);
2328 			err = -EINVAL;
2329 			return err;
2330 		}
2331 	}
2332 	if (poll != HFCPCI_BTRANS_THRESHOLD) {
2333 		printk(KERN_INFO "%s: Using alternative poll value of %d\n",
2334 		       __func__, poll);
2335 		hfc_jiffies = jiffies + tics;
2336 		mod_timer(&hfc_tl, hfc_jiffies);
2337 	} else
2338 		tics = 0; /* indicate the use of controller's timer */
2339 
2340 	err = pci_register_driver(&hfc_driver);
2341 	if (err) {
2342 		if (timer_pending(&hfc_tl))
2343 			timer_delete(&hfc_tl);
2344 	}
2345 
2346 	return err;
2347 }
2348 
2349 static void __exit
2350 HFC_cleanup(void)
2351 {
2352 	timer_delete_sync(&hfc_tl);
2353 
2354 	pci_unregister_driver(&hfc_driver);
2355 }
2356 
2357 module_init(HFC_init);
2358 module_exit(HFC_cleanup);
2359 
2360 MODULE_DEVICE_TABLE(pci, hfc_ids);
2361