1 /******************************************************************************
2 iphase.c: Device driver for Interphase ATM PCI adapter cards
3 Author: Peter Wang <pwang@iphase.com>
4 Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 Interphase Corporation <www.iphase.com>
6 Version: 1.0
7 *******************************************************************************
8
9 This software may be used and distributed according to the terms
10 of the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on this skeleton fall under the GPL and must retain
12 the authorship (implicit copyright) notice.
13
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
18
19 Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20 was originally written by Monalisa Agrawal at UNH. Now this driver
supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22 card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23 in terms of PHY type, the size of control memory and the size of
24 packet memory. The following are the change log and history:
25
26 Bugfix the Mona's UBR driver.
27 Modify the basic memory allocation and dma logic.
28 Port the driver to the latest kernel from 2.0.46.
29 Complete the ABR logic of the driver, and added the ABR work-
around for the hardware anomalies.
31 Add the CBR support.
32 Add the flow control logic to the driver to allow rate-limit VC.
33 Add 4K VC support to the board with 512K control memory.
34 Add the support of all the variants of the Interphase ATM PCI
35 (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36 (25M UTP25) and x531 (DS3 and E3).
37 Add SMP support.
38
39 Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41 *******************************************************************************/
42
43 #include <linux/module.h>
44 #include <linux/kernel.h>
45 #include <linux/mm.h>
46 #include <linux/pci.h>
47 #include <linux/errno.h>
48 #include <linux/atm.h>
49 #include <linux/atmdev.h>
50 #include <linux/ctype.h>
51 #include <linux/sonet.h>
52 #include <linux/skbuff.h>
53 #include <linux/time.h>
54 #include <linux/delay.h>
55 #include <linux/uio.h>
56 #include <linux/init.h>
57 #include <linux/interrupt.h>
58 #include <linux/wait.h>
59 #include <linux/slab.h>
60 #include <asm/io.h>
61 #include <linux/atomic.h>
62 #include <linux/uaccess.h>
63 #include <asm/string.h>
64 #include <asm/byteorder.h>
65 #include <linux/vmalloc.h>
66 #include <linux/jiffies.h>
67 #include <linux/nospec.h>
68 #include "iphase.h"
69 #include "suni.h"
/*
 * Swap the two bytes of a 16-bit value.
 * Every use of the parameter is parenthesized so that expression
 * arguments (e.g. swap_byte_order(a | b)) expand correctly; the
 * original macro applied "& 0xff" to the raw token sequence.
 */
#define swap_byte_order(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
71
72 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
73
/* Forward declarations for helpers defined later in this file. */
static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
static void desc_dbg(IADEV *iadev);

/* Up to 8 adapters are supported; ia_dev[]/_ia_dev[] hold per-board state. */
static IADEV *ia_dev[8];
static struct atm_dev *_ia_dev[8];
static int iadev_count;
static void ia_led_timer(struct timer_list *unused);
static DEFINE_TIMER(ia_timer, ia_led_timer);
/* Buffer-pool geometry; the module parameters below override the defaults. */
static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
/* Debug category bitmask consumed by the IF_* logging macros (0 = quiet). */
static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
            |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;

module_param(IA_TX_BUF, int, 0);
module_param(IA_TX_BUF_SZ, int, 0);
module_param(IA_RX_BUF, int, 0);
module_param(IA_RX_BUF_SZ, int, 0);
/* Writable at runtime through sysfs (mode 0644) to toggle debug output. */
module_param(IADebugFlag, uint, 0644);

MODULE_DESCRIPTION("Driver for Interphase ATM PCI NICs");
MODULE_LICENSE("GPL");
95
96 /**************************** IA_LIB **********************************/
97
/* Reset a return queue to the empty state (no head, no tail). */
static void ia_init_rtn_q(IARTN_Q *que)
{
	que->tail = NULL;
	que->next = NULL;
}
103
/* Push an already-allocated entry back at the HEAD of the return queue
 * (used to re-queue an entry that could not be processed yet). */
static void ia_enque_head_rtn_q(IARTN_Q *que, IARTN_Q *data)
{
	if (que->next == NULL) {
		/* Empty queue: the new entry is both head and tail. */
		data->next = NULL;
		que->next = que->tail = data;
	} else {
		/* Non-empty: splice in front of the current head. */
		data->next = que->next;
		que->next = data;
	}
}
115
/* Append a copy of @data at the TAIL of the return queue.
 * Allocates the queue node with GFP_ATOMIC (called from IRQ context).
 * Returns 1 on success, -ENOMEM if the node cannot be allocated. */
static int ia_enque_rtn_q(IARTN_Q *que, struct desc_tbl_t data)
{
	IARTN_Q *entry;

	entry = kmalloc_obj(*entry, GFP_ATOMIC);
	if (entry == NULL)
		return -ENOMEM;
	entry->data = data;
	entry->next = NULL;
	if (que->next) {
		/* Non-empty: link after the current tail. */
		que->tail->next = entry;
		que->tail = entry;
	} else {
		/* Empty: entry becomes both head and tail. */
		que->next = que->tail = entry;
	}
	return 1;
}
130
/* Pop and return the head of the return queue, or NULL if it is empty.
 * The caller owns (and must kfree) the returned node. */
static IARTN_Q *ia_deque_rtn_q(IARTN_Q *que)
{
	IARTN_Q *head = que->next;

	if (head == NULL)
		return NULL;
	if (head == que->tail)
		/* Removing the last element empties the queue. */
		que->next = que->tail = NULL;
	else
		que->next = head->next;
	return head;
}
142
/*
 * Reap completed transmit descriptors from the hardware TCQ
 * (Transmit Complete Queue) and reconcile them with the host-side
 * descriptor table.  Runs until host_tcq_wr catches up with the
 * hardware write pointer.
 */
static void ia_hack_tcq(IADEV *dev)
{
	u_short desc1;
	u_short tcq_wr;
	struct ia_vcc *iavcc_r = NULL;

	/* Current hardware TCQ write pointer (16-bit offset into seg_ram). */
	tcq_wr = readl(dev->seg_reg + TCQ_WR_PTR) & 0xffff;
	while (dev->host_tcq_wr != tcq_wr) {
		desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
		if (!desc1) {
			/* Empty slot - nothing to reclaim. */
		} else if (!dev->desc_tbl[desc1 - 1].timestamp) {
			/* Already recovered by the timeout path in get_desc();
			 * just clear the stale TCQ entry. */
			IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 - 1, jiffies);)
			*(u_short *)(dev->seg_ram + dev->host_tcq_wr) = 0;
		} else if (!(iavcc_r = dev->desc_tbl[desc1 - 1].iavcc)) {
			/* BUG FIX: the original code did "continue" here without
			 * advancing host_tcq_wr, so a corrupted table entry made
			 * this loop spin forever.  Log it and step past. */
			printk("IA: Fatal err in get_desc\n");
		} else {
			iavcc_r->vc_desc_cnt--;
			dev->desc_tbl[desc1 - 1].timestamp = 0;
			IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
					dev->desc_tbl[desc1 - 1].txskb, desc1);)
			/* Rate-limited VCs hand the skb to ia_tx_poll() via the
			 * return queue instead of freeing it here. */
			if (iavcc_r->pcr < dev->rate_limit) {
				IA_SKB_STATE(dev->desc_tbl[desc1 - 1].txskb) |= IA_TX_DONE;
				if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 - 1]) < 0)
					printk("ia_hack_tcq: No memory available\n");
			}
			dev->desc_tbl[desc1 - 1].iavcc = NULL;
			dev->desc_tbl[desc1 - 1].txskb = NULL;
		}
		dev->host_tcq_wr += 2;	/* each TCQ entry is one u16 */
		if (dev->host_tcq_wr > dev->ffL.tcq_ed)
			dev->host_tcq_wr = dev->ffL.tcq_st;	/* ring wrap */
	}
} /* ia_hack_tcq */
179
/*
 * Allocate the next free transmit descriptor number from the TCQ ring.
 * First reaps completed descriptors (ia_hack_tcq) and, at most every 50
 * jiffies or when the ring looks empty, recovers descriptors whose VC
 * timeout (ltimeout) has expired by pushing them back onto the ring.
 * Returns a 1-based descriptor number, or 0xFFFF if none is available.
 * NOTE(review): @iavcc is currently unused; the static "timer" below is
 * shared across all adapters without locking - presumably benign since
 * it only rate-limits the recovery scan, but confirm for SMP.
 */
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
  u_short desc_num, i;
  struct ia_vcc *iavcc_r = NULL;
  unsigned long delta;
  static unsigned long timer = 0;   /* last recovery-scan time (jiffies) */
  int ltimeout;

  ia_hack_tcq (dev);
  if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
     timer = jiffies;
     i=0;
     /* Scan the whole descriptor table for entries stuck past their
      * per-VC timeout and recycle them into the TCQ. */
     while (i < dev->num_tx_desc) {
        if (!dev->desc_tbl[i].timestamp) {
           i++;
           continue;
        }
        ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
        delta = jiffies - dev->desc_tbl[i].timestamp;
        if (delta >= ltimeout) {
           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
           /* Step the read pointer back one slot (with wrap) so the
            * recovered descriptor is the next one handed out. */
           if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
              dev->ffL.tcq_rd = dev->ffL.tcq_ed;
           else
              dev->ffL.tcq_rd -= 2;
           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
           if (!dev->desc_tbl[i].txskb || !(iavcc_r = dev->desc_tbl[i].iavcc))
              printk("Fatal err, desc table vcc or skb is NULL\n");
           else
              iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[i].timestamp = 0;
           dev->desc_tbl[i].iavcc = NULL;
           dev->desc_tbl[i].txskb = NULL;
        }
        i++;
     } /* while */
  }
  /* Ring empty: no free descriptor right now. */
  if (dev->ffL.tcq_rd == dev->host_tcq_wr)
     return 0xFFFF;

  /* Get the next available descriptor number from TCQ */
  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

  /* Skip zero slots and descriptors that are still outstanding
   * (non-zero timestamp), wrapping around the ring as needed. */
  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
     dev->ffL.tcq_rd += 2;
     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
        dev->ffL.tcq_rd = dev->ffL.tcq_st;
     if (dev->ffL.tcq_rd == dev->host_tcq_wr)
        return 0xFFFF;
     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
  }

  /* get system time */
  dev->desc_tbl[desc_num -1].timestamp = jiffies;
  return desc_num;
}
235
/*
 * Hardware workaround: detect and clear a lock-up of the segmentation
 * engine on an ABR VC.  Called periodically; every 5th call (vc_status
 * counter) it samples the main/extended VC tables and declares a lock-up
 * when either (a) the VC stays in ABR_STATE across a 10us delay, or
 * (b) its cell slot and fraction fields have not moved since the last
 * sample.  Recovery forces the VC idle, re-inserts its VCI into the ABR
 * schedule table and re-enables the transmitter.
 */
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
  u_char	foundLockUp;
  vcstatus_t	*vcstatus;
  u_short	*shd_tbl;
  u_short	tempCellSlot, tempFract;
  struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
  struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
  u_int  i;

  if (vcc->qos.txtp.traffic_class == ATM_ABR) {
     vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
     vcstatus->cnt++;
     foundLockUp = 0;
     if( vcstatus->cnt == 0x05 ) {   /* sample only every 5th invocation */
        abr_vc += vcc->vci;
	eabr_vc += vcc->vci;
	if( eabr_vc->last_desc ) {
	   if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
              /* Wait for 10 Micro sec */
              udelay(10);
	      /* Still in ABR_STATE with a pending descriptor -> stuck. */
	      if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
		 foundLockUp = 1;
           }
	   else {
	      /* Not in ABR_STATE: compare progress counters against the
	       * values recorded on the previous sample. */
	      tempCellSlot = abr_vc->last_cell_slot;
              tempFract    = abr_vc->fraction;
	      if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                         && (tempFract == dev->testTable[vcc->vci]->fract))
	        foundLockUp = 1;
              dev->testTable[vcc->vci]->lastTime = tempCellSlot;
              dev->testTable[vcc->vci]->fract = tempFract;
	   }
        } /* last descriptor */
	vcstatus->cnt = 0;
     } /* vcstatus->cnt */

     if (foundLockUp) {
        IF_ABR(printk("LOCK UP found\n");)
	writew(0xFFFD, dev->seg_reg+MODE_REG_0);   /* stall the transmitter */
        /* Wait for 10 Micro sec */
        udelay(10);
        abr_vc->status &= 0xFFF8;
        abr_vc->status |= 0x0001;  /* state is idle */
	shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
	/* Re-insert the VCI into the first free ABR schedule slot. */
	for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
	if (i < dev->num_vc)
           shd_tbl[i] = vcc->vci;
        else
           IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
        writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
        writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
	vcstatus->cnt = 0;
     } /* foundLockUp */

  } /* if an ABR VC */


}
295
296 /*
297 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
298 **
299 ** +----+----+------------------+-------------------------------+
300 ** | R | NZ | 5-bit exponent | 9-bit mantissa |
301 ** +----+----+------------------+-------------------------------+
302 **
303 ** R = reserved (written as 0)
304 ** NZ = 0 if 0 cells/sec; 1 otherwise
305 **
306 ** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
307 */
/*
 * Convert a 24-bit cell rate (cells/sec) to the hardware's 16-bit
 * floating point format described above: NZ flag, 5-bit exponent,
 * 9-bit mantissa (implicit leading 1).
 */
static u16
cellrate_to_float(u32 cr)
{

#define	NZ 		0x4000
#define	M_BITS		9		/* Number of bits in mantissa */
#define	E_BITS		5		/* Number of bits in exponent */
#define	M_MASK		0x1ff
#define	E_MASK		0x1f
	u16	flot;
	u32	tmp = cr & 0x00ffffff;
	int 	i   = 0;
	/* BUG FIX: test the masked 24-bit value rather than raw cr.  A cr
	 * with only bits above bit 23 set made tmp == 0, and the shift
	 * loop below (which stops only at tmp == 1) never terminated. */
	if (tmp == 0)
		return 0;
	/* i := floor(log2(tmp)), i.e. the exponent. */
	while (tmp != 1) {
		tmp >>= 1;
		i++;
	}
	/* Align the mantissa under the implicit leading 1. */
	if (i == M_BITS)
		flot = NZ | (i << M_BITS) | (cr & M_MASK);
	else if (i < M_BITS)
		flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
	else
		flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
	return flot;
}
334
#if 0
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
** (Inverse of cellrate_to_float; currently compiled out - kept for
** reference.)
*/
static u32
float_to_cellrate(u16 rate)
{
	u32	exp, mantissa, cps;
	if ((rate & NZ) == 0)
		return 0;
	exp = (rate >> M_BITS) & E_MASK;
	mantissa = rate & M_MASK;
	if (exp == 0)
		return 1;
	cps = (1 << M_BITS) | mantissa;   /* restore the implicit leading 1 */
	if (exp == M_BITS)
		cps = cps;
	else if (exp > M_BITS)
		cps <<= (exp - M_BITS);
	else
		cps >>= (M_BITS - exp);
	return cps;
}
#endif
359
/* Fill @srv_p with the driver's default ABR service parameters.
 * Callers may override individual fields before ia_open_abr_vc(). */
static void init_abr_vc(IADEV *dev, srv_cls_param_t *srv_p)
{
	srv_p->class_type = ATM_ABR;
	srv_p->pcr  = dev->LineRate;	/* peak rate = full line rate */
	srv_p->mcr  = 0;		/* no guaranteed minimum */
	srv_p->icr  = 0x055cb7;		/* initial cell rate */
	srv_p->tbe  = 0xffffff;		/* transient buffer exposure */
	srv_p->frtt = 0x3a;		/* fixed round-trip time */
	srv_p->rif  = 0xf;		/* rate increase factor */
	srv_p->rdf  = 0xb;		/* rate decrease factor */
	srv_p->nrm  = 0x4;
	srv_p->trm  = 0x7;
	srv_p->cdf  = 0x3;		/* cutoff decrease factor */
	srv_p->adtf = 50;		/* ACR decrease time factor */
}
374
/*
 * Program the per-VC ABR tables for @vcc.
 * @flag == 1: initialize the segmentation side (FFRED) entry in the main
 *             VC table from the service parameters in @srv_p.
 * @flag == 0: initialize the reassembly side (RFRED) entry and account
 *             the VC's MCR in the device totals.
 * Returns 0 (the parameter sanity checks are compiled out below).
 */
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
                struct atm_vcc *vcc, u8 flag)
{
  f_vc_abr_entry  *f_abr_vc;
  r_vc_abr_entry  *r_abr_vc;
  u32		icr;
  u8		trm, nrm, crm;
  u16		adtf, air, *ptr16;
  f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
  f_abr_vc += vcc->vci;
  switch (flag) {
     case 1: /* FFRED initialization */
#if 0  /* sanity check */
       if (srv_p->pcr == 0)
          return INVALID_PCR;
       if (srv_p->pcr > dev->LineRate)
          srv_p->pcr = dev->LineRate;
       if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
	  return MCR_UNAVAILABLE;
       if (srv_p->mcr > srv_p->pcr)
	  return INVALID_MCR;
       if (!(srv_p->icr))
	  srv_p->icr = srv_p->pcr;
       if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
	  return INVALID_ICR;
       if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
	  return INVALID_TBE;
       if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
	  return INVALID_FRTT;
       if (srv_p->nrm > MAX_NRM)
	  return INVALID_NRM;
       if (srv_p->trm > MAX_TRM)
	  return INVALID_TRM;
       if (srv_p->adtf > MAX_ADTF)
          return INVALID_ADTF;
       else if (srv_p->adtf == 0)
	  srv_p->adtf = 1;
       if (srv_p->cdf > MAX_CDF)
	  return INVALID_CDF;
       if (srv_p->rif > MAX_RIF)
	  return INVALID_RIF;
       if (srv_p->rdf > MAX_RDF)
	  return INVALID_RDF;
#endif
       memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
       f_abr_vc->f_vc_type = ABR;
       nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
			          /* i.e 2**n = 2 << (n-1) */
       f_abr_vc->f_nrm = nrm << 8 | nrm;
       /* TRM field: upper limit on time between forward RM cells. */
       trm = 100000/(2 << (16 - srv_p->trm));
       if ( trm == 0) trm = 1;
       f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
       /* CRM = missing-RM-cell count limit, derived from TBE/Nrm. */
       crm = srv_p->tbe / nrm;
       if (crm == 0) crm = 1;
       f_abr_vc->f_crm = crm & 0xff;
       f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
       /* ICR is additionally capped by TBE/FRTT (cells per round trip);
        * the two expressions avoid 32-bit overflow for either ordering. */
       icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
				((srv_p->tbe/srv_p->frtt)*1000000) :
				(1000000/(srv_p->frtt/srv_p->tbe)));
       f_abr_vc->f_icr = cellrate_to_float(icr);
       adtf = (10000 * srv_p->adtf)/8192;
       if (adtf == 0) adtf = 1;
       f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
       f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
       f_abr_vc->f_acr = f_abr_vc->f_icr;   /* start at the initial rate */
       f_abr_vc->f_status = 0x0042;
       break;
    case 0: /* RFRED initialization */
       /* Mark the VCI as ABR in the reassembly lookup table. */
       ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
       *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
       r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
       r_abr_vc += vcc->vci;
       r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
       /* Additive increase rate = PCR scaled by the rate increase factor. */
       air = srv_p->pcr << (15 - srv_p->rif);
       if (air == 0) air = 1;
       r_abr_vc->r_air = cellrate_to_float(air);
       dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
       dev->sum_mcr += srv_p->mcr;
       dev->n_abr++;
       break;
    default:
       break;
  }
  return	0;
}
/*
 * Reserve CBR bandwidth for @vcc by writing its VCI into the CBR schedule
 * table.  The requested PCR is rounded to a whole number of table entries
 * (device granularity); each entry is then placed as evenly as possible
 * around the ring, searching outward from the ideal slot when a slot is
 * already taken.  Enables CBR in STPARMS when this is the first CBR VC.
 * Returns 0 on success, -1 if no PCR was given, -EBUSY if the table has
 * insufficient free entries.
 */
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow=0, rateHigh, rate;
   int entries;
   struct ia_vcc *ia_vcc;

   int   idealSlot =0, testSlot, toBeAssigned, inc;
   u32   spacing;
   u16  *SchedTbl, *TstSchedTbl;
   u16   cbrVC, vcIndex;
   u32   fracSlot    = 0;
   u32   sp_mod      = 0;
   u32   sp_mod2     = 0;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)
      return -1;
   }
   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                                entries, rate, dev->Granularity);)
   if (entries < 1)
      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   /* Round up when the remainder exceeds 1/4 of a granularity step. */
   rateLow  =  entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
   if (3*(rate - rateLow) > (rateHigh - rate))
      entries++;
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                                       entries, dev->CbrRemEntries);)
      return -EBUSY;
   }

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
   cbrVC   = 0;
   spacing = dev->CbrTotEntries / entries;
   sp_mod  = dev->CbrTotEntries % entries; // get modulo
   toBeAssigned = entries;
   fracSlot = 0;
   vcIndex  = vcc->vci;
   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
   while (toBeAssigned)
   {
      // If this is the first time, start the table loading for this connection
      // as close to entryPoint as possible.
      if (toBeAssigned == entries)
      {
         idealSlot = dev->CbrEntryPt;
         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
         if (dev->CbrEntryPt >= dev->CbrTotEntries)
            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
      } else {
         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
         // in the table that would be  smoothest
         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
         sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
      }
      if (idealSlot >= (int)dev->CbrTotEntries)
         idealSlot -= dev->CbrTotEntries;
      // Continuously check around this ideal value until a null
      // location is encountered.
      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
      inc = 0;
      testSlot = idealSlot;
      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
                                testSlot, TstSchedTbl,toBeAssigned);)
      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
      while (cbrVC)  // If another VC at this location, we have to keep looking
      {
          /* Alternate below (testSlot - inc) and above (testSlot + inc)
           * the ideal slot, widening the search each iteration. */
          inc++;
          testSlot = idealSlot - inc;
          if (testSlot < 0) { // Wrap if necessary
             testSlot += dev->CbrTotEntries;
             IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
                                                       SchedTbl,testSlot);)
          }
          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
          if (!cbrVC)
             break;
          testSlot = idealSlot + inc;
          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
             testSlot -= dev->CbrTotEntries;
             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                                            testSlot, toBeAssigned);)
          }
          // set table index and read in value
          TstSchedTbl = (u16*)(SchedTbl + testSlot);
          IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
                          TstSchedTbl,cbrVC,inc);)
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
       } /* while */
       // Move this VCI number into this location of the CBR Sched table.
       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
       dev->CbrRemEntries--;
       toBeAssigned--;
   } /* while */

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
       IF_CBR(printk("CBR is enabled\n");)
   }
   return 0;
}
/*
 * Release the CBR schedule-table entries owned by @vcc: every slot
 * holding its VCI is cleared and returned to CbrRemEntries.  When the
 * last CBR VC goes away, CBR scheduling is disabled in STPARMS.
 */
static void ia_cbrVc_close (struct atm_vcc *vcc) {
   IADEV *iadev;
   u16 *SchedTbl, NullVci = 0;
   u32 i, NumFound;

   iadev = INPH_IA_DEV(vcc->dev);
   iadev->NumEnabledCBR--;
   SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
   if (iadev->NumEnabledCBR == 0) {
      writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
      IF_CBR (printk("CBR support disabled\n");)
   }
   NumFound = 0;
   /* Scan the whole table; a VC may own many scattered slots. */
   for (i=0; i < iadev->CbrTotEntries; i++)
   {
      if (*SchedTbl == vcc->vci) {
         iadev->CbrRemEntries++;
         *SchedTbl = NullVci;
         IF_CBR(NumFound++;)   /* counted only in debug builds */
      }
      SchedTbl++;
   }
   IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
}
600
/* Count the free transmit descriptors in the TCQ ring (two bytes per
 * entry), after first reclaiming any completed ones. */
static int ia_avail_descs(IADEV *iadev)
{
	int avail;

	ia_hack_tcq(iadev);
	if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd) {
		/* Contiguous region between read and write pointers. */
		avail = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
	} else {
		/* Wrapped: tail of the ring plus the head segment. */
		avail = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 +
			 iadev->host_tcq_wr - iadev->ffL.tcq_st) / 2;
	}
	return avail;
}
611
612 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
613
/*
 * Drain the software transmit backlog: send up to ia_avail_descs() skbs
 * via ia_pkt_tx().  An skb whose VC has vanished or closed is freed and
 * the drain stops; an skb that fails to transmit is pushed back to the
 * head of the backlog (num_desc still decrements, so the loop always
 * terminates).  Always returns 0.
 */
static int ia_que_tx (IADEV *iadev) {
   struct sk_buff *skb;
   int num_desc;
   struct atm_vcc *vcc;
   num_desc = ia_avail_descs(iadev);

   while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
      if (!(vcc = ATM_SKB(skb)->vcc)) {
         dev_kfree_skb_any(skb);
         printk("ia_que_tx: Null vcc\n");
         break;
      }
      if (!test_bit(ATM_VF_READY,&vcc->flags)) {
         /* VC was closed while the skb sat in the backlog. */
         dev_kfree_skb_any(skb);
         printk("Free the SKB on closed vci %d \n", vcc->vci);
         break;
      }
      if (ia_pkt_tx (vcc, skb)) {
         /* Transmit failed: keep the skb for a later retry. */
         skb_queue_head(&iadev->tx_backlog, skb);
      }
      num_desc--;
   }
   return 0;
}
638
/*
 * Complete transmits queued on the return queue by ia_hack_tcq(): for
 * each returned skb, pop entries from the VC's txing_skb list up to and
 * including that skb, handing each to vcc->pop() (or freeing it).  Any
 * earlier skb still pending without IA_TX_DONE is reported as lost.  If
 * the skb is not found on the VC list the entry is re-queued and
 * processing stops.  Finishes by pushing the backlog via ia_que_tx().
 */
static void ia_tx_poll (IADEV *iadev) {
   struct atm_vcc *vcc = NULL;
   struct sk_buff *skb = NULL, *skb1 = NULL;
   struct ia_vcc *iavcc;
   IARTN_Q *  rtne;

   ia_hack_tcq(iadev);
   while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
       skb = rtne->data.txskb;
       if (!skb) {
           printk("ia_tx_poll: skb is null\n");
           goto out;
       }
       vcc = ATM_SKB(skb)->vcc;
       if (!vcc) {
           printk("ia_tx_poll: vcc is null\n");
           dev_kfree_skb_any(skb);
	   goto out;
       }

       iavcc = INPH_IA_VCC(vcc);
       if (!iavcc) {
           printk("ia_tx_poll: iavcc is null\n");
           dev_kfree_skb_any(skb);
	   goto out;
       }

       /* Walk the VC's in-flight list until we meet the completed skb;
        * anything before it is released along the way. */
       skb1 = skb_dequeue(&iavcc->txing_skb);
       while (skb1 && (skb1 != skb)) {
          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
             printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
          }
          IF_ERR(printk("Release the SKB not match\n");)
          if ((vcc->pop) && (skb1->len != 0))
          {
             vcc->pop(vcc, skb1);
             IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
                                                          (long)skb1);)
          }
          else
             dev_kfree_skb_any(skb1);
          skb1 = skb_dequeue(&iavcc->txing_skb);
       }
       if (!skb1) {
          /* Completed skb not on the VC list: put the entry back and
           * retry on a later poll. */
          IF_EVENT(printk("IA: Vci %d - skb not found requeued\n",vcc->vci);)
          ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
          break;
       }
       if ((vcc->pop) && (skb->len != 0))
       {
          vcc->pop(vcc, skb);
          IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
       }
       else
          dev_kfree_skb_any(skb);
       kfree(rtne);
    }
    ia_que_tx(iadev);
out:
    return;
}
#if 0
/* Write one 16-bit word to the on-board NOVRAM at @addr by bit-banging
 * the serial EEPROM interface (currently compiled out - kept for
 * reference).  The NVRAM_* / CFG_* macros touch iadev registers. */
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
	u32	t;
	int	i;
	/*
	 * Issue a command to enable writes to the NOVRAM
	 */
	NVRAM_CMD (EXTEND + EWEN);
	NVRAM_CLR_CE;
	/*
	 * issue the write command
	 */
	NVRAM_CMD(IAWRITE + addr);
	/*
	 * Send the data, starting with D15, then D14, and so on for 16 bits
	 */
	for (i=15; i>=0; i--) {
		NVRAM_CLKOUT (val & 0x8000);
		val <<= 1;
	}
	NVRAM_CLR_CE;
	CFG_OR(NVCE);
	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
	while (!(t & NVDO))
		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

	NVRAM_CLR_CE;
	/*
	 * disable writes again
	 */
	NVRAM_CMD(EXTEND + EWDS)
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
}
#endif
736
/*
 * Read one 16-bit word from the serial EEPROM at @addr by bit-banging
 * the NOVRAM interface.  The NVRAM_CMD/NVRAM_CLKIN/NVRAM_CLR_CE/CFG_AND
 * macros (iphase.h) manipulate the adapter's EEPROM access register and
 * implicitly use the local "iadev" and "t" variables.
 */
static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
	u_short	val;
        u32	t;
	int	i;
	/*
	 * Read the first bit that was clocked with the falling edge of
	 * the last command data clock
	 */
	NVRAM_CMD(IAREAD + addr);
	/*
	 * Now read the rest of the bits, the next bit read is D14, then D13,
	 * and so on.
	 */
	val = 0;
	for (i=15; i>=0; i--) {
		NVRAM_CLKIN(t);
		val |= (t << i);   /* assemble MSB-first */
	}
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
	return val;
}
760
/*
 * Discover the board variant from EEPROM word 25: scale the TX/RX buffer
 * pools to the amount of on-board packet memory (1M / 512K / smaller),
 * derive the PHY type from the front-end bits, and compute the line rate
 * in cells/sec for that PHY (the *26/(27*53) factor converts payload
 * bit rate to an ATM cell rate).
 */
static void ia_hw_type(IADEV *iadev) {
   u_short memType = ia_eeprom_get(iadev, 25);
   iadev->memType = memType;
   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
      iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
      /* Halve the buffer counts only if the user kept the defaults. */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 2;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 2;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   else {
      /* Smallest memory option: an eighth of the default pools. */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 8;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 8;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
         iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
         iadev->rx_buf_sz, iadev->rx_pkt_ram);)

#if 0   /* NOTE(review): dead code; braces do not even match - do not enable
           without rewriting. */
   if ((memType & FE_MASK) == FE_SINGLE_MODE) {
      iadev->phy_type = PHY_OC3C_S;
   else if ((memType & FE_MASK) == FE_UTP_OPTION)
      iadev->phy_type = PHY_UTP155;
   else
     iadev->phy_type = PHY_OC3C_M;
#endif

   iadev->phy_type = memType & FE_MASK;
   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                                         memType,iadev->phy_type);)
   if (iadev->phy_type == FE_25MBIT_PHY)
      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_DS3_PHY)
      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_E3_PHY)
      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
   else
      iadev->LineRate = (u32)(ATM_OC3_PCR);
   IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)

}
821
822 static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
823 {
824 return readl(ia->phy + (reg >> 2));
825 }
826
827 static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
828 {
829 writel(val, ia->phy + (reg >> 2));
830 }
831
832 static void ia_frontend_intr(struct iadev_priv *iadev)
833 {
834 u32 status;
835
836 if (iadev->phy_type & FE_25MBIT_PHY) {
837 status = ia_phy_read32(iadev, MB25_INTR_STATUS);
838 iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
839 } else if (iadev->phy_type & FE_DS3_PHY) {
840 ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
841 status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
842 iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
843 } else if (iadev->phy_type & FE_E3_PHY) {
844 ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
845 status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
846 iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
847 } else {
848 status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
849 iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
850 }
851
852 printk(KERN_INFO "IA: SUNI carrier %s\n",
853 iadev->carrier_detect ? "detected" : "lost signal");
854 }
855
/*
 * Initialize the 25 Mbit (MB25) PHY: discard-on-error bits in the master
 * control register, diagnostics off, then sample the initial carrier
 * state from the good-signal bit.
 */
static void ia_mb25_init(struct iadev_priv *iadev)
{
#if 0
	/* Original (disabled) variant also set MB25_MC_ENABLED here. */
	mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
	ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
	ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);

	iadev->carrier_detect =
		(ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
}
867
/* One entry of a table-driven PHY register initialization sequence:
 * a register offset and the value to write (see ia_phy_write()). */
struct ia_reg {
	u16 reg;
	u16 val;
};
872
873 static void ia_phy_write(struct iadev_priv *iadev,
874 const struct ia_reg *regs, int len)
875 {
876 while (len--) {
877 ia_phy_write32(iadev, regs->reg, regs->val);
878 regs++;
879 }
880 }
881
/*
 * DS3 front-end bring-up for the PM7345 SUNI: sample the initial
 * carrier state from the framer status, then program the DS3 framer,
 * transmitter and splitter with the table below.
 */
static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_ds3_init[] = {
		{ SUNI_DS3_FRM_INTR_ENBL,	0x17 },
		{ SUNI_DS3_FRM_CFG,		0x01 },
		{ SUNI_DS3_TRAN_CFG,		0x01 },
		{ SUNI_CONFIG,			0 },
		{ SUNI_SPLR_CFG,		0 },
		{ SUNI_SPLT_CFG,		0 }
	};
	u32 status;

	/* LOSV set means loss of signal -> no carrier. */
	status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
	iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;

	ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
}
899
/*
 * E3 front-end bring-up for the PM7345 SUNI: sample the initial carrier
 * state, then program the E3 framer/transmitter and enable E3 mode in
 * SUNI_CONFIG via the table below.
 */
static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_e3_init[] = {
		{ SUNI_E3_FRM_FRAM_OPTIONS,		0x04 },
		{ SUNI_E3_FRM_MAINT_OPTIONS,		0x20 },
		{ SUNI_E3_FRM_FRAM_INTR_ENBL,		0x1d },
		{ SUNI_E3_FRM_MAINT_INTR_ENBL,		0x30 },
		{ SUNI_E3_TRAN_STAT_DIAG_OPTIONS,	0 },
		{ SUNI_E3_TRAN_FRAM_OPTIONS,		0x01 },
		{ SUNI_CONFIG,				SUNI_PM7345_E3ENBL },
		{ SUNI_SPLR_CFG,			0x41 },
		{ SUNI_SPLT_CFG,			0x41 }
	};
	u32 status;

	/* LOS set means loss of signal -> no carrier. */
	status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
	iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
}
919
/*
 * Common PM7345 SUNI initialization for DS3/E3 boards: run the
 * line-specific bring-up first, then program the shared cell-processor
 * settings (idle/unassigned cell patterns and masks, TX cell processor)
 * and finally clear all loopback modes.
 */
static void ia_suni_pm7345_init(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_init[] = {
		/* Enable RSOP loss of signal interrupt. */
		{ SUNI_INTR_ENBL,		0x28 },
		/* Clear error counters. */
		{ SUNI_ID_RESET,		0 },
		/* Clear "PMCTST" in master test register. */
		{ SUNI_MASTER_TEST,		0 },

		{ SUNI_RXCP_CTRL,		0x2c },
		{ SUNI_RXCP_FCTRL,		0x81 },

		/* Idle cell header pattern (VCI=0, CLP ignored) ... */
		{ SUNI_RXCP_IDLE_PAT_H1,	0 },
		{ SUNI_RXCP_IDLE_PAT_H2,	0 },
		{ SUNI_RXCP_IDLE_PAT_H3,	0 },
		{ SUNI_RXCP_IDLE_PAT_H4,	0x01 },

		/* ... and which header bits participate in the match. */
		{ SUNI_RXCP_IDLE_MASK_H1,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H2,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H3,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H4,	0xfe },

		{ SUNI_RXCP_CELL_PAT_H1,	0 },
		{ SUNI_RXCP_CELL_PAT_H2,	0 },
		{ SUNI_RXCP_CELL_PAT_H3,	0 },
		{ SUNI_RXCP_CELL_PAT_H4,	0x01 },

		{ SUNI_RXCP_CELL_MASK_H1,	0xff },
		{ SUNI_RXCP_CELL_MASK_H2,	0xff },
		{ SUNI_RXCP_CELL_MASK_H3,	0xff },
		{ SUNI_RXCP_CELL_MASK_H4,	0xff },

		{ SUNI_TXCP_CTRL,		0xa4 },
		{ SUNI_TXCP_INTR_EN_STS,	0x10 },
		{ SUNI_TXCP_IDLE_PAT_H5,	0x55 }
	};

	if (iadev->phy_type & FE_DS3_PHY)
		ia_suni_pm7345_init_ds3(iadev);
	else
		ia_suni_pm7345_init_e3(iadev);

	ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));

	/* Take the chip out of any line/cell/diag/payload loopback mode. */
	ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
		~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
		  SUNI_PM7345_DLB | SUNI_PM7345_PLB));
#ifdef __SNMP__
   suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
	return;
}
973
974
975 /***************************** IA_LIB END *****************************/
976
977 #ifdef CONFIG_ATM_IA_DEBUG
978 static int tcnter = 0;
979 static void xdump( u_char* cp, int length, char* prefix )
980 {
981 int col, count;
982 u_char prntBuf[120];
983 u_char* pBuf = prntBuf;
984 count = 0;
985 while(count < length){
986 pBuf += sprintf( pBuf, "%s", prefix );
987 for(col = 0;count + col < length && col < 16; col++){
988 if (col != 0 && (col % 4) == 0)
989 pBuf += sprintf( pBuf, " " );
990 pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
991 }
992 while(col++ < 16){ /* pad end of buffer with blanks */
993 if ((col % 4) == 0)
994 sprintf( pBuf, " " );
995 pBuf += sprintf( pBuf, " " );
996 }
997 pBuf += sprintf( pBuf, " " );
998 for(col = 0;count + col < length && col < 16; col++){
999 u_char c = cp[count + col];
1000
1001 if (isascii(c) && isprint(c))
1002 pBuf += sprintf(pBuf, "%c", c);
1003 else
1004 pBuf += sprintf(pBuf, ".");
1005 }
1006 printk("%s\n", prntBuf);
1007 count += col;
1008 pBuf = prntBuf;
1009 }
1010
1011 } /* close xdump(... */
1012 #endif /* CONFIG_ATM_IA_DEBUG */
1013
1014
1015 static struct atm_dev *ia_boards = NULL;
1016
1017 #define ACTUAL_RAM_BASE \
1018 RAM_BASE*((iadev->mem)/(128 * 1024))
1019 #define ACTUAL_SEG_RAM_BASE \
1020 IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1021 #define ACTUAL_REASS_RAM_BASE \
1022 IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1023
1024
1025 /*-- some utilities and memory allocation stuff will come here -------------*/
1026
/*
 * desc_dbg() - manual-debug dump of the transmit completion queue (TCQ).
 *
 * Reads the hardware TCQ write pointer and start/end addresses from the
 * segmentation registers, prints the descriptors at and before the write
 * pointer, walks the whole queue printing each slot, and finally dumps
 * the per-descriptor timestamp table.  Read-only with respect to the
 * hardware; intended to be called by hand while debugging.
 */
static void desc_dbg(IADEV *iadev) {

  u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
  u32 i;
  void __iomem *tmp;
  // regval = readl((u32)ia_cmds->maddr);
  /* board's current TCQ write pointer (offset into seg RAM) */
  tcq_wr_ptr = readw(iadev->seg_reg+TCQ_WR_PTR);
  printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
                     tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
                     readw(iadev->seg_ram+tcq_wr_ptr-2));
  printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
                   iadev->ffL.tcq_rd);
  /* queue bounds, then walk every 16-bit slot between them */
  tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
  tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
  printk("tcq_st_ptr = 0x%x  tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
  i = 0;
  while (tcq_st_ptr != tcq_ed_ptr) {
      tmp = iadev->seg_ram+tcq_st_ptr;
      printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
      tcq_st_ptr += 2;
  }
  /* software-side transmit descriptor timestamps */
  for(i=0; i <iadev->num_tx_desc; i++)
      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}
1051
1052
1053 /*----------------------------- Receiving side stuff --------------------------*/
1054
/*
 * rx_excp_rcvd() - handler for the reassembler's "exception received"
 * interrupt.
 *
 * The draining code below is compiled out (#if 0): it would pop VCI and
 * error-code entries off the hardware exception queue and advance the
 * read pointer until the EXCPQ_EMPTY state bit is set.  Per the original
 * author's comment it was disabled because servicing the queue this way
 * caused too many exception interrupts, so the function is currently a
 * deliberate no-op and exception-queue entries are simply left to age.
 */
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving size will cause too many excp int */
  IADEV *iadev;
  u_short state;
  u_short excpq_rd_ptr;
  //u_short *ptr;
  int vci, error = 1;
  iadev = INPH_IA_DEV(dev);
  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
  { printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
	vci = readw(iadev->reass_ram+excpq_rd_ptr);
	error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        // pwang_test
	excpq_rd_ptr += 4;
	if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
	    excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
	writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  }
#endif
}
1083
1084 static void free_desc(struct atm_dev *dev, int desc)
1085 {
1086 IADEV *iadev;
1087 iadev = INPH_IA_DEV(dev);
1088 writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1089 iadev->rfL.fdq_wr +=2;
1090 if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1091 iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
1092 writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
1093 }
1094
1095
/*
 * rx_pkt() - service one entry of the reassembler's packet complete
 * queue (PCQ).
 *
 * Pops the next completed-PDU descriptor off the PCQ, validates the
 * descriptor and the owning VCC, allocates (and atm-charges) an skb of
 * the PDU length, and queues a DLE so the DMA engine copies the data
 * from adapter packet RAM into the skb.  DMA completion is handled
 * later in rx_dle_intr().
 *
 * Returns 0 on success or after dropping a bad PDU, -EINVAL if the PCQ
 * was empty, -1 on a bad descriptor or a missing VCC.
 */
static int rx_pkt(struct atm_dev *dev)
{
	IADEV *iadev;
	struct atm_vcc *vcc;
	unsigned short status;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	int desc;
	struct dle* wr_ptr;
	int len;
	struct sk_buff *skb;
	u_int buf_addr, dma_addr;

	iadev = INPH_IA_DEV(dev);
	/* PCQ is empty when our local read pointer catches the hardware
	   write pointer. */
	if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
	{
	    printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
	    return -EINVAL;
	}
	/* mask 1st 3 bits to get the actual descno. */
	desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
	IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
                                 iadev->reass_ram, iadev->rfL.pcq_rd, desc);
              printk(" pcq_wr_ptr = 0x%x\n",
                               readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
	/* update the read pointer - maybe we shud do this in the end*/
	if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
		iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
	else
		iadev->rfL.pcq_rd += 2;
	writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

	/* get the buffer desc entry.
		update stuff. - doesn't seem to be any update necessary
	*/
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	/* make the ptr point to the corresponding buffer desc entry */
	buf_desc_ptr += desc;
        /* Validate: descriptor index in [1, num_rx_desc] and the VC index
           stored in the descriptor within the configured VC range.  The
           vc_index read is short-circuited behind the range checks. */
        if (!desc || (desc > iadev->num_rx_desc) ||
            ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
            free_desc(dev, desc);
            IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
            return -1;
        }
	vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
	if (!vcc)
	{
                free_desc(dev, desc);
		printk("IA: null vcc, drop PDU\n");
		return -1;
	}


	/* might want to check the status bits for errors */
	status = (u_short) (buf_desc_ptr->desc_mode);
	if (status & (RX_CER | RX_PTE | RX_OFL))
	{
                atomic_inc(&vcc->stats->rx_err);
		IF_ERR(printk("IA: bad packet, dropping it");)
                if (status & RX_CER) {
                    IF_ERR(printk(" cause: packet CRC error\n");)
                }
                else if (status & RX_PTE) {
                    IF_ERR(printk(" cause: packet time out\n");)
                }
                else {
                    IF_ERR(printk(" cause: buffer overflow\n");)
                }
		goto out_free_desc;
	}

	/*
		build DLE.
	*/

	/* PDU length = current DMA address minus buffer start, both kept
	   as hi/lo 16-bit halves in the descriptor. */
	buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
	dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
	len = dma_addr - buf_addr;
        if (len > iadev->rx_buf_sz) {
           printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
           atomic_inc(&vcc->stats->rx_err);
	   goto out_free_desc;
        }

        if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
           if (vcc->vci < 32)
              printk("Drop control packets\n");
	   goto out_free_desc;
        }
	skb_put(skb,len);
        // pwang_test
        ATM_SKB(skb)->vcc = vcc;
        ATM_DESC(skb) = desc;
	/* rx_dle_intr() dequeues in the same order the DLEs complete */
	skb_queue_tail(&iadev->rx_dma_q, skb);

	/* Build the DLE structure */
	wr_ptr = iadev->rx_dle_q.write;
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
					      len, DMA_FROM_DEVICE);
	wr_ptr->local_pkt_addr = buf_addr;
	wr_ptr->bytes = len;	/* We don't know this do we ?? */
	wr_ptr->mode = DMA_INT_ENABLE;

	/* shud take care of wrap around here too. */
        if(++wr_ptr == iadev->rx_dle_q.end)
             wr_ptr = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = wr_ptr;
	udelay(1);
	/* Increment transaction counter */
	writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out:	return 0;
out_free_desc:
        free_desc(dev, desc);
        goto out;
}
1210
/*
 * rx_intr() - top-level receive interrupt handler for the reassembler.
 *
 * Reads the reassembly interrupt status once and services each asserted
 * condition: packet-complete (drain the PCQ via rx_pkt), free-queue
 * empty (stall detection / recovery), exception received (delegated to
 * rx_excp_rcvd, currently a no-op) and raw-cell received (ignored).
 */
static void rx_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  u_short status;
  u_short state, i;

  iadev = INPH_IA_DEV(dev);
  status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
  IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
  if (status & RX_PKT_RCVD)
  {
	/* do something */
	/* Basically recvd an interrupt for receiving a packet.
	A descriptor would have been written to the packet complete
	queue. Get all the descriptors and set up dma to move the
	packets till the packet complete queue is empty..
	*/
	state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
	while(!(state & PCQ_EMPTY))
	{
             rx_pkt(dev);
	     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	}
        iadev->rxing = 1;
  }
  if (status & RX_FREEQ_EMPT)
  {
     /* Free-descriptor queue ran dry.  First occurrence: snapshot the
        packet count and time.  If we are still stalled ~50 jiffies
        later with no packets received, force-recycle every descriptor
        and re-enable the masked interrupts. */
     if (iadev->rxing) {
        iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
        iadev->rx_tmp_jif = jiffies;
        iadev->rxing = 0;
     }
     else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
        for (i = 1; i <= iadev->num_rx_desc; i++)
            free_desc(dev, i);
printk("Test logic RUN!!!!\n");
        writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing = 1;
     }
     IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
  }

  if (status & RX_EXCP_RCVD)
  {
	/* probably need to handle the exception queue also. */
	IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
	rx_excp_rcvd(dev);
  }


  if (status & RX_RAW_RCVD)
  {
	/* need to handle the raw incoming cells. This deepnds on
	whether we have programmed to receive the raw cells or not.
	Else ignore. */
	IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
  }
}
1271
1272
/*
 * rx_dle_intr() - receive DMA-completion handler.
 *
 * Walks the DLE ring from our read pointer up to the hardware's current
 * list position, and for each completed DLE: dequeues the matching skb
 * from rx_dma_q, returns its hardware descriptor to the free queue,
 * strips and validates the AAL5/CPCS trailer, and pushes the skb up to
 * the VCC.  Finally, if receive interrupts were masked because the free
 * queue had run empty, re-enables them once descriptors are available
 * again.
 */
static void rx_dle_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  struct atm_vcc *vcc;
  struct sk_buff *skb;
  int desc;
  u_short state;
  struct dle *dle, *cur_dle;
  u_int dle_lp;
  int len;
  iadev = INPH_IA_DEV(dev);

  /* free all the dles done, that is just update our own dle read pointer
	- do we really need to do this. Think not. */
  /* DMA is done, just get all the recevie buffers from the rx dma queue
	and push them up to the higher layer protocol. Also free the desc
	associated with the buffer. */
  dle = iadev->rx_dle_q.read;
  /* hardware list pointer, wrapped to the ring size; >>4 converts the
     byte offset to a DLE index (sizeof(struct dle) == 16) */
  dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
  while(dle != cur_dle)
  {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->rx_dma_q);
      if (!skb)
         goto INCR_DLE;
      desc = ATM_DESC(skb);
      free_desc(dev, desc);

      if (!(len = skb->len))
      {
          printk("rx_dle_intr: skb len 0\n");
	  dev_kfree_skb_any(skb);
      }
      else
      {
          struct cpcs_trailer *trailer;
          u_short length;
          struct ia_vcc *ia_vcc;

	  /* NOTE(review): this unmaps rx_dle_q.write->sys_pkt_addr, not
	     the address of the DLE being retired (dle) — looks
	     suspicious; confirm against the DMA mapping done in
	     rx_pkt(). */
	  dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
			   len, DMA_FROM_DEVICE);
          /* no VCC related housekeeping done as yet. lets see */
          vcc = ATM_SKB(skb)->vcc;
	  if (!vcc) {
	      printk("IA: null vcc\n");
              dev_kfree_skb_any(skb);
              goto INCR_DLE;
          }
          ia_vcc = INPH_IA_VCC(vcc);
          if (ia_vcc == NULL)
          {
             atomic_inc(&vcc->stats->rx_err);
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
           }
          // get real pkt length  pwang_test
          /* the CPCS trailer sits at the end of the DMAed data; its
             length field is byte-swapped relative to host order */
          trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                                 skb->len - sizeof(*trailer));
	  length = swap_byte_order(trailer->length);
          if ((length > iadev->rx_buf_sz) || (length >
                              (skb->len - sizeof(struct cpcs_trailer))))
          {
             atomic_inc(&vcc->stats->rx_err);
             IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)",
                                                            length, skb->len);)
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
          }
          skb_trim(skb, length);
          
	  /* Display the packet */
	  IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
          xdump(skb->data, skb->len, "RX: ");
          printk("\n");)

	  IF_RX(printk("rx_dle_intr: skb push");)
	  vcc->push(vcc,skb);
	  atomic_inc(&vcc->stats->rx);
          iadev->rx_pkt_cnt++;
      }
INCR_DLE:
      if (++dle == iadev->rx_dle_q.end)
    	  dle = iadev->rx_dle_q.start;
  }
  iadev->rx_dle_q.read = dle;

  /* if the interrupts are masked because there were no free desc available,
		unmask them now. */
  if (!iadev->rxing) {
     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
     if (!(state & FREEQ_EMPTY)) {
        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
                                      iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing++;
     }
  }
}
1374
1375
1376 static int open_rx(struct atm_vcc *vcc)
1377 {
1378 IADEV *iadev;
1379 u_short __iomem *vc_table;
1380 u_short __iomem *reass_ptr;
1381 IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1382
1383 if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
1384 iadev = INPH_IA_DEV(vcc->dev);
1385 if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
1386 if (iadev->phy_type & FE_25MBIT_PHY) {
1387 printk("IA: ABR not support\n");
1388 return -EINVAL;
1389 }
1390 }
1391 /* Make only this VCI in the vc table valid and let all
1392 others be invalid entries */
1393 vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1394 vc_table += vcc->vci;
1395 /* mask the last 6 bits and OR it with 3 for 1K VCs */
1396
1397 *vc_table = vcc->vci << 6;
1398 /* Also keep a list of open rx vcs so that we can attach them with
1399 incoming PDUs later. */
1400 if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
1401 (vcc->qos.txtp.traffic_class == ATM_ABR))
1402 {
1403 srv_cls_param_t srv_p;
1404 init_abr_vc(iadev, &srv_p);
1405 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1406 }
1407 else { /* for UBR later may need to add CBR logic */
1408 reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1409 reass_ptr += vcc->vci;
1410 *reass_ptr = NO_AAL5_PKT;
1411 }
1412
1413 if (iadev->rx_open[vcc->vci])
1414 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
1415 vcc->dev->number, vcc->vci);
1416 iadev->rx_open[vcc->vci] = vcc;
1417 return 0;
1418 }
1419
/*
 * rx_init() - one-time initialization of the receive (reassembly) side.
 *
 * Allocates the coherent DLE ring for receive DMA, resets the
 * reassembler, lays out the receive control memory (buffer descriptor
 * table, free-descriptor queue, packet complete queue, exception queue,
 * reassembly table, VC table, ABR VC table), programs the timeout and
 * filter registers, allocates the rx_open[] VCC lookup array and
 * finally puts the reassembler online.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (with the DLE
 * ring freed on the partial-failure path).
 */
static int rx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	unsigned long rx_pkt_start = 0;
	void *dle_addr;
	struct abr_vc_table  *abr_vc_table;
	u16 *vc_table;
	u16 *reass_table;
	int i,j, vcsize_sel;
	u_short freeq_st_adr;
	u_short *freeq_start;

	iadev = INPH_IA_DEV(dev);
  //    spin_lock_init(&iadev->rx_lock);

	/* Allocate 4k bytes - more aligned than needed (4k boundary) */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->rx_dle_dma, GFP_KERNEL);
	if (!dle_addr)  {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	iadev->rx_dle_q.start = (struct dle *)dle_addr;
	iadev->rx_dle_q.read = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = iadev->rx_dle_q.start;
	iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
	/* the end of the dle q points to the entry after the last
	DLE that can be used. */

	/* write the upper 20 bits of the start address to rx list address register */
	/* We know this is 32bit bus addressed so the following is safe */
	writel(iadev->rx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_RX_LIST_ADDR);
	IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
                      iadev->dma+IPHASE5575_TX_LIST_ADDR,
                      readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
	printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
                      iadev->dma+IPHASE5575_RX_LIST_ADDR,
                      readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)

	/* mask everything, take the reassembler offline, then reset it */
	writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
	writew(0, iadev->reass_reg+MODE_REG);
	writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);

	/* Receive side control memory map
	   -------------------------------

	   Buffer descr	0x0000 (736 - 23K)
	   VP Table	0x5c00 (256 - 512)
	   Except q	0x5e00 (128 - 512)
	   Free buffer q 0x6000 (1K - 2K)
	   Packet comp q 0x6800 (1K - 2K)
	   Reass Table	0x7000 (1K - 2K)
	   VC Table	0x7800 (1K - 2K)
	   ABR VC Table	0x8000 (1K - 32K)
	*/

	/* Base address for Buffer Descriptor Table */
	writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
	/* Set the buffer size register */
	writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);

	/* Initialize each entry in the Buffer Descriptor Table */
	iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	buf_desc_ptr++;
	rx_pkt_start = iadev->rx_pkt_ram;
	/* descriptor 0 is reserved (zeroed above); real descriptors are
	   1..num_rx_desc, each owning one rx_buf_sz slice of packet RAM */
	for(i=1; i<=iadev->num_rx_desc; i++)
	{
		memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		rx_pkt_start += iadev->rx_buf_sz;
	}
	IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
	/* Free-descriptor queue: program start/end and both pointers;
	   write pointer starts at the end since we pre-fill the queue */
	i = FREE_BUF_DESC_Q*iadev->memSize;
	writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE);
	writew(i, iadev->reass_reg+FREEQ_ST_ADR);
	writew(i+iadev->num_rx_desc*sizeof(u_short),
					 iadev->reass_reg+FREEQ_ED_ADR);
	writew(i, iadev->reass_reg+FREEQ_RD_PTR);
	writew(i+iadev->num_rx_desc*sizeof(u_short),
					iadev->reass_reg+FREEQ_WR_PTR);
	/* Fill the FREEQ with all the free descriptors. */
	freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
	freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
	for(i=1; i<=iadev->num_rx_desc; i++)
	{
		*freeq_start = (u_short)i;
		freeq_start++;
	}
	IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
	/* Packet Complete Queue */
	i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
	writew(i, iadev->reass_reg+PCQ_ST_ADR);
	writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
	writew(i, iadev->reass_reg+PCQ_RD_PTR);
	writew(i, iadev->reass_reg+PCQ_WR_PTR);

	/* Exception Queue */
	i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
	writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
	writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
					     iadev->reass_reg+EXCP_Q_ED_ADR);
	writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
	writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);

	/* Load local copy of FREEQ and PCQ ptrs */
	iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
	iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
	iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
	iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
	iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
	iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
	iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
	iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;

	IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
              iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
              iadev->rfL.pcq_wr);)
	/* just for check - no VP TBL */
	/* VP Table */
	/* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
	/* initialize VP Table for invalid VPIs
		- I guess we can write all 1s or 0x000f in the entire memory
		  space or something similar.
	*/

	/* This seems to work and looks right to me too !!! */
	i =  REASS_TABLE * iadev->memSize;
	writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
	/* initialize Reassembly table to I don't know what ???? */
	/* NOTE(review): reass_table/vc_table below are plain u16* stores
	   into ioremapped reassembly RAM, not writew() — presumably fine
	   on the platforms this ran on; confirm before touching. */
	reass_table = (u16 *)(iadev->reass_ram+i);
	j = REASS_TABLE_SZ * iadev->memSize;
	for(i=0; i < j; i++)
		*reass_table++ = NO_AAL5_PKT;
	/* derive the VC-table size selector: 8K VCs -> 0, halving the
	   count bumps the selector by one until it matches num_vc */
	i = 8*1024;
	vcsize_sel =  0;
	while (i != iadev->num_vc) {
		i /= 2;
		vcsize_sel++;
	}
	i = RX_VC_TABLE * iadev->memSize;
	writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
	vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
	j = RX_VC_TABLE_SZ * iadev->memSize;
	for(i = 0; i < j; i++)
	{
		/* shift the reassembly pointer by 3 + lower 3 bits of
		vc_lkup_base register (=3 for 1K VCs) and the last byte
		is those low 3 bits.
		Shall program this later.
		*/
		*vc_table = (i << 6) | 15;	/* for invalid VCI */
		vc_table++;
	}
        /* ABR VC table */
        i =  ABR_VC_TABLE * iadev->memSize;
        writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);

        i = ABR_VC_TABLE * iadev->memSize;
	abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
	/* NOTE(review): entry count reuses REASS_TABLE_SZ, not an
	   ABR-specific size constant — confirm this is intended. */
        j = REASS_TABLE_SZ * iadev->memSize;
        memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
        for(i = 0; i < j; i++) {
		abr_vc_table->rdf = 0x0003;
		abr_vc_table->air = 0x5eb1;
		abr_vc_table++;
        }

	/* Initialize other registers */

	/* VP Filter Register set for VC Reassembly only */
	writew(0xff00, iadev->reass_reg+VP_FILTER);
        writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
	writew(0x1,  iadev->reass_reg+PROTOCOL_ID);

	/* Packet Timeout Count  related Registers :
	   Set packet timeout to occur in about 3 seconds
	   Set Packet Aging Interval count register to overflow in about 4 us
 	*/
	writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );

	/* NOTE(review): the TMOUT_RANGE value is derived from j, which at
	   this point still holds the ABR-table entry count from above —
	   presumably deliberate scaling with memory size; confirm. */
	i = (j >> 6) & 0xFF;
	j += 2 * (j - 1);
	i |= ((j << 2) & 0xFF00);
	writew(i, iadev->reass_reg+TMOUT_RANGE);

	/* initiate the desc_tble (transmit-side timestamps, cleared here) */
	for(i=0; i<iadev->num_tx_desc;i++)
		iadev->desc_tbl[i].timestamp = 0;

	/* to clear the interrupt status register - read it */
	readw(iadev->reass_reg+REASS_INTR_STATUS_REG);

	/* Mask Register - clear it */
	writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);

	skb_queue_head_init(&iadev->rx_dma_q);
	iadev->rx_free_desc_qhead = NULL;

	iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
	if (!iadev->rx_open) {
		printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
		dev->number);
		goto err_free_dle;
	}

	iadev->rxing = 1;
	iadev->rx_pkt_cnt = 0;
	/* Mode Register */
	writew(R_ONLINE, iadev->reass_reg+MODE_REG);
	return 0;

err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
			  iadev->rx_dle_dma);
err_out:
	return -ENOMEM;
}
1643
1644
1645 /*
1646 The memory map suggested in appendix A and the coding for it.
1647 Keeping it around just in case we change our mind later.
1648
1649 Buffer descr 0x0000 (128 - 4K)
1650 UBR sched 0x1000 (1K - 4K)
1651 UBR Wait q 0x2000 (1K - 4K)
1652 Commn queues 0x3000 Packet Ready, Trasmit comp(0x3100)
1653 (128 - 256) each
1654 extended VC 0x4000 (1K - 8K)
1655 ABR sched 0x6000 and ABR wait queue (1K - 2K) each
1656 CBR sched 0x7000 (as needed)
1657 VC table 0x8000 (1K - 32K)
1658 */
1659
1660 static void tx_intr(struct atm_dev *dev)
1661 {
1662 IADEV *iadev;
1663 unsigned short status;
1664 unsigned long flags;
1665
1666 iadev = INPH_IA_DEV(dev);
1667
1668 status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
1669 if (status & TRANSMIT_DONE){
1670
1671 IF_EVENT(printk("Transmit Done Intr logic run\n");)
1672 spin_lock_irqsave(&iadev->tx_lock, flags);
1673 ia_tx_poll(iadev);
1674 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1675 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1676 if (iadev->close_pending)
1677 wake_up(&iadev->close_wait);
1678 }
1679 if (status & TCQ_NOT_EMPTY)
1680 {
1681 IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
1682 }
1683 }
1684
/*
 * tx_dle_intr() - transmit DMA-completion handler.
 *
 * Walks the transmit DLE ring from our read pointer to the hardware's
 * current list position, dequeuing the matching skb for each completed
 * DLE.  Unmaps the data buffer (only for the first DLE of each
 * skb+trailer pair), then either releases the skb to the owner (pop or
 * free) or, for rate-limited VCs, parks it on the VCC's txing_skb list
 * for the flow-control logic.  Runs entirely under tx_lock.
 *
 * Note: on a null vcc/iavcc the function returns early without
 * publishing the advanced read pointer (tx_dle_q.read is only updated
 * at the end).
 */
static void tx_dle_intr(struct atm_dev *dev)
{
        IADEV *iadev;
        struct dle *dle, *cur_dle; 
        struct sk_buff *skb; 
        struct atm_vcc *vcc;
        struct ia_vcc  *iavcc;
        u_int dle_lp;
        unsigned long flags;

        iadev = INPH_IA_DEV(dev);
        spin_lock_irqsave(&iadev->tx_lock, flags);   
        dle = iadev->tx_dle_q.read;
        /* hardware list position, wrapped to ring size; >>4 gives the
           DLE index (each DLE is 16 bytes) */
        dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
                                        (sizeof(struct dle)*DLE_ENTRIES - 1);
        cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
        while (dle != cur_dle)
        {
            /* free the DMAed skb */ 
            skb = skb_dequeue(&iadev->tx_dma_q); 
            if (!skb) break;

	    /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
	    if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
		dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
				 DMA_TO_DEVICE);
	    }
            vcc = ATM_SKB(skb)->vcc;
            if (!vcc) {
                  printk("tx_dle_intr: vcc is null\n");
		  spin_unlock_irqrestore(&iadev->tx_lock, flags);
                  dev_kfree_skb_any(skb);

                  return;
            }
            iavcc = INPH_IA_VCC(vcc);
            if (!iavcc) {
                  printk("tx_dle_intr: iavcc is null\n");
		  spin_unlock_irqrestore(&iadev->tx_lock, flags);
                  dev_kfree_skb_any(skb);
                  return;
            }
            /* Fast VCs release the skb immediately; slow (rate-limited)
               VCs keep it on txing_skb until the flow-control timer
               decides it has really left the wire. */
            if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
               if ((vcc->pop) && (skb->len != 0))
               {     
                 vcc->pop(vcc, skb);
               } 
               else {
                 dev_kfree_skb_any(skb);
               }
            }
            else { /* Hold the rate-limited skb for flow control */
               IA_SKB_STATE(skb) |= IA_DLED;
               skb_queue_tail(&iavcc->txing_skb, skb);
            }
            IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
            if (++dle == iadev->tx_dle_q.end)
                 dle = iadev->tx_dle_q.start;
        }
        iadev->tx_dle_q.read = dle;
        spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
1747
/*
 * open_tx() - set up the transmit side of a VC.
 *
 * Validates the requested traffic class against the PHY's abilities,
 * clamps/derives the PCR from the QoS parameters, sizes the socket send
 * buffer for rate-limited VCs, initializes the main and extended VC
 * table entries (ATM header, type, CRC mode), and programs UBR, ABR or
 * CBR scheduling as requested.
 *
 * Returns 0 on success; -EINVAL for unsupported class/SDU size, -EBUSY
 * when an ABR MCR would oversubscribe the line, -1 or the ia_cbr_setup
 * error for CBR failures.
 */
static int open_tx(struct atm_vcc *vcc)
{
	struct ia_vcc *ia_vcc;
	IADEV *iadev;
	struct main_vc *vc;
	struct ext_vc *evc;
        int ret;
	IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
	if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);
        
	/* 25 Mbit PHY variants support neither ABR nor CBR */
        if (iadev->phy_type & FE_25MBIT_PHY) {
           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
               printk("IA:  ABR not support\n");
               return -EINVAL; 
           }
	  if (vcc->qos.txtp.traffic_class == ATM_CBR) {
               printk("IA:  CBR not support\n");
               return -EINVAL; 
          }
        }
        ia_vcc =  INPH_IA_VCC(vcc);
        memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
        if (vcc->qos.txtp.max_sdu > 
                         (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
           printk("IA:  SDU size over (%d) the configured SDU size %d\n",
		  vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
	   vcc->dev_data = NULL;
           kfree(ia_vcc);
           return -EINVAL; 
        }
	ia_vcc->vc_desc_cnt = 0;
        ia_vcc->txing = 1;

        /* find pcr: take max_pcr if set and larger than pcr, fall back
           to line rate when nothing sensible was requested, and clamp
           to the line rate in all cases */
        if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
           vcc->qos.txtp.pcr = iadev->LineRate;
        else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
           vcc->qos.txtp.pcr = iadev->LineRate;
        else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
           vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
        if (vcc->qos.txtp.pcr > iadev->LineRate)
             vcc->qos.txtp.pcr = iadev->LineRate;
        ia_vcc->pcr = vcc->qos.txtp.pcr;

        /* flow-control timeout scales inversely with the cell rate */
        if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
        else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
        else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
        else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
        /* rate-limited VCs get a txing_skb hold queue and a send buffer
           sized to roughly a few SDUs at their rate */
        if (ia_vcc->pcr < iadev->rate_limit)
           skb_queue_head_init (&ia_vcc->txing_skb);
        if (ia_vcc->pcr < iadev->rate_limit) {
	   struct sock *sk = sk_atm(vcc);

	   if (vcc->qos.txtp.max_sdu != 0) {
               if (ia_vcc->pcr > 60000)
                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
               else if (ia_vcc->pcr > 2000)
                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
               else
                 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
           }
           else
             sk->sk_sndbuf = 24576;
        }
           
	/* clear this VCI's entries in the main and extended VC tables */
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	vc += vcc->vci;
	evc += vcc->vci;
	memset((caddr_t)vc, 0, sizeof(*vc));
	memset((caddr_t)evc, 0, sizeof(*evc));
	
	/* store the most significant 4 bits of vci as the last 4 bits 
		of first part of atm header.
	   store the last 12 bits of vci as first 12 bits of the second
		part of the atm header.
	*/
	evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
	evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
 
	/* check the following for different traffic classes */
	if (vcc->qos.txtp.traffic_class == ATM_UBR)
	{
		vc->type = UBR;
                vc->status = CRC_APPEND;
		vc->acr = cellrate_to_float(iadev->LineRate);
                if (vcc->qos.txtp.pcr > 0) 
                   vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
                IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
                                             vcc->qos.txtp.max_pcr,vc->acr);)
	}
	else if (vcc->qos.txtp.traffic_class == ATM_ABR)
	{       srv_cls_param_t srv_p;
		IF_ABR(printk("Tx ABR VCC\n");)  
                init_abr_vc(iadev, &srv_p);
                /* override the defaults with any QoS parameters the
                   caller actually supplied */
                if (vcc->qos.txtp.pcr > 0) 
                   srv_p.pcr = vcc->qos.txtp.pcr;
                if (vcc->qos.txtp.min_pcr > 0) {
                   int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
                   if (tmpsum > iadev->LineRate)
                       return -EBUSY;
                   srv_p.mcr = vcc->qos.txtp.min_pcr;
                   iadev->sum_mcr += vcc->qos.txtp.min_pcr;
                } 
                else srv_p.mcr = 0;
                if (vcc->qos.txtp.icr)
                   srv_p.icr = vcc->qos.txtp.icr;
                if (vcc->qos.txtp.tbe)
                   srv_p.tbe = vcc->qos.txtp.tbe;
                if (vcc->qos.txtp.frtt)
                   srv_p.frtt = vcc->qos.txtp.frtt;
                if (vcc->qos.txtp.rif)
                   srv_p.rif = vcc->qos.txtp.rif;
                if (vcc->qos.txtp.rdf)
                   srv_p.rdf = vcc->qos.txtp.rdf;
                if (vcc->qos.txtp.nrm_pres)
                   srv_p.nrm = vcc->qos.txtp.nrm;
                if (vcc->qos.txtp.trm_pres)
                   srv_p.trm = vcc->qos.txtp.trm;
                if (vcc->qos.txtp.adtf_pres)
                   srv_p.adtf = vcc->qos.txtp.adtf;
                if (vcc->qos.txtp.cdf_pres)
                   srv_p.cdf = vcc->qos.txtp.cdf;    
                if (srv_p.icr > srv_p.pcr)
                   srv_p.icr = srv_p.pcr;    
                IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n", 
                                                      srv_p.pcr, srv_p.mcr);)
		ia_open_abr_vc(iadev, &srv_p, vcc, 1);
	} else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
                if (iadev->phy_type & FE_25MBIT_PHY) {
                    printk("IA:  CBR not support\n");
                    return -EINVAL; 
                }
                if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
                   IF_CBR(printk("PCR is not available\n");)
                   return -1;
                }
                vc->type = CBR;
                vc->status = CRC_APPEND;
                if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
                    return ret;
                }
	} else {
		printk("iadev:  Non UBR, ABR and CBR traffic not supported\n");
	}
        
        iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
	IF_EVENT(printk("ia open_tx returning \n");)  
	return 0;
}
1899
1900
/*
 * tx_init - one-time initialisation of the transmit (segmentation) side.
 *
 * Allocates the host-side TX DLE ring and per-descriptor CPCS trailer
 * buffers, lays out the SAR's transmit control memory (buffer descriptor
 * table, TCQ, PRQ, CBR/UBR/ABR schedule tables and wait queues, VC
 * tables) and programs the segmentation registers accordingly.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure after
 * unwinding all partial allocations (goto-based cleanup at the bottom).
 */
static int tx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct tx_buf_desc *buf_desc_ptr;
	unsigned int tx_pkt_start;
	void *dle_addr;
	int i;
	u_short tcq_st_adr;
	u_short *tcq_start;
	u_short prq_st_adr;
	u_short *prq_start;
	struct main_vc *vc;
	struct ext_vc *evc;
	u_short tmp16;
	u32 vcsize_sel;

	iadev = INPH_IA_DEV(dev);
	spin_lock_init(&iadev->tx_lock);

	IF_INIT(printk("Tx MASK REG: 0x%0x\n",
		readw(iadev->seg_reg+SEG_MASK_REG));)

	/* Allocate 4k (boundary aligned) bytes */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->tx_dle_dma, GFP_KERNEL);
	if (!dle_addr) {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	/* DLE ring starts out empty: read == write == start. */
	iadev->tx_dle_q.start = (struct dle*)dle_addr;
	iadev->tx_dle_q.read = iadev->tx_dle_q.start;
	iadev->tx_dle_q.write = iadev->tx_dle_q.start;
	iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);

	/* write the upper 20 bits of the start address to tx list address register */
	writel(iadev->tx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_TX_LIST_ADDR);
	/* Mask all segmentation interrupts, then reset the segmentation unit. */
	writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
	writew(0, iadev->seg_reg+MODE_REG_0);
	writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
	/* Control-memory table offsets scale with board memory size
	 * (memSize is 1 or 4, set in ia_init() from the BAR size). */
	iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
	iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
	iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;

	/*
	   Transmit side control memory map
	   --------------------------------
	 Buffer descr	0x0000 (128 - 4K)
	 Commn queues	0x1000	Transmit comp, Packet ready(0x1400)
					(512 - 1K) each
					TCQ - 4K, PRQ - 5K
	 CBR Table	0x1800 (as needed) - 6K
	 UBR Table	0x3000 (1K - 4K) - 12K
	 UBR Wait queue	0x4000 (1K - 4K) - 16K
	 ABR sched	0x5000	and ABR wait queue (1K - 2K) each
				ABR Tbl - 20K, ABR Wq - 22K
	 extended VC	0x6000 (1K - 8K) - 24K
	 VC Table	0x8000 (1K - 32K) - 32K

	 Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
	 and Wait q, which can be allotted later.
	*/

	/* Buffer Descriptor Table Base address */
	writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);

	/* initialize each entry in the buffer descriptor table */
	buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
	memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	buf_desc_ptr++;	/* entry 0 is zeroed and left unused: descriptors are 1-based */
	tx_pkt_start = TX_PACKET_RAM;
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->desc_mode = AAL5;
		/* each descriptor owns one tx_buf_sz-sized slice of packet RAM */
		buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		tx_pkt_start += iadev->tx_buf_sz;
	}
	/* Host-side bookkeeping: one cpcs_trailer_desc per TX descriptor. */
	iadev->tx_buf = kmalloc_objs(*iadev->tx_buf, iadev->num_tx_desc);
	if (!iadev->tx_buf) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_dle;
	}
	for (i= 0; i< iadev->num_tx_desc; i++)
	{
		struct cpcs_trailer *cpcs;

		/* GFP_DMA: the trailer is DMAed to the card (mapped below). */
		cpcs = kmalloc_obj(*cpcs, GFP_KERNEL | GFP_DMA);
		if(!cpcs) {
			printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
			goto err_free_tx_bufs;
		}
		iadev->tx_buf[i].cpcs = cpcs;
		iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
							   cpcs,
							   sizeof(*cpcs),
							   DMA_TO_DEVICE);
	}
	/* Maps in-flight descriptor number -> (iavcc, skb); see ia_pkt_tx(). */
	iadev->desc_tbl = kmalloc_objs(*iadev->desc_tbl, iadev->num_tx_desc);
	if (!iadev->desc_tbl) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_all_tx_bufs;
	}

	/* Communication Queues base address */
	i = TX_COMP_Q * iadev->memSize;
	writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);

	/* Transmit Complete Queue */
	writew(i, iadev->seg_reg+TCQ_ST_ADR);
	writew(i, iadev->seg_reg+TCQ_RD_PTR);
	writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
	iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
	       iadev->seg_reg+TCQ_ED_ADR);
	/* Fill the TCQ with all the free descriptors. */
	tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
	tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*tcq_start = (u_short)i;
		tcq_start++;
	}

	/* Packet Ready Queue */
	i = PKT_RDY_Q * iadev->memSize;
	writew(i, iadev->seg_reg+PRQ_ST_ADR);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
	       iadev->seg_reg+PRQ_ED_ADR);
	writew(i, iadev->seg_reg+PRQ_RD_PTR);
	writew(i, iadev->seg_reg+PRQ_WR_PTR);

	/* Load local copy of PRQ and TCQ ptrs */
	iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
	iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
	iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;

	iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
	iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
	iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;

	/* Just for safety initializing the queue to have desc 1 always */
	/* Fill the PRQ with all the free descriptors. */
	prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
	prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*prq_start = (u_short)0;	/* desc 1 in all entries */
		prq_start++;
	}
	/* CBR Table */
	IF_INIT(printk("Start CBR Init\n");)
#if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
	writew(0,iadev->seg_reg+CBR_PTR_BASE);
#else /* Charlie's logic is wrong ? */
	tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
	IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
	writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
#endif

	IF_INIT(printk("value in register = 0x%x\n",
		       readw(iadev->seg_reg+CBR_PTR_BASE));)
	/* Table begin/end/pointer registers hold word (16-bit) offsets,
	 * hence the >> 1 on the byte offsets. */
	tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
	writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
	IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
		       readw(iadev->seg_reg+CBR_TAB_BEG));)
	writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
	tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
	writew(tmp16, iadev->seg_reg+CBR_TAB_END);
	IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
		       iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
	IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
		       readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
		       readw(iadev->seg_reg+CBR_TAB_END+1));)

	/* Initialize the CBR Schedualing Table */
	memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
		  0, iadev->num_vc*6);
	/* CBR bookkeeping: 3 schedule entries per VC (table is num_vc*6
	 * bytes, 2 bytes per entry). */
	iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
	iadev->CbrEntryPt = 0;
	iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
	iadev->NumEnabledCBR = 0;

	/* UBR scheduling Table and wait queue */
	/* initialize all bytes of UBR scheduler table and wait queue to 0
	   - SCHEDSZ is 1K (# of entries).
	   - UBR Table size is 4K
	   - UBR wait queue is 4K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memeset.
	*/

	/* vcsize_sel = log2(8192 / num_vc); num_vc is 1024 or 4096 (see
	 * ia_init()), both powers of two, so the loop terminates. */
	vcsize_sel = 0;
	i = 8*1024;
	while (i != iadev->num_vc) {
		i /= 2;
		vcsize_sel++;
	}

	i = MAIN_VC_TABLE * iadev->memSize;
	writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
	i = EXT_VC_TABLE * iadev->memSize;
	writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
	i = UBR_SCHED_TABLE * iadev->memSize;
	writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
	i = UBR_WAIT_Q * iadev->memSize;
	writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
	memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
	       0, iadev->num_vc*8);
	/* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
	/* initialize all bytes of ABR scheduler table and wait queue to 0
	   - SCHEDSZ is 1K (# of entries).
	   - ABR Table size is 2K
	   - ABR wait queue is 2K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memeset.
	*/
	i = ABR_SCHED_TABLE * iadev->memSize;
	writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
	i = ABR_WAIT_Q * iadev->memSize;
	writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);

	i = ABR_SCHED_TABLE*iadev->memSize;
	memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	/* Per-VC software state (rate-limit bookkeeping); indexed by VCI. */
	iadev->testTable = kmalloc_objs(*iadev->testTable, iadev->num_vc);
	if (!iadev->testTable) {
		printk("Get freepage failed\n");
		goto err_free_desc_tbl;
	}
	for(i=0; i<iadev->num_vc; i++)
	{
		/* zero the on-card main and extended VC table entries */
		memset((caddr_t)vc, 0, sizeof(*vc));
		memset((caddr_t)evc, 0, sizeof(*evc));
		iadev->testTable[i] = kmalloc_obj(struct testTable_t);
		if (!iadev->testTable[i])
			goto err_free_test_tables;
		iadev->testTable[i]->lastTime = 0;
		iadev->testTable[i]->fract = 0;
		iadev->testTable[i]->vc_status = VC_UBR;
		vc++;
		evc++;
	}

	/* Other Initialization */

	/* Max Rate Register */
	if (iadev->phy_type & FE_25MBIT_PHY) {
		writew(RATE25, iadev->seg_reg+MAXRATE);
		writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
	}
	else {
		writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
		/* ABR is only enabled on non-25Mbit PHYs */
		writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
	}
	/* Set Idle Header Reigisters to be sure */
	writew(0, iadev->seg_reg+IDLEHEADHI);
	writew(0, iadev->seg_reg+IDLEHEADLO);

	/* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
	writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);

	iadev->close_pending = 0;
	init_waitqueue_head(&iadev->close_wait);
	init_waitqueue_head(&iadev->timeout_wait);
	skb_queue_head_init(&iadev->tx_dma_q);
	ia_init_rtn_q(&iadev->tx_return_q);

	/* RM Cell Protocol ID and Message Type */
	writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
	skb_queue_head_init (&iadev->tx_backlog);

	/* Mode Register 1 */
	writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);

	/* Mode Register 0 */
	writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);

	/* Interrupt Status Register - read to clear */
	readw(iadev->seg_reg+SEG_INTR_STATUS_REG);

	/* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
	writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
	writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	iadev->tx_pkt_cnt = 0;
	iadev->rate_limit = iadev->LineRate / 3;

	return 0;

err_free_test_tables:
	/* i is the index of the failed allocation; free entries [0, i). */
	while (--i >= 0)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
err_free_desc_tbl:
	kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
	/* all trailer buffers were allocated; fall through to free them all */
	i = iadev->num_tx_desc;
err_free_tx_bufs:
	while (--i >= 0) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			  iadev->tx_dle_dma);
err_out:
	return -ENOMEM;
}
2216
/*
 * ia_int - shared interrupt handler for the adapter.
 *
 * Loops reading the bus status register and dispatching to the RX/TX
 * sub-handlers until no interrupt cause remains (low 7 status bits
 * clear).  STAT_DLERINT/STAT_DLETINT are write-1-to-clear; the other
 * causes are presumably cleared by their sub-handlers — the hardware
 * manual should confirm.
 */
static irqreturn_t ia_int(int irq, void *dev_id)
{
	struct atm_dev *dev;
	IADEV *iadev;
	unsigned int status;
	int handled = 0;

	dev = dev_id;
	iadev = INPH_IA_DEV(dev);
	/* keep servicing until all cause bits are clear */
	while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
	{
		handled = 1;
		IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
		if (status & STAT_REASSINT)
		{
			/* reassembly (receive) interrupt */
			IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
			rx_intr(dev);
		}
		if (status & STAT_DLERINT)
		{
			/* Clear this bit by writing a 1 to it. */
			writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
			rx_dle_intr(dev);
		}
		if (status & STAT_SEGINT)
		{
			/* segmentation (transmit) interrupt */
			IF_EVENT(printk("IA: tx_intr \n");)
			tx_intr(dev);
		}
		if (status & STAT_DLETINT)
		{
			/* TX DLE done — also write-1-to-clear */
			writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
			tx_dle_intr(dev);
		}
		if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
		{
			/* only the front-end cause gets explicit handling;
			 * ERRINT/MARKINT are observed but otherwise ignored */
			if (status & STAT_FEINT)
				ia_frontend_intr(iadev);
		}
	}
	return IRQ_RETVAL(handled);
}
2261
2262
2263
2264 /*----------------------------- entries --------------------------------*/
/*
 * get_esi - read the adapter's ESI (MAC address) from the MAC1/MAC2
 * registers into dev->esi.
 *
 * MAC1 provides the first MAC1_LEN bytes, MAC2 the remaining MAC2_LEN.
 * The values are converted to big-endian first so the per-byte shifts
 * below emit the address in wire order.
 * NOTE(review): MAC2 is read with readl() but only the low 16 bits
 * survive the u16 assignment — presumably the register is 16 bits wide;
 * confirm against the register map.  Always returns 0.
 */
static int get_esi(struct atm_dev *dev)
{
	IADEV *iadev;
	int i;
	u32 mac1;
	u16 mac2;

	iadev = INPH_IA_DEV(dev);
	mac1 = cpu_to_be32(le32_to_cpu(readl(
				iadev->reg+IPHASE5575_MAC1)));
	mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
	IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
	/* unpack most-significant byte first */
	for (i=0; i<MAC1_LEN; i++)
		dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));

	for (i=0; i<MAC2_LEN; i++)
		dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
	return 0;
}
2284
2285 static int reset_sar(struct atm_dev *dev)
2286 {
2287 IADEV *iadev;
2288 int i, error;
2289 unsigned int pci[64];
2290
2291 iadev = INPH_IA_DEV(dev);
2292 for (i = 0; i < 64; i++) {
2293 error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
2294 if (error != PCIBIOS_SUCCESSFUL)
2295 return error;
2296 }
2297 writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2298 for (i = 0; i < 64; i++) {
2299 error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
2300 if (error != PCIBIOS_SUCCESSFUL)
2301 return error;
2302 }
2303 udelay(5);
2304 return 0;
2305 }
2306
2307
/*
 * ia_init - probe-time initialisation: read PCI config, map BAR 0 and
 * carve it into the register/RAM regions, read the ESI and reset the SAR.
 *
 * The BAR size identifies the board variant: 1MB => 4K VCs (memSize 4),
 * 256KB => 1K VCs (memSize 1).  Returns 0 on success, negative errno on
 * failure, or 1 if the SAR reset fails.
 */
static int ia_init(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned long real_base;
	void __iomem *base;
	unsigned short command;
	int error, i;

	/* The device has been identified and registered. Now we read
	   necessary configuration info like memory base address,
	   interrupt number etc */

	IF_INIT(printk(">ia_init\n");)
	dev->ci_range.vpi_bits = 0;
	dev->ci_range.vci_bits = NR_VCI_LD;

	iadev = INPH_IA_DEV(dev);
	real_base = pci_resource_start (iadev->pci, 0);
	iadev->irq = iadev->pci->irq;

	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
	if (error) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
		       dev->number,error);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
		       dev->number, iadev->pci->revision, real_base, iadev->irq);)

	/* find mapping size of board */

	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);

	if (iadev->pci_map_size == 0x100000){
		/* 1MB BAR: 4K-VC board */
		iadev->num_vc = 4096;
		dev->ci_range.vci_bits = NR_VCI_4K_LD;
		iadev->memSize = 4;
	}
	else if (iadev->pci_map_size == 0x40000) {
		/* 256KB BAR: 1K-VC board */
		iadev->num_vc = 1024;
		iadev->memSize = 1;
	}
	else {
		printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
		return -EINVAL;
	}
	IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)

	/* enable bus mastering */
	pci_set_master(iadev->pci);

	/*
	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
	 */
	udelay(10);

	/* mapping the physical address to a virtual address in address space */
	base = ioremap(real_base,iadev->pci_map_size); /* ioremap is not resolved ??? */

	if (!base)
	{
		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
		       dev->number);
		return -ENOMEM;
	}
	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
		       dev->number, iadev->pci->revision, base, iadev->irq);)

	/* filling the iphase dev structure */
	iadev->mem = iadev->pci_map_size /2;
	iadev->real_base = real_base;
	iadev->base = base;

	/* Bus Interface Control Registers */
	iadev->reg = base + REG_BASE;
	/* Segmentation Control Registers */
	iadev->seg_reg = base + SEG_BASE;
	/* Reassembly Control Registers */
	iadev->reass_reg = base + REASS_BASE;
	/* Front end/ DMA control registers */
	iadev->phy = base + PHY_BASE;
	iadev->dma = base + PHY_BASE;	/* PHY and DMA share the same window */
	/* RAM - Segmentation RAm and Reassembly RAM */
	iadev->ram = base + ACTUAL_RAM_BASE;
	iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;

	/* lets print out the above */
	IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
		       iadev->reg,iadev->seg_reg,iadev->reass_reg,
		       iadev->phy, iadev->ram, iadev->seg_ram,
		       iadev->reass_ram);)

	/* lets try reading the MAC address */
	error = get_esi(dev);
	if (error) {
		iounmap(iadev->base);
		return error;
	}
	printk("IA: ");
	for (i=0; i < ESI_LEN; i++)
		printk("%s%02X",i ? "-" : "",dev->esi[i]);
	printk("\n");

	/* reset SAR */
	if (reset_sar(dev)) {
		iounmap(iadev->base);
		printk("IA: reset SAR fail, please try again\n");
		return 1;
	}
	return 0;
}
2420
2421 static void ia_update_stats(IADEV *iadev) {
2422 if (!iadev->carrier_detect)
2423 return;
2424 iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2425 iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2426 iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2427 iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2428 iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2429 iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2430 return;
2431 }
2432
2433 static void ia_led_timer(struct timer_list *unused) {
2434 unsigned long flags;
2435 static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2436 u_char i;
2437 static u32 ctrl_reg;
2438 for (i = 0; i < iadev_count; i++) {
2439 if (ia_dev[i]) {
2440 ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2441 if (blinking[i] == 0) {
2442 blinking[i]++;
2443 ctrl_reg &= (~CTRL_LED);
2444 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2445 ia_update_stats(ia_dev[i]);
2446 }
2447 else {
2448 blinking[i] = 0;
2449 ctrl_reg |= CTRL_LED;
2450 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2451 spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2452 if (ia_dev[i]->close_pending)
2453 wake_up(&ia_dev[i]->close_wait);
2454 ia_tx_poll(ia_dev[i]);
2455 spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2456 }
2457 }
2458 }
2459 mod_timer(&ia_timer, jiffies + HZ / 4);
2460 return;
2461 }
2462
2463 static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2464 unsigned long addr)
2465 {
2466 writel(value, INPH_IA_DEV(dev)->phy+addr);
2467 }
2468
2469 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2470 {
2471 return readl(INPH_IA_DEV(dev)->phy+addr);
2472 }
2473
2474 static void ia_free_tx(IADEV *iadev)
2475 {
2476 int i;
2477
2478 kfree(iadev->desc_tbl);
2479 for (i = 0; i < iadev->num_vc; i++)
2480 kfree(iadev->testTable[i]);
2481 kfree(iadev->testTable);
2482 for (i = 0; i < iadev->num_tx_desc; i++) {
2483 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2484
2485 dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2486 sizeof(*desc->cpcs), DMA_TO_DEVICE);
2487 kfree(desc->cpcs);
2488 }
2489 kfree(iadev->tx_buf);
2490 dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2491 iadev->tx_dle_dma);
2492 }
2493
2494 static void ia_free_rx(IADEV *iadev)
2495 {
2496 kfree(iadev->rx_open);
2497 dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2498 iadev->rx_dle_dma);
2499 }
2500
/*
 * ia_start - bring the adapter online: claim the IRQ, enable PCI
 * memory/master, program the bus control register, initialise the TX
 * and RX sides, then release the front end from reset and initialise
 * the PHY (MB25 / SUNI PM7345 / generic SUNI by phy_type).
 *
 * Returns 0 on success or a negative errno, unwinding via the
 * goto-cleanup labels at the bottom.
 */
static int ia_start(struct atm_dev *dev)
{
	IADEV *iadev;
	int error;
	unsigned char phy;
	u32 ctrl_reg;
	IF_EVENT(printk(">ia_start\n");)
	iadev = INPH_IA_DEV(dev);
	if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
		printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
		       dev->number, iadev->irq);
		error = -EAGAIN;
		goto err_out;
	}
	/* @@@ should release IRQ on error (handled below via err_free_irq) */
	/* enabling memory + master */
	if ((error = pci_write_config_word(iadev->pci,
					   PCI_COMMAND,
					   PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
	{
		printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
		       "master (0x%x)\n",dev->number, error);
		error = -EIO;
		goto err_free_irq;
	}
	udelay(10);

	/* Maybe we should reset the front end, initialize Bus Interface Control
	   Registers and see. */

	IF_INIT(printk("Bus ctrl reg: %08x\n",
		       readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	/* Preserve only the LED and front-end-reset bits, then enable all
	 * burst sizes and unmask the interrupt sources we handle. */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
		| CTRL_B8
		| CTRL_B16
		| CTRL_B32
		| CTRL_B48
		| CTRL_B64
		| CTRL_B128
		| CTRL_ERRMASK
		| CTRL_DLETMASK		/* shud be removed l8r */
		| CTRL_DLERMASK
		| CTRL_SEGMASK
		| CTRL_REASSMASK
		| CTRL_FEMASK
		| CTRL_CSPREEMPT;

	writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);

	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
		       readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
		printk("Bus status reg after init: %08x\n",
		       readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)

	ia_hw_type(iadev);
	error = tx_init(dev);
	if (error)
		goto err_free_irq;
	error = rx_init(dev);
	if (error)
		goto err_free_tx;

	/* Release the front end from reset (set CTRL_FE_RST). */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
		       readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	phy = 0; /* resolve compiler complaint */
	IF_INIT (
		if ((phy=ia_phy_get(dev,0)) == 0x30)
			printk("IA: pm5346,rev.%d\n",phy&0x0f);
		else
			printk("IA: utopia,rev.%0x\n",phy);)

	/* PHY bring-up depends on the front-end type detected earlier. */
	if (iadev->phy_type & FE_25MBIT_PHY)
		ia_mb25_init(iadev);
	else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
		ia_suni_pm7345_init(iadev);
	else {
		error = suni_init(dev);
		if (error)
			goto err_free_rx;
		if (dev->phy->start) {
			error = dev->phy->start(dev);
			if (error)
				goto err_free_rx;
		}
		/* Get iadev->carrier_detect status */
		ia_frontend_intr(iadev);
	}
	return 0;

err_free_rx:
	ia_free_rx(iadev);
err_free_tx:
	ia_free_tx(iadev);
err_free_irq:
	free_irq(iadev->irq, dev);
err_out:
	return error;
}
2602
/*
 * ia_close - atmdev_ops close hook: drain and tear down one VCC.
 *
 * TX side: waits ~500ms, purges this VCC's skbs from the device
 * backlog (preserving everyone else's), then sleeps on close_wait
 * until all in-flight descriptors complete (ia_led_timer wakes us) or
 * a PCR-derived timeout expires, and finally returns any reserved
 * CBR/ABR bandwidth.  RX side: marks the reassembly and VC table
 * entries inactive, drains pending DLEs and unhooks rx_open[vci].
 * NOTE(review): close_pending is incremented before tx_lock is taken —
 * presumably safe because readers only test it under the lock; confirm.
 */
static void ia_close(struct atm_vcc *vcc)
{
	DEFINE_WAIT(wait);
	u16 *vc_table;
	IADEV *iadev;
	struct ia_vcc *ia_vcc;
	struct sk_buff *skb = NULL;
	struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
	unsigned long closetime, flags;

	iadev = INPH_IA_DEV(vcc->dev);
	ia_vcc = INPH_IA_VCC(vcc);
	if (!ia_vcc) return;	/* never fully opened */

	IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n",
			ia_vcc->vc_desc_cnt,vcc->vci);)
	clear_bit(ATM_VF_READY,&vcc->flags);
	skb_queue_head_init (&tmp_tx_backlog);
	skb_queue_head_init (&tmp_vcc_backlog);
	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		iadev->close_pending++;
		/* give in-flight transmission ~500ms to make progress */
		prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(500));
		finish_wait(&iadev->timeout_wait, &wait);
		spin_lock_irqsave(&iadev->tx_lock, flags);
		/* drop this VCC's queued skbs; park the rest on a temp list */
		while((skb = skb_dequeue(&iadev->tx_backlog))) {
			if (ATM_SKB(skb)->vcc == vcc){
				if (vcc->pop) vcc->pop(vcc, skb);
				else dev_kfree_skb_any(skb);
			}
			else
				skb_queue_tail(&tmp_tx_backlog, skb);
		}
		/* restore the other VCCs' skbs to the device backlog */
		while((skb = skb_dequeue(&tmp_tx_backlog)))
			skb_queue_tail(&iadev->tx_backlog, skb);
		IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
		/* timeout scales inversely with the VC's cell rate;
		 * presumably jiffies units — confirm */
		closetime = 300000 / ia_vcc->pcr;
		if (closetime == 0)
			closetime = 1;
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
		/* ia_led_timer wakes close_wait as descriptors complete */
		wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
		spin_lock_irqsave(&iadev->tx_lock, flags);
		iadev->close_pending--;
		/* reset the per-VCI software state back to idle UBR */
		iadev->testTable[vcc->vci]->lastTime = 0;
		iadev->testTable[vcc->vci]->fract = 0;
		iadev->testTable[vcc->vci]->vc_status = VC_UBR;
		if (vcc->qos.txtp.traffic_class == ATM_ABR) {
			/* return the reserved minimum cell rate */
			if (vcc->qos.txtp.min_pcr > 0)
				iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
		}
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			/* return the CBR schedule slots */
			ia_vcc = INPH_IA_VCC(vcc);
			iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
			ia_cbrVc_close (vcc);
		}
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		// reset reass table
		vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
		vc_table += vcc->vci;
		*vc_table = NO_AAL5_PKT;
		// reset vc table
		vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
		vc_table += vcc->vci;
		*vc_table = (vcc->vci << 6) | 15;
		if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
			/* restore default ABR rate-decrease/increase params */
			struct abr_vc_table __iomem *abr_vc_table =
				(iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
			abr_vc_table +=  vcc->vci;
			abr_vc_table->rdf = 0x0003;
			abr_vc_table->air = 0x5eb1;
		}
		// Drain the packets
		rx_dle_intr(vcc->dev);
		iadev->rx_open[vcc->vci] = NULL;
	}
	kfree(INPH_IA_VCC(vcc));
	ia_vcc = NULL;
	vcc->dev_data = NULL;
	clear_bit(ATM_VF_ADDR,&vcc->flags);
	return;
}
2687
2688 static int ia_open(struct atm_vcc *vcc)
2689 {
2690 struct ia_vcc *ia_vcc;
2691 int error;
2692 if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2693 {
2694 IF_EVENT(printk("ia: not partially allocated resources\n");)
2695 vcc->dev_data = NULL;
2696 }
2697 if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2698 {
2699 IF_EVENT(printk("iphase open: unspec part\n");)
2700 set_bit(ATM_VF_ADDR,&vcc->flags);
2701 }
2702 if (vcc->qos.aal != ATM_AAL5)
2703 return -EINVAL;
2704 IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2705 vcc->dev->number, vcc->vpi, vcc->vci);)
2706
2707 /* Device dependent initialization */
2708 ia_vcc = kmalloc_obj(*ia_vcc);
2709 if (!ia_vcc) return -ENOMEM;
2710 vcc->dev_data = ia_vcc;
2711
2712 if ((error = open_rx(vcc)))
2713 {
2714 IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2715 ia_close(vcc);
2716 return error;
2717 }
2718
2719 if ((error = open_tx(vcc)))
2720 {
2721 IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2722 ia_close(vcc);
2723 return error;
2724 }
2725
2726 set_bit(ATM_VF_READY,&vcc->flags);
2727
2728 #if 0
2729 {
2730 static u8 first = 1;
2731 if (first) {
2732 ia_timer.expires = jiffies + 3*HZ;
2733 add_timer(&ia_timer);
2734 first = 0;
2735 }
2736 }
2737 #endif
2738 IF_EVENT(printk("ia open returning\n");)
2739 return 0;
2740 }
2741
/*
 * ia_change_qos - atmdev_ops change_qos hook.
 *
 * QoS renegotiation is not implemented; the request is accepted and
 * silently ignored (returning 0 keeps callers from failing).
 */
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
	IF_EVENT(printk(">ia_change_qos\n");)
	return 0;
}
2747
2748 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2749 {
2750 IA_CMDBUF ia_cmds;
2751 IADEV *iadev;
2752 int i, board;
2753 u16 __user *tmps;
2754 IF_EVENT(printk(">ia_ioctl\n");)
2755 if (cmd != IA_CMD) {
2756 if (!dev->phy->ioctl) return -EINVAL;
2757 return dev->phy->ioctl(dev,cmd,arg);
2758 }
2759 if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2760 board = ia_cmds.status;
2761
2762 if ((board < 0) || (board > iadev_count))
2763 board = 0;
2764 board = array_index_nospec(board, iadev_count + 1);
2765
2766 iadev = ia_dev[board];
2767 switch (ia_cmds.cmd) {
2768 case MEMDUMP:
2769 {
2770 switch (ia_cmds.sub_cmd) {
2771 case MEMDUMP_SEGREG:
2772 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2773 tmps = (u16 __user *)ia_cmds.buf;
2774 for(i=0; i<0x80; i+=2, tmps++)
2775 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2776 ia_cmds.status = 0;
2777 ia_cmds.len = 0x80;
2778 break;
2779 case MEMDUMP_REASSREG:
2780 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2781 tmps = (u16 __user *)ia_cmds.buf;
2782 for(i=0; i<0x80; i+=2, tmps++)
2783 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2784 ia_cmds.status = 0;
2785 ia_cmds.len = 0x80;
2786 break;
2787 case MEMDUMP_FFL:
2788 {
2789 ia_regs_t *regs_local;
2790 ffredn_t *ffL;
2791 rfredn_t *rfL;
2792
2793 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2794 regs_local = kmalloc_obj(*regs_local);
2795 if (!regs_local) return -ENOMEM;
2796 ffL = ®s_local->ffredn;
2797 rfL = ®s_local->rfredn;
2798 /* Copy real rfred registers into the local copy */
2799 for (i=0; i<(sizeof (rfredn_t))/4; i++)
2800 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2801 /* Copy real ffred registers into the local copy */
2802 for (i=0; i<(sizeof (ffredn_t))/4; i++)
2803 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2804
2805 if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2806 kfree(regs_local);
2807 return -EFAULT;
2808 }
2809 kfree(regs_local);
2810 printk("Board %d registers dumped\n", board);
2811 ia_cmds.status = 0;
2812 }
2813 break;
2814 case READ_REG:
2815 {
2816 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2817 desc_dbg(iadev);
2818 ia_cmds.status = 0;
2819 }
2820 break;
2821 case 0x6:
2822 {
2823 ia_cmds.status = 0;
2824 printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
2825 printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
2826 }
2827 break;
2828 case 0x8:
2829 {
2830 struct k_sonet_stats *stats;
2831 stats = &PRIV(_ia_dev[board])->sonet_stats;
2832 printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2833 printk("line_bip : %d\n", atomic_read(&stats->line_bip));
2834 printk("path_bip : %d\n", atomic_read(&stats->path_bip));
2835 printk("line_febe : %d\n", atomic_read(&stats->line_febe));
2836 printk("path_febe : %d\n", atomic_read(&stats->path_febe));
2837 printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
2838 printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2839 printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
2840 printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
2841 }
2842 ia_cmds.status = 0;
2843 break;
2844 case 0x9:
2845 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2846 for (i = 1; i <= iadev->num_rx_desc; i++)
2847 free_desc(_ia_dev[board], i);
2848 writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2849 iadev->reass_reg+REASS_MASK_REG);
2850 iadev->rxing = 1;
2851
2852 ia_cmds.status = 0;
2853 break;
2854
2855 case 0xb:
2856 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2857 ia_frontend_intr(iadev);
2858 break;
2859 case 0xa:
2860 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2861 {
2862 ia_cmds.status = 0;
2863 IADebugFlag = ia_cmds.maddr;
2864 printk("New debug option loaded\n");
2865 }
2866 break;
2867 default:
2868 ia_cmds.status = 0;
2869 break;
2870 }
2871 }
2872 break;
2873 default:
2874 break;
2875
2876 }
2877 return 0;
2878 }
2879
2880 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2881 IADEV *iadev;
2882 struct dle *wr_ptr;
2883 struct tx_buf_desc __iomem *buf_desc_ptr;
2884 int desc;
2885 int comp_code;
2886 int total_len;
2887 struct cpcs_trailer *trailer;
2888 struct ia_vcc *iavcc;
2889
2890 iadev = INPH_IA_DEV(vcc->dev);
2891 iavcc = INPH_IA_VCC(vcc);
2892 if (!iavcc->txing) {
2893 printk("discard packet on closed VC\n");
2894 if (vcc->pop)
2895 vcc->pop(vcc, skb);
2896 else
2897 dev_kfree_skb_any(skb);
2898 return 0;
2899 }
2900
2901 if (skb->len > iadev->tx_buf_sz - 8) {
2902 printk("Transmit size over tx buffer size\n");
2903 if (vcc->pop)
2904 vcc->pop(vcc, skb);
2905 else
2906 dev_kfree_skb_any(skb);
2907 return 0;
2908 }
2909 if ((unsigned long)skb->data & 3) {
2910 printk("Misaligned SKB\n");
2911 if (vcc->pop)
2912 vcc->pop(vcc, skb);
2913 else
2914 dev_kfree_skb_any(skb);
2915 return 0;
2916 }
2917 /* Get a descriptor number from our free descriptor queue
2918 We get the descr number from the TCQ now, since I am using
2919 the TCQ as a free buffer queue. Initially TCQ will be
2920 initialized with all the descriptors and is hence, full.
2921 */
2922 desc = get_desc (iadev, iavcc);
2923 if (desc == 0xffff)
2924 return 1;
2925 comp_code = desc >> 13;
2926 desc &= 0x1fff;
2927
2928 if ((desc == 0) || (desc > iadev->num_tx_desc))
2929 {
2930 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
2931 atomic_inc(&vcc->stats->tx);
2932 if (vcc->pop)
2933 vcc->pop(vcc, skb);
2934 else
2935 dev_kfree_skb_any(skb);
2936 return 0; /* return SUCCESS */
2937 }
2938
2939 if (comp_code)
2940 {
2941 IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
2942 desc, comp_code);)
2943 }
2944
2945 /* remember the desc and vcc mapping */
2946 iavcc->vc_desc_cnt++;
2947 iadev->desc_tbl[desc-1].iavcc = iavcc;
2948 iadev->desc_tbl[desc-1].txskb = skb;
2949 IA_SKB_STATE(skb) = 0;
2950
2951 iadev->ffL.tcq_rd += 2;
2952 if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2953 iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
2954 writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2955
2956 /* Put the descriptor number in the packet ready queue
2957 and put the updated write pointer in the DLE field
2958 */
2959 *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
2960
2961 iadev->ffL.prq_wr += 2;
2962 if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2963 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2964
2965 /* Figure out the exact length of the packet and padding required to
2966 make it aligned on a 48 byte boundary. */
2967 total_len = skb->len + sizeof(struct cpcs_trailer);
2968 total_len = ((total_len + 47) / 48) * 48;
2969 IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
2970
2971 /* Put the packet in a tx buffer */
2972 trailer = iadev->tx_buf[desc-1].cpcs;
2973 IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2974 skb, skb->data, skb->len, desc);)
2975 trailer->control = 0;
2976 /*big endian*/
2977 trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2978 trailer->crc32 = 0; /* not needed - dummy bytes */
2979
2980 /* Display the packet */
2981 IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
2982 skb->len, tcnter++);
2983 xdump(skb->data, skb->len, "TX: ");
2984 printk("\n");)
2985
2986 /* Build the buffer descriptor */
2987 buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2988 buf_desc_ptr += desc; /* points to the corresponding entry */
2989 buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
2990 /* Huh ? p.115 of users guide describes this as a read-only register */
2991 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2992 buf_desc_ptr->vc_index = vcc->vci;
2993 buf_desc_ptr->bytes = total_len;
2994
2995 if (vcc->qos.txtp.traffic_class == ATM_ABR)
2996 clear_lockup (vcc, iadev);
2997
2998 /* Build the DLE structure */
2999 wr_ptr = iadev->tx_dle_q.write;
3000 memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
3001 wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
3002 skb->len, DMA_TO_DEVICE);
3003 wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
3004 buf_desc_ptr->buf_start_lo;
3005 /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
3006 wr_ptr->bytes = skb->len;
3007
3008 /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3009 if ((wr_ptr->bytes >> 2) == 0xb)
3010 wr_ptr->bytes = 0x30;
3011
3012 wr_ptr->mode = TX_DLE_PSI;
3013 wr_ptr->prq_wr_ptr_data = 0;
3014
3015 /* end is not to be used for the DLE q */
3016 if (++wr_ptr == iadev->tx_dle_q.end)
3017 wr_ptr = iadev->tx_dle_q.start;
3018
3019 /* Build trailer dle */
3020 wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3021 wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
3022 buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3023
3024 wr_ptr->bytes = sizeof(struct cpcs_trailer);
3025 wr_ptr->mode = DMA_INT_ENABLE;
3026 wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3027
3028 /* end is not to be used for the DLE q */
3029 if (++wr_ptr == iadev->tx_dle_q.end)
3030 wr_ptr = iadev->tx_dle_q.start;
3031
3032 iadev->tx_dle_q.write = wr_ptr;
3033 ATM_DESC(skb) = vcc->vci;
3034 skb_queue_tail(&iadev->tx_dma_q, skb);
3035
3036 atomic_inc(&vcc->stats->tx);
3037 iadev->tx_pkt_cnt++;
3038 /* Increment transaction counter */
3039 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
3040
3041 #if 0
3042 /* add flow control logic */
3043 if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3044 if (iavcc->vc_desc_cnt > 10) {
3045 vcc->tx_quota = vcc->tx_quota * 3 / 4;
3046 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3047 iavcc->flow_inc = -1;
3048 iavcc->saved_tx_quota = vcc->tx_quota;
3049 } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3050 // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3051 printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3052 iavcc->flow_inc = 0;
3053 }
3054 }
3055 #endif
3056 IF_TX(printk("ia send done\n");)
3057 return 0;
3058 }
3059
3060 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3061 {
3062 IADEV *iadev;
3063 unsigned long flags;
3064
3065 iadev = INPH_IA_DEV(vcc->dev);
3066 if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3067 {
3068 if (!skb)
3069 printk(KERN_CRIT "null skb in ia_send\n");
3070 else dev_kfree_skb_any(skb);
3071 return -EINVAL;
3072 }
3073 spin_lock_irqsave(&iadev->tx_lock, flags);
3074 if (!test_bit(ATM_VF_READY,&vcc->flags)){
3075 dev_kfree_skb_any(skb);
3076 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3077 return -EINVAL;
3078 }
3079 ATM_SKB(skb)->vcc = vcc;
3080
3081 if (skb_peek(&iadev->tx_backlog)) {
3082 skb_queue_tail(&iadev->tx_backlog, skb);
3083 }
3084 else {
3085 if (ia_pkt_tx (vcc, skb)) {
3086 skb_queue_tail(&iadev->tx_backlog, skb);
3087 }
3088 }
3089 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3090 return 0;
3091
3092 }
3093
3094 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3095 {
3096 int left = *pos, n;
3097 char *tmpPtr;
3098 IADEV *iadev = INPH_IA_DEV(dev);
3099 if(!left--) {
3100 if (iadev->phy_type == FE_25MBIT_PHY) {
3101 n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
3102 return n;
3103 }
3104 if (iadev->phy_type == FE_DS3_PHY)
3105 n = sprintf(page, " Board Type : Iphase-ATM-DS3");
3106 else if (iadev->phy_type == FE_E3_PHY)
3107 n = sprintf(page, " Board Type : Iphase-ATM-E3");
3108 else if (iadev->phy_type == FE_UTP_OPTION)
3109 n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
3110 else
3111 n = sprintf(page, " Board Type : Iphase-ATM-OC3");
3112 tmpPtr = page + n;
3113 if (iadev->pci_map_size == 0x40000)
3114 n += sprintf(tmpPtr, "-1KVC-");
3115 else
3116 n += sprintf(tmpPtr, "-4KVC-");
3117 tmpPtr = page + n;
3118 if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3119 n += sprintf(tmpPtr, "1M \n");
3120 else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3121 n += sprintf(tmpPtr, "512K\n");
3122 else
3123 n += sprintf(tmpPtr, "128K\n");
3124 return n;
3125 }
3126 if (!left) {
3127 return sprintf(page, " Number of Tx Buffer: %u\n"
3128 " Size of Tx Buffer : %u\n"
3129 " Number of Rx Buffer: %u\n"
3130 " Size of Rx Buffer : %u\n"
3131 " Packets Received : %u\n"
3132 " Packets Transmitted: %u\n"
3133 " Cells Received : %u\n"
3134 " Cells Transmitted : %u\n"
3135 " Board Dropped Cells: %u\n"
3136 " Board Dropped Pkts : %u\n",
3137 iadev->num_tx_desc, iadev->tx_buf_sz,
3138 iadev->num_rx_desc, iadev->rx_buf_sz,
3139 iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
3140 iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3141 iadev->drop_rxcell, iadev->drop_rxpkt);
3142 }
3143 return 0;
3144 }
3145
/* ATM device operations exported to the core ATM layer for every board
   this driver registers (see atm_dev_register() in ia_init_one()). */
static const struct atmdev_ops ops = {
	.open = ia_open,
	.close = ia_close,
	.ioctl = ia_ioctl,
	.send = ia_send,
	.phy_put = ia_phy_put,
	.phy_get = ia_phy_get,
	.change_qos = ia_change_qos,
	.proc_read = ia_proc_read,
	.owner = THIS_MODULE,
};
3157
3158 static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3159 {
3160 struct atm_dev *dev;
3161 IADEV *iadev;
3162 int ret;
3163
3164 iadev = kzalloc_obj(*iadev);
3165 if (!iadev) {
3166 ret = -ENOMEM;
3167 goto err_out;
3168 }
3169
3170 iadev->pci = pdev;
3171
3172 IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3173 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3174 if (pci_enable_device(pdev)) {
3175 ret = -ENODEV;
3176 goto err_out_free_iadev;
3177 }
3178 dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
3179 if (!dev) {
3180 ret = -ENOMEM;
3181 goto err_out_disable_dev;
3182 }
3183 dev->dev_data = iadev;
3184 IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3185 IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3186 iadev->LineRate);)
3187
3188 pci_set_drvdata(pdev, dev);
3189
3190 ia_dev[iadev_count] = iadev;
3191 _ia_dev[iadev_count] = dev;
3192 iadev_count++;
3193 if (ia_init(dev) || ia_start(dev)) {
3194 IF_INIT(printk("IA register failed!\n");)
3195 iadev_count--;
3196 ia_dev[iadev_count] = NULL;
3197 _ia_dev[iadev_count] = NULL;
3198 ret = -EINVAL;
3199 goto err_out_deregister_dev;
3200 }
3201 IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3202
3203 iadev->next_board = ia_boards;
3204 ia_boards = dev;
3205
3206 return 0;
3207
3208 err_out_deregister_dev:
3209 atm_dev_deregister(dev);
3210 err_out_disable_dev:
3211 pci_disable_device(pdev);
3212 err_out_free_iadev:
3213 kfree(iadev);
3214 err_out:
3215 return ret;
3216 }
3217
/*
 * ia_remove_one - PCI remove callback.
 *
 * Tears down one adapter in the reverse order of ia_init_one()/ia_start():
 * quiesce the PHY first so no further interrupts arrive, then detach from
 * the ATM core, then release hardware and memory resources.  The ordering
 * below is deliberate; do not reorder.
 */
static void ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable phy interrupts */
	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
				SUNI_RSOP_CIE);
	/* brief settle time after the register write before stopping the phy */
	udelay(1);

	if (dev->phy && dev->phy->stop)
		dev->phy->stop(dev);

	/* De-register device */
	free_irq(iadev->irq, dev);
	/* drop this board from the module-global tracking arrays */
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

	/* unmap registers and release the PCI device */
	iounmap(iadev->base);
	pci_disable_device(pdev);

	/* free the rx/tx buffer pools, then the private state itself */
	ia_free_rx(iadev);
	ia_free_tx(iadev);

	kfree(iadev);
}
3247
3248 static const struct pci_device_id ia_pci_tbl[] = {
3249 { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3250 { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3251 { 0,}
3252 };
3253 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3254
/* PCI driver glue: binds ia_init_one()/ia_remove_one() to the IDs above. */
static struct pci_driver ia_driver = {
	.name = DEV_LABEL,
	.id_table = ia_pci_tbl,
	.probe = ia_init_one,
	.remove = ia_remove_one,
};
3261
3262 static int __init ia_module_init(void)
3263 {
3264 int ret;
3265
3266 ret = pci_register_driver(&ia_driver);
3267 if (ret >= 0) {
3268 ia_timer.expires = jiffies + 3*HZ;
3269 add_timer(&ia_timer);
3270 } else
3271 printk(KERN_ERR DEV_LABEL ": no adapter found\n");
3272 return ret;
3273 }
3274
/*
 * ia_module_exit - module exit point.
 *
 * Unregisters the PCI driver (which runs ia_remove_one() for every bound
 * board) and then waits for any in-flight run of the housekeeping timer
 * to finish before the module text goes away.
 */
static void __exit ia_module_exit(void)
{
	pci_unregister_driver(&ia_driver);

	timer_delete_sync(&ia_timer);
}
3281
3282 module_init(ia_module_init);
3283 module_exit(ia_module_exit);
3284