xref: /linux/drivers/atm/fore200e.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*
2   A FORE Systems 200E-series driver for ATM on Linux.
3   Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
4 
5   Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
6 
7   This driver simultaneously supports PCA-200E and SBA-200E adapters
8   on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
9 
10   This program is free software; you can redistribute it and/or modify
11   it under the terms of the GNU General Public License as published by
12   the Free Software Foundation; either version 2 of the License, or
13   (at your option) any later version.
14 
15   This program is distributed in the hope that it will be useful,
16   but WITHOUT ANY WARRANTY; without even the implied warranty of
17   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18   GNU General Public License for more details.
19 
20   You should have received a copy of the GNU General Public License
21   along with this program; if not, write to the Free Software
22   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23 */
24 
25 
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
29 #include <linux/capability.h>
30 #include <linux/interrupt.h>
31 #include <linux/bitops.h>
32 #include <linux/pci.h>
33 #include <linux/module.h>
34 #include <linux/atmdev.h>
35 #include <linux/sonet.h>
36 #include <linux/atm_suni.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/delay.h>
39 #include <linux/firmware.h>
40 #include <asm/io.h>
41 #include <asm/string.h>
42 #include <asm/page.h>
43 #include <asm/irq.h>
44 #include <asm/dma.h>
45 #include <asm/byteorder.h>
46 #include <asm/uaccess.h>
47 #include <linux/atomic.h>
48 
49 #ifdef CONFIG_SBUS
50 #include <linux/of.h>
51 #include <linux/of_device.h>
52 #include <asm/idprom.h>
53 #include <asm/openprom.h>
54 #include <asm/oplib.h>
55 #include <asm/pgtable.h>
56 #endif
57 
58 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
59 #define FORE200E_USE_TASKLET
60 #endif
61 
62 #if 0 /* enable the debugging code of the buffer supply queues */
63 #define FORE200E_BSQ_DEBUG
64 #endif
65 
66 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
67 #define FORE200E_52BYTE_AAL0_SDU
68 #endif
69 
70 #include "fore200e.h"
71 #include "suni.h"
72 
73 #define FORE200E_VERSION "0.3e"
74 
75 #define FORE200E         "fore200e: "
76 
77 #if 0 /* override .config */
78 #define CONFIG_ATM_FORE200E_DEBUG 1
79 #endif
80 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
81 #define DPRINTK(level, format, args...)  do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
82                                                   printk(FORE200E format, ##args); } while (0)
83 #else
84 #define DPRINTK(level, format, args...)  do {} while (0)
85 #endif
86 
87 
88 #define FORE200E_ALIGN(addr, alignment) \
89         ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
90 
91 #define FORE200E_DMA_INDEX(dma_addr, type, index)  ((dma_addr) + (index) * sizeof(type))
92 
93 #define FORE200E_INDEX(virt_addr, type, index)     (&((type *)(virt_addr))[ index ])
94 
95 #define FORE200E_NEXT_ENTRY(index, modulo)         (index = ((index) + 1) % (modulo))
96 
97 #if 1
98 #define ASSERT(expr)     if (!(expr)) { \
99 			     printk(FORE200E "assertion failed! %s[%d]: %s\n", \
100 				    __func__, __LINE__, #expr); \
101 			     panic(FORE200E "%s", __func__); \
102 			 }
103 #else
104 #define ASSERT(expr)     do {} while (0)
105 #endif
106 
107 
108 static const struct atmdev_ops   fore200e_ops;
109 static const struct fore200e_bus fore200e_bus[];
110 
111 static LIST_HEAD(fore200e_boards);
112 
113 
114 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
115 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
116 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
117 
118 
119 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
120     { BUFFER_S1_NBR, BUFFER_L1_NBR },
121     { BUFFER_S2_NBR, BUFFER_L2_NBR }
122 };
123 
124 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
125     { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
126     { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
127 };
128 
129 
130 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
131 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
132 #endif
133 
134 
135 #if 0 /* currently unused */
136 static int
137 fore200e_fore2atm_aal(enum fore200e_aal aal)
138 {
139     switch(aal) {
140     case FORE200E_AAL0:  return ATM_AAL0;
141     case FORE200E_AAL34: return ATM_AAL34;
142     case FORE200E_AAL5:  return ATM_AAL5;
143     }
144 
145     return -EINVAL;
146 }
147 #endif
148 
149 
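/* map an ATM layer AAL identifier to its firmware counterpart (AAL1 and AAL2 are mapped to AAL5) */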
150 static enum fore200e_aal
151 fore200e_atm2fore_aal(int aal)
152 {
153     switch(aal) {
154     case ATM_AAL0:  return FORE200E_AAL0;
155     case ATM_AAL34: return FORE200E_AAL34;
156     case ATM_AAL1:
157     case ATM_AAL2:
158     case ATM_AAL5:  return FORE200E_AAL5;
159     }
160 
161     return -EINVAL;
162 }
163 
164 
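/* return the decimal representation of an IRQ number (uses a static buffer) */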
165 static char*
166 fore200e_irq_itoa(int irq)
167 {
168     static char str[8];
169     sprintf(str, "%d", irq);
170     return str;
171 }
172 
173 
174 /* allocate and align a chunk of memory intended to hold the data being exchanged
175    between the driver and the adapter (using streaming DVMA) */
176 
177 static int
178 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
179 {
180     unsigned long offset = 0;
181 
182     if (alignment <= sizeof(int))
183 	alignment = 0;
184 
185     chunk->alloc_size = size + alignment;
186     chunk->align_size = size;
187     chunk->direction  = direction;
188 
189     chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
190     if (chunk->alloc_addr == NULL)
191 	return -ENOMEM;
192 
193     if (alignment > 0)
194 	offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
195 
196     chunk->align_addr = chunk->alloc_addr + offset;
197 
198     chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
199 
200     return 0;
201 }
202 
203 
204 /* free a chunk of memory */
205 
206 static void
207 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
208 {
209     fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
210 
211     kfree(chunk->alloc_addr);
212 }
213 
214 
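/* busy-wait for approximately 'msecs' milliseconds */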
215 static void
216 fore200e_spin(int msecs)
217 {
218     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
219     while (time_before(jiffies, timeout));
220 }
221 
222 
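/* poll a status word in host memory until it matches the expected value, an error is flagged,
   or the timeout expires; returns non-zero on success */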
223 static int
224 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
225 {
226     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
227     int           ok;
228 
229     mb();
230     do {
231 	if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
232 	    break;
233 
234     } while (time_before(jiffies, timeout));
235 
236 #if 1
237     if (!ok) {
238 	printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
239 	       *addr, val);
240     }
241 #endif
242 
243     return ok;
244 }
245 
246 
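/* poll a device register until it reads back the expected value or the timeout expires;
   returns non-zero on success */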
247 static int
248 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
249 {
250     unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
251     int           ok;
252 
253     do {
254 	if ((ok = (fore200e->bus->read(addr) == val)))
255 	    break;
256 
257     } while (time_before(jiffies, timeout));
258 
259 #if 1
260     if (!ok) {
261 	printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
262 	       fore200e->bus->read(addr), val);
263     }
264 #endif
265 
266     return ok;
267 }
268 
269 
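/* free the data chunk of every allocated rx buffer, for all buffer schemes and magnitudes */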
270 static void
271 fore200e_free_rx_buf(struct fore200e* fore200e)
272 {
273     int scheme, magn, nbr;
274     struct buffer* buffer;
275 
276     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
277 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
278 
279 	    if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
280 
281 		for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
282 
283 		    struct chunk* data = &buffer[ nbr ].data;
284 
285 		    if (data->alloc_addr != NULL)
286 			fore200e_chunk_free(fore200e, data);
287 		}
288 	    }
289 	}
290     }
291 }
292 
293 
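/* release the status and RBD block DMA chunks of every buffer supply queue */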
294 static void
295 fore200e_uninit_bs_queue(struct fore200e* fore200e)
296 {
297     int scheme, magn;
298 
299     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
300 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
301 
302 	    struct chunk* status    = &fore200e->host_bsq[ scheme ][ magn ].status;
303 	    struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
304 
305 	    if (status->alloc_addr)
306 		fore200e->bus->dma_chunk_free(fore200e, status);
307 
308 	    if (rbd_block->alloc_addr)
309 		fore200e->bus->dma_chunk_free(fore200e, rbd_block);
310 	}
311     }
312 }
313 
314 
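/* cold-start the adapter; if 'diag' is set, wait for the on-board self-test to complete */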
315 static int
316 fore200e_reset(struct fore200e* fore200e, int diag)
317 {
318     int ok;
319 
320     fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
321 
322     fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
323 
324     fore200e->bus->reset(fore200e);
325 
326     if (diag) {
327 	ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
328 	if (ok == 0) {
329 
330 	    printk(FORE200E "device %s self-test failed\n", fore200e->name);
331 	    return -ENODEV;
332 	}
333 
334 	printk(FORE200E "device %s self-test passed\n", fore200e->name);
335 
336 	fore200e->state = FORE200E_STATE_RESET;
337     }
338 
339     return 0;
340 }
341 
342 
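/* release the resources acquired so far, in reverse order of initialization
   (the switch below relies on fall-through) */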
343 static void
344 fore200e_shutdown(struct fore200e* fore200e)
345 {
346     printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
347 	   fore200e->name, fore200e->phys_base,
348 	   fore200e_irq_itoa(fore200e->irq));
349 
350     if (fore200e->state > FORE200E_STATE_RESET) {
351 	/* first, reset the board to prevent further interrupts or data transfers */
352 	fore200e_reset(fore200e, 0);
353     }
354 
355     /* then, release all allocated resources */
356     switch(fore200e->state) {
357 
358     case FORE200E_STATE_COMPLETE:
359 	kfree(fore200e->stats);
360 
361     case FORE200E_STATE_IRQ:
362 	free_irq(fore200e->irq, fore200e->atm_dev);
363 
364     case FORE200E_STATE_ALLOC_BUF:
365 	fore200e_free_rx_buf(fore200e);
366 
367     case FORE200E_STATE_INIT_BSQ:
368 	fore200e_uninit_bs_queue(fore200e);
369 
370     case FORE200E_STATE_INIT_RXQ:
371 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
372 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
373 
374     case FORE200E_STATE_INIT_TXQ:
375 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
376 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
377 
378     case FORE200E_STATE_INIT_CMDQ:
379 	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
380 
381     case FORE200E_STATE_INITIALIZE:
382 	/* nothing to do for that state */
383 
384     case FORE200E_STATE_START_FW:
385 	/* nothing to do for that state */
386 
387     case FORE200E_STATE_RESET:
388 	/* nothing to do for that state */
389 
390     case FORE200E_STATE_MAP:
391 	fore200e->bus->unmap(fore200e);
392 
393     case FORE200E_STATE_CONFIGURE:
394 	/* nothing to do for that state */
395 
396     case FORE200E_STATE_REGISTER:
397 	/* XXX shouldn't we *start* by deregistering the device? */
398 	atm_dev_deregister(fore200e->atm_dev);
399 
400     case FORE200E_STATE_BLANK:
401 	/* nothing to do for that state */
402 	break;
403     }
404 }
405 
406 
407 #ifdef CONFIG_PCI
408 
409 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
410 {
411     /* on big-endian hosts, the board is configured to convert
412        the endianness of slave RAM accesses */
413     return le32_to_cpu(readl(addr));
414 }
415 
416 
417 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
418 {
419     /* on big-endian hosts, the board is configured to convert
420        the endianness of slave RAM accesses */
421     writel(cpu_to_le32(val), addr);
422 }
423 
424 
425 static u32
426 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
427 {
428     u32 dma_addr = dma_map_single(&((struct pci_dev *) fore200e->bus_dev)->dev, virt_addr, size, direction);
429 
430     DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
431 	    virt_addr, size, direction, dma_addr);
432 
433     return dma_addr;
434 }
435 
436 
437 static void
438 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
439 {
440     DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
441 	    dma_addr, size, direction);
442 
443     dma_unmap_single(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
444 }
445 
446 
447 static void
448 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
449 {
450     DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
451 
452     dma_sync_single_for_cpu(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
453 }
454 
455 static void
456 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
457 {
458     DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
459 
460     dma_sync_single_for_device(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
461 }
462 
463 
464 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
465    (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
466 
467 static int
468 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
469 			     int size, int nbr, int alignment)
470 {
471     /* returned chunks are page-aligned */
472     chunk->alloc_size = size * nbr;
473     chunk->alloc_addr = dma_alloc_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev,
474 					   chunk->alloc_size,
475 					   &chunk->dma_addr,
476 					   GFP_KERNEL);
477 
478     if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
479 	return -ENOMEM;
480 
481     chunk->align_addr = chunk->alloc_addr;
482 
483     return 0;
484 }
485 
486 
487 /* free a DMA consistent chunk of memory */
488 
489 static void
490 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
491 {
492     dma_free_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev,
493 			chunk->alloc_size,
494 			chunk->alloc_addr,
495 			chunk->dma_addr);
496 }
497 
498 
499 static int
500 fore200e_pca_irq_check(struct fore200e* fore200e)
501 {
502     /* this is a 1 bit register */
503     int irq_posted = readl(fore200e->regs.pca.psr);
504 
505 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
506     if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
507 	DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
508     }
509 #endif
510 
511     return irq_posted;
512 }
513 
514 
515 static void
516 fore200e_pca_irq_ack(struct fore200e* fore200e)
517 {
518     writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
519 }
520 
521 
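/* reset the PCA-200E by pulsing the reset bit of the host control register */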
522 static void
523 fore200e_pca_reset(struct fore200e* fore200e)
524 {
525     writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
526     fore200e_spin(10);
527     writel(0, fore200e->regs.pca.hcr);
528 }
529 
530 
531 static int fore200e_pca_map(struct fore200e* fore200e)
532 {
533     DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
534 
535     fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
536 
537     if (fore200e->virt_base == NULL) {
538 	printk(FORE200E "can't map device %s\n", fore200e->name);
539 	return -EFAULT;
540     }
541 
542     DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
543 
544     /* gain access to the PCA specific registers  */
545     fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
546     fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
547     fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
548 
549     fore200e->state = FORE200E_STATE_MAP;
550     return 0;
551 }
552 
553 
554 static void
555 fore200e_pca_unmap(struct fore200e* fore200e)
556 {
557     DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
558 
559     if (fore200e->virt_base != NULL)
560 	iounmap(fore200e->virt_base);
561 }
562 
563 
564 static int fore200e_pca_configure(struct fore200e *fore200e)
565 {
566     struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
567     u8              master_ctrl, latency;
568 
569     DPRINTK(2, "device %s being configured\n", fore200e->name);
570 
571     if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
572 	printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
573 	return -EIO;
574     }
575 
576     pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
577 
578     master_ctrl = master_ctrl
579 #if defined(__BIG_ENDIAN)
580 	/* request the PCA board to convert the endianess of slave RAM accesses */
581 	/* request the PCA board to convert the endianness of slave RAM accesses */
582 #endif
583 #if 0
584         | PCA200E_CTRL_DIS_CACHE_RD
585         | PCA200E_CTRL_DIS_WRT_INVAL
586         | PCA200E_CTRL_ENA_CONT_REQ_MODE
587         | PCA200E_CTRL_2_CACHE_WRT_INVAL
588 #endif
589 	| PCA200E_CTRL_LARGE_PCI_BURSTS;
590 
591     pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
592 
593     /* raise latency from 32 (default) to 192, as this seems to prevent NIC
594        lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
595        this may impact the performance of other PCI devices on the same bus, though */
596     latency = 192;
597     pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
598 
599     fore200e->state = FORE200E_STATE_CONFIGURE;
600     return 0;
601 }
602 
603 
604 static int __init
605 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
606 {
607     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
608     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
609     struct prom_opcode      opcode;
610     int                     ok;
611     u32                     prom_dma;
612 
613     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
614 
615     opcode.opcode = OPCODE_GET_PROM;
616     opcode.pad    = 0;
617 
618     prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
619 
620     fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
621 
622     *entry->status = STATUS_PENDING;
623 
624     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
625 
626     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
627 
628     *entry->status = STATUS_FREE;
629 
630     fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
631 
632     if (ok == 0) {
633 	printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
634 	return -EIO;
635     }
636 
637 #if defined(__BIG_ENDIAN)
638 
639 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
640 
641     /* MAC address is stored as little-endian */
642     swap_here(&prom->mac_addr[0]);
643     swap_here(&prom->mac_addr[4]);
644 #endif
645 
646     return 0;
647 }
648 
649 
650 static int
651 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
652 {
653     struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
654 
655     return sprintf(page, "   PCI bus/slot/function:\t%d/%d/%d\n",
656 		   pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
657 }
658 
659 #endif /* CONFIG_PCI */
660 
661 
662 #ifdef CONFIG_SBUS
663 
664 static u32 fore200e_sba_read(volatile u32 __iomem *addr)
665 {
666     return sbus_readl(addr);
667 }
668 
669 static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
670 {
671     sbus_writel(val, addr);
672 }
673 
674 static u32 fore200e_sba_dma_map(struct fore200e *fore200e, void* virt_addr, int size, int direction)
675 {
676 	struct platform_device *op = fore200e->bus_dev;
677 	u32 dma_addr;
678 
679 	dma_addr = dma_map_single(&op->dev, virt_addr, size, direction);
680 
681 	DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
682 		virt_addr, size, direction, dma_addr);
683 
684 	return dma_addr;
685 }
686 
687 static void fore200e_sba_dma_unmap(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
688 {
689 	struct platform_device *op = fore200e->bus_dev;
690 
691 	DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
692 		dma_addr, size, direction);
693 
694 	dma_unmap_single(&op->dev, dma_addr, size, direction);
695 }
696 
697 static void fore200e_sba_dma_sync_for_cpu(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
698 {
699 	struct platform_device *op = fore200e->bus_dev;
700 
701 	DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
702 
703 	dma_sync_single_for_cpu(&op->dev, dma_addr, size, direction);
704 }
705 
706 static void fore200e_sba_dma_sync_for_device(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
707 {
708 	struct platform_device *op = fore200e->bus_dev;
709 
710 	DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
711 
712 	dma_sync_single_for_device(&op->dev, dma_addr, size, direction);
713 }
714 
715 /* Allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
716  * (to hold descriptors, status, queues, etc.) shared by the driver and the adapter.
717  */
718 static int fore200e_sba_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
719 					int size, int nbr, int alignment)
720 {
721 	struct platform_device *op = fore200e->bus_dev;
722 
723 	chunk->alloc_size = chunk->align_size = size * nbr;
724 
725 	/* returned chunks are page-aligned */
726 	chunk->alloc_addr = dma_alloc_coherent(&op->dev, chunk->alloc_size,
727 					       &chunk->dma_addr, GFP_ATOMIC);
728 
729 	if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
730 		return -ENOMEM;
731 
732 	chunk->align_addr = chunk->alloc_addr;
733 
734 	return 0;
735 }
736 
737 /* free a DVMA consistent chunk of memory */
738 static void fore200e_sba_dma_chunk_free(struct fore200e *fore200e, struct chunk *chunk)
739 {
740 	struct platform_device *op = fore200e->bus_dev;
741 
742 	dma_free_coherent(&op->dev, chunk->alloc_size,
743 			  chunk->alloc_addr, chunk->dma_addr);
744 }
745 
746 static void fore200e_sba_irq_enable(struct fore200e *fore200e)
747 {
748 	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
749 	fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
750 }
751 
752 static int fore200e_sba_irq_check(struct fore200e *fore200e)
753 {
754 	return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
755 }
756 
757 static void fore200e_sba_irq_ack(struct fore200e *fore200e)
758 {
759 	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
760 	fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
761 }
762 
763 static void fore200e_sba_reset(struct fore200e *fore200e)
764 {
765 	fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
766 	fore200e_spin(10);
767 	fore200e->bus->write(0, fore200e->regs.sba.hcr);
768 }
769 
770 static int __init fore200e_sba_map(struct fore200e *fore200e)
771 {
772 	struct platform_device *op = fore200e->bus_dev;
773 	unsigned int bursts;
774 
775 	/* gain access to the SBA specific registers  */
776 	fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
777 	fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
778 	fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
779 	fore200e->virt_base    = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
780 
781 	if (!fore200e->virt_base) {
782 		printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
783 		return -EFAULT;
784 	}
785 
786 	DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
787 
788 	fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
789 
790 	/* get the supported DVMA burst sizes */
791 	bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
792 
793 	if (sbus_can_dma_64bit())
794 		sbus_set_sbus64(&op->dev, bursts);
795 
796 	fore200e->state = FORE200E_STATE_MAP;
797 	return 0;
798 }
799 
800 static void fore200e_sba_unmap(struct fore200e *fore200e)
801 {
802 	struct platform_device *op = fore200e->bus_dev;
803 
804 	of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
805 	of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
806 	of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
807 	of_iounmap(&op->resource[3], fore200e->virt_base,    SBA200E_RAM_LENGTH);
808 }
809 
810 static int __init fore200e_sba_configure(struct fore200e *fore200e)
811 {
812 	fore200e->state = FORE200E_STATE_CONFIGURE;
813 	return 0;
814 }
815 
816 static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
817 {
818 	struct platform_device *op = fore200e->bus_dev;
819 	const u8 *prop;
820 	int len;
821 
822 	prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
823 	if (!prop)
824 		return -ENODEV;
825 	memcpy(&prom->mac_addr[4], prop, 4);
826 
827 	prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
828 	if (!prop)
829 		return -ENODEV;
830 	memcpy(&prom->mac_addr[2], prop, 4);
831 
832 	prom->serial_number = of_getintprop_default(op->dev.of_node,
833 						    "serialnumber", 0);
834 	prom->hw_revision = of_getintprop_default(op->dev.of_node,
835 						  "promversion", 0);
836 
837 	return 0;
838 }
839 
840 static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
841 {
842 	struct platform_device *op = fore200e->bus_dev;
843 	const struct linux_prom_registers *regs;
844 
845 	regs = of_get_property(op->dev.of_node, "reg", NULL);
846 
847 	return sprintf(page, "   SBUS slot/device:\t\t%d/'%s'\n",
848 		       (regs ? regs->which_io : 0), op->dev.of_node->name);
849 }
850 #endif /* CONFIG_SBUS */
851 
852 
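/* reclaim completed tx queue entries: unmap their DMA buffers, release the skbs and update the tx statistics */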
853 static void
854 fore200e_tx_irq(struct fore200e* fore200e)
855 {
856     struct host_txq*        txq = &fore200e->host_txq;
857     struct host_txq_entry*  entry;
858     struct atm_vcc*         vcc;
859     struct fore200e_vc_map* vc_map;
860 
861     if (fore200e->host_txq.txing == 0)
862 	return;
863 
864     for (;;) {
865 
866 	entry = &txq->host_entry[ txq->tail ];
867 
868         if ((*entry->status & STATUS_COMPLETE) == 0) {
869 	    break;
870 	}
871 
872 	DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
873 		entry, txq->tail, entry->vc_map, entry->skb);
874 
875 	/* free copy of misaligned data */
876 	kfree(entry->data);
877 
878 	/* remove DMA mapping */
879 	fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
880 				 DMA_TO_DEVICE);
881 
882 	vc_map = entry->vc_map;
883 
884 	/* vcc closed since the time the entry was submitted for tx? */
885 	if ((vc_map->vcc == NULL) ||
886 	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
887 
888 	    DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
889 		    fore200e->atm_dev->number);
890 
891 	    dev_kfree_skb_any(entry->skb);
892 	}
893 	else {
894 	    ASSERT(vc_map->vcc);
895 
896 	    /* vcc closed then immediately re-opened? */
897 	    if (vc_map->incarn != entry->incarn) {
898 
899 		/* when a vcc is closed, some PDUs may be still pending in the tx queue.
900 		   if the same vcc is immediately re-opened, those pending PDUs must
901 		   not be popped after the completion of their emission, as they refer
902 		   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
903 		   would be decremented by the size of the (unrelated) skb, possibly
904 		   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
905 		   we thus bind the tx entry to the current incarnation of the vcc
906 		   when the entry is submitted for tx. When the tx later completes,
907 		   if the incarnation number of the tx entry does not match the one
908 		   of the vcc, then this implies that the vcc has been closed then re-opened.
909 		   we thus just drop the skb here. */
910 
911 		DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
912 			fore200e->atm_dev->number);
913 
914 		dev_kfree_skb_any(entry->skb);
915 	    }
916 	    else {
917 		vcc = vc_map->vcc;
918 		ASSERT(vcc);
919 
920 		/* notify tx completion */
921 		if (vcc->pop) {
922 		    vcc->pop(vcc, entry->skb);
923 		}
924 		else {
925 		    dev_kfree_skb_any(entry->skb);
926 		}
927 #if 1
928 		/* race fixed by the above incarnation mechanism, but... */
929 		if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
930 		    atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
931 		}
932 #endif
933 		/* check error condition */
934 		if (*entry->status & STATUS_ERROR)
935 		    atomic_inc(&vcc->stats->tx_err);
936 		else
937 		    atomic_inc(&vcc->stats->tx);
938 	    }
939 	}
940 
941 	*entry->status = STATUS_FREE;
942 
943 	fore200e->host_txq.txing--;
944 
945 	FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
946     }
947 }
948 
949 
950 #ifdef FORE200E_BSQ_DEBUG
951 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
952 {
953     struct buffer* buffer;
954     int count = 0;
955 
956     buffer = bsq->freebuf;
957     while (buffer) {
958 
959 	if (buffer->supplied) {
960 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
961 		   where, scheme, magn, buffer->index);
962 	}
963 
964 	if (buffer->magn != magn) {
965 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
966 		   where, scheme, magn, buffer->index, buffer->magn);
967 	}
968 
969 	if (buffer->scheme != scheme) {
970 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
971 		   where, scheme, magn, buffer->index, buffer->scheme);
972 	}
973 
974 	if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
975 	    printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
976 		   where, scheme, magn, buffer->index);
977 	}
978 
979 	count++;
980 	buffer = buffer->next;
981     }
982 
983     if (count != bsq->freebuf_count) {
984 	printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
985 	       where, scheme, magn, count, bsq->freebuf_count);
986     }
987     return 0;
988 }
989 #endif
990 
991 
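/* pass blocks of free rx buffers to the adapter whenever a buffer supply queue holds at least RBD_BLK_SIZE of them */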
992 static void
993 fore200e_supply(struct fore200e* fore200e)
994 {
995     int  scheme, magn, i;
996 
997     struct host_bsq*       bsq;
998     struct host_bsq_entry* entry;
999     struct buffer*         buffer;
1000 
1001     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1002 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1003 
1004 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
1005 
1006 #ifdef FORE200E_BSQ_DEBUG
1007 	    bsq_audit(1, bsq, scheme, magn);
1008 #endif
1009 	    while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1010 
1011 		DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1012 			RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1013 
1014 		entry = &bsq->host_entry[ bsq->head ];
1015 
1016 		for (i = 0; i < RBD_BLK_SIZE; i++) {
1017 
1018 		    /* take the first buffer in the free buffer list */
1019 		    buffer = bsq->freebuf;
1020 		    if (!buffer) {
1021 			printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1022 			       scheme, magn, bsq->freebuf_count);
1023 			return;
1024 		    }
1025 		    bsq->freebuf = buffer->next;
1026 
1027 #ifdef FORE200E_BSQ_DEBUG
1028 		    if (buffer->supplied)
1029 			printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
1030 			       scheme, magn, buffer->index);
1031 		    buffer->supplied = 1;
1032 #endif
1033 		    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
1034 		    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
1035 		}
1036 
1037 		FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
1038 
1039 		/* decrease the number of free rx buffers accordingly */
1040 		bsq->freebuf_count -= RBD_BLK_SIZE;
1041 
1042 		*entry->status = STATUS_PENDING;
1043 		fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
1044 	    }
1045 	}
1046     }
1047 }
1048 
1049 
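/* reassemble a received PDU from its rx segments into an skb and push it to the ATM layer */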
1050 static int
1051 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
1052 {
1053     struct sk_buff*      skb;
1054     struct buffer*       buffer;
1055     struct fore200e_vcc* fore200e_vcc;
1056     int                  i, pdu_len = 0;
1057 #ifdef FORE200E_52BYTE_AAL0_SDU
1058     u32                  cell_header = 0;
1059 #endif
1060 
1061     ASSERT(vcc);
1062 
1063     fore200e_vcc = FORE200E_VCC(vcc);
1064     ASSERT(fore200e_vcc);
1065 
1066 #ifdef FORE200E_52BYTE_AAL0_SDU
1067     if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
1068 
1069 	cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
1070 	              (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
1071                       (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
1072                       (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
1073                        rpd->atm_header.clp;
1074 	pdu_len = 4;
1075     }
1076 #endif
1077 
1078     /* compute total PDU length */
1079     for (i = 0; i < rpd->nseg; i++)
1080 	pdu_len += rpd->rsd[ i ].length;
1081 
1082     skb = alloc_skb(pdu_len, GFP_ATOMIC);
1083     if (skb == NULL) {
1084 	DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1085 
1086 	atomic_inc(&vcc->stats->rx_drop);
1087 	return -ENOMEM;
1088     }
1089 
1090     __net_timestamp(skb);
1091 
1092 #ifdef FORE200E_52BYTE_AAL0_SDU
1093     if (cell_header) {
1094 	*((u32*)skb_put(skb, 4)) = cell_header;
1095     }
1096 #endif
1097 
1098     /* reassemble segments */
1099     for (i = 0; i < rpd->nseg; i++) {
1100 
1101 	/* rebuild rx buffer address from rsd handle */
1102 	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1103 
1104 	/* Make device DMA transfer visible to CPU.  */
1105 	fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1106 
1107 	memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
1108 
1109 	/* Now let the device get at it again.  */
1110 	fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1111     }
1112 
1113     DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1114 
1115     if (pdu_len < fore200e_vcc->rx_min_pdu)
1116 	fore200e_vcc->rx_min_pdu = pdu_len;
1117     if (pdu_len > fore200e_vcc->rx_max_pdu)
1118 	fore200e_vcc->rx_max_pdu = pdu_len;
1119     fore200e_vcc->rx_pdu++;
1120 
1121     /* push PDU */
1122     if (atm_charge(vcc, skb->truesize) == 0) {
1123 
1124 	DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1125 		vcc->itf, vcc->vpi, vcc->vci);
1126 
1127 	dev_kfree_skb_any(skb);
1128 
1129 	atomic_inc(&vcc->stats->rx_drop);
1130 	return -ENOMEM;
1131     }
1132 
1133     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1134 
1135     vcc->push(vcc, skb);
1136     atomic_inc(&vcc->stats->rx);
1137 
1138     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1139 
1140     return 0;
1141 }
1142 
1143 
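/* return the rx buffers referenced by a receive PDU descriptor to the free list of their buffer supply queue */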
1144 static void
1145 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1146 {
1147     struct host_bsq* bsq;
1148     struct buffer*   buffer;
1149     int              i;
1150 
1151     for (i = 0; i < rpd->nseg; i++) {
1152 
1153 	/* rebuild rx buffer address from rsd handle */
1154 	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1155 
1156 	bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1157 
1158 #ifdef FORE200E_BSQ_DEBUG
1159 	bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1160 
1161 	if (buffer->supplied == 0)
1162 	    printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1163 		   buffer->scheme, buffer->magn, buffer->index);
1164 	buffer->supplied = 0;
1165 #endif
1166 
1167 	/* re-insert the buffer into the free buffer list */
1168 	buffer->next = bsq->freebuf;
1169 	bsq->freebuf = buffer;
1170 
1171 	/* then increment the number of free rx buffers */
1172 	bsq->freebuf_count++;
1173     }
1174 }
1175 
1176 
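/* process completed rx queue entries: deliver valid PDUs, recycle their buffers and acknowledge them to the adapter */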
1177 static void
1178 fore200e_rx_irq(struct fore200e* fore200e)
1179 {
1180     struct host_rxq*        rxq = &fore200e->host_rxq;
1181     struct host_rxq_entry*  entry;
1182     struct atm_vcc*         vcc;
1183     struct fore200e_vc_map* vc_map;
1184 
1185     for (;;) {
1186 
1187 	entry = &rxq->host_entry[ rxq->head ];
1188 
1189 	/* no more received PDUs */
1190 	if ((*entry->status & STATUS_COMPLETE) == 0)
1191 	    break;
1192 
1193 	vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1194 
1195 	if ((vc_map->vcc == NULL) ||
1196 	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1197 
1198 	    DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1199 		    fore200e->atm_dev->number,
1200 		    entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1201 	}
1202 	else {
1203 	    vcc = vc_map->vcc;
1204 	    ASSERT(vcc);
1205 
1206 	    if ((*entry->status & STATUS_ERROR) == 0) {
1207 
1208 		fore200e_push_rpd(fore200e, vcc, entry->rpd);
1209 	    }
1210 	    else {
1211 		DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1212 			fore200e->atm_dev->number,
1213 			entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1214 		atomic_inc(&vcc->stats->rx_err);
1215 	    }
1216 	}
1217 
1218 	FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1219 
1220 	fore200e_collect_rpd(fore200e, entry->rpd);
1221 
1222 	/* rewrite the rpd address to ack the received PDU */
1223 	fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1224 	*entry->status = STATUS_FREE;
1225 
1226 	fore200e_supply(fore200e);
1227     }
1228 }
1229 
1230 
1231 #ifndef FORE200E_USE_TASKLET
1232 static void
1233 fore200e_irq(struct fore200e* fore200e)
1234 {
1235     unsigned long flags;
1236 
1237     spin_lock_irqsave(&fore200e->q_lock, flags);
1238     fore200e_rx_irq(fore200e);
1239     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1240 
1241     spin_lock_irqsave(&fore200e->q_lock, flags);
1242     fore200e_tx_irq(fore200e);
1243     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1244 }
1245 #endif
1246 
1247 
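/* main interrupt handler: check that the device raised the interrupt, process (or schedule) tx/rx work, then ack it */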
1248 static irqreturn_t
1249 fore200e_interrupt(int irq, void* dev)
1250 {
1251     struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1252 
1253     if (fore200e->bus->irq_check(fore200e) == 0) {
1254 
1255 	DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1256 	return IRQ_NONE;
1257     }
1258     DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1259 
1260 #ifdef FORE200E_USE_TASKLET
1261     tasklet_schedule(&fore200e->tx_tasklet);
1262     tasklet_schedule(&fore200e->rx_tasklet);
1263 #else
1264     fore200e_irq(fore200e);
1265 #endif
1266 
1267     fore200e->bus->irq_ack(fore200e);
1268     return IRQ_HANDLED;
1269 }
1270 
1271 
1272 #ifdef FORE200E_USE_TASKLET
1273 static void
1274 fore200e_tx_tasklet(unsigned long data)
1275 {
1276     struct fore200e* fore200e = (struct fore200e*) data;
1277     unsigned long flags;
1278 
1279     DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1280 
1281     spin_lock_irqsave(&fore200e->q_lock, flags);
1282     fore200e_tx_irq(fore200e);
1283     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1284 }
1285 
1286 
1287 static void
1288 fore200e_rx_tasklet(unsigned long data)
1289 {
1290     struct fore200e* fore200e = (struct fore200e*) data;
1291     unsigned long    flags;
1292 
1293     DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1294 
1295     spin_lock_irqsave(&fore200e->q_lock, flags);
1296     fore200e_rx_irq((struct fore200e*) data);
1297     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1298 }
1299 #endif
1300 
1301 
1302 static int
1303 fore200e_select_scheme(struct atm_vcc* vcc)
1304 {
1305     /* fairly balance the VCs over (identical) buffer schemes */
1306     int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1307 
1308     DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1309 	    vcc->itf, vcc->vpi, vcc->vci, scheme);
1310 
1311     return scheme;
1312 }
1313 
1314 
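/* issue a command to the adapter to activate or deactivate reception on the given VC */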
1315 static int
1316 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1317 {
1318     struct host_cmdq*        cmdq  = &fore200e->host_cmdq;
1319     struct host_cmdq_entry*  entry = &cmdq->host_entry[ cmdq->head ];
1320     struct activate_opcode   activ_opcode;
1321     struct deactivate_opcode deactiv_opcode;
1322     struct vpvc              vpvc;
1323     int                      ok;
1324     enum fore200e_aal        aal = fore200e_atm2fore_aal(vcc->qos.aal);
1325 
1326     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1327 
1328     if (activate) {
1329 	FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1330 
1331 	activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1332 	activ_opcode.aal    = aal;
1333 	activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1334 	activ_opcode.pad    = 0;
1335     }
1336     else {
1337 	deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1338 	deactiv_opcode.pad    = 0;
1339     }
1340 
1341     vpvc.vci = vcc->vci;
1342     vpvc.vpi = vcc->vpi;
1343 
1344     *entry->status = STATUS_PENDING;
1345 
1346     if (activate) {
1347 
1348 #ifdef FORE200E_52BYTE_AAL0_SDU
1349 	mtu = 48;
1350 #endif
1351 	/* the MTU is not used by the cp, except in the case of AAL0 */
1352 	fore200e->bus->write(mtu,                        &entry->cp_entry->cmd.activate_block.mtu);
1353 	fore200e->bus->write(*(u32*)&vpvc,         (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1354 	fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1355     }
1356     else {
1357 	fore200e->bus->write(*(u32*)&vpvc,         (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1358 	fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1359     }
1360 
1361     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1362 
1363     *entry->status = STATUS_FREE;
1364 
1365     if (ok == 0) {
1366 	printk(FORE200E "unable to %s VC %d.%d.%d\n",
1367 	       activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1368 	return -EIO;
1369     }
1370 
1371     DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1372 	    activate ? "open" : "clos");
1373 
1374     return 0;
1375 }
1376 
1377 
1378 #define FORE200E_MAX_BACK2BACK_CELLS 255    /* XXX depends on CDVT */
1379 
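/* derive the data cells to idle cells ratio used for pseudo-CBR traffic shaping from the requested tx PCR */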
1380 static void
1381 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1382 {
1383     if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1384 
1385 	/* compute the data cells to idle cells ratio from the tx PCR */
1386 	rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1387 	rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1388     }
1389     else {
1390 	/* disable rate control */
1391 	rate->data_cells = rate->idle_cells = 0;
1392     }
1393 }
1394 
1395 
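/* atmdev_ops .open handler: reserve the VC, allocate per-VC state, reserve CBR bandwidth if requested
   and activate reception on the VC */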
1396 static int
1397 fore200e_open(struct atm_vcc *vcc)
1398 {
1399     struct fore200e*        fore200e = FORE200E_DEV(vcc->dev);
1400     struct fore200e_vcc*    fore200e_vcc;
1401     struct fore200e_vc_map* vc_map;
1402     unsigned long	    flags;
1403     int			    vci = vcc->vci;
1404     short		    vpi = vcc->vpi;
1405 
1406     ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1407     ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1408 
1409     spin_lock_irqsave(&fore200e->q_lock, flags);
1410 
1411     vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1412     if (vc_map->vcc) {
1413 
1414 	spin_unlock_irqrestore(&fore200e->q_lock, flags);
1415 
1416 	printk(FORE200E "VC %d.%d.%d already in use\n",
1417 	       fore200e->atm_dev->number, vpi, vci);
1418 
1419 	return -EINVAL;
1420     }
1421 
1422     vc_map->vcc = vcc;
1423 
1424     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1425 
1426     fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1427     if (fore200e_vcc == NULL) {
1428 	vc_map->vcc = NULL;
1429 	return -ENOMEM;
1430     }
1431 
1432     DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1433 	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1434 	    vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1435 	    fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1436 	    vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1437 	    fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1438 	    vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1439 
1440     /* pseudo-CBR bandwidth requested? */
1441     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1442 
1443 	mutex_lock(&fore200e->rate_mtx);
1444 	if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1445 	    mutex_unlock(&fore200e->rate_mtx);
1446 
1447 	    kfree(fore200e_vcc);
1448 	    vc_map->vcc = NULL;
1449 	    return -EAGAIN;
1450 	}
1451 
1452 	/* reserve bandwidth */
1453 	fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1454 	mutex_unlock(&fore200e->rate_mtx);
1455     }
1456 
1457     vcc->itf = vcc->dev->number;
1458 
1459     set_bit(ATM_VF_PARTIAL,&vcc->flags);
1460     set_bit(ATM_VF_ADDR, &vcc->flags);
1461 
1462     vcc->dev_data = fore200e_vcc;
1463 
1464     if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1465 
1466 	vc_map->vcc = NULL;
1467 
1468 	clear_bit(ATM_VF_ADDR, &vcc->flags);
1469 	clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1470 
1471 	vcc->dev_data = NULL;
1472 
1473 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1474 
1475 	kfree(fore200e_vcc);
1476 	return -EINVAL;
1477     }
1478 
1479     /* compute rate control parameters */
1480     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1481 
1482 	fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1483 	set_bit(ATM_VF_HASQOS, &vcc->flags);
1484 
1485 	DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1486 		vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1487 		vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1488 		fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1489     }
1490 
1491     fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1492     fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1493     fore200e_vcc->tx_pdu     = fore200e_vcc->rx_pdu     = 0;
1494 
1495     /* new incarnation of the vcc */
1496     vc_map->incarn = ++fore200e->incarn_count;
1497 
1498     /* VC unusable before this flag is set */
1499     set_bit(ATM_VF_READY, &vcc->flags);
1500 
1501     return 0;
1502 }
1503 
1504 
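/* atmdev_ops .close handler: deactivate reception, release the VC and return any reserved bandwidth */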
1505 static void
1506 fore200e_close(struct atm_vcc* vcc)
1507 {
1508     struct fore200e*        fore200e = FORE200E_DEV(vcc->dev);
1509     struct fore200e_vcc*    fore200e_vcc;
1510     struct fore200e_vc_map* vc_map;
1511     unsigned long           flags;
1512 
1513     ASSERT(vcc);
1514     ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1515     ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1516 
1517     DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1518 
1519     clear_bit(ATM_VF_READY, &vcc->flags);
1520 
1521     fore200e_activate_vcin(fore200e, 0, vcc, 0);
1522 
1523     spin_lock_irqsave(&fore200e->q_lock, flags);
1524 
1525     vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1526 
1527     /* the vc is no longer considered as "in use" by fore200e_open() */
1528     vc_map->vcc = NULL;
1529 
1530     vcc->itf = vcc->vci = vcc->vpi = 0;
1531 
1532     fore200e_vcc = FORE200E_VCC(vcc);
1533     vcc->dev_data = NULL;
1534 
1535     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1536 
1537     /* release reserved bandwidth, if any */
1538     if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1539 
1540 	mutex_lock(&fore200e->rate_mtx);
1541 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1542 	mutex_unlock(&fore200e->rate_mtx);
1543 
1544 	clear_bit(ATM_VF_HASQOS, &vcc->flags);
1545     }
1546 
1547     clear_bit(ATM_VF_ADDR, &vcc->flags);
1548     clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1549 
1550     ASSERT(fore200e_vcc);
1551     kfree(fore200e_vcc);
1552 }
1553 
1554 
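/* atmdev_ops .send handler: copy misaligned or padded AAL0 PDUs into a bounce buffer,
   fill a tpd and hand it to the adapter */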
1555 static int
1556 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1557 {
1558     struct fore200e*        fore200e     = FORE200E_DEV(vcc->dev);
1559     struct fore200e_vcc*    fore200e_vcc = FORE200E_VCC(vcc);
1560     struct fore200e_vc_map* vc_map;
1561     struct host_txq*        txq          = &fore200e->host_txq;
1562     struct host_txq_entry*  entry;
1563     struct tpd*             tpd;
1564     struct tpd_haddr        tpd_haddr;
1565     int                     retry        = CONFIG_ATM_FORE200E_TX_RETRY;
1566     int                     tx_copy      = 0;
1567     int                     tx_len       = skb->len;
1568     u32*                    cell_header  = NULL;
1569     unsigned char*          skb_data;
1570     int                     skb_len;
1571     unsigned char*          data;
1572     unsigned long           flags;
1573 
1574     ASSERT(vcc);
1575     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1576     ASSERT(fore200e);
1577     ASSERT(fore200e_vcc);
1578 
1579     if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1580 	DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1581 	dev_kfree_skb_any(skb);
1582 	return -EINVAL;
1583     }
1584 
1585 #ifdef FORE200E_52BYTE_AAL0_SDU
1586     if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1587 	cell_header = (u32*) skb->data;
1588 	skb_data    = skb->data + 4;    /* skip 4-byte cell header */
1589 	skb_len     = tx_len = skb->len  - 4;
1590 
1591 	DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1592     }
1593     else
1594 #endif
1595     {
1596 	skb_data = skb->data;
1597 	skb_len  = skb->len;
1598     }
1599 
1600     if (((unsigned long)skb_data) & 0x3) {
1601 
1602 	DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1603 	tx_copy = 1;
1604 	tx_len  = skb_len;
1605     }
1606 
1607     if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1608 
1609         /* this simply NUKES the PCA board */
1610 	DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1611 	tx_copy = 1;
1612 	tx_len  = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1613     }
1614 
1615     if (tx_copy) {
1616 	data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1617 	if (data == NULL) {
1618 	    if (vcc->pop) {
1619 		vcc->pop(vcc, skb);
1620 	    }
1621 	    else {
1622 		dev_kfree_skb_any(skb);
1623 	    }
1624 	    return -ENOMEM;
1625 	}
1626 
1627 	memcpy(data, skb_data, skb_len);
1628 	if (skb_len < tx_len)
1629 	    memset(data + skb_len, 0x00, tx_len - skb_len);
1630     }
1631     else {
1632 	data = skb_data;
1633     }
1634 
1635     vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1636     ASSERT(vc_map->vcc == vcc);
1637 
1638   retry_here:
1639 
1640     spin_lock_irqsave(&fore200e->q_lock, flags);
1641 
1642     entry = &txq->host_entry[ txq->head ];
1643 
1644     if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1645 
1646 	/* try to free completed tx queue entries */
1647 	fore200e_tx_irq(fore200e);
1648 
1649 	if (*entry->status != STATUS_FREE) {
1650 
1651 	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1652 
1653 	    /* retry once again? */
1654 	    if (--retry > 0) {
1655 		udelay(50);
1656 		goto retry_here;
1657 	    }
1658 
1659 	    atomic_inc(&vcc->stats->tx_err);
1660 
1661 	    fore200e->tx_sat++;
1662 	    DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1663 		    fore200e->name, fore200e->cp_queues->heartbeat);
1664 	    if (vcc->pop) {
1665 		vcc->pop(vcc, skb);
1666 	    }
1667 	    else {
1668 		dev_kfree_skb_any(skb);
1669 	    }
1670 
1671 	    if (tx_copy)
1672 		kfree(data);
1673 
1674 	    return -ENOBUFS;
1675 	}
1676     }
1677 
1678     entry->incarn = vc_map->incarn;
1679     entry->vc_map = vc_map;
1680     entry->skb    = skb;
1681     entry->data   = tx_copy ? data : NULL;
1682 
1683     tpd = entry->tpd;
1684     tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1685     tpd->tsd[ 0 ].length = tx_len;
1686 
1687     FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1688     txq->txing++;
1689 
1690     /* The dma_map call above implies a dma_sync so the device can use it,
1691      * thus no explicit dma_sync call is necessary here.
1692      */
1693 
1694     DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1695 	    vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1696 	    tpd->tsd[0].length, skb_len);
1697 
1698     if (skb_len < fore200e_vcc->tx_min_pdu)
1699 	fore200e_vcc->tx_min_pdu = skb_len;
1700     if (skb_len > fore200e_vcc->tx_max_pdu)
1701 	fore200e_vcc->tx_max_pdu = skb_len;
1702     fore200e_vcc->tx_pdu++;
1703 
1704     /* set tx rate control information */
1705     tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1706     tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1707 
1708     if (cell_header) {
1709 	tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1710 	tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1711 	tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1712 	tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1713 	tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1714     }
1715     else {
1716 	/* set the ATM header, common to all cells conveying the PDU */
1717 	tpd->atm_header.clp = 0;
1718 	tpd->atm_header.plt = 0;
1719 	tpd->atm_header.vci = vcc->vci;
1720 	tpd->atm_header.vpi = vcc->vpi;
1721 	tpd->atm_header.gfc = 0;
1722     }
1723 
1724     tpd->spec.length = tx_len;
1725     tpd->spec.nseg   = 1;
1726     tpd->spec.aal    = fore200e_atm2fore_aal(vcc->qos.aal);
1727     tpd->spec.intr   = 1;
1728 
1729     tpd_haddr.size  = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT);  /* size is expressed in 32 byte blocks */
1730     tpd_haddr.pad   = 0;
1731     tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT;          /* shift the address, as we are in a bitfield */
1732 
1733     *entry->status = STATUS_PENDING;
1734     fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1735 
1736     spin_unlock_irqrestore(&fore200e->q_lock, flags);
1737 
1738     return 0;
1739 }
1740 
1741 
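/* retrieve the on-board statistics block from the adapter */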
1742 static int
1743 fore200e_getstats(struct fore200e* fore200e)
1744 {
1745     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1746     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1747     struct stats_opcode     opcode;
1748     int                     ok;
1749     u32                     stats_dma_addr;
1750 
1751     if (fore200e->stats == NULL) {
1752 	fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1753 	if (fore200e->stats == NULL)
1754 	    return -ENOMEM;
1755     }
1756 
1757     stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1758 					    sizeof(struct stats), DMA_FROM_DEVICE);
1759 
1760     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1761 
1762     opcode.opcode = OPCODE_GET_STATS;
1763     opcode.pad    = 0;
1764 
1765     fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1766 
1767     *entry->status = STATUS_PENDING;
1768 
1769     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1770 
1771     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1772 
1773     *entry->status = STATUS_FREE;
1774 
1775     fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1776 
1777     if (ok == 0) {
1778 	printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1779 	return -EIO;
1780     }
1781 
1782     return 0;
1783 }
1784 
1785 
1786 static int
1787 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1788 {
1789     /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1790 
1791     DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1792 	    vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1793 
1794     return -EINVAL;
1795 }
1796 
1797 
1798 static int
1799 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen)
1800 {
1801     /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1802 
1803     DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1804 	    vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1805 
1806     return -EINVAL;
1807 }
1808 
1809 
1810 #if 0 /* currently unused */
1811 static int
1812 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1813 {
1814     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1815     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1816     struct oc3_opcode       opcode;
1817     int                     ok;
1818     u32                     oc3_regs_dma_addr;
1819 
1820     oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1821 
1822     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1823 
1824     opcode.opcode = OPCODE_GET_OC3;
1825     opcode.reg    = 0;
1826     opcode.value  = 0;
1827     opcode.mask   = 0;
1828 
1829     fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1830 
1831     *entry->status = STATUS_PENDING;
1832 
1833     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1834 
1835     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1836 
1837     *entry->status = STATUS_FREE;
1838 
1839     fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1840 
1841     if (ok == 0) {
1842 	printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1843 	return -EIO;
1844     }
1845 
1846     return 0;
1847 }
1848 #endif
1849 
1850 
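/* update an OC-3 (SUNI) register through the firmware: post an OPCODE_SET_OC3
   command carrying the register index, the new value and a bit mask, then wait
   for the cp to complete it */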
1851 static int
1852 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1853 {
1854     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1855     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1856     struct oc3_opcode       opcode;
1857     int                     ok;
1858 
1859     DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1860 
1861     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1862 
1863     opcode.opcode = OPCODE_SET_OC3;
1864     opcode.reg    = reg;
1865     opcode.value  = value;
1866     opcode.mask   = mask;
1867 
1868     fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1869 
1870     *entry->status = STATUS_PENDING;
1871 
1872     fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1873 
1874     ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1875 
1876     *entry->status = STATUS_FREE;
1877 
1878     if (ok == 0) {
1879 	printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1880 	return -EIO;
1881     }
1882 
1883     return 0;
1884 }
1885 
1886 
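/* select the PHY loopback mode by setting or clearing the diagnostic (DLE) and
   line (LLE) loopback bits of the SUNI master control register */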
1887 static int
1888 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1889 {
1890     u32 mct_value, mct_mask;
1891     int error;
1892 
1893     if (!capable(CAP_NET_ADMIN))
1894 	return -EPERM;
1895 
1896     switch (loop_mode) {
1897 
1898     case ATM_LM_NONE:
1899 	mct_value = 0;
1900 	mct_mask  = SUNI_MCT_DLE | SUNI_MCT_LLE;
1901 	break;
1902 
1903     case ATM_LM_LOC_PHY:
1904 	mct_value = mct_mask = SUNI_MCT_DLE;
1905 	break;
1906 
1907     case ATM_LM_RMT_PHY:
1908 	mct_value = mct_mask = SUNI_MCT_LLE;
1909 	break;
1910 
1911     default:
1912 	return -EINVAL;
1913     }
1914 
1915     error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1916     if (error == 0)
1917 	fore200e->loop_mode = loop_mode;
1918 
1919     return error;
1920 }
1921 
1922 
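/* build a struct sonet_stats snapshot from the (big-endian) firmware counters
   and copy it to user space when a destination buffer is supplied */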
1923 static int
1924 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1925 {
1926     struct sonet_stats tmp;
1927 
1928     if (fore200e_getstats(fore200e) < 0)
1929 	return -EIO;
1930 
1931     tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
1932     tmp.line_bip    = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
1933     tmp.path_bip    = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
1934     tmp.line_febe   = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
1935     tmp.path_febe   = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
1936     tmp.corr_hcs    = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
1937     tmp.uncorr_hcs  = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
1938     tmp.tx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_transmitted)  +
1939 	              be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
1940 	              be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
1941     tmp.rx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_received)     +
1942 	              be32_to_cpu(fore200e->stats->aal34.cells_received)    +
1943 	              be32_to_cpu(fore200e->stats->aal5.cells_received);
1944 
1945     if (arg)
1946 	return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
1947 
1948     return 0;
1949 }
1950 
1951 
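/* device ioctls: SONET statistics/diagnostics and OC-3 loopback control */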
1952 static int
1953 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
1954 {
1955     struct fore200e* fore200e = FORE200E_DEV(dev);
1956 
1957     DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
1958 
1959     switch (cmd) {
1960 
1961     case SONET_GETSTAT:
1962 	return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
1963 
1964     case SONET_GETDIAG:
1965 	return put_user(0, (int __user *)arg) ? -EFAULT : 0;
1966 
1967     case ATM_SETLOOP:
1968 	return fore200e_setloop(fore200e, (int)(unsigned long)arg);
1969 
1970     case ATM_GETLOOP:
1971 	return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
1972 
1973     case ATM_QUERYLOOP:
1974 	return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
1975     }
1976 
1977     return -ENOSYS; /* not implemented */
1978 }
1979 
1980 
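/* renegotiate the QoS of an open VC: changes are only accepted for CBR transmit
   traffic with a positive peak cell rate, and only if enough cell rate is still
   available on the link */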
1981 static int
1982 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
1983 {
1984     struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1985     struct fore200e*     fore200e     = FORE200E_DEV(vcc->dev);
1986 
1987     if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1988 	DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
1989 	return -EINVAL;
1990     }
1991 
1992     DPRINTK(2, "change_qos %d.%d.%d, "
1993 	    "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1994 	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
1995 	    "available_cell_rate = %u\n",
1996 	    vcc->itf, vcc->vpi, vcc->vci,
1997 	    fore200e_traffic_class[ qos->txtp.traffic_class ],
1998 	    qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
1999 	    fore200e_traffic_class[ qos->rxtp.traffic_class ],
2000 	    qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2001 	    flags, fore200e->available_cell_rate);
2002 
2003     if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2004 
2005 	mutex_lock(&fore200e->rate_mtx);
2006 	if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2007 	    mutex_unlock(&fore200e->rate_mtx);
2008 	    return -EAGAIN;
2009 	}
2010 
2011 	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2012 	fore200e->available_cell_rate -= qos->txtp.max_pcr;
2013 
2014 	mutex_unlock(&fore200e->rate_mtx);
2015 
2016 	memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2017 
2018 	/* update rate control parameters */
2019 	fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2020 
2021 	set_bit(ATM_VF_HASQOS, &vcc->flags);
2022 
2023 	return 0;
2024     }
2025 
2026     return -EINVAL;
2027 }
2028 
2029 
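/* install the (shared) interrupt handler and, when configured, set up the
   tx and rx deferred-work tasklets */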
2030 static int fore200e_irq_request(struct fore200e *fore200e)
2031 {
2032     if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
2033 
2034 	printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2035 	       fore200e_irq_itoa(fore200e->irq), fore200e->name);
2036 	return -EBUSY;
2037     }
2038 
2039     printk(FORE200E "IRQ %s reserved for device %s\n",
2040 	   fore200e_irq_itoa(fore200e->irq), fore200e->name);
2041 
2042 #ifdef FORE200E_USE_TASKLET
2043     tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2044     tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2045 #endif
2046 
2047     fore200e->state = FORE200E_STATE_IRQ;
2048     return 0;
2049 }
2050 
2051 
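/* read the board PROM to report the hardware revision and serial number and to
   retrieve the end system identifier (ESI) advertised to the ATM layer */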
2052 static int fore200e_get_esi(struct fore200e *fore200e)
2053 {
2054     struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2055     int ok, i;
2056 
2057     if (!prom)
2058 	return -ENOMEM;
2059 
2060     ok = fore200e->bus->prom_read(fore200e, prom);
2061     if (ok < 0) {
2062 	kfree(prom);
2063 	return -EBUSY;
2064     }
2065 
2066     printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
2067 	   fore200e->name,
2068 	   (prom->hw_revision & 0xFF) + '@',    /* probably meaningless with SBA boards */
2069 	   prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
2070 
2071     for (i = 0; i < ESI_LEN; i++) {
2072 	fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2073     }
2074 
2075     kfree(prom);
2076 
2077     return 0;
2078 }
2079 
2080 
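/* preallocate the host receive buffers for every (scheme, magnitude) pair and
   chain them all on the corresponding free buffer list */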
2081 static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
2082 {
2083     int scheme, magn, nbr, size, i;
2084 
2085     struct host_bsq* bsq;
2086     struct buffer*   buffer;
2087 
2088     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2089 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2090 
2091 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
2092 
2093 	    nbr  = fore200e_rx_buf_nbr[ scheme ][ magn ];
2094 	    size = fore200e_rx_buf_size[ scheme ][ magn ];
2095 
2096 	    DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2097 
2098 	    /* allocate the array of receive buffers */
2099 	    buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer), GFP_KERNEL);
2100 
2101 	    if (buffer == NULL)
2102 		return -ENOMEM;
2103 
2104 	    bsq->freebuf = NULL;
2105 
2106 	    for (i = 0; i < nbr; i++) {
2107 
2108 		buffer[ i ].scheme = scheme;
2109 		buffer[ i ].magn   = magn;
2110 #ifdef FORE200E_BSQ_DEBUG
2111 		buffer[ i ].index  = i;
2112 		buffer[ i ].supplied = 0;
2113 #endif
2114 
2115 		/* allocate the receive buffer body */
2116 		if (fore200e_chunk_alloc(fore200e,
2117 					 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2118 					 DMA_FROM_DEVICE) < 0) {
2119 
2120 		    while (i > 0)
2121 			fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2122 		    kfree(buffer);
2123 
2124 		    return -ENOMEM;
2125 		}
2126 
2127 		/* insert the buffer into the free buffer list */
2128 		buffer[ i ].next = bsq->freebuf;
2129 		bsq->freebuf = &buffer[ i ];
2130 	    }
2131 	    /* all the buffers are free, initially */
2132 	    bsq->freebuf_count = nbr;
2133 
2134 #ifdef FORE200E_BSQ_DEBUG
2135 	    bsq_audit(3, bsq, scheme, magn);
2136 #endif
2137 	}
2138     }
2139 
2140     fore200e->state = FORE200E_STATE_ALLOC_BUF;
2141     return 0;
2142 }
2143 
2144 
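/* set up the buffer supply queues: allocate the host resident status words and
   receive buffer descriptor blocks, then bind each host entry to its cp
   resident counterpart */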
2145 static int fore200e_init_bs_queue(struct fore200e *fore200e)
2146 {
2147     int scheme, magn, i;
2148 
2149     struct host_bsq*     bsq;
2150     struct cp_bsq_entry __iomem * cp_entry;
2151 
2152     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2153 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2154 
2155 	    DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2156 
2157 	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
2158 
2159 	    /* allocate and align the array of status words */
2160 	    if (fore200e->bus->dma_chunk_alloc(fore200e,
2161 					       &bsq->status,
2162 					       sizeof(enum status),
2163 					       QUEUE_SIZE_BS,
2164 					       fore200e->bus->status_alignment) < 0) {
2165 		return -ENOMEM;
2166 	    }
2167 
2168 	    /* allocate and align the array of receive buffer descriptors */
2169 	    if (fore200e->bus->dma_chunk_alloc(fore200e,
2170 					       &bsq->rbd_block,
2171 					       sizeof(struct rbd_block),
2172 					       QUEUE_SIZE_BS,
2173 					       fore200e->bus->descr_alignment) < 0) {
2174 
2175 		fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2176 		return -ENOMEM;
2177 	    }
2178 
2179 	    /* get the base address of the cp resident buffer supply queue entries */
2180 	    cp_entry = fore200e->virt_base +
2181 		       fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2182 
2183 	    /* fill the host resident and cp resident buffer supply queue entries */
2184 	    for (i = 0; i < QUEUE_SIZE_BS; i++) {
2185 
2186 		bsq->host_entry[ i ].status =
2187 		                     FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2188 	        bsq->host_entry[ i ].rbd_block =
2189 		                     FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2190 		bsq->host_entry[ i ].rbd_block_dma =
2191 		                     FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2192 		bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2193 
2194 		*bsq->host_entry[ i ].status = STATUS_FREE;
2195 
2196 		fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2197 				     &cp_entry[ i ].status_haddr);
2198 	    }
2199 	}
2200     }
2201 
2202     fore200e->state = FORE200E_STATE_INIT_BSQ;
2203     return 0;
2204 }
2205 
2206 
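/* set up the receive queue: allocate the status words and receive PDU
   descriptors and hand their DMA addresses over to the cp resident rx entries */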
2207 static int fore200e_init_rx_queue(struct fore200e *fore200e)
2208 {
2209     struct host_rxq*     rxq =  &fore200e->host_rxq;
2210     struct cp_rxq_entry __iomem * cp_entry;
2211     int i;
2212 
2213     DPRINTK(2, "receive queue is being initialized\n");
2214 
2215     /* allocate and align the array of status words */
2216     if (fore200e->bus->dma_chunk_alloc(fore200e,
2217 				       &rxq->status,
2218 				       sizeof(enum status),
2219 				       QUEUE_SIZE_RX,
2220 				       fore200e->bus->status_alignment) < 0) {
2221 	return -ENOMEM;
2222     }
2223 
2224     /* allocate and align the array of receive PDU descriptors */
2225     if (fore200e->bus->dma_chunk_alloc(fore200e,
2226 				       &rxq->rpd,
2227 				       sizeof(struct rpd),
2228 				       QUEUE_SIZE_RX,
2229 				       fore200e->bus->descr_alignment) < 0) {
2230 
2231 	fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2232 	return -ENOMEM;
2233     }
2234 
2235     /* get the base address of the cp resident rx queue entries */
2236     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2237 
2238     /* fill the host resident and cp resident rx entries */
2239     for (i=0; i < QUEUE_SIZE_RX; i++) {
2240 
2241 	rxq->host_entry[ i ].status =
2242 	                     FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2243 	rxq->host_entry[ i ].rpd =
2244 	                     FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2245 	rxq->host_entry[ i ].rpd_dma =
2246 	                     FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2247 	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2248 
2249 	*rxq->host_entry[ i ].status = STATUS_FREE;
2250 
2251 	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2252 			     &cp_entry[ i ].status_haddr);
2253 
2254 	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2255 			     &cp_entry[ i ].rpd_haddr);
2256     }
2257 
2258     /* set the head entry of the queue */
2259     rxq->head = 0;
2260 
2261     fore200e->state = FORE200E_STATE_INIT_RXQ;
2262     return 0;
2263 }
2264 
2265 
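/* set up the transmit queue: allocate the status words and transmit PDU
   descriptors; unlike the rx queue, the tpd DMA addresses are only written at
   transmit time, since that very write signals a new PDU to the cp */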
2266 static int fore200e_init_tx_queue(struct fore200e *fore200e)
2267 {
2268     struct host_txq*     txq =  &fore200e->host_txq;
2269     struct cp_txq_entry __iomem * cp_entry;
2270     int i;
2271 
2272     DPRINTK(2, "transmit queue is being initialized\n");
2273 
2274     /* allocate and align the array of status words */
2275     if (fore200e->bus->dma_chunk_alloc(fore200e,
2276 				       &txq->status,
2277 				       sizeof(enum status),
2278 				       QUEUE_SIZE_TX,
2279 				       fore200e->bus->status_alignment) < 0) {
2280 	return -ENOMEM;
2281     }
2282 
2283     /* allocate and align the array of transmit PDU descriptors */
2284     if (fore200e->bus->dma_chunk_alloc(fore200e,
2285 				       &txq->tpd,
2286 				       sizeof(struct tpd),
2287 				       QUEUE_SIZE_TX,
2288 				       fore200e->bus->descr_alignment) < 0) {
2289 
2290 	fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2291 	return -ENOMEM;
2292     }
2293 
2294     /* get the base address of the cp resident tx queue entries */
2295     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2296 
2297     /* fill the host resident and cp resident tx entries */
2298     for (i=0; i < QUEUE_SIZE_TX; i++) {
2299 
2300 	txq->host_entry[ i ].status =
2301 	                     FORE200E_INDEX(txq->status.align_addr, enum status, i);
2302 	txq->host_entry[ i ].tpd =
2303 	                     FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2304 	txq->host_entry[ i ].tpd_dma  =
2305                              FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2306 	txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2307 
2308 	*txq->host_entry[ i ].status = STATUS_FREE;
2309 
2310 	fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2311 			     &cp_entry[ i ].status_haddr);
2312 
2313         /* although there is a one-to-one mapping of tx queue entries and tpds,
2314 	   we do not write the DMA (physical) base address of each tpd into the
2315 	   related cp resident entry here: the cp relies on that very write to
2316 	   detect that a new pdu has been submitted for tx */
2317     }
2318 
2319     /* set the head and tail entries of the queue */
2320     txq->head = 0;
2321     txq->tail = 0;
2322 
2323     fore200e->state = FORE200E_STATE_INIT_TXQ;
2324     return 0;
2325 }
2326 
2327 
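/* set up the command queue: only the status words are host resident, commands
   themselves are written directly into the cp resident entries */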
2328 static int fore200e_init_cmd_queue(struct fore200e *fore200e)
2329 {
2330     struct host_cmdq*     cmdq =  &fore200e->host_cmdq;
2331     struct cp_cmdq_entry __iomem * cp_entry;
2332     int i;
2333 
2334     DPRINTK(2, "command queue is being initialized\n");
2335 
2336     /* allocate and align the array of status words */
2337     if (fore200e->bus->dma_chunk_alloc(fore200e,
2338 				       &cmdq->status,
2339 				       sizeof(enum status),
2340 				       QUEUE_SIZE_CMD,
2341 				       fore200e->bus->status_alignment) < 0) {
2342 	return -ENOMEM;
2343     }
2344 
2345     /* get the base address of the cp resident cmd queue entries */
2346     cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2347 
2348     /* fill the host resident and cp resident cmd entries */
2349     for (i=0; i < QUEUE_SIZE_CMD; i++) {
2350 
2351 	cmdq->host_entry[ i ].status   =
2352                               FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2353 	cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2354 
2355 	*cmdq->host_entry[ i ].status = STATUS_FREE;
2356 
2357 	fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2358                              &cp_entry[ i ].status_haddr);
2359     }
2360 
2361     /* set the head entry of the queue */
2362     cmdq->head = 0;
2363 
2364     fore200e->state = FORE200E_STATE_INIT_CMDQ;
2365     return 0;
2366 }
2367 
2368 
2369 static void fore200e_param_bs_queue(struct fore200e *fore200e,
2370 				    enum buffer_scheme scheme,
2371 				    enum buffer_magn magn, int queue_length,
2372 				    int pool_size, int supply_blksize)
2373 {
2374     struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2375 
2376     fore200e->bus->write(queue_length,                           &bs_spec->queue_length);
2377     fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2378     fore200e->bus->write(pool_size,                              &bs_spec->pool_size);
2379     fore200e->bus->write(supply_blksize,                         &bs_spec->supply_blksize);
2380 }
2381 
2382 
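/* hand the operating parameters (queue lengths, buffer schemes, number of
   connections) to the firmware and issue OPCODE_INITIALIZE, polling until the
   cp signals completion */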
2383 static int fore200e_initialize(struct fore200e *fore200e)
2384 {
2385     struct cp_queues __iomem * cpq;
2386     int               ok, scheme, magn;
2387 
2388     DPRINTK(2, "device %s being initialized\n", fore200e->name);
2389 
2390     mutex_init(&fore200e->rate_mtx);
2391     spin_lock_init(&fore200e->q_lock);
2392 
2393     cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2394 
2395     /* enable cp to host interrupts */
2396     fore200e->bus->write(1, &cpq->imask);
2397 
2398     if (fore200e->bus->irq_enable)
2399 	fore200e->bus->irq_enable(fore200e);
2400 
2401     fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2402 
2403     fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2404     fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
2405     fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);
2406 
2407     fore200e->bus->write(RSD_EXTENSION,  &cpq->init.rsd_extension);
2408     fore200e->bus->write(TSD_EXTENSION,  &cpq->init.tsd_extension);
2409 
2410     for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2411 	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2412 	    fore200e_param_bs_queue(fore200e, scheme, magn,
2413 				    QUEUE_SIZE_BS,
2414 				    fore200e_rx_buf_nbr[ scheme ][ magn ],
2415 				    RBD_BLK_SIZE);
2416 
2417     /* issue the initialize command */
2418     fore200e->bus->write(STATUS_PENDING,    &cpq->init.status);
2419     fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2420 
2421     ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2422     if (ok == 0) {
2423 	printk(FORE200E "device %s initialization failed\n", fore200e->name);
2424 	return -ENODEV;
2425     }
2426 
2427     printk(FORE200E "device %s initialized\n", fore200e->name);
2428 
2429     fore200e->state = FORE200E_STATE_INITIALIZE;
2430     return 0;
2431 }
2432 
2433 
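/* minimal soft-UART access to the on-board i960 monitor: characters are
   exchanged one at a time through the send/recv words of the cp monitor area */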
2434 static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
2435 {
2436     struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2437 
2438 #if 0
2439     printk("%c", c);
2440 #endif
2441     fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2442 }
2443 
2444 
2445 static int fore200e_monitor_getc(struct fore200e *fore200e)
2446 {
2447     struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2448     unsigned long      timeout = jiffies + msecs_to_jiffies(50);
2449     int                c;
2450 
2451     while (time_before(jiffies, timeout)) {
2452 
2453 	c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2454 
2455 	if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2456 
2457 	    fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2458 #if 0
2459 	    printk("%c", c & 0xFF);
2460 #endif
2461 	    return c & 0xFF;
2462 	}
2463     }
2464 
2465     return -1;
2466 }
2467 
2468 
2469 static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
2470 {
2471     while (*str) {
2472 
2473 	/* the i960 monitor doesn't accept any new character if it has something to say */
2474 	while (fore200e_monitor_getc(fore200e) >= 0);
2475 
2476 	fore200e_monitor_putc(fore200e, *str++);
2477     }
2478 
2479     while (fore200e_monitor_getc(fore200e) >= 0);
2480 }
2481 
2482 #ifdef __LITTLE_ENDIAN
2483 #define FW_EXT ".bin"
2484 #else
2485 #define FW_EXT "_ecd.bin2"
2486 #endif
2487 
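/* fetch the firmware image with request_firmware(), copy it word by word into
   the board memory at the load offset found in the image header, then start it
   by sending a "go <start_offset>" command to the i960 monitor */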
2488 static int fore200e_load_and_start_fw(struct fore200e *fore200e)
2489 {
2490     const struct firmware *firmware;
2491     struct device *device;
2492     struct fw_header *fw_header;
2493     const __le32 *fw_data;
2494     u32 fw_size;
2495     u32 __iomem *load_addr;
2496     char buf[48];
2497     int err = -ENODEV;
2498 
2499     if (strcmp(fore200e->bus->model_name, "PCA-200E") == 0)
2500 	device = &((struct pci_dev *) fore200e->bus_dev)->dev;
2501 #ifdef CONFIG_SBUS
2502     else if (strcmp(fore200e->bus->model_name, "SBA-200E") == 0)
2503 	device = &((struct platform_device *) fore200e->bus_dev)->dev;
2504 #endif
2505     else
2506 	return err;
2507 
2508     sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
2509     if ((err = request_firmware(&firmware, buf, device)) < 0) {
2510 	printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
2511 	return err;
2512     }
2513 
2514     fw_data = (__le32 *) firmware->data;
2515     fw_size = firmware->size / sizeof(u32);
2516     fw_header = (struct fw_header *) firmware->data;
2517     load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2518 
2519     DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2520 	    fore200e->name, load_addr, fw_size);
2521 
2522     if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2523 	printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2524 	goto release;
2525     }
2526 
2527     for (; fw_size--; fw_data++, load_addr++)
2528 	fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2529 
2530     DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2531 
2532 #if defined(__sparc_v9__)
2533     /* reported to be required by SBA cards on some sparc64 hosts */
2534     fore200e_spin(100);
2535 #endif
2536 
2537     sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2538     fore200e_monitor_puts(fore200e, buf);
2539 
2540     if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2541 	printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2542 	goto release;
2543     }
2544 
2545     printk(FORE200E "device %s firmware started\n", fore200e->name);
2546 
2547     fore200e->state = FORE200E_STATE_START_FW;
2548     err = 0;
2549 
2550 release:
2551     release_firmware(firmware);
2552     return err;
2553 }
2554 
2555 
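/* register the adapter with the ATM layer and advertise its VPI/VCI range and
   the OC-3 cell rate available for reservations */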
2556 static int fore200e_register(struct fore200e *fore200e, struct device *parent)
2557 {
2558     struct atm_dev* atm_dev;
2559 
2560     DPRINTK(2, "device %s being registered\n", fore200e->name);
2561 
2562     atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops,
2563                                -1, NULL);
2564     if (atm_dev == NULL) {
2565 	printk(FORE200E "unable to register device %s\n", fore200e->name);
2566 	return -ENODEV;
2567     }
2568 
2569     atm_dev->dev_data = fore200e;
2570     fore200e->atm_dev = atm_dev;
2571 
2572     atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2573     atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2574 
2575     fore200e->available_cell_rate = ATM_OC3_PCR;
2576 
2577     fore200e->state = FORE200E_STATE_REGISTER;
2578     return 0;
2579 }
2580 
2581 
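/* common bring-up path shared by the PCI and SBUS probes: register the ATM
   device, map and reset the board, load and start the firmware, then set up
   the command, tx, rx and buffer supply queues before requesting the IRQ and
   supplying the initial receive buffers */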
2582 static int fore200e_init(struct fore200e *fore200e, struct device *parent)
2583 {
2584     if (fore200e_register(fore200e, parent) < 0)
2585 	return -ENODEV;
2586 
2587     if (fore200e->bus->configure(fore200e) < 0)
2588 	return -ENODEV;
2589 
2590     if (fore200e->bus->map(fore200e) < 0)
2591 	return -ENODEV;
2592 
2593     if (fore200e_reset(fore200e, 1) < 0)
2594 	return -ENODEV;
2595 
2596     if (fore200e_load_and_start_fw(fore200e) < 0)
2597 	return -ENODEV;
2598 
2599     if (fore200e_initialize(fore200e) < 0)
2600 	return -ENODEV;
2601 
2602     if (fore200e_init_cmd_queue(fore200e) < 0)
2603 	return -ENOMEM;
2604 
2605     if (fore200e_init_tx_queue(fore200e) < 0)
2606 	return -ENOMEM;
2607 
2608     if (fore200e_init_rx_queue(fore200e) < 0)
2609 	return -ENOMEM;
2610 
2611     if (fore200e_init_bs_queue(fore200e) < 0)
2612 	return -ENOMEM;
2613 
2614     if (fore200e_alloc_rx_buf(fore200e) < 0)
2615 	return -ENOMEM;
2616 
2617     if (fore200e_get_esi(fore200e) < 0)
2618 	return -EIO;
2619 
2620     if (fore200e_irq_request(fore200e) < 0)
2621 	return -EBUSY;
2622 
2623     fore200e_supply(fore200e);
2624 
2625     /* all done, board initialization is now complete */
2626     fore200e->state = FORE200E_STATE_COMPLETE;
2627     return 0;
2628 }
2629 
2630 #ifdef CONFIG_SBUS
2631 static const struct of_device_id fore200e_sba_match[];
2632 static int fore200e_sba_probe(struct platform_device *op)
2633 {
2634 	const struct of_device_id *match;
2635 	const struct fore200e_bus *bus;
2636 	struct fore200e *fore200e;
2637 	static int index = 0;
2638 	int err;
2639 
2640 	match = of_match_device(fore200e_sba_match, &op->dev);
2641 	if (!match)
2642 		return -EINVAL;
2643 	bus = match->data;
2644 
2645 	fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2646 	if (!fore200e)
2647 		return -ENOMEM;
2648 
2649 	fore200e->bus = bus;
2650 	fore200e->bus_dev = op;
2651 	fore200e->irq = op->archdata.irqs[0];
2652 	fore200e->phys_base = op->resource[0].start;
2653 
2654 	sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2655 
2656 	err = fore200e_init(fore200e, &op->dev);
2657 	if (err < 0) {
2658 		fore200e_shutdown(fore200e);
2659 		kfree(fore200e);
2660 		return err;
2661 	}
2662 
2663 	index++;
2664 	dev_set_drvdata(&op->dev, fore200e);
2665 
2666 	return 0;
2667 }
2668 
2669 static int fore200e_sba_remove(struct platform_device *op)
2670 {
2671 	struct fore200e *fore200e = dev_get_drvdata(&op->dev);
2672 
2673 	fore200e_shutdown(fore200e);
2674 	kfree(fore200e);
2675 
2676 	return 0;
2677 }
2678 
2679 static const struct of_device_id fore200e_sba_match[] = {
2680 	{
2681 		.name = SBA200E_PROM_NAME,
2682 		.data = (void *) &fore200e_bus[1],
2683 	},
2684 	{},
2685 };
2686 MODULE_DEVICE_TABLE(of, fore200e_sba_match);
2687 
2688 static struct platform_driver fore200e_sba_driver = {
2689 	.driver = {
2690 		.name = "fore_200e",
2691 		.of_match_table = fore200e_sba_match,
2692 	},
2693 	.probe		= fore200e_sba_probe,
2694 	.remove		= fore200e_sba_remove,
2695 };
2696 #endif
2697 
2698 #ifdef CONFIG_PCI
2699 static int fore200e_pca_detect(struct pci_dev *pci_dev,
2700 			       const struct pci_device_id *pci_ent)
2701 {
2702     const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
2703     struct fore200e* fore200e;
2704     int err = 0;
2705     static int index = 0;
2706 
2707     if (pci_enable_device(pci_dev)) {
2708 	err = -EINVAL;
2709 	goto out;
2710     }
2711 
2712     if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
2713 	err = -EINVAL;
2714 	goto out;
2715     }
2716 
2717     fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2718     if (fore200e == NULL) {
2719 	err = -ENOMEM;
2720 	goto out_disable;
2721     }
2722 
2723     fore200e->bus       = bus;
2724     fore200e->bus_dev   = pci_dev;
2725     fore200e->irq       = pci_dev->irq;
2726     fore200e->phys_base = pci_resource_start(pci_dev, 0);
2727 
2729 
2730     pci_set_master(pci_dev);
2731 
2732     printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2733 	   fore200e->bus->model_name,
2734 	   fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2735 
2736     sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2737 
2738     err = fore200e_init(fore200e, &pci_dev->dev);
2739     if (err < 0) {
2740 	fore200e_shutdown(fore200e);
2741 	goto out_free;
2742     }
2743 
2744     ++index;
2745     pci_set_drvdata(pci_dev, fore200e);
2746 
2747 out:
2748     return err;
2749 
2750 out_free:
2751     kfree(fore200e);
2752 out_disable:
2753     pci_disable_device(pci_dev);
2754     goto out;
2755 }
2756 
2757 
2758 static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
2759 {
2760     struct fore200e *fore200e;
2761 
2762     fore200e = pci_get_drvdata(pci_dev);
2763 
2764     fore200e_shutdown(fore200e);
2765     kfree(fore200e);
2766     pci_disable_device(pci_dev);
2767 }
2768 
2769 
2770 static struct pci_device_id fore200e_pca_tbl[] = {
2771     { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
2772       0, 0, (unsigned long) &fore200e_bus[0] },
2773     { 0, }
2774 };
2775 
2776 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2777 
2778 static struct pci_driver fore200e_pca_driver = {
2779     .name =     "fore_200e",
2780     .probe =    fore200e_pca_detect,
2781     .remove =   fore200e_pca_remove_one,
2782     .id_table = fore200e_pca_tbl,
2783 };
2784 #endif
2785 
2786 static int __init fore200e_module_init(void)
2787 {
2788 	int err = 0;
2789 
2790 	printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2791 
2792 #ifdef CONFIG_SBUS
2793 	err = platform_driver_register(&fore200e_sba_driver);
2794 	if (err)
2795 		return err;
2796 #endif
2797 
2798 #ifdef CONFIG_PCI
2799 	err = pci_register_driver(&fore200e_pca_driver);
2800 #endif
2801 
2802 #ifdef CONFIG_SBUS
2803 	if (err)
2804 		platform_driver_unregister(&fore200e_sba_driver);
2805 #endif
2806 
2807 	return err;
2808 }
2809 
2810 static void __exit fore200e_module_cleanup(void)
2811 {
2812 #ifdef CONFIG_PCI
2813 	pci_unregister_driver(&fore200e_pca_driver);
2814 #endif
2815 #ifdef CONFIG_SBUS
2816 	platform_driver_unregister(&fore200e_sba_driver);
2817 #endif
2818 }
2819 
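/* back-end for this device's /proc/net/atm entry: each call returns at most one
   section of the report, selected by the *pos cursor, so the output is produced
   incrementally */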
2820 static int
2821 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2822 {
2823     struct fore200e*     fore200e  = FORE200E_DEV(dev);
2824     struct fore200e_vcc* fore200e_vcc;
2825     struct atm_vcc*      vcc;
2826     int                  i, len, left = *pos;
2827     unsigned long        flags;
2828 
2829     if (!left--) {
2830 
2831 	if (fore200e_getstats(fore200e) < 0)
2832 	    return -EIO;
2833 
2834 	len = sprintf(page,"\n"
2835 		       " device:\n"
2836 		       "   internal name:\t\t%s\n", fore200e->name);
2837 
2838 	/* print bus-specific information */
2839 	if (fore200e->bus->proc_read)
2840 	    len += fore200e->bus->proc_read(fore200e, page + len);
2841 
2842 	len += sprintf(page + len,
2843 		"   interrupt line:\t\t%s\n"
2844 		"   physical base address:\t0x%p\n"
2845 		"   virtual base address:\t0x%p\n"
2846 		"   factory address (ESI):\t%pM\n"
2847 		"   board serial number:\t\t%d\n\n",
2848 		fore200e_irq_itoa(fore200e->irq),
2849 		(void*)fore200e->phys_base,
2850 		fore200e->virt_base,
2851 		fore200e->esi,
2852 		fore200e->esi[4] * 256 + fore200e->esi[5]);
2853 
2854 	return len;
2855     }
2856 
2857     if (!left--)
2858 	return sprintf(page,
2859 		       "   free small bufs, scheme 1:\t%d\n"
2860 		       "   free large bufs, scheme 1:\t%d\n"
2861 		       "   free small bufs, scheme 2:\t%d\n"
2862 		       "   free large bufs, scheme 2:\t%d\n",
2863 		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2864 		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2865 		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2866 		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2867 
2868     if (!left--) {
2869 	u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2870 
2871 	len = sprintf(page,"\n\n"
2872 		      " cell processor:\n"
2873 		      "   heartbeat state:\t\t");
2874 
2875 	if (hb >> 16 != 0xDEAD)
2876 	    len += sprintf(page + len, "0x%08x\n", hb);
2877 	else
2878 	    len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2879 
2880 	return len;
2881     }
2882 
2883     if (!left--) {
2884 	static const char* media_name[] = {
2885 	    "unshielded twisted pair",
2886 	    "multimode optical fiber ST",
2887 	    "multimode optical fiber SC",
2888 	    "single-mode optical fiber ST",
2889 	    "single-mode optical fiber SC",
2890 	    "unknown"
2891 	};
2892 
2893 	static const char* oc3_mode[] = {
2894 	    "normal operation",
2895 	    "diagnostic loopback",
2896 	    "line loopback",
2897 	    "unknown"
2898 	};
2899 
2900 	u32 fw_release     = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2901 	u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2902 	u32 oc3_revision   = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2903 	u32 media_index    = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2904 	u32 oc3_index;
2905 
2906 	if (media_index > 4)
2907 		media_index = 5;
2908 
2909 	switch (fore200e->loop_mode) {
2910 	    case ATM_LM_NONE:    oc3_index = 0;
2911 		                 break;
2912 	    case ATM_LM_LOC_PHY: oc3_index = 1;
2913 		                 break;
2914 	    case ATM_LM_RMT_PHY: oc3_index = 2;
2915 		                 break;
2916 	    default:             oc3_index = 3;
2917 	}
2918 
2919 	return sprintf(page,
2920 		       "   firmware release:\t\t%d.%d.%d\n"
2921 		       "   monitor release:\t\t%d.%d\n"
2922 		       "   media type:\t\t\t%s\n"
2923 		       "   OC-3 revision:\t\t0x%x\n"
2924                        "   OC-3 mode:\t\t\t%s",
2925 		       fw_release >> 16, fw_release << 16 >> 24,  fw_release << 24 >> 24,
2926 		       mon960_release >> 16, mon960_release << 16 >> 16,
2927 		       media_name[ media_index ],
2928 		       oc3_revision,
2929 		       oc3_mode[ oc3_index ]);
2930     }
2931 
2932     if (!left--) {
2933 	struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2934 
2935 	return sprintf(page,
2936 		       "\n\n"
2937 		       " monitor:\n"
2938 		       "   version number:\t\t%d\n"
2939 		       "   boot status word:\t\t0x%08x\n",
2940 		       fore200e->bus->read(&cp_monitor->mon_version),
2941 		       fore200e->bus->read(&cp_monitor->bstat));
2942     }
2943 
2944     if (!left--)
2945 	return sprintf(page,
2946 		       "\n"
2947 		       " device statistics:\n"
2948 		       "  4b5b:\n"
2949 		       "     crc_header_errors:\t\t%10u\n"
2950 		       "     framing_errors:\t\t%10u\n",
2951 		       be32_to_cpu(fore200e->stats->phy.crc_header_errors),
2952 		       be32_to_cpu(fore200e->stats->phy.framing_errors));
2953 
2954     if (!left--)
2955 	return sprintf(page, "\n"
2956 		       "  OC-3:\n"
2957 		       "     section_bip8_errors:\t%10u\n"
2958 		       "     path_bip8_errors:\t\t%10u\n"
2959 		       "     line_bip24_errors:\t\t%10u\n"
2960 		       "     line_febe_errors:\t\t%10u\n"
2961 		       "     path_febe_errors:\t\t%10u\n"
2962 		       "     corr_hcs_errors:\t\t%10u\n"
2963 		       "     ucorr_hcs_errors:\t\t%10u\n",
2964 		       be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
2965 		       be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
2966 		       be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
2967 		       be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
2968 		       be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
2969 		       be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
2970 		       be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));
2971 
2972     if (!left--)
2973 	return sprintf(page,"\n"
2974 		       "   ATM:\t\t\t\t     cells\n"
2975 		       "     TX:\t\t\t%10u\n"
2976 		       "     RX:\t\t\t%10u\n"
2977 		       "     vpi out of range:\t\t%10u\n"
2978 		       "     vpi no conn:\t\t%10u\n"
2979 		       "     vci out of range:\t\t%10u\n"
2980 		       "     vci no conn:\t\t%10u\n",
2981 		       be32_to_cpu(fore200e->stats->atm.cells_transmitted),
2982 		       be32_to_cpu(fore200e->stats->atm.cells_received),
2983 		       be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
2984 		       be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
2985 		       be32_to_cpu(fore200e->stats->atm.vci_bad_range),
2986 		       be32_to_cpu(fore200e->stats->atm.vci_no_conn));
2987 
2988     if (!left--)
2989 	return sprintf(page,"\n"
2990 		       "   AAL0:\t\t\t     cells\n"
2991 		       "     TX:\t\t\t%10u\n"
2992 		       "     RX:\t\t\t%10u\n"
2993 		       "     dropped:\t\t\t%10u\n",
2994 		       be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
2995 		       be32_to_cpu(fore200e->stats->aal0.cells_received),
2996 		       be32_to_cpu(fore200e->stats->aal0.cells_dropped));
2997 
2998     if (!left--)
2999 	return sprintf(page,"\n"
3000 		       "   AAL3/4:\n"
3001 		       "     SAR sublayer:\t\t     cells\n"
3002 		       "       TX:\t\t\t%10u\n"
3003 		       "       RX:\t\t\t%10u\n"
3004 		       "       dropped:\t\t\t%10u\n"
3005 		       "       CRC errors:\t\t%10u\n"
3006 		       "       protocol errors:\t\t%10u\n\n"
3007 		       "     CS  sublayer:\t\t      PDUs\n"
3008 		       "       TX:\t\t\t%10u\n"
3009 		       "       RX:\t\t\t%10u\n"
3010 		       "       dropped:\t\t\t%10u\n"
3011 		       "       protocol errors:\t\t%10u\n",
3012 		       be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
3013 		       be32_to_cpu(fore200e->stats->aal34.cells_received),
3014 		       be32_to_cpu(fore200e->stats->aal34.cells_dropped),
3015 		       be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
3016 		       be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
3017 		       be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
3018 		       be32_to_cpu(fore200e->stats->aal34.cspdus_received),
3019 		       be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
3020 		       be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));
3021 
3022     if (!left--)
3023 	return sprintf(page,"\n"
3024 		       "   AAL5:\n"
3025 		       "     SAR sublayer:\t\t     cells\n"
3026 		       "       TX:\t\t\t%10u\n"
3027 		       "       RX:\t\t\t%10u\n"
3028 		       "       dropped:\t\t\t%10u\n"
3029 		       "       congestions:\t\t%10u\n\n"
3030 		       "     CS  sublayer:\t\t      PDUs\n"
3031 		       "       TX:\t\t\t%10u\n"
3032 		       "       RX:\t\t\t%10u\n"
3033 		       "       dropped:\t\t\t%10u\n"
3034 		       "       CRC errors:\t\t%10u\n"
3035 		       "       protocol errors:\t\t%10u\n",
3036 		       be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
3037 		       be32_to_cpu(fore200e->stats->aal5.cells_received),
3038 		       be32_to_cpu(fore200e->stats->aal5.cells_dropped),
3039 		       be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
3040 		       be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
3041 		       be32_to_cpu(fore200e->stats->aal5.cspdus_received),
3042 		       be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
3043 		       be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
3044 		       be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));
3045 
3046     if (!left--)
3047 	return sprintf(page,"\n"
3048 		       "   AUX:\t\t       allocation failures\n"
3049 		       "     small b1:\t\t\t%10u\n"
3050 		       "     large b1:\t\t\t%10u\n"
3051 		       "     small b2:\t\t\t%10u\n"
3052 		       "     large b2:\t\t\t%10u\n"
3053 		       "     RX PDUs:\t\t\t%10u\n"
3054 		       "     TX PDUs:\t\t\t%10lu\n",
3055 		       be32_to_cpu(fore200e->stats->aux.small_b1_failed),
3056 		       be32_to_cpu(fore200e->stats->aux.large_b1_failed),
3057 		       be32_to_cpu(fore200e->stats->aux.small_b2_failed),
3058 		       be32_to_cpu(fore200e->stats->aux.large_b2_failed),
3059 		       be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
3060 		       fore200e->tx_sat);
3061 
3062     if (!left--)
3063 	return sprintf(page,"\n"
3064 		       " receive carrier:\t\t\t%s\n",
3065 		       fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
3066 
3067     if (!left--) {
3068         return sprintf(page,"\n"
3069 		       " VCCs:\n  address   VPI VCI   AAL "
3070 		       "TX PDUs   TX min/max size  RX PDUs   RX min/max size\n");
3071     }
3072 
3073     for (i = 0; i < NBR_CONNECT; i++) {
3074 
3075 	vcc = fore200e->vc_map[i].vcc;
3076 
3077 	if (vcc == NULL)
3078 	    continue;
3079 
3080 	spin_lock_irqsave(&fore200e->q_lock, flags);
3081 
3082 	if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3083 
3084 	    fore200e_vcc = FORE200E_VCC(vcc);
3085 	    ASSERT(fore200e_vcc);
3086 
3087 	    len = sprintf(page,
3088 			  "  %08x  %03d %05d %1d   %09lu %05d/%05d      %09lu %05d/%05d\n",
3089 			  (u32)(unsigned long)vcc,
3090 			  vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3091 			  fore200e_vcc->tx_pdu,
3092 			  fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3093 			  fore200e_vcc->tx_max_pdu,
3094 			  fore200e_vcc->rx_pdu,
3095 			  fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3096 			  fore200e_vcc->rx_max_pdu);
3097 
3098 	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
3099 	    return len;
3100 	}
3101 
3102 	spin_unlock_irqrestore(&fore200e->q_lock, flags);
3103     }
3104 
3105     return 0;
3106 }
3107 
3108 module_init(fore200e_module_init);
3109 module_exit(fore200e_module_cleanup);
3110 
3111 
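/* entry points exported to the ATM layer */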
3112 static const struct atmdev_ops fore200e_ops =
3113 {
3114 	.open       = fore200e_open,
3115 	.close      = fore200e_close,
3116 	.ioctl      = fore200e_ioctl,
3117 	.getsockopt = fore200e_getsockopt,
3118 	.setsockopt = fore200e_setsockopt,
3119 	.send       = fore200e_send,
3120 	.change_qos = fore200e_change_qos,
3121 	.proc_read  = fore200e_proc_read,
3122 	.owner      = THIS_MODULE
3123 };
3124 
3125 
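/* per-model bus operation tables (register access, DMA helpers, board setup);
   field order follows struct fore200e_bus in fore200e.h */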
3126 static const struct fore200e_bus fore200e_bus[] = {
3127 #ifdef CONFIG_PCI
3128     { "PCA-200E", "pca200e", 32, 4, 32,
3129       fore200e_pca_read,
3130       fore200e_pca_write,
3131       fore200e_pca_dma_map,
3132       fore200e_pca_dma_unmap,
3133       fore200e_pca_dma_sync_for_cpu,
3134       fore200e_pca_dma_sync_for_device,
3135       fore200e_pca_dma_chunk_alloc,
3136       fore200e_pca_dma_chunk_free,
3137       fore200e_pca_configure,
3138       fore200e_pca_map,
3139       fore200e_pca_reset,
3140       fore200e_pca_prom_read,
3141       fore200e_pca_unmap,
3142       NULL,
3143       fore200e_pca_irq_check,
3144       fore200e_pca_irq_ack,
3145       fore200e_pca_proc_read,
3146     },
3147 #endif
3148 #ifdef CONFIG_SBUS
3149     { "SBA-200E", "sba200e", 32, 64, 32,
3150       fore200e_sba_read,
3151       fore200e_sba_write,
3152       fore200e_sba_dma_map,
3153       fore200e_sba_dma_unmap,
3154       fore200e_sba_dma_sync_for_cpu,
3155       fore200e_sba_dma_sync_for_device,
3156       fore200e_sba_dma_chunk_alloc,
3157       fore200e_sba_dma_chunk_free,
3158       fore200e_sba_configure,
3159       fore200e_sba_map,
3160       fore200e_sba_reset,
3161       fore200e_sba_prom_read,
3162       fore200e_sba_unmap,
3163       fore200e_sba_irq_enable,
3164       fore200e_sba_irq_check,
3165       fore200e_sba_irq_ack,
3166       fore200e_sba_proc_read,
3167     },
3168 #endif
3169     {}
3170 };
3171 
3172 MODULE_LICENSE("GPL");
3173 #ifdef CONFIG_PCI
3174 #ifdef __LITTLE_ENDIAN /* keep in sync with the FW_EXT selection above */
3175 MODULE_FIRMWARE("pca200e.bin");
3176 #else
3177 MODULE_FIRMWARE("pca200e_ecd.bin2");
3178 #endif
3179 #endif /* CONFIG_PCI */
3180 #ifdef CONFIG_SBUS
3181 MODULE_FIRMWARE("sba200e_ecd.bin2");
3182 #endif
3183