1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 A FORE Systems 200E-series driver for ATM on Linux.
4 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
5
6 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
7
8 This driver simultaneously supports PCA-200E and SBA-200E adapters
9 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
10
11 */
12
13
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/capability.h>
18 #include <linux/interrupt.h>
19 #include <linux/bitops.h>
20 #include <linux/pci.h>
21 #include <linux/module.h>
22 #include <linux/atmdev.h>
23 #include <linux/sonet.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/delay.h>
26 #include <linux/firmware.h>
27 #include <linux/pgtable.h>
28 #include <asm/io.h>
29 #include <asm/string.h>
30 #include <asm/page.h>
31 #include <asm/irq.h>
32 #include <asm/dma.h>
33 #include <asm/byteorder.h>
34 #include <linux/uaccess.h>
35 #include <linux/atomic.h>
36
37 #ifdef CONFIG_SBUS
38 #include <linux/of.h>
39 #include <linux/platform_device.h>
40 #include <asm/idprom.h>
41 #include <asm/openprom.h>
42 #include <asm/oplib.h>
43 #endif
44
45 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
46 #define FORE200E_USE_TASKLET
47 #endif
48
49 #if 0 /* enable the debugging code of the buffer supply queues */
50 #define FORE200E_BSQ_DEBUG
51 #endif
52
53 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
54 #define FORE200E_52BYTE_AAL0_SDU
55 #endif
56
57 #include "fore200e.h"
58 #include "suni.h"
59
60 #define FORE200E_VERSION "0.3e"
61
62 #define FORE200E "fore200e: "
63
64 #if 0 /* override .config */
65 #define CONFIG_ATM_FORE200E_DEBUG 1
66 #endif
67 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
68 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
69 printk(FORE200E format, ##args); } while (0)
70 #else
71 #define DPRINTK(level, format, args...) do {} while (0)
72 #endif
73
74
75 #define FORE200E_ALIGN(addr, alignment) \
76 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
77
78 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
79
80 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
81
82 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
83
#if 1
/* Run-time assertion: log the failing expression with its location, then
   halt the machine via panic(). Wrapped in do { } while (0) so that
   ASSERT(x); behaves as a single statement and is safe inside un-braced
   if/else bodies (the original bare if-block was dangling-else prone). */
#define ASSERT(expr)     do { \
			     if (!(expr)) { \
				 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
					__func__, __LINE__, #expr); \
				 panic(FORE200E "%s", __func__); \
			     } \
			 } while (0)
#else
#define ASSERT(expr)     do {} while (0)
#endif
93
94
95 static const struct atmdev_ops fore200e_ops;
96
97 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
98 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
99
/* number of receive buffers pre-allocated for each buffer
   scheme (1/2) and magnitude (small/large) combination */
static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
    { BUFFER_S1_NBR, BUFFER_L1_NBR },
    { BUFFER_S2_NBR, BUFFER_L2_NBR }
};

/* size of a receive buffer for each scheme/magnitude combination */
static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
    { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
    { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
};
109
110
111 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
112 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
113 #endif
114
115
#if 0 /* currently unused */
/* Translate a FORE200E AAL identifier into the generic ATM-layer AAL
   constant; returns -EINVAL for values outside the known set. */
static int
fore200e_fore2atm_aal(enum fore200e_aal aal)
{
    switch(aal) {
    case FORE200E_AAL0:  return ATM_AAL0;
    case FORE200E_AAL34: return ATM_AAL34;
    case FORE200E_AAL5:  return ATM_AAL5;
    }

    return -EINVAL;
}
#endif
129
130
131 static enum fore200e_aal
fore200e_atm2fore_aal(int aal)132 fore200e_atm2fore_aal(int aal)
133 {
134 switch(aal) {
135 case ATM_AAL0: return FORE200E_AAL0;
136 case ATM_AAL34: return FORE200E_AAL34;
137 case ATM_AAL1:
138 case ATM_AAL2:
139 case ATM_AAL5: return FORE200E_AAL5;
140 }
141
142 return -EINVAL;
143 }
144
145
/* Render an IRQ number as a decimal string.
   Returns a pointer to a static buffer: the result is only valid until the
   next call and the helper is not re-entrant (fine for the probe/remove
   printk call sites it serves).
   The buffer is sized for any 32-bit int (sign + 10 digits + NUL) and the
   write is bounded with snprintf(); the original 8-byte buffer filled by
   sprintf() could overflow for large or negative IRQ numbers. */
static char*
fore200e_irq_itoa(int irq)
{
    static char str[16];
    snprintf(str, sizeof(str), "%d", irq);
    return str;
}
153
154
/* allocate and align a chunk of memory intended to hold the data being exchanged
   between the driver and the adapter (using streaming DVMA) */
157
static int
fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
{
    /* Allocate 'size' bytes aligned to 'alignment' and DMA-map them for
       streaming transfers in 'direction'. Returns 0 on success, -ENOMEM on
       allocation or mapping failure. */
    unsigned long offset = 0;

    /* kzalloc() already guarantees at least word alignment */
    if (alignment <= sizeof(int))
	alignment = 0;

    /* over-allocate so that an aligned address always fits in the chunk */
    chunk->alloc_size = size + alignment;
    chunk->direction  = direction;

    chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL);
    if (chunk->alloc_addr == NULL)
	return -ENOMEM;

    if (alignment > 0)
	offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);

    chunk->align_addr = chunk->alloc_addr + offset;

    chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr,
				     size, direction);
    if (dma_mapping_error(fore200e->dev, chunk->dma_addr)) {
	kfree(chunk->alloc_addr);
	return -ENOMEM;
    }
    /* NOTE(review): fore200e_chunk_free() unmaps chunk->dma_size bytes, but
       no assignment to dma_size is visible here — confirm the field is set
       elsewhere (e.g. in fore200e.h) or that this is a latent mismatch. */
    return 0;
}
186
187
/* free a chunk of memory */

static void
fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    /* Undo fore200e_chunk_alloc(): remove the streaming DMA mapping, then
       release the backing kzalloc() buffer.
       NOTE(review): this unmaps chunk->dma_size bytes while the alloc path
       maps 'size' bytes without visibly storing it into dma_size — verify
       the two agree. */
    dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size,
		     chunk->direction);
    kfree(chunk->alloc_addr);
}
197
198 /*
199 * Allocate a DMA consistent chunk of memory intended to act as a communication
200 * mechanism (to hold descriptors, status, queues, etc.) shared by the driver
201 * and the adapter.
202 */
203 static int
fore200e_dma_chunk_alloc(struct fore200e * fore200e,struct chunk * chunk,int size,int nbr,int alignment)204 fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
205 int size, int nbr, int alignment)
206 {
207 /* returned chunks are page-aligned */
208 chunk->alloc_size = size * nbr;
209 chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
210 &chunk->dma_addr, GFP_KERNEL);
211 if (!chunk->alloc_addr)
212 return -ENOMEM;
213 chunk->align_addr = chunk->alloc_addr;
214 return 0;
215 }
216
/*
 * Free a DMA consistent chunk of memory.
 * Releases a region previously obtained via fore200e_dma_chunk_alloc().
 */
static void
fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr,
		      chunk->dma_addr);
}
226
static void
fore200e_spin(int msecs)
{
    /* Busy-wait for 'msecs' milliseconds without sleeping, for use around
       board resets where the caller must not schedule.
       NOTE(review): the loop spins on jiffies without cpu_relax(); jiffies
       only advances while timer interrupts run — confirm that holds at
       every call site. */
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    while (time_before(jiffies, timeout));
}
233
234
static int
fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
{
    /* Busy-poll a status word in host memory until it equals 'val', an
       error bit (STATUS_ERROR) appears, or 'msecs' milliseconds elapse.
       Returns non-zero when the expected value was seen, 0 otherwise. */
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    int ok;

    mb();   /* order the poll against the caller's preceding command writes */
    do {
	if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
	    break;

    } while (time_before(jiffies, timeout));

#if 1
    /* log a timeout/error with the last observed status word */
    if (!ok) {
	printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
	       *addr, val);
    }
#endif

    return ok;
}
257
258
static int
fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
{
    /* Busy-poll a board register (via the bus read accessor) until it
       equals 'val' or 'msecs' milliseconds elapse. Returns non-zero on
       success, 0 on timeout. */
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    int ok;

    do {
	if ((ok = (fore200e->bus->read(addr) == val)))
	    break;

    } while (time_before(jiffies, timeout));

#if 1
    /* log a timeout with the last observed register value */
    if (!ok) {
	printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
	       fore200e->bus->read(addr), val);
    }
#endif

    return ok;
}
280
281
282 static void
fore200e_free_rx_buf(struct fore200e * fore200e)283 fore200e_free_rx_buf(struct fore200e* fore200e)
284 {
285 int scheme, magn, nbr;
286 struct buffer* buffer;
287
288 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
289 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
290
291 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
292
293 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
294
295 struct chunk* data = &buffer[ nbr ].data;
296
297 if (data->alloc_addr != NULL)
298 fore200e_chunk_free(fore200e, data);
299 }
300 }
301 }
302 }
303 }
304
305
306 static void
fore200e_uninit_bs_queue(struct fore200e * fore200e)307 fore200e_uninit_bs_queue(struct fore200e* fore200e)
308 {
309 int scheme, magn;
310
311 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
312 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
313
314 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
315 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
316
317 if (status->alloc_addr)
318 fore200e_dma_chunk_free(fore200e, status);
319
320 if (rbd_block->alloc_addr)
321 fore200e_dma_chunk_free(fore200e, rbd_block);
322 }
323 }
324 }
325
326
static int
fore200e_reset(struct fore200e* fore200e, int diag)
{
    /* Cold-start the adapter. When 'diag' is non-zero, wait up to 1 s for
       the on-board self-test and return -ENODEV if it does not report
       BSTAT_SELFTEST_OK; on success the driver state advances to
       FORE200E_STATE_RESET. With diag == 0 the board is only reset (used
       by the shutdown path to quiesce the hardware). */
    int ok;

    fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;

    /* tell the firmware monitor a cold start is coming */
    fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);

    fore200e->bus->reset(fore200e);

    if (diag) {
	ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
	if (ok == 0) {

	    printk(FORE200E "device %s self-test failed\n", fore200e->name);
	    return -ENODEV;
	}

	printk(FORE200E "device %s self-test passed\n", fore200e->name);

	fore200e->state = FORE200E_STATE_RESET;
    }

    return 0;
}
353
354
355 static void
fore200e_shutdown(struct fore200e * fore200e)356 fore200e_shutdown(struct fore200e* fore200e)
357 {
358 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
359 fore200e->name, fore200e->phys_base,
360 fore200e_irq_itoa(fore200e->irq));
361
362 if (fore200e->state > FORE200E_STATE_RESET) {
363 /* first, reset the board to prevent further interrupts or data transfers */
364 fore200e_reset(fore200e, 0);
365 }
366
367 /* then, release all allocated resources */
368 switch(fore200e->state) {
369
370 case FORE200E_STATE_COMPLETE:
371 kfree(fore200e->stats);
372
373 fallthrough;
374 case FORE200E_STATE_IRQ:
375 free_irq(fore200e->irq, fore200e->atm_dev);
376 #ifdef FORE200E_USE_TASKLET
377 tasklet_kill(&fore200e->tx_tasklet);
378 tasklet_kill(&fore200e->rx_tasklet);
379 #endif
380
381 fallthrough;
382 case FORE200E_STATE_ALLOC_BUF:
383 fore200e_free_rx_buf(fore200e);
384
385 fallthrough;
386 case FORE200E_STATE_INIT_BSQ:
387 fore200e_uninit_bs_queue(fore200e);
388
389 fallthrough;
390 case FORE200E_STATE_INIT_RXQ:
391 fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status);
392 fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
393
394 fallthrough;
395 case FORE200E_STATE_INIT_TXQ:
396 fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status);
397 fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
398
399 fallthrough;
400 case FORE200E_STATE_INIT_CMDQ:
401 fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
402
403 fallthrough;
404 case FORE200E_STATE_INITIALIZE:
405 /* nothing to do for that state */
406
407 case FORE200E_STATE_START_FW:
408 /* nothing to do for that state */
409
410 case FORE200E_STATE_RESET:
411 /* nothing to do for that state */
412
413 case FORE200E_STATE_MAP:
414 fore200e->bus->unmap(fore200e);
415
416 fallthrough;
417 case FORE200E_STATE_CONFIGURE:
418 /* nothing to do for that state */
419
420 case FORE200E_STATE_REGISTER:
421 /* XXX shouldn't we *start* by deregistering the device? */
422 atm_dev_deregister(fore200e->atm_dev);
423
424 fallthrough;
425 case FORE200E_STATE_BLANK:
426 /* nothing to do for that state */
427 break;
428 }
429 }
430
431
432 #ifdef CONFIG_PCI
433
/* MMIO read from the PCA-200E slave RAM. On big-endian hosts the board
   itself is configured (PCA200E_CTRL_CONVERT_ENDIAN) to byte-swap slave
   RAM accesses, so the raw value is converted back with le32_to_cpu(). */
static u32 fore200e_pca_read(volatile u32 __iomem *addr)
{
    /* on big-endian hosts, the board is configured to convert
       the endianness of slave RAM accesses */
    return le32_to_cpu(readl(addr));
}
440
441
/* MMIO write to the PCA-200E slave RAM; mirror image of
   fore200e_pca_read() — the value is emitted little-endian because the
   board undoes the swap on big-endian hosts. */
static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
{
    /* on big-endian hosts, the board is configured to convert
       the endianness of slave RAM accesses */
    writel(cpu_to_le32(val), addr);
}
448
static int
fore200e_pca_irq_check(struct fore200e* fore200e)
{
    /* Check whether the board has posted an interrupt.
       The PSR is a 1-bit register: any non-zero read means "posted". */
    int irq_posted = readl(fore200e->regs.pca.psr);

#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
    /* at debug level 2, also report a full FIFO OUT condition */
    if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
	DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
    }
#endif

    return irq_posted;
}
463
464
static void
fore200e_pca_irq_ack(struct fore200e* fore200e)
{
    /* acknowledge the posted interrupt by writing the clear bit to the HCR */
    writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
}
470
471
static void
fore200e_pca_reset(struct fore200e* fore200e)
{
    /* pulse the reset bit in the host control register:
       assert, hold for ~10 ms, then release */
    writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
    fore200e_spin(10);
    writel(0, fore200e->regs.pca.hcr);
}
479
480
/* ioremap() the board's PCI memory window and locate the PCA-specific
   registers inside it. Returns 0 on success, -EFAULT if the mapping
   fails; advances the driver state to FORE200E_STATE_MAP. */
static int fore200e_pca_map(struct fore200e* fore200e)
{
    DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);

    fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);

    if (fore200e->virt_base == NULL) {
	printk(FORE200E "can't map device %s\n", fore200e->name);
	return -EFAULT;
    }

    DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);

    /* gain access to the PCA specific registers  */
    fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
    fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
    fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;

    fore200e->state = FORE200E_STATE_MAP;
    return 0;
}
502
503
504 static void
fore200e_pca_unmap(struct fore200e * fore200e)505 fore200e_pca_unmap(struct fore200e* fore200e)
506 {
507 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
508
509 if (fore200e->virt_base != NULL)
510 iounmap(fore200e->virt_base);
511 }
512
513
/* Program the PCI configuration space of a PCA-200E: reject boards with a
   bogus IRQ, enable large PCI bursts (plus slave-RAM endian conversion on
   big-endian hosts) in the master control register, and raise the PCI
   latency timer. Returns 0 on success, -EIO on a bad IRQ; advances the
   driver state to FORE200E_STATE_CONFIGURE. */
static int fore200e_pca_configure(struct fore200e *fore200e)
{
    struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
    u8 master_ctrl, latency;

    DPRINTK(2, "device %s being configured\n", fore200e->name);

    /* 0 and 0xFF both indicate an unassigned/invalid interrupt line */
    if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
	printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
	return -EIO;
    }

    pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);

    master_ctrl = master_ctrl
#if defined(__BIG_ENDIAN)
	/* request the PCA board to convert the endianess of slave RAM accesses */
	| PCA200E_CTRL_CONVERT_ENDIAN
#endif
#if 0
	| PCA200E_CTRL_DIS_CACHE_RD
	| PCA200E_CTRL_DIS_WRT_INVAL
	| PCA200E_CTRL_ENA_CONT_REQ_MODE
	| PCA200E_CTRL_2_CACHE_WRT_INVAL
#endif
	| PCA200E_CTRL_LARGE_PCI_BURSTS;

    pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);

    /* raise latency from 32 (default) to 192, as this seems to prevent NIC
       lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
       this may impact the performances of other PCI devices on the same bus, though */
    latency = 192;
    pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);

    fore200e->state = FORE200E_STATE_CONFIGURE;
    return 0;
}
552
553
static int __init
fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
{
    /* Fetch the board PROM contents (MAC address, serial number, ...) by
       issuing an OPCODE_GET_PROM command: DMA-map 'prom', hand its bus
       address to the adapter, then poll the command status word for
       completion. Returns 0 on success, -ENOMEM on a mapping failure, or
       -EIO on a command timeout/error. */
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct prom_opcode opcode;
    int ok;
    u32 prom_dma;

    /* claim this command queue slot and advance the head */
    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_PROM;
    opcode.pad = 0;

    prom_dma = dma_map_single(fore200e->dev, prom, sizeof(struct prom_data),
			      DMA_FROM_DEVICE);
    if (dma_mapping_error(fore200e->dev, prom_dma))
	return -ENOMEM;

    /* give the adapter the bus address to fill */
    fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);

    *entry->status = STATUS_PENDING;

    /* writing the opcode starts the command on the adapter side */
    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    dma_unmap_single(fore200e->dev, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);

    if (ok == 0) {
	printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
	return -EIO;
    }

#if defined(__BIG_ENDIAN)

#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))

    /* MAC address is stored as little-endian */
    swap_here(&prom->mac_addr[0]);
    swap_here(&prom->mac_addr[4]);
#endif

    return 0;
}
601
602
static int
fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
{
    /* /proc helper: print the board's PCI bus/slot/function triple.
       Returns the number of characters written to 'page'. */
    struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);

    return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
		   pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
}
611
/* bus operations for PCI-attached PCA-200E boards
   (no irq_enable hook is provided for this bus) */
static const struct fore200e_bus fore200e_pci_ops = {
    .model_name          = "PCA-200E",
    .proc_name           = "pca200e",
    .descr_alignment     = 32,
    .buffer_alignment    = 4,
    .status_alignment    = 32,
    .read                = fore200e_pca_read,
    .write               = fore200e_pca_write,
    .configure           = fore200e_pca_configure,
    .map                 = fore200e_pca_map,
    .reset               = fore200e_pca_reset,
    .prom_read           = fore200e_pca_prom_read,
    .unmap               = fore200e_pca_unmap,
    .irq_check           = fore200e_pca_irq_check,
    .irq_ack             = fore200e_pca_irq_ack,
    .proc_read           = fore200e_pca_proc_read,
};
629 #endif /* CONFIG_PCI */
630
631 #ifdef CONFIG_SBUS
632
/* SBus MMIO read accessor */
static u32 fore200e_sba_read(volatile u32 __iomem *addr)
{
    return sbus_readl(addr);
}
637
/* SBus MMIO write accessor */
static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
{
    sbus_writel(val, addr);
}
642
/* enable board interrupts: set INTR_ENA while preserving only the HCR
   sticky bits (the remaining bits are masked out before writing back) */
static void fore200e_sba_irq_enable(struct fore200e *fore200e)
{
    u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
    fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
}
648
/* non-zero when the board has an interrupt request pending */
static int fore200e_sba_irq_check(struct fore200e *fore200e)
{
    return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
}
653
/* acknowledge a pending interrupt, preserving only the HCR sticky bits */
static void fore200e_sba_irq_ack(struct fore200e *fore200e)
{
    u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
    fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
}
659
/* pulse the reset bit: assert, hold for ~10 ms, then release */
static void fore200e_sba_reset(struct fore200e *fore200e)
{
    fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
    fore200e_spin(10);
    fore200e->bus->write(0, fore200e->regs.sba.hcr);
}
666
fore200e_sba_map(struct fore200e * fore200e)667 static int __init fore200e_sba_map(struct fore200e *fore200e)
668 {
669 struct platform_device *op = to_platform_device(fore200e->dev);
670 unsigned int bursts;
671
672 /* gain access to the SBA specific registers */
673 fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
674 fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
675 fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
676 fore200e->virt_base = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
677
678 if (!fore200e->virt_base) {
679 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
680 return -EFAULT;
681 }
682
683 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
684
685 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
686
687 /* get the supported DVMA burst sizes */
688 bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
689
690 if (sbus_can_dma_64bit())
691 sbus_set_sbus64(&op->dev, bursts);
692
693 fore200e->state = FORE200E_STATE_MAP;
694 return 0;
695 }
696
/* undo fore200e_sba_map(): release the four OF resource mappings */
static void fore200e_sba_unmap(struct fore200e *fore200e)
{
    struct platform_device *op = to_platform_device(fore200e->dev);

    of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
    of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
    of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
    of_iounmap(&op->resource[3], fore200e->virt_base, SBA200E_RAM_LENGTH);
}
706
/* nothing to program on SBus boards; just advance the driver state */
static int __init fore200e_sba_configure(struct fore200e *fore200e)
{
    fore200e->state = FORE200E_STATE_CONFIGURE;
    return 0;
}
712
/* Read the board identity from OpenPROM properties rather than a firmware
   command: the two MAC-address halves ("madaddrlo2"/"madaddrhi4" — property
   names as found in the board PROM), the serial number and the hardware
   revision. Returns 0 on success, -ENODEV if a MAC property is missing. */
static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
{
    struct platform_device *op = to_platform_device(fore200e->dev);
    const u8 *prop;
    int len;

    prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
    if (!prop)
	return -ENODEV;
    memcpy(&prom->mac_addr[4], prop, 4);

    prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
    if (!prop)
	return -ENODEV;
    memcpy(&prom->mac_addr[2], prop, 4);

    /* missing integer properties simply default to 0 */
    prom->serial_number = of_getintprop_default(op->dev.of_node,
						"serialnumber", 0);
    prom->hw_revision = of_getintprop_default(op->dev.of_node,
					      "promversion", 0);

    return 0;
}
736
/* /proc helper: print the SBus slot and device-node name.
   Returns the number of characters written to 'page'. */
static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
{
    struct platform_device *op = to_platform_device(fore200e->dev);
    const struct linux_prom_registers *regs;

    regs = of_get_property(op->dev.of_node, "reg", NULL);

    /* the "reg" property may be absent; fall back to slot 0 in that case */
    return sprintf(page, " SBUS slot/device:\t\t%d/'%pOFn'\n",
		   (regs ? regs->which_io : 0), op->dev.of_node);
}
747
/* bus operations for SBus-attached SBA-200E boards */
static const struct fore200e_bus fore200e_sbus_ops = {
    .model_name          = "SBA-200E",
    .proc_name           = "sba200e",
    .descr_alignment     = 32,
    .buffer_alignment    = 64,
    .status_alignment    = 32,
    .read                = fore200e_sba_read,
    .write               = fore200e_sba_write,
    .configure           = fore200e_sba_configure,
    .map                 = fore200e_sba_map,
    .reset               = fore200e_sba_reset,
    .prom_read           = fore200e_sba_prom_read,
    .unmap               = fore200e_sba_unmap,
    .irq_enable          = fore200e_sba_irq_enable,
    .irq_check           = fore200e_sba_irq_check,
    .irq_ack             = fore200e_sba_irq_ack,
    .proc_read           = fore200e_sba_proc_read,
};
766 #endif /* CONFIG_SBUS */
767
/* Reap completed entries from the transmit queue: free per-entry resources
   (copied data, DMA mapping), hand each finished skb back to the socket
   layer — unless its vcc was closed, or closed and re-opened (see the
   incarnation check below), while the PDU was in flight — and update the
   vcc tx statistics. */
static void
fore200e_tx_irq(struct fore200e* fore200e)
{
    struct host_txq* txq = &fore200e->host_txq;
    struct host_txq_entry* entry;
    struct atm_vcc* vcc;
    struct fore200e_vc_map* vc_map;

    /* fast path: nothing in flight */
    if (fore200e->host_txq.txing == 0)
	return;

    for (;;) {

	entry = &txq->host_entry[ txq->tail ];

	/* stop at the first entry the adapter has not completed yet */
	if ((*entry->status & STATUS_COMPLETE) == 0) {
	    break;
	}

	DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
		entry, txq->tail, entry->vc_map, entry->skb);

	/* free copy of misaligned data */
	kfree(entry->data);

	/* remove DMA mapping */
	dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
			 DMA_TO_DEVICE);

	vc_map = entry->vc_map;

	/* vcc closed since the time the entry was submitted for tx? */
	if ((vc_map->vcc == NULL) ||
	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {

	    DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
		    fore200e->atm_dev->number);

	    dev_kfree_skb_any(entry->skb);
	}
	else {
	    ASSERT(vc_map->vcc);

	    /* vcc closed then immediately re-opened? */
	    if (vc_map->incarn != entry->incarn) {

		/* when a vcc is closed, some PDUs may be still pending in the tx queue.
		   if the same vcc is immediately re-opened, those pending PDUs must
		   not be popped after the completion of their emission, as they refer
		   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
		   would be decremented by the size of the (unrelated) skb, possibly
		   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
		   we thus bind the tx entry to the current incarnation of the vcc
		   when the entry is submitted for tx. When the tx later completes,
		   if the incarnation number of the tx entry does not match the one
		   of the vcc, then this implies that the vcc has been closed then re-opened.
		   we thus just drop the skb here. */

		DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
			fore200e->atm_dev->number);

		dev_kfree_skb_any(entry->skb);
	    }
	    else {
		vcc = vc_map->vcc;
		ASSERT(vcc);

		/* notify tx completion */
		if (vcc->pop) {
		    vcc->pop(vcc, entry->skb);
		}
		else {
		    dev_kfree_skb_any(entry->skb);
		}

		/* check error condition */
		if (*entry->status & STATUS_ERROR)
		    atomic_inc(&vcc->stats->tx_err);
		else
		    atomic_inc(&vcc->stats->tx);
	    }
	}

	/* recycle the queue slot and advance the tail */
	*entry->status = STATUS_FREE;

	fore200e->host_txq.txing--;

	FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
    }
}
858
859
#ifdef FORE200E_BSQ_DEBUG
/* Sanity-check the free-buffer list of one buffer supply queue: every
   buffer on the list must be unsupplied, carry the expected scheme and
   magnitude tags, and have a valid index; the list length must match
   freebuf_count. 'where' tags the call site in the log output. */
int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
{
    struct buffer* buf;
    int seen = 0;

    for (buf = bsq->freebuf; buf != NULL; buf = buf->next) {

	if (buf->supplied)
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
		   where, scheme, magn, buf->index);

	if (buf->magn != magn)
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
		   where, scheme, magn, buf->index, buf->magn);

	if (buf->scheme != scheme)
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
		   where, scheme, magn, buf->index, buf->scheme);

	if ((buf->index < 0) || (buf->index >= fore200e_rx_buf_nbr[ scheme ][ magn ]))
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
		   where, scheme, magn, buf->index);

	seen++;
    }

    if (seen != bsq->freebuf_count)
	printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
	       where, scheme, magn, seen, bsq->freebuf_count);

    return 0;
}
#endif
900
901
/* Refill the adapter's receive-buffer supply queues: for every
   scheme/magnitude queue holding at least RBD_BLK_SIZE free host buffers,
   pack a block of receive buffer descriptors and post it to the adapter. */
static void
fore200e_supply(struct fore200e* fore200e)
{
    int  scheme, magn, i;

    struct host_bsq*       bsq;
    struct host_bsq_entry* entry;
    struct buffer*         buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

#ifdef FORE200E_BSQ_DEBUG
	    bsq_audit(1, bsq, scheme, magn);
#endif
	    while (bsq->freebuf_count >= RBD_BLK_SIZE) {

		DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
			RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);

		entry = &bsq->host_entry[ bsq->head ];

		for (i = 0; i < RBD_BLK_SIZE; i++) {

		    /* take the first buffer in the free buffer list */
		    buffer = bsq->freebuf;
		    if (!buffer) {
			/* should never happen: the count and the list disagree */
			printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
			       scheme, magn, bsq->freebuf_count);
			return;
		    }
		    bsq->freebuf = buffer->next;

#ifdef FORE200E_BSQ_DEBUG
		    if (buffer->supplied)
			printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
			       scheme, magn, buffer->index);
		    buffer->supplied = 1;
#endif
		    /* fill one descriptor with the buffer's DMA address and
		       an opaque handle used later to recover the buffer */
		    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
		    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
		}

		FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);

		/* decrease accordingly the number of free rx buffers */
		bsq->freebuf_count -= RBD_BLK_SIZE;

		/* hand the RBD block over to the adapter */
		*entry->status = STATUS_PENDING;
		fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
	    }
	}
    }
}
958
959
/* Reassemble a received PDU descriptor into an skb and push it up the ATM
   stack. Returns 0 on success, or -ENOMEM if the skb cannot be allocated
   or the socket's receive quota (atm_charge) is exhausted; both error
   cases are accounted as rx drops. */
static int
fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
{
    struct sk_buff*      skb;
    struct buffer*       buffer;
    struct fore200e_vcc* fore200e_vcc;
    int                  i, pdu_len = 0;
#ifdef FORE200E_52BYTE_AAL0_SDU
    u32                  cell_header = 0;
#endif

    ASSERT(vcc);

    fore200e_vcc = FORE200E_VCC(vcc);
    ASSERT(fore200e_vcc);

#ifdef FORE200E_52BYTE_AAL0_SDU
    /* atmdump-like apps expect 52-byte AAL0 SDUs: rebuild the 4-byte ATM
       cell header from the rpd and account for it in the PDU length */
    if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {

	cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
	              (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
	              (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
	              (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
	               rpd->atm_header.clp;
	pdu_len = 4;
    }
#endif

    /* compute total PDU length */
    for (i = 0; i < rpd->nseg; i++)
	pdu_len += rpd->rsd[ i ].length;

    skb = alloc_skb(pdu_len, GFP_ATOMIC);
    if (skb == NULL) {
	DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);

	atomic_inc(&vcc->stats->rx_drop);
	return -ENOMEM;
    }

    __net_timestamp(skb);

#ifdef FORE200E_52BYTE_AAL0_SDU
    /* NOTE(review): an all-zero ATM header leaves cell_header == 0 and this
       push is skipped even though pdu_len reserved 4 bytes; that requires
       gfc/vpi/vci/plt/clp all zero — confirm it cannot occur on a data VC. */
    if (cell_header) {
	*((u32*)skb_put(skb, 4)) = cell_header;
    }
#endif

    /* reassemble segments */
    for (i = 0; i < rpd->nseg; i++) {

	/* rebuild rx buffer address from rsd handle */
	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);

	/* Make device DMA transfer visible to CPU.  */
	dma_sync_single_for_cpu(fore200e->dev, buffer->data.dma_addr,
				rpd->rsd[i].length, DMA_FROM_DEVICE);

	skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);

	/* Now let the device get at it again.  */
	dma_sync_single_for_device(fore200e->dev, buffer->data.dma_addr,
				   rpd->rsd[i].length, DMA_FROM_DEVICE);
    }

    DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);

    /* track per-vcc min/max/count PDU statistics */
    if (pdu_len < fore200e_vcc->rx_min_pdu)
	fore200e_vcc->rx_min_pdu = pdu_len;
    if (pdu_len > fore200e_vcc->rx_max_pdu)
	fore200e_vcc->rx_max_pdu = pdu_len;
    fore200e_vcc->rx_pdu++;

    /* push PDU */
    if (atm_charge(vcc, skb->truesize) == 0) {

	DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
		vcc->itf, vcc->vpi, vcc->vci);

	dev_kfree_skb_any(skb);

	atomic_inc(&vcc->stats->rx_drop);
	return -ENOMEM;
    }

    vcc->push(vcc, skb);
    atomic_inc(&vcc->stats->rx);

    return 0;
}
1050
1051
1052 static void
fore200e_collect_rpd(struct fore200e * fore200e,struct rpd * rpd)1053 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1054 {
1055 struct host_bsq* bsq;
1056 struct buffer* buffer;
1057 int i;
1058
1059 for (i = 0; i < rpd->nseg; i++) {
1060
1061 /* rebuild rx buffer address from rsd handle */
1062 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1063
1064 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1065
1066 #ifdef FORE200E_BSQ_DEBUG
1067 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1068
1069 if (buffer->supplied == 0)
1070 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1071 buffer->scheme, buffer->magn, buffer->index);
1072 buffer->supplied = 0;
1073 #endif
1074
1075 /* re-insert the buffer into the free buffer list */
1076 buffer->next = bsq->freebuf;
1077 bsq->freebuf = buffer;
1078
1079 /* then increment the number of free rx buffers */
1080 bsq->freebuf_count++;
1081 }
1082 }
1083
1084
static void
fore200e_rx_irq(struct fore200e* fore200e)
{
    /* Drain the receive queue: hand each completed PDU to its VC (if any),
       recycle its rx buffers, then ack the queue entry back to the cp.
       Caller must hold fore200e->q_lock. */
    struct host_rxq* rxq = &fore200e->host_rxq;
    struct host_rxq_entry* entry;
    struct atm_vcc* vcc;
    struct fore200e_vc_map* vc_map;

    for (;;) {

        entry = &rxq->host_entry[ rxq->head ];

        /* no more received PDUs */
        if ((*entry->status & STATUS_COMPLETE) == 0)
            break;

        vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);

        if ((vc_map->vcc == NULL) ||
            (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {

            /* PDU arrived on a closed or not-yet-ready VC: drop it */
            DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
                    fore200e->atm_dev->number,
                    entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
        }
        else {
            vcc = vc_map->vcc;
            ASSERT(vcc);

            if ((*entry->status & STATUS_ERROR) == 0) {

                fore200e_push_rpd(fore200e, vcc, entry->rpd);
            }
            else {
                DPRINTK(2, "damaged PDU on %d.%d.%d\n",
                        fore200e->atm_dev->number,
                        entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
                atomic_inc(&vcc->stats->rx_err);
            }
        }

        FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);

        /* buffers are recycled only after the PDU data has been copied out */
        fore200e_collect_rpd(fore200e, entry->rpd);

        /* rewrite the rpd address to ack the received PDU */
        fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
        *entry->status = STATUS_FREE;

        /* replenish the buffer supply queues with the freed buffers */
        fore200e_supply(fore200e);
    }
}
1137
1138
1139 #ifndef FORE200E_USE_TASKLET
1140 static void
fore200e_irq(struct fore200e * fore200e)1141 fore200e_irq(struct fore200e* fore200e)
1142 {
1143 unsigned long flags;
1144
1145 spin_lock_irqsave(&fore200e->q_lock, flags);
1146 fore200e_rx_irq(fore200e);
1147 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1148
1149 spin_lock_irqsave(&fore200e->q_lock, flags);
1150 fore200e_tx_irq(fore200e);
1151 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1152 }
1153 #endif
1154
1155
1156 static irqreturn_t
fore200e_interrupt(int irq,void * dev)1157 fore200e_interrupt(int irq, void* dev)
1158 {
1159 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1160
1161 if (fore200e->bus->irq_check(fore200e) == 0) {
1162
1163 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1164 return IRQ_NONE;
1165 }
1166 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1167
1168 #ifdef FORE200E_USE_TASKLET
1169 tasklet_schedule(&fore200e->tx_tasklet);
1170 tasklet_schedule(&fore200e->rx_tasklet);
1171 #else
1172 fore200e_irq(fore200e);
1173 #endif
1174
1175 fore200e->bus->irq_ack(fore200e);
1176 return IRQ_HANDLED;
1177 }
1178
1179
1180 #ifdef FORE200E_USE_TASKLET
1181 static void
fore200e_tx_tasklet(unsigned long data)1182 fore200e_tx_tasklet(unsigned long data)
1183 {
1184 struct fore200e* fore200e = (struct fore200e*) data;
1185 unsigned long flags;
1186
1187 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1188
1189 spin_lock_irqsave(&fore200e->q_lock, flags);
1190 fore200e_tx_irq(fore200e);
1191 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1192 }
1193
1194
1195 static void
fore200e_rx_tasklet(unsigned long data)1196 fore200e_rx_tasklet(unsigned long data)
1197 {
1198 struct fore200e* fore200e = (struct fore200e*) data;
1199 unsigned long flags;
1200
1201 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1202
1203 spin_lock_irqsave(&fore200e->q_lock, flags);
1204 fore200e_rx_irq((struct fore200e*) data);
1205 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1206 }
1207 #endif
1208
1209
1210 static int
fore200e_select_scheme(struct atm_vcc * vcc)1211 fore200e_select_scheme(struct atm_vcc* vcc)
1212 {
1213 /* fairly balance the VCs over (identical) buffer schemes */
1214 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1215
1216 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1217 vcc->itf, vcc->vpi, vcc->vci, scheme);
1218
1219 return scheme;
1220 }
1221
1222
static int
fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
{
    /* Send an activate- or deactivate-incoming-VC command to the cp and
       poll for its completion.  Returns 0 on success, -EIO if the cp did
       not complete the command within the poll budget. */
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct activate_opcode activ_opcode;
    struct deactivate_opcode deactiv_opcode;
    struct vpvc vpvc;
    int ok;
    enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    if (activate) {
        /* an rx buffer scheme is assigned to the VC at activation time */
        FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);

        activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
        activ_opcode.aal = aal;
        activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
        activ_opcode.pad = 0;
    }
    else {
        deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
        deactiv_opcode.pad = 0;
    }

    vpvc.vci = vcc->vci;
    vpvc.vpi = vcc->vpi;

    /* mark the entry pending before the cp can see the opcode */
    *entry->status = STATUS_PENDING;

    if (activate) {

#ifdef FORE200E_52BYTE_AAL0_SDU
        mtu = 48;
#endif
        /* the MTU is not used by the cp, except in the case of AAL0 */
        fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
        fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
        /* the opcode is written last: it hands the command over to the cp */
        fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
    }
    else {
        fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
        fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
    }

    /* busy-wait for the cp to flag the command complete */
    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    if (ok == 0) {
        printk(FORE200E "unable to %s VC %d.%d.%d\n",
               activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
        return -EIO;
    }

    DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
            activate ? "open" : "clos");

    return 0;
}
1284
1285
1286 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
1287
1288 static void
fore200e_rate_ctrl(struct atm_qos * qos,struct tpd_rate * rate)1289 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1290 {
1291 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1292
1293 /* compute the data cells to idle cells ratio from the tx PCR */
1294 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1295 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1296 }
1297 else {
1298 /* disable rate control */
1299 rate->data_cells = rate->idle_cells = 0;
1300 }
1301 }
1302
1303
1304 static int
fore200e_open(struct atm_vcc * vcc)1305 fore200e_open(struct atm_vcc *vcc)
1306 {
1307 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1308 struct fore200e_vcc* fore200e_vcc;
1309 struct fore200e_vc_map* vc_map;
1310 unsigned long flags;
1311 int vci = vcc->vci;
1312 short vpi = vcc->vpi;
1313
1314 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1315 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1316
1317 spin_lock_irqsave(&fore200e->q_lock, flags);
1318
1319 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1320 if (vc_map->vcc) {
1321
1322 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1323
1324 printk(FORE200E "VC %d.%d.%d already in use\n",
1325 fore200e->atm_dev->number, vpi, vci);
1326
1327 return -EINVAL;
1328 }
1329
1330 vc_map->vcc = vcc;
1331
1332 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1333
1334 fore200e_vcc = kzalloc_obj(struct fore200e_vcc, GFP_ATOMIC);
1335 if (fore200e_vcc == NULL) {
1336 vc_map->vcc = NULL;
1337 return -ENOMEM;
1338 }
1339
1340 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1341 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1342 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1343 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1344 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1345 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1346 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1347
1348 /* pseudo-CBR bandwidth requested? */
1349 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1350
1351 mutex_lock(&fore200e->rate_mtx);
1352 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1353 mutex_unlock(&fore200e->rate_mtx);
1354
1355 kfree(fore200e_vcc);
1356 vc_map->vcc = NULL;
1357 return -EAGAIN;
1358 }
1359
1360 /* reserve bandwidth */
1361 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1362 mutex_unlock(&fore200e->rate_mtx);
1363 }
1364
1365 vcc->itf = vcc->dev->number;
1366
1367 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1368 set_bit(ATM_VF_ADDR, &vcc->flags);
1369
1370 vcc->dev_data = fore200e_vcc;
1371
1372 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1373
1374 vc_map->vcc = NULL;
1375
1376 clear_bit(ATM_VF_ADDR, &vcc->flags);
1377 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1378
1379 vcc->dev_data = NULL;
1380
1381 mutex_lock(&fore200e->rate_mtx);
1382 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1383 mutex_unlock(&fore200e->rate_mtx);
1384
1385 kfree(fore200e_vcc);
1386 return -EINVAL;
1387 }
1388
1389 /* compute rate control parameters */
1390 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1391
1392 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1393 set_bit(ATM_VF_HASQOS, &vcc->flags);
1394
1395 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1396 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1397 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1398 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1399 }
1400
1401 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1402 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1403 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1404
1405 /* new incarnation of the vcc */
1406 vc_map->incarn = ++fore200e->incarn_count;
1407
1408 /* VC unusable before this flag is set */
1409 set_bit(ATM_VF_READY, &vcc->flags);
1410
1411 return 0;
1412 }
1413
1414
static void
fore200e_close(struct atm_vcc* vcc)
{
    /* ATM device close entry point: deactivate the incoming side of the
       VC on the cp, release the (vpi,vci) slot and any reserved
       pseudo-CBR bandwidth, then free the per-VC state. */
    struct fore200e_vcc* fore200e_vcc;
    struct fore200e* fore200e;
    struct fore200e_vc_map* vc_map;
    unsigned long flags;

    ASSERT(vcc);
    fore200e = FORE200E_DEV(vcc->dev);

    ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
    ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));

    DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));

    /* stop accepting traffic on this VC before tearing it down */
    clear_bit(ATM_VF_READY, &vcc->flags);

    fore200e_activate_vcin(fore200e, 0, vcc, 0);

    spin_lock_irqsave(&fore200e->q_lock, flags);

    vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);

    /* the vc is no longer considered as "in use" by fore200e_open() */
    vc_map->vcc = NULL;

    vcc->itf = vcc->vci = vcc->vpi = 0;

    /* detach the per-VC state under the lock; freed after unlock */
    fore200e_vcc = FORE200E_VCC(vcc);
    vcc->dev_data = NULL;

    spin_unlock_irqrestore(&fore200e->q_lock, flags);

    /* release reserved bandwidth, if any */
    if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {

        mutex_lock(&fore200e->rate_mtx);
        fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
        mutex_unlock(&fore200e->rate_mtx);

        clear_bit(ATM_VF_HASQOS, &vcc->flags);
    }

    clear_bit(ATM_VF_ADDR, &vcc->flags);
    clear_bit(ATM_VF_PARTIAL,&vcc->flags);

    ASSERT(fore200e_vcc);
    kfree(fore200e_vcc);
}
1465
1466
1467 static int
fore200e_send(struct atm_vcc * vcc,struct sk_buff * skb)1468 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1469 {
1470 struct fore200e* fore200e;
1471 struct fore200e_vcc* fore200e_vcc;
1472 struct fore200e_vc_map* vc_map;
1473 struct host_txq* txq;
1474 struct host_txq_entry* entry;
1475 struct tpd* tpd;
1476 struct tpd_haddr tpd_haddr;
1477 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1478 int tx_copy = 0;
1479 int tx_len = skb->len;
1480 u32* cell_header = NULL;
1481 unsigned char* skb_data;
1482 int skb_len;
1483 unsigned char* data;
1484 unsigned long flags;
1485
1486 if (!vcc)
1487 return -EINVAL;
1488
1489 fore200e = FORE200E_DEV(vcc->dev);
1490 fore200e_vcc = FORE200E_VCC(vcc);
1491
1492 if (!fore200e)
1493 return -EINVAL;
1494
1495 txq = &fore200e->host_txq;
1496 if (!fore200e_vcc)
1497 return -EINVAL;
1498
1499 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1500 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
1501 dev_kfree_skb_any(skb);
1502 return -EINVAL;
1503 }
1504
1505 #ifdef FORE200E_52BYTE_AAL0_SDU
1506 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1507 cell_header = (u32*) skb->data;
1508 skb_data = skb->data + 4; /* skip 4-byte cell header */
1509 skb_len = tx_len = skb->len - 4;
1510
1511 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1512 }
1513 else
1514 #endif
1515 {
1516 skb_data = skb->data;
1517 skb_len = skb->len;
1518 }
1519
1520 if (((unsigned long)skb_data) & 0x3) {
1521
1522 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1523 tx_copy = 1;
1524 tx_len = skb_len;
1525 }
1526
1527 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1528
1529 /* this simply NUKES the PCA board */
1530 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1531 tx_copy = 1;
1532 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1533 }
1534
1535 if (tx_copy) {
1536 data = kmalloc(tx_len, GFP_ATOMIC);
1537 if (data == NULL) {
1538 if (vcc->pop) {
1539 vcc->pop(vcc, skb);
1540 }
1541 else {
1542 dev_kfree_skb_any(skb);
1543 }
1544 return -ENOMEM;
1545 }
1546
1547 memcpy(data, skb_data, skb_len);
1548 if (skb_len < tx_len)
1549 memset(data + skb_len, 0x00, tx_len - skb_len);
1550 }
1551 else {
1552 data = skb_data;
1553 }
1554
1555 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1556 ASSERT(vc_map->vcc == vcc);
1557
1558 retry_here:
1559
1560 spin_lock_irqsave(&fore200e->q_lock, flags);
1561
1562 entry = &txq->host_entry[ txq->head ];
1563
1564 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1565
1566 /* try to free completed tx queue entries */
1567 fore200e_tx_irq(fore200e);
1568
1569 if (*entry->status != STATUS_FREE) {
1570
1571 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1572
1573 /* retry once again? */
1574 if (--retry > 0) {
1575 udelay(50);
1576 goto retry_here;
1577 }
1578
1579 atomic_inc(&vcc->stats->tx_err);
1580
1581 fore200e->tx_sat++;
1582 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1583 fore200e->name, fore200e->cp_queues->heartbeat);
1584 if (vcc->pop) {
1585 vcc->pop(vcc, skb);
1586 }
1587 else {
1588 dev_kfree_skb_any(skb);
1589 }
1590
1591 if (tx_copy)
1592 kfree(data);
1593
1594 return -ENOBUFS;
1595 }
1596 }
1597
1598 entry->incarn = vc_map->incarn;
1599 entry->vc_map = vc_map;
1600 entry->skb = skb;
1601 entry->data = tx_copy ? data : NULL;
1602
1603 tpd = entry->tpd;
1604 tpd->tsd[ 0 ].buffer = dma_map_single(fore200e->dev, data, tx_len,
1605 DMA_TO_DEVICE);
1606 if (dma_mapping_error(fore200e->dev, tpd->tsd[0].buffer)) {
1607 if (tx_copy)
1608 kfree(data);
1609 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1610 return -ENOMEM;
1611 }
1612 tpd->tsd[ 0 ].length = tx_len;
1613
1614 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1615 txq->txing++;
1616
1617 /* The dma_map call above implies a dma_sync so the device can use it,
1618 * thus no explicit dma_sync call is necessary here.
1619 */
1620
1621 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1622 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1623 tpd->tsd[0].length, skb_len);
1624
1625 if (skb_len < fore200e_vcc->tx_min_pdu)
1626 fore200e_vcc->tx_min_pdu = skb_len;
1627 if (skb_len > fore200e_vcc->tx_max_pdu)
1628 fore200e_vcc->tx_max_pdu = skb_len;
1629 fore200e_vcc->tx_pdu++;
1630
1631 /* set tx rate control information */
1632 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1633 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1634
1635 if (cell_header) {
1636 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1637 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1638 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1639 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1640 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1641 }
1642 else {
1643 /* set the ATM header, common to all cells conveying the PDU */
1644 tpd->atm_header.clp = 0;
1645 tpd->atm_header.plt = 0;
1646 tpd->atm_header.vci = vcc->vci;
1647 tpd->atm_header.vpi = vcc->vpi;
1648 tpd->atm_header.gfc = 0;
1649 }
1650
1651 tpd->spec.length = tx_len;
1652 tpd->spec.nseg = 1;
1653 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1654 tpd->spec.intr = 1;
1655
1656 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
1657 tpd_haddr.pad = 0;
1658 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
1659
1660 *entry->status = STATUS_PENDING;
1661 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1662
1663 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1664
1665 return 0;
1666 }
1667
1668
static int
fore200e_getstats(struct fore200e* fore200e)
{
    /* Ask the cp to DMA its statistics block into host memory.  The stats
       buffer is allocated once and cached in fore200e->stats.  Returns 0
       on success, -ENOMEM on allocation/mapping failure, -EIO if the cp
       did not complete the command within the poll budget. */
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct stats_opcode opcode;
    int ok;
    u32 stats_dma_addr;

    if (fore200e->stats == NULL) {
        fore200e->stats = kzalloc_obj(struct stats);
        if (fore200e->stats == NULL)
            return -ENOMEM;
    }

    /* the cp writes the stats block, hence DMA_FROM_DEVICE */
    stats_dma_addr = dma_map_single(fore200e->dev, fore200e->stats,
                                    sizeof(struct stats), DMA_FROM_DEVICE);
    if (dma_mapping_error(fore200e->dev, stats_dma_addr))
        return -ENOMEM;

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_STATS;
    opcode.pad = 0;

    fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);

    /* mark the entry pending before the cp can see the opcode */
    *entry->status = STATUS_PENDING;

    /* writing the opcode hands the command over to the cp */
    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    dma_unmap_single(fore200e->dev, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);

    if (ok == 0) {
        printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
        return -EIO;
    }

    return 0;
}
1713
1714 #if 0 /* currently unused */
static int
fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
{
    /* Read the SUNI OC-3 register block from the cp into *regs.
       Compiled out (#if 0) and currently unused.
       NOTE(review): still uses the legacy bus->dma_map/dma_unmap wrappers
       and a plain (u32*) cast, unlike the live command helpers above —
       update before re-enabling. */
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct oc3_opcode opcode;
    int ok;
    u32 oc3_regs_dma_addr;

    oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_OC3;
    opcode.reg = 0;
    opcode.value = 0;
    opcode.mask = 0;

    fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);

    /* mark the entry pending before the cp can see the opcode */
    *entry->status = STATUS_PENDING;

    /* writing the opcode hands the command over to the cp */
    fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);

    if (ok == 0) {
        printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
        return -EIO;
    }

    return 0;
}
1752 #endif
1753
1754
static int
fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
{
    /* Issue a SET_OC3 command: update the masked bits of a SUNI OC-3
       register to the given value.  Returns 0 on success, -EIO if the cp
       did not complete the command within the poll budget. */
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct oc3_opcode opcode;
    int ok;

    DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_SET_OC3;
    opcode.reg = reg;
    opcode.value = value;
    opcode.mask = mask;

    /* no host buffer involved: no register block is read back */
    fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);

    /* mark the entry pending before the cp can see the opcode */
    *entry->status = STATUS_PENDING;

    /* writing the opcode hands the command over to the cp */
    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    if (ok == 0) {
        printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
        return -EIO;
    }

    return 0;
}
1789
1790
1791 static int
fore200e_setloop(struct fore200e * fore200e,int loop_mode)1792 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1793 {
1794 u32 mct_value, mct_mask;
1795 int error;
1796
1797 if (!capable(CAP_NET_ADMIN))
1798 return -EPERM;
1799
1800 switch (loop_mode) {
1801
1802 case ATM_LM_NONE:
1803 mct_value = 0;
1804 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1805 break;
1806
1807 case ATM_LM_LOC_PHY:
1808 mct_value = mct_mask = SUNI_MCT_DLE;
1809 break;
1810
1811 case ATM_LM_RMT_PHY:
1812 mct_value = mct_mask = SUNI_MCT_LLE;
1813 break;
1814
1815 default:
1816 return -EINVAL;
1817 }
1818
1819 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1820 if (error == 0)
1821 fore200e->loop_mode = loop_mode;
1822
1823 return error;
1824 }
1825
1826
1827 static int
fore200e_fetch_stats(struct fore200e * fore200e,struct sonet_stats __user * arg)1828 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1829 {
1830 struct sonet_stats tmp;
1831
1832 if (fore200e_getstats(fore200e) < 0)
1833 return -EIO;
1834
1835 tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
1836 tmp.line_bip = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
1837 tmp.path_bip = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
1838 tmp.line_febe = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
1839 tmp.path_febe = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
1840 tmp.corr_hcs = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
1841 tmp.uncorr_hcs = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
1842 tmp.tx_cells = be32_to_cpu(fore200e->stats->aal0.cells_transmitted) +
1843 be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
1844 be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
1845 tmp.rx_cells = be32_to_cpu(fore200e->stats->aal0.cells_received) +
1846 be32_to_cpu(fore200e->stats->aal34.cells_received) +
1847 be32_to_cpu(fore200e->stats->aal5.cells_received);
1848
1849 if (arg)
1850 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
1851
1852 return 0;
1853 }
1854
1855
1856 static int
fore200e_ioctl(struct atm_dev * dev,unsigned int cmd,void __user * arg)1857 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
1858 {
1859 struct fore200e* fore200e = FORE200E_DEV(dev);
1860
1861 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
1862
1863 switch (cmd) {
1864
1865 case SONET_GETSTAT:
1866 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
1867
1868 case SONET_GETDIAG:
1869 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
1870
1871 case ATM_SETLOOP:
1872 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
1873
1874 case ATM_GETLOOP:
1875 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
1876
1877 case ATM_QUERYLOOP:
1878 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
1879 }
1880
1881 return -ENOSYS; /* not implemented */
1882 }
1883
1884
1885 static int
fore200e_change_qos(struct atm_vcc * vcc,struct atm_qos * qos,int flags)1886 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
1887 {
1888 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1889 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1890
1891 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1892 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi);
1893 return -EINVAL;
1894 }
1895
1896 DPRINTK(2, "change_qos %d.%d.%d, "
1897 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1898 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
1899 "available_cell_rate = %u",
1900 vcc->itf, vcc->vpi, vcc->vci,
1901 fore200e_traffic_class[ qos->txtp.traffic_class ],
1902 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
1903 fore200e_traffic_class[ qos->rxtp.traffic_class ],
1904 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
1905 flags, fore200e->available_cell_rate);
1906
1907 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
1908
1909 mutex_lock(&fore200e->rate_mtx);
1910 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
1911 mutex_unlock(&fore200e->rate_mtx);
1912 return -EAGAIN;
1913 }
1914
1915 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1916 fore200e->available_cell_rate -= qos->txtp.max_pcr;
1917
1918 mutex_unlock(&fore200e->rate_mtx);
1919
1920 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
1921
1922 /* update rate control parameters */
1923 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
1924
1925 set_bit(ATM_VF_HASQOS, &vcc->flags);
1926
1927 return 0;
1928 }
1929
1930 return -EINVAL;
1931 }
1932
1933
fore200e_irq_request(struct fore200e * fore200e)1934 static int fore200e_irq_request(struct fore200e *fore200e)
1935 {
1936 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
1937
1938 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
1939 fore200e_irq_itoa(fore200e->irq), fore200e->name);
1940 return -EBUSY;
1941 }
1942
1943 printk(FORE200E "IRQ %s reserved for device %s\n",
1944 fore200e_irq_itoa(fore200e->irq), fore200e->name);
1945
1946 #ifdef FORE200E_USE_TASKLET
1947 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
1948 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
1949 #endif
1950
1951 fore200e->state = FORE200E_STATE_IRQ;
1952 return 0;
1953 }
1954
1955
fore200e_get_esi(struct fore200e * fore200e)1956 static int fore200e_get_esi(struct fore200e *fore200e)
1957 {
1958 struct prom_data* prom = kzalloc_obj(struct prom_data);
1959 int ok, i;
1960
1961 if (!prom)
1962 return -ENOMEM;
1963
1964 ok = fore200e->bus->prom_read(fore200e, prom);
1965 if (ok < 0) {
1966 kfree(prom);
1967 return -EBUSY;
1968 }
1969
1970 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
1971 fore200e->name,
1972 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
1973 prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
1974
1975 for (i = 0; i < ESI_LEN; i++) {
1976 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
1977 }
1978
1979 kfree(prom);
1980
1981 return 0;
1982 }
1983
1984
static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
{
    /* Allocate the host rx buffers for every buffer scheme/magnitude
       combination and thread them onto the per-queue free lists.
       Advances the init state machine to FORE200E_STATE_ALLOC_BUF on
       success; returns -ENOMEM on failure. */
    int scheme, magn, nbr, size, i;

    struct host_bsq* bsq;
    struct buffer* buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
        for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

            bsq = &fore200e->host_bsq[ scheme ][ magn ];

            /* buffer count and size per queue come from fixed tables */
            nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
            size = fore200e_rx_buf_size[ scheme ][ magn ];

            DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);

            /* allocate the array of receive buffers */
            buffer = bsq->buffer = kzalloc_objs(struct buffer, nbr);

            if (buffer == NULL)
                return -ENOMEM;

            bsq->freebuf = NULL;

            for (i = 0; i < nbr; i++) {

                buffer[ i ].scheme = scheme;
                buffer[ i ].magn = magn;
#ifdef FORE200E_BSQ_DEBUG
                buffer[ i ].index = i;
                buffer[ i ].supplied = 0;
#endif

                /* allocate the receive buffer body */
                if (fore200e_chunk_alloc(fore200e,
                                         &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
                                         DMA_FROM_DEVICE) < 0) {

                    /* roll back the bodies allocated for this queue only;
                       NOTE(review): queues completed in earlier iterations
                       are presumably released by the driver shutdown path
                       via fore200e->state — confirm */
                    while (i > 0)
                        fore200e_chunk_free(fore200e, &buffer[ --i ].data);
                    kfree(buffer);

                    return -ENOMEM;
                }

                /* insert the buffer into the free buffer list */
                buffer[ i ].next = bsq->freebuf;
                bsq->freebuf = &buffer[ i ];
            }
            /* all the buffers are free, initially */
            bsq->freebuf_count = nbr;

#ifdef FORE200E_BSQ_DEBUG
            bsq_audit(3, bsq, scheme, magn);
#endif
        }
    }

    fore200e->state = FORE200E_STATE_ALLOC_BUF;
    return 0;
}
2047
2048
static int fore200e_init_bs_queue(struct fore200e *fore200e)
{
    /* Initialize every buffer supply queue: allocate the DMA-visible
       status words and rbd blocks, then wire the host-resident entries
       to their cp-resident counterparts.  Advances the init state
       machine to FORE200E_STATE_INIT_BSQ on success.
       NOTE(review): on failure, chunks allocated for queues completed in
       earlier iterations are presumably released by the shutdown path
       via fore200e->state — confirm. */
    int scheme, magn, i;

    struct host_bsq* bsq;
    struct cp_bsq_entry __iomem * cp_entry;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
        for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

            DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);

            bsq = &fore200e->host_bsq[ scheme ][ magn ];

            /* allocate and align the array of status words */
            if (fore200e_dma_chunk_alloc(fore200e,
                                         &bsq->status,
                                         sizeof(enum status),
                                         QUEUE_SIZE_BS,
                                         fore200e->bus->status_alignment) < 0) {
                return -ENOMEM;
            }

            /* allocate and align the array of receive buffer descriptors */
            if (fore200e_dma_chunk_alloc(fore200e,
                                         &bsq->rbd_block,
                                         sizeof(struct rbd_block),
                                         QUEUE_SIZE_BS,
                                         fore200e->bus->descr_alignment) < 0) {

                fore200e_dma_chunk_free(fore200e, &bsq->status);
                return -ENOMEM;
            }

            /* get the base address of the cp resident buffer supply queue entries */
            cp_entry = fore200e->virt_base +
                fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);

            /* fill the host resident and cp resident buffer supply queue entries */
            for (i = 0; i < QUEUE_SIZE_BS; i++) {

                bsq->host_entry[ i ].status =
                    FORE200E_INDEX(bsq->status.align_addr, enum status, i);
                bsq->host_entry[ i ].rbd_block =
                    FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
                bsq->host_entry[ i ].rbd_block_dma =
                    FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
                bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];

                /* every entry starts out free */
                *bsq->host_entry[ i ].status = STATUS_FREE;

                /* tell the cp where to find each entry's status word */
                fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
                                     &cp_entry[ i ].status_haddr);
            }
        }
    }

    fore200e->state = FORE200E_STATE_INIT_BSQ;
    return 0;
}
2109
2110
/*
 * Allocate and initialize the receive queue: a DMA-able array of status
 * words and an array of receive PDU descriptors (one rpd per entry).
 * Each host-resident entry is wired to its cp-resident counterpart; the
 * DMA addresses of both the status word and the rpd are handed to the cp.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (the status array
 * is freed if the rpd allocation fails).
 */
static int fore200e_init_rx_queue(struct fore200e *fore200e)
{
	struct host_rxq* rxq = &fore200e->host_rxq;
	struct cp_rxq_entry __iomem * cp_entry;
	int i;

	DPRINTK(2, "receive queue is being initialized\n");

	/* allocate and align the array of status words */
	if (fore200e_dma_chunk_alloc(fore200e,
				     &rxq->status,
				     sizeof(enum status),
				     QUEUE_SIZE_RX,
				     fore200e->bus->status_alignment) < 0) {
		return -ENOMEM;
	}

	/* allocate and align the array of receive PDU descriptors */
	if (fore200e_dma_chunk_alloc(fore200e,
				     &rxq->rpd,
				     sizeof(struct rpd),
				     QUEUE_SIZE_RX,
				     fore200e->bus->descr_alignment) < 0) {

		fore200e_dma_chunk_free(fore200e, &rxq->status);
		return -ENOMEM;
	}

	/* get the base address of the cp resident rx queue entries */
	cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);

	/* fill the host resident and cp resident rx entries */
	for (i=0; i < QUEUE_SIZE_RX; i++) {

		rxq->host_entry[ i ].status =
			FORE200E_INDEX(rxq->status.align_addr, enum status, i);
		rxq->host_entry[ i ].rpd =
			FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
		rxq->host_entry[ i ].rpd_dma =
			FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
		rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];

		/* every entry starts out owned by the host */
		*rxq->host_entry[ i ].status = STATUS_FREE;

		fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
				     &cp_entry[ i ].status_haddr);

		fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
				     &cp_entry[ i ].rpd_haddr);
	}

	/* set the head entry of the queue */
	rxq->head = 0;

	fore200e->state = FORE200E_STATE_INIT_RXQ;
	return 0;
}
2168
2169
/*
 * Allocate and initialize the transmit queue: a DMA-able array of status
 * words and an array of transmit PDU descriptors (one tpd per entry).
 * Each host-resident entry is wired to its cp-resident counterpart and
 * the status-word DMA addresses are handed to the cp.  Unlike the rx
 * queue, the tpd DMA addresses are deliberately NOT written here - see
 * the in-loop comment below.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (the status array
 * is freed if the tpd allocation fails).
 */
static int fore200e_init_tx_queue(struct fore200e *fore200e)
{
	struct host_txq* txq = &fore200e->host_txq;
	struct cp_txq_entry __iomem * cp_entry;
	int i;

	DPRINTK(2, "transmit queue is being initialized\n");

	/* allocate and align the array of status words */
	if (fore200e_dma_chunk_alloc(fore200e,
				     &txq->status,
				     sizeof(enum status),
				     QUEUE_SIZE_TX,
				     fore200e->bus->status_alignment) < 0) {
		return -ENOMEM;
	}

	/* allocate and align the array of transmit PDU descriptors */
	if (fore200e_dma_chunk_alloc(fore200e,
				     &txq->tpd,
				     sizeof(struct tpd),
				     QUEUE_SIZE_TX,
				     fore200e->bus->descr_alignment) < 0) {

		fore200e_dma_chunk_free(fore200e, &txq->status);
		return -ENOMEM;
	}

	/* get the base address of the cp resident tx queue entries */
	cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);

	/* fill the host resident and cp resident tx entries */
	for (i=0; i < QUEUE_SIZE_TX; i++) {

		txq->host_entry[ i ].status =
			FORE200E_INDEX(txq->status.align_addr, enum status, i);
		txq->host_entry[ i ].tpd =
			FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
		txq->host_entry[ i ].tpd_dma =
			FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
		txq->host_entry[ i ].cp_entry = &cp_entry[ i ];

		/* every entry starts out owned by the host */
		*txq->host_entry[ i ].status = STATUS_FREE;

		fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
				     &cp_entry[ i ].status_haddr);

		/* although there is a one-to-one mapping of tx queue entries and tpds,
		   we do not write here the DMA (physical) base address of each tpd into
		   the related cp resident entry, because the cp relies on this write
		   operation to detect that a new pdu has been submitted for tx */
	}

	/* set the head and tail entries of the queue */
	txq->head = 0;
	txq->tail = 0;

	fore200e->state = FORE200E_STATE_INIT_TXQ;
	return 0;
}
2230
2231
/*
 * Allocate and initialize the command queue: a DMA-able array of status
 * words, one per entry.  Each host-resident entry is wired to its
 * cp-resident counterpart and the status-word DMA address is handed to
 * the cp.  Commands have no separate descriptor array.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int fore200e_init_cmd_queue(struct fore200e *fore200e)
{
	struct host_cmdq* cmdq = &fore200e->host_cmdq;
	struct cp_cmdq_entry __iomem * cp_entry;
	int i;

	DPRINTK(2, "command queue is being initialized\n");

	/* allocate and align the array of status words */
	if (fore200e_dma_chunk_alloc(fore200e,
				     &cmdq->status,
				     sizeof(enum status),
				     QUEUE_SIZE_CMD,
				     fore200e->bus->status_alignment) < 0) {
		return -ENOMEM;
	}

	/* get the base address of the cp resident cmd queue entries */
	cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);

	/* fill the host resident and cp resident cmd entries */
	for (i=0; i < QUEUE_SIZE_CMD; i++) {

		cmdq->host_entry[ i ].status =
			FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
		cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];

		/* every entry starts out owned by the host */
		*cmdq->host_entry[ i ].status = STATUS_FREE;

		fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
				     &cp_entry[ i ].status_haddr);
	}

	/* set the head entry of the queue */
	cmdq->head = 0;

	fore200e->state = FORE200E_STATE_INIT_CMDQ;
	return 0;
}
2271
2272
fore200e_param_bs_queue(struct fore200e * fore200e,enum buffer_scheme scheme,enum buffer_magn magn,int queue_length,int pool_size,int supply_blksize)2273 static void fore200e_param_bs_queue(struct fore200e *fore200e,
2274 enum buffer_scheme scheme,
2275 enum buffer_magn magn, int queue_length,
2276 int pool_size, int supply_blksize)
2277 {
2278 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2279
2280 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2281 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2282 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2283 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
2284 }
2285
2286
/*
 * Run the firmware "initialize" handshake.  Programs the connection
 * count, queue lengths, descriptor extensions and the buffer-supply
 * parameters into the cp-resident init block, enables cp-to-host
 * interrupts, then issues OPCODE_INITIALIZE and polls for
 * STATUS_COMPLETE.
 *
 * Returns 0 on success, -ENODEV if the cp does not report completion
 * within the polling window.
 */
static int fore200e_initialize(struct fore200e *fore200e)
{
	struct cp_queues __iomem * cpq;
	int ok, scheme, magn;

	DPRINTK(2, "device %s being initialized\n", fore200e->name);

	mutex_init(&fore200e->rate_mtx);
	spin_lock_init(&fore200e->q_lock);

	cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;

	/* enable cp to host interrupts */
	fore200e->bus->write(1, &cpq->imask);

	if (fore200e->bus->irq_enable)
		fore200e->bus->irq_enable(fore200e);

	fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);

	fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
	fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
	fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);

	fore200e->bus->write(RSD_EXTENSION,  &cpq->init.rsd_extension);
	fore200e->bus->write(TSD_EXTENSION,  &cpq->init.tsd_extension);

	/* program every buffer supply queue's parameters */
	for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
		for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
			fore200e_param_bs_queue(fore200e, scheme, magn,
						QUEUE_SIZE_BS,
						fore200e_rx_buf_nbr[ scheme ][ magn ],
						RBD_BLK_SIZE);

	/* issue the initialize command */
	fore200e->bus->write(STATUS_PENDING,    &cpq->init.status);
	fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);

	ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
	if (ok == 0) {
		printk(FORE200E "device %s initialization failed\n", fore200e->name);
		return -ENODEV;
	}

	printk(FORE200E "device %s initialized\n", fore200e->name);

	fore200e->state = FORE200E_STATE_INITIALIZE;
	return 0;
}
2336
2337
fore200e_monitor_putc(struct fore200e * fore200e,char c)2338 static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
2339 {
2340 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2341
2342 #if 0
2343 printk("%c", c);
2344 #endif
2345 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2346 }
2347
2348
fore200e_monitor_getc(struct fore200e * fore200e)2349 static int fore200e_monitor_getc(struct fore200e *fore200e)
2350 {
2351 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2352 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2353 int c;
2354
2355 while (time_before(jiffies, timeout)) {
2356
2357 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2358
2359 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2360
2361 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2362 #if 0
2363 printk("%c", c & 0xFF);
2364 #endif
2365 return c & 0xFF;
2366 }
2367 }
2368
2369 return -1;
2370 }
2371
2372
/*
 * Send a NUL-terminated string to the i960 monitor, one character at a
 * time, draining any pending monitor output before each character and
 * once more after the last one.
 */
static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
{
	const char *p;

	for (p = str; *p != '\0'; p++) {
		/* the i960 monitor doesn't accept any new character if it has something to say */
		while (fore200e_monitor_getc(fore200e) >= 0)
			;

		fore200e_monitor_putc(fore200e, *p);
	}

	while (fore200e_monitor_getc(fore200e) >= 0)
		;
}
2385
2386 #ifdef __LITTLE_ENDIAN
2387 #define FW_EXT ".bin"
2388 #else
2389 #define FW_EXT "_ecd.bin2"
2390 #endif
2391
fore200e_load_and_start_fw(struct fore200e * fore200e)2392 static int fore200e_load_and_start_fw(struct fore200e *fore200e)
2393 {
2394 const struct firmware *firmware;
2395 const struct fw_header *fw_header;
2396 const __le32 *fw_data;
2397 u32 fw_size;
2398 u32 __iomem *load_addr;
2399 char buf[48];
2400 int err;
2401
2402 sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
2403 if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) {
2404 printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
2405 return err;
2406 }
2407
2408 fw_data = (const __le32 *)firmware->data;
2409 fw_size = firmware->size / sizeof(u32);
2410 fw_header = (const struct fw_header *)firmware->data;
2411 load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2412
2413 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2414 fore200e->name, load_addr, fw_size);
2415
2416 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2417 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2418 goto release;
2419 }
2420
2421 for (; fw_size--; fw_data++, load_addr++)
2422 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2423
2424 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2425
2426 #if defined(__sparc_v9__)
2427 /* reported to be required by SBA cards on some sparc64 hosts */
2428 fore200e_spin(100);
2429 #endif
2430
2431 sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2432 fore200e_monitor_puts(fore200e, buf);
2433
2434 if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2435 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2436 goto release;
2437 }
2438
2439 printk(FORE200E "device %s firmware started\n", fore200e->name);
2440
2441 fore200e->state = FORE200E_STATE_START_FW;
2442 err = 0;
2443
2444 release:
2445 release_firmware(firmware);
2446 return err;
2447 }
2448
2449
fore200e_register(struct fore200e * fore200e,struct device * parent)2450 static int fore200e_register(struct fore200e *fore200e, struct device *parent)
2451 {
2452 struct atm_dev* atm_dev;
2453
2454 DPRINTK(2, "device %s being registered\n", fore200e->name);
2455
2456 atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops,
2457 -1, NULL);
2458 if (atm_dev == NULL) {
2459 printk(FORE200E "unable to register device %s\n", fore200e->name);
2460 return -ENODEV;
2461 }
2462
2463 atm_dev->dev_data = fore200e;
2464 fore200e->atm_dev = atm_dev;
2465
2466 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2467 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2468
2469 fore200e->available_cell_rate = ATM_OC3_PCR;
2470
2471 fore200e->state = FORE200E_STATE_REGISTER;
2472 return 0;
2473 }
2474
2475
/*
 * Full bring-up sequence for one adapter, in the order the hardware
 * requires: ATM-core registration, bus configuration and mapping, board
 * reset, firmware load/start, the initialize handshake, queue setup
 * (cmd, tx, rx, buffer supply), receive buffer allocation, ESI readout,
 * IRQ request and the initial buffer supply to the cp.
 *
 * Each failing step maps to a step-specific errno (-ENODEV for device
 * bring-up, -ENOMEM for queue/buffer allocation, -EIO for the ESI read,
 * -EBUSY for the IRQ).  On failure, cleanup is the caller's job via
 * fore200e_shutdown(), which keys off fore200e->state.
 */
static int fore200e_init(struct fore200e *fore200e, struct device *parent)
{
	if (fore200e_register(fore200e, parent) < 0)
		return -ENODEV;

	if (fore200e->bus->configure(fore200e) < 0)
		return -ENODEV;

	if (fore200e->bus->map(fore200e) < 0)
		return -ENODEV;

	if (fore200e_reset(fore200e, 1) < 0)
		return -ENODEV;

	if (fore200e_load_and_start_fw(fore200e) < 0)
		return -ENODEV;

	if (fore200e_initialize(fore200e) < 0)
		return -ENODEV;

	if (fore200e_init_cmd_queue(fore200e) < 0)
		return -ENOMEM;

	if (fore200e_init_tx_queue(fore200e) < 0)
		return -ENOMEM;

	if (fore200e_init_rx_queue(fore200e) < 0)
		return -ENOMEM;

	if (fore200e_init_bs_queue(fore200e) < 0)
		return -ENOMEM;

	if (fore200e_alloc_rx_buf(fore200e) < 0)
		return -ENOMEM;

	if (fore200e_get_esi(fore200e) < 0)
		return -EIO;

	if (fore200e_irq_request(fore200e) < 0)
		return -EBUSY;

	fore200e_supply(fore200e);

	/* all done, board initialization is now complete */
	fore200e->state = FORE200E_STATE_COMPLETE;
	return 0;
}
2523
2524 #ifdef CONFIG_SBUS
fore200e_sba_probe(struct platform_device * op)2525 static int fore200e_sba_probe(struct platform_device *op)
2526 {
2527 struct fore200e *fore200e;
2528 static int index = 0;
2529 int err;
2530
2531 fore200e = kzalloc_obj(struct fore200e);
2532 if (!fore200e)
2533 return -ENOMEM;
2534
2535 fore200e->bus = &fore200e_sbus_ops;
2536 fore200e->dev = &op->dev;
2537 fore200e->irq = op->archdata.irqs[0];
2538 fore200e->phys_base = op->resource[0].start;
2539
2540 sprintf(fore200e->name, "SBA-200E-%d", index);
2541
2542 err = fore200e_init(fore200e, &op->dev);
2543 if (err < 0) {
2544 fore200e_shutdown(fore200e);
2545 kfree(fore200e);
2546 return err;
2547 }
2548
2549 index++;
2550 dev_set_drvdata(&op->dev, fore200e);
2551
2552 return 0;
2553 }
2554
/* SBus unbind: shut the board down and release the per-device state. */
static void fore200e_sba_remove(struct platform_device *op)
{
	struct fore200e *fore200e = dev_get_drvdata(&op->dev);

	fore200e_shutdown(fore200e);
	kfree(fore200e);
}
2562
/* OF match table: binds this driver to SBA-200E boards by PROM name. */
static const struct of_device_id fore200e_sba_match[] = {
	{
		.name = SBA200E_PROM_NAME,
	},
	{},
};
MODULE_DEVICE_TABLE(of, fore200e_sba_match);

/* SBus (platform bus) driver glue. */
static struct platform_driver fore200e_sba_driver = {
	.driver = {
		.name = "fore_200e",
		.of_match_table = fore200e_sba_match,
	},
	.probe = fore200e_sba_probe,
	.remove = fore200e_sba_remove,
};
2579 #endif
2580
2581 #ifdef CONFIG_PCI
fore200e_pca_detect(struct pci_dev * pci_dev,const struct pci_device_id * pci_ent)2582 static int fore200e_pca_detect(struct pci_dev *pci_dev,
2583 const struct pci_device_id *pci_ent)
2584 {
2585 struct fore200e* fore200e;
2586 int err = 0;
2587 static int index = 0;
2588
2589 if (pci_enable_device(pci_dev)) {
2590 err = -EINVAL;
2591 goto out;
2592 }
2593
2594 if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
2595 err = -EINVAL;
2596 goto out;
2597 }
2598
2599 fore200e = kzalloc_obj(struct fore200e);
2600 if (fore200e == NULL) {
2601 err = -ENOMEM;
2602 goto out_disable;
2603 }
2604
2605 fore200e->bus = &fore200e_pci_ops;
2606 fore200e->dev = &pci_dev->dev;
2607 fore200e->irq = pci_dev->irq;
2608 fore200e->phys_base = pci_resource_start(pci_dev, 0);
2609
2610 sprintf(fore200e->name, "PCA-200E-%d", index - 1);
2611
2612 pci_set_master(pci_dev);
2613
2614 printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n",
2615 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2616
2617 sprintf(fore200e->name, "PCA-200E-%d", index);
2618
2619 err = fore200e_init(fore200e, &pci_dev->dev);
2620 if (err < 0) {
2621 fore200e_shutdown(fore200e);
2622 goto out_free;
2623 }
2624
2625 ++index;
2626 pci_set_drvdata(pci_dev, fore200e);
2627
2628 out:
2629 return err;
2630
2631 out_free:
2632 kfree(fore200e);
2633 out_disable:
2634 pci_disable_device(pci_dev);
2635 goto out;
2636 }
2637
2638
/* PCI unbind: shut the board down, free its state, disable the device. */
static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
{
	struct fore200e *fore200e = pci_get_drvdata(pci_dev);

	fore200e_shutdown(fore200e);
	kfree(fore200e);
	pci_disable_device(pci_dev);
}
2649
2650
/* PCI match table: FORE PCA-200E, any subsystem IDs. */
static const struct pci_device_id fore200e_pca_tbl[] = {
    { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID },
    { 0, }
};

MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);

/* PCI driver glue. */
static struct pci_driver fore200e_pca_driver = {
    .name =     "fore_200e",
    .probe =    fore200e_pca_detect,
    .remove =   fore200e_pca_remove_one,
    .id_table = fore200e_pca_tbl,
};
2664 #endif
2665
fore200e_module_init(void)2666 static int __init fore200e_module_init(void)
2667 {
2668 int err = 0;
2669
2670 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2671
2672 #ifdef CONFIG_SBUS
2673 err = platform_driver_register(&fore200e_sba_driver);
2674 if (err)
2675 return err;
2676 #endif
2677
2678 #ifdef CONFIG_PCI
2679 err = pci_register_driver(&fore200e_pca_driver);
2680 #endif
2681
2682 #ifdef CONFIG_SBUS
2683 if (err)
2684 platform_driver_unregister(&fore200e_sba_driver);
2685 #endif
2686
2687 return err;
2688 }
2689
/* Module unload: unregister whichever bus drivers were registered at init. */
static void __exit fore200e_module_cleanup(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&fore200e_pca_driver);
#endif
#ifdef CONFIG_SBUS
	platform_driver_unregister(&fore200e_sba_driver);
#endif
}
2699
/*
 * /proc read handler for the ATM core: emits one "page" of text per
 * call, selected by *pos.  Each successive `if (!left--)` arm handles
 * one page index (device identity, free buffer counts, heartbeat,
 * firmware/media info, monitor state, then the various statistics
 * groups); once those are exhausted, the remaining positions walk the
 * open VCCs, one line per call.  Returns the number of bytes written
 * into 'page', 0 at end of output, or a negative errno.
 */
static int
fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
{
	struct fore200e* fore200e = FORE200E_DEV(dev);
	struct fore200e_vcc* fore200e_vcc;
	struct atm_vcc* vcc;
	int i, len, left = *pos;	/* NOTE(review): loff_t narrowed to int; fine for small /proc page counts */
	unsigned long flags;

	if (!left--) {
		/* page 0: device identity; refresh the on-board statistics first */
		if (fore200e_getstats(fore200e) < 0)
			return -EIO;

		len = sprintf(page,"\n"
			      " device:\n"
			      " internal name:\t\t%s\n", fore200e->name);

		/* print bus-specific information */
		if (fore200e->bus->proc_read)
			len += fore200e->bus->proc_read(fore200e, page + len);

		len += sprintf(page + len,
			       " interrupt line:\t\t%s\n"
			       " physical base address:\t0x%p\n"
			       " virtual base address:\t0x%p\n"
			       " factory address (ESI):\t%pM\n"
			       " board serial number:\t\t%d\n\n",
			       fore200e_irq_itoa(fore200e->irq),
			       (void*)fore200e->phys_base,
			       fore200e->virt_base,
			       fore200e->esi,
			       fore200e->esi[4] * 256 + fore200e->esi[5]);

		return len;
	}

	/* page 1: free buffer counts per supply queue */
	if (!left--)
		return sprintf(page,
			       " free small bufs, scheme 1:\t%d\n"
			       " free large bufs, scheme 1:\t%d\n"
			       " free small bufs, scheme 2:\t%d\n"
			       " free large bufs, scheme 2:\t%d\n",
			       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
			       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
			       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
			       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);

	/* page 2: cell processor heartbeat (0xDEAD in the high half flags a fatal error) */
	if (!left--) {
		u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);

		len = sprintf(page,"\n\n"
			      " cell processor:\n"
			      " heartbeat state:\t\t");

		if (hb >> 16 != 0xDEAD)
			len += sprintf(page + len, "0x%08x\n", hb);
		else
			len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);

		return len;
	}

	/* page 3: firmware/monitor releases, media type and OC-3 mode */
	if (!left--) {
		static const char* media_name[] = {
			"unshielded twisted pair",
			"multimode optical fiber ST",
			"multimode optical fiber SC",
			"single-mode optical fiber ST",
			"single-mode optical fiber SC",
			"unknown"
		};

		static const char* oc3_mode[] = {
			"normal operation",
			"diagnostic loopback",
			"line loopback",
			"unknown"
		};

		u32 fw_release     = fore200e->bus->read(&fore200e->cp_queues->fw_release);
		u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
		u32 oc3_revision   = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
		u32 media_index    = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
		u32 oc3_index;

		/* clamp unrecognized media codes to "unknown" */
		if (media_index > 4)
			media_index = 5;

		switch (fore200e->loop_mode) {
			case ATM_LM_NONE:    oc3_index = 0;
				break;
			case ATM_LM_LOC_PHY: oc3_index = 1;
				break;
			case ATM_LM_RMT_PHY: oc3_index = 2;
				break;
			default:             oc3_index = 3;
		}

		return sprintf(page,
			       " firmware release:\t\t%d.%d.%d\n"
			       " monitor release:\t\t%d.%d\n"
			       " media type:\t\t\t%s\n"
			       " OC-3 revision:\t\t0x%x\n"
			       " OC-3 mode:\t\t\t%s",
			       fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
			       mon960_release >> 16, mon960_release << 16 >> 16,
			       media_name[ media_index ],
			       oc3_revision,
			       oc3_mode[ oc3_index ]);
	}

	/* page 4: monitor version and boot status */
	if (!left--) {
		struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;

		return sprintf(page,
			       "\n\n"
			       " monitor:\n"
			       " version number:\t\t%d\n"
			       " boot status word:\t\t0x%08x\n",
			       fore200e->bus->read(&cp_monitor->mon_version),
			       fore200e->bus->read(&cp_monitor->bstat));
	}

	/* pages 5..n: statistics groups (counters are big-endian on the board) */
	if (!left--)
		return sprintf(page,
			       "\n"
			       " device statistics:\n"
			       " 4b5b:\n"
			       " crc_header_errors:\t\t%10u\n"
			       " framing_errors:\t\t%10u\n",
			       be32_to_cpu(fore200e->stats->phy.crc_header_errors),
			       be32_to_cpu(fore200e->stats->phy.framing_errors));

	if (!left--)
		return sprintf(page, "\n"
			       " OC-3:\n"
			       " section_bip8_errors:\t%10u\n"
			       " path_bip8_errors:\t\t%10u\n"
			       " line_bip24_errors:\t\t%10u\n"
			       " line_febe_errors:\t\t%10u\n"
			       " path_febe_errors:\t\t%10u\n"
			       " corr_hcs_errors:\t\t%10u\n"
			       " ucorr_hcs_errors:\t\t%10u\n",
			       be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
			       be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
			       be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
			       be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
			       be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
			       be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
			       be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));

	if (!left--)
		return sprintf(page,"\n"
			       " ATM:\t\t\t\t cells\n"
			       " TX:\t\t\t%10u\n"
			       " RX:\t\t\t%10u\n"
			       " vpi out of range:\t\t%10u\n"
			       " vpi no conn:\t\t%10u\n"
			       " vci out of range:\t\t%10u\n"
			       " vci no conn:\t\t%10u\n",
			       be32_to_cpu(fore200e->stats->atm.cells_transmitted),
			       be32_to_cpu(fore200e->stats->atm.cells_received),
			       be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
			       be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
			       be32_to_cpu(fore200e->stats->atm.vci_bad_range),
			       be32_to_cpu(fore200e->stats->atm.vci_no_conn));

	if (!left--)
		return sprintf(page,"\n"
			       " AAL0:\t\t\t cells\n"
			       " TX:\t\t\t%10u\n"
			       " RX:\t\t\t%10u\n"
			       " dropped:\t\t\t%10u\n",
			       be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
			       be32_to_cpu(fore200e->stats->aal0.cells_received),
			       be32_to_cpu(fore200e->stats->aal0.cells_dropped));

	if (!left--)
		return sprintf(page,"\n"
			       " AAL3/4:\n"
			       " SAR sublayer:\t\t cells\n"
			       " TX:\t\t\t%10u\n"
			       " RX:\t\t\t%10u\n"
			       " dropped:\t\t\t%10u\n"
			       " CRC errors:\t\t%10u\n"
			       " protocol errors:\t\t%10u\n\n"
			       " CS sublayer:\t\t PDUs\n"
			       " TX:\t\t\t%10u\n"
			       " RX:\t\t\t%10u\n"
			       " dropped:\t\t\t%10u\n"
			       " protocol errors:\t\t%10u\n",
			       be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
			       be32_to_cpu(fore200e->stats->aal34.cells_received),
			       be32_to_cpu(fore200e->stats->aal34.cells_dropped),
			       be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
			       be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
			       be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
			       be32_to_cpu(fore200e->stats->aal34.cspdus_received),
			       be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
			       be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));

	if (!left--)
		return sprintf(page,"\n"
			       " AAL5:\n"
			       " SAR sublayer:\t\t cells\n"
			       " TX:\t\t\t%10u\n"
			       " RX:\t\t\t%10u\n"
			       " dropped:\t\t\t%10u\n"
			       " congestions:\t\t%10u\n\n"
			       " CS sublayer:\t\t PDUs\n"
			       " TX:\t\t\t%10u\n"
			       " RX:\t\t\t%10u\n"
			       " dropped:\t\t\t%10u\n"
			       " CRC errors:\t\t%10u\n"
			       " protocol errors:\t\t%10u\n",
			       be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
			       be32_to_cpu(fore200e->stats->aal5.cells_received),
			       be32_to_cpu(fore200e->stats->aal5.cells_dropped),
			       be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
			       be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
			       be32_to_cpu(fore200e->stats->aal5.cspdus_received),
			       be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
			       be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
			       be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));

	if (!left--)
		return sprintf(page,"\n"
			       " AUX:\t\t allocation failures\n"
			       " small b1:\t\t\t%10u\n"
			       " large b1:\t\t\t%10u\n"
			       " small b2:\t\t\t%10u\n"
			       " large b2:\t\t\t%10u\n"
			       " RX PDUs:\t\t\t%10u\n"
			       " TX PDUs:\t\t\t%10lu\n",
			       be32_to_cpu(fore200e->stats->aux.small_b1_failed),
			       be32_to_cpu(fore200e->stats->aux.large_b1_failed),
			       be32_to_cpu(fore200e->stats->aux.small_b2_failed),
			       be32_to_cpu(fore200e->stats->aux.large_b2_failed),
			       be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
			       fore200e->tx_sat);

	if (!left--)
		return sprintf(page,"\n"
			       " receive carrier:\t\t\t%s\n",
			       fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");

	/* VCC table header, then one VCC line per remaining position */
	if (!left--) {
		return sprintf(page,"\n"
			       " VCCs:\n address VPI VCI AAL "
			       "TX PDUs TX min/max size RX PDUs RX min/max size\n");
	}

	for (i = 0; i < NBR_CONNECT; i++) {

		vcc = fore200e->vc_map[i].vcc;

		if (vcc == NULL)
			continue;

		spin_lock_irqsave(&fore200e->q_lock, flags);

		/* NOTE(review): vc_map[i].vcc was sampled before taking q_lock and is
		   re-checked here - presumably close() clears it under the same lock;
		   verify against the open/close paths. */
		if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {

			fore200e_vcc = FORE200E_VCC(vcc);
			ASSERT(fore200e_vcc);

			len = sprintf(page,
				      " %pK %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
				      vcc,
				      vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
				      fore200e_vcc->tx_pdu,
				      fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
				      fore200e_vcc->tx_max_pdu,
				      fore200e_vcc->rx_pdu,
				      fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
				      fore200e_vcc->rx_max_pdu);

			spin_unlock_irqrestore(&fore200e->q_lock, flags);
			return len;
		}

		spin_unlock_irqrestore(&fore200e->q_lock, flags);
	}

	return 0;
}
2987
/* hook the driver's entry points into the module load/unload machinery */
module_init(fore200e_module_init);
module_exit(fore200e_module_cleanup);


/* ATM device operations exported to the ATM core */
static const struct atmdev_ops fore200e_ops = {
	.open       = fore200e_open,
	.close      = fore200e_close,
	.ioctl      = fore200e_ioctl,
	.send       = fore200e_send,
	.change_qos = fore200e_change_qos,
	.proc_read  = fore200e_proc_read,
	.owner      = THIS_MODULE
};
3001
3002 MODULE_LICENSE("GPL");
3003 #ifdef CONFIG_PCI
3004 #ifdef __LITTLE_ENDIAN__
3005 MODULE_FIRMWARE("pca200e.bin");
3006 #else
3007 MODULE_FIRMWARE("pca200e_ecd.bin2");
3008 #endif
3009 #endif /* CONFIG_PCI */
3010 #ifdef CONFIG_SBUS
3011 MODULE_FIRMWARE("sba200e_ecd.bin2");
3012 #endif
3013