1 /* 2 $Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $ 3 4 A FORE Systems 200E-series driver for ATM on Linux. 5 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003. 6 7 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de). 8 9 This driver simultaneously supports PCA-200E and SBA-200E adapters 10 on i386, alpha (untested), powerpc, sparc and sparc64 architectures. 11 12 This program is free software; you can redistribute it and/or modify 13 it under the terms of the GNU General Public License as published by 14 the Free Software Foundation; either version 2 of the License, or 15 (at your option) any later version. 16 17 This program is distributed in the hope that it will be useful, 18 but WITHOUT ANY WARRANTY; without even the implied warranty of 19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 GNU General Public License for more details. 21 22 You should have received a copy of the GNU General Public License 23 along with this program; if not, write to the Free Software 24 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 */ 26 27 28 #include <linux/config.h> 29 #include <linux/kernel.h> 30 #include <linux/slab.h> 31 #include <linux/init.h> 32 #include <linux/capability.h> 33 #include <linux/sched.h> 34 #include <linux/interrupt.h> 35 #include <linux/bitops.h> 36 #include <linux/pci.h> 37 #include <linux/module.h> 38 #include <linux/atmdev.h> 39 #include <linux/sonet.h> 40 #include <linux/atm_suni.h> 41 #include <linux/dma-mapping.h> 42 #include <linux/delay.h> 43 #include <asm/io.h> 44 #include <asm/string.h> 45 #include <asm/page.h> 46 #include <asm/irq.h> 47 #include <asm/dma.h> 48 #include <asm/byteorder.h> 49 #include <asm/uaccess.h> 50 #include <asm/atomic.h> 51 52 #ifdef CONFIG_ATM_FORE200E_SBA 53 #include <asm/idprom.h> 54 #include <asm/sbus.h> 55 #include <asm/openprom.h> 56 #include <asm/oplib.h> 57 #include <asm/pgtable.h> 58 #endif 59 60 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */ 61 #define FORE200E_USE_TASKLET 62 #endif 63 64 #if 0 /* enable the debugging code of the buffer supply queues */ 65 #define FORE200E_BSQ_DEBUG 66 #endif 67 68 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */ 69 #define FORE200E_52BYTE_AAL0_SDU 70 #endif 71 72 #include "fore200e.h" 73 #include "suni.h" 74 75 #define FORE200E_VERSION "0.3e" 76 77 #define FORE200E "fore200e: " 78 79 #if 0 /* override .config */ 80 #define CONFIG_ATM_FORE200E_DEBUG 1 81 #endif 82 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0) 83 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \ 84 printk(FORE200E format, ##args); } while (0) 85 #else 86 #define DPRINTK(level, format, args...) do {} while (0) 87 #endif 88 89 90 #define FORE200E_ALIGN(addr, alignment) \ 91 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr)) 92 93 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type)) 94 95 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ]) 96 97 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ++(index) % (modulo)) 98 99 #if 1 100 #define ASSERT(expr) if (!(expr)) { \ 101 printk(FORE200E "assertion failed! 
%s[%d]: %s\n", \ 102 __FUNCTION__, __LINE__, #expr); \ 103 panic(FORE200E "%s", __FUNCTION__); \ 104 } 105 #else 106 #define ASSERT(expr) do {} while (0) 107 #endif 108 109 110 static const struct atmdev_ops fore200e_ops; 111 static const struct fore200e_bus fore200e_bus[]; 112 113 static LIST_HEAD(fore200e_boards); 114 115 116 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen"); 117 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION); 118 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E"); 119 120 121 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { 122 { BUFFER_S1_NBR, BUFFER_L1_NBR }, 123 { BUFFER_S2_NBR, BUFFER_L2_NBR } 124 }; 125 126 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { 127 { BUFFER_S1_SIZE, BUFFER_L1_SIZE }, 128 { BUFFER_S2_SIZE, BUFFER_L2_SIZE } 129 }; 130 131 132 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0) 133 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" }; 134 #endif 135 136 137 #if 0 /* currently unused */ 138 static int 139 fore200e_fore2atm_aal(enum fore200e_aal aal) 140 { 141 switch(aal) { 142 case FORE200E_AAL0: return ATM_AAL0; 143 case FORE200E_AAL34: return ATM_AAL34; 144 case FORE200E_AAL5: return ATM_AAL5; 145 } 146 147 return -EINVAL; 148 } 149 #endif 150 151 152 static enum fore200e_aal 153 fore200e_atm2fore_aal(int aal) 154 { 155 switch(aal) { 156 case ATM_AAL0: return FORE200E_AAL0; 157 case ATM_AAL34: return FORE200E_AAL34; 158 case ATM_AAL1: 159 case ATM_AAL2: 160 case ATM_AAL5: return FORE200E_AAL5; 161 } 162 163 return -EINVAL; 164 } 165 166 167 static char* 168 fore200e_irq_itoa(int irq) 169 { 170 #if defined(__sparc_v9__) 171 return __irq_itoa(irq); 172 #else 173 static char str[8]; 174 sprintf(str, "%d", irq); 175 return str; 176 #endif 177 } 178 179 180 static void* 181 fore200e_kmalloc(int size, int flags) 182 { 183 void* chunk = kmalloc(size, flags); 184 185 if (chunk) 186 memset(chunk, 0x00, size); 187 else 188 printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags); 189 190 return chunk; 191 } 192 193 194 static void 195 fore200e_kfree(void* chunk) 196 { 197 kfree(chunk); 198 } 199 200 201 /* allocate and align a chunk of memory intended to hold the data behing exchanged 202 between the driver and the adapter (using streaming DVMA) */ 203 204 static int 205 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction) 206 { 207 unsigned long offset = 0; 208 209 if (alignment <= sizeof(int)) 210 alignment = 0; 211 212 chunk->alloc_size = size + alignment; 213 chunk->align_size = size; 214 chunk->direction = direction; 215 216 chunk->alloc_addr = fore200e_kmalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA); 217 if (chunk->alloc_addr == NULL) 218 return -ENOMEM; 219 220 if (alignment > 0) 221 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment); 222 223 chunk->align_addr = chunk->alloc_addr + offset; 224 225 chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction); 226 227 return 0; 228 } 229 230 231 /* free a chunk of memory */ 232 233 static void 234 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk) 235 { 236 fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction); 237 238 fore200e_kfree(chunk->alloc_addr); 239 } 240 241 242 static void 243 fore200e_spin(int msecs) 244 { 245 unsigned 
long timeout = jiffies + msecs_to_jiffies(msecs); 246 while (time_before(jiffies, timeout)); 247 } 248 249 250 static int 251 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs) 252 { 253 unsigned long timeout = jiffies + msecs_to_jiffies(msecs); 254 int ok; 255 256 mb(); 257 do { 258 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR)) 259 break; 260 261 } while (time_before(jiffies, timeout)); 262 263 #if 1 264 if (!ok) { 265 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n", 266 *addr, val); 267 } 268 #endif 269 270 return ok; 271 } 272 273 274 static int 275 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs) 276 { 277 unsigned long timeout = jiffies + msecs_to_jiffies(msecs); 278 int ok; 279 280 do { 281 if ((ok = (fore200e->bus->read(addr) == val))) 282 break; 283 284 } while (time_before(jiffies, timeout)); 285 286 #if 1 287 if (!ok) { 288 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n", 289 fore200e->bus->read(addr), val); 290 } 291 #endif 292 293 return ok; 294 } 295 296 297 static void 298 fore200e_free_rx_buf(struct fore200e* fore200e) 299 { 300 int scheme, magn, nbr; 301 struct buffer* buffer; 302 303 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 304 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 305 306 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) { 307 308 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) { 309 310 struct chunk* data = &buffer[ nbr ].data; 311 312 if (data->alloc_addr != NULL) 313 fore200e_chunk_free(fore200e, data); 314 } 315 } 316 } 317 } 318 } 319 320 321 static void 322 fore200e_uninit_bs_queue(struct fore200e* fore200e) 323 { 324 int scheme, magn; 325 326 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 327 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 328 329 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status; 330 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block; 331 332 if (status->alloc_addr) 333 fore200e->bus->dma_chunk_free(fore200e, status); 334 335 if (rbd_block->alloc_addr) 336 fore200e->bus->dma_chunk_free(fore200e, rbd_block); 337 } 338 } 339 } 340 341 342 static int 343 fore200e_reset(struct fore200e* fore200e, int diag) 344 { 345 int ok; 346 347 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET; 348 349 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat); 350 351 fore200e->bus->reset(fore200e); 352 353 if (diag) { 354 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000); 355 if (ok == 0) { 356 357 printk(FORE200E "device %s self-test failed\n", fore200e->name); 358 return -ENODEV; 359 } 360 361 printk(FORE200E "device %s self-test passed\n", fore200e->name); 362 363 fore200e->state = FORE200E_STATE_RESET; 364 } 365 366 return 0; 367 } 368 369 370 static void 371 fore200e_shutdown(struct fore200e* fore200e) 372 { 373 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n", 374 fore200e->name, fore200e->phys_base, 375 fore200e_irq_itoa(fore200e->irq)); 376 377 if (fore200e->state > FORE200E_STATE_RESET) { 378 /* first, reset the board to prevent further interrupts or data transfers */ 379 fore200e_reset(fore200e, 0); 380 } 381 382 /* then, release all allocated resources */ 383 switch(fore200e->state) { 384 385 case FORE200E_STATE_COMPLETE: 386 kfree(fore200e->stats); 387 388 case FORE200E_STATE_IRQ: 389 free_irq(fore200e->irq, fore200e->atm_dev); 390 
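/* note: the cases in this switch intentionally fall through (there is no break
   until FORE200E_STATE_BLANK), so the teardown for a given state cascades into
   the teardown of every earlier initialization state as well */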
391 case FORE200E_STATE_ALLOC_BUF: 392 fore200e_free_rx_buf(fore200e); 393 394 case FORE200E_STATE_INIT_BSQ: 395 fore200e_uninit_bs_queue(fore200e); 396 397 case FORE200E_STATE_INIT_RXQ: 398 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status); 399 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd); 400 401 case FORE200E_STATE_INIT_TXQ: 402 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status); 403 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd); 404 405 case FORE200E_STATE_INIT_CMDQ: 406 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status); 407 408 case FORE200E_STATE_INITIALIZE: 409 /* nothing to do for that state */ 410 411 case FORE200E_STATE_START_FW: 412 /* nothing to do for that state */ 413 414 case FORE200E_STATE_LOAD_FW: 415 /* nothing to do for that state */ 416 417 case FORE200E_STATE_RESET: 418 /* nothing to do for that state */ 419 420 case FORE200E_STATE_MAP: 421 fore200e->bus->unmap(fore200e); 422 423 case FORE200E_STATE_CONFIGURE: 424 /* nothing to do for that state */ 425 426 case FORE200E_STATE_REGISTER: 427 /* XXX shouldn't we *start* by deregistering the device? */ 428 atm_dev_deregister(fore200e->atm_dev); 429 430 case FORE200E_STATE_BLANK: 431 /* nothing to do for that state */ 432 break; 433 } 434 } 435 436 437 #ifdef CONFIG_ATM_FORE200E_PCA 438 439 static u32 fore200e_pca_read(volatile u32 __iomem *addr) 440 { 441 /* on big-endian hosts, the board is configured to convert 442 the endianess of slave RAM accesses */ 443 return le32_to_cpu(readl(addr)); 444 } 445 446 447 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr) 448 { 449 /* on big-endian hosts, the board is configured to convert 450 the endianess of slave RAM accesses */ 451 writel(cpu_to_le32(val), addr); 452 } 453 454 455 static u32 456 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction) 457 { 458 u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction); 459 460 DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n", 461 virt_addr, size, direction, dma_addr); 462 463 return dma_addr; 464 } 465 466 467 static void 468 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 469 { 470 DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n", 471 dma_addr, size, direction); 472 473 pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction); 474 } 475 476 477 static void 478 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 479 { 480 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); 481 482 pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction); 483 } 484 485 static void 486 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 487 { 488 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); 489 490 pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction); 491 } 492 493 494 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism 495 (to hold descriptors, status, queues, etc.) 
shared by the driver and the adapter */ 496 497 static int 498 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, 499 int size, int nbr, int alignment) 500 { 501 /* returned chunks are page-aligned */ 502 chunk->alloc_size = size * nbr; 503 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev, 504 chunk->alloc_size, 505 &chunk->dma_addr); 506 507 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0)) 508 return -ENOMEM; 509 510 chunk->align_addr = chunk->alloc_addr; 511 512 return 0; 513 } 514 515 516 /* free a DMA consistent chunk of memory */ 517 518 static void 519 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk) 520 { 521 pci_free_consistent((struct pci_dev*)fore200e->bus_dev, 522 chunk->alloc_size, 523 chunk->alloc_addr, 524 chunk->dma_addr); 525 } 526 527 528 static int 529 fore200e_pca_irq_check(struct fore200e* fore200e) 530 { 531 /* this is a 1 bit register */ 532 int irq_posted = readl(fore200e->regs.pca.psr); 533 534 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2) 535 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) { 536 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number); 537 } 538 #endif 539 540 return irq_posted; 541 } 542 543 544 static void 545 fore200e_pca_irq_ack(struct fore200e* fore200e) 546 { 547 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr); 548 } 549 550 551 static void 552 fore200e_pca_reset(struct fore200e* fore200e) 553 { 554 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr); 555 fore200e_spin(10); 556 writel(0, fore200e->regs.pca.hcr); 557 } 558 559 560 static int __init 561 fore200e_pca_map(struct fore200e* fore200e) 562 { 563 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name); 564 565 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH); 566 567 if (fore200e->virt_base == NULL) { 568 printk(FORE200E "can't map device %s\n", fore200e->name); 569 return -EFAULT; 570 } 571 572 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base); 573 574 /* gain access to the PCA specific registers */ 575 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET; 576 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET; 577 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET; 578 579 fore200e->state = FORE200E_STATE_MAP; 580 return 0; 581 } 582 583 584 static void 585 fore200e_pca_unmap(struct fore200e* fore200e) 586 { 587 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name); 588 589 if (fore200e->virt_base != NULL) 590 iounmap(fore200e->virt_base); 591 } 592 593 594 static int __init 595 fore200e_pca_configure(struct fore200e* fore200e) 596 { 597 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev; 598 u8 master_ctrl, latency; 599 600 DPRINTK(2, "device %s being configured\n", fore200e->name); 601 602 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) { 603 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n"); 604 return -EIO; 605 } 606 607 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl); 608 609 master_ctrl = master_ctrl 610 #if defined(__BIG_ENDIAN) 611 /* request the PCA board to convert the endianess of slave RAM accesses */ 612 | PCA200E_CTRL_CONVERT_ENDIAN 613 #endif 614 #if 0 615 | PCA200E_CTRL_DIS_CACHE_RD 616 | PCA200E_CTRL_DIS_WRT_INVAL 617 | PCA200E_CTRL_ENA_CONT_REQ_MODE 618 | PCA200E_CTRL_2_CACHE_WRT_INVAL 619 #endif 620 | PCA200E_CTRL_LARGE_PCI_BURSTS; 
621 622 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl); 623 624 /* raise latency from 32 (default) to 192, as this seems to prevent NIC 625 lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition. 626 this may impact the performances of other PCI devices on the same bus, though */ 627 latency = 192; 628 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency); 629 630 fore200e->state = FORE200E_STATE_CONFIGURE; 631 return 0; 632 } 633 634 635 static int __init 636 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom) 637 { 638 struct host_cmdq* cmdq = &fore200e->host_cmdq; 639 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 640 struct prom_opcode opcode; 641 int ok; 642 u32 prom_dma; 643 644 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 645 646 opcode.opcode = OPCODE_GET_PROM; 647 opcode.pad = 0; 648 649 prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE); 650 651 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr); 652 653 *entry->status = STATUS_PENDING; 654 655 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode); 656 657 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 658 659 *entry->status = STATUS_FREE; 660 661 fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE); 662 663 if (ok == 0) { 664 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name); 665 return -EIO; 666 } 667 668 #if defined(__BIG_ENDIAN) 669 670 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) )) 671 672 /* MAC address is stored as little-endian */ 673 swap_here(&prom->mac_addr[0]); 674 swap_here(&prom->mac_addr[4]); 675 #endif 676 677 return 0; 678 } 679 680 681 static int 682 fore200e_pca_proc_read(struct fore200e* fore200e, char *page) 683 { 684 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev; 685 686 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n", 687 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn)); 688 } 689 690 #endif /* CONFIG_ATM_FORE200E_PCA */ 691 692 693 #ifdef CONFIG_ATM_FORE200E_SBA 694 695 static u32 696 fore200e_sba_read(volatile u32 __iomem *addr) 697 { 698 return sbus_readl(addr); 699 } 700 701 702 static void 703 fore200e_sba_write(u32 val, volatile u32 __iomem *addr) 704 { 705 sbus_writel(val, addr); 706 } 707 708 709 static u32 710 fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction) 711 { 712 u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction); 713 714 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n", 715 virt_addr, size, direction, dma_addr); 716 717 return dma_addr; 718 } 719 720 721 static void 722 fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 723 { 724 DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n", 725 dma_addr, size, direction); 726 727 sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction); 728 } 729 730 731 static void 732 fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 733 { 734 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); 735 736 sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction); 737 
} 738 739 static void 740 fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 741 { 742 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); 743 744 sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction); 745 } 746 747 748 /* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism 749 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */ 750 751 static int 752 fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, 753 int size, int nbr, int alignment) 754 { 755 chunk->alloc_size = chunk->align_size = size * nbr; 756 757 /* returned chunks are page-aligned */ 758 chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev, 759 chunk->alloc_size, 760 &chunk->dma_addr); 761 762 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0)) 763 return -ENOMEM; 764 765 chunk->align_addr = chunk->alloc_addr; 766 767 return 0; 768 } 769 770 771 /* free a DVMA consistent chunk of memory */ 772 773 static void 774 fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk) 775 { 776 sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev, 777 chunk->alloc_size, 778 chunk->alloc_addr, 779 chunk->dma_addr); 780 } 781 782 783 static void 784 fore200e_sba_irq_enable(struct fore200e* fore200e) 785 { 786 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY; 787 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr); 788 } 789 790 791 static int 792 fore200e_sba_irq_check(struct fore200e* fore200e) 793 { 794 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ; 795 } 796 797 798 static void 799 fore200e_sba_irq_ack(struct fore200e* fore200e) 800 { 801 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY; 802 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr); 803 } 804 805 806 static void 807 fore200e_sba_reset(struct fore200e* fore200e) 808 { 809 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr); 810 fore200e_spin(10); 811 fore200e->bus->write(0, fore200e->regs.sba.hcr); 812 } 813 814 815 static int __init 816 fore200e_sba_map(struct fore200e* fore200e) 817 { 818 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev; 819 unsigned int bursts; 820 821 /* gain access to the SBA specific registers */ 822 fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR"); 823 fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR"); 824 fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR"); 825 fore200e->virt_base = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM"); 826 827 if (fore200e->virt_base == NULL) { 828 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name); 829 return -EFAULT; 830 } 831 832 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base); 833 834 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */ 835 836 /* get the supported DVMA burst sizes */ 837 bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00); 838 839 if (sbus_can_dma_64bit(sbus_dev)) 840 sbus_set_sbus64(sbus_dev, bursts); 841 842 fore200e->state = FORE200E_STATE_MAP; 843 return 0; 844 } 845 846 847 static void 848 
fore200e_sba_unmap(struct fore200e* fore200e) 849 { 850 sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH); 851 sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH); 852 sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH); 853 sbus_iounmap(fore200e->virt_base, SBA200E_RAM_LENGTH); 854 } 855 856 857 static int __init 858 fore200e_sba_configure(struct fore200e* fore200e) 859 { 860 fore200e->state = FORE200E_STATE_CONFIGURE; 861 return 0; 862 } 863 864 865 static struct fore200e* __init 866 fore200e_sba_detect(const struct fore200e_bus* bus, int index) 867 { 868 struct fore200e* fore200e; 869 struct sbus_bus* sbus_bus; 870 struct sbus_dev* sbus_dev = NULL; 871 872 unsigned int count = 0; 873 874 for_each_sbus (sbus_bus) { 875 for_each_sbusdev (sbus_dev, sbus_bus) { 876 if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) { 877 if (count >= index) 878 goto found; 879 count++; 880 } 881 } 882 } 883 return NULL; 884 885 found: 886 if (sbus_dev->num_registers != 4) { 887 printk(FORE200E "this %s device has %d instead of 4 registers\n", 888 bus->model_name, sbus_dev->num_registers); 889 return NULL; 890 } 891 892 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL); 893 if (fore200e == NULL) 894 return NULL; 895 896 fore200e->bus = bus; 897 fore200e->bus_dev = sbus_dev; 898 fore200e->irq = sbus_dev->irqs[ 0 ]; 899 900 fore200e->phys_base = (unsigned long)sbus_dev; 901 902 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1); 903 904 return fore200e; 905 } 906 907 908 static int __init 909 fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom) 910 { 911 struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev; 912 int len; 913 914 len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4); 915 if (len < 0) 916 return -EBUSY; 917 918 len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4); 919 if (len < 0) 920 return -EBUSY; 921 922 prom_getproperty(sbus_dev->prom_node, "serialnumber", 923 (char*)&prom->serial_number, sizeof(prom->serial_number)); 924 925 prom_getproperty(sbus_dev->prom_node, "promversion", 926 (char*)&prom->hw_revision, sizeof(prom->hw_revision)); 927 928 return 0; 929 } 930 931 932 static int 933 fore200e_sba_proc_read(struct fore200e* fore200e, char *page) 934 { 935 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev; 936 937 return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name); 938 } 939 #endif /* CONFIG_ATM_FORE200E_SBA */ 940 941 942 static void 943 fore200e_tx_irq(struct fore200e* fore200e) 944 { 945 struct host_txq* txq = &fore200e->host_txq; 946 struct host_txq_entry* entry; 947 struct atm_vcc* vcc; 948 struct fore200e_vc_map* vc_map; 949 950 if (fore200e->host_txq.txing == 0) 951 return; 952 953 for (;;) { 954 955 entry = &txq->host_entry[ txq->tail ]; 956 957 if ((*entry->status & STATUS_COMPLETE) == 0) { 958 break; 959 } 960 961 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n", 962 entry, txq->tail, entry->vc_map, entry->skb); 963 964 /* free copy of misaligned data */ 965 kfree(entry->data); 966 967 /* remove DMA mapping */ 968 fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length, 969 DMA_TO_DEVICE); 970 971 vc_map = entry->vc_map; 972 973 /* vcc closed since the time the entry was submitted for tx? 
*/ 974 if ((vc_map->vcc == NULL) || 975 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) { 976 977 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n", 978 fore200e->atm_dev->number); 979 980 dev_kfree_skb_any(entry->skb); 981 } 982 else { 983 ASSERT(vc_map->vcc); 984 985 /* vcc closed then immediately re-opened? */ 986 if (vc_map->incarn != entry->incarn) { 987 988 /* when a vcc is closed, some PDUs may be still pending in the tx queue. 989 if the same vcc is immediately re-opened, those pending PDUs must 990 not be popped after the completion of their emission, as they refer 991 to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc 992 would be decremented by the size of the (unrelated) skb, possibly 993 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc. 994 we thus bind the tx entry to the current incarnation of the vcc 995 when the entry is submitted for tx. When the tx later completes, 996 if the incarnation number of the tx entry does not match the one 997 of the vcc, then this implies that the vcc has been closed then re-opened. 998 we thus just drop the skb here. */ 999 1000 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n", 1001 fore200e->atm_dev->number); 1002 1003 dev_kfree_skb_any(entry->skb); 1004 } 1005 else { 1006 vcc = vc_map->vcc; 1007 ASSERT(vcc); 1008 1009 /* notify tx completion */ 1010 if (vcc->pop) { 1011 vcc->pop(vcc, entry->skb); 1012 } 1013 else { 1014 dev_kfree_skb_any(entry->skb); 1015 } 1016 #if 1 1017 /* race fixed by the above incarnation mechanism, but... */ 1018 if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) { 1019 atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0); 1020 } 1021 #endif 1022 /* check error condition */ 1023 if (*entry->status & STATUS_ERROR) 1024 atomic_inc(&vcc->stats->tx_err); 1025 else 1026 atomic_inc(&vcc->stats->tx); 1027 } 1028 } 1029 1030 *entry->status = STATUS_FREE; 1031 1032 fore200e->host_txq.txing--; 1033 1034 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX); 1035 } 1036 } 1037 1038 1039 #ifdef FORE200E_BSQ_DEBUG 1040 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn) 1041 { 1042 struct buffer* buffer; 1043 int count = 0; 1044 1045 buffer = bsq->freebuf; 1046 while (buffer) { 1047 1048 if (buffer->supplied) { 1049 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n", 1050 where, scheme, magn, buffer->index); 1051 } 1052 1053 if (buffer->magn != magn) { 1054 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n", 1055 where, scheme, magn, buffer->index, buffer->magn); 1056 } 1057 1058 if (buffer->scheme != scheme) { 1059 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n", 1060 where, scheme, magn, buffer->index, buffer->scheme); 1061 } 1062 1063 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) { 1064 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n", 1065 where, scheme, magn, buffer->index); 1066 } 1067 1068 count++; 1069 buffer = buffer->next; 1070 } 1071 1072 if (count != bsq->freebuf_count) { 1073 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n", 1074 where, scheme, magn, count, bsq->freebuf_count); 1075 } 1076 return 0; 1077 } 1078 #endif 1079 1080 1081 static void 1082 fore200e_supply(struct fore200e* fore200e) 1083 { 1084 int scheme, magn, i; 1085 1086 struct host_bsq* bsq; 1087 struct host_bsq_entry* entry; 1088 struct buffer* buffer; 
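/* scan every buffer scheme/magnitude pair and, whenever at least RBD_BLK_SIZE
   free buffers have accumulated, hand them back to the cp as one RBD block */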
1089 1090 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 1091 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 1092 1093 bsq = &fore200e->host_bsq[ scheme ][ magn ]; 1094 1095 #ifdef FORE200E_BSQ_DEBUG 1096 bsq_audit(1, bsq, scheme, magn); 1097 #endif 1098 while (bsq->freebuf_count >= RBD_BLK_SIZE) { 1099 1100 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n", 1101 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count); 1102 1103 entry = &bsq->host_entry[ bsq->head ]; 1104 1105 for (i = 0; i < RBD_BLK_SIZE; i++) { 1106 1107 /* take the first buffer in the free buffer list */ 1108 buffer = bsq->freebuf; 1109 if (!buffer) { 1110 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n", 1111 scheme, magn, bsq->freebuf_count); 1112 return; 1113 } 1114 bsq->freebuf = buffer->next; 1115 1116 #ifdef FORE200E_BSQ_DEBUG 1117 if (buffer->supplied) 1118 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n", 1119 scheme, magn, buffer->index); 1120 buffer->supplied = 1; 1121 #endif 1122 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr; 1123 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer); 1124 } 1125 1126 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS); 1127 1128 /* decrease accordingly the number of free rx buffers */ 1129 bsq->freebuf_count -= RBD_BLK_SIZE; 1130 1131 *entry->status = STATUS_PENDING; 1132 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr); 1133 } 1134 } 1135 } 1136 } 1137 1138 1139 static int 1140 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd) 1141 { 1142 struct sk_buff* skb; 1143 struct buffer* buffer; 1144 struct fore200e_vcc* fore200e_vcc; 1145 int i, pdu_len = 0; 1146 #ifdef FORE200E_52BYTE_AAL0_SDU 1147 u32 cell_header = 0; 1148 #endif 1149 1150 ASSERT(vcc); 1151 1152 fore200e_vcc = FORE200E_VCC(vcc); 1153 ASSERT(fore200e_vcc); 1154 1155 #ifdef FORE200E_52BYTE_AAL0_SDU 1156 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) { 1157 1158 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) | 1159 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) | 1160 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) | 1161 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) | 1162 rpd->atm_header.clp; 1163 pdu_len = 4; 1164 } 1165 #endif 1166 1167 /* compute total PDU length */ 1168 for (i = 0; i < rpd->nseg; i++) 1169 pdu_len += rpd->rsd[ i ].length; 1170 1171 skb = alloc_skb(pdu_len, GFP_ATOMIC); 1172 if (skb == NULL) { 1173 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len); 1174 1175 atomic_inc(&vcc->stats->rx_drop); 1176 return -ENOMEM; 1177 } 1178 1179 __net_timestamp(skb); 1180 1181 #ifdef FORE200E_52BYTE_AAL0_SDU 1182 if (cell_header) { 1183 *((u32*)skb_put(skb, 4)) = cell_header; 1184 } 1185 #endif 1186 1187 /* reassemble segments */ 1188 for (i = 0; i < rpd->nseg; i++) { 1189 1190 /* rebuild rx buffer address from rsd handle */ 1191 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle); 1192 1193 /* Make device DMA transfer visible to CPU. */ 1194 fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE); 1195 1196 memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length); 1197 1198 /* Now let the device get at it again. 
*/ 1199 fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE); 1200 } 1201 1202 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize); 1203 1204 if (pdu_len < fore200e_vcc->rx_min_pdu) 1205 fore200e_vcc->rx_min_pdu = pdu_len; 1206 if (pdu_len > fore200e_vcc->rx_max_pdu) 1207 fore200e_vcc->rx_max_pdu = pdu_len; 1208 fore200e_vcc->rx_pdu++; 1209 1210 /* push PDU */ 1211 if (atm_charge(vcc, skb->truesize) == 0) { 1212 1213 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n", 1214 vcc->itf, vcc->vpi, vcc->vci); 1215 1216 dev_kfree_skb_any(skb); 1217 1218 atomic_inc(&vcc->stats->rx_drop); 1219 return -ENOMEM; 1220 } 1221 1222 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); 1223 1224 vcc->push(vcc, skb); 1225 atomic_inc(&vcc->stats->rx); 1226 1227 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); 1228 1229 return 0; 1230 } 1231 1232 1233 static void 1234 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd) 1235 { 1236 struct host_bsq* bsq; 1237 struct buffer* buffer; 1238 int i; 1239 1240 for (i = 0; i < rpd->nseg; i++) { 1241 1242 /* rebuild rx buffer address from rsd handle */ 1243 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle); 1244 1245 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ]; 1246 1247 #ifdef FORE200E_BSQ_DEBUG 1248 bsq_audit(2, bsq, buffer->scheme, buffer->magn); 1249 1250 if (buffer->supplied == 0) 1251 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n", 1252 buffer->scheme, buffer->magn, buffer->index); 1253 buffer->supplied = 0; 1254 #endif 1255 1256 /* re-insert the buffer into the free buffer list */ 1257 buffer->next = bsq->freebuf; 1258 bsq->freebuf = buffer; 1259 1260 /* then increment the number of free rx buffers */ 1261 bsq->freebuf_count++; 1262 } 1263 } 1264 1265 1266 static void 1267 fore200e_rx_irq(struct fore200e* fore200e) 1268 { 1269 struct host_rxq* rxq = &fore200e->host_rxq; 1270 struct host_rxq_entry* entry; 1271 struct atm_vcc* vcc; 1272 struct fore200e_vc_map* vc_map; 1273 1274 for (;;) { 1275 1276 entry = &rxq->host_entry[ rxq->head ]; 1277 1278 /* no more received PDUs */ 1279 if ((*entry->status & STATUS_COMPLETE) == 0) 1280 break; 1281 1282 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1283 1284 if ((vc_map->vcc == NULL) || 1285 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) { 1286 1287 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n", 1288 fore200e->atm_dev->number, 1289 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1290 } 1291 else { 1292 vcc = vc_map->vcc; 1293 ASSERT(vcc); 1294 1295 if ((*entry->status & STATUS_ERROR) == 0) { 1296 1297 fore200e_push_rpd(fore200e, vcc, entry->rpd); 1298 } 1299 else { 1300 DPRINTK(2, "damaged PDU on %d.%d.%d\n", 1301 fore200e->atm_dev->number, 1302 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1303 atomic_inc(&vcc->stats->rx_err); 1304 } 1305 } 1306 1307 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX); 1308 1309 fore200e_collect_rpd(fore200e, entry->rpd); 1310 1311 /* rewrite the rpd address to ack the received PDU */ 1312 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr); 1313 *entry->status = STATUS_FREE; 1314 1315 fore200e_supply(fore200e); 1316 } 1317 } 1318 1319 1320 #ifndef FORE200E_USE_TASKLET 1321 static void 1322 fore200e_irq(struct fore200e* fore200e) 1323 { 1324 unsigned long flags; 1325 1326 spin_lock_irqsave(&fore200e->q_lock, flags); 1327 fore200e_rx_irq(fore200e); 1328 
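/* note: rx and tx completions are processed under two separate acquisitions of q_lock */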
spin_unlock_irqrestore(&fore200e->q_lock, flags); 1329 1330 spin_lock_irqsave(&fore200e->q_lock, flags); 1331 fore200e_tx_irq(fore200e); 1332 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1333 } 1334 #endif 1335 1336 1337 static irqreturn_t 1338 fore200e_interrupt(int irq, void* dev, struct pt_regs* regs) 1339 { 1340 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev); 1341 1342 if (fore200e->bus->irq_check(fore200e) == 0) { 1343 1344 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number); 1345 return IRQ_NONE; 1346 } 1347 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number); 1348 1349 #ifdef FORE200E_USE_TASKLET 1350 tasklet_schedule(&fore200e->tx_tasklet); 1351 tasklet_schedule(&fore200e->rx_tasklet); 1352 #else 1353 fore200e_irq(fore200e); 1354 #endif 1355 1356 fore200e->bus->irq_ack(fore200e); 1357 return IRQ_HANDLED; 1358 } 1359 1360 1361 #ifdef FORE200E_USE_TASKLET 1362 static void 1363 fore200e_tx_tasklet(unsigned long data) 1364 { 1365 struct fore200e* fore200e = (struct fore200e*) data; 1366 unsigned long flags; 1367 1368 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number); 1369 1370 spin_lock_irqsave(&fore200e->q_lock, flags); 1371 fore200e_tx_irq(fore200e); 1372 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1373 } 1374 1375 1376 static void 1377 fore200e_rx_tasklet(unsigned long data) 1378 { 1379 struct fore200e* fore200e = (struct fore200e*) data; 1380 unsigned long flags; 1381 1382 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number); 1383 1384 spin_lock_irqsave(&fore200e->q_lock, flags); 1385 fore200e_rx_irq((struct fore200e*) data); 1386 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1387 } 1388 #endif 1389 1390 1391 static int 1392 fore200e_select_scheme(struct atm_vcc* vcc) 1393 { 1394 /* fairly balance the VCs over (identical) buffer schemes */ 1395 int scheme = vcc->vci % 2 ? 
BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO; 1396 1397 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n", 1398 vcc->itf, vcc->vpi, vcc->vci, scheme); 1399 1400 return scheme; 1401 } 1402 1403 1404 static int 1405 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu) 1406 { 1407 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1408 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1409 struct activate_opcode activ_opcode; 1410 struct deactivate_opcode deactiv_opcode; 1411 struct vpvc vpvc; 1412 int ok; 1413 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal); 1414 1415 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1416 1417 if (activate) { 1418 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc); 1419 1420 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN; 1421 activ_opcode.aal = aal; 1422 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme; 1423 activ_opcode.pad = 0; 1424 } 1425 else { 1426 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN; 1427 deactiv_opcode.pad = 0; 1428 } 1429 1430 vpvc.vci = vcc->vci; 1431 vpvc.vpi = vcc->vpi; 1432 1433 *entry->status = STATUS_PENDING; 1434 1435 if (activate) { 1436 1437 #ifdef FORE200E_52BYTE_AAL0_SDU 1438 mtu = 48; 1439 #endif 1440 /* the MTU is not used by the cp, except in the case of AAL0 */ 1441 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu); 1442 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc); 1443 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode); 1444 } 1445 else { 1446 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc); 1447 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode); 1448 } 1449 1450 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1451 1452 *entry->status = STATUS_FREE; 1453 1454 if (ok == 0) { 1455 printk(FORE200E "unable to %s VC %d.%d.%d\n", 1456 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci); 1457 return -EIO; 1458 } 1459 1460 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci, 1461 activate ? 
"open" : "clos"); 1462 1463 return 0; 1464 } 1465 1466 1467 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */ 1468 1469 static void 1470 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate) 1471 { 1472 if (qos->txtp.max_pcr < ATM_OC3_PCR) { 1473 1474 /* compute the data cells to idle cells ratio from the tx PCR */ 1475 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR; 1476 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells; 1477 } 1478 else { 1479 /* disable rate control */ 1480 rate->data_cells = rate->idle_cells = 0; 1481 } 1482 } 1483 1484 1485 static int 1486 fore200e_open(struct atm_vcc *vcc) 1487 { 1488 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1489 struct fore200e_vcc* fore200e_vcc; 1490 struct fore200e_vc_map* vc_map; 1491 unsigned long flags; 1492 int vci = vcc->vci; 1493 short vpi = vcc->vpi; 1494 1495 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS)); 1496 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS)); 1497 1498 spin_lock_irqsave(&fore200e->q_lock, flags); 1499 1500 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci); 1501 if (vc_map->vcc) { 1502 1503 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1504 1505 printk(FORE200E "VC %d.%d.%d already in use\n", 1506 fore200e->atm_dev->number, vpi, vci); 1507 1508 return -EINVAL; 1509 } 1510 1511 vc_map->vcc = vcc; 1512 1513 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1514 1515 fore200e_vcc = fore200e_kmalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC); 1516 if (fore200e_vcc == NULL) { 1517 vc_map->vcc = NULL; 1518 return -ENOMEM; 1519 } 1520 1521 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; " 1522 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n", 1523 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1524 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ], 1525 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu, 1526 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ], 1527 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu); 1528 1529 /* pseudo-CBR bandwidth requested? 
*/ 1530 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1531 1532 down(&fore200e->rate_sf); 1533 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) { 1534 up(&fore200e->rate_sf); 1535 1536 fore200e_kfree(fore200e_vcc); 1537 vc_map->vcc = NULL; 1538 return -EAGAIN; 1539 } 1540 1541 /* reserve bandwidth */ 1542 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr; 1543 up(&fore200e->rate_sf); 1544 } 1545 1546 vcc->itf = vcc->dev->number; 1547 1548 set_bit(ATM_VF_PARTIAL,&vcc->flags); 1549 set_bit(ATM_VF_ADDR, &vcc->flags); 1550 1551 vcc->dev_data = fore200e_vcc; 1552 1553 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) { 1554 1555 vc_map->vcc = NULL; 1556 1557 clear_bit(ATM_VF_ADDR, &vcc->flags); 1558 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1559 1560 vcc->dev_data = NULL; 1561 1562 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1563 1564 fore200e_kfree(fore200e_vcc); 1565 return -EINVAL; 1566 } 1567 1568 /* compute rate control parameters */ 1569 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1570 1571 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate); 1572 set_bit(ATM_VF_HASQOS, &vcc->flags); 1573 1574 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n", 1575 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1576 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr, 1577 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells); 1578 } 1579 1580 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1; 1581 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0; 1582 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0; 1583 1584 /* new incarnation of the vcc */ 1585 vc_map->incarn = ++fore200e->incarn_count; 1586 1587 /* VC unusable before this flag is set */ 1588 set_bit(ATM_VF_READY, &vcc->flags); 1589 1590 return 0; 1591 } 1592 1593 1594 static void 1595 fore200e_close(struct atm_vcc* vcc) 1596 { 1597 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1598 struct fore200e_vcc* fore200e_vcc; 1599 struct fore200e_vc_map* vc_map; 1600 unsigned long flags; 1601 1602 ASSERT(vcc); 1603 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS)); 1604 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS)); 1605 1606 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal)); 1607 1608 clear_bit(ATM_VF_READY, &vcc->flags); 1609 1610 fore200e_activate_vcin(fore200e, 0, vcc, 0); 1611 1612 spin_lock_irqsave(&fore200e->q_lock, flags); 1613 1614 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); 1615 1616 /* the vc is no longer considered as "in use" by fore200e_open() */ 1617 vc_map->vcc = NULL; 1618 1619 vcc->itf = vcc->vci = vcc->vpi = 0; 1620 1621 fore200e_vcc = FORE200E_VCC(vcc); 1622 vcc->dev_data = NULL; 1623 1624 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1625 1626 /* release reserved bandwidth, if any */ 1627 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1628 1629 down(&fore200e->rate_sf); 1630 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1631 up(&fore200e->rate_sf); 1632 1633 clear_bit(ATM_VF_HASQOS, &vcc->flags); 1634 } 1635 1636 clear_bit(ATM_VF_ADDR, &vcc->flags); 1637 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1638 1639 ASSERT(fore200e_vcc); 1640 fore200e_kfree(fore200e_vcc); 1641 } 1642 1643 1644 static int 1645 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) 1646 { 1647 struct fore200e* fore200e = 
FORE200E_DEV(vcc->dev); 1648 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc); 1649 struct fore200e_vc_map* vc_map; 1650 struct host_txq* txq = &fore200e->host_txq; 1651 struct host_txq_entry* entry; 1652 struct tpd* tpd; 1653 struct tpd_haddr tpd_haddr; 1654 int retry = CONFIG_ATM_FORE200E_TX_RETRY; 1655 int tx_copy = 0; 1656 int tx_len = skb->len; 1657 u32* cell_header = NULL; 1658 unsigned char* skb_data; 1659 int skb_len; 1660 unsigned char* data; 1661 unsigned long flags; 1662 1663 ASSERT(vcc); 1664 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); 1665 ASSERT(fore200e); 1666 ASSERT(fore200e_vcc); 1667 1668 if (!test_bit(ATM_VF_READY, &vcc->flags)) { 1669 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi); 1670 dev_kfree_skb_any(skb); 1671 return -EINVAL; 1672 } 1673 1674 #ifdef FORE200E_52BYTE_AAL0_SDU 1675 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) { 1676 cell_header = (u32*) skb->data; 1677 skb_data = skb->data + 4; /* skip 4-byte cell header */ 1678 skb_len = tx_len = skb->len - 4; 1679 1680 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header); 1681 } 1682 else 1683 #endif 1684 { 1685 skb_data = skb->data; 1686 skb_len = skb->len; 1687 } 1688 1689 if (((unsigned long)skb_data) & 0x3) { 1690 1691 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name); 1692 tx_copy = 1; 1693 tx_len = skb_len; 1694 } 1695 1696 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) { 1697 1698 /* this simply NUKES the PCA board */ 1699 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name); 1700 tx_copy = 1; 1701 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD; 1702 } 1703 1704 if (tx_copy) { 1705 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA); 1706 if (data == NULL) { 1707 if (vcc->pop) { 1708 vcc->pop(vcc, skb); 1709 } 1710 else { 1711 dev_kfree_skb_any(skb); 1712 } 1713 return -ENOMEM; 1714 } 1715 1716 memcpy(data, skb_data, skb_len); 1717 if (skb_len < tx_len) 1718 memset(data + skb_len, 0x00, tx_len - skb_len); 1719 } 1720 else { 1721 data = skb_data; 1722 } 1723 1724 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); 1725 ASSERT(vc_map->vcc == vcc); 1726 1727 retry_here: 1728 1729 spin_lock_irqsave(&fore200e->q_lock, flags); 1730 1731 entry = &txq->host_entry[ txq->head ]; 1732 1733 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) { 1734 1735 /* try to free completed tx queue entries */ 1736 fore200e_tx_irq(fore200e); 1737 1738 if (*entry->status != STATUS_FREE) { 1739 1740 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1741 1742 /* retry once again? */ 1743 if (--retry > 0) { 1744 udelay(50); 1745 goto retry_here; 1746 } 1747 1748 atomic_inc(&vcc->stats->tx_err); 1749 1750 fore200e->tx_sat++; 1751 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n", 1752 fore200e->name, fore200e->cp_queues->heartbeat); 1753 if (vcc->pop) { 1754 vcc->pop(vcc, skb); 1755 } 1756 else { 1757 dev_kfree_skb_any(skb); 1758 } 1759 1760 if (tx_copy) 1761 kfree(data); 1762 1763 return -ENOBUFS; 1764 } 1765 } 1766 1767 entry->incarn = vc_map->incarn; 1768 entry->vc_map = vc_map; 1769 entry->skb = skb; 1770 entry->data = tx_copy ? 
data : NULL; 1771 1772 tpd = entry->tpd; 1773 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE); 1774 tpd->tsd[ 0 ].length = tx_len; 1775 1776 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX); 1777 txq->txing++; 1778 1779 /* The dma_map call above implies a dma_sync so the device can use it, 1780 * thus no explicit dma_sync call is necessary here. 1781 */ 1782 1783 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n", 1784 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1785 tpd->tsd[0].length, skb_len); 1786 1787 if (skb_len < fore200e_vcc->tx_min_pdu) 1788 fore200e_vcc->tx_min_pdu = skb_len; 1789 if (skb_len > fore200e_vcc->tx_max_pdu) 1790 fore200e_vcc->tx_max_pdu = skb_len; 1791 fore200e_vcc->tx_pdu++; 1792 1793 /* set tx rate control information */ 1794 tpd->rate.data_cells = fore200e_vcc->rate.data_cells; 1795 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells; 1796 1797 if (cell_header) { 1798 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP); 1799 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 1800 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT; 1801 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT; 1802 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT; 1803 } 1804 else { 1805 /* set the ATM header, common to all cells conveying the PDU */ 1806 tpd->atm_header.clp = 0; 1807 tpd->atm_header.plt = 0; 1808 tpd->atm_header.vci = vcc->vci; 1809 tpd->atm_header.vpi = vcc->vpi; 1810 tpd->atm_header.gfc = 0; 1811 } 1812 1813 tpd->spec.length = tx_len; 1814 tpd->spec.nseg = 1; 1815 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal); 1816 tpd->spec.intr = 1; 1817 1818 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */ 1819 tpd_haddr.pad = 0; 1820 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */ 1821 1822 *entry->status = STATUS_PENDING; 1823 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr); 1824 1825 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1826 1827 return 0; 1828 } 1829 1830 1831 static int 1832 fore200e_getstats(struct fore200e* fore200e) 1833 { 1834 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1835 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1836 struct stats_opcode opcode; 1837 int ok; 1838 u32 stats_dma_addr; 1839 1840 if (fore200e->stats == NULL) { 1841 fore200e->stats = fore200e_kmalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA); 1842 if (fore200e->stats == NULL) 1843 return -ENOMEM; 1844 } 1845 1846 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats, 1847 sizeof(struct stats), DMA_FROM_DEVICE); 1848 1849 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1850 1851 opcode.opcode = OPCODE_GET_STATS; 1852 opcode.pad = 0; 1853 1854 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr); 1855 1856 *entry->status = STATUS_PENDING; 1857 1858 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode); 1859 1860 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1861 1862 *entry->status = STATUS_FREE; 1863 1864 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE); 1865 1866 if (ok == 0) { 1867 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name); 1868 return -EIO; 1869 } 1870 1871 return 0; 1872 } 1873 1874 
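/* illustrative sketch only (kept under #if 0, never built): it summarizes the
   command-queue handshake shared by fore200e_pca_prom_read(), fore200e_getstats(),
   fore200e_get_oc3() and fore200e_set_oc3(). the command parameters are written
   first, the host status word is set to STATUS_PENDING, and the opcode is written
   last, since that write is what hands the entry over to the cp; completion is
   then detected by polling the host-resident status word. the helper name and the
   plain u32 opcode argument are made up for the illustration. */
#if 0
static int
fore200e_issue_cmd_sketch(struct fore200e* fore200e, u32 opcode_word)
{
    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    int ok;

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    /* 1. write the command parameters (DMA addresses, register values, ...)
          into entry->cp_entry, as the real callers do */

    /* 2. mark the host status word as pending before triggering the cp */
    *entry->status = STATUS_PENDING;

    /* 3. write the opcode last: this is what makes the cp process the entry */
    fore200e->bus->write(opcode_word, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);

    /* 4. busy-wait (up to 400 ms) for completion or an error status */
    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    return ok ? 0 : -EIO;
}
#endif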
1875 static int 1876 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen) 1877 { 1878 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ 1879 1880 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n", 1881 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen); 1882 1883 return -EINVAL; 1884 } 1885 1886 1887 static int 1888 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen) 1889 { 1890 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ 1891 1892 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n", 1893 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen); 1894 1895 return -EINVAL; 1896 } 1897 1898 1899 #if 0 /* currently unused */ 1900 static int 1901 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs) 1902 { 1903 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1904 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1905 struct oc3_opcode opcode; 1906 int ok; 1907 u32 oc3_regs_dma_addr; 1908 1909 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE); 1910 1911 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1912 1913 opcode.opcode = OPCODE_GET_OC3; 1914 opcode.reg = 0; 1915 opcode.value = 0; 1916 opcode.mask = 0; 1917 1918 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr); 1919 1920 *entry->status = STATUS_PENDING; 1921 1922 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode); 1923 1924 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1925 1926 *entry->status = STATUS_FREE; 1927 1928 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE); 1929 1930 if (ok == 0) { 1931 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name); 1932 return -EIO; 1933 } 1934 1935 return 0; 1936 } 1937 #endif 1938 1939 1940 static int 1941 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask) 1942 { 1943 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1944 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1945 struct oc3_opcode opcode; 1946 int ok; 1947 1948 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask); 1949 1950 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1951 1952 opcode.opcode = OPCODE_SET_OC3; 1953 opcode.reg = reg; 1954 opcode.value = value; 1955 opcode.mask = mask; 1956 1957 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr); 1958 1959 *entry->status = STATUS_PENDING; 1960 1961 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode); 1962 1963 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1964 1965 *entry->status = STATUS_FREE; 1966 1967 if (ok == 0) { 1968 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name); 1969 return -EIO; 1970 } 1971 1972 return 0; 1973 } 1974 1975 1976 static int 1977 fore200e_setloop(struct fore200e* fore200e, int loop_mode) 1978 { 1979 u32 mct_value, mct_mask; 1980 int error; 1981 1982 if (!capable(CAP_NET_ADMIN)) 1983 return -EPERM; 1984 1985 switch (loop_mode) { 1986 1987 case ATM_LM_NONE: 1988 mct_value = 0; 1989 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE; 1990 break; 1991 1992 case ATM_LM_LOC_PHY: 1993 mct_value = mct_mask = SUNI_MCT_DLE; 1994 break; 1995 1996 case ATM_LM_RMT_PHY: 1997 
mct_value = mct_mask = SUNI_MCT_LLE; 1998 break; 1999 2000 default: 2001 return -EINVAL; 2002 } 2003 2004 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask); 2005 if (error == 0) 2006 fore200e->loop_mode = loop_mode; 2007 2008 return error; 2009 } 2010 2011 2012 static inline unsigned int 2013 fore200e_swap(unsigned int in) 2014 { 2015 #if defined(__LITTLE_ENDIAN) 2016 return swab32(in); 2017 #else 2018 return in; 2019 #endif 2020 } 2021 2022 2023 static int 2024 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg) 2025 { 2026 struct sonet_stats tmp; 2027 2028 if (fore200e_getstats(fore200e) < 0) 2029 return -EIO; 2030 2031 tmp.section_bip = fore200e_swap(fore200e->stats->oc3.section_bip8_errors); 2032 tmp.line_bip = fore200e_swap(fore200e->stats->oc3.line_bip24_errors); 2033 tmp.path_bip = fore200e_swap(fore200e->stats->oc3.path_bip8_errors); 2034 tmp.line_febe = fore200e_swap(fore200e->stats->oc3.line_febe_errors); 2035 tmp.path_febe = fore200e_swap(fore200e->stats->oc3.path_febe_errors); 2036 tmp.corr_hcs = fore200e_swap(fore200e->stats->oc3.corr_hcs_errors); 2037 tmp.uncorr_hcs = fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors); 2038 tmp.tx_cells = fore200e_swap(fore200e->stats->aal0.cells_transmitted) + 2039 fore200e_swap(fore200e->stats->aal34.cells_transmitted) + 2040 fore200e_swap(fore200e->stats->aal5.cells_transmitted); 2041 tmp.rx_cells = fore200e_swap(fore200e->stats->aal0.cells_received) + 2042 fore200e_swap(fore200e->stats->aal34.cells_received) + 2043 fore200e_swap(fore200e->stats->aal5.cells_received); 2044 2045 if (arg) 2046 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0; 2047 2048 return 0; 2049 } 2050 2051 2052 static int 2053 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg) 2054 { 2055 struct fore200e* fore200e = FORE200E_DEV(dev); 2056 2057 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg); 2058 2059 switch (cmd) { 2060 2061 case SONET_GETSTAT: 2062 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg); 2063 2064 case SONET_GETDIAG: 2065 return put_user(0, (int __user *)arg) ? -EFAULT : 0; 2066 2067 case ATM_SETLOOP: 2068 return fore200e_setloop(fore200e, (int)(unsigned long)arg); 2069 2070 case ATM_GETLOOP: 2071 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0; 2072 2073 case ATM_QUERYLOOP: 2074 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? 
-EFAULT : 0;
    }

    return -ENOSYS; /* not implemented */
}


static int
fore200e_change_qos(struct atm_vcc* vcc, struct atm_qos* qos, int flags)
{
    struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
    struct fore200e*     fore200e     = FORE200E_DEV(vcc->dev);

    if (!test_bit(ATM_VF_READY, &vcc->flags)) {
        DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
        return -EINVAL;
    }

    DPRINTK(2, "change_qos %d.%d.%d, "
            "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
            "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
            "available_cell_rate = %u\n",
            vcc->itf, vcc->vpi, vcc->vci,
            fore200e_traffic_class[ qos->txtp.traffic_class ],
            qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
            fore200e_traffic_class[ qos->rxtp.traffic_class ],
            qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
            flags, fore200e->available_cell_rate);

    if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {

        down(&fore200e->rate_sf);
        if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
            up(&fore200e->rate_sf);
            return -EAGAIN;
        }

        /* give back the cell rate previously reserved by this VC, then reserve the new one */
        fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
        fore200e->available_cell_rate -= qos->txtp.max_pcr;

        up(&fore200e->rate_sf);

        memcpy(&vcc->qos, qos, sizeof(struct atm_qos));

        /* update rate control parameters */
        fore200e_rate_ctrl(qos, &fore200e_vcc->rate);

        set_bit(ATM_VF_HASQOS, &vcc->flags);

        return 0;
    }

    return -EINVAL;
}


static int __init
fore200e_irq_request(struct fore200e* fore200e)
{
    if (request_irq(fore200e->irq, fore200e_interrupt, SA_SHIRQ, fore200e->name, fore200e->atm_dev) < 0) {

        printk(FORE200E "unable to reserve IRQ %s for device %s\n",
               fore200e_irq_itoa(fore200e->irq), fore200e->name);
        return -EBUSY;
    }

    printk(FORE200E "IRQ %s reserved for device %s\n",
           fore200e_irq_itoa(fore200e->irq), fore200e->name);

#ifdef FORE200E_USE_TASKLET
    tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
    tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
#endif

    fore200e->state = FORE200E_STATE_IRQ;
    return 0;
}


static int __init
fore200e_get_esi(struct fore200e* fore200e)
{
    struct prom_data* prom = fore200e_kmalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
    int ok, i;

    if (!prom)
        return -ENOMEM;

    ok = fore200e->bus->prom_read(fore200e, prom);
    if (ok < 0) {
        fore200e_kfree(prom);
        return -EBUSY;
    }

    printk(FORE200E "device %s, rev. 
%c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n", 2169 fore200e->name, 2170 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */ 2171 prom->serial_number & 0xFFFF, 2172 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ], 2173 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]); 2174 2175 for (i = 0; i < ESI_LEN; i++) { 2176 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ]; 2177 } 2178 2179 fore200e_kfree(prom); 2180 2181 return 0; 2182 } 2183 2184 2185 static int __init 2186 fore200e_alloc_rx_buf(struct fore200e* fore200e) 2187 { 2188 int scheme, magn, nbr, size, i; 2189 2190 struct host_bsq* bsq; 2191 struct buffer* buffer; 2192 2193 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 2194 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 2195 2196 bsq = &fore200e->host_bsq[ scheme ][ magn ]; 2197 2198 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ]; 2199 size = fore200e_rx_buf_size[ scheme ][ magn ]; 2200 2201 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn); 2202 2203 /* allocate the array of receive buffers */ 2204 buffer = bsq->buffer = fore200e_kmalloc(nbr * sizeof(struct buffer), GFP_KERNEL); 2205 2206 if (buffer == NULL) 2207 return -ENOMEM; 2208 2209 bsq->freebuf = NULL; 2210 2211 for (i = 0; i < nbr; i++) { 2212 2213 buffer[ i ].scheme = scheme; 2214 buffer[ i ].magn = magn; 2215 #ifdef FORE200E_BSQ_DEBUG 2216 buffer[ i ].index = i; 2217 buffer[ i ].supplied = 0; 2218 #endif 2219 2220 /* allocate the receive buffer body */ 2221 if (fore200e_chunk_alloc(fore200e, 2222 &buffer[ i ].data, size, fore200e->bus->buffer_alignment, 2223 DMA_FROM_DEVICE) < 0) { 2224 2225 while (i > 0) 2226 fore200e_chunk_free(fore200e, &buffer[ --i ].data); 2227 fore200e_kfree(buffer); 2228 2229 return -ENOMEM; 2230 } 2231 2232 /* insert the buffer into the free buffer list */ 2233 buffer[ i ].next = bsq->freebuf; 2234 bsq->freebuf = &buffer[ i ]; 2235 } 2236 /* all the buffers are free, initially */ 2237 bsq->freebuf_count = nbr; 2238 2239 #ifdef FORE200E_BSQ_DEBUG 2240 bsq_audit(3, bsq, scheme, magn); 2241 #endif 2242 } 2243 } 2244 2245 fore200e->state = FORE200E_STATE_ALLOC_BUF; 2246 return 0; 2247 } 2248 2249 2250 static int __init 2251 fore200e_init_bs_queue(struct fore200e* fore200e) 2252 { 2253 int scheme, magn, i; 2254 2255 struct host_bsq* bsq; 2256 struct cp_bsq_entry __iomem * cp_entry; 2257 2258 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 2259 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 2260 2261 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn); 2262 2263 bsq = &fore200e->host_bsq[ scheme ][ magn ]; 2264 2265 /* allocate and align the array of status words */ 2266 if (fore200e->bus->dma_chunk_alloc(fore200e, 2267 &bsq->status, 2268 sizeof(enum status), 2269 QUEUE_SIZE_BS, 2270 fore200e->bus->status_alignment) < 0) { 2271 return -ENOMEM; 2272 } 2273 2274 /* allocate and align the array of receive buffer descriptors */ 2275 if (fore200e->bus->dma_chunk_alloc(fore200e, 2276 &bsq->rbd_block, 2277 sizeof(struct rbd_block), 2278 QUEUE_SIZE_BS, 2279 fore200e->bus->descr_alignment) < 0) { 2280 2281 fore200e->bus->dma_chunk_free(fore200e, &bsq->status); 2282 return -ENOMEM; 2283 } 2284 2285 /* get the base address of the cp resident buffer supply queue entries */ 2286 cp_entry = fore200e->virt_base + 2287 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]); 2288 2289 /* fill the host resident and cp resident buffer supply queue entries */ 2290 for 
(i = 0; i < QUEUE_SIZE_BS; i++) { 2291 2292 bsq->host_entry[ i ].status = 2293 FORE200E_INDEX(bsq->status.align_addr, enum status, i); 2294 bsq->host_entry[ i ].rbd_block = 2295 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i); 2296 bsq->host_entry[ i ].rbd_block_dma = 2297 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i); 2298 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ]; 2299 2300 *bsq->host_entry[ i ].status = STATUS_FREE; 2301 2302 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i), 2303 &cp_entry[ i ].status_haddr); 2304 } 2305 } 2306 } 2307 2308 fore200e->state = FORE200E_STATE_INIT_BSQ; 2309 return 0; 2310 } 2311 2312 2313 static int __init 2314 fore200e_init_rx_queue(struct fore200e* fore200e) 2315 { 2316 struct host_rxq* rxq = &fore200e->host_rxq; 2317 struct cp_rxq_entry __iomem * cp_entry; 2318 int i; 2319 2320 DPRINTK(2, "receive queue is being initialized\n"); 2321 2322 /* allocate and align the array of status words */ 2323 if (fore200e->bus->dma_chunk_alloc(fore200e, 2324 &rxq->status, 2325 sizeof(enum status), 2326 QUEUE_SIZE_RX, 2327 fore200e->bus->status_alignment) < 0) { 2328 return -ENOMEM; 2329 } 2330 2331 /* allocate and align the array of receive PDU descriptors */ 2332 if (fore200e->bus->dma_chunk_alloc(fore200e, 2333 &rxq->rpd, 2334 sizeof(struct rpd), 2335 QUEUE_SIZE_RX, 2336 fore200e->bus->descr_alignment) < 0) { 2337 2338 fore200e->bus->dma_chunk_free(fore200e, &rxq->status); 2339 return -ENOMEM; 2340 } 2341 2342 /* get the base address of the cp resident rx queue entries */ 2343 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq); 2344 2345 /* fill the host resident and cp resident rx entries */ 2346 for (i=0; i < QUEUE_SIZE_RX; i++) { 2347 2348 rxq->host_entry[ i ].status = 2349 FORE200E_INDEX(rxq->status.align_addr, enum status, i); 2350 rxq->host_entry[ i ].rpd = 2351 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i); 2352 rxq->host_entry[ i ].rpd_dma = 2353 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i); 2354 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ]; 2355 2356 *rxq->host_entry[ i ].status = STATUS_FREE; 2357 2358 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i), 2359 &cp_entry[ i ].status_haddr); 2360 2361 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i), 2362 &cp_entry[ i ].rpd_haddr); 2363 } 2364 2365 /* set the head entry of the queue */ 2366 rxq->head = 0; 2367 2368 fore200e->state = FORE200E_STATE_INIT_RXQ; 2369 return 0; 2370 } 2371 2372 2373 static int __init 2374 fore200e_init_tx_queue(struct fore200e* fore200e) 2375 { 2376 struct host_txq* txq = &fore200e->host_txq; 2377 struct cp_txq_entry __iomem * cp_entry; 2378 int i; 2379 2380 DPRINTK(2, "transmit queue is being initialized\n"); 2381 2382 /* allocate and align the array of status words */ 2383 if (fore200e->bus->dma_chunk_alloc(fore200e, 2384 &txq->status, 2385 sizeof(enum status), 2386 QUEUE_SIZE_TX, 2387 fore200e->bus->status_alignment) < 0) { 2388 return -ENOMEM; 2389 } 2390 2391 /* allocate and align the array of transmit PDU descriptors */ 2392 if (fore200e->bus->dma_chunk_alloc(fore200e, 2393 &txq->tpd, 2394 sizeof(struct tpd), 2395 QUEUE_SIZE_TX, 2396 fore200e->bus->descr_alignment) < 0) { 2397 2398 fore200e->bus->dma_chunk_free(fore200e, &txq->status); 2399 return -ENOMEM; 2400 } 2401 2402 /* get the base address of the cp resident tx queue entries */ 2403 cp_entry = fore200e->virt_base + 
fore200e->bus->read(&fore200e->cp_queues->cp_txq); 2404 2405 /* fill the host resident and cp resident tx entries */ 2406 for (i=0; i < QUEUE_SIZE_TX; i++) { 2407 2408 txq->host_entry[ i ].status = 2409 FORE200E_INDEX(txq->status.align_addr, enum status, i); 2410 txq->host_entry[ i ].tpd = 2411 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i); 2412 txq->host_entry[ i ].tpd_dma = 2413 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i); 2414 txq->host_entry[ i ].cp_entry = &cp_entry[ i ]; 2415 2416 *txq->host_entry[ i ].status = STATUS_FREE; 2417 2418 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i), 2419 &cp_entry[ i ].status_haddr); 2420 2421 /* although there is a one-to-one mapping of tx queue entries and tpds, 2422 we do not write here the DMA (physical) base address of each tpd into 2423 the related cp resident entry, because the cp relies on this write 2424 operation to detect that a new pdu has been submitted for tx */ 2425 } 2426 2427 /* set the head and tail entries of the queue */ 2428 txq->head = 0; 2429 txq->tail = 0; 2430 2431 fore200e->state = FORE200E_STATE_INIT_TXQ; 2432 return 0; 2433 } 2434 2435 2436 static int __init 2437 fore200e_init_cmd_queue(struct fore200e* fore200e) 2438 { 2439 struct host_cmdq* cmdq = &fore200e->host_cmdq; 2440 struct cp_cmdq_entry __iomem * cp_entry; 2441 int i; 2442 2443 DPRINTK(2, "command queue is being initialized\n"); 2444 2445 /* allocate and align the array of status words */ 2446 if (fore200e->bus->dma_chunk_alloc(fore200e, 2447 &cmdq->status, 2448 sizeof(enum status), 2449 QUEUE_SIZE_CMD, 2450 fore200e->bus->status_alignment) < 0) { 2451 return -ENOMEM; 2452 } 2453 2454 /* get the base address of the cp resident cmd queue entries */ 2455 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq); 2456 2457 /* fill the host resident and cp resident cmd entries */ 2458 for (i=0; i < QUEUE_SIZE_CMD; i++) { 2459 2460 cmdq->host_entry[ i ].status = 2461 FORE200E_INDEX(cmdq->status.align_addr, enum status, i); 2462 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ]; 2463 2464 *cmdq->host_entry[ i ].status = STATUS_FREE; 2465 2466 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i), 2467 &cp_entry[ i ].status_haddr); 2468 } 2469 2470 /* set the head entry of the queue */ 2471 cmdq->head = 0; 2472 2473 fore200e->state = FORE200E_STATE_INIT_CMDQ; 2474 return 0; 2475 } 2476 2477 2478 static void __init 2479 fore200e_param_bs_queue(struct fore200e* fore200e, 2480 enum buffer_scheme scheme, enum buffer_magn magn, 2481 int queue_length, int pool_size, int supply_blksize) 2482 { 2483 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ]; 2484 2485 fore200e->bus->write(queue_length, &bs_spec->queue_length); 2486 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size); 2487 fore200e->bus->write(pool_size, &bs_spec->pool_size); 2488 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize); 2489 } 2490 2491 2492 static int __init 2493 fore200e_initialize(struct fore200e* fore200e) 2494 { 2495 struct cp_queues __iomem * cpq; 2496 int ok, scheme, magn; 2497 2498 DPRINTK(2, "device %s being initialized\n", fore200e->name); 2499 2500 init_MUTEX(&fore200e->rate_sf); 2501 spin_lock_init(&fore200e->q_lock); 2502 2503 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET; 2504 2505 /* enable cp to host interrupts */ 2506 fore200e->bus->write(1, &cpq->imask); 2507 2508 if 
(fore200e->bus->irq_enable) 2509 fore200e->bus->irq_enable(fore200e); 2510 2511 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect); 2512 2513 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len); 2514 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len); 2515 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len); 2516 2517 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension); 2518 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension); 2519 2520 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) 2521 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) 2522 fore200e_param_bs_queue(fore200e, scheme, magn, 2523 QUEUE_SIZE_BS, 2524 fore200e_rx_buf_nbr[ scheme ][ magn ], 2525 RBD_BLK_SIZE); 2526 2527 /* issue the initialize command */ 2528 fore200e->bus->write(STATUS_PENDING, &cpq->init.status); 2529 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode); 2530 2531 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000); 2532 if (ok == 0) { 2533 printk(FORE200E "device %s initialization failed\n", fore200e->name); 2534 return -ENODEV; 2535 } 2536 2537 printk(FORE200E "device %s initialized\n", fore200e->name); 2538 2539 fore200e->state = FORE200E_STATE_INITIALIZE; 2540 return 0; 2541 } 2542 2543 2544 static void __init 2545 fore200e_monitor_putc(struct fore200e* fore200e, char c) 2546 { 2547 struct cp_monitor __iomem * monitor = fore200e->cp_monitor; 2548 2549 #if 0 2550 printk("%c", c); 2551 #endif 2552 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send); 2553 } 2554 2555 2556 static int __init 2557 fore200e_monitor_getc(struct fore200e* fore200e) 2558 { 2559 struct cp_monitor __iomem * monitor = fore200e->cp_monitor; 2560 unsigned long timeout = jiffies + msecs_to_jiffies(50); 2561 int c; 2562 2563 while (time_before(jiffies, timeout)) { 2564 2565 c = (int) fore200e->bus->read(&monitor->soft_uart.recv); 2566 2567 if (c & FORE200E_CP_MONITOR_UART_AVAIL) { 2568 2569 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv); 2570 #if 0 2571 printk("%c", c & 0xFF); 2572 #endif 2573 return c & 0xFF; 2574 } 2575 } 2576 2577 return -1; 2578 } 2579 2580 2581 static void __init 2582 fore200e_monitor_puts(struct fore200e* fore200e, char* str) 2583 { 2584 while (*str) { 2585 2586 /* the i960 monitor doesn't accept any new character if it has something to say */ 2587 while (fore200e_monitor_getc(fore200e) >= 0); 2588 2589 fore200e_monitor_putc(fore200e, *str++); 2590 } 2591 2592 while (fore200e_monitor_getc(fore200e) >= 0); 2593 } 2594 2595 2596 static int __init 2597 fore200e_start_fw(struct fore200e* fore200e) 2598 { 2599 int ok; 2600 char cmd[ 48 ]; 2601 struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data; 2602 2603 DPRINTK(2, "device %s firmware being started\n", fore200e->name); 2604 2605 #if defined(__sparc_v9__) 2606 /* reported to be required by SBA cards on some sparc64 hosts */ 2607 fore200e_spin(100); 2608 #endif 2609 2610 sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset)); 2611 2612 fore200e_monitor_puts(fore200e, cmd); 2613 2614 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000); 2615 if (ok == 0) { 2616 printk(FORE200E "device %s firmware didn't start\n", fore200e->name); 2617 return -ENODEV; 2618 } 2619 2620 printk(FORE200E "device %s firmware started\n", fore200e->name); 2621 2622 fore200e->state = FORE200E_STATE_START_FW; 2623 return 0; 2624 } 2625 2626 2627 static int __init 2628 
fore200e_load_fw(struct fore200e* fore200e) 2629 { 2630 u32* fw_data = (u32*) fore200e->bus->fw_data; 2631 u32 fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32); 2632 2633 struct fw_header* fw_header = (struct fw_header*) fw_data; 2634 2635 u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset); 2636 2637 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n", 2638 fore200e->name, load_addr, fw_size); 2639 2640 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) { 2641 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name); 2642 return -ENODEV; 2643 } 2644 2645 for (; fw_size--; fw_data++, load_addr++) 2646 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr); 2647 2648 fore200e->state = FORE200E_STATE_LOAD_FW; 2649 return 0; 2650 } 2651 2652 2653 static int __init 2654 fore200e_register(struct fore200e* fore200e) 2655 { 2656 struct atm_dev* atm_dev; 2657 2658 DPRINTK(2, "device %s being registered\n", fore200e->name); 2659 2660 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1, 2661 NULL); 2662 if (atm_dev == NULL) { 2663 printk(FORE200E "unable to register device %s\n", fore200e->name); 2664 return -ENODEV; 2665 } 2666 2667 atm_dev->dev_data = fore200e; 2668 fore200e->atm_dev = atm_dev; 2669 2670 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS; 2671 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS; 2672 2673 fore200e->available_cell_rate = ATM_OC3_PCR; 2674 2675 fore200e->state = FORE200E_STATE_REGISTER; 2676 return 0; 2677 } 2678 2679 2680 static int __init 2681 fore200e_init(struct fore200e* fore200e) 2682 { 2683 if (fore200e_register(fore200e) < 0) 2684 return -ENODEV; 2685 2686 if (fore200e->bus->configure(fore200e) < 0) 2687 return -ENODEV; 2688 2689 if (fore200e->bus->map(fore200e) < 0) 2690 return -ENODEV; 2691 2692 if (fore200e_reset(fore200e, 1) < 0) 2693 return -ENODEV; 2694 2695 if (fore200e_load_fw(fore200e) < 0) 2696 return -ENODEV; 2697 2698 if (fore200e_start_fw(fore200e) < 0) 2699 return -ENODEV; 2700 2701 if (fore200e_initialize(fore200e) < 0) 2702 return -ENODEV; 2703 2704 if (fore200e_init_cmd_queue(fore200e) < 0) 2705 return -ENOMEM; 2706 2707 if (fore200e_init_tx_queue(fore200e) < 0) 2708 return -ENOMEM; 2709 2710 if (fore200e_init_rx_queue(fore200e) < 0) 2711 return -ENOMEM; 2712 2713 if (fore200e_init_bs_queue(fore200e) < 0) 2714 return -ENOMEM; 2715 2716 if (fore200e_alloc_rx_buf(fore200e) < 0) 2717 return -ENOMEM; 2718 2719 if (fore200e_get_esi(fore200e) < 0) 2720 return -EIO; 2721 2722 if (fore200e_irq_request(fore200e) < 0) 2723 return -EBUSY; 2724 2725 fore200e_supply(fore200e); 2726 2727 /* all done, board initialization is now complete */ 2728 fore200e->state = FORE200E_STATE_COMPLETE; 2729 return 0; 2730 } 2731 2732 2733 static int __devinit 2734 fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent) 2735 { 2736 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data; 2737 struct fore200e* fore200e; 2738 int err = 0; 2739 static int index = 0; 2740 2741 if (pci_enable_device(pci_dev)) { 2742 err = -EINVAL; 2743 goto out; 2744 } 2745 2746 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL); 2747 if (fore200e == NULL) { 2748 err = -ENOMEM; 2749 goto out_disable; 2750 } 2751 2752 fore200e->bus = bus; 2753 fore200e->bus_dev = pci_dev; 2754 fore200e->irq = pci_dev->irq; 2755 fore200e->phys_base = pci_resource_start(pci_dev, 0); 2756 2757 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1); 2758 2759 
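    /* enable PCI bus mastering so the adapter can DMA descriptors and PDUs
       to and from host memory */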
pci_set_master(pci_dev); 2760 2761 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n", 2762 fore200e->bus->model_name, 2763 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq)); 2764 2765 sprintf(fore200e->name, "%s-%d", bus->model_name, index); 2766 2767 err = fore200e_init(fore200e); 2768 if (err < 0) { 2769 fore200e_shutdown(fore200e); 2770 goto out_free; 2771 } 2772 2773 ++index; 2774 pci_set_drvdata(pci_dev, fore200e); 2775 2776 out: 2777 return err; 2778 2779 out_free: 2780 kfree(fore200e); 2781 out_disable: 2782 pci_disable_device(pci_dev); 2783 goto out; 2784 } 2785 2786 2787 static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev) 2788 { 2789 struct fore200e *fore200e; 2790 2791 fore200e = pci_get_drvdata(pci_dev); 2792 2793 fore200e_shutdown(fore200e); 2794 kfree(fore200e); 2795 pci_disable_device(pci_dev); 2796 } 2797 2798 2799 #ifdef CONFIG_ATM_FORE200E_PCA 2800 static struct pci_device_id fore200e_pca_tbl[] = { 2801 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID, 2802 0, 0, (unsigned long) &fore200e_bus[0] }, 2803 { 0, } 2804 }; 2805 2806 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl); 2807 2808 static struct pci_driver fore200e_pca_driver = { 2809 .name = "fore_200e", 2810 .probe = fore200e_pca_detect, 2811 .remove = __devexit_p(fore200e_pca_remove_one), 2812 .id_table = fore200e_pca_tbl, 2813 }; 2814 #endif 2815 2816 2817 static int __init 2818 fore200e_module_init(void) 2819 { 2820 const struct fore200e_bus* bus; 2821 struct fore200e* fore200e; 2822 int index; 2823 2824 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n"); 2825 2826 /* for each configured bus interface */ 2827 for (bus = fore200e_bus; bus->model_name; bus++) { 2828 2829 /* detect all boards present on that bus */ 2830 for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) { 2831 2832 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n", 2833 fore200e->bus->model_name, 2834 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq)); 2835 2836 sprintf(fore200e->name, "%s-%d", bus->model_name, index); 2837 2838 if (fore200e_init(fore200e) < 0) { 2839 2840 fore200e_shutdown(fore200e); 2841 break; 2842 } 2843 2844 list_add(&fore200e->entry, &fore200e_boards); 2845 } 2846 } 2847 2848 #ifdef CONFIG_ATM_FORE200E_PCA 2849 if (!pci_register_driver(&fore200e_pca_driver)) 2850 return 0; 2851 #endif 2852 2853 if (!list_empty(&fore200e_boards)) 2854 return 0; 2855 2856 return -ENODEV; 2857 } 2858 2859 2860 static void __exit 2861 fore200e_module_cleanup(void) 2862 { 2863 struct fore200e *fore200e, *next; 2864 2865 #ifdef CONFIG_ATM_FORE200E_PCA 2866 pci_unregister_driver(&fore200e_pca_driver); 2867 #endif 2868 2869 list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) { 2870 fore200e_shutdown(fore200e); 2871 kfree(fore200e); 2872 } 2873 DPRINTK(1, "module being removed\n"); 2874 } 2875 2876 2877 static int 2878 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page) 2879 { 2880 struct fore200e* fore200e = FORE200E_DEV(dev); 2881 struct fore200e_vcc* fore200e_vcc; 2882 struct atm_vcc* vcc; 2883 int i, len, left = *pos; 2884 unsigned long flags; 2885 2886 if (!left--) { 2887 2888 if (fore200e_getstats(fore200e) < 0) 2889 return -EIO; 2890 2891 len = sprintf(page,"\n" 2892 " device:\n" 2893 " internal name:\t\t%s\n", fore200e->name); 2894 2895 /* print bus-specific information */ 2896 if (fore200e->bus->proc_read) 2897 len += fore200e->bus->proc_read(fore200e, page + len); 2898 2899 len += sprintf(page + len, 
2900 " interrupt line:\t\t%s\n" 2901 " physical base address:\t0x%p\n" 2902 " virtual base address:\t0x%p\n" 2903 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n" 2904 " board serial number:\t\t%d\n\n", 2905 fore200e_irq_itoa(fore200e->irq), 2906 (void*)fore200e->phys_base, 2907 fore200e->virt_base, 2908 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2], 2909 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5], 2910 fore200e->esi[4] * 256 + fore200e->esi[5]); 2911 2912 return len; 2913 } 2914 2915 if (!left--) 2916 return sprintf(page, 2917 " free small bufs, scheme 1:\t%d\n" 2918 " free large bufs, scheme 1:\t%d\n" 2919 " free small bufs, scheme 2:\t%d\n" 2920 " free large bufs, scheme 2:\t%d\n", 2921 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count, 2922 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count, 2923 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count, 2924 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count); 2925 2926 if (!left--) { 2927 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat); 2928 2929 len = sprintf(page,"\n\n" 2930 " cell processor:\n" 2931 " heartbeat state:\t\t"); 2932 2933 if (hb >> 16 != 0xDEAD) 2934 len += sprintf(page + len, "0x%08x\n", hb); 2935 else 2936 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF); 2937 2938 return len; 2939 } 2940 2941 if (!left--) { 2942 static const char* media_name[] = { 2943 "unshielded twisted pair", 2944 "multimode optical fiber ST", 2945 "multimode optical fiber SC", 2946 "single-mode optical fiber ST", 2947 "single-mode optical fiber SC", 2948 "unknown" 2949 }; 2950 2951 static const char* oc3_mode[] = { 2952 "normal operation", 2953 "diagnostic loopback", 2954 "line loopback", 2955 "unknown" 2956 }; 2957 2958 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release); 2959 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release); 2960 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision); 2961 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type)); 2962 u32 oc3_index; 2963 2964 if ((media_index < 0) || (media_index > 4)) 2965 media_index = 5; 2966 2967 switch (fore200e->loop_mode) { 2968 case ATM_LM_NONE: oc3_index = 0; 2969 break; 2970 case ATM_LM_LOC_PHY: oc3_index = 1; 2971 break; 2972 case ATM_LM_RMT_PHY: oc3_index = 2; 2973 break; 2974 default: oc3_index = 3; 2975 } 2976 2977 return sprintf(page, 2978 " firmware release:\t\t%d.%d.%d\n" 2979 " monitor release:\t\t%d.%d\n" 2980 " media type:\t\t\t%s\n" 2981 " OC-3 revision:\t\t0x%x\n" 2982 " OC-3 mode:\t\t\t%s", 2983 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24, 2984 mon960_release >> 16, mon960_release << 16 >> 16, 2985 media_name[ media_index ], 2986 oc3_revision, 2987 oc3_mode[ oc3_index ]); 2988 } 2989 2990 if (!left--) { 2991 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor; 2992 2993 return sprintf(page, 2994 "\n\n" 2995 " monitor:\n" 2996 " version number:\t\t%d\n" 2997 " boot status word:\t\t0x%08x\n", 2998 fore200e->bus->read(&cp_monitor->mon_version), 2999 fore200e->bus->read(&cp_monitor->bstat)); 3000 } 3001 3002 if (!left--) 3003 return sprintf(page, 3004 "\n" 3005 " device statistics:\n" 3006 " 4b5b:\n" 3007 " crc_header_errors:\t\t%10u\n" 3008 " framing_errors:\t\t%10u\n", 3009 fore200e_swap(fore200e->stats->phy.crc_header_errors), 3010 fore200e_swap(fore200e->stats->phy.framing_errors)); 
3011 3012 if (!left--) 3013 return sprintf(page, "\n" 3014 " OC-3:\n" 3015 " section_bip8_errors:\t%10u\n" 3016 " path_bip8_errors:\t\t%10u\n" 3017 " line_bip24_errors:\t\t%10u\n" 3018 " line_febe_errors:\t\t%10u\n" 3019 " path_febe_errors:\t\t%10u\n" 3020 " corr_hcs_errors:\t\t%10u\n" 3021 " ucorr_hcs_errors:\t\t%10u\n", 3022 fore200e_swap(fore200e->stats->oc3.section_bip8_errors), 3023 fore200e_swap(fore200e->stats->oc3.path_bip8_errors), 3024 fore200e_swap(fore200e->stats->oc3.line_bip24_errors), 3025 fore200e_swap(fore200e->stats->oc3.line_febe_errors), 3026 fore200e_swap(fore200e->stats->oc3.path_febe_errors), 3027 fore200e_swap(fore200e->stats->oc3.corr_hcs_errors), 3028 fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors)); 3029 3030 if (!left--) 3031 return sprintf(page,"\n" 3032 " ATM:\t\t\t\t cells\n" 3033 " TX:\t\t\t%10u\n" 3034 " RX:\t\t\t%10u\n" 3035 " vpi out of range:\t\t%10u\n" 3036 " vpi no conn:\t\t%10u\n" 3037 " vci out of range:\t\t%10u\n" 3038 " vci no conn:\t\t%10u\n", 3039 fore200e_swap(fore200e->stats->atm.cells_transmitted), 3040 fore200e_swap(fore200e->stats->atm.cells_received), 3041 fore200e_swap(fore200e->stats->atm.vpi_bad_range), 3042 fore200e_swap(fore200e->stats->atm.vpi_no_conn), 3043 fore200e_swap(fore200e->stats->atm.vci_bad_range), 3044 fore200e_swap(fore200e->stats->atm.vci_no_conn)); 3045 3046 if (!left--) 3047 return sprintf(page,"\n" 3048 " AAL0:\t\t\t cells\n" 3049 " TX:\t\t\t%10u\n" 3050 " RX:\t\t\t%10u\n" 3051 " dropped:\t\t\t%10u\n", 3052 fore200e_swap(fore200e->stats->aal0.cells_transmitted), 3053 fore200e_swap(fore200e->stats->aal0.cells_received), 3054 fore200e_swap(fore200e->stats->aal0.cells_dropped)); 3055 3056 if (!left--) 3057 return sprintf(page,"\n" 3058 " AAL3/4:\n" 3059 " SAR sublayer:\t\t cells\n" 3060 " TX:\t\t\t%10u\n" 3061 " RX:\t\t\t%10u\n" 3062 " dropped:\t\t\t%10u\n" 3063 " CRC errors:\t\t%10u\n" 3064 " protocol errors:\t\t%10u\n\n" 3065 " CS sublayer:\t\t PDUs\n" 3066 " TX:\t\t\t%10u\n" 3067 " RX:\t\t\t%10u\n" 3068 " dropped:\t\t\t%10u\n" 3069 " protocol errors:\t\t%10u\n", 3070 fore200e_swap(fore200e->stats->aal34.cells_transmitted), 3071 fore200e_swap(fore200e->stats->aal34.cells_received), 3072 fore200e_swap(fore200e->stats->aal34.cells_dropped), 3073 fore200e_swap(fore200e->stats->aal34.cells_crc_errors), 3074 fore200e_swap(fore200e->stats->aal34.cells_protocol_errors), 3075 fore200e_swap(fore200e->stats->aal34.cspdus_transmitted), 3076 fore200e_swap(fore200e->stats->aal34.cspdus_received), 3077 fore200e_swap(fore200e->stats->aal34.cspdus_dropped), 3078 fore200e_swap(fore200e->stats->aal34.cspdus_protocol_errors)); 3079 3080 if (!left--) 3081 return sprintf(page,"\n" 3082 " AAL5:\n" 3083 " SAR sublayer:\t\t cells\n" 3084 " TX:\t\t\t%10u\n" 3085 " RX:\t\t\t%10u\n" 3086 " dropped:\t\t\t%10u\n" 3087 " congestions:\t\t%10u\n\n" 3088 " CS sublayer:\t\t PDUs\n" 3089 " TX:\t\t\t%10u\n" 3090 " RX:\t\t\t%10u\n" 3091 " dropped:\t\t\t%10u\n" 3092 " CRC errors:\t\t%10u\n" 3093 " protocol errors:\t\t%10u\n", 3094 fore200e_swap(fore200e->stats->aal5.cells_transmitted), 3095 fore200e_swap(fore200e->stats->aal5.cells_received), 3096 fore200e_swap(fore200e->stats->aal5.cells_dropped), 3097 fore200e_swap(fore200e->stats->aal5.congestion_experienced), 3098 fore200e_swap(fore200e->stats->aal5.cspdus_transmitted), 3099 fore200e_swap(fore200e->stats->aal5.cspdus_received), 3100 fore200e_swap(fore200e->stats->aal5.cspdus_dropped), 3101 fore200e_swap(fore200e->stats->aal5.cspdus_crc_errors), 3102 
fore200e_swap(fore200e->stats->aal5.cspdus_protocol_errors)); 3103 3104 if (!left--) 3105 return sprintf(page,"\n" 3106 " AUX:\t\t allocation failures\n" 3107 " small b1:\t\t\t%10u\n" 3108 " large b1:\t\t\t%10u\n" 3109 " small b2:\t\t\t%10u\n" 3110 " large b2:\t\t\t%10u\n" 3111 " RX PDUs:\t\t\t%10u\n" 3112 " TX PDUs:\t\t\t%10lu\n", 3113 fore200e_swap(fore200e->stats->aux.small_b1_failed), 3114 fore200e_swap(fore200e->stats->aux.large_b1_failed), 3115 fore200e_swap(fore200e->stats->aux.small_b2_failed), 3116 fore200e_swap(fore200e->stats->aux.large_b2_failed), 3117 fore200e_swap(fore200e->stats->aux.rpd_alloc_failed), 3118 fore200e->tx_sat); 3119 3120 if (!left--) 3121 return sprintf(page,"\n" 3122 " receive carrier:\t\t\t%s\n", 3123 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!"); 3124 3125 if (!left--) { 3126 return sprintf(page,"\n" 3127 " VCCs:\n address VPI VCI AAL " 3128 "TX PDUs TX min/max size RX PDUs RX min/max size\n"); 3129 } 3130 3131 for (i = 0; i < NBR_CONNECT; i++) { 3132 3133 vcc = fore200e->vc_map[i].vcc; 3134 3135 if (vcc == NULL) 3136 continue; 3137 3138 spin_lock_irqsave(&fore200e->q_lock, flags); 3139 3140 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) { 3141 3142 fore200e_vcc = FORE200E_VCC(vcc); 3143 ASSERT(fore200e_vcc); 3144 3145 len = sprintf(page, 3146 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n", 3147 (u32)(unsigned long)vcc, 3148 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 3149 fore200e_vcc->tx_pdu, 3150 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu, 3151 fore200e_vcc->tx_max_pdu, 3152 fore200e_vcc->rx_pdu, 3153 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu, 3154 fore200e_vcc->rx_max_pdu); 3155 3156 spin_unlock_irqrestore(&fore200e->q_lock, flags); 3157 return len; 3158 } 3159 3160 spin_unlock_irqrestore(&fore200e->q_lock, flags); 3161 } 3162 3163 return 0; 3164 } 3165 3166 module_init(fore200e_module_init); 3167 module_exit(fore200e_module_cleanup); 3168 3169 3170 static const struct atmdev_ops fore200e_ops = 3171 { 3172 .open = fore200e_open, 3173 .close = fore200e_close, 3174 .ioctl = fore200e_ioctl, 3175 .getsockopt = fore200e_getsockopt, 3176 .setsockopt = fore200e_setsockopt, 3177 .send = fore200e_send, 3178 .change_qos = fore200e_change_qos, 3179 .proc_read = fore200e_proc_read, 3180 .owner = THIS_MODULE 3181 }; 3182 3183 3184 #ifdef CONFIG_ATM_FORE200E_PCA 3185 extern const unsigned char _fore200e_pca_fw_data[]; 3186 extern const unsigned int _fore200e_pca_fw_size; 3187 #endif 3188 #ifdef CONFIG_ATM_FORE200E_SBA 3189 extern const unsigned char _fore200e_sba_fw_data[]; 3190 extern const unsigned int _fore200e_sba_fw_size; 3191 #endif 3192 3193 static const struct fore200e_bus fore200e_bus[] = { 3194 #ifdef CONFIG_ATM_FORE200E_PCA 3195 { "PCA-200E", "pca200e", 32, 4, 32, 3196 _fore200e_pca_fw_data, &_fore200e_pca_fw_size, 3197 fore200e_pca_read, 3198 fore200e_pca_write, 3199 fore200e_pca_dma_map, 3200 fore200e_pca_dma_unmap, 3201 fore200e_pca_dma_sync_for_cpu, 3202 fore200e_pca_dma_sync_for_device, 3203 fore200e_pca_dma_chunk_alloc, 3204 fore200e_pca_dma_chunk_free, 3205 NULL, 3206 fore200e_pca_configure, 3207 fore200e_pca_map, 3208 fore200e_pca_reset, 3209 fore200e_pca_prom_read, 3210 fore200e_pca_unmap, 3211 NULL, 3212 fore200e_pca_irq_check, 3213 fore200e_pca_irq_ack, 3214 fore200e_pca_proc_read, 3215 }, 3216 #endif 3217 #ifdef CONFIG_ATM_FORE200E_SBA 3218 { "SBA-200E", "sba200e", 32, 64, 32, 3219 _fore200e_sba_fw_data, &_fore200e_sba_fw_size, 3220 fore200e_sba_read, 3221 
fore200e_sba_write, 3222 fore200e_sba_dma_map, 3223 fore200e_sba_dma_unmap, 3224 fore200e_sba_dma_sync_for_cpu, 3225 fore200e_sba_dma_sync_for_device, 3226 fore200e_sba_dma_chunk_alloc, 3227 fore200e_sba_dma_chunk_free, 3228 fore200e_sba_detect, 3229 fore200e_sba_configure, 3230 fore200e_sba_map, 3231 fore200e_sba_reset, 3232 fore200e_sba_prom_read, 3233 fore200e_sba_unmap, 3234 fore200e_sba_irq_enable, 3235 fore200e_sba_irq_check, 3236 fore200e_sba_irq_ack, 3237 fore200e_sba_proc_read, 3238 }, 3239 #endif 3240 {} 3241 }; 3242 3243 #ifdef MODULE_LICENSE 3244 MODULE_LICENSE("GPL"); 3245 #endif 3246
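/* Usage sketch (illustrative, not part of the driver): once the module is
 * loaded, each detected adapter registers an ATM device whose per-device
 * statistics -- rendered by fore200e_proc_read() above -- are typically
 * exposed through the ATM proc interface, e.g.:
 *
 *   # modprobe fore200e
 *   # cat /proc/net/atm/pca200e:0        (device name and index may differ)
 *
 * Loopback modes can be changed at runtime through the ATM_SETLOOP ioctl
 * handled by fore200e_ioctl(), provided the caller has CAP_NET_ADMIN.
 */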