1 /* 2 $Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $ 3 4 A FORE Systems 200E-series driver for ATM on Linux. 5 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003. 6 7 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de). 8 9 This driver simultaneously supports PCA-200E and SBA-200E adapters 10 on i386, alpha (untested), powerpc, sparc and sparc64 architectures. 11 12 This program is free software; you can redistribute it and/or modify 13 it under the terms of the GNU General Public License as published by 14 the Free Software Foundation; either version 2 of the License, or 15 (at your option) any later version. 16 17 This program is distributed in the hope that it will be useful, 18 but WITHOUT ANY WARRANTY; without even the implied warranty of 19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 GNU General Public License for more details. 21 22 You should have received a copy of the GNU General Public License 23 along with this program; if not, write to the Free Software 24 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 */ 26 27 28 #include <linux/kernel.h> 29 #include <linux/slab.h> 30 #include <linux/init.h> 31 #include <linux/capability.h> 32 #include <linux/interrupt.h> 33 #include <linux/bitops.h> 34 #include <linux/pci.h> 35 #include <linux/module.h> 36 #include <linux/atmdev.h> 37 #include <linux/sonet.h> 38 #include <linux/atm_suni.h> 39 #include <linux/dma-mapping.h> 40 #include <linux/delay.h> 41 #include <asm/io.h> 42 #include <asm/string.h> 43 #include <asm/page.h> 44 #include <asm/irq.h> 45 #include <asm/dma.h> 46 #include <asm/byteorder.h> 47 #include <asm/uaccess.h> 48 #include <asm/atomic.h> 49 50 #ifdef CONFIG_ATM_FORE200E_SBA 51 #include <asm/idprom.h> 52 #include <asm/sbus.h> 53 #include <asm/openprom.h> 54 #include <asm/oplib.h> 55 #include <asm/pgtable.h> 56 #endif 57 58 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */ 59 
#define FORE200E_USE_TASKLET 60 #endif 61 62 #if 0 /* enable the debugging code of the buffer supply queues */ 63 #define FORE200E_BSQ_DEBUG 64 #endif 65 66 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */ 67 #define FORE200E_52BYTE_AAL0_SDU 68 #endif 69 70 #include "fore200e.h" 71 #include "suni.h" 72 73 #define FORE200E_VERSION "0.3e" 74 75 #define FORE200E "fore200e: " 76 77 #if 0 /* override .config */ 78 #define CONFIG_ATM_FORE200E_DEBUG 1 79 #endif 80 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0) 81 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \ 82 printk(FORE200E format, ##args); } while (0) 83 #else 84 #define DPRINTK(level, format, args...) do {} while (0) 85 #endif 86 87 88 #define FORE200E_ALIGN(addr, alignment) \ 89 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr)) 90 91 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type)) 92 93 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ]) 94 95 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ++(index) % (modulo)) 96 97 #if 1 98 #define ASSERT(expr) if (!(expr)) { \ 99 printk(FORE200E "assertion failed! 
%s[%d]: %s\n", \ 100 __FUNCTION__, __LINE__, #expr); \ 101 panic(FORE200E "%s", __FUNCTION__); \ 102 } 103 #else 104 #define ASSERT(expr) do {} while (0) 105 #endif 106 107 108 static const struct atmdev_ops fore200e_ops; 109 static const struct fore200e_bus fore200e_bus[]; 110 111 static LIST_HEAD(fore200e_boards); 112 113 114 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen"); 115 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION); 116 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E"); 117 118 119 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { 120 { BUFFER_S1_NBR, BUFFER_L1_NBR }, 121 { BUFFER_S2_NBR, BUFFER_L2_NBR } 122 }; 123 124 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { 125 { BUFFER_S1_SIZE, BUFFER_L1_SIZE }, 126 { BUFFER_S2_SIZE, BUFFER_L2_SIZE } 127 }; 128 129 130 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0) 131 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" }; 132 #endif 133 134 135 #if 0 /* currently unused */ 136 static int 137 fore200e_fore2atm_aal(enum fore200e_aal aal) 138 { 139 switch(aal) { 140 case FORE200E_AAL0: return ATM_AAL0; 141 case FORE200E_AAL34: return ATM_AAL34; 142 case FORE200E_AAL5: return ATM_AAL5; 143 } 144 145 return -EINVAL; 146 } 147 #endif 148 149 150 static enum fore200e_aal 151 fore200e_atm2fore_aal(int aal) 152 { 153 switch(aal) { 154 case ATM_AAL0: return FORE200E_AAL0; 155 case ATM_AAL34: return FORE200E_AAL34; 156 case ATM_AAL1: 157 case ATM_AAL2: 158 case ATM_AAL5: return FORE200E_AAL5; 159 } 160 161 return -EINVAL; 162 } 163 164 165 static char* 166 fore200e_irq_itoa(int irq) 167 { 168 static char str[8]; 169 sprintf(str, "%d", irq); 170 return str; 171 } 172 173 174 /* allocate and align a chunk of memory intended to hold the data behing exchanged 175 between the driver and the adapter (using streaming DVMA) */ 176 
static int
fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
{
    unsigned long offset = 0;

    /* small alignments come for free from the allocator - no padding needed */
    if (alignment <= sizeof(int))
	alignment = 0;

    /* over-allocate so the payload can be shifted up to 'alignment' bytes */
    chunk->alloc_size = size + alignment;
    chunk->align_size = size;
    chunk->direction  = direction;

    chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
    if (chunk->alloc_addr == NULL)
	return -ENOMEM;

    if (alignment > 0)
	offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);

    chunk->align_addr = chunk->alloc_addr + offset;

    /* map the aligned region for streaming DMA transfers with the adapter */
    chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);

    return 0;
}


/* free a chunk of memory */

static void
fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    /* NOTE(review): unmaps chunk->dma_size bytes while the alloc path above
       records the mapped length in align_size; presumably dma_size is kept
       in sync by the bus dma_map implementation - TODO confirm */
    fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);

    kfree(chunk->alloc_addr);
}


/* busy-wait for roughly 'msecs' milliseconds; used around board resets,
   where the caller may not be allowed to sleep */

static void
fore200e_spin(int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    while (time_before(jiffies, timeout));
}


/* poll a DMA-visible status word until it equals 'val', an error bit is
   raised, or 'msecs' milliseconds elapse; returns non-zero on success */

static int
fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    int           ok;

    mb();    /* make prior CPU writes visible before we start polling */
    do {
	if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
	    break;

    } while (time_before(jiffies, timeout));

#if 1
    if (!ok) {
	printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
	       *addr, val);
    }
#endif

    return ok;
}


/* same as fore200e_poll() but for memory-mapped device registers, read
   through the bus-specific accessor */

static int
fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    int           ok;

    do {
	if ((ok = (fore200e->bus->read(addr) == val)))
	    break;

    } while (time_before(jiffies, timeout));

#if 1
    if (!ok) {
	printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
	       fore200e->bus->read(addr), val);
    }
#endif

    return ok;
}


/* release every allocated rx buffer of every buffer supply queue */

static void
fore200e_free_rx_buf(struct fore200e* fore200e)
{
    int scheme, magn, nbr;
    struct buffer* buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {

		for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {

		    struct chunk* data = &buffer[ nbr ].data;

		    /* only free buffers whose data chunk was actually allocated */
		    if (data->alloc_addr != NULL)
			fore200e_chunk_free(fore200e, data);
		}
	    }
	}
    }
}


/* release the status and rbd-block chunks of every buffer supply queue */

static void
fore200e_uninit_bs_queue(struct fore200e* fore200e)
{
    int scheme, magn;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    struct chunk* status    = &fore200e->host_bsq[ scheme ][ magn ].status;
	    struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;

	    if (status->alloc_addr)
		fore200e->bus->dma_chunk_free(fore200e, status);

	    if (rbd_block->alloc_addr)
		fore200e->bus->dma_chunk_free(fore200e, rbd_block);
	}
    }
}


/* cold-reset the adapter and, when 'diag' is set, wait for its self-test */

static int
fore200e_reset(struct fore200e* fore200e, int diag)
{
    int ok;

    fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;

    fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);

    fore200e->bus->reset(fore200e);

    if (diag) {
	ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
	if (ok == 0) {

	    printk(FORE200E "device %s self-test failed\n", fore200e->name);
	    return -ENODEV;
	}

	printk(FORE200E "device %s self-test passed\n", fore200e->name);

	fore200e->state = FORE200E_STATE_RESET;
    }
339 return 0; 340 } 341 342 343 static void 344 fore200e_shutdown(struct fore200e* fore200e) 345 { 346 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n", 347 fore200e->name, fore200e->phys_base, 348 fore200e_irq_itoa(fore200e->irq)); 349 350 if (fore200e->state > FORE200E_STATE_RESET) { 351 /* first, reset the board to prevent further interrupts or data transfers */ 352 fore200e_reset(fore200e, 0); 353 } 354 355 /* then, release all allocated resources */ 356 switch(fore200e->state) { 357 358 case FORE200E_STATE_COMPLETE: 359 kfree(fore200e->stats); 360 361 case FORE200E_STATE_IRQ: 362 free_irq(fore200e->irq, fore200e->atm_dev); 363 364 case FORE200E_STATE_ALLOC_BUF: 365 fore200e_free_rx_buf(fore200e); 366 367 case FORE200E_STATE_INIT_BSQ: 368 fore200e_uninit_bs_queue(fore200e); 369 370 case FORE200E_STATE_INIT_RXQ: 371 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status); 372 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd); 373 374 case FORE200E_STATE_INIT_TXQ: 375 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status); 376 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd); 377 378 case FORE200E_STATE_INIT_CMDQ: 379 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status); 380 381 case FORE200E_STATE_INITIALIZE: 382 /* nothing to do for that state */ 383 384 case FORE200E_STATE_START_FW: 385 /* nothing to do for that state */ 386 387 case FORE200E_STATE_LOAD_FW: 388 /* nothing to do for that state */ 389 390 case FORE200E_STATE_RESET: 391 /* nothing to do for that state */ 392 393 case FORE200E_STATE_MAP: 394 fore200e->bus->unmap(fore200e); 395 396 case FORE200E_STATE_CONFIGURE: 397 /* nothing to do for that state */ 398 399 case FORE200E_STATE_REGISTER: 400 /* XXX shouldn't we *start* by deregistering the device? 
*/ 401 atm_dev_deregister(fore200e->atm_dev); 402 403 case FORE200E_STATE_BLANK: 404 /* nothing to do for that state */ 405 break; 406 } 407 } 408 409 410 #ifdef CONFIG_ATM_FORE200E_PCA 411 412 static u32 fore200e_pca_read(volatile u32 __iomem *addr) 413 { 414 /* on big-endian hosts, the board is configured to convert 415 the endianess of slave RAM accesses */ 416 return le32_to_cpu(readl(addr)); 417 } 418 419 420 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr) 421 { 422 /* on big-endian hosts, the board is configured to convert 423 the endianess of slave RAM accesses */ 424 writel(cpu_to_le32(val), addr); 425 } 426 427 428 static u32 429 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction) 430 { 431 u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction); 432 433 DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n", 434 virt_addr, size, direction, dma_addr); 435 436 return dma_addr; 437 } 438 439 440 static void 441 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 442 { 443 DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n", 444 dma_addr, size, direction); 445 446 pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction); 447 } 448 449 450 static void 451 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 452 { 453 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); 454 455 pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction); 456 } 457 458 static void 459 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 460 { 461 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); 462 463 
    pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
}


/* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
   (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */

static int
fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
			     int size, int nbr, int alignment)
{
    /* returned chunks are page-aligned */
    chunk->alloc_size = size * nbr;
    chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
					     chunk->alloc_size,
					     &chunk->dma_addr);

    if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
	return -ENOMEM;

    chunk->align_addr = chunk->alloc_addr;

    return 0;
}


/* free a DMA consistent chunk of memory */

static void
fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
			chunk->alloc_size,
			chunk->alloc_addr,
			chunk->dma_addr);
}


/* return non-zero if the board has an interrupt posted */
static int
fore200e_pca_irq_check(struct fore200e* fore200e)
{
    /* this is a 1 bit register */
    int irq_posted = readl(fore200e->regs.pca.psr);

#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
    if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
	DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
    }
#endif

    return irq_posted;
}


/* acknowledge the board's pending interrupt */
static void
fore200e_pca_irq_ack(struct fore200e* fore200e)
{
    writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
}


/* pulse the board's reset bit in the host control register */
static void
fore200e_pca_reset(struct fore200e* fore200e)
{
    writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
    fore200e_spin(10);
    writel(0, fore200e->regs.pca.hcr);
}


/* map the board's I/O space into kernel virtual memory */
static int __devinit
fore200e_pca_map(struct fore200e* fore200e)
{
    DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);

    fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);

    if (fore200e->virt_base == NULL) {
	printk(FORE200E "can't map device %s\n", fore200e->name);
	return -EFAULT;
    }

    DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);

    /* gain access to the PCA specific registers */
    fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
    fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
    fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;

    fore200e->state = FORE200E_STATE_MAP;
    return 0;
}


/* undo fore200e_pca_map() */
static void
fore200e_pca_unmap(struct fore200e* fore200e)
{
    DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);

    if (fore200e->virt_base != NULL)
	iounmap(fore200e->virt_base);
}


/* program the board's PCI configuration space (bus mastering, latency) */
static int __devinit
fore200e_pca_configure(struct fore200e* fore200e)
{
    struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
    u8              master_ctrl, latency;

    DPRINTK(2, "device %s being configured\n", fore200e->name);

    if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
	printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
	return -EIO;
    }

    pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);

    master_ctrl = master_ctrl
#if defined(__BIG_ENDIAN)
	/* request the PCA board to convert the endianess of slave RAM accesses */
	| PCA200E_CTRL_CONVERT_ENDIAN
#endif
#if 0
	| PCA200E_CTRL_DIS_CACHE_RD
	| PCA200E_CTRL_DIS_WRT_INVAL
	| PCA200E_CTRL_ENA_CONT_REQ_MODE
	| PCA200E_CTRL_2_CACHE_WRT_INVAL
#endif
	| PCA200E_CTRL_LARGE_PCI_BURSTS;

    pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);

    /* raise latency from 32 (default) to 192, as this seems to prevent NIC
       lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
       this may impact the performances of other PCI devices on the same bus, though */
    latency = 192;
    pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);

    fore200e->state = FORE200E_STATE_CONFIGURE;
    return 0;
}


/* issue a firmware command to fetch the board's PROM data (MAC address, etc.) */
static int __init
fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
{
    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct prom_opcode      opcode;
    int                     ok;
    u32                     prom_dma;

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_PROM;
    opcode.pad    = 0;

    prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);

    fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);

    *entry->status = STATUS_PENDING;

    /* writing the opcode kicks the command; completion is then polled below */
    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);

    if (ok == 0) {
	printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
	return -EIO;
    }

#if defined(__BIG_ENDIAN)

#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))

    /* MAC address is stored as little-endian */
    swap_here(&prom->mac_addr[0]);
    swap_here(&prom->mac_addr[4]);
#endif

    return 0;
}


/* report the board's PCI placement for /proc output */
static int
fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
{
    struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;

    return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
		   pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
}

#endif /* CONFIG_ATM_FORE200E_PCA
*/ 664 665 666 #ifdef CONFIG_ATM_FORE200E_SBA 667 668 static u32 669 fore200e_sba_read(volatile u32 __iomem *addr) 670 { 671 return sbus_readl(addr); 672 } 673 674 675 static void 676 fore200e_sba_write(u32 val, volatile u32 __iomem *addr) 677 { 678 sbus_writel(val, addr); 679 } 680 681 682 static u32 683 fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction) 684 { 685 u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction); 686 687 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n", 688 virt_addr, size, direction, dma_addr); 689 690 return dma_addr; 691 } 692 693 694 static void 695 fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 696 { 697 DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n", 698 dma_addr, size, direction); 699 700 sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction); 701 } 702 703 704 static void 705 fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 706 { 707 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); 708 709 sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction); 710 } 711 712 static void 713 fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction) 714 { 715 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); 716 717 sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction); 718 } 719 720 721 /* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism 722 (to hold descriptors, status, queues, etc.) 
shared by the driver and the adapter */ 723 724 static int 725 fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, 726 int size, int nbr, int alignment) 727 { 728 chunk->alloc_size = chunk->align_size = size * nbr; 729 730 /* returned chunks are page-aligned */ 731 chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev, 732 chunk->alloc_size, 733 &chunk->dma_addr); 734 735 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0)) 736 return -ENOMEM; 737 738 chunk->align_addr = chunk->alloc_addr; 739 740 return 0; 741 } 742 743 744 /* free a DVMA consistent chunk of memory */ 745 746 static void 747 fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk) 748 { 749 sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev, 750 chunk->alloc_size, 751 chunk->alloc_addr, 752 chunk->dma_addr); 753 } 754 755 756 static void 757 fore200e_sba_irq_enable(struct fore200e* fore200e) 758 { 759 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY; 760 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr); 761 } 762 763 764 static int 765 fore200e_sba_irq_check(struct fore200e* fore200e) 766 { 767 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ; 768 } 769 770 771 static void 772 fore200e_sba_irq_ack(struct fore200e* fore200e) 773 { 774 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY; 775 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr); 776 } 777 778 779 static void 780 fore200e_sba_reset(struct fore200e* fore200e) 781 { 782 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr); 783 fore200e_spin(10); 784 fore200e->bus->write(0, fore200e->regs.sba.hcr); 785 } 786 787 788 static int __init 789 fore200e_sba_map(struct fore200e* fore200e) 790 { 791 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev; 792 unsigned int bursts; 793 794 /* gain access to the SBA specific registers 
*/ 795 fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR"); 796 fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR"); 797 fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR"); 798 fore200e->virt_base = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM"); 799 800 if (fore200e->virt_base == NULL) { 801 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name); 802 return -EFAULT; 803 } 804 805 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base); 806 807 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */ 808 809 /* get the supported DVMA burst sizes */ 810 bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00); 811 812 if (sbus_can_dma_64bit(sbus_dev)) 813 sbus_set_sbus64(sbus_dev, bursts); 814 815 fore200e->state = FORE200E_STATE_MAP; 816 return 0; 817 } 818 819 820 static void 821 fore200e_sba_unmap(struct fore200e* fore200e) 822 { 823 sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH); 824 sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH); 825 sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH); 826 sbus_iounmap(fore200e->virt_base, SBA200E_RAM_LENGTH); 827 } 828 829 830 static int __init 831 fore200e_sba_configure(struct fore200e* fore200e) 832 { 833 fore200e->state = FORE200E_STATE_CONFIGURE; 834 return 0; 835 } 836 837 838 static struct fore200e* __init 839 fore200e_sba_detect(const struct fore200e_bus* bus, int index) 840 { 841 struct fore200e* fore200e; 842 struct sbus_bus* sbus_bus; 843 struct sbus_dev* sbus_dev = NULL; 844 845 unsigned int count = 0; 846 847 for_each_sbus (sbus_bus) { 848 for_each_sbusdev (sbus_dev, sbus_bus) { 849 if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) { 850 if (count >= index) 851 goto found; 852 count++; 853 } 854 } 855 } 856 return NULL; 857 858 found: 859 if 
(sbus_dev->num_registers != 4) {
	printk(FORE200E "this %s device has %d instead of 4 registers\n",
	       bus->model_name, sbus_dev->num_registers);
	return NULL;
    }

    fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
    if (fore200e == NULL)
	return NULL;

    fore200e->bus     = bus;
    fore200e->bus_dev = sbus_dev;
    fore200e->irq     = sbus_dev->irqs[ 0 ];

    fore200e->phys_base = (unsigned long)sbus_dev;

    /* NOTE(review): 'index - 1' yields a zero-based device name only if the
       caller passes a one-based index - verify against the probe loop */
    sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);

    return fore200e;
}


/* read the MAC address, serial number and hw revision from the OpenPROM */
static int __init
fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
{
    struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
    int              len;

    len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
    if (len < 0)
	return -EBUSY;

    len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
    if (len < 0)
	return -EBUSY;

    prom_getproperty(sbus_dev->prom_node, "serialnumber",
		     (char*)&prom->serial_number, sizeof(prom->serial_number));

    prom_getproperty(sbus_dev->prom_node, "promversion",
		     (char*)&prom->hw_revision, sizeof(prom->hw_revision));

    return 0;
}


/* report the board's SBus placement for /proc output */
static int
fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
{
    struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;

    return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
}
#endif /* CONFIG_ATM_FORE200E_SBA */


/* reclaim tx queue entries whose transmission the firmware has completed */
static void
fore200e_tx_irq(struct fore200e* fore200e)
{
    struct host_txq*        txq = &fore200e->host_txq;
    struct host_txq_entry*  entry;
    struct atm_vcc*         vcc;
    struct fore200e_vc_map* vc_map;

    if (fore200e->host_txq.txing == 0)
	return;

    for (;;) {

	entry = &txq->host_entry[ txq->tail ];

	/* stop at the first entry the firmware has not completed yet */
	if ((*entry->status & STATUS_COMPLETE) == 0) {
	    break;
	}

	DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
		entry, txq->tail, entry->vc_map, entry->skb);

	/* free copy of misaligned data */
	kfree(entry->data);

	/* remove DMA mapping */
	fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
				 DMA_TO_DEVICE);

	vc_map = entry->vc_map;

	/* vcc closed since the time the entry was submitted for tx? */
	if ((vc_map->vcc == NULL) ||
	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {

	    DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
		    fore200e->atm_dev->number);

	    dev_kfree_skb_any(entry->skb);
	}
	else {
	    ASSERT(vc_map->vcc);

	    /* vcc closed then immediately re-opened? */
	    if (vc_map->incarn != entry->incarn) {

		/* when a vcc is closed, some PDUs may be still pending in the tx queue.
		   if the same vcc is immediately re-opened, those pending PDUs must
		   not be popped after the completion of their emission, as they refer
		   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
		   would be decremented by the size of the (unrelated) skb, possibly
		   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
		   we thus bind the tx entry to the current incarnation of the vcc
		   when the entry is submitted for tx. When the tx later completes,
		   if the incarnation number of the tx entry does not match the one
		   of the vcc, then this implies that the vcc has been closed then re-opened.
		   we thus just drop the skb here. */

		DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
			fore200e->atm_dev->number);

		dev_kfree_skb_any(entry->skb);
	    }
	    else {
		vcc = vc_map->vcc;
		ASSERT(vcc);

		/* notify tx completion */
		if (vcc->pop) {
		    vcc->pop(vcc, entry->skb);
		}
		else {
		    dev_kfree_skb_any(entry->skb);
		}
#if 1
		/* race fixed by the above incarnation mechanism, but... */
		if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
		    atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
		}
#endif
		/* check error condition */
		if (*entry->status & STATUS_ERROR)
		    atomic_inc(&vcc->stats->tx_err);
		else
		    atomic_inc(&vcc->stats->tx);
	    }
	}

	*entry->status = STATUS_FREE;

	fore200e->host_txq.txing--;

	FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
    }
}


#ifdef FORE200E_BSQ_DEBUG
/* sanity-check the free-buffer list of a buffer supply queue */
int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
{
    struct buffer* buffer;
    int            count = 0;

    buffer = bsq->freebuf;
    while (buffer) {

	if (buffer->supplied) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
		   where, scheme, magn, buffer->index);
	}

	if (buffer->magn != magn) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
		   where, scheme, magn, buffer->index, buffer->magn);
	}

	if (buffer->scheme != scheme) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
		   where, scheme, magn, buffer->index, buffer->scheme);
	}

	if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
		   where, scheme, magn, buffer->index);
	}

	count++;
	buffer = buffer->next;
    }

    if (count != bsq->freebuf_count) {
	printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
	       where, scheme, magn, count, bsq->freebuf_count);
    }
    return 0;
}
#endif


/* replenish the buffer supply queues with blocks of free rx buffers */
static void
fore200e_supply(struct fore200e* fore200e)
{
    int  scheme, magn, i;

    struct host_bsq*       bsq;
    struct host_bsq_entry* entry;
    struct buffer*         buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

#ifdef FORE200E_BSQ_DEBUG
	    bsq_audit(1, bsq, scheme, magn);
#endif
	    /* supply buffers one full rbd block at a time */
	    while (bsq->freebuf_count >= RBD_BLK_SIZE) {

		DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
			RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);

		entry = &bsq->host_entry[ bsq->head ];

		for (i = 0; i < RBD_BLK_SIZE; i++) {

		    /* take the first buffer in the free buffer list */
		    buffer = bsq->freebuf;
		    if (!buffer) {
			printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
			       scheme, magn, bsq->freebuf_count);
			return;
		    }
		    bsq->freebuf = buffer->next;

#ifdef FORE200E_BSQ_DEBUG
		    if (buffer->supplied)
			printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
			       scheme, magn, buffer->index);
		    buffer->supplied = 1;
#endif
		    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
		    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
		}

		FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);

		/* decrease accordingly the number of free rx buffers */
		bsq->freebuf_count -= RBD_BLK_SIZE;

		/* hand the fully-built rbd block over to the firmware */
		*entry->status = STATUS_PENDING;
		fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
	    }
	}
    }
}


/* reassemble a received PDU from its rx buffers and push it up to the vcc;
   returns 0 on success, -ENOMEM if the PDU had to be dropped */
static int
fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
{
    struct sk_buff*      skb;
    struct buffer*       buffer;
    struct fore200e_vcc* fore200e_vcc;
    int                  i, pdu_len = 0;
#ifdef FORE200E_52BYTE_AAL0_SDU
    u32                  cell_header = 0;
#endif

    ASSERT(vcc);

    fore200e_vcc = FORE200E_VCC(vcc);
    ASSERT(fore200e_vcc);

#ifdef FORE200E_52BYTE_AAL0_SDU
    /* rebuild the 4-byte ATM cell header for raw AAL0 sockets that expect
       52-byte SDUs (e.g. atmdump) */
    if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {

	cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
	              (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
	              (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
	              (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
	               rpd->atm_header.clp;
	pdu_len = 4;
    }
#endif

    /* compute total PDU length */
    for (i = 0; i < rpd->nseg; i++)
	pdu_len += rpd->rsd[ i ].length;

    skb = alloc_skb(pdu_len, GFP_ATOMIC);
    if (skb == NULL) {
	DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);

	atomic_inc(&vcc->stats->rx_drop);
	return -ENOMEM;
    }

    __net_timestamp(skb);

#ifdef FORE200E_52BYTE_AAL0_SDU
    if (cell_header) {
	*((u32*)skb_put(skb, 4)) = cell_header;
    }
#endif

    /* reassemble segments */
    for (i = 0; i < rpd->nseg; i++) {

	/* rebuild rx buffer address from rsd handle */
	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);

	/* Make device DMA transfer visible to CPU. */
	fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);

	memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);

	/* Now let the device get at it again. */
	fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
    }

    DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);

    /* update per-vcc rx PDU statistics */
    if (pdu_len < fore200e_vcc->rx_min_pdu)
	fore200e_vcc->rx_min_pdu = pdu_len;
    if (pdu_len > fore200e_vcc->rx_max_pdu)
	fore200e_vcc->rx_max_pdu = pdu_len;
    fore200e_vcc->rx_pdu++;

    /* push PDU */
    if (atm_charge(vcc, skb->truesize) == 0) {

	DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
		vcc->itf, vcc->vpi, vcc->vci);

	dev_kfree_skb_any(skb);

	atomic_inc(&vcc->stats->rx_drop);
	return -ENOMEM;
    }

    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);

    vcc->push(vcc, skb);
    atomic_inc(&vcc->stats->rx);

    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);

    return 0;
}


/* return the rx buffers of a received PDU to their free-buffer lists */
static void
fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
{
    struct host_bsq* bsq;
    struct buffer*   buffer;
    int              i;

    for (i = 0; i < rpd->nseg; i++) {

	/* rebuild rx buffer address from rsd handle */
	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);

	bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];

#ifdef FORE200E_BSQ_DEBUG
	bsq_audit(2, bsq, buffer->scheme, buffer->magn);

	if (buffer->supplied == 0)
	    printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
		   buffer->scheme, buffer->magn, buffer->index);
	buffer->supplied = 0;
#endif

	/* re-insert the buffer into the free buffer list */
	buffer->next = bsq->freebuf;
	bsq->freebuf = buffer;

	/* then increment the number of free rx buffers */
	bsq->freebuf_count++;
    }
}


static void
fore200e_rx_irq(struct fore200e* fore200e)
{
    struct host_rxq*       rxq = &fore200e->host_rxq;
    struct host_rxq_entry*
entry; 1244 struct atm_vcc* vcc; 1245 struct fore200e_vc_map* vc_map; 1246 1247 for (;;) { 1248 1249 entry = &rxq->host_entry[ rxq->head ]; 1250 1251 /* no more received PDUs */ 1252 if ((*entry->status & STATUS_COMPLETE) == 0) 1253 break; 1254 1255 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1256 1257 if ((vc_map->vcc == NULL) || 1258 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) { 1259 1260 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n", 1261 fore200e->atm_dev->number, 1262 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1263 } 1264 else { 1265 vcc = vc_map->vcc; 1266 ASSERT(vcc); 1267 1268 if ((*entry->status & STATUS_ERROR) == 0) { 1269 1270 fore200e_push_rpd(fore200e, vcc, entry->rpd); 1271 } 1272 else { 1273 DPRINTK(2, "damaged PDU on %d.%d.%d\n", 1274 fore200e->atm_dev->number, 1275 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1276 atomic_inc(&vcc->stats->rx_err); 1277 } 1278 } 1279 1280 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX); 1281 1282 fore200e_collect_rpd(fore200e, entry->rpd); 1283 1284 /* rewrite the rpd address to ack the received PDU */ 1285 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr); 1286 *entry->status = STATUS_FREE; 1287 1288 fore200e_supply(fore200e); 1289 } 1290 } 1291 1292 1293 #ifndef FORE200E_USE_TASKLET 1294 static void 1295 fore200e_irq(struct fore200e* fore200e) 1296 { 1297 unsigned long flags; 1298 1299 spin_lock_irqsave(&fore200e->q_lock, flags); 1300 fore200e_rx_irq(fore200e); 1301 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1302 1303 spin_lock_irqsave(&fore200e->q_lock, flags); 1304 fore200e_tx_irq(fore200e); 1305 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1306 } 1307 #endif 1308 1309 1310 static irqreturn_t 1311 fore200e_interrupt(int irq, void* dev) 1312 { 1313 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev); 1314 1315 if (fore200e->bus->irq_check(fore200e) == 0) { 1316 1317 DPRINTK(3, 
"interrupt NOT triggered by device %d\n", fore200e->atm_dev->number); 1318 return IRQ_NONE; 1319 } 1320 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number); 1321 1322 #ifdef FORE200E_USE_TASKLET 1323 tasklet_schedule(&fore200e->tx_tasklet); 1324 tasklet_schedule(&fore200e->rx_tasklet); 1325 #else 1326 fore200e_irq(fore200e); 1327 #endif 1328 1329 fore200e->bus->irq_ack(fore200e); 1330 return IRQ_HANDLED; 1331 } 1332 1333 1334 #ifdef FORE200E_USE_TASKLET 1335 static void 1336 fore200e_tx_tasklet(unsigned long data) 1337 { 1338 struct fore200e* fore200e = (struct fore200e*) data; 1339 unsigned long flags; 1340 1341 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number); 1342 1343 spin_lock_irqsave(&fore200e->q_lock, flags); 1344 fore200e_tx_irq(fore200e); 1345 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1346 } 1347 1348 1349 static void 1350 fore200e_rx_tasklet(unsigned long data) 1351 { 1352 struct fore200e* fore200e = (struct fore200e*) data; 1353 unsigned long flags; 1354 1355 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number); 1356 1357 spin_lock_irqsave(&fore200e->q_lock, flags); 1358 fore200e_rx_irq((struct fore200e*) data); 1359 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1360 } 1361 #endif 1362 1363 1364 static int 1365 fore200e_select_scheme(struct atm_vcc* vcc) 1366 { 1367 /* fairly balance the VCs over (identical) buffer schemes */ 1368 int scheme = vcc->vci % 2 ? 
BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO; 1369 1370 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n", 1371 vcc->itf, vcc->vpi, vcc->vci, scheme); 1372 1373 return scheme; 1374 } 1375 1376 1377 static int 1378 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu) 1379 { 1380 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1381 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1382 struct activate_opcode activ_opcode; 1383 struct deactivate_opcode deactiv_opcode; 1384 struct vpvc vpvc; 1385 int ok; 1386 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal); 1387 1388 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1389 1390 if (activate) { 1391 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc); 1392 1393 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN; 1394 activ_opcode.aal = aal; 1395 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme; 1396 activ_opcode.pad = 0; 1397 } 1398 else { 1399 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN; 1400 deactiv_opcode.pad = 0; 1401 } 1402 1403 vpvc.vci = vcc->vci; 1404 vpvc.vpi = vcc->vpi; 1405 1406 *entry->status = STATUS_PENDING; 1407 1408 if (activate) { 1409 1410 #ifdef FORE200E_52BYTE_AAL0_SDU 1411 mtu = 48; 1412 #endif 1413 /* the MTU is not used by the cp, except in the case of AAL0 */ 1414 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu); 1415 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc); 1416 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode); 1417 } 1418 else { 1419 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc); 1420 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode); 1421 } 1422 1423 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1424 1425 *entry->status = STATUS_FREE; 1426 1427 if (ok == 0) { 1428 printk(FORE200E "unable to 
%s VC %d.%d.%d\n", 1429 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci); 1430 return -EIO; 1431 } 1432 1433 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci, 1434 activate ? "open" : "clos"); 1435 1436 return 0; 1437 } 1438 1439 1440 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */ 1441 1442 static void 1443 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate) 1444 { 1445 if (qos->txtp.max_pcr < ATM_OC3_PCR) { 1446 1447 /* compute the data cells to idle cells ratio from the tx PCR */ 1448 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR; 1449 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells; 1450 } 1451 else { 1452 /* disable rate control */ 1453 rate->data_cells = rate->idle_cells = 0; 1454 } 1455 } 1456 1457 1458 static int 1459 fore200e_open(struct atm_vcc *vcc) 1460 { 1461 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1462 struct fore200e_vcc* fore200e_vcc; 1463 struct fore200e_vc_map* vc_map; 1464 unsigned long flags; 1465 int vci = vcc->vci; 1466 short vpi = vcc->vpi; 1467 1468 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS)); 1469 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS)); 1470 1471 spin_lock_irqsave(&fore200e->q_lock, flags); 1472 1473 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci); 1474 if (vc_map->vcc) { 1475 1476 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1477 1478 printk(FORE200E "VC %d.%d.%d already in use\n", 1479 fore200e->atm_dev->number, vpi, vci); 1480 1481 return -EINVAL; 1482 } 1483 1484 vc_map->vcc = vcc; 1485 1486 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1487 1488 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC); 1489 if (fore200e_vcc == NULL) { 1490 vc_map->vcc = NULL; 1491 return -ENOMEM; 1492 } 1493 1494 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; " 1495 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n", 1496 vcc->itf, vcc->vpi, vcc->vci, 
fore200e_atm2fore_aal(vcc->qos.aal), 1497 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ], 1498 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu, 1499 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ], 1500 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu); 1501 1502 /* pseudo-CBR bandwidth requested? */ 1503 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1504 1505 down(&fore200e->rate_sf); 1506 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) { 1507 up(&fore200e->rate_sf); 1508 1509 kfree(fore200e_vcc); 1510 vc_map->vcc = NULL; 1511 return -EAGAIN; 1512 } 1513 1514 /* reserve bandwidth */ 1515 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr; 1516 up(&fore200e->rate_sf); 1517 } 1518 1519 vcc->itf = vcc->dev->number; 1520 1521 set_bit(ATM_VF_PARTIAL,&vcc->flags); 1522 set_bit(ATM_VF_ADDR, &vcc->flags); 1523 1524 vcc->dev_data = fore200e_vcc; 1525 1526 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) { 1527 1528 vc_map->vcc = NULL; 1529 1530 clear_bit(ATM_VF_ADDR, &vcc->flags); 1531 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1532 1533 vcc->dev_data = NULL; 1534 1535 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1536 1537 kfree(fore200e_vcc); 1538 return -EINVAL; 1539 } 1540 1541 /* compute rate control parameters */ 1542 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1543 1544 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate); 1545 set_bit(ATM_VF_HASQOS, &vcc->flags); 1546 1547 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n", 1548 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1549 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr, 1550 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells); 1551 } 1552 1553 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1; 1554 
fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0; 1555 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0; 1556 1557 /* new incarnation of the vcc */ 1558 vc_map->incarn = ++fore200e->incarn_count; 1559 1560 /* VC unusable before this flag is set */ 1561 set_bit(ATM_VF_READY, &vcc->flags); 1562 1563 return 0; 1564 } 1565 1566 1567 static void 1568 fore200e_close(struct atm_vcc* vcc) 1569 { 1570 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1571 struct fore200e_vcc* fore200e_vcc; 1572 struct fore200e_vc_map* vc_map; 1573 unsigned long flags; 1574 1575 ASSERT(vcc); 1576 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS)); 1577 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS)); 1578 1579 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal)); 1580 1581 clear_bit(ATM_VF_READY, &vcc->flags); 1582 1583 fore200e_activate_vcin(fore200e, 0, vcc, 0); 1584 1585 spin_lock_irqsave(&fore200e->q_lock, flags); 1586 1587 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); 1588 1589 /* the vc is no longer considered as "in use" by fore200e_open() */ 1590 vc_map->vcc = NULL; 1591 1592 vcc->itf = vcc->vci = vcc->vpi = 0; 1593 1594 fore200e_vcc = FORE200E_VCC(vcc); 1595 vcc->dev_data = NULL; 1596 1597 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1598 1599 /* release reserved bandwidth, if any */ 1600 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1601 1602 down(&fore200e->rate_sf); 1603 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1604 up(&fore200e->rate_sf); 1605 1606 clear_bit(ATM_VF_HASQOS, &vcc->flags); 1607 } 1608 1609 clear_bit(ATM_VF_ADDR, &vcc->flags); 1610 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1611 1612 ASSERT(fore200e_vcc); 1613 kfree(fore200e_vcc); 1614 } 1615 1616 1617 static int 1618 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) 1619 { 1620 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1621 struct fore200e_vcc* fore200e_vcc = 
FORE200E_VCC(vcc); 1622 struct fore200e_vc_map* vc_map; 1623 struct host_txq* txq = &fore200e->host_txq; 1624 struct host_txq_entry* entry; 1625 struct tpd* tpd; 1626 struct tpd_haddr tpd_haddr; 1627 int retry = CONFIG_ATM_FORE200E_TX_RETRY; 1628 int tx_copy = 0; 1629 int tx_len = skb->len; 1630 u32* cell_header = NULL; 1631 unsigned char* skb_data; 1632 int skb_len; 1633 unsigned char* data; 1634 unsigned long flags; 1635 1636 ASSERT(vcc); 1637 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); 1638 ASSERT(fore200e); 1639 ASSERT(fore200e_vcc); 1640 1641 if (!test_bit(ATM_VF_READY, &vcc->flags)) { 1642 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi); 1643 dev_kfree_skb_any(skb); 1644 return -EINVAL; 1645 } 1646 1647 #ifdef FORE200E_52BYTE_AAL0_SDU 1648 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) { 1649 cell_header = (u32*) skb->data; 1650 skb_data = skb->data + 4; /* skip 4-byte cell header */ 1651 skb_len = tx_len = skb->len - 4; 1652 1653 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header); 1654 } 1655 else 1656 #endif 1657 { 1658 skb_data = skb->data; 1659 skb_len = skb->len; 1660 } 1661 1662 if (((unsigned long)skb_data) & 0x3) { 1663 1664 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name); 1665 tx_copy = 1; 1666 tx_len = skb_len; 1667 } 1668 1669 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) { 1670 1671 /* this simply NUKES the PCA board */ 1672 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name); 1673 tx_copy = 1; 1674 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD; 1675 } 1676 1677 if (tx_copy) { 1678 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA); 1679 if (data == NULL) { 1680 if (vcc->pop) { 1681 vcc->pop(vcc, skb); 1682 } 1683 else { 1684 dev_kfree_skb_any(skb); 1685 } 1686 return -ENOMEM; 1687 } 1688 1689 memcpy(data, skb_data, skb_len); 1690 if (skb_len < tx_len) 1691 memset(data + skb_len, 0x00, tx_len - skb_len); 1692 
} 1693 else { 1694 data = skb_data; 1695 } 1696 1697 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); 1698 ASSERT(vc_map->vcc == vcc); 1699 1700 retry_here: 1701 1702 spin_lock_irqsave(&fore200e->q_lock, flags); 1703 1704 entry = &txq->host_entry[ txq->head ]; 1705 1706 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) { 1707 1708 /* try to free completed tx queue entries */ 1709 fore200e_tx_irq(fore200e); 1710 1711 if (*entry->status != STATUS_FREE) { 1712 1713 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1714 1715 /* retry once again? */ 1716 if (--retry > 0) { 1717 udelay(50); 1718 goto retry_here; 1719 } 1720 1721 atomic_inc(&vcc->stats->tx_err); 1722 1723 fore200e->tx_sat++; 1724 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n", 1725 fore200e->name, fore200e->cp_queues->heartbeat); 1726 if (vcc->pop) { 1727 vcc->pop(vcc, skb); 1728 } 1729 else { 1730 dev_kfree_skb_any(skb); 1731 } 1732 1733 if (tx_copy) 1734 kfree(data); 1735 1736 return -ENOBUFS; 1737 } 1738 } 1739 1740 entry->incarn = vc_map->incarn; 1741 entry->vc_map = vc_map; 1742 entry->skb = skb; 1743 entry->data = tx_copy ? data : NULL; 1744 1745 tpd = entry->tpd; 1746 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE); 1747 tpd->tsd[ 0 ].length = tx_len; 1748 1749 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX); 1750 txq->txing++; 1751 1752 /* The dma_map call above implies a dma_sync so the device can use it, 1753 * thus no explicit dma_sync call is necessary here. 
1754 */ 1755 1756 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n", 1757 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1758 tpd->tsd[0].length, skb_len); 1759 1760 if (skb_len < fore200e_vcc->tx_min_pdu) 1761 fore200e_vcc->tx_min_pdu = skb_len; 1762 if (skb_len > fore200e_vcc->tx_max_pdu) 1763 fore200e_vcc->tx_max_pdu = skb_len; 1764 fore200e_vcc->tx_pdu++; 1765 1766 /* set tx rate control information */ 1767 tpd->rate.data_cells = fore200e_vcc->rate.data_cells; 1768 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells; 1769 1770 if (cell_header) { 1771 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP); 1772 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 1773 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT; 1774 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT; 1775 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT; 1776 } 1777 else { 1778 /* set the ATM header, common to all cells conveying the PDU */ 1779 tpd->atm_header.clp = 0; 1780 tpd->atm_header.plt = 0; 1781 tpd->atm_header.vci = vcc->vci; 1782 tpd->atm_header.vpi = vcc->vpi; 1783 tpd->atm_header.gfc = 0; 1784 } 1785 1786 tpd->spec.length = tx_len; 1787 tpd->spec.nseg = 1; 1788 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal); 1789 tpd->spec.intr = 1; 1790 1791 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */ 1792 tpd_haddr.pad = 0; 1793 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */ 1794 1795 *entry->status = STATUS_PENDING; 1796 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr); 1797 1798 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1799 1800 return 0; 1801 } 1802 1803 1804 static int 1805 fore200e_getstats(struct fore200e* fore200e) 1806 { 1807 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1808 struct host_cmdq_entry* 
entry = &cmdq->host_entry[ cmdq->head ]; 1809 struct stats_opcode opcode; 1810 int ok; 1811 u32 stats_dma_addr; 1812 1813 if (fore200e->stats == NULL) { 1814 fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA); 1815 if (fore200e->stats == NULL) 1816 return -ENOMEM; 1817 } 1818 1819 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats, 1820 sizeof(struct stats), DMA_FROM_DEVICE); 1821 1822 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1823 1824 opcode.opcode = OPCODE_GET_STATS; 1825 opcode.pad = 0; 1826 1827 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr); 1828 1829 *entry->status = STATUS_PENDING; 1830 1831 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode); 1832 1833 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1834 1835 *entry->status = STATUS_FREE; 1836 1837 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE); 1838 1839 if (ok == 0) { 1840 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name); 1841 return -EIO; 1842 } 1843 1844 return 0; 1845 } 1846 1847 1848 static int 1849 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen) 1850 { 1851 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ 1852 1853 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n", 1854 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen); 1855 1856 return -EINVAL; 1857 } 1858 1859 1860 static int 1861 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen) 1862 { 1863 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ 1864 1865 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n", 1866 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen); 1867 1868 return -EINVAL; 1869 } 1870 1871 1872 #if 0 /* currently 
unused */ 1873 static int 1874 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs) 1875 { 1876 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1877 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1878 struct oc3_opcode opcode; 1879 int ok; 1880 u32 oc3_regs_dma_addr; 1881 1882 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE); 1883 1884 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1885 1886 opcode.opcode = OPCODE_GET_OC3; 1887 opcode.reg = 0; 1888 opcode.value = 0; 1889 opcode.mask = 0; 1890 1891 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr); 1892 1893 *entry->status = STATUS_PENDING; 1894 1895 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode); 1896 1897 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1898 1899 *entry->status = STATUS_FREE; 1900 1901 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE); 1902 1903 if (ok == 0) { 1904 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name); 1905 return -EIO; 1906 } 1907 1908 return 0; 1909 } 1910 #endif 1911 1912 1913 static int 1914 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask) 1915 { 1916 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1917 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1918 struct oc3_opcode opcode; 1919 int ok; 1920 1921 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask); 1922 1923 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1924 1925 opcode.opcode = OPCODE_SET_OC3; 1926 opcode.reg = reg; 1927 opcode.value = value; 1928 opcode.mask = mask; 1929 1930 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr); 1931 1932 *entry->status = STATUS_PENDING; 1933 1934 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode); 1935 1936 ok = 
fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1937 1938 *entry->status = STATUS_FREE; 1939 1940 if (ok == 0) { 1941 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name); 1942 return -EIO; 1943 } 1944 1945 return 0; 1946 } 1947 1948 1949 static int 1950 fore200e_setloop(struct fore200e* fore200e, int loop_mode) 1951 { 1952 u32 mct_value, mct_mask; 1953 int error; 1954 1955 if (!capable(CAP_NET_ADMIN)) 1956 return -EPERM; 1957 1958 switch (loop_mode) { 1959 1960 case ATM_LM_NONE: 1961 mct_value = 0; 1962 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE; 1963 break; 1964 1965 case ATM_LM_LOC_PHY: 1966 mct_value = mct_mask = SUNI_MCT_DLE; 1967 break; 1968 1969 case ATM_LM_RMT_PHY: 1970 mct_value = mct_mask = SUNI_MCT_LLE; 1971 break; 1972 1973 default: 1974 return -EINVAL; 1975 } 1976 1977 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask); 1978 if (error == 0) 1979 fore200e->loop_mode = loop_mode; 1980 1981 return error; 1982 } 1983 1984 1985 static int 1986 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg) 1987 { 1988 struct sonet_stats tmp; 1989 1990 if (fore200e_getstats(fore200e) < 0) 1991 return -EIO; 1992 1993 tmp.section_bip = cpu_to_be32(fore200e->stats->oc3.section_bip8_errors); 1994 tmp.line_bip = cpu_to_be32(fore200e->stats->oc3.line_bip24_errors); 1995 tmp.path_bip = cpu_to_be32(fore200e->stats->oc3.path_bip8_errors); 1996 tmp.line_febe = cpu_to_be32(fore200e->stats->oc3.line_febe_errors); 1997 tmp.path_febe = cpu_to_be32(fore200e->stats->oc3.path_febe_errors); 1998 tmp.corr_hcs = cpu_to_be32(fore200e->stats->oc3.corr_hcs_errors); 1999 tmp.uncorr_hcs = cpu_to_be32(fore200e->stats->oc3.ucorr_hcs_errors); 2000 tmp.tx_cells = cpu_to_be32(fore200e->stats->aal0.cells_transmitted) + 2001 cpu_to_be32(fore200e->stats->aal34.cells_transmitted) + 2002 cpu_to_be32(fore200e->stats->aal5.cells_transmitted); 2003 tmp.rx_cells = cpu_to_be32(fore200e->stats->aal0.cells_received) + 2004 
cpu_to_be32(fore200e->stats->aal34.cells_received) + 2005 cpu_to_be32(fore200e->stats->aal5.cells_received); 2006 2007 if (arg) 2008 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0; 2009 2010 return 0; 2011 } 2012 2013 2014 static int 2015 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg) 2016 { 2017 struct fore200e* fore200e = FORE200E_DEV(dev); 2018 2019 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg); 2020 2021 switch (cmd) { 2022 2023 case SONET_GETSTAT: 2024 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg); 2025 2026 case SONET_GETDIAG: 2027 return put_user(0, (int __user *)arg) ? -EFAULT : 0; 2028 2029 case ATM_SETLOOP: 2030 return fore200e_setloop(fore200e, (int)(unsigned long)arg); 2031 2032 case ATM_GETLOOP: 2033 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0; 2034 2035 case ATM_QUERYLOOP: 2036 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? 
-EFAULT : 0; 2037 } 2038 2039 return -ENOSYS; /* not implemented */ 2040 } 2041 2042 2043 static int 2044 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags) 2045 { 2046 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc); 2047 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 2048 2049 if (!test_bit(ATM_VF_READY, &vcc->flags)) { 2050 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi); 2051 return -EINVAL; 2052 } 2053 2054 DPRINTK(2, "change_qos %d.%d.%d, " 2055 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; " 2056 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n" 2057 "available_cell_rate = %u", 2058 vcc->itf, vcc->vpi, vcc->vci, 2059 fore200e_traffic_class[ qos->txtp.traffic_class ], 2060 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu, 2061 fore200e_traffic_class[ qos->rxtp.traffic_class ], 2062 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu, 2063 flags, fore200e->available_cell_rate); 2064 2065 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) { 2066 2067 down(&fore200e->rate_sf); 2068 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) { 2069 up(&fore200e->rate_sf); 2070 return -EAGAIN; 2071 } 2072 2073 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 2074 fore200e->available_cell_rate -= qos->txtp.max_pcr; 2075 2076 up(&fore200e->rate_sf); 2077 2078 memcpy(&vcc->qos, qos, sizeof(struct atm_qos)); 2079 2080 /* update rate control parameters */ 2081 fore200e_rate_ctrl(qos, &fore200e_vcc->rate); 2082 2083 set_bit(ATM_VF_HASQOS, &vcc->flags); 2084 2085 return 0; 2086 } 2087 2088 return -EINVAL; 2089 } 2090 2091 2092 static int __devinit 2093 fore200e_irq_request(struct fore200e* fore200e) 2094 { 2095 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) { 2096 2097 printk(FORE200E "unable to reserve IRQ %s for device %s\n", 2098 
fore200e_irq_itoa(fore200e->irq), fore200e->name); 2099 return -EBUSY; 2100 } 2101 2102 printk(FORE200E "IRQ %s reserved for device %s\n", 2103 fore200e_irq_itoa(fore200e->irq), fore200e->name); 2104 2105 #ifdef FORE200E_USE_TASKLET 2106 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e); 2107 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e); 2108 #endif 2109 2110 fore200e->state = FORE200E_STATE_IRQ; 2111 return 0; 2112 } 2113 2114 2115 static int __devinit 2116 fore200e_get_esi(struct fore200e* fore200e) 2117 { 2118 struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA); 2119 int ok, i; 2120 2121 if (!prom) 2122 return -ENOMEM; 2123 2124 ok = fore200e->bus->prom_read(fore200e, prom); 2125 if (ok < 0) { 2126 kfree(prom); 2127 return -EBUSY; 2128 } 2129 2130 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n", 2131 fore200e->name, 2132 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */ 2133 prom->serial_number & 0xFFFF, 2134 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ], 2135 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]); 2136 2137 for (i = 0; i < ESI_LEN; i++) { 2138 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ]; 2139 } 2140 2141 kfree(prom); 2142 2143 return 0; 2144 } 2145 2146 2147 static int __devinit 2148 fore200e_alloc_rx_buf(struct fore200e* fore200e) 2149 { 2150 int scheme, magn, nbr, size, i; 2151 2152 struct host_bsq* bsq; 2153 struct buffer* buffer; 2154 2155 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 2156 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 2157 2158 bsq = &fore200e->host_bsq[ scheme ][ magn ]; 2159 2160 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ]; 2161 size = fore200e_rx_buf_size[ scheme ][ magn ]; 2162 2163 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn); 2164 2165 /* allocate the array of receive 
buffers */ 2166 buffer = bsq->buffer = kzalloc(nbr * sizeof(struct buffer), GFP_KERNEL); 2167 2168 if (buffer == NULL) 2169 return -ENOMEM; 2170 2171 bsq->freebuf = NULL; 2172 2173 for (i = 0; i < nbr; i++) { 2174 2175 buffer[ i ].scheme = scheme; 2176 buffer[ i ].magn = magn; 2177 #ifdef FORE200E_BSQ_DEBUG 2178 buffer[ i ].index = i; 2179 buffer[ i ].supplied = 0; 2180 #endif 2181 2182 /* allocate the receive buffer body */ 2183 if (fore200e_chunk_alloc(fore200e, 2184 &buffer[ i ].data, size, fore200e->bus->buffer_alignment, 2185 DMA_FROM_DEVICE) < 0) { 2186 2187 while (i > 0) 2188 fore200e_chunk_free(fore200e, &buffer[ --i ].data); 2189 kfree(buffer); 2190 2191 return -ENOMEM; 2192 } 2193 2194 /* insert the buffer into the free buffer list */ 2195 buffer[ i ].next = bsq->freebuf; 2196 bsq->freebuf = &buffer[ i ]; 2197 } 2198 /* all the buffers are free, initially */ 2199 bsq->freebuf_count = nbr; 2200 2201 #ifdef FORE200E_BSQ_DEBUG 2202 bsq_audit(3, bsq, scheme, magn); 2203 #endif 2204 } 2205 } 2206 2207 fore200e->state = FORE200E_STATE_ALLOC_BUF; 2208 return 0; 2209 } 2210 2211 2212 static int __devinit 2213 fore200e_init_bs_queue(struct fore200e* fore200e) 2214 { 2215 int scheme, magn, i; 2216 2217 struct host_bsq* bsq; 2218 struct cp_bsq_entry __iomem * cp_entry; 2219 2220 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 2221 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 2222 2223 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn); 2224 2225 bsq = &fore200e->host_bsq[ scheme ][ magn ]; 2226 2227 /* allocate and align the array of status words */ 2228 if (fore200e->bus->dma_chunk_alloc(fore200e, 2229 &bsq->status, 2230 sizeof(enum status), 2231 QUEUE_SIZE_BS, 2232 fore200e->bus->status_alignment) < 0) { 2233 return -ENOMEM; 2234 } 2235 2236 /* allocate and align the array of receive buffer descriptors */ 2237 if (fore200e->bus->dma_chunk_alloc(fore200e, 2238 &bsq->rbd_block, 2239 sizeof(struct rbd_block), 2240 
QUEUE_SIZE_BS,
                                               fore200e->bus->descr_alignment) < 0) {

                /* unwind the status-word allocation made just above */
                fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
                return -ENOMEM;
            }

            /* get the base address of the cp resident buffer supply queue entries */
            cp_entry = fore200e->virt_base +
                       fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);

            /* fill the host resident and cp resident buffer supply queue entries */
            for (i = 0; i < QUEUE_SIZE_BS; i++) {

                bsq->host_entry[ i ].status =
                    FORE200E_INDEX(bsq->status.align_addr, enum status, i);
                bsq->host_entry[ i ].rbd_block =
                    FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
                bsq->host_entry[ i ].rbd_block_dma =
                    FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
                bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];

                *bsq->host_entry[ i ].status = STATUS_FREE;

                /* tell the cp where the host status word of this entry lives */
                fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
                                     &cp_entry[ i ].status_haddr);
            }
        }
    }

    fore200e->state = FORE200E_STATE_INIT_BSQ;
    return 0;
}


/* initialize the host-resident receive queue and wire each entry to the
   corresponding cp resident rx queue entry */
static int __devinit
fore200e_init_rx_queue(struct fore200e* fore200e)
{
    struct host_rxq*              rxq = &fore200e->host_rxq;
    struct cp_rxq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "receive queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
                                       &rxq->status,
                                       sizeof(enum status),
                                       QUEUE_SIZE_RX,
                                       fore200e->bus->status_alignment) < 0) {
        return -ENOMEM;
    }

    /* allocate and align the array of receive PDU descriptors */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
                                       &rxq->rpd,
                                       sizeof(struct rpd),
                                       QUEUE_SIZE_RX,
                                       fore200e->bus->descr_alignment) < 0) {

        fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
        return -ENOMEM;
    }

    /* get the base address of the cp resident rx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);

    /* fill the host resident and cp resident rx entries */
    for (i = 0; i < QUEUE_SIZE_RX; i++) {

        rxq->host_entry[ i ].status =
            FORE200E_INDEX(rxq->status.align_addr, enum status, i);
        rxq->host_entry[ i ].rpd =
            FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
        rxq->host_entry[ i ].rpd_dma =
            FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
        rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];

        *rxq->host_entry[ i ].status = STATUS_FREE;

        fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
                             &cp_entry[ i ].status_haddr);

        fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
                             &cp_entry[ i ].rpd_haddr);
    }

    /* set the head entry of the queue */
    rxq->head = 0;

    fore200e->state = FORE200E_STATE_INIT_RXQ;
    return 0;
}


/* initialize the host-resident transmit queue and wire each entry to the
   corresponding cp resident tx queue entry */
static int __devinit
fore200e_init_tx_queue(struct fore200e* fore200e)
{
    struct host_txq*              txq = &fore200e->host_txq;
    struct cp_txq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "transmit queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
                                       &txq->status,
                                       sizeof(enum status),
                                       QUEUE_SIZE_TX,
                                       fore200e->bus->status_alignment) < 0) {
        return -ENOMEM;
    }

    /* allocate and align the array of transmit PDU descriptors */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
                                       &txq->tpd,
                                       sizeof(struct tpd),
                                       QUEUE_SIZE_TX,
                                       fore200e->bus->descr_alignment) < 0) {

        fore200e->bus->dma_chunk_free(fore200e, &txq->status);
        return -ENOMEM;
    }

    /* get the base address of the cp resident tx queue entries */
    cp_entry = fore200e->virt_base +
               fore200e->bus->read(&fore200e->cp_queues->cp_txq);

    /* fill the host resident and cp resident tx entries */
    for (i = 0; i < QUEUE_SIZE_TX; i++) {

        txq->host_entry[ i ].status =
            FORE200E_INDEX(txq->status.align_addr, enum status, i);
        txq->host_entry[ i ].tpd =
            FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
        txq->host_entry[ i ].tpd_dma =
            FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
        txq->host_entry[ i ].cp_entry = &cp_entry[ i ];

        *txq->host_entry[ i ].status = STATUS_FREE;

        fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
                             &cp_entry[ i ].status_haddr);

        /* although there is a one-to-one mapping of tx queue entries and tpds,
           we do not write here the DMA (physical) base address of each tpd into
           the related cp resident entry, because the cp relies on this write
           operation to detect that a new pdu has been submitted for tx */
    }

    /* set the head and tail entries of the queue */
    txq->head = 0;
    txq->tail = 0;

    fore200e->state = FORE200E_STATE_INIT_TXQ;
    return 0;
}


/* initialize the host-resident command queue and wire each entry to the
   corresponding cp resident cmd queue entry */
static int __devinit
fore200e_init_cmd_queue(struct fore200e* fore200e)
{
    struct host_cmdq*              cmdq = &fore200e->host_cmdq;
    struct cp_cmdq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "command queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
                                       &cmdq->status,
                                       sizeof(enum status),
                                       QUEUE_SIZE_CMD,
                                       fore200e->bus->status_alignment) < 0) {
        return -ENOMEM;
    }

    /* get the base address of the cp resident cmd queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);

    /* fill the host resident and cp resident cmd entries */
    for (i = 0; i < QUEUE_SIZE_CMD; i++) {

        cmdq->host_entry[ i ].status =
FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
        cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];

        *cmdq->host_entry[ i ].status = STATUS_FREE;

        fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
                             &cp_entry[ i ].status_haddr);
    }

    /* set the head entry of the queue */
    cmdq->head = 0;

    fore200e->state = FORE200E_STATE_INIT_CMDQ;
    return 0;
}


/* write the parameters of one buffer supply queue (scheme, magnitude)
   into the cp startup block */
static void __init
fore200e_param_bs_queue(struct fore200e* fore200e,
                        enum buffer_scheme scheme, enum buffer_magn magn,
                        int queue_length, int pool_size, int supply_blksize)
{
    struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];

    fore200e->bus->write(queue_length,                           &bs_spec->queue_length);
    fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
    fore200e->bus->write(pool_size,                              &bs_spec->pool_size);
    fore200e->bus->write(supply_blksize,                         &bs_spec->supply_blksize);
}


/* fill the cp startup block with the queue geometry, issue the INITIALIZE
   command and poll (up to 3000 iterations) for its completion */
static int __devinit
fore200e_initialize(struct fore200e* fore200e)
{
    struct cp_queues __iomem * cpq;
    int ok, scheme, magn;

    DPRINTK(2, "device %s being initialized\n", fore200e->name);

    init_MUTEX(&fore200e->rate_sf);
    spin_lock_init(&fore200e->q_lock);

    cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;

    /* enable cp to host interrupts */
    fore200e->bus->write(1, &cpq->imask);

    if (fore200e->bus->irq_enable)
        fore200e->bus->irq_enable(fore200e);

    fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);

    fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
    fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
    fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);

    fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
    fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
        for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
            fore200e_param_bs_queue(fore200e, scheme, magn,
                                    QUEUE_SIZE_BS,
                                    fore200e_rx_buf_nbr[ scheme ][ magn ],
                                    RBD_BLK_SIZE);

    /* issue the initialize command */
    fore200e->bus->write(STATUS_PENDING,    &cpq->init.status);
    fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);

    ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
    if (ok == 0) {
        printk(FORE200E "device %s initialization failed\n", fore200e->name);
        return -ENODEV;
    }

    printk(FORE200E "device %s initialized\n", fore200e->name);

    fore200e->state = FORE200E_STATE_INITIALIZE;
    return 0;
}


/* push one character to the i960 monitor's soft UART */
static void __devinit
fore200e_monitor_putc(struct fore200e* fore200e, char c)
{
    struct cp_monitor __iomem * monitor = fore200e->cp_monitor;

#if 0
    printk("%c", c);
#endif
    fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
}


/* read one character from the soft UART, acknowledging it to the monitor;
   returns -1 if nothing arrives within ~50 ms */
static int __devinit
fore200e_monitor_getc(struct fore200e* fore200e)
{
    struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
    unsigned long timeout = jiffies + msecs_to_jiffies(50);
    int c;

    while (time_before(jiffies, timeout)) {

        c = (int) fore200e->bus->read(&monitor->soft_uart.recv);

        if (c & FORE200E_CP_MONITOR_UART_AVAIL) {

            fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
#if 0
            printk("%c", c & 0xFF);
#endif
            return c & 0xFF;
        }
    }

    return -1;
}


/* send a NUL-terminated string to the i960 monitor, draining the monitor's
   pending output before every character and after the whole string */
static void __devinit
fore200e_monitor_puts(struct fore200e* fore200e, char* str)
{
    while (*str) {

        /* the i960 monitor doesn't accept any new character if it has something to say */
        while (fore200e_monitor_getc(fore200e) >= 0);

        fore200e_monitor_putc(fore200e, *str++);
    }

    while (fore200e_monitor_getc(fore200e) >= 0);
}


/* ask the i960 monitor to jump to the firmware entry point, then poll the
   boot status word until the cp reports it is running */
static int __devinit
fore200e_start_fw(struct fore200e* fore200e)
{
    int               ok;
    char              cmd[ 48 ];
    struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data;

    DPRINTK(2, "device %s firmware being started\n", fore200e->name);

#if defined(__sparc_v9__)
    /* reported to be required by SBA cards on some sparc64 hosts */
    fore200e_spin(100);
#endif

    sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));

    fore200e_monitor_puts(fore200e, cmd);

    ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000);
    if (ok == 0) {
        printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
        return -ENODEV;
    }

    printk(FORE200E "device %s firmware started\n", fore200e->name);

    fore200e->state = FORE200E_STATE_START_FW;
    return 0;
}


/* copy the built-in firmware image, word by word, into the adapter's
   on-board memory after validating the image's magic number */
static int __devinit
fore200e_load_fw(struct fore200e* fore200e)
{
    u32* fw_data = (u32*) fore200e->bus->fw_data;
    u32  fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32);

    struct fw_header* fw_header = (struct fw_header*) fw_data;

    u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);

    DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
            fore200e->name, load_addr, fw_size);

    if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
        printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
        return -ENODEV;
    }

    for (; fw_size--; fw_data++, load_addr++)
        fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);

    fore200e->state = FORE200E_STATE_LOAD_FW;
    return 0;
}


static int __devinit
fore200e_register(struct fore200e* fore200e) 2617 { 2618 struct atm_dev* atm_dev; 2619 2620 DPRINTK(2, "device %s being registered\n", fore200e->name); 2621 2622 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1, 2623 NULL); 2624 if (atm_dev == NULL) { 2625 printk(FORE200E "unable to register device %s\n", fore200e->name); 2626 return -ENODEV; 2627 } 2628 2629 atm_dev->dev_data = fore200e; 2630 fore200e->atm_dev = atm_dev; 2631 2632 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS; 2633 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS; 2634 2635 fore200e->available_cell_rate = ATM_OC3_PCR; 2636 2637 fore200e->state = FORE200E_STATE_REGISTER; 2638 return 0; 2639 } 2640 2641 2642 static int __devinit 2643 fore200e_init(struct fore200e* fore200e) 2644 { 2645 if (fore200e_register(fore200e) < 0) 2646 return -ENODEV; 2647 2648 if (fore200e->bus->configure(fore200e) < 0) 2649 return -ENODEV; 2650 2651 if (fore200e->bus->map(fore200e) < 0) 2652 return -ENODEV; 2653 2654 if (fore200e_reset(fore200e, 1) < 0) 2655 return -ENODEV; 2656 2657 if (fore200e_load_fw(fore200e) < 0) 2658 return -ENODEV; 2659 2660 if (fore200e_start_fw(fore200e) < 0) 2661 return -ENODEV; 2662 2663 if (fore200e_initialize(fore200e) < 0) 2664 return -ENODEV; 2665 2666 if (fore200e_init_cmd_queue(fore200e) < 0) 2667 return -ENOMEM; 2668 2669 if (fore200e_init_tx_queue(fore200e) < 0) 2670 return -ENOMEM; 2671 2672 if (fore200e_init_rx_queue(fore200e) < 0) 2673 return -ENOMEM; 2674 2675 if (fore200e_init_bs_queue(fore200e) < 0) 2676 return -ENOMEM; 2677 2678 if (fore200e_alloc_rx_buf(fore200e) < 0) 2679 return -ENOMEM; 2680 2681 if (fore200e_get_esi(fore200e) < 0) 2682 return -EIO; 2683 2684 if (fore200e_irq_request(fore200e) < 0) 2685 return -EBUSY; 2686 2687 fore200e_supply(fore200e); 2688 2689 /* all done, board initialization is now complete */ 2690 fore200e->state = FORE200E_STATE_COMPLETE; 2691 return 0; 2692 } 2693 2694 2695 static int __devinit 2696 fore200e_pca_detect(struct 
pci_dev *pci_dev, const struct pci_device_id *pci_ent) 2697 { 2698 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data; 2699 struct fore200e* fore200e; 2700 int err = 0; 2701 static int index = 0; 2702 2703 if (pci_enable_device(pci_dev)) { 2704 err = -EINVAL; 2705 goto out; 2706 } 2707 2708 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); 2709 if (fore200e == NULL) { 2710 err = -ENOMEM; 2711 goto out_disable; 2712 } 2713 2714 fore200e->bus = bus; 2715 fore200e->bus_dev = pci_dev; 2716 fore200e->irq = pci_dev->irq; 2717 fore200e->phys_base = pci_resource_start(pci_dev, 0); 2718 2719 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1); 2720 2721 pci_set_master(pci_dev); 2722 2723 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n", 2724 fore200e->bus->model_name, 2725 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq)); 2726 2727 sprintf(fore200e->name, "%s-%d", bus->model_name, index); 2728 2729 err = fore200e_init(fore200e); 2730 if (err < 0) { 2731 fore200e_shutdown(fore200e); 2732 goto out_free; 2733 } 2734 2735 ++index; 2736 pci_set_drvdata(pci_dev, fore200e); 2737 2738 out: 2739 return err; 2740 2741 out_free: 2742 kfree(fore200e); 2743 out_disable: 2744 pci_disable_device(pci_dev); 2745 goto out; 2746 } 2747 2748 2749 static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev) 2750 { 2751 struct fore200e *fore200e; 2752 2753 fore200e = pci_get_drvdata(pci_dev); 2754 2755 fore200e_shutdown(fore200e); 2756 kfree(fore200e); 2757 pci_disable_device(pci_dev); 2758 } 2759 2760 2761 #ifdef CONFIG_ATM_FORE200E_PCA 2762 static struct pci_device_id fore200e_pca_tbl[] = { 2763 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID, 2764 0, 0, (unsigned long) &fore200e_bus[0] }, 2765 { 0, } 2766 }; 2767 2768 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl); 2769 2770 static struct pci_driver fore200e_pca_driver = { 2771 .name = "fore_200e", 2772 .probe = fore200e_pca_detect, 2773 .remove = 
__devexit_p(fore200e_pca_remove_one), 2774 .id_table = fore200e_pca_tbl, 2775 }; 2776 #endif 2777 2778 2779 static int __init 2780 fore200e_module_init(void) 2781 { 2782 const struct fore200e_bus* bus; 2783 struct fore200e* fore200e; 2784 int index; 2785 2786 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n"); 2787 2788 /* for each configured bus interface */ 2789 for (bus = fore200e_bus; bus->model_name; bus++) { 2790 2791 /* detect all boards present on that bus */ 2792 for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) { 2793 2794 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n", 2795 fore200e->bus->model_name, 2796 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq)); 2797 2798 sprintf(fore200e->name, "%s-%d", bus->model_name, index); 2799 2800 if (fore200e_init(fore200e) < 0) { 2801 2802 fore200e_shutdown(fore200e); 2803 break; 2804 } 2805 2806 list_add(&fore200e->entry, &fore200e_boards); 2807 } 2808 } 2809 2810 #ifdef CONFIG_ATM_FORE200E_PCA 2811 if (!pci_register_driver(&fore200e_pca_driver)) 2812 return 0; 2813 #endif 2814 2815 if (!list_empty(&fore200e_boards)) 2816 return 0; 2817 2818 return -ENODEV; 2819 } 2820 2821 2822 static void __exit 2823 fore200e_module_cleanup(void) 2824 { 2825 struct fore200e *fore200e, *next; 2826 2827 #ifdef CONFIG_ATM_FORE200E_PCA 2828 pci_unregister_driver(&fore200e_pca_driver); 2829 #endif 2830 2831 list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) { 2832 fore200e_shutdown(fore200e); 2833 kfree(fore200e); 2834 } 2835 DPRINTK(1, "module being removed\n"); 2836 } 2837 2838 2839 static int 2840 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page) 2841 { 2842 struct fore200e* fore200e = FORE200E_DEV(dev); 2843 struct fore200e_vcc* fore200e_vcc; 2844 struct atm_vcc* vcc; 2845 int i, len, left = *pos; 2846 unsigned long flags; 2847 2848 if (!left--) { 2849 2850 if (fore200e_getstats(fore200e) < 0) 2851 return -EIO; 2852 2853 len = 
sprintf(page,"\n" 2854 " device:\n" 2855 " internal name:\t\t%s\n", fore200e->name); 2856 2857 /* print bus-specific information */ 2858 if (fore200e->bus->proc_read) 2859 len += fore200e->bus->proc_read(fore200e, page + len); 2860 2861 len += sprintf(page + len, 2862 " interrupt line:\t\t%s\n" 2863 " physical base address:\t0x%p\n" 2864 " virtual base address:\t0x%p\n" 2865 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n" 2866 " board serial number:\t\t%d\n\n", 2867 fore200e_irq_itoa(fore200e->irq), 2868 (void*)fore200e->phys_base, 2869 fore200e->virt_base, 2870 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2], 2871 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5], 2872 fore200e->esi[4] * 256 + fore200e->esi[5]); 2873 2874 return len; 2875 } 2876 2877 if (!left--) 2878 return sprintf(page, 2879 " free small bufs, scheme 1:\t%d\n" 2880 " free large bufs, scheme 1:\t%d\n" 2881 " free small bufs, scheme 2:\t%d\n" 2882 " free large bufs, scheme 2:\t%d\n", 2883 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count, 2884 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count, 2885 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count, 2886 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count); 2887 2888 if (!left--) { 2889 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat); 2890 2891 len = sprintf(page,"\n\n" 2892 " cell processor:\n" 2893 " heartbeat state:\t\t"); 2894 2895 if (hb >> 16 != 0xDEAD) 2896 len += sprintf(page + len, "0x%08x\n", hb); 2897 else 2898 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF); 2899 2900 return len; 2901 } 2902 2903 if (!left--) { 2904 static const char* media_name[] = { 2905 "unshielded twisted pair", 2906 "multimode optical fiber ST", 2907 "multimode optical fiber SC", 2908 "single-mode optical fiber ST", 2909 "single-mode optical fiber SC", 2910 "unknown" 2911 }; 2912 2913 static const char* oc3_mode[] 
= { 2914 "normal operation", 2915 "diagnostic loopback", 2916 "line loopback", 2917 "unknown" 2918 }; 2919 2920 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release); 2921 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release); 2922 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision); 2923 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type)); 2924 u32 oc3_index; 2925 2926 if ((media_index < 0) || (media_index > 4)) 2927 media_index = 5; 2928 2929 switch (fore200e->loop_mode) { 2930 case ATM_LM_NONE: oc3_index = 0; 2931 break; 2932 case ATM_LM_LOC_PHY: oc3_index = 1; 2933 break; 2934 case ATM_LM_RMT_PHY: oc3_index = 2; 2935 break; 2936 default: oc3_index = 3; 2937 } 2938 2939 return sprintf(page, 2940 " firmware release:\t\t%d.%d.%d\n" 2941 " monitor release:\t\t%d.%d\n" 2942 " media type:\t\t\t%s\n" 2943 " OC-3 revision:\t\t0x%x\n" 2944 " OC-3 mode:\t\t\t%s", 2945 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24, 2946 mon960_release >> 16, mon960_release << 16 >> 16, 2947 media_name[ media_index ], 2948 oc3_revision, 2949 oc3_mode[ oc3_index ]); 2950 } 2951 2952 if (!left--) { 2953 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor; 2954 2955 return sprintf(page, 2956 "\n\n" 2957 " monitor:\n" 2958 " version number:\t\t%d\n" 2959 " boot status word:\t\t0x%08x\n", 2960 fore200e->bus->read(&cp_monitor->mon_version), 2961 fore200e->bus->read(&cp_monitor->bstat)); 2962 } 2963 2964 if (!left--) 2965 return sprintf(page, 2966 "\n" 2967 " device statistics:\n" 2968 " 4b5b:\n" 2969 " crc_header_errors:\t\t%10u\n" 2970 " framing_errors:\t\t%10u\n", 2971 cpu_to_be32(fore200e->stats->phy.crc_header_errors), 2972 cpu_to_be32(fore200e->stats->phy.framing_errors)); 2973 2974 if (!left--) 2975 return sprintf(page, "\n" 2976 " OC-3:\n" 2977 " section_bip8_errors:\t%10u\n" 2978 " path_bip8_errors:\t\t%10u\n" 2979 " line_bip24_errors:\t\t%10u\n" 2980 " 
line_febe_errors:\t\t%10u\n" 2981 " path_febe_errors:\t\t%10u\n" 2982 " corr_hcs_errors:\t\t%10u\n" 2983 " ucorr_hcs_errors:\t\t%10u\n", 2984 cpu_to_be32(fore200e->stats->oc3.section_bip8_errors), 2985 cpu_to_be32(fore200e->stats->oc3.path_bip8_errors), 2986 cpu_to_be32(fore200e->stats->oc3.line_bip24_errors), 2987 cpu_to_be32(fore200e->stats->oc3.line_febe_errors), 2988 cpu_to_be32(fore200e->stats->oc3.path_febe_errors), 2989 cpu_to_be32(fore200e->stats->oc3.corr_hcs_errors), 2990 cpu_to_be32(fore200e->stats->oc3.ucorr_hcs_errors)); 2991 2992 if (!left--) 2993 return sprintf(page,"\n" 2994 " ATM:\t\t\t\t cells\n" 2995 " TX:\t\t\t%10u\n" 2996 " RX:\t\t\t%10u\n" 2997 " vpi out of range:\t\t%10u\n" 2998 " vpi no conn:\t\t%10u\n" 2999 " vci out of range:\t\t%10u\n" 3000 " vci no conn:\t\t%10u\n", 3001 cpu_to_be32(fore200e->stats->atm.cells_transmitted), 3002 cpu_to_be32(fore200e->stats->atm.cells_received), 3003 cpu_to_be32(fore200e->stats->atm.vpi_bad_range), 3004 cpu_to_be32(fore200e->stats->atm.vpi_no_conn), 3005 cpu_to_be32(fore200e->stats->atm.vci_bad_range), 3006 cpu_to_be32(fore200e->stats->atm.vci_no_conn)); 3007 3008 if (!left--) 3009 return sprintf(page,"\n" 3010 " AAL0:\t\t\t cells\n" 3011 " TX:\t\t\t%10u\n" 3012 " RX:\t\t\t%10u\n" 3013 " dropped:\t\t\t%10u\n", 3014 cpu_to_be32(fore200e->stats->aal0.cells_transmitted), 3015 cpu_to_be32(fore200e->stats->aal0.cells_received), 3016 cpu_to_be32(fore200e->stats->aal0.cells_dropped)); 3017 3018 if (!left--) 3019 return sprintf(page,"\n" 3020 " AAL3/4:\n" 3021 " SAR sublayer:\t\t cells\n" 3022 " TX:\t\t\t%10u\n" 3023 " RX:\t\t\t%10u\n" 3024 " dropped:\t\t\t%10u\n" 3025 " CRC errors:\t\t%10u\n" 3026 " protocol errors:\t\t%10u\n\n" 3027 " CS sublayer:\t\t PDUs\n" 3028 " TX:\t\t\t%10u\n" 3029 " RX:\t\t\t%10u\n" 3030 " dropped:\t\t\t%10u\n" 3031 " protocol errors:\t\t%10u\n", 3032 cpu_to_be32(fore200e->stats->aal34.cells_transmitted), 3033 cpu_to_be32(fore200e->stats->aal34.cells_received), 3034 
cpu_to_be32(fore200e->stats->aal34.cells_dropped), 3035 cpu_to_be32(fore200e->stats->aal34.cells_crc_errors), 3036 cpu_to_be32(fore200e->stats->aal34.cells_protocol_errors), 3037 cpu_to_be32(fore200e->stats->aal34.cspdus_transmitted), 3038 cpu_to_be32(fore200e->stats->aal34.cspdus_received), 3039 cpu_to_be32(fore200e->stats->aal34.cspdus_dropped), 3040 cpu_to_be32(fore200e->stats->aal34.cspdus_protocol_errors)); 3041 3042 if (!left--) 3043 return sprintf(page,"\n" 3044 " AAL5:\n" 3045 " SAR sublayer:\t\t cells\n" 3046 " TX:\t\t\t%10u\n" 3047 " RX:\t\t\t%10u\n" 3048 " dropped:\t\t\t%10u\n" 3049 " congestions:\t\t%10u\n\n" 3050 " CS sublayer:\t\t PDUs\n" 3051 " TX:\t\t\t%10u\n" 3052 " RX:\t\t\t%10u\n" 3053 " dropped:\t\t\t%10u\n" 3054 " CRC errors:\t\t%10u\n" 3055 " protocol errors:\t\t%10u\n", 3056 cpu_to_be32(fore200e->stats->aal5.cells_transmitted), 3057 cpu_to_be32(fore200e->stats->aal5.cells_received), 3058 cpu_to_be32(fore200e->stats->aal5.cells_dropped), 3059 cpu_to_be32(fore200e->stats->aal5.congestion_experienced), 3060 cpu_to_be32(fore200e->stats->aal5.cspdus_transmitted), 3061 cpu_to_be32(fore200e->stats->aal5.cspdus_received), 3062 cpu_to_be32(fore200e->stats->aal5.cspdus_dropped), 3063 cpu_to_be32(fore200e->stats->aal5.cspdus_crc_errors), 3064 cpu_to_be32(fore200e->stats->aal5.cspdus_protocol_errors)); 3065 3066 if (!left--) 3067 return sprintf(page,"\n" 3068 " AUX:\t\t allocation failures\n" 3069 " small b1:\t\t\t%10u\n" 3070 " large b1:\t\t\t%10u\n" 3071 " small b2:\t\t\t%10u\n" 3072 " large b2:\t\t\t%10u\n" 3073 " RX PDUs:\t\t\t%10u\n" 3074 " TX PDUs:\t\t\t%10lu\n", 3075 cpu_to_be32(fore200e->stats->aux.small_b1_failed), 3076 cpu_to_be32(fore200e->stats->aux.large_b1_failed), 3077 cpu_to_be32(fore200e->stats->aux.small_b2_failed), 3078 cpu_to_be32(fore200e->stats->aux.large_b2_failed), 3079 cpu_to_be32(fore200e->stats->aux.rpd_alloc_failed), 3080 fore200e->tx_sat); 3081 3082 if (!left--) 3083 return sprintf(page,"\n" 3084 " receive 
carrier:\t\t\t%s\n", 3085 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!"); 3086 3087 if (!left--) { 3088 return sprintf(page,"\n" 3089 " VCCs:\n address VPI VCI AAL " 3090 "TX PDUs TX min/max size RX PDUs RX min/max size\n"); 3091 } 3092 3093 for (i = 0; i < NBR_CONNECT; i++) { 3094 3095 vcc = fore200e->vc_map[i].vcc; 3096 3097 if (vcc == NULL) 3098 continue; 3099 3100 spin_lock_irqsave(&fore200e->q_lock, flags); 3101 3102 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) { 3103 3104 fore200e_vcc = FORE200E_VCC(vcc); 3105 ASSERT(fore200e_vcc); 3106 3107 len = sprintf(page, 3108 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n", 3109 (u32)(unsigned long)vcc, 3110 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 3111 fore200e_vcc->tx_pdu, 3112 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu, 3113 fore200e_vcc->tx_max_pdu, 3114 fore200e_vcc->rx_pdu, 3115 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu, 3116 fore200e_vcc->rx_max_pdu); 3117 3118 spin_unlock_irqrestore(&fore200e->q_lock, flags); 3119 return len; 3120 } 3121 3122 spin_unlock_irqrestore(&fore200e->q_lock, flags); 3123 } 3124 3125 return 0; 3126 } 3127 3128 module_init(fore200e_module_init); 3129 module_exit(fore200e_module_cleanup); 3130 3131 3132 static const struct atmdev_ops fore200e_ops = 3133 { 3134 .open = fore200e_open, 3135 .close = fore200e_close, 3136 .ioctl = fore200e_ioctl, 3137 .getsockopt = fore200e_getsockopt, 3138 .setsockopt = fore200e_setsockopt, 3139 .send = fore200e_send, 3140 .change_qos = fore200e_change_qos, 3141 .proc_read = fore200e_proc_read, 3142 .owner = THIS_MODULE 3143 }; 3144 3145 3146 #ifdef CONFIG_ATM_FORE200E_PCA 3147 extern const unsigned char _fore200e_pca_fw_data[]; 3148 extern const unsigned int _fore200e_pca_fw_size; 3149 #endif 3150 #ifdef CONFIG_ATM_FORE200E_SBA 3151 extern const unsigned char _fore200e_sba_fw_data[]; 3152 extern const unsigned int _fore200e_sba_fw_size; 3153 #endif 3154 3155 static 
const struct fore200e_bus fore200e_bus[] = { 3156 #ifdef CONFIG_ATM_FORE200E_PCA 3157 { "PCA-200E", "pca200e", 32, 4, 32, 3158 _fore200e_pca_fw_data, &_fore200e_pca_fw_size, 3159 fore200e_pca_read, 3160 fore200e_pca_write, 3161 fore200e_pca_dma_map, 3162 fore200e_pca_dma_unmap, 3163 fore200e_pca_dma_sync_for_cpu, 3164 fore200e_pca_dma_sync_for_device, 3165 fore200e_pca_dma_chunk_alloc, 3166 fore200e_pca_dma_chunk_free, 3167 NULL, 3168 fore200e_pca_configure, 3169 fore200e_pca_map, 3170 fore200e_pca_reset, 3171 fore200e_pca_prom_read, 3172 fore200e_pca_unmap, 3173 NULL, 3174 fore200e_pca_irq_check, 3175 fore200e_pca_irq_ack, 3176 fore200e_pca_proc_read, 3177 }, 3178 #endif 3179 #ifdef CONFIG_ATM_FORE200E_SBA 3180 { "SBA-200E", "sba200e", 32, 64, 32, 3181 _fore200e_sba_fw_data, &_fore200e_sba_fw_size, 3182 fore200e_sba_read, 3183 fore200e_sba_write, 3184 fore200e_sba_dma_map, 3185 fore200e_sba_dma_unmap, 3186 fore200e_sba_dma_sync_for_cpu, 3187 fore200e_sba_dma_sync_for_device, 3188 fore200e_sba_dma_chunk_alloc, 3189 fore200e_sba_dma_chunk_free, 3190 fore200e_sba_detect, 3191 fore200e_sba_configure, 3192 fore200e_sba_map, 3193 fore200e_sba_reset, 3194 fore200e_sba_prom_read, 3195 fore200e_sba_unmap, 3196 fore200e_sba_irq_enable, 3197 fore200e_sba_irq_check, 3198 fore200e_sba_irq_ack, 3199 fore200e_sba_proc_read, 3200 }, 3201 #endif 3202 {} 3203 }; 3204 3205 #ifdef MODULE_LICENSE 3206 MODULE_LICENSE("GPL"); 3207 #endif 3208