1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 A FORE Systems 200E-series driver for ATM on Linux. 4 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003. 5 6 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de). 7 8 This driver simultaneously supports PCA-200E and SBA-200E adapters 9 on i386, alpha (untested), powerpc, sparc and sparc64 architectures. 10 11 */ 12 13 14 #include <linux/kernel.h> 15 #include <linux/slab.h> 16 #include <linux/init.h> 17 #include <linux/capability.h> 18 #include <linux/interrupt.h> 19 #include <linux/bitops.h> 20 #include <linux/pci.h> 21 #include <linux/module.h> 22 #include <linux/atmdev.h> 23 #include <linux/sonet.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/delay.h> 26 #include <linux/firmware.h> 27 #include <linux/pgtable.h> 28 #include <asm/io.h> 29 #include <asm/string.h> 30 #include <asm/page.h> 31 #include <asm/irq.h> 32 #include <asm/dma.h> 33 #include <asm/byteorder.h> 34 #include <linux/uaccess.h> 35 #include <linux/atomic.h> 36 37 #ifdef CONFIG_SBUS 38 #include <linux/of.h> 39 #include <linux/platform_device.h> 40 #include <asm/idprom.h> 41 #include <asm/openprom.h> 42 #include <asm/oplib.h> 43 #endif 44 45 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */ 46 #define FORE200E_USE_TASKLET 47 #endif 48 49 #if 0 /* enable the debugging code of the buffer supply queues */ 50 #define FORE200E_BSQ_DEBUG 51 #endif 52 53 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */ 54 #define FORE200E_52BYTE_AAL0_SDU 55 #endif 56 57 #include "fore200e.h" 58 #include "suni.h" 59 60 #define FORE200E_VERSION "0.3e" 61 62 #define FORE200E "fore200e: " 63 64 #if 0 /* override .config */ 65 #define CONFIG_ATM_FORE200E_DEBUG 1 66 #endif 67 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0) 68 #define DPRINTK(level, format, args...) 
do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
    printk(FORE200E format, ##args); } while (0)
#else
#define DPRINTK(level, format, args...) do {} while (0)
#endif


/* distance (in bytes) from addr up to the next 'alignment' boundary;
   alignment is assumed to be a power of two */
#define FORE200E_ALIGN(addr, alignment) \
    ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))

/* DMA address of the index-th element of an array of 'type' starting at dma_addr */
#define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))

/* virtual address of the index-th element of an array of 'type' starting at virt_addr */
#define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])

/* advance a circular queue index in place, wrapping at 'modulo' */
#define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))

#if 1
/* NOTE(review): braceless-if macro - an 'else' written right after
   ASSERT(x); would bind to this hidden 'if'. Kept as-is. */
#define ASSERT(expr) if (!(expr)) { \
    printk(FORE200E "assertion failed! %s[%d]: %s\n", \
           __func__, __LINE__, #expr); \
    panic(FORE200E "%s", __func__); \
}
#else
#define ASSERT(expr) do {} while (0)
#endif


static const struct atmdev_ops fore200e_ops;

MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);

/* number of rx buffers per buffer scheme/magnitude, indexed [scheme][magn] */
static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
    { BUFFER_S1_NBR, BUFFER_L1_NBR },
    { BUFFER_S2_NBR, BUFFER_L2_NBR }
};

/* size of one rx buffer per buffer scheme/magnitude, indexed [scheme][magn] */
static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
    { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
    { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
};


#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
#endif


#if 0 /* currently unused */
/* map a firmware AAL code back to the generic ATM layer value */
static int
fore200e_fore2atm_aal(enum fore200e_aal aal)
{
    switch(aal) {
    case FORE200E_AAL0:  return ATM_AAL0;
    case FORE200E_AAL34: return ATM_AAL34;
    case FORE200E_AAL5:  return ATM_AAL5;
    }

    return -EINVAL;
}
#endif


/* map a generic ATM layer AAL value to the firmware encoding;
   AAL1 and AAL2 are carried over the AAL5 encoding here.
   NOTE(review): returns -EINVAL through an enum return type for
   unknown AALs - callers treat the result as an int. */
static enum fore200e_aal
fore200e_atm2fore_aal(int aal)
{
    switch(aal) {
    case ATM_AAL0:  return FORE200E_AAL0;
    case ATM_AAL34: return FORE200E_AAL34;
    case ATM_AAL1:
    case ATM_AAL2:
    case ATM_AAL5:  return FORE200E_AAL5;
    }

    return -EINVAL;
}


/* render an IRQ number as a decimal string; returns a static buffer,
   so not reentrant (used only on probe/remove paths) */
static char*
fore200e_irq_itoa(int irq)
{
    static char str[8];
    sprintf(str, "%d", irq);
    return str;
}


/* allocate and align a chunk of memory intended to hold the data being exchanged
   between the driver and the adapter (using streaming DVMA) */

static int
fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
{
    unsigned long offset = 0;

    /* kmalloc memory is at least word-aligned, so small alignments are free */
    if (alignment <= sizeof(int))
        alignment = 0;

    /* over-allocate so an aligned sub-region of 'size' bytes always fits */
    chunk->alloc_size = size + alignment;
    chunk->direction  = direction;

    chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL);
    if (chunk->alloc_addr == NULL)
        return -ENOMEM;

    if (alignment > 0)
        offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);

    chunk->align_addr = chunk->alloc_addr + offset;

    /* NOTE(review): chunk->dma_size is not assigned here, yet
       fore200e_chunk_free() unmaps with chunk->dma_size - confirm it is
       set via struct chunk's definition in fore200e.h */
    chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr,
                                     size, direction);
    if (dma_mapping_error(fore200e->dev, chunk->dma_addr)) {
        kfree(chunk->alloc_addr);
        return -ENOMEM;
    }
    return 0;
}


/* free a chunk of memory */

static void
fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size,
                     chunk->direction);
    kfree(chunk->alloc_addr);
}

/*
 * Allocate a DMA consistent chunk of memory intended to act as a communication
 * mechanism (to hold descriptors, status, queues, etc.) shared by the driver
 * and the adapter.
 */
static int
fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
                         int size, int nbr, int alignment)
{
    /* returned chunks are page-aligned */
    chunk->alloc_size = size * nbr;
    chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
                                           &chunk->dma_addr, GFP_KERNEL);
    if (!chunk->alloc_addr)
        return -ENOMEM;
    /* coherent allocations are already aligned, no extra offset needed */
    chunk->align_addr = chunk->alloc_addr;
    return 0;
}

/*
 * Free a DMA consistent chunk of memory.
 */
static void
fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr,
                      chunk->dma_addr);
}

/* busy-wait for (at least) 'msecs' milliseconds.
   NOTE(review): spins without cpu_relax()/schedule(); only used for the
   short delays around board resets */
static void
fore200e_spin(int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    while (time_before(jiffies, timeout));
}


/* poll a host-memory status word until it equals 'val', an error bit is
   raised, or 'msecs' milliseconds elapse; non-zero iff 'val' was seen */
static int
fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    int ok;

    mb();   /* ensure previously issued writes are visible before polling */
    do {
        if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
            break;

    } while (time_before(jiffies, timeout));

#if 1
    if (!ok) {
        printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
               *addr, val);
    }
#endif

    return ok;
}


/* same as fore200e_poll(), but for a device (I/O mapped) word read
   through the bus-specific accessor; no error-bit short-circuit here */
static int
fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    int ok;

    do {
        if ((ok = (fore200e->bus->read(addr) == val)))
            break;

    } while (time_before(jiffies, timeout));

#if 1
    if (!ok) {
        printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
               fore200e->bus->read(addr), val);
    }
#endif

    return ok;
}


/* release the streaming-DMA chunks backing every allocated rx buffer */
static void
fore200e_free_rx_buf(struct fore200e* fore200e)
{
    int scheme, magn, nbr;
    struct buffer* buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
        for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

            if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {

                for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {

                    struct chunk* data = &buffer[ nbr ].data;

                    /* only free chunks that were actually allocated */
                    if (data->alloc_addr != NULL)
                        fore200e_chunk_free(fore200e, data);
                }
            }
        }
    }
}


/* tear down the buffer supply queues: free the per-queue status words
   and rbd blocks allocated from DMA-coherent memory */
static void
fore200e_uninit_bs_queue(struct fore200e* fore200e)
{
    int scheme, magn;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
        for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

            struct chunk* status    = &fore200e->host_bsq[ scheme ][ magn ].status;
            struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;

            if (status->alloc_addr)
                fore200e_dma_chunk_free(fore200e, status);

            if (rbd_block->alloc_addr)
                fore200e_dma_chunk_free(fore200e, rbd_block);
        }
    }
}


/* reset the adapter; when 'diag' is set, also wait for the on-board
   self-test to complete (0 on success, -ENODEV on self-test failure) */
static int
fore200e_reset(struct fore200e* fore200e, int diag)
{
    int ok;

    fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;

    /* request a cold start before pulsing the reset line */
    fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);

    fore200e->bus->reset(fore200e);

    if (diag) {
        ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
        if (ok == 0) {

            printk(FORE200E "device %s self-test failed\n", fore200e->name);
            return -ENODEV;
        }

        printk(FORE200E "device %s self-test passed\n", fore200e->name);

        fore200e->state = FORE200E_STATE_RESET;
    }

    return 0;
}


/* undo, in reverse order, everything done up to the current init state;
   used both on probe failure and on device removal */
static void
fore200e_shutdown(struct fore200e* fore200e)
{
    printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
           fore200e->name, fore200e->phys_base,
           fore200e_irq_itoa(fore200e->irq));

    if (fore200e->state > FORE200E_STATE_RESET) {
        /* first, reset the board
           to prevent further interrupts or data transfers */
        fore200e_reset(fore200e, 0);
    }

    /* then, release all allocated resources; each case tears down one
       init step and falls through to the next lower state */
    switch(fore200e->state) {

    case FORE200E_STATE_COMPLETE:
        kfree(fore200e->stats);

        fallthrough;
    case FORE200E_STATE_IRQ:
        free_irq(fore200e->irq, fore200e->atm_dev);
#ifdef FORE200E_USE_TASKLET
        tasklet_kill(&fore200e->tx_tasklet);
        tasklet_kill(&fore200e->rx_tasklet);
#endif

        fallthrough;
    case FORE200E_STATE_ALLOC_BUF:
        fore200e_free_rx_buf(fore200e);

        fallthrough;
    case FORE200E_STATE_INIT_BSQ:
        fore200e_uninit_bs_queue(fore200e);

        fallthrough;
    case FORE200E_STATE_INIT_RXQ:
        fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status);
        fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);

        fallthrough;
    case FORE200E_STATE_INIT_TXQ:
        fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status);
        fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd);

        fallthrough;
    case FORE200E_STATE_INIT_CMDQ:
        fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status);

        fallthrough;
    case FORE200E_STATE_INITIALIZE:
        /* nothing to do for that state */
        /* NOTE(review): implicit fall-through (no 'fallthrough;' marker)
           for the three no-op states below, down to STATE_MAP */

    case FORE200E_STATE_START_FW:
        /* nothing to do for that state */

    case FORE200E_STATE_RESET:
        /* nothing to do for that state */

    case FORE200E_STATE_MAP:
        fore200e->bus->unmap(fore200e);

        fallthrough;
    case FORE200E_STATE_CONFIGURE:
        /* nothing to do for that state; implicitly falls through */

    case FORE200E_STATE_REGISTER:
        /* XXX shouldn't we *start* by deregistering the device?
         */
        atm_dev_deregister(fore200e->atm_dev);

        fallthrough;
    case FORE200E_STATE_BLANK:
        /* nothing to do for that state */
        break;
    }
}


#ifdef CONFIG_PCI

/* read a 32-bit word from PCA-200E slave RAM */
static u32 fore200e_pca_read(volatile u32 __iomem *addr)
{
    /* on big-endian hosts, the board is configured to convert
       the endianness of slave RAM accesses */
    return le32_to_cpu(readl(addr));
}


/* write a 32-bit word to PCA-200E slave RAM */
static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
{
    /* on big-endian hosts, the board is configured to convert
       the endianness of slave RAM accesses */
    writel(cpu_to_le32(val), addr);
}

/* non-zero iff the board posted the pending interrupt */
static int
fore200e_pca_irq_check(struct fore200e* fore200e)
{
    /* this is a 1 bit register */
    int irq_posted = readl(fore200e->regs.pca.psr);

#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
    if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
        DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
    }
#endif

    return irq_posted;
}


/* acknowledge the interrupt at the board level */
static void
fore200e_pca_irq_ack(struct fore200e* fore200e)
{
    writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
}


/* hard-reset the board by pulsing the reset bit in the HCR */
static void
fore200e_pca_reset(struct fore200e* fore200e)
{
    writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
    fore200e_spin(10);
    writel(0, fore200e->regs.pca.hcr);
}


/* map the board's I/O space and locate the PCA-specific registers */
static int fore200e_pca_map(struct fore200e* fore200e)
{
    DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);

    fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);

    if (fore200e->virt_base == NULL) {
        printk(FORE200E "can't map device %s\n", fore200e->name);
        return -EFAULT;
    }

    DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);

    /* gain access to the PCA specific registers */
    fore200e->regs.pca.hcr = fore200e->virt_base +
        PCA200E_HCR_OFFSET;
    fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
    fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;

    fore200e->state = FORE200E_STATE_MAP;
    return 0;
}


/* unmap the board's I/O space (safe to call when mapping failed) */
static void
fore200e_pca_unmap(struct fore200e* fore200e)
{
    DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);

    if (fore200e->virt_base != NULL)
        iounmap(fore200e->virt_base);
}


/* PCI-level configuration: sanity-check the IRQ, tune the board's PCI
   master control register, and raise the PCI latency timer */
static int fore200e_pca_configure(struct fore200e *fore200e)
{
    struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
    u8 master_ctrl, latency;

    DPRINTK(2, "device %s being configured\n", fore200e->name);

    if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
        printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
        return -EIO;
    }

    pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);

    master_ctrl = master_ctrl
#if defined(__BIG_ENDIAN)
        /* request the PCA board to convert the endianness of slave RAM accesses */
        | PCA200E_CTRL_CONVERT_ENDIAN
#endif
#if 0
        | PCA200E_CTRL_DIS_CACHE_RD
        | PCA200E_CTRL_DIS_WRT_INVAL
        | PCA200E_CTRL_ENA_CONT_REQ_MODE
        | PCA200E_CTRL_2_CACHE_WRT_INVAL
#endif
        | PCA200E_CTRL_LARGE_PCI_BURSTS;

    pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);

    /* raise latency from 32 (default) to 192, as this seems to prevent NIC
       lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
       this may impact the performances of other PCI devices on the same bus, though */
    latency = 192;
    pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);

    fore200e->state = FORE200E_STATE_CONFIGURE;
    return 0;
}


/* issue an OPCODE_GET_PROM command to the adapter to retrieve its PROM
   contents (MAC address, etc.); 'prom' is filled by device DMA */
static int __init
fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
{
    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct prom_opcode      opcode;
    int                     ok;
    u32                     prom_dma;

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_PROM;
    opcode.pad    = 0;

    prom_dma = dma_map_single(fore200e->dev, prom, sizeof(struct prom_data),
                              DMA_FROM_DEVICE);
    if (dma_mapping_error(fore200e->dev, prom_dma))
        return -ENOMEM;

    fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);

    *entry->status = STATUS_PENDING;

    /* writing the opcode last triggers command execution on the adapter */
    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    dma_unmap_single(fore200e->dev, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);

    if (ok == 0) {
        printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
        return -EIO;
    }

#if defined(__BIG_ENDIAN)

#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))

    /* MAC address is stored as little-endian */
    swap_here(&prom->mac_addr[0]);
    swap_here(&prom->mac_addr[4]);
#endif

    return 0;
}


/* /proc helper: report the PCI bus/slot/function of the adapter */
static int
fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
{
    struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);

    return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
                   pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
}

/* bus operations for the PCI (PCA-200E) flavour of the adapter;
   note .irq_enable is intentionally absent (PCI boards need none) */
static const struct
fore200e_bus fore200e_pci_ops = {
    .model_name       = "PCA-200E",
    .proc_name        = "pca200e",
    .descr_alignment  = 32,
    .buffer_alignment = 4,
    .status_alignment = 32,
    .read             = fore200e_pca_read,
    .write            = fore200e_pca_write,
    .configure        = fore200e_pca_configure,
    .map              = fore200e_pca_map,
    .reset            = fore200e_pca_reset,
    .prom_read        = fore200e_pca_prom_read,
    .unmap            = fore200e_pca_unmap,
    .irq_check        = fore200e_pca_irq_check,
    .irq_ack          = fore200e_pca_irq_ack,
    .proc_read        = fore200e_pca_proc_read,
};
#endif /* CONFIG_PCI */

#ifdef CONFIG_SBUS

/* read a 32-bit word from SBA-200E registers/RAM */
static u32 fore200e_sba_read(volatile u32 __iomem *addr)
{
    return sbus_readl(addr);
}

/* write a 32-bit word to SBA-200E registers/RAM */
static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
{
    sbus_writel(val, addr);
}

/* enable board interrupts while preserving the sticky HCR bits */
static void fore200e_sba_irq_enable(struct fore200e *fore200e)
{
    u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
    fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
}

/* non-zero iff the board is requesting an interrupt */
static int fore200e_sba_irq_check(struct fore200e *fore200e)
{
    return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
}

/* acknowledge the interrupt while preserving the sticky HCR bits */
static void fore200e_sba_irq_ack(struct fore200e *fore200e)
{
    u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
    fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
}

/* hard-reset the board by pulsing the reset bit in the HCR */
static void fore200e_sba_reset(struct fore200e *fore200e)
{
    fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
    fore200e_spin(10);
    fore200e->bus->write(0, fore200e->regs.sba.hcr);
}

/* map the four SBus resources (HCR/BSR/ISR registers and on-board RAM)
   and configure DVMA burst sizes.
   NOTE(review): only the RAM mapping is checked for failure below; the
   three register mappings are not - confirm of_ioremap cannot fail here */
static int __init fore200e_sba_map(struct fore200e *fore200e)
{
    struct platform_device *op = to_platform_device(fore200e->dev);
    unsigned int bursts;

    /* gain access to the SBA specific registers */
    fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
    fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
    fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
    fore200e->virt_base    = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");

    if (!fore200e->virt_base) {
        printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
        return -EFAULT;
    }

    DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);

    fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */

    /* get the supported DVMA burst sizes */
    bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);

    if (sbus_can_dma_64bit())
        sbus_set_sbus64(&op->dev, bursts);

    fore200e->state = FORE200E_STATE_MAP;
    return 0;
}

/* unmap all four SBus resources mapped by fore200e_sba_map() */
static void fore200e_sba_unmap(struct fore200e *fore200e)
{
    struct platform_device *op = to_platform_device(fore200e->dev);

    of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
    of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
    of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
    of_iounmap(&op->resource[3], fore200e->virt_base,    SBA200E_RAM_LENGTH);
}

/* nothing to configure at the bus level on SBus; just advance the state */
static int __init fore200e_sba_configure(struct fore200e *fore200e)
{
    fore200e->state = FORE200E_STATE_CONFIGURE;
    return 0;
}

/* fetch the MAC address, serial number and hardware revision from
   OpenPROM properties of the device node */
static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
{
    struct platform_device *op = to_platform_device(fore200e->dev);
    const u8 *prop;
    int len;

    prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
    if (!prop)
        return -ENODEV;
    memcpy(&prom->mac_addr[4], prop, 4);

    prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
    if (!prop)
        return -ENODEV;
    memcpy(&prom->mac_addr[2], prop, 4);

    prom->serial_number =
                          of_getintprop_default(op->dev.of_node,
                                                "serialnumber", 0);
    prom->hw_revision = of_getintprop_default(op->dev.of_node,
                                              "promversion", 0);

    return 0;
}

/* /proc helper: report the SBUS slot and device name of the adapter */
static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
{
    struct platform_device *op = to_platform_device(fore200e->dev);
    const struct linux_prom_registers *regs;

    regs = of_get_property(op->dev.of_node, "reg", NULL);

    return sprintf(page, " SBUS slot/device:\t\t%d/'%pOFn'\n",
                   (regs ? regs->which_io : 0), op->dev.of_node);
}

/* bus operations for the SBus (SBA-200E) flavour of the adapter */
static const struct fore200e_bus fore200e_sbus_ops = {
    .model_name       = "SBA-200E",
    .proc_name        = "sba200e",
    .descr_alignment  = 32,
    .buffer_alignment = 64,
    .status_alignment = 32,
    .read             = fore200e_sba_read,
    .write            = fore200e_sba_write,
    .configure        = fore200e_sba_configure,
    .map              = fore200e_sba_map,
    .reset            = fore200e_sba_reset,
    .prom_read        = fore200e_sba_prom_read,
    .unmap            = fore200e_sba_unmap,
    .irq_enable       = fore200e_sba_irq_enable,
    .irq_check        = fore200e_sba_irq_check,
    .irq_ack          = fore200e_sba_irq_ack,
    .proc_read        = fore200e_sba_proc_read,
};
#endif /* CONFIG_SBUS */

/* reap completed tx queue entries: unmap DMA, release skbs, update
   stats; callers hold fore200e->q_lock (see fore200e_irq / tx tasklet) */
static void
fore200e_tx_irq(struct fore200e* fore200e)
{
    struct host_txq*        txq = &fore200e->host_txq;
    struct host_txq_entry*  entry;
    struct atm_vcc*         vcc;
    struct fore200e_vc_map* vc_map;

    /* nothing in flight, nothing to reap */
    if (fore200e->host_txq.txing == 0)
        return;

    for (;;) {

        entry = &txq->host_entry[ txq->tail ];

        /* stop at the first entry the adapter has not completed yet */
        if ((*entry->status & STATUS_COMPLETE) == 0) {
            break;
        }

        DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
                entry, txq->tail, entry->vc_map, entry->skb);

        /* free copy of misaligned data */
        kfree(entry->data);

        /* remove DMA mapping */
        dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
                         DMA_TO_DEVICE);

        vc_map = entry->vc_map;

        /* vcc closed since the time the entry was submitted for tx? */
        if ((vc_map->vcc == NULL) ||
            (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {

            DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
                    fore200e->atm_dev->number);

            dev_kfree_skb_any(entry->skb);
        }
        else {
            ASSERT(vc_map->vcc);

            /* vcc closed then immediately re-opened? */
            if (vc_map->incarn != entry->incarn) {

                /* when a vcc is closed, some PDUs may be still pending in the tx queue.
                   if the same vcc is immediately re-opened, those pending PDUs must
                   not be popped after the completion of their emission, as they refer
                   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
                   would be decremented by the size of the (unrelated) skb, possibly
                   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
                   we thus bind the tx entry to the current incarnation of the vcc
                   when the entry is submitted for tx. When the tx later completes,
                   if the incarnation number of the tx entry does not match the one
                   of the vcc, then this implies that the vcc has been closed then re-opened.
                   we thus just drop the skb here. */

                DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
                        fore200e->atm_dev->number);

                dev_kfree_skb_any(entry->skb);
            }
            else {
                vcc = vc_map->vcc;
                ASSERT(vcc);

                /* notify tx completion */
                if (vcc->pop) {
                    vcc->pop(vcc, entry->skb);
                }
                else {
                    dev_kfree_skb_any(entry->skb);
                }

                /* check error condition */
                if (*entry->status & STATUS_ERROR)
                    atomic_inc(&vcc->stats->tx_err);
                else
                    atomic_inc(&vcc->stats->tx);
            }
        }

        *entry->status = STATUS_FREE;

        fore200e->host_txq.txing--;

        FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
    }
}


#ifdef FORE200E_BSQ_DEBUG
/* debug-only consistency check of a buffer supply queue's free list;
   always returns 0, problems are reported via printk only */
int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
{
    struct buffer* buffer;
    int            count = 0;

    buffer = bsq->freebuf;
    while (buffer) {

        if (buffer->supplied) {
            printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
                   where, scheme, magn, buffer->index);
        }

        if (buffer->magn != magn) {
            printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
                   where, scheme, magn, buffer->index, buffer->magn);
        }

        if (buffer->scheme != scheme) {
            printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
                   where, scheme, magn, buffer->index, buffer->scheme);
        }

        if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
            printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
                   where, scheme, magn, buffer->index);
        }

        count++;
        buffer = buffer->next;
    }

    if (count != bsq->freebuf_count) {
        printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
               where, scheme, magn, count, bsq->freebuf_count);
    }
    return 0;
}
#endif


/* hand batches of RBD_BLK_SIZE free rx buffers back to the adapter,
   one rbd block per buffer supply queue entry */
static void
fore200e_supply(struct fore200e*
                fore200e)
{
    int scheme, magn, i;

    struct host_bsq*       bsq;
    struct host_bsq_entry* entry;
    struct buffer*         buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
        for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

            bsq = &fore200e->host_bsq[ scheme ][ magn ];

#ifdef FORE200E_BSQ_DEBUG
            bsq_audit(1, bsq, scheme, magn);
#endif
            /* only supply full blocks of RBD_BLK_SIZE buffers at a time */
            while (bsq->freebuf_count >= RBD_BLK_SIZE) {

                DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
                        RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);

                entry = &bsq->host_entry[ bsq->head ];

                for (i = 0; i < RBD_BLK_SIZE; i++) {

                    /* take the first buffer in the free buffer list */
                    buffer = bsq->freebuf;
                    if (!buffer) {
                        printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
                               scheme, magn, bsq->freebuf_count);
                        return;
                    }
                    bsq->freebuf = buffer->next;

#ifdef FORE200E_BSQ_DEBUG
                    if (buffer->supplied)
                        printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
                               scheme, magn, buffer->index);
                    buffer->supplied = 1;
#endif
                    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
                    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
                }

                FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);

                /* decrease accordingly the number of free rx buffers */
                bsq->freebuf_count -= RBD_BLK_SIZE;

                /* writing the rbd block address hands the batch to the adapter */
                *entry->status = STATUS_PENDING;
                fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
            }
        }
    }
}


/* reassemble a received PDU from its rx segments into a fresh skb and
   push it up the given vcc; returns 0 on success, -ENOMEM on drop */
static int
fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
{
    struct sk_buff*      skb;
    struct buffer*       buffer;
    struct fore200e_vcc* fore200e_vcc;
    int                  i, pdu_len = 0;
#ifdef FORE200E_52BYTE_AAL0_SDU
    u32                  cell_header = 0;
#endif

    ASSERT(vcc);

    fore200e_vcc = FORE200E_VCC(vcc);
    ASSERT(fore200e_vcc);

#ifdef
       FORE200E_52BYTE_AAL0_SDU
    /* raw AAL0 with the standard 52-byte SDU: prepend the reconstructed
       4-byte cell header expected by atmdump-like applications */
    if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {

        cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
                      (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
                      (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
                      (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
                       rpd->atm_header.clp;
        pdu_len = 4;
    }
#endif

    /* compute total PDU length */
    for (i = 0; i < rpd->nseg; i++)
        pdu_len += rpd->rsd[ i ].length;

    skb = alloc_skb(pdu_len, GFP_ATOMIC);
    if (skb == NULL) {
        DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);

        atomic_inc(&vcc->stats->rx_drop);
        return -ENOMEM;
    }

    __net_timestamp(skb);

#ifdef FORE200E_52BYTE_AAL0_SDU
    if (cell_header) {
        *((u32*)skb_put(skb, 4)) = cell_header;
    }
#endif

    /* reassemble segments */
    for (i = 0; i < rpd->nseg; i++) {

        /* rebuild rx buffer address from rsd handle */
        buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);

        /* Make device DMA transfer visible to CPU. */
        dma_sync_single_for_cpu(fore200e->dev, buffer->data.dma_addr,
                                rpd->rsd[i].length, DMA_FROM_DEVICE);

        skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);

        /* Now let the device get at it again.
         */
        dma_sync_single_for_device(fore200e->dev, buffer->data.dma_addr,
                                   rpd->rsd[i].length, DMA_FROM_DEVICE);
    }

    DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);

    /* update per-vcc rx PDU size statistics */
    if (pdu_len < fore200e_vcc->rx_min_pdu)
        fore200e_vcc->rx_min_pdu = pdu_len;
    if (pdu_len > fore200e_vcc->rx_max_pdu)
        fore200e_vcc->rx_max_pdu = pdu_len;
    fore200e_vcc->rx_pdu++;

    /* push PDU */
    if (atm_charge(vcc, skb->truesize) == 0) {

        DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
                vcc->itf, vcc->vpi, vcc->vci);

        dev_kfree_skb_any(skb);

        atomic_inc(&vcc->stats->rx_drop);
        return -ENOMEM;
    }

    vcc->push(vcc, skb);
    atomic_inc(&vcc->stats->rx);

    return 0;
}


/* return the rx buffers referenced by an rpd to their free lists */
static void
fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
{
    struct host_bsq* bsq;
    struct buffer*   buffer;
    int              i;

    for (i = 0; i < rpd->nseg; i++) {

        /* rebuild rx buffer address from rsd handle */
        buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);

        bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];

#ifdef FORE200E_BSQ_DEBUG
        bsq_audit(2, bsq, buffer->scheme, buffer->magn);

        if (buffer->supplied == 0)
            printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
                   buffer->scheme, buffer->magn, buffer->index);
        buffer->supplied = 0;
#endif

        /* re-insert the buffer into the free buffer list */
        buffer->next = bsq->freebuf;
        bsq->freebuf = buffer;

        /* then increment the number of free rx buffers */
        bsq->freebuf_count++;
    }
}


/* service the rx queue: push completed PDUs up their vccs, recycle the
   rx buffers and resupply the adapter; callers hold fore200e->q_lock */
static void
fore200e_rx_irq(struct fore200e* fore200e)
{
    struct host_rxq*        rxq = &fore200e->host_rxq;
    struct host_rxq_entry*  entry;
    struct atm_vcc*         vcc;
    struct fore200e_vc_map* vc_map;

    for (;;) {

        entry = &rxq->host_entry[
                                 rxq->head ];

        /* no more received PDUs */
        if ((*entry->status & STATUS_COMPLETE) == 0)
            break;

        vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);

        /* drop PDUs whose VC is gone or not (yet/anymore) ready */
        if ((vc_map->vcc == NULL) ||
            (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {

            DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
                    fore200e->atm_dev->number,
                    entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
        }
        else {
            vcc = vc_map->vcc;
            ASSERT(vcc);

            if ((*entry->status & STATUS_ERROR) == 0) {

                fore200e_push_rpd(fore200e, vcc, entry->rpd);
            }
            else {
                DPRINTK(2, "damaged PDU on %d.%d.%d\n",
                        fore200e->atm_dev->number,
                        entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
                atomic_inc(&vcc->stats->rx_err);
            }
        }

        FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);

        fore200e_collect_rpd(fore200e, entry->rpd);

        /* rewrite the rpd address to ack the received PDU */
        fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
        *entry->status = STATUS_FREE;

        fore200e_supply(fore200e);
    }
}


#ifndef FORE200E_USE_TASKLET
/* non-tasklet mode: service rx then tx directly from the interrupt
   handler, each pass under q_lock */
static void
fore200e_irq(struct fore200e* fore200e)
{
    unsigned long flags;

    spin_lock_irqsave(&fore200e->q_lock, flags);
    fore200e_rx_irq(fore200e);
    spin_unlock_irqrestore(&fore200e->q_lock, flags);

    spin_lock_irqsave(&fore200e->q_lock, flags);
    fore200e_tx_irq(fore200e);
    spin_unlock_irqrestore(&fore200e->q_lock, flags);
}
#endif


/* interrupt handler: verify the board actually raised the interrupt,
   then either schedule the tasklets or service the queues inline, and
   finally ack the interrupt at the board level */
static irqreturn_t
fore200e_interrupt(int irq, void* dev)
{
    struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);

    if (fore200e->bus->irq_check(fore200e) == 0) {

        DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
        return IRQ_NONE;
    }
    DPRINTK(3, "interrupt 
triggered by device %d\n", fore200e->atm_dev->number); 1167 1168 #ifdef FORE200E_USE_TASKLET 1169 tasklet_schedule(&fore200e->tx_tasklet); 1170 tasklet_schedule(&fore200e->rx_tasklet); 1171 #else 1172 fore200e_irq(fore200e); 1173 #endif 1174 1175 fore200e->bus->irq_ack(fore200e); 1176 return IRQ_HANDLED; 1177 } 1178 1179 1180 #ifdef FORE200E_USE_TASKLET 1181 static void 1182 fore200e_tx_tasklet(unsigned long data) 1183 { 1184 struct fore200e* fore200e = (struct fore200e*) data; 1185 unsigned long flags; 1186 1187 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number); 1188 1189 spin_lock_irqsave(&fore200e->q_lock, flags); 1190 fore200e_tx_irq(fore200e); 1191 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1192 } 1193 1194 1195 static void 1196 fore200e_rx_tasklet(unsigned long data) 1197 { 1198 struct fore200e* fore200e = (struct fore200e*) data; 1199 unsigned long flags; 1200 1201 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number); 1202 1203 spin_lock_irqsave(&fore200e->q_lock, flags); 1204 fore200e_rx_irq((struct fore200e*) data); 1205 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1206 } 1207 #endif 1208 1209 1210 static int 1211 fore200e_select_scheme(struct atm_vcc* vcc) 1212 { 1213 /* fairly balance the VCs over (identical) buffer schemes */ 1214 int scheme = vcc->vci % 2 ? 
BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO; 1215 1216 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n", 1217 vcc->itf, vcc->vpi, vcc->vci, scheme); 1218 1219 return scheme; 1220 } 1221 1222 1223 static int 1224 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu) 1225 { 1226 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1227 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1228 struct activate_opcode activ_opcode; 1229 struct deactivate_opcode deactiv_opcode; 1230 struct vpvc vpvc; 1231 int ok; 1232 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal); 1233 1234 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1235 1236 if (activate) { 1237 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc); 1238 1239 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN; 1240 activ_opcode.aal = aal; 1241 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme; 1242 activ_opcode.pad = 0; 1243 } 1244 else { 1245 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN; 1246 deactiv_opcode.pad = 0; 1247 } 1248 1249 vpvc.vci = vcc->vci; 1250 vpvc.vpi = vcc->vpi; 1251 1252 *entry->status = STATUS_PENDING; 1253 1254 if (activate) { 1255 1256 #ifdef FORE200E_52BYTE_AAL0_SDU 1257 mtu = 48; 1258 #endif 1259 /* the MTU is not used by the cp, except in the case of AAL0 */ 1260 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu); 1261 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc); 1262 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode); 1263 } 1264 else { 1265 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc); 1266 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode); 1267 } 1268 1269 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1270 1271 *entry->status = STATUS_FREE; 1272 1273 if (ok == 0) { 1274 printk(FORE200E "unable to 
%s VC %d.%d.%d\n", 1275 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci); 1276 return -EIO; 1277 } 1278 1279 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci, 1280 activate ? "open" : "clos"); 1281 1282 return 0; 1283 } 1284 1285 1286 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */ 1287 1288 static void 1289 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate) 1290 { 1291 if (qos->txtp.max_pcr < ATM_OC3_PCR) { 1292 1293 /* compute the data cells to idle cells ratio from the tx PCR */ 1294 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR; 1295 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells; 1296 } 1297 else { 1298 /* disable rate control */ 1299 rate->data_cells = rate->idle_cells = 0; 1300 } 1301 } 1302 1303 1304 static int 1305 fore200e_open(struct atm_vcc *vcc) 1306 { 1307 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1308 struct fore200e_vcc* fore200e_vcc; 1309 struct fore200e_vc_map* vc_map; 1310 unsigned long flags; 1311 int vci = vcc->vci; 1312 short vpi = vcc->vpi; 1313 1314 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS)); 1315 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS)); 1316 1317 spin_lock_irqsave(&fore200e->q_lock, flags); 1318 1319 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci); 1320 if (vc_map->vcc) { 1321 1322 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1323 1324 printk(FORE200E "VC %d.%d.%d already in use\n", 1325 fore200e->atm_dev->number, vpi, vci); 1326 1327 return -EINVAL; 1328 } 1329 1330 vc_map->vcc = vcc; 1331 1332 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1333 1334 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC); 1335 if (fore200e_vcc == NULL) { 1336 vc_map->vcc = NULL; 1337 return -ENOMEM; 1338 } 1339 1340 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; " 1341 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n", 1342 vcc->itf, vcc->vpi, vcc->vci, 
fore200e_atm2fore_aal(vcc->qos.aal), 1343 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ], 1344 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu, 1345 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ], 1346 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu); 1347 1348 /* pseudo-CBR bandwidth requested? */ 1349 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1350 1351 mutex_lock(&fore200e->rate_mtx); 1352 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) { 1353 mutex_unlock(&fore200e->rate_mtx); 1354 1355 kfree(fore200e_vcc); 1356 vc_map->vcc = NULL; 1357 return -EAGAIN; 1358 } 1359 1360 /* reserve bandwidth */ 1361 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr; 1362 mutex_unlock(&fore200e->rate_mtx); 1363 } 1364 1365 vcc->itf = vcc->dev->number; 1366 1367 set_bit(ATM_VF_PARTIAL,&vcc->flags); 1368 set_bit(ATM_VF_ADDR, &vcc->flags); 1369 1370 vcc->dev_data = fore200e_vcc; 1371 1372 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) { 1373 1374 vc_map->vcc = NULL; 1375 1376 clear_bit(ATM_VF_ADDR, &vcc->flags); 1377 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1378 1379 vcc->dev_data = NULL; 1380 1381 mutex_lock(&fore200e->rate_mtx); 1382 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1383 mutex_unlock(&fore200e->rate_mtx); 1384 1385 kfree(fore200e_vcc); 1386 return -EINVAL; 1387 } 1388 1389 /* compute rate control parameters */ 1390 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1391 1392 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate); 1393 set_bit(ATM_VF_HASQOS, &vcc->flags); 1394 1395 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n", 1396 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1397 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr, 1398 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells); 
1399 } 1400 1401 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1; 1402 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0; 1403 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0; 1404 1405 /* new incarnation of the vcc */ 1406 vc_map->incarn = ++fore200e->incarn_count; 1407 1408 /* VC unusable before this flag is set */ 1409 set_bit(ATM_VF_READY, &vcc->flags); 1410 1411 return 0; 1412 } 1413 1414 1415 static void 1416 fore200e_close(struct atm_vcc* vcc) 1417 { 1418 struct fore200e_vcc* fore200e_vcc; 1419 struct fore200e* fore200e; 1420 struct fore200e_vc_map* vc_map; 1421 unsigned long flags; 1422 1423 ASSERT(vcc); 1424 fore200e = FORE200E_DEV(vcc->dev); 1425 1426 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS)); 1427 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS)); 1428 1429 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal)); 1430 1431 clear_bit(ATM_VF_READY, &vcc->flags); 1432 1433 fore200e_activate_vcin(fore200e, 0, vcc, 0); 1434 1435 spin_lock_irqsave(&fore200e->q_lock, flags); 1436 1437 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); 1438 1439 /* the vc is no longer considered as "in use" by fore200e_open() */ 1440 vc_map->vcc = NULL; 1441 1442 vcc->itf = vcc->vci = vcc->vpi = 0; 1443 1444 fore200e_vcc = FORE200E_VCC(vcc); 1445 vcc->dev_data = NULL; 1446 1447 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1448 1449 /* release reserved bandwidth, if any */ 1450 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1451 1452 mutex_lock(&fore200e->rate_mtx); 1453 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1454 mutex_unlock(&fore200e->rate_mtx); 1455 1456 clear_bit(ATM_VF_HASQOS, &vcc->flags); 1457 } 1458 1459 clear_bit(ATM_VF_ADDR, &vcc->flags); 1460 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1461 1462 ASSERT(fore200e_vcc); 1463 kfree(fore200e_vcc); 1464 } 1465 1466 1467 static int 1468 fore200e_send(struct atm_vcc 
*vcc, struct sk_buff *skb) 1469 { 1470 struct fore200e* fore200e; 1471 struct fore200e_vcc* fore200e_vcc; 1472 struct fore200e_vc_map* vc_map; 1473 struct host_txq* txq; 1474 struct host_txq_entry* entry; 1475 struct tpd* tpd; 1476 struct tpd_haddr tpd_haddr; 1477 int retry = CONFIG_ATM_FORE200E_TX_RETRY; 1478 int tx_copy = 0; 1479 int tx_len = skb->len; 1480 u32* cell_header = NULL; 1481 unsigned char* skb_data; 1482 int skb_len; 1483 unsigned char* data; 1484 unsigned long flags; 1485 1486 if (!vcc) 1487 return -EINVAL; 1488 1489 fore200e = FORE200E_DEV(vcc->dev); 1490 fore200e_vcc = FORE200E_VCC(vcc); 1491 1492 if (!fore200e) 1493 return -EINVAL; 1494 1495 txq = &fore200e->host_txq; 1496 if (!fore200e_vcc) 1497 return -EINVAL; 1498 1499 if (!test_bit(ATM_VF_READY, &vcc->flags)) { 1500 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi); 1501 dev_kfree_skb_any(skb); 1502 return -EINVAL; 1503 } 1504 1505 #ifdef FORE200E_52BYTE_AAL0_SDU 1506 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) { 1507 cell_header = (u32*) skb->data; 1508 skb_data = skb->data + 4; /* skip 4-byte cell header */ 1509 skb_len = tx_len = skb->len - 4; 1510 1511 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header); 1512 } 1513 else 1514 #endif 1515 { 1516 skb_data = skb->data; 1517 skb_len = skb->len; 1518 } 1519 1520 if (((unsigned long)skb_data) & 0x3) { 1521 1522 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name); 1523 tx_copy = 1; 1524 tx_len = skb_len; 1525 } 1526 1527 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) { 1528 1529 /* this simply NUKES the PCA board */ 1530 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name); 1531 tx_copy = 1; 1532 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD; 1533 } 1534 1535 if (tx_copy) { 1536 data = kmalloc(tx_len, GFP_ATOMIC); 1537 if (data == NULL) { 1538 if (vcc->pop) { 1539 vcc->pop(vcc, skb); 1540 } 1541 else { 1542 
dev_kfree_skb_any(skb); 1543 } 1544 return -ENOMEM; 1545 } 1546 1547 memcpy(data, skb_data, skb_len); 1548 if (skb_len < tx_len) 1549 memset(data + skb_len, 0x00, tx_len - skb_len); 1550 } 1551 else { 1552 data = skb_data; 1553 } 1554 1555 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); 1556 ASSERT(vc_map->vcc == vcc); 1557 1558 retry_here: 1559 1560 spin_lock_irqsave(&fore200e->q_lock, flags); 1561 1562 entry = &txq->host_entry[ txq->head ]; 1563 1564 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) { 1565 1566 /* try to free completed tx queue entries */ 1567 fore200e_tx_irq(fore200e); 1568 1569 if (*entry->status != STATUS_FREE) { 1570 1571 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1572 1573 /* retry once again? */ 1574 if (--retry > 0) { 1575 udelay(50); 1576 goto retry_here; 1577 } 1578 1579 atomic_inc(&vcc->stats->tx_err); 1580 1581 fore200e->tx_sat++; 1582 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n", 1583 fore200e->name, fore200e->cp_queues->heartbeat); 1584 if (vcc->pop) { 1585 vcc->pop(vcc, skb); 1586 } 1587 else { 1588 dev_kfree_skb_any(skb); 1589 } 1590 1591 if (tx_copy) 1592 kfree(data); 1593 1594 return -ENOBUFS; 1595 } 1596 } 1597 1598 entry->incarn = vc_map->incarn; 1599 entry->vc_map = vc_map; 1600 entry->skb = skb; 1601 entry->data = tx_copy ? data : NULL; 1602 1603 tpd = entry->tpd; 1604 tpd->tsd[ 0 ].buffer = dma_map_single(fore200e->dev, data, tx_len, 1605 DMA_TO_DEVICE); 1606 if (dma_mapping_error(fore200e->dev, tpd->tsd[0].buffer)) { 1607 if (tx_copy) 1608 kfree(data); 1609 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1610 return -ENOMEM; 1611 } 1612 tpd->tsd[ 0 ].length = tx_len; 1613 1614 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX); 1615 txq->txing++; 1616 1617 /* The dma_map call above implies a dma_sync so the device can use it, 1618 * thus no explicit dma_sync call is necessary here. 
1619 */ 1620 1621 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n", 1622 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1623 tpd->tsd[0].length, skb_len); 1624 1625 if (skb_len < fore200e_vcc->tx_min_pdu) 1626 fore200e_vcc->tx_min_pdu = skb_len; 1627 if (skb_len > fore200e_vcc->tx_max_pdu) 1628 fore200e_vcc->tx_max_pdu = skb_len; 1629 fore200e_vcc->tx_pdu++; 1630 1631 /* set tx rate control information */ 1632 tpd->rate.data_cells = fore200e_vcc->rate.data_cells; 1633 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells; 1634 1635 if (cell_header) { 1636 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP); 1637 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 1638 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT; 1639 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT; 1640 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT; 1641 } 1642 else { 1643 /* set the ATM header, common to all cells conveying the PDU */ 1644 tpd->atm_header.clp = 0; 1645 tpd->atm_header.plt = 0; 1646 tpd->atm_header.vci = vcc->vci; 1647 tpd->atm_header.vpi = vcc->vpi; 1648 tpd->atm_header.gfc = 0; 1649 } 1650 1651 tpd->spec.length = tx_len; 1652 tpd->spec.nseg = 1; 1653 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal); 1654 tpd->spec.intr = 1; 1655 1656 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */ 1657 tpd_haddr.pad = 0; 1658 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */ 1659 1660 *entry->status = STATUS_PENDING; 1661 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr); 1662 1663 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1664 1665 return 0; 1666 } 1667 1668 1669 static int 1670 fore200e_getstats(struct fore200e* fore200e) 1671 { 1672 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1673 struct host_cmdq_entry* 
entry = &cmdq->host_entry[ cmdq->head ];
    struct stats_opcode opcode;
    int ok;
    u32 stats_dma_addr;

    /* lazily allocate the host-side stats buffer on first use */
    if (fore200e->stats == NULL) {
	fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL);
	if (fore200e->stats == NULL)
	    return -ENOMEM;
    }

    stats_dma_addr = dma_map_single(fore200e->dev, fore200e->stats,
				    sizeof(struct stats), DMA_FROM_DEVICE);
    if (dma_mapping_error(fore200e->dev, stats_dma_addr))
	return -ENOMEM;

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_STATS;
    opcode.pad = 0;

    /* tell the cp where to DMA the statistics, then issue the command */
    fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);

    *entry->status = STATUS_PENDING;

    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    dma_unmap_single(fore200e->dev, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);

    if (ok == 0) {
	printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
	return -EIO;
    }

    return 0;
}

#if 0 /* currently unused */
/* Read the OC-3 (SUNI) registers from the cp into @regs. */
static int
fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
{
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct oc3_opcode opcode;
    int ok;
    u32 oc3_regs_dma_addr;

    oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_OC3;
    opcode.reg = 0;
    opcode.value = 0;
    opcode.mask = 0;

    fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);

    *entry->status = STATUS_PENDING;

    fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);

    if (ok == 0) {
	printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
	return -EIO;
    }

    return 0;
}
#endif


/* Read-modify-write one OC-3 (SUNI) register via a cp command:
   only the bits selected by @mask are set from @value. */
static int
fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
{
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct oc3_opcode opcode;
    int ok;

    DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_SET_OC3;
    opcode.reg = reg;
    opcode.value = value;
    opcode.mask = mask;

    fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);

    *entry->status = STATUS_PENDING;

    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    if (ok == 0) {
	printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
	return -EIO;
    }

    return 0;
}


/* Set the PHY loopback mode via the SUNI master control register.
   Requires CAP_NET_ADMIN. */
static int
fore200e_setloop(struct fore200e* fore200e, int loop_mode)
{
    u32 mct_value, mct_mask;
    int error;

    if (!capable(CAP_NET_ADMIN))
	return -EPERM;

    switch (loop_mode) {

    case ATM_LM_NONE:
	mct_value = 0;
	mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
	break;

    case ATM_LM_LOC_PHY:
	mct_value = mct_mask = SUNI_MCT_DLE;
	break;

    case ATM_LM_RMT_PHY:
mct_value = mct_mask = SUNI_MCT_LLE; 1813 break; 1814 1815 default: 1816 return -EINVAL; 1817 } 1818 1819 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask); 1820 if (error == 0) 1821 fore200e->loop_mode = loop_mode; 1822 1823 return error; 1824 } 1825 1826 1827 static int 1828 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg) 1829 { 1830 struct sonet_stats tmp; 1831 1832 if (fore200e_getstats(fore200e) < 0) 1833 return -EIO; 1834 1835 tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors); 1836 tmp.line_bip = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors); 1837 tmp.path_bip = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors); 1838 tmp.line_febe = be32_to_cpu(fore200e->stats->oc3.line_febe_errors); 1839 tmp.path_febe = be32_to_cpu(fore200e->stats->oc3.path_febe_errors); 1840 tmp.corr_hcs = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors); 1841 tmp.uncorr_hcs = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors); 1842 tmp.tx_cells = be32_to_cpu(fore200e->stats->aal0.cells_transmitted) + 1843 be32_to_cpu(fore200e->stats->aal34.cells_transmitted) + 1844 be32_to_cpu(fore200e->stats->aal5.cells_transmitted); 1845 tmp.rx_cells = be32_to_cpu(fore200e->stats->aal0.cells_received) + 1846 be32_to_cpu(fore200e->stats->aal34.cells_received) + 1847 be32_to_cpu(fore200e->stats->aal5.cells_received); 1848 1849 if (arg) 1850 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0; 1851 1852 return 0; 1853 } 1854 1855 1856 static int 1857 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg) 1858 { 1859 struct fore200e* fore200e = FORE200E_DEV(dev); 1860 1861 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg); 1862 1863 switch (cmd) { 1864 1865 case SONET_GETSTAT: 1866 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg); 1867 1868 case SONET_GETDIAG: 1869 return put_user(0, (int __user *)arg) ? 
-EFAULT : 0; 1870 1871 case ATM_SETLOOP: 1872 return fore200e_setloop(fore200e, (int)(unsigned long)arg); 1873 1874 case ATM_GETLOOP: 1875 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0; 1876 1877 case ATM_QUERYLOOP: 1878 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0; 1879 } 1880 1881 return -ENOSYS; /* not implemented */ 1882 } 1883 1884 1885 static int 1886 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags) 1887 { 1888 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc); 1889 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1890 1891 if (!test_bit(ATM_VF_READY, &vcc->flags)) { 1892 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi); 1893 return -EINVAL; 1894 } 1895 1896 DPRINTK(2, "change_qos %d.%d.%d, " 1897 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; " 1898 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n" 1899 "available_cell_rate = %u", 1900 vcc->itf, vcc->vpi, vcc->vci, 1901 fore200e_traffic_class[ qos->txtp.traffic_class ], 1902 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu, 1903 fore200e_traffic_class[ qos->rxtp.traffic_class ], 1904 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu, 1905 flags, fore200e->available_cell_rate); 1906 1907 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) { 1908 1909 mutex_lock(&fore200e->rate_mtx); 1910 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) { 1911 mutex_unlock(&fore200e->rate_mtx); 1912 return -EAGAIN; 1913 } 1914 1915 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1916 fore200e->available_cell_rate -= qos->txtp.max_pcr; 1917 1918 mutex_unlock(&fore200e->rate_mtx); 1919 1920 memcpy(&vcc->qos, qos, sizeof(struct atm_qos)); 1921 1922 /* update rate control parameters */ 1923 fore200e_rate_ctrl(qos, &fore200e_vcc->rate); 1924 1925 set_bit(ATM_VF_HASQOS, &vcc->flags); 1926 1927 
	return 0;
    }

    return -EINVAL;
}


/* Request the (shared) interrupt line and, when tasklets are enabled,
   set up the deferred tx/rx handlers.  Returns 0 or -EBUSY. */
static int fore200e_irq_request(struct fore200e *fore200e)
{
    if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {

	printk(FORE200E "unable to reserve IRQ %s for device %s\n",
	       fore200e_irq_itoa(fore200e->irq), fore200e->name);
	return -EBUSY;
    }

    printk(FORE200E "IRQ %s reserved for device %s\n",
	   fore200e_irq_itoa(fore200e->irq), fore200e->name);

#ifdef FORE200E_USE_TASKLET
    tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
    tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
#endif

    fore200e->state = FORE200E_STATE_IRQ;
    return 0;
}


/* Read the adapter PROM and extract the end system identifier (ESI,
   i.e. the MAC address) into both the driver and the atm_dev. */
static int fore200e_get_esi(struct fore200e *fore200e)
{
    struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL);
    int ok, i;

    if (!prom)
	return -ENOMEM;

    ok = fore200e->bus->prom_read(fore200e, prom);
    if (ok < 0) {
	kfree(prom);
	return -EBUSY;
    }

    printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
	   fore200e->name,
	   (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
	   prom->serial_number & 0xFFFF, &prom->mac_addr[2]);

    /* the 6-byte ESI starts at offset 2 of the 8-byte PROM field */
    for (i = 0; i < ESI_LEN; i++) {
	fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
    }

    kfree(prom);

    return 0;
}


/* Allocate the host rx buffer arrays and their DMA-able bodies for every
   (scheme, magnitude) combination, threading them onto the free lists. */
static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
{
    int scheme, magn, nbr, size, i;

    struct host_bsq* bsq;
    struct buffer* buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

	    nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
	    size = fore200e_rx_buf_size[ scheme ][ magn ];

	    DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);

	    /* allocate the array of receive buffers */
	    buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer),
					   GFP_KERNEL);

	    if (buffer == NULL)
		return -ENOMEM;

	    bsq->freebuf = NULL;

	    for (i = 0; i < nbr; i++) {

		buffer[ i ].scheme = scheme;
		buffer[ i ].magn = magn;
#ifdef FORE200E_BSQ_DEBUG
		buffer[ i ].index = i;
		buffer[ i ].supplied = 0;
#endif

		/* allocate the receive buffer body */
		if (fore200e_chunk_alloc(fore200e,
					 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
					 DMA_FROM_DEVICE) < 0) {

		    /* unwind the bodies allocated so far for this queue */
		    while (i > 0)
			fore200e_chunk_free(fore200e, &buffer[ --i ].data);
		    kfree(buffer);

		    return -ENOMEM;
		}

		/* insert the buffer into the free buffer list */
		buffer[ i ].next = bsq->freebuf;
		bsq->freebuf = &buffer[ i ];
	    }
	    /* all the buffers are free, initially */
	    bsq->freebuf_count = nbr;

#ifdef FORE200E_BSQ_DEBUG
	    bsq_audit(3, bsq, scheme, magn);
#endif
	}
    }

    fore200e->state =
FORE200E_STATE_ALLOC_BUF;
    return 0;
}


/* Allocate the host-side status and rbd-block arrays of every buffer
   supply queue and wire them to the cp resident queue entries. */
static int fore200e_init_bs_queue(struct fore200e *fore200e)
{
    int scheme, magn, i;

    struct host_bsq* bsq;
    struct cp_bsq_entry __iomem * cp_entry;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

	    /* allocate and align the array of status words */
	    if (fore200e_dma_chunk_alloc(fore200e,
					 &bsq->status,
					 sizeof(enum status),
					 QUEUE_SIZE_BS,
					 fore200e->bus->status_alignment) < 0) {
		return -ENOMEM;
	    }

	    /* allocate and align the array of receive buffer descriptors */
	    if (fore200e_dma_chunk_alloc(fore200e,
					 &bsq->rbd_block,
					 sizeof(struct rbd_block),
					 QUEUE_SIZE_BS,
					 fore200e->bus->descr_alignment) < 0) {

		fore200e_dma_chunk_free(fore200e, &bsq->status);
		return -ENOMEM;
	    }

	    /* get the base address of the cp resident buffer supply queue entries */
	    cp_entry = fore200e->virt_base +
		fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);

	    /* fill the host resident and cp resident buffer supply queue entries */
	    for (i = 0; i < QUEUE_SIZE_BS; i++) {

		bsq->host_entry[ i ].status =
		    FORE200E_INDEX(bsq->status.align_addr, enum status, i);
		bsq->host_entry[ i ].rbd_block =
		    FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
		bsq->host_entry[ i ].rbd_block_dma =
		    FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
		bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];

		*bsq->host_entry[ i ].status = STATUS_FREE;

		fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
				     &cp_entry[ i ].status_haddr);
	    }
	}
    }

    fore200e->state = FORE200E_STATE_INIT_BSQ;
    return 0;
}


/* Allocate the host receive queue (status words + rpd descriptors) and
   wire each entry to its cp resident counterpart. */
static int fore200e_init_rx_queue(struct fore200e *fore200e)
{
    struct host_rxq* rxq = &fore200e->host_rxq;
    struct cp_rxq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "receive queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e_dma_chunk_alloc(fore200e,
				 &rxq->status,
				 sizeof(enum status),
				 QUEUE_SIZE_RX,
				 fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* allocate and align the array of receive PDU descriptors */
    if (fore200e_dma_chunk_alloc(fore200e,
				 &rxq->rpd,
				 sizeof(struct rpd),
				 QUEUE_SIZE_RX,
				 fore200e->bus->descr_alignment) < 0) {

	fore200e_dma_chunk_free(fore200e, &rxq->status);
	return -ENOMEM;
    }

    /* get the base address of the cp resident rx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);

    /* fill the host resident and cp resident rx entries */
    for (i=0; i < QUEUE_SIZE_RX; i++) {

	rxq->host_entry[ i ].status =
	    FORE200E_INDEX(rxq->status.align_addr, enum status, i);
	rxq->host_entry[ i ].rpd =
	    FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
	rxq->host_entry[ i ].rpd_dma =
	    FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*rxq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);

	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
			     &cp_entry[ i ].rpd_haddr);
    }

    /* set the head entry of the queue */
    rxq->head = 0;

    fore200e->state = FORE200E_STATE_INIT_RXQ;
    return 0;
}


/* Allocate the host transmit queue (status words + tpd descriptors) and
   wire each entry to its cp resident counterpart. */
static int fore200e_init_tx_queue(struct fore200e *fore200e)
{
    struct
host_txq* txq = &fore200e->host_txq;
    struct cp_txq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "transmit queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e_dma_chunk_alloc(fore200e,
				 &txq->status,
				 sizeof(enum status),
				 QUEUE_SIZE_TX,
				 fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* allocate and align the array of transmit PDU descriptors */
    if (fore200e_dma_chunk_alloc(fore200e,
				 &txq->tpd,
				 sizeof(struct tpd),
				 QUEUE_SIZE_TX,
				 fore200e->bus->descr_alignment) < 0) {

	fore200e_dma_chunk_free(fore200e, &txq->status);
	return -ENOMEM;
    }

    /* get the base address of the cp resident tx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);

    /* fill the host resident and cp resident tx entries */
    for (i=0; i < QUEUE_SIZE_TX; i++) {

	txq->host_entry[ i ].status =
	    FORE200E_INDEX(txq->status.align_addr, enum status, i);
	txq->host_entry[ i ].tpd =
	    FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
	txq->host_entry[ i ].tpd_dma =
	    FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
	txq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*txq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);

	/* although there is a one-to-one mapping of tx queue entries and tpds,
	   we do not write here the DMA (physical) base address of each tpd into
	   the related cp resident entry, because the cp relies on this write
	   operation to detect that a new pdu has been submitted for tx */
    }

    /* set the head and tail entries of the queue */
    txq->head = 0;
    txq->tail = 0;

    fore200e->state = FORE200E_STATE_INIT_TXQ;
    return 0;
}


/* Allocate the host command queue status words and wire each entry to
   its cp resident counterpart. */
static int fore200e_init_cmd_queue(struct fore200e *fore200e)
{
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct cp_cmdq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "command queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e_dma_chunk_alloc(fore200e,
				 &cmdq->status,
				 sizeof(enum status),
				 QUEUE_SIZE_CMD,
				 fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* get the base address of the cp resident cmd queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);

    /* fill the host resident and cp resident cmd entries */
    for (i=0; i < QUEUE_SIZE_CMD; i++) {

	cmdq->host_entry[ i ].status =
	    FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
	cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*cmdq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);
    }

    /* set the head entry of the queue */
    cmdq->head = 0;

    fore200e->state = FORE200E_STATE_INIT_CMDQ;
    return 0;
}


/* Fill in one buffer supply queue specification block in the cp's
   initialization area. */
static void fore200e_param_bs_queue(struct fore200e *fore200e,
				    enum buffer_scheme scheme,
				    enum buffer_magn magn, int queue_length,
				    int pool_size, int supply_blksize)
{
    struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];

    fore200e->bus->write(queue_length, &bs_spec->queue_length);
    fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
    fore200e->bus->write(pool_size, &bs_spec->pool_size);
    fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
}


/* Program the cp initialization block (queue sizes, buffer schemes),
   issue the INITIALIZE command and poll for its completion. */
static int fore200e_initialize(struct fore200e *fore200e)
{
    struct cp_queues __iomem * cpq;
    int ok, scheme, magn;

    DPRINTK(2, "device %s being initialized\n", fore200e->name);

    mutex_init(&fore200e->rate_mtx);
    spin_lock_init(&fore200e->q_lock);

    cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;

    /* enable cp to host interrupts */
    fore200e->bus->write(1, &cpq->imask);

    if (fore200e->bus->irq_enable)
	fore200e->bus->irq_enable(fore200e);

    fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);

    fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
    fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
    fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);

    fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
    fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
	    fore200e_param_bs_queue(fore200e, scheme, magn,
				    QUEUE_SIZE_BS,
				    fore200e_rx_buf_nbr[ scheme ][ magn ],
				    RBD_BLK_SIZE);

    /* issue the initialize command */
    fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
    fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);

    ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
    if (ok == 0) {
	printk(FORE200E "device %s initialization failed\n", fore200e->name);
	return -ENODEV;
    }

    printk(FORE200E "device %s initialized\n", fore200e->name);

    fore200e->state = FORE200E_STATE_INITIALIZE;
    return 0;
}


/* Send one character to the cp's soft UART (firmware monitor console). */
static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
{
    struct cp_monitor __iomem * monitor = fore200e->cp_monitor;

#if 0
    printk("%c", c);
#endif
    fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
}


static int fore200e_monitor_getc(struct fore200e *fore200e)
{
    struct
cp_monitor __iomem * monitor = fore200e->cp_monitor; 2353 unsigned long timeout = jiffies + msecs_to_jiffies(50); 2354 int c; 2355 2356 while (time_before(jiffies, timeout)) { 2357 2358 c = (int) fore200e->bus->read(&monitor->soft_uart.recv); 2359 2360 if (c & FORE200E_CP_MONITOR_UART_AVAIL) { 2361 2362 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv); 2363 #if 0 2364 printk("%c", c & 0xFF); 2365 #endif 2366 return c & 0xFF; 2367 } 2368 } 2369 2370 return -1; 2371 } 2372 2373 2374 static void fore200e_monitor_puts(struct fore200e *fore200e, char *str) 2375 { 2376 while (*str) { 2377 2378 /* the i960 monitor doesn't accept any new character if it has something to say */ 2379 while (fore200e_monitor_getc(fore200e) >= 0); 2380 2381 fore200e_monitor_putc(fore200e, *str++); 2382 } 2383 2384 while (fore200e_monitor_getc(fore200e) >= 0); 2385 } 2386 2387 #ifdef __LITTLE_ENDIAN 2388 #define FW_EXT ".bin" 2389 #else 2390 #define FW_EXT "_ecd.bin2" 2391 #endif 2392 2393 static int fore200e_load_and_start_fw(struct fore200e *fore200e) 2394 { 2395 const struct firmware *firmware; 2396 const struct fw_header *fw_header; 2397 const __le32 *fw_data; 2398 u32 fw_size; 2399 u32 __iomem *load_addr; 2400 char buf[48]; 2401 int err; 2402 2403 sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT); 2404 if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) { 2405 printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name); 2406 return err; 2407 } 2408 2409 fw_data = (const __le32 *)firmware->data; 2410 fw_size = firmware->size / sizeof(u32); 2411 fw_header = (const struct fw_header *)firmware->data; 2412 load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset); 2413 2414 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n", 2415 fore200e->name, load_addr, fw_size); 2416 2417 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) { 2418 printk(FORE200E "corrupted %s firmware image\n", 
fore200e->bus->model_name); 2419 goto release; 2420 } 2421 2422 for (; fw_size--; fw_data++, load_addr++) 2423 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr); 2424 2425 DPRINTK(2, "device %s firmware being started\n", fore200e->name); 2426 2427 #if defined(__sparc_v9__) 2428 /* reported to be required by SBA cards on some sparc64 hosts */ 2429 fore200e_spin(100); 2430 #endif 2431 2432 sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset)); 2433 fore200e_monitor_puts(fore200e, buf); 2434 2435 if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) { 2436 printk(FORE200E "device %s firmware didn't start\n", fore200e->name); 2437 goto release; 2438 } 2439 2440 printk(FORE200E "device %s firmware started\n", fore200e->name); 2441 2442 fore200e->state = FORE200E_STATE_START_FW; 2443 err = 0; 2444 2445 release: 2446 release_firmware(firmware); 2447 return err; 2448 } 2449 2450 2451 static int fore200e_register(struct fore200e *fore200e, struct device *parent) 2452 { 2453 struct atm_dev* atm_dev; 2454 2455 DPRINTK(2, "device %s being registered\n", fore200e->name); 2456 2457 atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops, 2458 -1, NULL); 2459 if (atm_dev == NULL) { 2460 printk(FORE200E "unable to register device %s\n", fore200e->name); 2461 return -ENODEV; 2462 } 2463 2464 atm_dev->dev_data = fore200e; 2465 fore200e->atm_dev = atm_dev; 2466 2467 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS; 2468 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS; 2469 2470 fore200e->available_cell_rate = ATM_OC3_PCR; 2471 2472 fore200e->state = FORE200E_STATE_REGISTER; 2473 return 0; 2474 } 2475 2476 2477 static int fore200e_init(struct fore200e *fore200e, struct device *parent) 2478 { 2479 if (fore200e_register(fore200e, parent) < 0) 2480 return -ENODEV; 2481 2482 if (fore200e->bus->configure(fore200e) < 0) 2483 return -ENODEV; 2484 2485 if (fore200e->bus->map(fore200e) < 0) 2486 return -ENODEV; 2487 2488 if 
(fore200e_reset(fore200e, 1) < 0)
	return -ENODEV;

    if (fore200e_load_and_start_fw(fore200e) < 0)
	return -ENODEV;

    if (fore200e_initialize(fore200e) < 0)
	return -ENODEV;

    /* host resident queues: command, tx, rx, then buffer supply */
    if (fore200e_init_cmd_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_tx_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_rx_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_bs_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_alloc_rx_buf(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_get_esi(fore200e) < 0)
	return -EIO;

    if (fore200e_irq_request(fore200e) < 0)
	return -EBUSY;

    /* hand the initial batch of receive buffers to the cp */
    fore200e_supply(fore200e);

    /* all done, board initialization is now complete */
    fore200e->state = FORE200E_STATE_COMPLETE;
    return 0;
}

#ifdef CONFIG_SBUS
/* probe one SBA-200E SBus board; on failure the partially initialized
   board is unwound via fore200e_shutdown() before being freed */
static int fore200e_sba_probe(struct platform_device *op)
{
    struct fore200e *fore200e;
    static int index = 0;	/* sequential unit number used in the device name */
    int err;

    fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
    if (!fore200e)
	return -ENOMEM;

    fore200e->bus = &fore200e_sbus_ops;
    fore200e->dev = &op->dev;
    fore200e->irq = op->archdata.irqs[0];
    fore200e->phys_base = op->resource[0].start;

    sprintf(fore200e->name, "SBA-200E-%d", index);

    err = fore200e_init(fore200e, &op->dev);
    if (err < 0) {
	fore200e_shutdown(fore200e);
	kfree(fore200e);
	return err;
    }

    /* only count boards that made it all the way up */
    index++;
    dev_set_drvdata(&op->dev, fore200e);

    return 0;
}

static void fore200e_sba_remove(struct platform_device *op)
{
    struct fore200e *fore200e = dev_get_drvdata(&op->dev);

    fore200e_shutdown(fore200e);
    kfree(fore200e);
}

/* match on the PROM node name of the SBA-200E */
static const struct of_device_id fore200e_sba_match[] = {
    {
	.name = SBA200E_PROM_NAME,
    },
    {},
};
MODULE_DEVICE_TABLE(of,
fore200e_sba_match); 2571 2572 static struct platform_driver fore200e_sba_driver = { 2573 .driver = { 2574 .name = "fore_200e", 2575 .of_match_table = fore200e_sba_match, 2576 }, 2577 .probe = fore200e_sba_probe, 2578 .remove = fore200e_sba_remove, 2579 }; 2580 #endif 2581 2582 #ifdef CONFIG_PCI 2583 static int fore200e_pca_detect(struct pci_dev *pci_dev, 2584 const struct pci_device_id *pci_ent) 2585 { 2586 struct fore200e* fore200e; 2587 int err = 0; 2588 static int index = 0; 2589 2590 if (pci_enable_device(pci_dev)) { 2591 err = -EINVAL; 2592 goto out; 2593 } 2594 2595 if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) { 2596 err = -EINVAL; 2597 goto out; 2598 } 2599 2600 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); 2601 if (fore200e == NULL) { 2602 err = -ENOMEM; 2603 goto out_disable; 2604 } 2605 2606 fore200e->bus = &fore200e_pci_ops; 2607 fore200e->dev = &pci_dev->dev; 2608 fore200e->irq = pci_dev->irq; 2609 fore200e->phys_base = pci_resource_start(pci_dev, 0); 2610 2611 sprintf(fore200e->name, "PCA-200E-%d", index - 1); 2612 2613 pci_set_master(pci_dev); 2614 2615 printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n", 2616 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq)); 2617 2618 sprintf(fore200e->name, "PCA-200E-%d", index); 2619 2620 err = fore200e_init(fore200e, &pci_dev->dev); 2621 if (err < 0) { 2622 fore200e_shutdown(fore200e); 2623 goto out_free; 2624 } 2625 2626 ++index; 2627 pci_set_drvdata(pci_dev, fore200e); 2628 2629 out: 2630 return err; 2631 2632 out_free: 2633 kfree(fore200e); 2634 out_disable: 2635 pci_disable_device(pci_dev); 2636 goto out; 2637 } 2638 2639 2640 static void fore200e_pca_remove_one(struct pci_dev *pci_dev) 2641 { 2642 struct fore200e *fore200e; 2643 2644 fore200e = pci_get_drvdata(pci_dev); 2645 2646 fore200e_shutdown(fore200e); 2647 kfree(fore200e); 2648 pci_disable_device(pci_dev); 2649 } 2650 2651 2652 static const struct pci_device_id fore200e_pca_tbl[] = { 2653 { 
PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID }, 2654 { 0, } 2655 }; 2656 2657 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl); 2658 2659 static struct pci_driver fore200e_pca_driver = { 2660 .name = "fore_200e", 2661 .probe = fore200e_pca_detect, 2662 .remove = fore200e_pca_remove_one, 2663 .id_table = fore200e_pca_tbl, 2664 }; 2665 #endif 2666 2667 static int __init fore200e_module_init(void) 2668 { 2669 int err = 0; 2670 2671 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n"); 2672 2673 #ifdef CONFIG_SBUS 2674 err = platform_driver_register(&fore200e_sba_driver); 2675 if (err) 2676 return err; 2677 #endif 2678 2679 #ifdef CONFIG_PCI 2680 err = pci_register_driver(&fore200e_pca_driver); 2681 #endif 2682 2683 #ifdef CONFIG_SBUS 2684 if (err) 2685 platform_driver_unregister(&fore200e_sba_driver); 2686 #endif 2687 2688 return err; 2689 } 2690 2691 static void __exit fore200e_module_cleanup(void) 2692 { 2693 #ifdef CONFIG_PCI 2694 pci_unregister_driver(&fore200e_pca_driver); 2695 #endif 2696 #ifdef CONFIG_SBUS 2697 platform_driver_unregister(&fore200e_sba_driver); 2698 #endif 2699 } 2700 2701 static int 2702 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page) 2703 { 2704 struct fore200e* fore200e = FORE200E_DEV(dev); 2705 struct fore200e_vcc* fore200e_vcc; 2706 struct atm_vcc* vcc; 2707 int i, len, left = *pos; 2708 unsigned long flags; 2709 2710 if (!left--) { 2711 2712 if (fore200e_getstats(fore200e) < 0) 2713 return -EIO; 2714 2715 len = sprintf(page,"\n" 2716 " device:\n" 2717 " internal name:\t\t%s\n", fore200e->name); 2718 2719 /* print bus-specific information */ 2720 if (fore200e->bus->proc_read) 2721 len += fore200e->bus->proc_read(fore200e, page + len); 2722 2723 len += sprintf(page + len, 2724 " interrupt line:\t\t%s\n" 2725 " physical base address:\t0x%p\n" 2726 " virtual base address:\t0x%p\n" 2727 " factory address (ESI):\t%pM\n" 2728 " board serial number:\t\t%d\n\n", 2729 
fore200e_irq_itoa(fore200e->irq),
		       (void*)fore200e->phys_base,
		       fore200e->virt_base,
		       fore200e->esi,
		       /* serial number is carried in the last two ESI bytes */
		       fore200e->esi[4] * 256 + fore200e->esi[5]);

	return len;
    }

    /* free receive buffer counts, per scheme and size */
    if (!left--)
	return sprintf(page,
		       " free small bufs, scheme 1:\t%d\n"
		       " free large bufs, scheme 1:\t%d\n"
		       " free small bufs, scheme 2:\t%d\n"
		       " free large bufs, scheme 2:\t%d\n",
		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);

    /* cell processor heartbeat; high half 0xDEAD flags a firmware fault */
    if (!left--) {
	u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);

	len = sprintf(page,"\n\n"
		      " cell processor:\n"
		      " heartbeat state:\t\t");

	if (hb >> 16 != 0xDEAD)
	    len += sprintf(page + len, "0x%08x\n", hb);
	else
	    len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);

	return len;
    }

    /* firmware/monitor revisions, physical medium and OC-3 loopback mode */
    if (!left--) {
	static const char* media_name[] = {
	    "unshielded twisted pair",
	    "multimode optical fiber ST",
	    "multimode optical fiber SC",
	    "single-mode optical fiber ST",
	    "single-mode optical fiber SC",
	    "unknown"
	};

	static const char* oc3_mode[] = {
	    "normal operation",
	    "diagnostic loopback",
	    "line loopback",
	    "unknown"
	};

	u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
	u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
	u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
	u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
	u32 oc3_index;

	/* clamp out-of-range media codes to the "unknown" slot */
	if (media_index > 4)
	    media_index = 5;

	switch (fore200e->loop_mode) {
	    case ATM_LM_NONE:    oc3_index = 0;
		break;
	    case ATM_LM_LOC_PHY: oc3_index = 1;
		break;
	    case ATM_LM_RMT_PHY: oc3_index = 2;
		break;
	    default:             oc3_index = 3;
	}

	return sprintf(page,
		       " firmware release:\t\t%d.%d.%d\n"
		       " monitor release:\t\t%d.%d\n"
		       " media type:\t\t\t%s\n"
		       " OC-3 revision:\t\t0x%x\n"
		       " OC-3 mode:\t\t\t%s",
		       /* shift pairs extract the packed version fields */
		       fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
		       mon960_release >> 16, mon960_release << 16 >> 16,
		       media_name[ media_index ],
		       oc3_revision,
		       oc3_mode[ oc3_index ]);
    }

    /* i960 monitor identification */
    if (!left--) {
	struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;

	return sprintf(page,
		       "\n\n"
		       " monitor:\n"
		       " version number:\t\t%d\n"
		       " boot status word:\t\t0x%08x\n",
		       fore200e->bus->read(&cp_monitor->mon_version),
		       fore200e->bus->read(&cp_monitor->bstat));
    }

    /* device statistics; counters arrive from the board in big-endian */
    if (!left--)
	return sprintf(page,
		       "\n"
		       " device statistics:\n"
		       " 4b5b:\n"
		       " crc_header_errors:\t\t%10u\n"
		       " framing_errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->phy.crc_header_errors),
		       be32_to_cpu(fore200e->stats->phy.framing_errors));

    if (!left--)
	return sprintf(page, "\n"
		       " OC-3:\n"
		       " section_bip8_errors:\t%10u\n"
		       " path_bip8_errors:\t\t%10u\n"
		       " line_bip24_errors:\t\t%10u\n"
		       " line_febe_errors:\t\t%10u\n"
		       " path_febe_errors:\t\t%10u\n"
		       " corr_hcs_errors:\t\t%10u\n"
		       " ucorr_hcs_errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
		       be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
		       be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
		       be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
		       be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
		       be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
		       be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));

    if (!left--)
	return sprintf(page,"\n"
		       " ATM:\t\t\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " vpi out of range:\t\t%10u\n"
		       " vpi no conn:\t\t%10u\n"
		       " vci out of range:\t\t%10u\n"
		       " vci no conn:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->atm.cells_transmitted),
		       be32_to_cpu(fore200e->stats->atm.cells_received),
		       be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
		       be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
		       be32_to_cpu(fore200e->stats->atm.vci_bad_range),
		       be32_to_cpu(fore200e->stats->atm.vci_no_conn));

    if (!left--)
	return sprintf(page,"\n"
		       " AAL0:\t\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
		       be32_to_cpu(fore200e->stats->aal0.cells_received),
		       be32_to_cpu(fore200e->stats->aal0.cells_dropped));

    if (!left--)
	return sprintf(page,"\n"
		       " AAL3/4:\n"
		       " SAR sublayer:\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " CRC errors:\t\t%10u\n"
		       " protocol errors:\t\t%10u\n\n"
		       " CS sublayer:\t\t PDUs\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " protocol errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
		       be32_to_cpu(fore200e->stats->aal34.cells_received),
		       be32_to_cpu(fore200e->stats->aal34.cells_dropped),
		       be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
		       be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_received),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));

    if (!left--)
	return sprintf(page,"\n"
		       " AAL5:\n"
		       " SAR sublayer:\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " congestions:\t\t%10u\n\n"
		       " CS sublayer:\t\t PDUs\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " CRC errors:\t\t%10u\n"
		       " protocol errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
		       be32_to_cpu(fore200e->stats->aal5.cells_received),
		       be32_to_cpu(fore200e->stats->aal5.cells_dropped),
		       be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_received),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));

    /* auxiliary allocation failure counters; tx_sat is host-maintained */
    if (!left--)
	return sprintf(page,"\n"
		       " AUX:\t\t allocation failures\n"
		       " small b1:\t\t\t%10u\n"
		       " large b1:\t\t\t%10u\n"
		       " small b2:\t\t\t%10u\n"
		       " large b2:\t\t\t%10u\n"
		       " RX PDUs:\t\t\t%10u\n"
		       " TX PDUs:\t\t\t%10lu\n",
		       be32_to_cpu(fore200e->stats->aux.small_b1_failed),
		       be32_to_cpu(fore200e->stats->aux.large_b1_failed),
		       be32_to_cpu(fore200e->stats->aux.small_b2_failed),
		       be32_to_cpu(fore200e->stats->aux.large_b2_failed),
		       be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
		       fore200e->tx_sat);

    if (!left--)
	return sprintf(page,"\n"
		       " receive carrier:\t\t\t%s\n",
		       fore200e->stats->aux.receive_carrier ?
"ON" : "OFF!"); 2947 2948 if (!left--) { 2949 return sprintf(page,"\n" 2950 " VCCs:\n address VPI VCI AAL " 2951 "TX PDUs TX min/max size RX PDUs RX min/max size\n"); 2952 } 2953 2954 for (i = 0; i < NBR_CONNECT; i++) { 2955 2956 vcc = fore200e->vc_map[i].vcc; 2957 2958 if (vcc == NULL) 2959 continue; 2960 2961 spin_lock_irqsave(&fore200e->q_lock, flags); 2962 2963 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) { 2964 2965 fore200e_vcc = FORE200E_VCC(vcc); 2966 ASSERT(fore200e_vcc); 2967 2968 len = sprintf(page, 2969 " %pK %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n", 2970 vcc, 2971 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 2972 fore200e_vcc->tx_pdu, 2973 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu, 2974 fore200e_vcc->tx_max_pdu, 2975 fore200e_vcc->rx_pdu, 2976 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu, 2977 fore200e_vcc->rx_max_pdu); 2978 2979 spin_unlock_irqrestore(&fore200e->q_lock, flags); 2980 return len; 2981 } 2982 2983 spin_unlock_irqrestore(&fore200e->q_lock, flags); 2984 } 2985 2986 return 0; 2987 } 2988 2989 module_init(fore200e_module_init); 2990 module_exit(fore200e_module_cleanup); 2991 2992 2993 static const struct atmdev_ops fore200e_ops = { 2994 .open = fore200e_open, 2995 .close = fore200e_close, 2996 .ioctl = fore200e_ioctl, 2997 .send = fore200e_send, 2998 .change_qos = fore200e_change_qos, 2999 .proc_read = fore200e_proc_read, 3000 .owner = THIS_MODULE 3001 }; 3002 3003 MODULE_LICENSE("GPL"); 3004 #ifdef CONFIG_PCI 3005 #ifdef __LITTLE_ENDIAN__ 3006 MODULE_FIRMWARE("pca200e.bin"); 3007 #else 3008 MODULE_FIRMWARE("pca200e_ecd.bin2"); 3009 #endif 3010 #endif /* CONFIG_PCI */ 3011 #ifdef CONFIG_SBUS 3012 MODULE_FIRMWARE("sba200e_ecd.bin2"); 3013 #endif 3014